author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-30 08:59:57 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-30 08:59:57 -0700
commit     40caf5ea5a7d47f8a33e26b63ca81dea4b5109d2 (patch)
tree       3f879353d5cb69d2dee707108e4aaeae075f5a0c /drivers
parent     d6454706c382ab74e2ecad7803c434cc6bd30343 (diff)
parent     bcfd09ee48f77a4fe903dbc3757e7af931998ce1 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (56 commits)
  ieee1394: remove garbage from Kconfig
  ieee1394: more help in Kconfig
  ieee1394: ohci1394: Fix mistake in printk message.
  ieee1394: ohci1394: remove unnecessary rcvPhyPkt bit flipping in LinkControl register
  ieee1394: ohci1394: fix cosmetic problem in error logging
  ieee1394: eth1394: send async streams at S100 on 1394b buses
  ieee1394: eth1394: fix error path in module_init
  ieee1394: eth1394: correct return codes in hard_start_xmit
  ieee1394: eth1394: hard_start_xmit is called in atomic context
  ieee1394: eth1394: some conditions are unlikely
  ieee1394: eth1394: clean up fragment_overlap
  ieee1394: eth1394: don't use alloc_etherdev
  ieee1394: eth1394: omit useless set_mac_address callback
  ieee1394: eth1394: CONFIG_INET is always defined
  ieee1394: eth1394: allow MTU bigger than 1500
  ieee1394: unexport highlevel_host_reset
  ieee1394: eth1394: contain host reset
  ieee1394: eth1394: shorter error messages
  ieee1394: eth1394: correct a memset argument
  ieee1394: eth1394: refactor .probe and .update
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ieee1394/Kconfig | 52
-rw-r--r--  drivers/ieee1394/config_roms.c | 93
-rw-r--r--  drivers/ieee1394/config_roms.h | 20
-rw-r--r--  drivers/ieee1394/csr1212.c | 870
-rw-r--r--  drivers/ieee1394/csr1212.h | 483
-rw-r--r--  drivers/ieee1394/dma.c | 24
-rw-r--r--  drivers/ieee1394/dma.h | 22
-rw-r--r--  drivers/ieee1394/eth1394.c | 798
-rw-r--r--  drivers/ieee1394/eth1394.h | 25
-rw-r--r--  drivers/ieee1394/highlevel.c | 89
-rw-r--r--  drivers/ieee1394/highlevel.h | 55
-rw-r--r--  drivers/ieee1394/hosts.c | 23
-rw-r--r--  drivers/ieee1394/hosts.h | 10
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 461
-rw-r--r--  drivers/ieee1394/ieee1394_core.h | 100
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.c | 43
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.h | 20
-rw-r--r--  drivers/ieee1394/iso.c | 85
-rw-r--r--  drivers/ieee1394/iso.h | 35
-rw-r--r--  drivers/ieee1394/nodemgr.c | 61
-rw-r--r--  drivers/ieee1394/nodemgr.h | 24
-rw-r--r--  drivers/ieee1394/ohci1394.c | 12
-rw-r--r--  drivers/ieee1394/ohci1394.h | 4
-rw-r--r--  drivers/ieee1394/raw1394.c | 3
-rw-r--r--  drivers/ieee1394/sbp2.c | 39
-rw-r--r--  drivers/ieee1394/sbp2.h | 8
26 files changed, 1446 insertions, 2013 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index cd84a55ecf2..61d7809a5a2 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,11 +1,8 @@
-# -*- shell-script -*-
-
menu "IEEE 1394 (FireWire) support"
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
depends on PCI || BROKEN
- select NET
help
IEEE 1394 describes a high performance serial bus, which is also
known as FireWire(tm) or i.Link(tm) and is used for connecting all
@@ -35,24 +32,7 @@ config IEEE1394_VERBOSEDEBUG
Say Y if you really want or need the debugging output, everyone
else says N.
-config IEEE1394_EXTRA_CONFIG_ROMS
- bool "Build in extra config rom entries for certain functionality"
- depends on IEEE1394
- help
- Some IEEE1394 functionality depends on extra config rom entries
- being available in the host adapters CSR. These options will
- allow you to choose which ones.
-
-config IEEE1394_CONFIG_ROM_IP1394
- bool "IP-1394 Entry"
- depends on IEEE1394_EXTRA_CONFIG_ROMS && IEEE1394
- help
- Adds an entry for using IP-over-1394. If you want to use your
- IEEE1394 bus as a network for IP systems (including interacting
- with MacOSX and WinXP IP-over-1394), enable this option and the
- eth1394 option below.
-
-comment "Device Drivers"
+comment "Controllers"
depends on IEEE1394
comment "Texas Instruments PCILynx requires I2C"
@@ -70,6 +50,10 @@ config IEEE1394_PCILYNX
To compile this driver as a module, say M here: the
module will be called pcilynx.
+ Only some old and now very rare PCI and CardBus cards and
+ PowerMacs G3 B&W contain the PCILynx controller. Therefore
+ almost everybody can say N here.
+
config IEEE1394_OHCI1394
tristate "OHCI-1394 support"
depends on PCI && IEEE1394
@@ -83,7 +67,7 @@ config IEEE1394_OHCI1394
To compile this driver as a module, say M here: the
module will be called ohci1394.
-comment "Protocol Drivers"
+comment "Protocols"
depends on IEEE1394
config IEEE1394_VIDEO1394
@@ -121,11 +105,15 @@ config IEEE1394_SBP2_PHYS_DMA
This option is buggy and currently broken on some architectures.
If unsure, say N.
+config IEEE1394_ETH1394_ROM_ENTRY
+ depends on IEEE1394
+ bool
+ default n
+
config IEEE1394_ETH1394
- tristate "Ethernet over 1394"
+ tristate "IP over 1394"
depends on IEEE1394 && EXPERIMENTAL && INET
- select IEEE1394_CONFIG_ROM_IP1394
- select IEEE1394_EXTRA_CONFIG_ROMS
+ select IEEE1394_ETH1394_ROM_ENTRY
help
This driver implements a functional majority of RFC 2734: IPv4 over
1394. It will provide IP connectivity with implementations of RFC
@@ -134,6 +122,8 @@ config IEEE1394_ETH1394
This driver is still considered experimental. It does not yet support
MCAP, therefore multicast support is significantly limited.
+ The module is called eth1394 although it does not emulate Ethernet.
+
config IEEE1394_DV1394
tristate "OHCI-DV I/O support (deprecated)"
depends on IEEE1394 && IEEE1394_OHCI1394
@@ -146,12 +136,12 @@ config IEEE1394_RAWIO
tristate "Raw IEEE1394 I/O support"
depends on IEEE1394
help
- Say Y here if you want support for the raw device. This is generally
- a good idea, so you should say Y here. The raw device enables
- direct communication of user programs with the IEEE 1394 bus and
- thus with the attached peripherals.
+ This option adds support for the raw1394 device file which enables
+ direct communication of user programs with the IEEE 1394 bus and thus
+ with the attached peripherals. Almost all application programs which
+ access FireWire require this option.
- To compile this driver as a module, say M here: the
- module will be called raw1394.
+ To compile this driver as a module, say M here: the module will be
+ called raw1394.
endmenu
diff --git a/drivers/ieee1394/config_roms.c b/drivers/ieee1394/config_roms.c
index e2de6fa0c9f..1b981207fa7 100644
--- a/drivers/ieee1394/config_roms.c
+++ b/drivers/ieee1394/config_roms.c
@@ -26,12 +26,6 @@ struct hpsb_config_rom_entry {
/* Base initialization, called at module load */
int (*init)(void);
- /* Add entry to specified host */
- int (*add)(struct hpsb_host *host);
-
- /* Remove entry from specified host */
- void (*remove)(struct hpsb_host *host);
-
/* Cleanup called at module exit */
void (*cleanup)(void);
@@ -39,7 +33,7 @@ struct hpsb_config_rom_entry {
unsigned int flag;
};
-
+/* The default host entry. This must succeed. */
int hpsb_default_host_entry(struct hpsb_host *host)
{
struct csr1212_keyval *root;
@@ -63,9 +57,9 @@ int hpsb_default_host_entry(struct hpsb_host *host)
return -ENOMEM;
}
- ret = csr1212_associate_keyval(vend_id, text);
+ csr1212_associate_keyval(vend_id, text);
csr1212_release_keyval(text);
- ret |= csr1212_attach_keyval_to_directory(root, vend_id);
+ ret = csr1212_attach_keyval_to_directory(root, vend_id);
csr1212_release_keyval(vend_id);
if (ret != CSR1212_SUCCESS) {
csr1212_destroy_csr(host->csr.rom);
@@ -78,7 +72,7 @@ int hpsb_default_host_entry(struct hpsb_host *host)
}
-#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
+#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
#include "eth1394.h"
static struct csr1212_keyval *ip1394_ud;
@@ -103,10 +97,12 @@ static int config_rom_ip1394_init(void)
if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
goto ip1394_fail;
- if (csr1212_associate_keyval(spec_id, spec_desc) == CSR1212_SUCCESS &&
- csr1212_associate_keyval(ver, ver_desc) == CSR1212_SUCCESS &&
- csr1212_attach_keyval_to_directory(ip1394_ud, spec_id) == CSR1212_SUCCESS &&
- csr1212_attach_keyval_to_directory(ip1394_ud, ver) == CSR1212_SUCCESS)
+ csr1212_associate_keyval(spec_id, spec_desc);
+ csr1212_associate_keyval(ver, ver_desc);
+ if (csr1212_attach_keyval_to_directory(ip1394_ud, spec_id)
+ == CSR1212_SUCCESS &&
+ csr1212_attach_keyval_to_directory(ip1394_ud, ver)
+ == CSR1212_SUCCESS)
ret = 0;
ip1394_fail:
@@ -135,7 +131,7 @@ static void config_rom_ip1394_cleanup(void)
}
}
-static int config_rom_ip1394_add(struct hpsb_host *host)
+int hpsb_config_rom_ip1394_add(struct hpsb_host *host)
{
if (!ip1394_ud)
return -ENODEV;
@@ -144,92 +140,55 @@ static int config_rom_ip1394_add(struct hpsb_host *host)
ip1394_ud) != CSR1212_SUCCESS)
return -ENOMEM;
+ host->config_roms |= HPSB_CONFIG_ROM_ENTRY_IP1394;
+ host->update_config_rom = 1;
return 0;
}
+EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_add);
-static void config_rom_ip1394_remove(struct hpsb_host *host)
+void hpsb_config_rom_ip1394_remove(struct hpsb_host *host)
{
csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
+ host->config_roms &= ~HPSB_CONFIG_ROM_ENTRY_IP1394;
+ host->update_config_rom = 1;
}
+EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_remove);
static struct hpsb_config_rom_entry ip1394_entry = {
.name = "ip1394",
.init = config_rom_ip1394_init,
- .add = config_rom_ip1394_add,
- .remove = config_rom_ip1394_remove,
.cleanup = config_rom_ip1394_cleanup,
.flag = HPSB_CONFIG_ROM_ENTRY_IP1394,
};
-#endif /* CONFIG_IEEE1394_CONFIG_ROM_IP1394 */
+#endif /* CONFIG_IEEE1394_ETH1394_ROM_ENTRY */
static struct hpsb_config_rom_entry *const config_rom_entries[] = {
-#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
+#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
&ip1394_entry,
#endif
- NULL,
};
-
+/* Initialize all config roms */
int hpsb_init_config_roms(void)
{
int i, error = 0;
- for (i = 0; config_rom_entries[i]; i++) {
- if (!config_rom_entries[i]->init)
- continue;
-
+ for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
if (config_rom_entries[i]->init()) {
HPSB_ERR("Failed to initialize config rom entry `%s'",
config_rom_entries[i]->name);
error = -1;
- } else
- HPSB_DEBUG("Initialized config rom entry `%s'",
- config_rom_entries[i]->name);
- }
-
- return error;
-}
-
-void hpsb_cleanup_config_roms(void)
-{
- int i;
-
- for (i = 0; config_rom_entries[i]; i++) {
- if (config_rom_entries[i]->cleanup)
- config_rom_entries[i]->cleanup();
- }
-}
-
-int hpsb_add_extra_config_roms(struct hpsb_host *host)
-{
- int i, error = 0;
-
- for (i = 0; config_rom_entries[i]; i++) {
- if (config_rom_entries[i]->add(host)) {
- HPSB_ERR("fw-host%d: Failed to attach config rom entry `%s'",
- host->id, config_rom_entries[i]->name);
- error = -1;
- } else {
- host->config_roms |= config_rom_entries[i]->flag;
- host->update_config_rom = 1;
}
- }
return error;
}
-void hpsb_remove_extra_config_roms(struct hpsb_host *host)
+/* Cleanup all config roms */
+void hpsb_cleanup_config_roms(void)
{
int i;
- for (i = 0; config_rom_entries[i]; i++) {
- if (!(host->config_roms & config_rom_entries[i]->flag))
- continue;
-
- config_rom_entries[i]->remove(host);
-
- host->config_roms &= ~config_rom_entries[i]->flag;
- host->update_config_rom = 1;
- }
+ for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
+ config_rom_entries[i]->cleanup();
}
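
With the generic ->add/->remove hooks removed from struct hpsb_config_rom_entry, a protocol driver now attaches and detaches the IP-1394 unit directory itself through the exported helpers above. The following is a minimal usage sketch, not part of this patch; the hook names and the surrounding driver are assumptions (eth1394's real callers are changed elsewhere in this merge):

#include <linux/errno.h>
#include "config_roms.h"        /* hpsb_config_rom_ip1394_add()/remove() */
#include "hosts.h"              /* struct hpsb_host */

/* Hypothetical per-host setup of a protocol driver (function names assumed).
 * The helper attaches the IP-1394 unit directory and flags the host's config
 * ROM for regeneration by setting host->update_config_rom. */
static int example_host_added(struct hpsb_host *host)
{
        int error;

        error = hpsb_config_rom_ip1394_add(host);   /* 0, -ENODEV or -ENOMEM */
        if (error)
                return error;

        /* ... driver-specific per-host setup would go here ... */
        return 0;
}

static void example_host_removed(struct hpsb_host *host)
{
        /* Detaches the unit directory and flags the ROM for regeneration. */
        hpsb_config_rom_ip1394_remove(host);
}

Since the helpers update host->config_roms and host->update_config_rom themselves, the caller no longer has to manage those flags the way hpsb_add_extra_config_roms()/hpsb_remove_extra_config_roms() did.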
diff --git a/drivers/ieee1394/config_roms.h b/drivers/ieee1394/config_roms.h
index 0a70544cfe6..1f5cd1f16c4 100644
--- a/drivers/ieee1394/config_roms.h
+++ b/drivers/ieee1394/config_roms.h
@@ -1,27 +1,19 @@
#ifndef _IEEE1394_CONFIG_ROMS_H
#define _IEEE1394_CONFIG_ROMS_H
-#include "ieee1394_types.h"
-#include "hosts.h"
+struct hpsb_host;
-/* The default host entry. This must succeed. */
int hpsb_default_host_entry(struct hpsb_host *host);
-
-/* Initialize all config roms */
int hpsb_init_config_roms(void);
-
-/* Cleanup all config roms */
void hpsb_cleanup_config_roms(void);
-/* Add extra config roms to specified host */
-int hpsb_add_extra_config_roms(struct hpsb_host *host);
-
-/* Remove extra config roms from specified host */
-void hpsb_remove_extra_config_roms(struct hpsb_host *host);
-
-
/* List of flags to check if a host contains a certain extra config rom
* entry. Available in the host->config_roms member. */
#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001
+#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
+int hpsb_config_rom_ip1394_add(struct hpsb_host *host);
+void hpsb_config_rom_ip1394_remove(struct hpsb_host *host);
+#endif
+
#endif /* _IEEE1394_CONFIG_ROMS_H */
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index c28f639823d..d08166bda1c 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -31,12 +31,13 @@
/* TODO List:
* - Verify interface consistency: i.e., public functions that take a size
* parameter expect size to be in bytes.
- * - Convenience functions for reading a block of data from a given offset.
*/
-#ifndef __KERNEL__
-#include <string.h>
-#endif
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/bug.h>
+#include <asm/byteorder.h>
#include "csr1212.h"
@@ -46,7 +47,7 @@
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
-static const u_int8_t csr1212_key_id_type_map[0x30] = {
+static const u8 csr1212_key_id_type_map[0x30] = {
__C, /* used by Apple iSight */
__D | __L, /* Descriptor */
__I | __D | __L, /* Bus_Dependent_Info */
@@ -82,10 +83,10 @@ static const u_int8_t csr1212_key_id_type_map[0x30] = {
#undef __L
-#define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
-#define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
+#define quads_to_bytes(_q) ((_q) * sizeof(u32))
+#define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))
-static inline void free_keyval(struct csr1212_keyval *kv)
+static void free_keyval(struct csr1212_keyval *kv)
{
if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
(kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
@@ -94,14 +95,14 @@ static inline void free_keyval(struct csr1212_keyval *kv)
CSR1212_FREE(kv);
}
-static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
+static u16 csr1212_crc16(const u32 *buffer, size_t length)
{
int shift;
- u_int32_t data;
- u_int16_t sum, crc = 0;
+ u32 data;
+ u16 sum, crc = 0;
for (; length; length--) {
- data = CSR1212_BE32_TO_CPU(*buffer);
+ data = be32_to_cpu(*buffer);
buffer++;
for (shift = 28; shift >= 0; shift -= 4 ) {
sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
@@ -110,21 +111,18 @@ static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
crc &= 0xffff;
}
- return CSR1212_CPU_TO_BE16(crc);
+ return cpu_to_be16(crc);
}
-#if 0
-/* Microsoft computes the CRC with the bytes in reverse order. Therefore we
- * have a special version of the CRC algorithm to account for their buggy
- * software. */
-static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
+/* Microsoft computes the CRC with the bytes in reverse order. */
+static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
{
int shift;
- u_int32_t data;
- u_int16_t sum, crc = 0;
+ u32 data;
+ u16 sum, crc = 0;
for (; length; length--) {
- data = CSR1212_LE32_TO_CPU(*buffer);
+ data = le32_to_cpu(*buffer);
buffer++;
for (shift = 28; shift >= 0; shift -= 4 ) {
sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
@@ -133,38 +131,35 @@ static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
crc &= 0xffff;
}
- return CSR1212_CPU_TO_BE16(crc);
+ return cpu_to_be16(crc);
}
-#endif
-static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
- struct csr1212_keyval *kv)
+static struct csr1212_dentry *
+csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
{
struct csr1212_dentry *pos;
for (pos = dir->value.directory.dentries_head;
- pos != NULL; pos = pos->next) {
+ pos != NULL; pos = pos->next)
if (pos->kv == kv)
return pos;
- }
return NULL;
}
-
-static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
- u_int32_t offset)
+static struct csr1212_keyval *
+csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
{
struct csr1212_keyval *kv;
- for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
+ for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
if (kv->offset == offset)
return kv;
- }
return NULL;
}
/* Creation Routines */
+
struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
size_t bus_info_size, void *private)
{
@@ -202,27 +197,17 @@ struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
return csr;
}
-
-
void csr1212_init_local_csr(struct csr1212_csr *csr,
- const u_int32_t *bus_info_data, int max_rom)
+ const u32 *bus_info_data, int max_rom)
{
static const int mr_map[] = { 4, 64, 1024, 0 };
-#ifdef __KERNEL__
BUG_ON(max_rom & ~0x3);
csr->max_rom = mr_map[max_rom];
-#else
- if (max_rom & ~0x3) /* caller supplied invalid argument */
- csr->max_rom = 0;
- else
- csr->max_rom = mr_map[max_rom];
-#endif
memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
}
-
-static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
+static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
{
struct csr1212_keyval *kv;
@@ -246,10 +231,11 @@ static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
return kv;
}
-struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
+struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
{
- struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
+ struct csr1212_keyval *kv;
+ kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
if (!kv)
return NULL;
@@ -258,10 +244,12 @@ struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
return kv;
}
-struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
+static struct csr1212_keyval *
+csr1212_new_leaf(u8 key, const void *data, size_t data_len)
{
- struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
+ struct csr1212_keyval *kv;
+ kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
if (!kv)
return NULL;
@@ -285,10 +273,12 @@ struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t d
return kv;
}
-struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
+static struct csr1212_keyval *
+csr1212_new_csr_offset(u8 key, u32 csr_offset)
{
- struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
+ struct csr1212_keyval *kv;
+ kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
if (!kv)
return NULL;
@@ -299,10 +289,11 @@ struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset
return kv;
}
-struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
+struct csr1212_keyval *csr1212_new_directory(u8 key)
{
- struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
+ struct csr1212_keyval *kv;
+ kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
if (!kv)
return NULL;
@@ -314,43 +305,29 @@ struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
return kv;
}
-int csr1212_associate_keyval(struct csr1212_keyval *kv,
- struct csr1212_keyval *associate)
+void csr1212_associate_keyval(struct csr1212_keyval *kv,
+ struct csr1212_keyval *associate)
{
- if (!kv || !associate)
- return CSR1212_EINVAL;
-
- if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
- (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
- associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
- associate->key.id < 0x30))
- return CSR1212_EINVAL;
-
- if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
- return CSR1212_EINVAL;
-
- if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
- associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
- return CSR1212_EINVAL;
-
- if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
- kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
- return CSR1212_EINVAL;
-
- if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
- kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
- return CSR1212_EINVAL;
+ BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
+ (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
+ associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
+ associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
+ associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
+ associate->key.id < 0x30) ||
+ (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
+ associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
+ (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
+ associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
+ (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
+ kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
+ (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
+ kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
if (kv->associate)
csr1212_release_keyval(kv->associate);
associate->refcnt++;
kv->associate = associate;
-
- return CSR1212_SUCCESS;
}
int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
@@ -358,12 +335,11 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
{
struct csr1212_dentry *dentry;
- if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
- return CSR1212_EINVAL;
+ BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
dentry = CSR1212_MALLOC(sizeof(*dentry));
if (!dentry)
- return CSR1212_ENOMEM;
+ return -ENOMEM;
dentry->kv = kv;
@@ -382,66 +358,22 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
return CSR1212_SUCCESS;
}
-struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
- u_int32_t value)
-{
- struct csr1212_keyval *kvs, *kvk, *kvv;
-
- kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
- kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
- kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);
-
- if (!kvs || !kvk || !kvv) {
- if (kvs)
- free_keyval(kvs);
- if (kvk)
- free_keyval(kvk);
- if (kvv)
- free_keyval(kvv);
- return NULL;
- }
-
- /* Don't keep a local reference to the extended key or value. */
- kvk->refcnt = 0;
- kvv->refcnt = 0;
-
- csr1212_associate_keyval(kvk, kvv);
- csr1212_associate_keyval(kvs, kvk);
-
- return kvs;
-}
-
-struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
- const void *data, size_t data_len)
-{
- struct csr1212_keyval *kvs, *kvk, *kvv;
-
- kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
- kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
- kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
-
- if (!kvs || !kvk || !kvv) {
- if (kvs)
- free_keyval(kvs);
- if (kvk)
- free_keyval(kvk);
- if (kvv)
- free_keyval(kvv);
- return NULL;
- }
-
- /* Don't keep a local reference to the extended key or value. */
- kvk->refcnt = 0;
- kvv->refcnt = 0;
-
- csr1212_associate_keyval(kvk, kvv);
- csr1212_associate_keyval(kvs, kvk);
-
- return kvs;
-}
-
-struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
- const void *data, size_t data_len)
+#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
+ (&((kv)->value.leaf.data[1]))
+
+#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
+ ((kv)->value.leaf.data[0] = \
+ cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
+ ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
+#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
+ ((kv)->value.leaf.data[0] = \
+ cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
+ CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
+ ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
+
+static struct csr1212_keyval *
+csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
+ const void *data, size_t data_len)
{
struct csr1212_keyval *kv;
@@ -453,197 +385,72 @@ struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t spe
CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
- if (data) {
+ if (data)
memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
- }
-
- return kv;
-}
-
-
-struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
- u_int16_t cset,
- u_int16_t language,
- const void *data,
- size_t data_len)
-{
- struct csr1212_keyval *kv;
- char *lstr;
-
- kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
- if (!kv)
- return NULL;
-
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
-
- lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
-
- /* make sure last quadlet is zeroed out */
- *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
-
- /* don't copy the NUL terminator */
- memcpy(lstr, data, data_len);
return kv;
}
+/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
static int csr1212_check_minimal_ascii(const char *s)
{
static const char minimal_ascii_table[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
- 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
- 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
- 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
- 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
- 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
- 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
- 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
- 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
- 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
- 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 1 2 4 8 16 32 64 128 */
+ 128, /* --, --, --, --, --, --, --, 07, */
+ 4 + 16 + 32, /* --, --, 0a, --, 0C, 0D, --, --, */
+ 0, /* --, --, --, --, --, --, --, --, */
+ 0, /* --, --, --, --, --, --, --, --, */
+ 255 - 8 - 16, /* 20, 21, 22, --, --, 25, 26, 27, */
+ 255, /* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
+ 255, /* 30, 31, 32, 33, 34, 35, 36, 37, */
+ 255, /* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
+ 255, /* 40, 41, 42, 43, 44, 45, 46, 47, */
+ 255, /* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
+ 255, /* 50, 51, 52, 53, 54, 55, 56, 57, */
+ 1 + 2 + 4 + 128, /* 58, 59, 5a, --, --, --, --, 5f, */
+ 255 - 1, /* --, 61, 62, 63, 64, 65, 66, 67, */
+ 255, /* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
+ 255, /* 70, 71, 72, 73, 74, 75, 76, 77, */
+ 1 + 2 + 4, /* 78, 79, 7a, --, --, --, --, --, */
};
+ int i, j;
+
for (; *s; s++) {
- if (minimal_ascii_table[*s & 0x7F] != *s)
- return -1; /* failed */
+ i = *s >> 3; /* i = *s / 8; */
+ j = 1 << (*s & 3); /* j = 1 << (*s % 8); */
+
+ if (i >= ARRAY_SIZE(minimal_ascii_table) ||
+ !(minimal_ascii_table[i] & j))
+ return -EINVAL;
}
- /* String conforms to minimal-ascii, as specified by IEEE 1212,
- * par. 7.4 */
return 0;
}
+/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
- /* Check if string conform to minimal_ascii format */
- if (csr1212_check_minimal_ascii(s))
- return NULL;
-
- /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
- return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
-}
-
-struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
- u_int8_t palette_depth,
- u_int8_t color_space,
- u_int16_t language,
- u_int16_t hscan,
- u_int16_t vscan,
- u_int32_t *palette,
- u_int32_t *pixels)
-{
- static const int pd[4] = { 0, 4, 16, 256 };
- static const int cs[16] = { 4, 2 };
struct csr1212_keyval *kv;
- int palette_size;
- int pixel_size = (hscan * vscan + 3) & ~0x3;
+ u32 *text;
+ size_t str_len, quads;
- if (!pixels || (!palette && palette_depth) ||
- (palette_depth & ~0x3) || (color_space & ~0xf))
+ if (!s || !*s || csr1212_check_minimal_ascii(s))
return NULL;
- palette_size = pd[palette_depth] * cs[color_space];
-
- kv = csr1212_new_descriptor_leaf(1, 0, NULL,
- palette_size + pixel_size +
- CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
+ str_len = strlen(s);
+ quads = bytes_to_quads(str_len);
+ kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
+ CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
if (!kv)
return NULL;
- CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
- CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
- CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
- CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
- CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
- CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
-
- if (palette_size)
- memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
- palette_size);
-
- memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
-
- return kv;
-}
-
-struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
- u_int64_t address)
-{
- struct csr1212_keyval *kv;
-
- /* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
- kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
- if(!kv)
- return NULL;
-
- CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
- CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
- CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
+ kv->value.leaf.data[1] = 0; /* width, character_set, language */
+ text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
+ text[quads - 1] = 0; /* padding */
+ memcpy(text, s, str_len);
return kv;
}
-static int csr1212_check_keyword(const char *s)
-{
- for (; *s; s++) {
-
- if (('A' <= *s) && (*s <= 'Z'))
- continue;
- if (('0' <= *s) && (*s <= '9'))
- continue;
- if (*s == '-')
- continue;
-
- return -1; /* failed */
- }
- /* String conforms to keyword, as specified by IEEE 1212,
- * par. 7.6.5 */
- return CSR1212_SUCCESS;
-}
-
-struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
-{
- struct csr1212_keyval *kv;
- char *buffer;
- int i, data_len = 0;
-
- /* Check all keywords to see if they conform to restrictions:
- * Only the following characters is allowed ['A'..'Z','0'..'9','-']
- * Each word is zero-terminated.
- * Also calculate the total length of the keywords.
- */
- for (i = 0; i < strc; i++) {
- if (!strv[i] || csr1212_check_keyword(strv[i])) {
- return NULL;
- }
- data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
- }
-
- /* IEEE 1212, par. 7.6.5 Keyword leaves */
- kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
- if (!kv)
- return NULL;
-
- buffer = (char *)kv->value.leaf.data;
-
- /* make sure last quadlet is zeroed out */
- *((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
-
- /* Copy keyword(s) into leaf data buffer */
- for (i = 0; i < strc; i++) {
- int len = strlen(strv[i]) + 1;
- memcpy(buffer, strv[i], len);
- buffer += len;
- }
- return kv;
-}
-
/* Destruction Routines */
@@ -674,23 +481,12 @@ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
csr1212_release_keyval(kv);
}
-
-void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
-{
- if (kv->associate) {
- csr1212_release_keyval(kv->associate);
- }
-
- kv->associate = NULL;
-}
-
-
/* This function is used to free the memory taken by a keyval. If the given
* keyval is a directory type, then any keyvals contained in that directory
* will be destroyed as well if their respective refcnts are 0. By means of
* list manipulation, this routine will descend a directory structure in a
* non-recursive manner. */
-void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
+static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
{
struct csr1212_keyval *k, *a;
struct csr1212_dentry dentry;
@@ -715,11 +511,13 @@ void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
a = k->associate;
if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
- /* If the current entry is a directory, then move all
+ /* If the current entry is a directory, move all
* the entries to the destruction list. */
if (k->value.directory.dentries_head) {
- tail->next = k->value.directory.dentries_head;
- k->value.directory.dentries_head->prev = tail;
+ tail->next =
+ k->value.directory.dentries_head;
+ k->value.directory.dentries_head->prev =
+ tail;
tail = k->value.directory.dentries_tail;
}
}
@@ -729,15 +527,22 @@ void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
head = head->next;
if (head) {
- if (head->prev && head->prev != &dentry) {
+ if (head->prev && head->prev != &dentry)
CSR1212_FREE(head->prev);
- }
head->prev = NULL;
- } else if (tail != &dentry)
+ } else if (tail != &dentry) {
CSR1212_FREE(tail);
+ }
}
}
+void csr1212_release_keyval(struct csr1212_keyval *kv)
+{
+ if (kv->refcnt > 1)
+ kv->refcnt--;
+ else
+ csr1212_destroy_keyval(kv);
+}
void csr1212_destroy_csr(struct csr1212_csr *csr)
{
@@ -763,49 +568,51 @@ void csr1212_destroy_csr(struct csr1212_csr *csr)
}
-
/* CSR Image Creation */
static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
{
struct csr1212_csr_rom_cache *cache;
- u_int64_t csr_addr;
+ u64 csr_addr;
- if (!csr || !csr->ops || !csr->ops->allocate_addr_range ||
- !csr->ops->release_addr || csr->max_rom < 1)
- return CSR1212_EINVAL;
+ BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
+ !csr->ops->release_addr || csr->max_rom < 1);
/* ROM size must be a multiple of csr->max_rom */
romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
- csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
- if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
- return CSR1212_ENOMEM;
- }
+ csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
+ csr->private);
+ if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
+ return -ENOMEM;
+
if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
/* Invalid address returned from allocate_addr_range(). */
csr->ops->release_addr(csr_addr, csr->private);
- return CSR1212_ENOMEM;
+ return -ENOMEM;
}
- cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
+ cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
+ romsize);
if (!cache) {
csr->ops->release_addr(csr_addr, csr->private);
- return CSR1212_ENOMEM;
+ return -ENOMEM;
}
- cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
+ cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
+ CSR1212_KV_ID_EXTENDED_ROM);
if (!cache->ext_rom) {
csr->ops->release_addr(csr_addr, csr->private);
CSR1212_FREE(cache);
- return CSR1212_ENOMEM;
+ return -ENOMEM;
}
- if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
+ if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
+ CSR1212_SUCCESS) {
csr1212_release_keyval(cache->ext_rom);
csr->ops->release_addr(csr_addr, csr->private);
CSR1212_FREE(cache);
- return CSR1212_ENOMEM;
+ return -ENOMEM;
}
cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
cache->ext_rom->value.leaf.len = -1;
@@ -818,8 +625,8 @@ static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
return CSR1212_SUCCESS;
}
-static inline void csr1212_remove_cache(struct csr1212_csr *csr,
- struct csr1212_csr_rom_cache *cache)
+static void csr1212_remove_cache(struct csr1212_csr *csr,
+ struct csr1212_csr_rom_cache *cache)
{
if (csr->cache_head == cache)
csr->cache_head = cache->next;
@@ -832,7 +639,8 @@ static inline void csr1212_remove_cache(struct csr1212_csr *csr,
cache->next->prev = cache->prev;
if (cache->ext_rom) {
- csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
+ csr1212_detach_keyval_from_directory(csr->root_kv,
+ cache->ext_rom);
csr1212_release_keyval(cache->ext_rom);
}
@@ -852,28 +660,29 @@ static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
dentry = dentry->next) {
for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
/* Special Case: Extended Key Specifier_ID */
- if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
- if (last_extkey_spec == NULL) {
+ if (dkv->key.id ==
+ CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
+ if (last_extkey_spec == NULL)
last_extkey_spec = dkv;
- } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
+ else if (dkv->value.immediate !=
+ last_extkey_spec->value.immediate)
last_extkey_spec = dkv;
- } else {
+ else
continue;
- }
/* Special Case: Extended Key */
} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
- if (last_extkey == NULL) {
+ if (last_extkey == NULL)
last_extkey = dkv;
- } else if (dkv->value.immediate != last_extkey->value.immediate) {
+ else if (dkv->value.immediate !=
+ last_extkey->value.immediate)
last_extkey = dkv;
- } else {
+ else
continue;
- }
}
num_entries += 1;
- switch(dkv->key.type) {
+ switch (dkv->key.type) {
default:
case CSR1212_KV_TYPE_IMMEDIATE:
case CSR1212_KV_TYPE_CSR_OFFSET:
@@ -891,8 +700,9 @@ static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
/* Special case: Extended ROM leafs */
if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
dkv->value.leaf.len = -1;
- /* Don't add Extended ROM leafs in the layout list,
- * they are handled differently. */
+ /* Don't add Extended ROM leafs in the
+ * layout list, they are handled
+ * differently. */
break;
}
@@ -908,20 +718,21 @@ static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
return num_entries;
}
-size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
+static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
{
struct csr1212_keyval *ltail = kv;
size_t agg_size = 0;
- while(kv) {
- switch(kv->key.type) {
+ while (kv) {
+ switch (kv->key.type) {
case CSR1212_KV_TYPE_LEAF:
/* Add 1 quadlet for crc/len field */
agg_size += kv->value.leaf.len + 1;
break;
case CSR1212_KV_TYPE_DIRECTORY:
- kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
+ kv->value.directory.len =
+ csr1212_generate_layout_subdir(kv, &ltail);
/* Add 1 quadlet for crc/len field */
agg_size += kv->value.directory.len + 1;
break;
@@ -931,9 +742,9 @@ size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
return quads_to_bytes(agg_size);
}
-struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
- struct csr1212_keyval *start_kv,
- int start_pos)
+static struct csr1212_keyval *
+csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
+ struct csr1212_keyval *start_kv, int start_pos)
{
struct csr1212_keyval *kv = start_kv;
struct csr1212_keyval *okv = start_kv;
@@ -942,13 +753,12 @@ struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *
cache->layout_head = kv;
- while(kv && pos < cache->size) {
+ while (kv && pos < cache->size) {
/* Special case: Extended ROM leafs */
- if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
+ if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
kv->offset = cache->offset + pos;
- }
- switch(kv->key.type) {
+ switch (kv->key.type) {
case CSR1212_KV_TYPE_LEAF:
kv_len = kv->value.leaf.len;
break;
@@ -959,6 +769,7 @@ struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *
default:
/* Should never get here */
+ WARN_ON(1);
break;
}
@@ -972,46 +783,55 @@ struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *
}
cache->layout_tail = okv;
- cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
+ cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);
return kv;
}
-static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
- u_int32_t *data_buffer)
+#define CSR1212_KV_KEY_SHIFT 24
+#define CSR1212_KV_KEY_TYPE_SHIFT 6
+#define CSR1212_KV_KEY_ID_MASK 0x3f
+#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
+
+static void
+csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
{
struct csr1212_dentry *dentry;
struct csr1212_keyval *last_extkey_spec = NULL;
struct csr1212_keyval *last_extkey = NULL;
int index = 0;
- for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
+ for (dentry = dir->value.directory.dentries_head;
+ dentry;
+ dentry = dentry->next) {
struct csr1212_keyval *a;
for (a = dentry->kv; a; a = a->associate) {
- u_int32_t value = 0;
+ u32 value = 0;
/* Special Case: Extended Key Specifier_ID */
- if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
- if (last_extkey_spec == NULL) {
+ if (a->key.id ==
+ CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
+ if (last_extkey_spec == NULL)
last_extkey_spec = a;
- } else if (a->value.immediate != last_extkey_spec->value.immediate) {
+ else if (a->value.immediate !=
+ last_extkey_spec->value.immediate)
last_extkey_spec = a;
- } else {
+ else
continue;
- }
+
/* Special Case: Extended Key */
} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
- if (last_extkey == NULL) {
+ if (last_extkey == NULL)
last_extkey = a;
- } else if (a->value.immediate != last_extkey->value.immediate) {
+ else if (a->value.immediate !=
+ last_extkey->value.immediate)
last_extkey = a;
- } else {
+ else
continue;
- }
}
- switch(a->key.type) {
+ switch (a->key.type) {
case CSR1212_KV_TYPE_IMMEDIATE:
value = a->value.immediate;
break;
@@ -1030,32 +850,46 @@ static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
break;
default:
/* Should never get here */
- break; /* GDB breakpoint */
+ WARN_ON(1);
+ break;
}
- value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
+ value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
+ CSR1212_KV_KEY_SHIFT;
value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
- (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
- data_buffer[index] = CSR1212_CPU_TO_BE32(value);
+ (CSR1212_KV_KEY_SHIFT +
+ CSR1212_KV_KEY_TYPE_SHIFT);
+ data_buffer[index] = cpu_to_be32(value);
index++;
}
}
}
-void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
+struct csr1212_keyval_img {
+ u16 length;
+ u16 crc;
+
+ /* Must be last */
+ u32 data[0]; /* older gcc can't handle [] which is standard */
+};
+
+static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
{
struct csr1212_keyval *kv, *nkv;
struct csr1212_keyval_img *kvi;
- for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
- kvi = (struct csr1212_keyval_img *)
- (cache->data + bytes_to_quads(kv->offset - cache->offset));
- switch(kv->key.type) {
+ for (kv = cache->layout_head;
+ kv != cache->layout_tail->next;
+ kv = nkv) {
+ kvi = (struct csr1212_keyval_img *)(cache->data +
+ bytes_to_quads(kv->offset - cache->offset));
+ switch (kv->key.type) {
default:
case CSR1212_KV_TYPE_IMMEDIATE:
case CSR1212_KV_TYPE_CSR_OFFSET:
/* Should never get here */
- break; /* GDB breakpoint */
+ WARN_ON(1);
+ break;
case CSR1212_KV_TYPE_LEAF:
/* Don't copy over Extended ROM areas, they are
@@ -1064,15 +898,16 @@ void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
memcpy(kvi->data, kv->value.leaf.data,
quads_to_bytes(kv->value.leaf.len));
- kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
+ kvi->length = cpu_to_be16(kv->value.leaf.len);
kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
break;
case CSR1212_KV_TYPE_DIRECTORY:
csr1212_generate_tree_subdir(kv, kvi->data);
- kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
- kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
+ kvi->length = cpu_to_be16(kv->value.directory.len);
+ kvi->crc = csr1212_crc16(kvi->data,
+ kv->value.directory.len);
break;
}
@@ -1086,6 +921,10 @@ void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
}
}
+/* This size is arbitrarily chosen.
+ * The struct overhead is subtracted for more economic allocations. */
+#define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
+
int csr1212_generate_csr_image(struct csr1212_csr *csr)
{
struct csr1212_bus_info_block_img *bi;
@@ -1095,8 +934,7 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
int ret;
int init_offset;
- if (!csr)
- return CSR1212_EINVAL;
+ BUG_ON(!csr);
cache = csr->cache_head;
@@ -1113,18 +951,21 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
init_offset = csr->bus_info_len;
- for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
+ for (kv = csr->root_kv, cache = csr->cache_head;
+ kv;
+ cache = cache->next) {
if (!cache) {
/* Estimate approximate number of additional cache
* regions needed (it assumes that the cache holding
* the first 1K Config ROM space always exists). */
int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
- (2 * sizeof(u_int32_t))) + 1;
+ (2 * sizeof(u32))) + 1;
/* Add additional cache regions, extras will be
* removed later */
for (; est_c; est_c--) {
- ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
+ ret = csr1212_append_new_cache(csr,
+ CSR1212_EXTENDED_ROM_SIZE);
if (ret != CSR1212_SUCCESS)
return ret;
}
@@ -1136,7 +977,7 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
}
kv = csr1212_generate_positions(cache, kv, init_offset);
agg_size -= cache->len;
- init_offset = sizeof(u_int32_t);
+ init_offset = sizeof(u32);
}
/* Remove unused, excess cache regions */
@@ -1149,15 +990,14 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
/* Go through the list backward so that when done, the correct CRC
* will be calculated for the Extended ROM areas. */
- for(cache = csr->cache_tail; cache; cache = cache->prev) {
+ for (cache = csr->cache_tail; cache; cache = cache->prev) {
/* Only Extended ROM caches should have this set. */
if (cache->ext_rom) {
int leaf_size;
/* Make sure the Extended ROM leaf is a multiple of
* max_rom in size. */
- if (csr->max_rom < 1)
- return CSR1212_EINVAL;
+ BUG_ON(csr->max_rom < 1);
leaf_size = (cache->len + (csr->max_rom - 1)) &
~(csr->max_rom - 1);
@@ -1166,7 +1006,7 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
leaf_size - cache->len);
/* Subtract leaf header */
- leaf_size -= sizeof(u_int32_t);
+ leaf_size -= sizeof(u32);
/* Update the Extended ROM leaf length */
cache->ext_rom->value.leaf.len =
@@ -1184,33 +1024,31 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
/* Set the length and CRC of the extended ROM. */
struct csr1212_keyval_img *kvi =
(struct csr1212_keyval_img*)cache->data;
+ u16 len = bytes_to_quads(cache->len) - 1;
- kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1);
- kvi->crc = csr1212_crc16(kvi->data,
- bytes_to_quads(cache->len) - 1);
-
+ kvi->length = cpu_to_be16(len);
+ kvi->crc = csr1212_crc16(kvi->data, len);
}
}
return CSR1212_SUCCESS;
}
-int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
+int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
{
struct csr1212_csr_rom_cache *cache;
- for (cache = csr->cache_head; cache; cache = cache->next) {
+ for (cache = csr->cache_head; cache; cache = cache->next)
if (offset >= cache->offset &&
(offset + len) <= (cache->offset + cache->size)) {
- memcpy(buffer,
- &cache->data[bytes_to_quads(offset - cache->offset)],
+ memcpy(buffer, &cache->data[
+ bytes_to_quads(offset - cache->offset)],
len);
return CSR1212_SUCCESS;
}
- }
- return CSR1212_ENOENT;
-}
+ return -ENOENT;
+}
/* Parse a chunk of data as a Config ROM */
@@ -1227,46 +1065,43 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
* Unfortunately, many IEEE 1394 devices do not abide by that, so the
* bus info block will be read 1 quadlet at a time. The rest of the
* ConfigROM will be read according to the max_rom field. */
- for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
+ for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
- sizeof(csr1212_quad_t),
- &csr->cache_head->data[bytes_to_quads(i)],
- csr->private);
+ sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
+ csr->private);
if (ret != CSR1212_SUCCESS)
return ret;
/* check ROM header's info_length */
if (i == 0 &&
- CSR1212_BE32_TO_CPU(csr->cache_head->data[0]) >> 24 !=
+ be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
bytes_to_quads(csr->bus_info_len) - 1)
- return CSR1212_EINVAL;
+ return -EINVAL;
}
bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
csr->crc_len = quads_to_bytes(bi->crc_length);
- /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
- * always the case, so read the rest of the crc area 1 quadlet at a time. */
- for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
+ /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
+ * is not always the case, so read the rest of the crc area 1 quadlet at
+ * a time. */
+ for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
- sizeof(csr1212_quad_t),
- &csr->cache_head->data[bytes_to_quads(i)],
- csr->private);
+ sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
+ csr->private);
if (ret != CSR1212_SUCCESS)
return ret;
}
-#if 0
- /* Apparently there are too many differnt wrong implementations of the
- * CRC algorithm that verifying them is moot. */
+ /* Apparently there are many different wrong implementations of the CRC
+ * algorithm. We don't fail, we just warn. */
if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
(csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
- return CSR1212_EINVAL;
-#endif
+ printk(KERN_DEBUG "IEEE 1394 device has ROM CRC error\n");
cr = CSR1212_MALLOC(sizeof(*cr));
if (!cr)
- return CSR1212_ENOMEM;
+ return -ENOMEM;
cr->next = NULL;
cr->prev = NULL;
@@ -1279,21 +1114,26 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
return CSR1212_SUCCESS;
}
-static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
- csr1212_quad_t ki,
- u_int32_t kv_pos)
+#define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
+#define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
+#define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
+#define CSR1212_KV_VAL_MASK 0xffffff
+#define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
+
+static int
+csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
{
int ret = CSR1212_SUCCESS;
struct csr1212_keyval *k = NULL;
- u_int32_t offset;
+ u32 offset;
- switch(CSR1212_KV_KEY_TYPE(ki)) {
+ switch (CSR1212_KV_KEY_TYPE(ki)) {
case CSR1212_KV_TYPE_IMMEDIATE:
k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
CSR1212_KV_VAL(ki));
if (!k) {
- ret = CSR1212_ENOMEM;
- goto fail;
+ ret = -ENOMEM;
+ goto out;
}
k->refcnt = 0; /* Don't keep local reference when parsing. */
@@ -1303,8 +1143,8 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
CSR1212_KV_VAL(ki));
if (!k) {
- ret = CSR1212_ENOMEM;
- goto fail;
+ ret = -ENOMEM;
+ goto out;
}
k->refcnt = 0; /* Don't keep local reference when parsing. */
break;
@@ -1316,8 +1156,8 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
/* Uh-oh. Can't have a relative offset of 0 for Leaves
* or Directories. The Config ROM image is most likely
* messed up, so we'll just abort here. */
- ret = CSR1212_EIO;
- goto fail;
+ ret = -EIO;
+ goto out;
}
k = csr1212_find_keyval_offset(dir, offset);
@@ -1325,14 +1165,14 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
if (k)
break; /* Found it. */
- if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
+ if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
- } else {
+ else
k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
- }
+
if (!k) {
- ret = CSR1212_ENOMEM;
- goto fail;
+ ret = -ENOMEM;
+ goto out;
}
k->refcnt = 0; /* Don't keep local reference when parsing. */
k->valid = 0; /* Contents not read yet so it's not valid. */
@@ -1344,16 +1184,12 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
dir->next = k;
}
ret = csr1212_attach_keyval_to_directory(dir, k);
-
-fail:
- if (ret != CSR1212_SUCCESS) {
- if (k)
- free_keyval(k);
- }
+out:
+ if (ret != CSR1212_SUCCESS && k != NULL)
+ free_keyval(k);
return ret;
}
-
int csr1212_parse_keyval(struct csr1212_keyval *kv,
struct csr1212_csr_rom_cache *cache)
{
@@ -1362,24 +1198,20 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
int ret = CSR1212_SUCCESS;
int kvi_len;
- kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
- cache->offset)];
- kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
+ kvi = (struct csr1212_keyval_img*)
+ &cache->data[bytes_to_quads(kv->offset - cache->offset)];
+ kvi_len = be16_to_cpu(kvi->length);
-#if 0
- /* Apparently there are too many differnt wrong implementations of the
- * CRC algorithm that verifying them is moot. */
+ /* Apparently there are many different wrong implementations of the CRC
+ * algorithm. We don't fail, we just warn. */
if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
- (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
- ret = CSR1212_EINVAL;
- goto fail;
- }
-#endif
+ (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc))
+ printk(KERN_DEBUG "IEEE 1394 device has ROM CRC error\n");
- switch(kv->key.type) {
+ switch (kv->key.type) {
case CSR1212_KV_TYPE_DIRECTORY:
for (i = 0; i < kvi_len; i++) {
- csr1212_quad_t ki = kvi->data[i];
+ u32 ki = kvi->data[i];
/* Some devices put null entries in their unit
* directories. If we come across such an entry,
@@ -1387,76 +1219,72 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
if (ki == 0x0)
continue;
ret = csr1212_parse_dir_entry(kv, ki,
- (kv->offset +
- quads_to_bytes(i + 1)));
+ kv->offset + quads_to_bytes(i + 1));
}
kv->value.directory.len = kvi_len;
break;
case CSR1212_KV_TYPE_LEAF:
if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
- kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
+ size_t size = quads_to_bytes(kvi_len);
+
+ kv->value.leaf.data = CSR1212_MALLOC(size);
if (!kv->value.leaf.data) {
- ret = CSR1212_ENOMEM;
- goto fail;
+ ret = -ENOMEM;
+ goto out;
}
kv->value.leaf.len = kvi_len;
- memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
+ memcpy(kv->value.leaf.data, kvi->data, size);
}
break;
}
kv->valid = 1;
-
-fail:
+out:
return ret;
}
-
-int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
+static int
+csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
struct csr1212_keyval_img *kvi = NULL;
struct csr1212_csr_rom_cache *cache;
int cache_index;
- u_int64_t addr;
- u_int32_t *cache_ptr;
- u_int16_t kv_len = 0;
+ u64 addr;
+ u32 *cache_ptr;
+ u16 kv_len = 0;
- if (!csr || !kv || csr->max_rom < 1)
- return CSR1212_EINVAL;
+ BUG_ON(!csr || !kv || csr->max_rom < 1);
/* First find which cache the data should be in (or go in if not read
* yet). */
- for (cache = csr->cache_head; cache; cache = cache->next) {
+ for (cache = csr->cache_head; cache; cache = cache->next)
if (kv->offset >= cache->offset &&
kv->offset < (cache->offset + cache->size))
break;
- }
if (!cache) {
- csr1212_quad_t q;
- u_int32_t cache_size;
+ u32 q, cache_size;
/* Only create a new cache for Extended ROM leaves. */
if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
- return CSR1212_EINVAL;
+ return -EINVAL;
if (csr->ops->bus_read(csr,
CSR1212_REGISTER_SPACE_BASE + kv->offset,
- sizeof(csr1212_quad_t), &q, csr->private)) {
- return CSR1212_EIO;
- }
+ sizeof(u32), &q, csr->private))
+ return -EIO;
- kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16;
+ kv->value.leaf.len = be32_to_cpu(q) >> 16;
cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
(csr->max_rom - 1)) & ~(csr->max_rom - 1);
cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
if (!cache)
- return CSR1212_ENOMEM;
+ return -ENOMEM;
kv->value.leaf.data = &cache->data[1];
csr->cache_tail->next = cache;
@@ -1465,12 +1293,11 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
csr->cache_tail = cache;
cache->filled_head =
CSR1212_MALLOC(sizeof(*cache->filled_head));
- if (!cache->filled_head) {
- return CSR1212_ENOMEM;
- }
+ if (!cache->filled_head)
+ return -ENOMEM;
cache->filled_head->offset_start = 0;
- cache->filled_head->offset_end = sizeof(csr1212_quad_t);
+ cache->filled_head->offset_end = sizeof(u32);
cache->filled_tail = cache->filled_head;
cache->filled_head->next = NULL;
cache->filled_head->prev = NULL;
@@ -1488,7 +1315,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
if (cache_index < cr->offset_start) {
newcr = CSR1212_MALLOC(sizeof(*newcr));
if (!newcr)
- return CSR1212_ENOMEM;
+ return -ENOMEM;
newcr->offset_start = cache_index & ~(csr->max_rom - 1);
newcr->offset_end = newcr->offset_start;
@@ -1501,18 +1328,18 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
(cache_index < cr->offset_end)) {
kvi = (struct csr1212_keyval_img*)
(&cache->data[bytes_to_quads(cache_index)]);
- kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
- 1);
+ kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
break;
- } else if (cache_index == cr->offset_end)
+ } else if (cache_index == cr->offset_end) {
break;
+ }
}
if (!cr) {
cr = cache->filled_tail;
newcr = CSR1212_MALLOC(sizeof(*newcr));
if (!newcr)
- return CSR1212_ENOMEM;
+ return -ENOMEM;
newcr->offset_start = cache_index & ~(csr->max_rom - 1);
newcr->offset_end = newcr->offset_start;
@@ -1534,7 +1361,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
csr->private)) {
if (csr->max_rom == 4)
/* We've got problems! */
- return CSR1212_EIO;
+ return -EIO;
		/* Apparently the max_rom value was a lie, set it to
* do quadlet reads and try again. */
@@ -1548,8 +1375,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
if (!kvi && (cr->offset_end > cache_index)) {
kvi = (struct csr1212_keyval_img*)
(&cache->data[bytes_to_quads(cache_index)]);
- kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
- 1);
+ kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
}
if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
@@ -1557,7 +1383,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
* beyond the ConfigROM image region and thus beyond the
* end of our cache region. Therefore, we abort now
* rather than seg faulting later. */
- return CSR1212_EIO;
+ return -EIO;
}
ncr = cr->next;
@@ -1579,7 +1405,16 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
return csr1212_parse_keyval(kv, cache);
}
-
+struct csr1212_keyval *
+csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
+{
+ if (!kv)
+ return NULL;
+ if (!kv->valid)
+ if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
+ return NULL;
+ return kv;
+}
int csr1212_parse_csr(struct csr1212_csr *csr)
{
@@ -1587,20 +1422,19 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
struct csr1212_dentry *dentry;
int ret;
- if (!csr || !csr->ops || !csr->ops->bus_read)
- return CSR1212_EINVAL;
+ BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
ret = csr1212_parse_bus_info_block(csr);
if (ret != CSR1212_SUCCESS)
return ret;
- if (!csr->ops->get_max_rom)
+ if (!csr->ops->get_max_rom) {
csr->max_rom = mr_map[0]; /* default value */
- else {
+ } else {
int i = csr->ops->get_max_rom(csr->bus_info_data,
csr->private);
if (i & ~0x3)
- return CSR1212_EINVAL;
+ return -EINVAL;
csr->max_rom = mr_map[i];
}
@@ -1613,7 +1447,7 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
csr->root_kv->valid = 0;
csr->root_kv->next = csr->root_kv;
csr->root_kv->prev = csr->root_kv;
- ret = _csr1212_read_keyval(csr, csr->root_kv);
+ ret = csr1212_read_keyval(csr, csr->root_kv);
if (ret != CSR1212_SUCCESS)
return ret;
@@ -1623,7 +1457,7 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
dentry; dentry = dentry->next) {
if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
!dentry->kv->valid) {
- ret = _csr1212_read_keyval(csr, dentry->kv);
+ ret = csr1212_read_keyval(csr, dentry->kv);
if (ret != CSR1212_SUCCESS)
return ret;
}
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index 17ddd72dee4..df909ce6630 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -30,94 +30,13 @@
#ifndef __CSR1212_H__
#define __CSR1212_H__
-
-/* Compatibility layer */
-#ifdef __KERNEL__
-
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <asm/pgalloc.h>
-
-#define CSR1212_MALLOC(size) vmalloc((size))
-#define CSR1212_FREE(ptr) vfree(ptr)
-#define CSR1212_BE16_TO_CPU(quad) be16_to_cpu(quad)
-#define CSR1212_CPU_TO_BE16(quad) cpu_to_be16(quad)
-#define CSR1212_BE32_TO_CPU(quad) be32_to_cpu(quad)
-#define CSR1212_CPU_TO_BE32(quad) cpu_to_be32(quad)
-#define CSR1212_BE64_TO_CPU(quad) be64_to_cpu(quad)
-#define CSR1212_CPU_TO_BE64(quad) cpu_to_be64(quad)
-
-#define CSR1212_LE16_TO_CPU(quad) le16_to_cpu(quad)
-#define CSR1212_CPU_TO_LE16(quad) cpu_to_le16(quad)
-#define CSR1212_LE32_TO_CPU(quad) le32_to_cpu(quad)
-#define CSR1212_CPU_TO_LE32(quad) cpu_to_le32(quad)
-#define CSR1212_LE64_TO_CPU(quad) le64_to_cpu(quad)
-#define CSR1212_CPU_TO_LE64(quad) cpu_to_le64(quad)
-
-#include <linux/errno.h>
-#define CSR1212_SUCCESS (0)
-#define CSR1212_EINVAL (-EINVAL)
-#define CSR1212_ENOMEM (-ENOMEM)
-#define CSR1212_ENOENT (-ENOENT)
-#define CSR1212_EIO (-EIO)
-#define CSR1212_EBUSY (-EBUSY)
-
-#else /* Userspace */
-
-#include <sys/types.h>
-#include <malloc.h>
-#define CSR1212_MALLOC(size) malloc(size)
-#define CSR1212_FREE(ptr) free(ptr)
-#include <endian.h>
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#include <byteswap.h>
-#define CSR1212_BE16_TO_CPU(quad) bswap_16(quad)
-#define CSR1212_CPU_TO_BE16(quad) bswap_16(quad)
-#define CSR1212_BE32_TO_CPU(quad) bswap_32(quad)
-#define CSR1212_CPU_TO_BE32(quad) bswap_32(quad)
-#define CSR1212_BE64_TO_CPU(quad) bswap_64(quad)
-#define CSR1212_CPU_TO_BE64(quad) bswap_64(quad)
-
-#define CSR1212_LE16_TO_CPU(quad) (quad)
-#define CSR1212_CPU_TO_LE16(quad) (quad)
-#define CSR1212_LE32_TO_CPU(quad) (quad)
-#define CSR1212_CPU_TO_LE32(quad) (quad)
-#define CSR1212_LE64_TO_CPU(quad) (quad)
-#define CSR1212_CPU_TO_LE64(quad) (quad)
-#else
-#define CSR1212_BE16_TO_CPU(quad) (quad)
-#define CSR1212_CPU_TO_BE16(quad) (quad)
-#define CSR1212_BE32_TO_CPU(quad) (quad)
-#define CSR1212_CPU_TO_BE32(quad) (quad)
-#define CSR1212_BE64_TO_CPU(quad) (quad)
-#define CSR1212_CPU_TO_BE64(quad) (quad)
-
-#define CSR1212_LE16_TO_CPU(quad) bswap_16(quad)
-#define CSR1212_CPU_TO_LE16(quad) bswap_16(quad)
-#define CSR1212_LE32_TO_CPU(quad) bswap_32(quad)
-#define CSR1212_CPU_TO_LE32(quad) bswap_32(quad)
-#define CSR1212_LE64_TO_CPU(quad) bswap_64(quad)
-#define CSR1212_CPU_TO_LE64(quad) bswap_64(quad)
-#endif
-
-#include <errno.h>
-#define CSR1212_SUCCESS (0)
-#define CSR1212_EINVAL (EINVAL)
-#define CSR1212_ENOMEM (ENOMEM)
-#define CSR1212_ENOENT (ENOENT)
-#define CSR1212_EIO (EIO)
-#define CSR1212_EBUSY (EBUSY)
-
-#endif
+#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
+#define CSR1212_FREE(ptr) kfree(ptr)
-#define CSR1212_KV_VAL_MASK 0xffffff
-#define CSR1212_KV_KEY_SHIFT 24
-#define CSR1212_KV_KEY_TYPE_SHIFT 6
-#define CSR1212_KV_KEY_ID_MASK 0x3f
-#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* After shift */
+#define CSR1212_SUCCESS (0)
/* CSR 1212 key types */
@@ -190,48 +109,22 @@
#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
-#define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u_int32_t))
-
#define CSR1212_INVALID_ADDR_SPACE -1
+
/* Config ROM image structures */
struct csr1212_bus_info_block_img {
- u_int8_t length;
- u_int8_t crc_length;
- u_int16_t crc;
+ u8 length;
+ u8 crc_length;
+ u16 crc;
/* Must be last */
- u_int32_t data[0]; /* older gcc can't handle [] which is standard */
-};
-
-#define CSR1212_KV_KEY(quad) (CSR1212_BE32_TO_CPU(quad) >> CSR1212_KV_KEY_SHIFT)
-#define CSR1212_KV_KEY_TYPE(quad) (CSR1212_KV_KEY(quad) >> CSR1212_KV_KEY_TYPE_SHIFT)
-#define CSR1212_KV_KEY_ID(quad) (CSR1212_KV_KEY(quad) & CSR1212_KV_KEY_ID_MASK)
-#define CSR1212_KV_VAL(quad) (CSR1212_BE32_TO_CPU(quad) & CSR1212_KV_VAL_MASK)
-
-#define CSR1212_SET_KV_KEY(quad, key) ((quad) = \
- CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | ((key) << CSR1212_KV_KEY_SHIFT)))
-#define CSR1212_SET_KV_VAL(quad, val) ((quad) = \
- CSR1212_CPU_TO_BE32((CSR1212_KV_KEY(quad) << CSR1212_KV_KEY_SHIFT) | (val)))
-#define CSR1212_SET_KV_TYPEID(quad, type, id) ((quad) = \
- CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | \
- (((((type) & CSR1212_KV_KEY_TYPE_MASK) << CSR1212_KV_KEY_TYPE_SHIFT) | \
- ((id) & CSR1212_KV_KEY_ID_MASK)) << CSR1212_KV_KEY_SHIFT)))
-
-typedef u_int32_t csr1212_quad_t;
-
-
-struct csr1212_keyval_img {
- u_int16_t length;
- u_int16_t crc;
-
- /* Must be last */
- csr1212_quad_t data[0]; /* older gcc can't handle [] which is standard */
+ u32 data[0]; /* older gcc can't handle [] which is standard */
};
struct csr1212_leaf {
int len;
- u_int32_t *data;
+ u32 *data;
};
struct csr1212_dentry {
@@ -246,12 +139,12 @@ struct csr1212_directory {
struct csr1212_keyval {
struct {
- u_int8_t type;
- u_int8_t id;
+ u8 type;
+ u8 id;
} key;
union {
- u_int32_t immediate;
- u_int32_t csr_offset;
+ u32 immediate;
+ u32 csr_offset;
struct csr1212_leaf leaf;
struct csr1212_directory directory;
} value;
@@ -260,15 +153,15 @@ struct csr1212_keyval {
/* used in generating and/or parsing CSR image */
struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
- u_int32_t offset; /* position in CSR from 0xffff f000 0000 */
- u_int8_t valid; /* flag indicating keyval has valid data*/
+ u32 offset; /* position in CSR from 0xffff f000 0000 */
+ u8 valid; /* flag indicating keyval has valid data*/
};
struct csr1212_cache_region {
struct csr1212_cache_region *next, *prev;
- u_int32_t offset_start; /* inclusive */
- u_int32_t offset_end; /* exclusive */
+ u32 offset_start; /* inclusive */
+ u32 offset_end; /* exclusive */
};
struct csr1212_csr_rom_cache {
@@ -276,18 +169,18 @@ struct csr1212_csr_rom_cache {
struct csr1212_cache_region *filled_head, *filled_tail;
struct csr1212_keyval *layout_head, *layout_tail;
size_t size;
- u_int32_t offset;
+ u32 offset;
struct csr1212_keyval *ext_rom;
size_t len;
/* Must be last */
- u_int32_t data[0]; /* older gcc can't handle [] which is standard */
+ u32 data[0]; /* older gcc can't handle [] which is standard */
};
struct csr1212_csr {
size_t bus_info_len; /* bus info block length in bytes */
size_t crc_len; /* crc length in bytes */
- u_int32_t *bus_info_data; /* bus info data incl bus name and EUI */
+ u32 *bus_info_data; /* bus info data incl bus name and EUI */
void *private; /* private, bus specific data */
struct csr1212_bus_ops *ops;
@@ -305,52 +198,38 @@ struct csr1212_bus_ops {
* from remote nodes when parsing a Config ROM (i.e., read Config ROM
* entries located in the Units Space. Must return 0 on success
* anything else indicates an error. */
- int (*bus_read) (struct csr1212_csr *csr, u_int64_t addr,
- u_int16_t length, void *buffer, void *private);
+ int (*bus_read) (struct csr1212_csr *csr, u64 addr,
+ u16 length, void *buffer, void *private);
/* This function is used by csr1212 to allocate a region in units space
* in the event that Config ROM entries don't all fit in the predefined
* 1K region. The void *private parameter is private member of struct
* csr1212_csr. */
- u_int64_t (*allocate_addr_range) (u_int64_t size, u_int32_t alignment,
- void *private);
-
+ u64 (*allocate_addr_range) (u64 size, u32 alignment, void *private);
/* This function is used by csr1212 to release a region in units space
* that is no longer needed. */
- void (*release_addr) (u_int64_t addr, void *private);
+ void (*release_addr) (u64 addr, void *private);
/* This function is used by csr1212 to determine the max read request
* supported by a remote node when reading the ConfigROM space. Must
* return 0, 1, or 2 per IEEE 1212. */
- int (*get_max_rom) (u_int32_t *bus_info, void *private);
+ int (*get_max_rom) (u32 *bus_info, void *private);
};
-
-
/* Descriptor Leaf manipulation macros */
#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
-#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
+#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) >> CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
+ (be32_to_cpu((kv)->value.leaf.data[0]) >> \
+ CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) & \
+ (be32_to_cpu((kv)->value.leaf.data[0]) & \
CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
-#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
- (&((kv)->value.leaf.data[1]))
-
-#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
- ((kv)->value.leaf.data[0] = \
- CSR1212_CPU_TO_BE32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
- ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
-#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
- ((kv)->value.leaf.data[0] = \
- CSR1212_CPU_TO_BE32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
- CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
- ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
+
/* Text Descriptor Leaf manipulation macros */
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
@@ -358,182 +237,21 @@ struct csr1212_bus_ops {
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
+ (be32_to_cpu((kv)->value.leaf.data[1]) >> \
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
- ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
+ ((be32_to_cpu((kv)->value.leaf.data[1]) >> \
+ CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
+ CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) & \
+ (be32_to_cpu((kv)->value.leaf.data[1]) & \
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
(&((kv)->value.leaf.data[2]))
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
- ((kv)->value.leaf.data[1] = \
- ((kv)->value.leaf.data[1] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
- CSR1212_CPU_TO_BE32(((width) & \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
- ((kv)->value.leaf.data[1] = \
- ((kv)->value.leaf.data[1] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
- CSR1212_CPU_TO_BE32(((char_set) & \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
-#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
- ((kv)->value.leaf.data[1] = \
- ((kv)->value.leaf.data[1] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
- CSR1212_CPU_TO_BE32(((language) & \
- CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
-
-
-/* Icon Descriptor Leaf manipulation macros */
-#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK 0xffffff
-#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT 30
-#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK 0x3 /* after shift */
-#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT 16
-#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK 0xf /* after shift */
-#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
-#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT 16
-#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK 0xffff /* after shift */
-#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK 0xffff
-#define CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD (3 * sizeof(u_int32_t))
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[2]) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
- CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT)
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv) \
- ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK)
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN(kv) \
- ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) >> \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_SHIFT) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_MASK)
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN(kv) \
- (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv) \
- (&((kv)->value.leaf.data[5]))
-
-static inline u_int32_t *CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(struct csr1212_keyval *kv)
-{
- static const int pd[4] = { 0, 4, 16, 256 };
- static const int cs[16] = { 4, 2 };
- int ps = pd[CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv)];
-
- return &kv->value.leaf.data[5 +
- (ps * cs[CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv)]) /
- sizeof(u_int32_t)];
-}
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version) \
- ((kv)->value.leaf.data[2] = \
- ((kv)->value.leaf.data[2] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK))) | \
- CSR1212_CPU_TO_BE32(((version) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)))
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth) \
- ((kv)->value.leaf.data[3] = \
- ((kv)->value.leaf.data[3] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK << \
- CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))) | \
- CSR1212_CPU_TO_BE32(((palette_depth) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK) << \
- CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space) \
- ((kv)->value.leaf.data[3] = \
- ((kv)->value.leaf.data[3] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK << \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))) | \
- CSR1212_CPU_TO_BE32(((color_space) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK) << \
- CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
- ((kv)->value.leaf.data[3] = \
- ((kv)->value.leaf.data[3] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
- CSR1212_CPU_TO_BE32(((language) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan) \
- ((kv)->value.leaf.data[4] = \
- ((kv)->value.leaf.data[4] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK << \
- CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))) | \
- CSR1212_CPU_TO_BE32(((hscan) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK) << \
- CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))
-
-#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan) \
- ((kv)->value.leaf.data[4] = \
- (((kv)->value.leaf.data[4] & \
- CSR1212_CPU_TO_BE32(~CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK))) | \
- CSR1212_CPU_TO_BE32(((vscan) & \
- CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)))
-
-
-/* Modifiable Descriptor Leaf manipulation macros */
-#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT 16
-#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK 0xffff
-#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT 32
-#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK 0xffff
-#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK 0xffffffffULL
-
-#define CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE(kv) \
- CSR1212_BE16_TO_CPU((kv)->value.leaf.data[0] >> CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE_SHIFT)
-
-#define CSR1212_MODIFIABLE_DESCRIPTOR_ADDRESS(kv) \
- (CSR1212_BE16_TO_CPU(((u_int64_t)((kv)->value.leaf.data[0])) << \
- CSR1212_MODIFIABLE_DESCRIPTOR_ADDR_HI_SHIFT) | \
- CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]))
-
-#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, size) \
- ((kv)->value.leaf.data[0] = \
- ((kv)->value.leaf.data[0] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK << \
- CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))) | \
- CSR1212_CPU_TO_BE32(((size) & \
- CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK) << \
- CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))
-
-#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, addr) \
- ((kv)->value.leaf.data[0] = \
- ((kv)->value.leaf.data[0] & \
- CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK))) | \
- CSR1212_CPU_TO_BE32(((addr) & \
- CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK)))
-
-#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, addr) \
- ((kv)->value.leaf.data[1] = \
- CSR1212_CPU_TO_BE32(addr & CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK))
-
-
/* The following 2 function are for creating new Configuration ROM trees. The
* first function is used for both creating local trees and parsing remote
@@ -543,11 +261,10 @@ extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
size_t bus_info_size,
void *private);
extern void csr1212_init_local_csr(struct csr1212_csr *csr,
- const u_int32_t *bus_info_data, int max_rom);
+ const u32 *bus_info_data, int max_rom);
-/* The following function destroys a Configuration ROM tree and release all
- * memory taken by the tree. */
+/* Destroy a Configuration ROM tree and release all memory taken by the tree. */
extern void csr1212_destroy_csr(struct csr1212_csr *csr);
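
A minimal sketch of the create/parse/destroy lifecycle declared above; the bus_read/get_max_rom callbacks, the private host pointer, and the 5-quadlet bus-info size are placeholders, not taken from this patch:

static struct csr1212_bus_ops my_ops = {
	.bus_read    = my_bus_read,	/* assumed callback; must return 0 on success */
	.get_max_rom = my_get_max_rom,	/* assumed callback; returns 0..2 per IEEE 1212 */
};

static int example_parse_remote_rom(void *host_private)
{
	struct csr1212_csr *csr;
	int ret;

	csr = csr1212_create_csr(&my_ops, 5 * sizeof(u32) /* assumed size */,
				 host_private);
	if (!csr)
		return -ENOMEM;

	ret = csr1212_parse_csr(csr);	/* read and parse the remote ConfigROM */
	if (ret != CSR1212_SUCCESS) {
		csr1212_destroy_csr(csr);
		return ret;
	}
	/* ... walk csr->root_kv here ... */
	csr1212_destroy_csr(csr);
	return 0;
}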
@@ -555,50 +272,20 @@ extern void csr1212_destroy_csr(struct csr1212_csr *csr);
* a Configuration ROM tree. Code that creates new keyvals with these functions
* must release those keyvals with csr1212_release_keyval() when they are no
* longer needed. */
-extern struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value);
-extern struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data,
- size_t data_len);
-extern struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key,
- u_int32_t csr_offset);
-extern struct csr1212_keyval *csr1212_new_directory(u_int8_t key);
-extern struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec,
- u_int32_t key,
- u_int32_t value);
-extern struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec,
- u_int32_t key,
- const void *data,
- size_t data_len);
-extern struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype,
- u_int32_t specifier_id,
- const void *data,
- size_t data_len);
-extern struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
- u_int16_t cset,
- u_int16_t language,
- const void *data,
- size_t data_len);
+extern struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value);
+extern struct csr1212_keyval *csr1212_new_directory(u8 key);
extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
-extern struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
- u_int8_t palette_depth,
- u_int8_t color_space,
- u_int16_t language,
- u_int16_t hscan,
- u_int16_t vscan,
- u_int32_t *palette,
- u_int32_t *pixels);
-extern struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
- u_int64_t address);
-extern struct csr1212_keyval *csr1212_new_keyword_leaf(int strc,
- const char *strv[]);
-
-
-/* The following functions manage association between keyvals. Typically,
+
+
+/* The following function manages association between keyvals. Typically,
* Descriptor Leaves and Directories will be associated with another keyval and
 * it is desirable for the Descriptor keyval to be placed immediately after the
- * keyval that it is associated with.*/
-extern int csr1212_associate_keyval(struct csr1212_keyval *kv,
- struct csr1212_keyval *associate);
-extern void csr1212_disassociate_keyval(struct csr1212_keyval *kv);
+ * keyval that it is associated with.
+ * Take care with subsequent ROM modifications: There is no function to remove
+ * previously specified associations.
+ */
+extern void csr1212_associate_keyval(struct csr1212_keyval *kv,
+ struct csr1212_keyval *associate);
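
As a hedged illustration of the association pattern just described: the unit-directory argument and the string are invented example values, and it is assumed, as the release rule above implies, that the association itself keeps the descriptor alive after the creator drops its reference.

static int example_add_unit_name(struct csr1212_keyval *unit_dir)
{
	struct csr1212_keyval *name;

	name = csr1212_new_string_descriptor_leaf("Example Unit");
	if (!name)
		return -ENOMEM;

	/* lay the textual descriptor out right after the keyval it describes */
	csr1212_associate_keyval(unit_dir, name);

	/* per the comment above: creators release what they created */
	csr1212_release_keyval(name);
	return 0;
}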
/* The following functions manage the association of a keyval and directories.
@@ -609,23 +296,15 @@ extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
struct csr1212_keyval *kv);
-/* The following functions create a Configuration ROM image from the tree of
- * keyvals provided. csr1212_generate_csr_image() creates a complete image in
- * the list of caches available via csr->cache_head. The other functions are
- * provided should there be a need to create a flat image without restrictions
- * placed by IEEE 1212. */
-extern struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
- struct csr1212_keyval *start_kv,
- int start_pos);
-extern size_t csr1212_generate_layout_order(struct csr1212_keyval *kv);
-extern void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache);
+/* Creates a complete Configuration ROM image in the list of caches available
+ * via csr->cache_head. */
extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
 /* This is a convenience function for reading a block of data out of one of the
* caches in the csr->cache_head list. */
-extern int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer,
- u_int32_t len);
+extern int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer,
+ u32 len);
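
A small hedged example of the convenience read, assuming a csr that has already been generated or parsed; offset 0 and a single-quadlet read are illustrative values:

	u32 quad;

	if (csr1212_read(csr, 0, &quad, sizeof(quad)) != CSR1212_SUCCESS)
		return -EIO;	/* offset not covered by any cache */
	/* quad now holds the first quadlet of the ConfigROM image */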
/* The following functions are in place for parsing Configuration ROM images.
@@ -635,15 +314,11 @@ extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
struct csr1212_csr_rom_cache *cache);
extern int csr1212_parse_csr(struct csr1212_csr *csr);
-/* These are internal functions referenced by inline functions below. */
-extern int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
-extern void _csr1212_destroy_keyval(struct csr1212_keyval *kv);
-
/* This function allocates a new cache which may be used for either parsing or
* generating sub-sets of Configuration ROM images. */
-static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t offset,
- size_t size)
+static inline struct csr1212_csr_rom_cache *
+csr1212_rom_cache_malloc(u32 offset, size_t size)
{
struct csr1212_csr_rom_cache *cache;
@@ -667,16 +342,8 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o
/* This function ensures that a keyval contains data when referencing a keyval
* created by parsing a Configuration ROM. */
-static inline struct csr1212_keyval *csr1212_get_keyval(struct csr1212_csr *csr,
- struct csr1212_keyval *kv)
-{
- if (!kv)
- return NULL;
- if (!kv->valid)
- if (_csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
- return NULL;
- return kv;
-}
+extern struct csr1212_keyval *
+csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
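
For illustration, the lazy-read contract looks like this at a call site (a sketch; the error code chosen on failure is an assumption):

	struct csr1212_keyval *leaf = csr1212_get_keyval(csr, kv);

	if (!leaf)
		return -EIO;	/* kv was NULL or the ROM read/parse failed */
	/* leaf->value is now valid and safe to dereference */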
/* This function increments the reference count for a keyval should there be a
@@ -691,37 +358,29 @@ static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
* keyval when there are no more users of the keyval. This should be called by
* any code that calls csr1212_keep_keyval() or any of the keyval creation
* routines csr1212_new_*(). */
-static inline void csr1212_release_keyval(struct csr1212_keyval *kv)
-{
- if (kv->refcnt > 1)
- kv->refcnt--;
- else
- _csr1212_destroy_keyval(kv);
-}
+extern void csr1212_release_keyval(struct csr1212_keyval *kv);
/*
* This macro allows for looping over the keyval entries in a directory and it
* ensures that keyvals from remote ConfigROMs are parsed properly.
*
- * _csr is a struct csr1212_csr * that points to CSR associated with dir.
- * _kv is a struct csr1212_keyval * that'll point to the current keyval (loop index).
- * _dir is a struct csr1212_keyval * that points to the directory to be looped.
- * _pos is a struct csr1212_dentry * that is used internally for indexing.
+ * struct csr1212_csr *_csr points to the CSR associated with dir.
+ * struct csr1212_keyval *_kv points to the current keyval (loop index).
+ * struct csr1212_keyval *_dir points to the directory to be looped.
+ * struct csr1212_dentry *_pos is used internally for indexing.
*
* kv will be NULL upon exit of the loop.
*/
-#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
- for (csr1212_get_keyval((_csr), (_dir)), \
- _pos = (_dir)->value.directory.dentries_head, \
- _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL; \
- (_kv) && (_pos); \
- (_kv->associate == NULL) ? \
- ((_pos = _pos->next), \
- (_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : \
- NULL)) : \
+#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
+ for (csr1212_get_keyval((_csr), (_dir)), \
+ _pos = (_dir)->value.directory.dentries_head, \
+ _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL;\
+ (_kv) && (_pos); \
+ (_kv->associate == NULL) ? \
+ ((_pos = _pos->next), (_kv = (_pos) ? \
+ csr1212_get_keyval((_csr), _pos->kv) : \
+ NULL)) : \
(_kv = csr1212_get_keyval((_csr), _kv->associate)))
-
-
#endif /* __CSR1212_H__ */
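
To show the macro in context, a minimal sketch of walking a parsed root directory, roughly mirroring the Extended-ROM scan in csr1212_parse_csr(); the printk is illustrative only:

	struct csr1212_keyval *kv;
	struct csr1212_dentry *dentry;

	csr1212_for_each_dir_entry(csr, kv, csr->root_kv, dentry) {
		if (kv->key.id == CSR1212_KV_ID_EXTENDED_ROM)
			printk(KERN_DEBUG "extended ROM entry at offset %u\n",
			       (unsigned int)kv->offset);
	}
	/* kv is guaranteed to be NULL here, found or not */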
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index c68f328e1a2..45d60558192 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -62,6 +62,9 @@ void dma_prog_region_free(struct dma_prog_region *prog)
/* dma_region */
+/**
+ * dma_region_init - clear out all fields but do not allocate anything
+ */
void dma_region_init(struct dma_region *dma)
{
dma->kvirt = NULL;
@@ -71,6 +74,9 @@ void dma_region_init(struct dma_region *dma)
dma->sglist = NULL;
}
+/**
+ * dma_region_alloc - allocate the buffer and map it to the IOMMU
+ */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
struct pci_dev *dev, int direction)
{
@@ -128,6 +134,9 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
return -ENOMEM;
}
+/**
+ * dma_region_free - unmap and free the buffer
+ */
void dma_region_free(struct dma_region *dma)
{
if (dma->n_dma_pages) {
@@ -167,6 +176,12 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
return i;
}
+/**
+ * dma_region_offset_to_bus - get bus address of an offset within a DMA region
+ *
+ * Returns the DMA bus address of the byte with the given @offset relative to
+ * the beginning of the @dma.
+ */
dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
unsigned long offset)
{
@@ -177,6 +192,9 @@ dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
return sg_dma_address(sg) + rem;
}
+/**
+ * dma_region_sync_for_cpu - sync the CPU's view of the buffer
+ */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
unsigned long len)
{
@@ -193,6 +211,9 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
dma->direction);
}
+/**
+ * dma_region_sync_for_device - sync the IO bus' view of the buffer
+ */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
unsigned long len)
{
@@ -244,6 +265,9 @@ static struct vm_operations_struct dma_region_vm_ops = {
.nopage = dma_region_pagefault,
};
+/**
+ * dma_region_mmap - map the buffer into a user space process
+ */
int dma_region_mmap(struct dma_region *dma, struct file *file,
struct vm_area_struct *vma)
{
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
index a1682aba71c..2727bcd2419 100644
--- a/drivers/ieee1394/dma.h
+++ b/drivers/ieee1394/dma.h
@@ -66,35 +66,23 @@ struct dma_region {
int direction;
};
-/* clear out all fields but do not allocate anything */
void dma_region_init(struct dma_region *dma);
-
-/* allocate the buffer and map it to the IOMMU */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
struct pci_dev *dev, int direction);
-
-/* unmap and free the buffer */
void dma_region_free(struct dma_region *dma);
-
-/* sync the CPU's view of the buffer */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
unsigned long len);
-
-/* sync the IO bus' view of the buffer */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
unsigned long len);
-
-/* map the buffer into a user space process */
int dma_region_mmap(struct dma_region *dma, struct file *file,
struct vm_area_struct *vma);
+dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
+ unsigned long offset);
-/* macro to index into a DMA region (or dma_prog_region) */
+/**
+ * dma_region_i - macro to index into a DMA region (or dma_prog_region)
+ */
#define dma_region_i(_dma, _type, _index) \
( ((_type*) ((_dma)->kvirt)) + (_index) )
-/* return the DMA bus address of the byte with the given offset
- * relative to the beginning of the dma_region */
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
- unsigned long offset);
-
#endif /* IEEE1394_DMA_H */
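
Putting the dma_region helpers above together, a rough usage sketch; the buffer size, DMA direction, and pci_dev pointer (pdev) are placeholders:

	struct dma_region dma;
	quadlet_t first;
	dma_addr_t bus;

	dma_region_init(&dma);
	if (dma_region_alloc(&dma, 16 * PAGE_SIZE, pdev, PCI_DMA_FROMDEVICE))
		return -ENOMEM;

	/* the device has written into the buffer; make it visible to the CPU */
	dma_region_sync_for_cpu(&dma, 0, 16 * PAGE_SIZE);
	first = *dma_region_i(&dma, quadlet_t, 0);

	/* bus address of byte offset 4096, e.g. for a DMA program descriptor */
	bus = dma_region_offset_to_bus(&dma, 4096);
	printk(KERN_DEBUG "first quadlet 0x%08x, bus addr 0x%llx\n",
	       first, (unsigned long long)bus);

	dma_region_free(&dma);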
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index a364003ba47..2296d43a241 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1,5 +1,5 @@
/*
- * eth1394.c -- Ethernet driver for Linux IEEE-1394 Subsystem
+ * eth1394.c -- IPv4 driver for Linux IEEE-1394 Subsystem
*
* Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
* 2000 Bonin Franck <boninf@free.fr>
@@ -22,10 +22,9 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-/* This driver intends to support RFC 2734, which describes a method for
- * transporting IPv4 datagrams over IEEE-1394 serial busses. This driver
- * will ultimately support that method, but currently falls short in
- * several areas.
+/*
+ * This driver intends to support RFC 2734, which describes a method for
+ * transporting IPv4 datagrams over IEEE-1394 serial busses.
*
* TODO:
* RFC 2734 related:
@@ -40,7 +39,6 @@
* - Consider garbage collecting old partial datagrams after X amount of time
*/
-
#include <linux/module.h>
#include <linux/kernel.h>
@@ -52,7 +50,6 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
@@ -84,10 +81,6 @@
#define ETH1394_PRINT(level, dev_name, fmt, args...) \
printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
-#define DEBUG(fmt, args...) \
- printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
-#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
-
struct fragment_info {
struct list_head list;
int offset;
@@ -105,9 +98,9 @@ struct partial_datagram {
};
struct pdg_list {
- struct list_head list; /* partial datagram list per node */
- unsigned int sz; /* partial datagram list size per node */
- spinlock_t lock; /* partial datagram lock */
+ struct list_head list; /* partial datagram list per node */
+ unsigned int sz; /* partial datagram list size per node */
+ spinlock_t lock; /* partial datagram lock */
};
struct eth1394_host_info {
@@ -121,16 +114,14 @@ struct eth1394_node_ref {
};
struct eth1394_node_info {
- u16 maxpayload; /* Max payload */
- u8 sspd; /* Max speed */
- u64 fifo; /* FIFO address */
- struct pdg_list pdg; /* partial RX datagram lists */
- int dgl; /* Outgoing datagram label */
+ u16 maxpayload; /* max payload */
+ u8 sspd; /* max speed */
+ u64 fifo; /* FIFO address */
+ struct pdg_list pdg; /* partial RX datagram lists */
+ int dgl; /* outgoing datagram label */
};
-/* Our ieee1394 highlevel driver */
-#define ETH1394_DRIVER_NAME "eth1394"
-static const char driver_name[] = ETH1394_DRIVER_NAME;
+static const char driver_name[] = "eth1394";
static struct kmem_cache *packet_task_cache;
@@ -138,18 +129,12 @@ static struct hpsb_highlevel eth1394_highlevel;
/* Use common.lf to determine header len */
static const int hdr_type_len[] = {
- sizeof (struct eth1394_uf_hdr),
- sizeof (struct eth1394_ff_hdr),
- sizeof (struct eth1394_sf_hdr),
- sizeof (struct eth1394_sf_hdr)
+ sizeof(struct eth1394_uf_hdr),
+ sizeof(struct eth1394_ff_hdr),
+ sizeof(struct eth1394_sf_hdr),
+ sizeof(struct eth1394_sf_hdr)
};
-/* Change this to IEEE1394_SPEED_S100 to make testing easier */
-#define ETH1394_SPEED_DEF IEEE1394_SPEED_MAX
-
-/* For now, this needs to be 1500, so that XP works with us */
-#define ETH1394_DATA_LEN ETH_DATA_LEN
-
static const u16 eth1394_speedto_maxpayload[] = {
/* S100, S200, S400, S800, S1600, S3200 */
512, 1024, 2048, 4096, 4096, 4096
@@ -159,7 +144,8 @@ MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
MODULE_LICENSE("GPL");
-/* The max_partial_datagrams parameter is the maximum number of fragmented
+/*
+ * The max_partial_datagrams parameter is the maximum number of fragmented
* datagrams per node that eth1394 will keep in memory. Providing an upper
* bound allows us to limit the amount of memory that partial datagrams
* consume in the event that some partial datagrams are never completed.
@@ -179,10 +165,7 @@ static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr);
static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh);
static void ether1394_header_cache_update(struct hh_cache *hh,
struct net_device *dev,
- unsigned char * haddr);
-static int ether1394_mac_addr(struct net_device *dev, void *p);
-
-static void purge_partial_datagram(struct list_head *old);
+ unsigned char *haddr);
static int ether1394_tx(struct sk_buff *skb, struct net_device *dev);
static void ether1394_iso(struct hpsb_iso *iso);
@@ -190,9 +173,9 @@ static struct ethtool_ops ethtool_ops;
static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
quadlet_t *data, u64 addr, size_t len, u16 flags);
-static void ether1394_add_host (struct hpsb_host *host);
-static void ether1394_remove_host (struct hpsb_host *host);
-static void ether1394_host_reset (struct hpsb_host *host);
+static void ether1394_add_host(struct hpsb_host *host);
+static void ether1394_remove_host(struct hpsb_host *host);
+static void ether1394_host_reset(struct hpsb_host *host);
/* Function for incoming 1394 packets */
static struct hpsb_address_ops addr_ops = {
@@ -207,89 +190,107 @@ static struct hpsb_highlevel eth1394_highlevel = {
.host_reset = ether1394_host_reset,
};
+static int ether1394_recv_init(struct eth1394_priv *priv)
+{
+ unsigned int iso_buf_size;
+
+ /* FIXME: rawiso limits us to PAGE_SIZE */
+ iso_buf_size = min((unsigned int)PAGE_SIZE,
+ 2 * (1U << (priv->host->csr.max_rec + 1)));
+
+ priv->iso = hpsb_iso_recv_init(priv->host,
+ ETHER1394_GASP_BUFFERS * iso_buf_size,
+ ETHER1394_GASP_BUFFERS,
+ priv->broadcast_channel,
+ HPSB_ISO_DMA_PACKET_PER_BUFFER,
+ 1, ether1394_iso);
+ if (priv->iso == NULL) {
+ ETH1394_PRINT_G(KERN_ERR, "Failed to allocate IR context\n");
+ priv->bc_state = ETHER1394_BC_ERROR;
+ return -EAGAIN;
+ }
+
+ if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
+ priv->bc_state = ETHER1394_BC_STOPPED;
+ else
+ priv->bc_state = ETHER1394_BC_RUNNING;
+ return 0;
+}
/* This is called after an "ifup" */
-static int ether1394_open (struct net_device *dev)
+static int ether1394_open(struct net_device *dev)
{
struct eth1394_priv *priv = netdev_priv(dev);
- int ret = 0;
+ int ret;
- /* Something bad happened, don't even try */
if (priv->bc_state == ETHER1394_BC_ERROR) {
- /* we'll try again */
- priv->iso = hpsb_iso_recv_init(priv->host,
- ETHER1394_ISO_BUF_SIZE,
- ETHER1394_GASP_BUFFERS,
- priv->broadcast_channel,
- HPSB_ISO_DMA_PACKET_PER_BUFFER,
- 1, ether1394_iso);
- if (priv->iso == NULL) {
- ETH1394_PRINT(KERN_ERR, dev->name,
- "Could not allocate isochronous receive "
- "context for the broadcast channel\n");
- priv->bc_state = ETHER1394_BC_ERROR;
- ret = -EAGAIN;
- } else {
- if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
- priv->bc_state = ETHER1394_BC_STOPPED;
- else
- priv->bc_state = ETHER1394_BC_RUNNING;
- }
+ ret = ether1394_recv_init(priv);
+ if (ret)
+ return ret;
}
-
- if (ret)
- return ret;
-
- netif_start_queue (dev);
+ netif_start_queue(dev);
return 0;
}
/* This is called after an "ifdown" */
-static int ether1394_stop (struct net_device *dev)
+static int ether1394_stop(struct net_device *dev)
{
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
return 0;
}
/* Return statistics to the caller */
-static struct net_device_stats *ether1394_stats (struct net_device *dev)
+static struct net_device_stats *ether1394_stats(struct net_device *dev)
{
return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
}
-/* What to do if we timeout. I think a host reset is probably in order, so
- * that's what we do. Should we increment the stat counters too? */
-static void ether1394_tx_timeout (struct net_device *dev)
+/* FIXME: What to do if we timeout? I think a host reset is probably in order,
+ * so that's what we do. Should we increment the stat counters too? */
+static void ether1394_tx_timeout(struct net_device *dev)
{
- ETH1394_PRINT (KERN_ERR, dev->name, "Timeout, resetting host %s\n",
- ((struct eth1394_priv *)netdev_priv(dev))->host->driver->name);
+ struct hpsb_host *host =
+ ((struct eth1394_priv *)netdev_priv(dev))->host;
- highlevel_host_reset (((struct eth1394_priv *)netdev_priv(dev))->host);
+ ETH1394_PRINT(KERN_ERR, dev->name, "Timeout, resetting host\n");
+ ether1394_host_reset(host);
+}
- netif_wake_queue (dev);
+static inline int ether1394_max_mtu(struct hpsb_host *host)
+{
+ return (1 << (host->csr.max_rec + 1))
+ - sizeof(union eth1394_hdr) - ETHER1394_GASP_OVERHEAD;
}
static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
{
- struct eth1394_priv *priv = netdev_priv(dev);
+ int max_mtu;
- if ((new_mtu < 68) ||
- (new_mtu > min(ETH1394_DATA_LEN,
- (int)((1 << (priv->host->csr.max_rec + 1)) -
- (sizeof(union eth1394_hdr) +
- ETHER1394_GASP_OVERHEAD)))))
+ if (new_mtu < 68)
return -EINVAL;
+
+ max_mtu = ether1394_max_mtu(
+ ((struct eth1394_priv *)netdev_priv(dev))->host);
+ if (new_mtu > max_mtu) {
+ ETH1394_PRINT(KERN_INFO, dev->name,
+ "Local node constrains MTU to %d\n", max_mtu);
+ return -ERANGE;
+ }
+
dev->mtu = new_mtu;
return 0;
}
static void purge_partial_datagram(struct list_head *old)
{
- struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+ struct partial_datagram *pd;
struct list_head *lh, *n;
+ struct fragment_info *fi;
+
+ pd = list_entry(old, struct partial_datagram, list);
list_for_each_safe(lh, n, &pd->frag_info) {
- struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+ fi = list_entry(lh, struct fragment_info, list);
list_del(lh);
kfree(fi);
}
@@ -330,35 +331,26 @@ static struct eth1394_node_ref *eth1394_find_node_nodeid(struct list_head *inl,
nodeid_t nodeid)
{
struct eth1394_node_ref *node;
- list_for_each_entry(node, inl, list) {
+
+ list_for_each_entry(node, inl, list)
if (node->ud->ne->nodeid == nodeid)
return node;
- }
return NULL;
}
-static int eth1394_probe(struct device *dev)
+static int eth1394_new_node(struct eth1394_host_info *hi,
+ struct unit_directory *ud)
{
- struct unit_directory *ud;
- struct eth1394_host_info *hi;
struct eth1394_priv *priv;
struct eth1394_node_ref *new_node;
struct eth1394_node_info *node_info;
- ud = container_of(dev, struct unit_directory, device);
-
- hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
- if (!hi)
- return -ENOENT;
-
- new_node = kmalloc(sizeof(*new_node),
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+ new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
return -ENOMEM;
- node_info = kmalloc(sizeof(*node_info),
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+ node_info = kmalloc(sizeof(*node_info), GFP_KERNEL);
if (!node_info) {
kfree(new_node);
return -ENOMEM;
@@ -374,10 +366,22 @@ static int eth1394_probe(struct device *dev)
priv = netdev_priv(hi->dev);
list_add_tail(&new_node->list, &priv->ip_node_list);
-
return 0;
}
+static int eth1394_probe(struct device *dev)
+{
+ struct unit_directory *ud;
+ struct eth1394_host_info *hi;
+
+ ud = container_of(dev, struct unit_directory, device);
+ hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
+ if (!hi)
+ return -ENOENT;
+
+ return eth1394_new_node(hi, ud);
+}
+
static int eth1394_remove(struct device *dev)
{
struct unit_directory *ud;
@@ -396,24 +400,23 @@ static int eth1394_remove(struct device *dev)
priv = netdev_priv(hi->dev);
old_node = eth1394_find_node(&priv->ip_node_list, ud);
+ if (!old_node)
+ return 0;
- if (old_node) {
- list_del(&old_node->list);
- kfree(old_node);
+ list_del(&old_node->list);
+ kfree(old_node);
- node_info = (struct eth1394_node_info*)ud->device.driver_data;
+ node_info = (struct eth1394_node_info*)ud->device.driver_data;
- spin_lock_irqsave(&node_info->pdg.lock, flags);
- /* The partial datagram list should be empty, but we'll just
- * make sure anyway... */
- list_for_each_safe(lh, n, &node_info->pdg.list) {
- purge_partial_datagram(lh);
- }
- spin_unlock_irqrestore(&node_info->pdg.lock, flags);
+ spin_lock_irqsave(&node_info->pdg.lock, flags);
+ /* The partial datagram list should be empty, but we'll just
+ * make sure anyway... */
+ list_for_each_safe(lh, n, &node_info->pdg.list)
+ purge_partial_datagram(lh);
+ spin_unlock_irqrestore(&node_info->pdg.lock, flags);
- kfree(node_info);
- ud->device.driver_data = NULL;
- }
+ kfree(node_info);
+ ud->device.driver_data = NULL;
return 0;
}
@@ -422,44 +425,19 @@ static int eth1394_update(struct unit_directory *ud)
struct eth1394_host_info *hi;
struct eth1394_priv *priv;
struct eth1394_node_ref *node;
- struct eth1394_node_info *node_info;
hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
if (!hi)
return -ENOENT;
priv = netdev_priv(hi->dev);
-
node = eth1394_find_node(&priv->ip_node_list, ud);
+ if (node)
+ return 0;
- if (!node) {
- node = kmalloc(sizeof(*node),
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- node_info = kmalloc(sizeof(*node_info),
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if (!node_info) {
- kfree(node);
- return -ENOMEM;
- }
-
- spin_lock_init(&node_info->pdg.lock);
- INIT_LIST_HEAD(&node_info->pdg.list);
- node_info->pdg.sz = 0;
-
- ud->device.driver_data = node_info;
- node->ud = ud;
-
- priv = netdev_priv(hi->dev);
- list_add_tail(&node->list, &priv->ip_node_list);
- }
-
- return 0;
+ return eth1394_new_node(hi, ud);
}
-
static struct ieee1394_device_id eth1394_id_table[] = {
{
.match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
@@ -473,7 +451,7 @@ static struct ieee1394_device_id eth1394_id_table[] = {
MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
static struct hpsb_protocol_driver eth1394_proto_driver = {
- .name = ETH1394_DRIVER_NAME,
+ .name = driver_name,
.id_table = eth1394_id_table,
.update = eth1394_update,
.driver = {
@@ -482,47 +460,50 @@ static struct hpsb_protocol_driver eth1394_proto_driver = {
},
};
-
-static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
+static void ether1394_reset_priv(struct net_device *dev, int set_mtu)
{
unsigned long flags;
int i;
struct eth1394_priv *priv = netdev_priv(dev);
struct hpsb_host *host = priv->host;
- u64 guid = get_unaligned((u64*)&(host->csr.rom->bus_info_data[3]));
- u16 maxpayload = 1 << (host->csr.max_rec + 1);
+ u64 guid = get_unaligned((u64 *)&(host->csr.rom->bus_info_data[3]));
int max_speed = IEEE1394_SPEED_MAX;
- spin_lock_irqsave (&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- memset(priv->ud_list, 0, sizeof(struct node_entry*) * ALL_NODES);
+ memset(priv->ud_list, 0, sizeof(priv->ud_list));
priv->bc_maxpayload = 512;
/* Determine speed limit */
- for (i = 0; i < host->node_count; i++)
+ /* FIXME: This is broken for nodes with link speed < PHY speed,
+ * and it is suboptimal for S200B...S800B hardware.
+ * The result of nodemgr's speed probe should be used somehow. */
+ for (i = 0; i < host->node_count; i++) {
+ /* take care of S100B...S400B PHY ports */
+ if (host->speed[i] == SELFID_SPEED_UNKNOWN) {
+ max_speed = IEEE1394_SPEED_100;
+ break;
+ }
if (max_speed > host->speed[i])
max_speed = host->speed[i];
+ }
priv->bc_sspd = max_speed;
- /* We'll use our maxpayload as the default mtu */
if (set_mtu) {
- dev->mtu = min(ETH1394_DATA_LEN,
- (int)(maxpayload -
- (sizeof(union eth1394_hdr) +
- ETHER1394_GASP_OVERHEAD)));
+ /* Use the RFC 2734 default 1500 octets or the maximum payload
+ * as initial MTU */
+ dev->mtu = min(1500, ether1394_max_mtu(host));
/* Set our hardware address while we're at it */
memcpy(dev->dev_addr, &guid, sizeof(u64));
memset(dev->broadcast, 0xff, sizeof(u64));
}
- spin_unlock_irqrestore (&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
-/* This function is called right before register_netdev */
-static void ether1394_init_dev (struct net_device *dev)
+static void ether1394_init_dev(struct net_device *dev)
{
- /* Our functions */
dev->open = ether1394_open;
dev->stop = ether1394_stop;
dev->hard_start_xmit = ether1394_tx;
@@ -535,10 +516,9 @@ static void ether1394_init_dev (struct net_device *dev)
dev->hard_header_cache = ether1394_header_cache;
dev->header_cache_update= ether1394_header_cache_update;
dev->hard_header_parse = ether1394_header_parse;
- dev->set_mac_address = ether1394_mac_addr;
+
SET_ETHTOOL_OPS(dev, &ethtool_ops);
- /* Some constants */
dev->watchdog_timeo = ETHER1394_TIMEOUT;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->features = NETIF_F_HIGHDMA;
@@ -546,7 +526,8 @@ static void ether1394_init_dev (struct net_device *dev)
dev->hard_header_len = ETH1394_HLEN;
dev->type = ARPHRD_IEEE1394;
- ether1394_reset_priv (dev, 1);
+ /* FIXME: This value was copied from ether_setup(). Is it too much? */
+ dev->tx_queue_len = 1000;
}
/*
@@ -554,34 +535,33 @@ static void ether1394_init_dev (struct net_device *dev)
* when the module is installed. This is where we add all of our ethernet
* devices. One for each host.
*/
-static void ether1394_add_host (struct hpsb_host *host)
+static void ether1394_add_host(struct hpsb_host *host)
{
struct eth1394_host_info *hi = NULL;
struct net_device *dev = NULL;
struct eth1394_priv *priv;
u64 fifo_addr;
- if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
+ if (hpsb_config_rom_ip1394_add(host) != 0) {
+ ETH1394_PRINT_G(KERN_ERR, "Can't add IP-over-1394 ROM entry\n");
return;
+ }
fifo_addr = hpsb_allocate_and_register_addrspace(
&eth1394_highlevel, host, &addr_ops,
ETHER1394_REGION_ADDR_LEN, ETHER1394_REGION_ADDR_LEN,
CSR1212_INVALID_ADDR_SPACE, CSR1212_INVALID_ADDR_SPACE);
- if (fifo_addr == CSR1212_INVALID_ADDR_SPACE)
- goto out;
-
- /* We should really have our own alloc_hpsbdev() function in
- * net_init.c instead of calling the one for ethernet then hijacking
- * it for ourselves. That way we'd be a real networking device. */
- dev = alloc_etherdev(sizeof (struct eth1394_priv));
+ if (fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
+ ETH1394_PRINT_G(KERN_ERR, "Cannot register CSR space\n");
+ hpsb_config_rom_ip1394_remove(host);
+ return;
+ }
+ dev = alloc_netdev(sizeof(*priv), "eth%d", ether1394_init_dev);
if (dev == NULL) {
- ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate "
- "etherdevice for IEEE 1394 device %s-%d\n",
- host->driver->name, host->id);
+ ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
goto out;
- }
+ }
SET_MODULE_OWNER(dev);
#if 0
@@ -590,31 +570,26 @@ static void ether1394_add_host (struct hpsb_host *host)
#endif
priv = netdev_priv(dev);
-
INIT_LIST_HEAD(&priv->ip_node_list);
-
spin_lock_init(&priv->lock);
priv->host = host;
priv->local_fifo = fifo_addr;
hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
-
if (hi == NULL) {
- ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create "
- "hostinfo for IEEE 1394 device %s-%d\n",
- host->driver->name, host->id);
+ ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
goto out;
- }
+ }
- ether1394_init_dev(dev);
+ ether1394_reset_priv(dev, 1);
- if (register_netdev (dev)) {
- ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
+ if (register_netdev(dev)) {
+ ETH1394_PRINT_G(KERN_ERR, "Cannot register the driver\n");
goto out;
}
- ETH1394_PRINT (KERN_INFO, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet (fw-host%d)\n",
- host->id);
+ ETH1394_PRINT(KERN_INFO, dev->name, "IPv4 over IEEE 1394 (fw-host%d)\n",
+ host->id);
hi->host = host;
hi->dev = dev;
@@ -623,61 +598,37 @@ static void ether1394_add_host (struct hpsb_host *host)
* be checked when the eth device is opened. */
priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
- priv->iso = hpsb_iso_recv_init(host,
- ETHER1394_ISO_BUF_SIZE,
- ETHER1394_GASP_BUFFERS,
- priv->broadcast_channel,
- HPSB_ISO_DMA_PACKET_PER_BUFFER,
- 1, ether1394_iso);
- if (priv->iso == NULL) {
- ETH1394_PRINT(KERN_ERR, dev->name,
- "Could not allocate isochronous receive context "
- "for the broadcast channel\n");
- priv->bc_state = ETHER1394_BC_ERROR;
- } else {
- if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
- priv->bc_state = ETHER1394_BC_STOPPED;
- else
- priv->bc_state = ETHER1394_BC_RUNNING;
- }
-
+ ether1394_recv_init(priv);
return;
-
out:
- if (dev != NULL)
+ if (dev)
free_netdev(dev);
if (hi)
hpsb_destroy_hostinfo(&eth1394_highlevel, host);
-
- return;
+ hpsb_unregister_addrspace(&eth1394_highlevel, host, fifo_addr);
+ hpsb_config_rom_ip1394_remove(host);
}
/* Remove a card from our list */
-static void ether1394_remove_host (struct hpsb_host *host)
+static void ether1394_remove_host(struct hpsb_host *host)
{
struct eth1394_host_info *hi;
+ struct eth1394_priv *priv;
hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
- if (hi != NULL) {
- struct eth1394_priv *priv = netdev_priv(hi->dev);
-
- hpsb_unregister_addrspace(&eth1394_highlevel, host,
- priv->local_fifo);
-
- if (priv->iso != NULL)
- hpsb_iso_shutdown(priv->iso);
-
- if (hi->dev) {
- unregister_netdev (hi->dev);
- free_netdev(hi->dev);
- }
- }
-
- return;
+ if (!hi)
+ return;
+ priv = netdev_priv(hi->dev);
+ hpsb_unregister_addrspace(&eth1394_highlevel, host, priv->local_fifo);
+ hpsb_config_rom_ip1394_remove(host);
+ if (priv->iso)
+ hpsb_iso_shutdown(priv->iso);
+ unregister_netdev(hi->dev);
+ free_netdev(hi->dev);
}
-/* A reset has just arisen */
-static void ether1394_host_reset (struct hpsb_host *host)
+/* A bus reset happened */
+static void ether1394_host_reset(struct hpsb_host *host)
{
struct eth1394_host_info *hi;
struct eth1394_priv *priv;
@@ -690,24 +641,23 @@ static void ether1394_host_reset (struct hpsb_host *host)
hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
/* This can happen for hosts that we don't use */
- if (hi == NULL)
+ if (!hi)
return;
dev = hi->dev;
- priv = (struct eth1394_priv *)netdev_priv(dev);
+ priv = netdev_priv(dev);
- /* Reset our private host data, but not our mtu */
- netif_stop_queue (dev);
- ether1394_reset_priv (dev, 0);
+ /* Reset our private host data, but not our MTU */
+ netif_stop_queue(dev);
+ ether1394_reset_priv(dev, 0);
list_for_each_entry(node, &priv->ip_node_list, list) {
- node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
+ node_info = node->ud->device.driver_data;
spin_lock_irqsave(&node_info->pdg.lock, flags);
- list_for_each_safe(lh, n, &node_info->pdg.list) {
+ list_for_each_safe(lh, n, &node_info->pdg.list)
purge_partial_datagram(lh);
- }
INIT_LIST_HEAD(&(node_info->pdg.list));
node_info->pdg.sz = 0;
@@ -715,7 +665,7 @@ static void ether1394_host_reset (struct hpsb_host *host)
spin_unlock_irqrestore(&node_info->pdg.lock, flags);
}
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
/******************************************
@@ -723,7 +673,6 @@ static void ether1394_host_reset (struct hpsb_host *host)
******************************************/
/* These functions have been adapted from net/ethernet/eth.c */
-
/* Create a fake MAC header for an arbitrary protocol layer.
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp). */
@@ -731,25 +680,24 @@ static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len)
{
- struct eth1394hdr *eth = (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
+ struct eth1394hdr *eth =
+ (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
eth->h_proto = htons(type);
- if (dev->flags & (IFF_LOOPBACK|IFF_NOARP)) {
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
memset(eth->h_dest, 0, dev->addr_len);
- return(dev->hard_header_len);
+ return dev->hard_header_len;
}
if (daddr) {
- memcpy(eth->h_dest,daddr,dev->addr_len);
+ memcpy(eth->h_dest, daddr, dev->addr_len);
return dev->hard_header_len;
}
return -dev->hard_header_len;
-
}
-
/* Rebuild the faked MAC header. This is called after an ARP
* (or in future other address resolution) has completed on this
* sk_buff. We now let ARP fill in the other fields.
@@ -760,38 +708,30 @@ static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
static int ether1394_rebuild_header(struct sk_buff *skb)
{
struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
- struct net_device *dev = skb->dev;
- switch (eth->h_proto) {
-
-#ifdef CONFIG_INET
- case __constant_htons(ETH_P_IP):
- return arp_find((unsigned char*)&eth->h_dest, skb);
-#endif
- default:
- ETH1394_PRINT(KERN_DEBUG, dev->name,
- "unable to resolve type %04x addresses.\n",
- ntohs(eth->h_proto));
- break;
- }
+ if (eth->h_proto == htons(ETH_P_IP))
+ return arp_find((unsigned char *)&eth->h_dest, skb);
+ ETH1394_PRINT(KERN_DEBUG, skb->dev->name,
+ "unable to resolve type %04x addresses\n",
+ ntohs(eth->h_proto));
return 0;
}
static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr)
{
struct net_device *dev = skb->dev;
+
memcpy(haddr, dev->dev_addr, ETH1394_ALEN);
return ETH1394_ALEN;
}
-
static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh)
{
unsigned short type = hh->hh_type;
- struct eth1394hdr *eth = (struct eth1394hdr*)(((u8*)hh->hh_data) +
- (16 - ETH1394_HLEN));
struct net_device *dev = neigh->dev;
+ struct eth1394hdr *eth =
+ (struct eth1394hdr *)((u8 *)hh->hh_data + 16 - ETH1394_HLEN);
if (type == htons(ETH_P_802_3))
return -1;
@@ -808,38 +748,25 @@ static void ether1394_header_cache_update(struct hh_cache *hh,
struct net_device *dev,
unsigned char * haddr)
{
- memcpy(((u8*)hh->hh_data) + (16 - ETH1394_HLEN), haddr, dev->addr_len);
+ memcpy((u8 *)hh->hh_data + 16 - ETH1394_HLEN, haddr, dev->addr_len);
}
-static int ether1394_mac_addr(struct net_device *dev, void *p)
-{
- if (netif_running(dev))
- return -EBUSY;
-
- /* Not going to allow setting the MAC address, we really need to use
- * the real one supplied by the hardware */
- return -EINVAL;
- }
-
-
-
/******************************************
* Datagram reception code
******************************************/
/* Copied from net/ethernet/eth.c */
-static inline u16 ether1394_type_trans(struct sk_buff *skb,
- struct net_device *dev)
+static u16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct eth1394hdr *eth;
unsigned char *rawp;
skb_reset_mac_header(skb);
- skb_pull (skb, ETH1394_HLEN);
+ skb_pull(skb, ETH1394_HLEN);
eth = eth1394_hdr(skb);
if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
+ if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len) == 0)
skb->pkt_type = PACKET_BROADCAST;
#if 0
else
@@ -848,47 +775,45 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb,
} else {
if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
- }
+ }
- if (ntohs (eth->h_proto) >= 1536)
+ if (ntohs(eth->h_proto) >= 1536)
return eth->h_proto;
rawp = skb->data;
- if (*(unsigned short *)rawp == 0xFFFF)
- return htons (ETH_P_802_3);
+ if (*(unsigned short *)rawp == 0xFFFF)
+ return htons(ETH_P_802_3);
- return htons (ETH_P_802_2);
+ return htons(ETH_P_802_2);
}
/* Parse an encapsulated IP1394 header into an ethernet frame packet.
* We also perform ARP translation here, if need be. */
-static inline u16 ether1394_parse_encap(struct sk_buff *skb,
- struct net_device *dev,
- nodeid_t srcid, nodeid_t destid,
- u16 ether_type)
+static u16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
+ nodeid_t srcid, nodeid_t destid,
+ u16 ether_type)
{
struct eth1394_priv *priv = netdev_priv(dev);
u64 dest_hw;
unsigned short ret = 0;
- /* Setup our hw addresses. We use these to build the
- * ethernet header. */
+ /* Setup our hw addresses. We use these to build the ethernet header. */
if (destid == (LOCAL_BUS | ALL_NODES))
dest_hw = ~0ULL; /* broadcast */
else
- dest_hw = cpu_to_be64((((u64)priv->host->csr.guid_hi) << 32) |
+ dest_hw = cpu_to_be64((u64)priv->host->csr.guid_hi << 32 |
priv->host->csr.guid_lo);
/* If this is an ARP packet, convert it. First, we want to make
* use of some of the fields, since they tell us a little bit
* about the sending machine. */
if (ether_type == htons(ETH_P_ARP)) {
- struct eth1394_arp *arp1394 = (struct eth1394_arp*)skb->data;
+ struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr = (unsigned char *)(arp + 1);
u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
- ntohl(arp1394->fifo_lo);
+ ntohl(arp1394->fifo_lo);
u8 max_rec = min(priv->host->csr.max_rec,
(u8)(arp1394->max_rec));
int sspd = arp1394->sspd;
@@ -902,16 +827,17 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
if (sspd > 5 || sspd < 0)
sspd = 0;
- maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
+ maxpayload = min(eth1394_speedto_maxpayload[sspd],
+ (u16)(1 << (max_rec + 1)));
guid = get_unaligned(&arp1394->s_uniq_id);
node = eth1394_find_node_guid(&priv->ip_node_list,
be64_to_cpu(guid));
- if (!node) {
+ if (!node)
return 0;
- }
- node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
+ node_info =
+ (struct eth1394_node_info *)node->ud->device.driver_data;
/* Update our speed/payload/fifo_offset table */
node_info->maxpayload = maxpayload;
@@ -930,7 +856,7 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
arp->ar_hln = 8;
arp_ptr += arp->ar_hln; /* skip over sender unique id */
- *(u32*)arp_ptr = arp1394->sip; /* move sender IP addr */
+ *(u32 *)arp_ptr = arp1394->sip; /* move sender IP addr */
arp_ptr += arp->ar_pln; /* skip over sender IP addr */
if (arp->ar_op == htons(ARPOP_REQUEST))
@@ -947,65 +873,65 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
return ret;
}
-static inline int fragment_overlap(struct list_head *frag_list, int offset, int len)
+static int fragment_overlap(struct list_head *frag_list, int offset, int len)
{
struct fragment_info *fi;
+ int end = offset + len;
- list_for_each_entry(fi, frag_list, list) {
- if ( ! ((offset > (fi->offset + fi->len - 1)) ||
- ((offset + len - 1) < fi->offset)))
+ list_for_each_entry(fi, frag_list, list)
+ if (offset < fi->offset + fi->len && end > fi->offset)
return 1;
- }
+
return 0;
}
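
The rewritten fragment_overlap() above uses the standard half-open interval test: two ranges intersect iff each one starts before the other ends. A minimal standalone sketch of the same predicate (function and parameter names here are hypothetical, not from the driver):

static int ranges_overlap(int a_off, int a_len, int b_off, int b_len)
{
	/* equivalent to the old negated form:
	 * !((a_off > b_off + b_len - 1) || (a_off + a_len - 1 < b_off)) */
	return a_off < b_off + b_len && a_off + a_len > b_off;
}
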
-static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
+static struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
{
struct partial_datagram *pd;
- list_for_each_entry(pd, pdgl, list) {
+ list_for_each_entry(pd, pdgl, list)
if (pd->dgl == dgl)
return &pd->list;
- }
+
return NULL;
}
/* Assumes that new fragment does not overlap any existing fragments */
-static inline int new_fragment(struct list_head *frag_info, int offset, int len)
+static int new_fragment(struct list_head *frag_info, int offset, int len)
{
struct list_head *lh;
struct fragment_info *fi, *fi2, *new;
list_for_each(lh, frag_info) {
fi = list_entry(lh, struct fragment_info, list);
- if ((fi->offset + fi->len) == offset) {
+ if (fi->offset + fi->len == offset) {
/* The new fragment can be tacked on to the end */
fi->len += len;
/* Did the new fragment plug a hole? */
fi2 = list_entry(lh->next, struct fragment_info, list);
- if ((fi->offset + fi->len) == fi2->offset) {
+ if (fi->offset + fi->len == fi2->offset) {
/* glue fragments together */
fi->len += fi2->len;
list_del(lh->next);
kfree(fi2);
}
return 0;
- } else if ((offset + len) == fi->offset) {
+ } else if (offset + len == fi->offset) {
/* The new fragment can be tacked on to the beginning */
fi->offset = offset;
fi->len += len;
/* Did the new fragment plug a hole? */
fi2 = list_entry(lh->prev, struct fragment_info, list);
- if ((fi2->offset + fi2->len) == fi->offset) {
+ if (fi2->offset + fi2->len == fi->offset) {
/* glue fragments together */
fi2->len += fi->len;
list_del(lh);
kfree(fi);
}
return 0;
- } else if (offset > (fi->offset + fi->len)) {
+ } else if (offset > fi->offset + fi->len) {
break;
- } else if ((offset + len) < fi->offset) {
+ } else if (offset + len < fi->offset) {
lh = lh->prev;
break;
}
@@ -1019,14 +945,12 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len)
new->len = len;
list_add(&new->list, lh);
-
return 0;
}
-static inline int new_partial_datagram(struct net_device *dev,
- struct list_head *pdgl, int dgl,
- int dg_size, char *frag_buf,
- int frag_off, int frag_len)
+static int new_partial_datagram(struct net_device *dev, struct list_head *pdgl,
+ int dgl, int dg_size, char *frag_buf,
+ int frag_off, int frag_len)
{
struct partial_datagram *new;
@@ -1059,33 +983,33 @@ static inline int new_partial_datagram(struct net_device *dev,
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
list_add(&new->list, pdgl);
-
return 0;
}
-static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
- char *frag_buf, int frag_off, int frag_len)
+static int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
+ char *frag_buf, int frag_off, int frag_len)
{
- struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+ struct partial_datagram *pd =
+ list_entry(lh, struct partial_datagram, list);
- if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0) {
+ if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0)
return -ENOMEM;
- }
memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
	/* Move list entry to beginning of list so that oldest partial
* datagrams percolate to the end of the list */
list_move(lh, pdgl);
-
return 0;
}
-static inline int is_datagram_complete(struct list_head *lh, int dg_size)
+static int is_datagram_complete(struct list_head *lh, int dg_size)
{
- struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
- struct fragment_info *fi = list_entry(pd->frag_info.next,
- struct fragment_info, list);
+ struct partial_datagram *pd;
+ struct fragment_info *fi;
+
+ pd = list_entry(lh, struct partial_datagram, list);
+ fi = list_entry(pd->frag_info.next, struct fragment_info, list);
return (fi->len == dg_size);
}
@@ -1108,7 +1032,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
if (!ud) {
struct eth1394_node_ref *node;
node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid);
- if (!node) {
+ if (unlikely(!node)) {
HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
"lookup failure: " NODE_BUS_FMT,
NODE_BUS_ARGS(priv->host, srcid));
@@ -1120,7 +1044,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
}
- node_info = (struct eth1394_node_info*)ud->device.driver_data;
+ node_info = (struct eth1394_node_info *)ud->device.driver_data;
/* First, did we receive a fragmented or unfragmented datagram? */
hdr->words.word1 = ntohs(hdr->words.word1);
@@ -1133,13 +1057,14 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
* high level network layer. */
skb = dev_alloc_skb(len + dev->hard_header_len + 15);
- if (!skb) {
- HPSB_PRINT (KERN_ERR, "ether1394 rx: low on mem\n");
+ if (unlikely(!skb)) {
+ ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
priv->stats.rx_dropped++;
return -1;
}
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
- memcpy(skb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len);
+ memcpy(skb_put(skb, len - hdr_len), buf + hdr_len,
+ len - hdr_len);
ether_type = hdr->uf.ether_type;
} else {
/* A datagram fragment has been received, now the fun begins. */
@@ -1224,9 +1149,8 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
pd = list_entry(lh, struct partial_datagram, list);
- if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+ if (hdr->common.lf == ETH1394_HDR_LF_FF)
pd->ether_type = ether_type;
- }
if (is_datagram_complete(lh, dg_size)) {
ether_type = pd->ether_type;
@@ -1253,8 +1177,8 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid,
ether_type);
-
spin_lock_irqsave(&priv->lock, flags);
+
if (!skb->protocol) {
priv->stats.rx_errors++;
priv->stats.rx_dropped++;
@@ -1288,9 +1212,9 @@ static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
struct eth1394_host_info *hi;
hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
- if (hi == NULL) {
- ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
- host->driver->name);
+ if (unlikely(!hi)) {
+ ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
+ host->id);
return RCODE_ADDRESS_ERROR;
}
@@ -1314,9 +1238,9 @@ static void ether1394_iso(struct hpsb_iso *iso)
int nready;
hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
- if (hi == NULL) {
- ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
- iso->host->driver->name);
+ if (unlikely(!hi)) {
+ ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
+ iso->host->id);
return;
}
@@ -1326,20 +1250,20 @@ static void ether1394_iso(struct hpsb_iso *iso)
for (i = 0; i < nready; i++) {
struct hpsb_iso_packet_info *info =
&iso->infos[(iso->first_packet + i) % iso->buf_packets];
- data = (quadlet_t*) (iso->data_buf.kvirt + info->offset);
+ data = (quadlet_t *)(iso->data_buf.kvirt + info->offset);
/* skip over GASP header */
buf = (char *)data + 8;
len = info->len - 8;
- specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) |
- ((be32_to_cpu(data[1]) & 0xff000000) >> 24));
+ specifier_id = (be32_to_cpu(data[0]) & 0xffff) << 8 |
+ (be32_to_cpu(data[1]) & 0xff000000) >> 24;
source_id = be32_to_cpu(data[0]) >> 16;
priv = netdev_priv(dev);
- if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) ||
- specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
+ if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
+ || specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
/* This packet is not for us */
continue;
}
@@ -1367,35 +1291,31 @@ static void ether1394_iso(struct hpsb_iso *iso)
* speed, and unicast FIFO address information between the sender_unique_id
* and the IP addresses.
*/
-static inline void ether1394_arp_to_1394arp(struct sk_buff *skb,
- struct net_device *dev)
+static void ether1394_arp_to_1394arp(struct sk_buff *skb,
+ struct net_device *dev)
{
struct eth1394_priv *priv = netdev_priv(dev);
-
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr = (unsigned char *)(arp + 1);
struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
- /* Believe it or not, all that need to happen is sender IP get moved
- * and set hw_addr_len, max_rec, sspd, fifo_hi and fifo_lo. */
arp1394->hw_addr_len = 16;
arp1394->sip = *(u32*)(arp_ptr + ETH1394_ALEN);
arp1394->max_rec = priv->host->csr.max_rec;
arp1394->sspd = priv->host->csr.lnk_spd;
- arp1394->fifo_hi = htons (priv->local_fifo >> 32);
- arp1394->fifo_lo = htonl (priv->local_fifo & ~0x0);
-
- return;
+ arp1394->fifo_hi = htons(priv->local_fifo >> 32);
+ arp1394->fifo_lo = htonl(priv->local_fifo & ~0x0);
}
/* We need to encapsulate the standard header with our own. We use the
* ethernet header's proto for our own. */
-static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
- __be16 proto,
- union eth1394_hdr *hdr,
- u16 dg_size, u16 dgl)
+static unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
+ __be16 proto,
+ union eth1394_hdr *hdr,
+ u16 dg_size, u16 dgl)
{
- unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
+ unsigned int adj_max_payload =
+ max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
/* Does it all fit in one packet? */
if (dg_size <= adj_max_payload) {
@@ -1408,19 +1328,19 @@ static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
hdr->ff.dgl = dgl;
adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
}
- return((dg_size + (adj_max_payload - 1)) / adj_max_payload);
+ return (dg_size + adj_max_payload - 1) / adj_max_payload;
}
-static inline unsigned int ether1394_encapsulate(struct sk_buff *skb,
- unsigned int max_payload,
- union eth1394_hdr *hdr)
+static unsigned int ether1394_encapsulate(struct sk_buff *skb,
+ unsigned int max_payload,
+ union eth1394_hdr *hdr)
{
union eth1394_hdr *bufhdr;
int ftype = hdr->common.lf;
int hdrsz = hdr_type_len[ftype];
unsigned int adj_max_payload = max_payload - hdrsz;
- switch(ftype) {
+ switch (ftype) {
case ETH1394_HDR_LF_UF:
bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
bufhdr->words.word1 = htons(hdr->words.word1);
@@ -1449,11 +1369,10 @@ static inline unsigned int ether1394_encapsulate(struct sk_buff *skb,
bufhdr->words.word3 = htons(hdr->words.word3);
bufhdr->words.word4 = 0;
}
-
return min(max_payload, skb->len);
}
-static inline struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
+static struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
{
struct hpsb_packet *p;
@@ -1466,61 +1385,57 @@ static inline struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host
return p;
}
-static inline int ether1394_prep_write_packet(struct hpsb_packet *p,
- struct hpsb_host *host,
- nodeid_t node, u64 addr,
- void * data, int tx_len)
+static int ether1394_prep_write_packet(struct hpsb_packet *p,
+ struct hpsb_host *host, nodeid_t node,
+ u64 addr, void *data, int tx_len)
{
p->node_id = node;
p->data = NULL;
p->tcode = TCODE_WRITEB;
- p->header[1] = (host->node_id << 16) | (addr >> 32);
+ p->header[1] = host->node_id << 16 | addr >> 32;
p->header[2] = addr & 0xffffffff;
p->header_size = 16;
p->expect_response = 1;
if (hpsb_get_tlabel(p)) {
- ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending "
- "to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
+ ETH1394_PRINT_G(KERN_ERR, "Out of tlabels\n");
return -1;
}
- p->header[0] = (p->node_id << 16) | (p->tlabel << 10)
- | (1 << 8) | (TCODE_WRITEB << 4);
+ p->header[0] =
+ p->node_id << 16 | p->tlabel << 10 | 1 << 8 | TCODE_WRITEB << 4;
p->header[3] = tx_len << 16;
p->data_size = (tx_len + 3) & ~3;
- p->data = (quadlet_t*)data;
+ p->data = data;
return 0;
}
-static inline void ether1394_prep_gasp_packet(struct hpsb_packet *p,
- struct eth1394_priv *priv,
- struct sk_buff *skb, int length)
+static void ether1394_prep_gasp_packet(struct hpsb_packet *p,
+ struct eth1394_priv *priv,
+ struct sk_buff *skb, int length)
{
p->header_size = 4;
p->tcode = TCODE_STREAM_DATA;
- p->header[0] = (length << 16) | (3 << 14)
- | ((priv->broadcast_channel) << 8)
- | (TCODE_STREAM_DATA << 4);
+ p->header[0] = length << 16 | 3 << 14 | priv->broadcast_channel << 8 |
+ TCODE_STREAM_DATA << 4;
p->data_size = length;
- p->data = ((quadlet_t*)skb->data) - 2;
- p->data[0] = cpu_to_be32((priv->host->node_id << 16) |
+ p->data = (quadlet_t *)skb->data - 2;
+ p->data[0] = cpu_to_be32(priv->host->node_id << 16 |
ETHER1394_GASP_SPECIFIER_ID_HI);
- p->data[1] = cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
+ p->data[1] = cpu_to_be32(ETHER1394_GASP_SPECIFIER_ID_LO << 24 |
ETHER1394_GASP_VERSION);
- /* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
- * prevents hpsb_send_packet() from setting the speed to an arbitrary
- * value based on packet->node_id if packet->node_id is not set. */
- p->node_id = ALL_NODES;
p->speed_code = priv->bc_sspd;
+
+ /* prevent hpsb_send_packet() from overriding our speed code */
+ p->node_id = LOCAL_BUS | ALL_NODES;
}
-static inline void ether1394_free_packet(struct hpsb_packet *packet)
+static void ether1394_free_packet(struct hpsb_packet *packet)
{
if (packet->tcode != TCODE_STREAM_DATA)
hpsb_free_tlabel(packet);
@@ -1539,7 +1454,7 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
return -1;
if (ptask->tx_type == ETH1394_GASP) {
- int length = tx_len + (2 * sizeof(quadlet_t));
+ int length = tx_len + 2 * sizeof(quadlet_t);
ether1394_prep_gasp_packet(packet, priv, ptask->skb, length);
} else if (ether1394_prep_write_packet(packet, priv->host,
@@ -1562,13 +1477,11 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
return 0;
}
-
/* Task function to be run when a datagram transmission is completed */
-static inline void ether1394_dg_complete(struct packet_task *ptask, int fail)
+static void ether1394_dg_complete(struct packet_task *ptask, int fail)
{
struct sk_buff *skb = ptask->skb;
- struct net_device *dev = skb->dev;
- struct eth1394_priv *priv = netdev_priv(dev);
+ struct eth1394_priv *priv = netdev_priv(skb->dev);
unsigned long flags;
/* Statistics */
@@ -1586,7 +1499,6 @@ static inline void ether1394_dg_complete(struct packet_task *ptask, int fail)
kmem_cache_free(packet_task_cache, ptask);
}
-
/* Callback for when a packet has been sent and the status of that packet is
* known */
static void ether1394_complete_cb(void *__ptask)
@@ -1614,19 +1526,15 @@ static void ether1394_complete_cb(void *__ptask)
}
}
-
-
/* Transmit a packet (called by kernel) */
-static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
+static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
{
- gfp_t kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct eth1394hdr *eth;
struct eth1394_priv *priv = netdev_priv(dev);
__be16 proto;
unsigned long flags;
nodeid_t dest_node;
eth1394_tx_type tx_type;
- int ret = 0;
unsigned int tx_len;
unsigned int max_payload;
u16 dg_size;
@@ -1635,29 +1543,24 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
struct eth1394_node_ref *node;
struct eth1394_node_info *node_info = NULL;
- ptask = kmem_cache_alloc(packet_task_cache, kmflags);
- if (ptask == NULL) {
- ret = -ENOMEM;
+ ptask = kmem_cache_alloc(packet_task_cache, GFP_ATOMIC);
+ if (ptask == NULL)
goto fail;
- }
/* XXX Ignore this for now. Noticed that when MacOSX is the IRM,
* it does not set our validity bit. We need to compensate for
* that somewhere else, but not in eth1394. */
#if 0
- if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000) {
- ret = -EAGAIN;
+ if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000)
goto fail;
- }
#endif
- if ((skb = skb_share_check (skb, kmflags)) == NULL) {
- ret = -ENOMEM;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
goto fail;
- }
/* Get rid of the fake eth1394 header, but save a pointer */
- eth = (struct eth1394hdr*)skb->data;
+ eth = (struct eth1394hdr *)skb->data;
skb_pull(skb, ETH1394_HLEN);
proto = eth->h_proto;
@@ -1672,7 +1575,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
tx_type = ETH1394_GASP;
dest_node = LOCAL_BUS | ALL_NODES;
max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
- BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD));
+ BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
dgl = priv->bc_dgl;
if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
priv->bc_dgl++;
@@ -1681,19 +1584,17 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
node = eth1394_find_node_guid(&priv->ip_node_list,
be64_to_cpu(guid));
- if (!node) {
- ret = -EAGAIN;
+ if (!node)
goto fail;
- }
- node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
- if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE) {
- ret = -EAGAIN;
+
+ node_info =
+ (struct eth1394_node_info *)node->ud->device.driver_data;
+ if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
goto fail;
- }
dest_node = node->ud->ne->nodeid;
max_payload = node_info->maxpayload;
- BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD));
+ BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
dgl = node_info->dgl;
if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
@@ -1703,7 +1604,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
/* If this is an ARP packet, convert it */
if (proto == htons(ETH_P_ARP))
- ether1394_arp_to_1394arp (skb, dev);
+ ether1394_arp_to_1394arp(skb, dev);
ptask->hdr.words.word1 = 0;
ptask->hdr.words.word2 = 0;
@@ -1726,9 +1627,8 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
ptask->tx_type = tx_type;
ptask->max_payload = max_payload;
- ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload, proto,
- &ptask->hdr, dg_size,
- dgl);
+ ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload,
+ proto, &ptask->hdr, dg_size, dgl);
/* Add the encapsulation header to the fragment */
tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
@@ -1737,7 +1637,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
goto fail;
netif_wake_queue(dev);
- return 0;
+ return NETDEV_TX_OK;
fail:
if (ptask)
kmem_cache_free(packet_task_cache, ptask);
@@ -1745,40 +1645,56 @@ fail:
if (skb != NULL)
dev_kfree_skb(skb);
- spin_lock_irqsave (&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
- spin_unlock_irqrestore (&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
- return 0; /* returning non-zero causes serious problems */
+ /*
+ * FIXME: According to a patch from 2003-02-26, "returning non-zero
+ * causes serious problems" here, allegedly. Before that patch,
+ * -ERRNO was returned which is not appropriate under Linux 2.6.
+ * Perhaps more needs to be done? Stop the queue in serious
+ * conditions and restart it elsewhere?
+ */
+ /* return NETDEV_TX_BUSY; */
+ return NETDEV_TX_OK;
}
-static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void ether1394_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- strcpy (info->driver, driver_name);
- /* FIXME XXX provide sane businfo */
- strcpy (info->bus_info, "ieee1394");
+ strcpy(info->driver, driver_name);
+ strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */
}
static struct ethtool_ops ethtool_ops = {
.get_drvinfo = ether1394_get_drvinfo
};
-static int __init ether1394_init_module (void)
+static int __init ether1394_init_module(void)
{
- packet_task_cache = kmem_cache_create("packet_task", sizeof(struct packet_task),
+ int err;
+
+ packet_task_cache = kmem_cache_create("packet_task",
+ sizeof(struct packet_task),
0, 0, NULL, NULL);
+ if (!packet_task_cache)
+ return -ENOMEM;
- /* Register ourselves as a highlevel driver */
hpsb_register_highlevel(&eth1394_highlevel);
-
- return hpsb_register_protocol(&eth1394_proto_driver);
+ err = hpsb_register_protocol(&eth1394_proto_driver);
+ if (err) {
+ hpsb_unregister_highlevel(&eth1394_highlevel);
+ kmem_cache_destroy(packet_task_cache);
+ }
+ return err;
}
-static void __exit ether1394_exit_module (void)
+static void __exit ether1394_exit_module(void)
{
hpsb_unregister_protocol(&eth1394_proto_driver);
hpsb_unregister_highlevel(&eth1394_highlevel);
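
The FIXME added to ether1394_tx()'s failure path above alludes to the usual hard_start_xmit congestion convention. A hedged sketch of that convention for a generic 2.6-era netdev driver (the example_* helpers are hypothetical): stop the queue when resources run out and return NETDEV_TX_BUSY so the core requeues the skb, rather than dropping it and returning NETDEV_TX_OK.

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!example_tx_resources_available(dev)) {
		netif_stop_queue(dev);	/* completion path wakes it later */
		return NETDEV_TX_BUSY;	/* core keeps and requeues the skb */
	}
	example_queue_for_transmit(skb, dev);
	return NETDEV_TX_OK;
}
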
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index 1e835653514..a3439ee7cb4 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -25,8 +25,11 @@
#define __ETH1394_H
#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <asm/byteorder.h>
#include "ieee1394.h"
+#include "ieee1394_types.h"
/* Register for incoming packets. This is 4096 bytes, which supports up to
* S3200 (per Table 16-3 of IEEE 1394b-2002). */
@@ -34,22 +37,15 @@
/* GASP identifier numbers for IPv4 over IEEE 1394 */
#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
-#define ETHER1394_GASP_SPECIFIER_ID_HI ((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
-#define ETHER1394_GASP_SPECIFIER_ID_LO (ETHER1394_GASP_SPECIFIER_ID & 0xff)
+#define ETHER1394_GASP_SPECIFIER_ID_HI ((0x00005E >> 8) & 0xffff)
+#define ETHER1394_GASP_SPECIFIER_ID_LO (0x00005E & 0xff)
#define ETHER1394_GASP_VERSION 1
-#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* GASP header overhead */
+#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* for GASP header */
-#define ETHER1394_GASP_BUFFERS 16
+#define ETHER1394_GASP_BUFFERS 16
-/* rawiso buffer size - due to a limitation in rawiso, we must limit each
- * GASP buffer to be less than PAGE_SIZE. */
-#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \
- min((unsigned int)PAGE_SIZE, \
- 2 * (1U << (priv->host->csr.max_rec + 1)))
-
-/* Node set == 64 */
-#define NODE_SET (ALL_NODES + 1)
+#define NODE_SET (ALL_NODES + 1) /* Node set == 64 */
enum eth1394_bc_states { ETHER1394_BC_ERROR,
ETHER1394_BC_RUNNING,
@@ -85,19 +81,14 @@ struct eth1394hdr {
unsigned short h_proto; /* packet type ID field */
} __attribute__((packed));
-#ifdef __KERNEL__
-#include <linux/skbuff.h>
-
static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
{
return (struct eth1394hdr *)skb_mac_header(skb);
}
-#endif
typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
/* IP1394 headers */
-#include <asm/byteorder.h>
/* Unfragmented */
#if defined __BIG_ENDIAN_BITFIELD
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 694da82d820..83a49331275 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -70,8 +70,12 @@ static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
return NULL;
}
-/* Returns a per host/driver data structure that was previously stored by
- * hpsb_create_hostinfo. */
+/**
+ * hpsb_get_hostinfo - retrieve a hostinfo pointer bound to this driver/host
+ *
+ * Returns a per @host and @hl driver data structure that was previously stored
+ * by hpsb_create_hostinfo.
+ */
void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
{
struct hl_host_info *hi = hl_get_hostinfo(hl, host);
@@ -79,7 +83,13 @@ void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
return hi ? hi->data : NULL;
}
-/* If size is zero, then the return here is only valid for error checking */
+/**
+ * hpsb_create_hostinfo - allocate a hostinfo pointer bound to this driver/host
+ *
+ * Allocate a hostinfo pointer backed by memory with @data_size and bind it to
+ * this @hl driver and @host. If @data_size is zero, then the return here is
+ * only valid for error checking.
+ */
void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
size_t data_size)
{
@@ -113,6 +123,11 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
return data;
}
+/**
+ * hpsb_set_hostinfo - set the hostinfo pointer to something useful
+ *
+ * Usually follows a call to hpsb_create_hostinfo, where the size is 0.
+ */
int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
void *data)
{
@@ -132,6 +147,11 @@ int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
return -EINVAL;
}
+/**
+ * hpsb_destroy_hostinfo - free and remove a hostinfo pointer
+ *
+ * Free and remove the hostinfo pointer bound to this @hl driver and @host.
+ */
void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
{
struct hl_host_info *hi;
@@ -147,6 +167,12 @@ void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
return;
}
+/**
+ * hpsb_set_hostinfo_key - set an alternate lookup key for a hostinfo
+ *
+ * Sets an alternate lookup key for the hostinfo bound to this @hl driver and
+ * @host.
+ */
void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned long key)
{
@@ -158,6 +184,9 @@ void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
return;
}
+/**
+ * hpsb_get_hostinfo_bykey - retrieve a hostinfo pointer by its alternate key
+ */
void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
{
struct hl_host_info *hi;
@@ -189,6 +218,12 @@ static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
return 0;
}
+/**
+ * hpsb_register_highlevel - register highlevel driver
+ *
+ * The name pointer in @hl has to stay valid at all times because the string is
+ * not copied.
+ */
void hpsb_register_highlevel(struct hpsb_highlevel *hl)
{
unsigned long flags;
@@ -258,6 +293,9 @@ static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
return 0;
}
+/**
+ * hpsb_unregister_highlevel - unregister highlevel driver
+ */
void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
{
unsigned long flags;
@@ -273,6 +311,19 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
}
+/**
+ * hpsb_allocate_and_register_addrspace - alloc' and reg' a host address space
+ *
+ * @start and @end are 48 bit pointers and have to be quadlet aligned.
+ * @end points to the first address behind the handled addresses. This
+ * function can be called multiple times for a single hpsb_highlevel @hl to
+ * implement sparse register sets. The requested region must not overlap any
+ * previously allocated region, otherwise registering will fail.
+ *
+ * It returns true for successful allocation. Address spaces can be
+ * unregistered with hpsb_unregister_addrspace. All remaining address spaces
+ * are automatically deallocated together with the hpsb_highlevel @hl.
+ */
u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
struct hpsb_host *host,
struct hpsb_address_ops *ops,
@@ -348,6 +399,19 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
return retval;
}
+/**
+ * hpsb_register_addrspace - register a host address space
+ *
+ * @start and @end are 48 bit pointers and have to be quadlet aligned.
+ * @end points to the first address behind the handled addresses. This
+ * function can be called multiple times for a single hpsb_highlevel @hl to
+ * implement sparse register sets. The requested region must not overlap any
+ * previously allocated region, otherwise registering will fail.
+ *
+ * It returns true for successful allocation. Address spaces can be
+ * unregistered with hpsb_unregister_addrspace. All remaining address spaces
+ * are automatically deallocated together with the hpsb_highlevel @hl.
+ */
int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
struct hpsb_address_ops *ops, u64 start, u64 end)
{
@@ -419,6 +483,11 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return retval;
}
+/**
+ * hpsb_listen_channel - enable receiving a certain isochronous channel
+ *
+ * Reception is handled through the @hl's iso_receive op.
+ */
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel)
{
@@ -431,6 +500,9 @@ int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
return 0;
}
+/**
+ * hpsb_unlisten_channel - disable receiving a certain isochronous channel
+ */
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel)
{
@@ -528,6 +600,17 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
read_unlock_irqrestore(&hl_irqs_lock, flags);
}
+/*
+ * highlevel_read, highlevel_write, highlevel_lock, highlevel_lock64:
+ *
+ * These functions are called to handle transactions. They are called when a
+ * packet arrives. The flags argument contains the second word of the first
+ * header quadlet of the incoming packet (containing transaction label, retry
+ * code, transaction code and priority). These functions either return a
+ * response code or a negative number. In the first case a response will be
+ * generated. In the latter case, no response will be sent and the driver which
+ * handled the request will send the response itself.
+ */
int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
unsigned int length, u16 flags)
{
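
A minimal usage sketch of the hostinfo API documented above, assuming a hypothetical highlevel driver (demo_highlevel, struct demo_info and the two hooks are made up for illustration): per-host data is allocated in the add_host hook and looked up again wherever the (hl, host) pair is at hand.

static struct hpsb_highlevel demo_highlevel;	/* .name, ops elided */

struct demo_info {
	int packets_seen;
};

static void demo_add_host(struct hpsb_host *host)
{
	struct demo_info *di =
		hpsb_create_hostinfo(&demo_highlevel, host, sizeof(*di));

	if (di)
		di->packets_seen = 0;
}

static void demo_remove_host(struct hpsb_host *host)
{
	if (hpsb_get_hostinfo(&demo_highlevel, host))
		hpsb_destroy_hostinfo(&demo_highlevel, host);
}
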
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
index 4b330117067..63474f7ee69 100644
--- a/drivers/ieee1394/highlevel.h
+++ b/drivers/ieee1394/highlevel.h
@@ -99,16 +99,6 @@ struct hpsb_address_ops {
void highlevel_add_host(struct hpsb_host *host);
void highlevel_remove_host(struct hpsb_host *host);
void highlevel_host_reset(struct hpsb_host *host);
-
-/*
- * These functions are called to handle transactions. They are called when a
- * packet arrives. The flags argument contains the second word of the first
- * header quadlet of the incoming packet (containing transaction label, retry
- * code, transaction code and priority). These functions either return a
- * response code or a negative number. In the first case a response will be
- * generated. In the latter case, no response will be sent and the driver which
- * handled the request will send the response itself.
- */
int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
unsigned int length, u16 flags);
int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
@@ -119,30 +109,13 @@ int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
u16 flags);
-
void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length);
void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
void *data, size_t length);
-/*
- * Register highlevel driver. The name pointer has to stay valid at all times
- * because the string is not copied.
- */
void hpsb_register_highlevel(struct hpsb_highlevel *hl);
void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
-/*
- * Register handlers for host address spaces. Start and end are 48 bit pointers
- * and have to be quadlet aligned. Argument "end" points to the first address
- * behind the handled addresses. This function can be called multiple times for
- * a single hpsb_highlevel to implement sparse register sets. The requested
- * region must not overlap any previously allocated region, otherwise
- * registering will fail.
- *
- * It returns true for successful allocation. Address spaces can be
- * unregistered with hpsb_unregister_addrspace. All remaining address spaces
- * are automatically deallocated together with the hpsb_highlevel.
- */
u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
struct hpsb_host *host,
struct hpsb_address_ops *ops,
@@ -152,45 +125,19 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
struct hpsb_address_ops *ops, u64 start, u64 end);
int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
u64 start);
-
-/*
- * Enable or disable receving a certain isochronous channel through the
- * iso_receive op.
- */
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
- unsigned int channel);
+ unsigned int channel);
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
-/* Retrieve a hostinfo pointer bound to this driver/host */
void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
-
-/* Allocate a hostinfo pointer of data_size bound to this driver/host */
void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
size_t data_size);
-
-/* Free and remove the hostinfo pointer bound to this driver/host */
void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
-
-/* Set an alternate lookup key for the hostinfo bound to this driver/host */
void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned long key);
-
-/* Retrieve the alternate lookup key for the hostinfo bound to this
- * driver/host */
-unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl,
- struct hpsb_host *host);
-
-/* Retrieve a hostinfo pointer bound to this driver using its alternate key */
void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
-
-/* Set the hostinfo pointer to something useful. Usually follows a call to
- * hpsb_create_hostinfo, where the size is 0. */
int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
void *data);
-/* Retrieve hpsb_host using a highlevel handle and a key */
-struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl,
- unsigned long key);
-
#endif /* IEEE1394_HIGHLEVEL_H */
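
A sketch of registering a sparse register set through the address space API declared above, assuming a hypothetical driver; the hpsb_address_ops read/write handlers are presumed to be filled in elsewhere, since the struct's fields are not shown in this hunk. Per the kerneldoc, @start/@end are quadlet-aligned 48-bit addresses with @end exclusive, and a nonzero return indicates success.

static struct hpsb_address_ops demo_ops;	/* handlers assumed elsewhere */

static int demo_map_registers(struct hpsb_highlevel *hl, struct hpsb_host *host)
{
	/* two non-overlapping example regions (addresses are arbitrary) */
	if (!hpsb_register_addrspace(hl, host, &demo_ops,
				     0xfffff0010000ULL, 0xfffff0010100ULL))
		return -EBUSY;
	if (!hpsb_register_addrspace(hl, host, &demo_ops,
				     0xfffff0020000ULL, 0xfffff0020400ULL)) {
		hpsb_unregister_addrspace(hl, host, 0xfffff0010000ULL);
		return -EBUSY;
	}
	return 0;
}
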
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 32a13092193..6164a9a8339 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -94,14 +94,6 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
return 0;
}
-/*
- * The pending_packet_queue is special in that it's processed
- * from hardirq context too (such as hpsb_bus_reset()). Hence
- * split the lock class from the usual networking skb-head
- * lock class by using a separate key for it:
- */
-static struct lock_class_key pending_packet_queue_key;
-
static DEFINE_MUTEX(host_num_alloc);
/**
@@ -137,9 +129,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
h->hostdata = h + 1;
h->driver = drv;
- skb_queue_head_init(&h->pending_packet_queue);
- lockdep_set_class(&h->pending_packet_queue.lock,
- &pending_packet_queue_key);
+ INIT_LIST_HEAD(&h->pending_packets);
INIT_LIST_HEAD(&h->addr_space);
for (i = 2; i < 16; i++)
@@ -190,7 +180,7 @@ int hpsb_add_host(struct hpsb_host *host)
{
if (hpsb_default_host_entry(host))
return -ENOMEM;
- hpsb_add_extra_config_roms(host);
+
highlevel_add_host(host);
return 0;
}
@@ -212,12 +202,19 @@ void hpsb_remove_host(struct hpsb_host *host)
host->driver = &dummy_driver;
highlevel_remove_host(host);
- hpsb_remove_extra_config_roms(host);
class_device_unregister(&host->class_dev);
device_unregister(&host->device);
}
+/**
+ * hpsb_update_config_rom_image - updates configuration ROM image of a host
+ *
+ * Updates the configuration ROM image of a host. rom_version must be the
+ * current version, otherwise it will fail with return value -1. If this
+ * host does not support config-rom-update, it will return -%EINVAL.
+ * Return value 0 indicates success.
+ */
int hpsb_update_config_rom_image(struct hpsb_host *host)
{
unsigned long reset_delay;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 4bf4fb7f67b..feb55d03229 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -3,7 +3,6 @@
#include <linux/device.h>
#include <linux/list.h>
-#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -25,8 +24,7 @@ struct hpsb_host {
atomic_t generation;
- struct sk_buff_head pending_packet_queue;
-
+ struct list_head pending_packets;
struct timer_list timeout;
unsigned long timeout_interval;
@@ -202,12 +200,6 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
int hpsb_add_host(struct hpsb_host *host);
void hpsb_resume_host(struct hpsb_host *host);
void hpsb_remove_host(struct hpsb_host *host);
-
-/* Updates the configuration rom image of a host. rom_version must be the
- * current version, otherwise it will fail with return value -1. If this
- * host does not support config-rom-update, it will return -EINVAL.
- * Return value 0 indicates success.
- */
int hpsb_update_config_rom_image(struct hpsb_host *host);
#endif /* _IEEE1394_HOSTS_H */
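
A hedged sketch of a caller consuming hpsb_update_config_rom_image() per the return codes documented in the hosts.c hunk above (the demo_* wrapper is hypothetical; the csr1212 edits that actually change the local ROM are not shown):

static int demo_publish_rom_changes(struct hpsb_host *host)
{
	int ret = hpsb_update_config_rom_image(host);

	if (ret == -EINVAL)
		return ret;	/* host cannot update its config ROM */
	if (ret < 0)
		return -EAGAIN;	/* version mismatch, caller may retry */
	return 0;		/* success */
}
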
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index d791d08c743..8f71b6a06aa 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,7 +30,6 @@
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
-#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
@@ -96,13 +95,15 @@ static void queue_packet_complete(struct hpsb_packet *packet);
/**
- * hpsb_set_packet_complete_task - set the task that runs when a packet
- * completes. You cannot call this more than once on a single packet
- * before it is sent.
- *
+ * hpsb_set_packet_complete_task - set task that runs when a packet completes
* @packet: the packet whose completion we want the task added to
* @routine: function to call
* @data: data (if any) to pass to the above function
+ *
+ * Set the task that runs when a packet completes. You cannot call this more
+ * than once on a single packet before it is sent.
+ *
+ * Typically, the complete @routine is responsible for calling hpsb_free_packet().
*/
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
void (*routine)(void *), void *data)
@@ -115,12 +116,12 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
/**
* hpsb_alloc_packet - allocate new packet structure
- * @data_size: size of the data block to be allocated
+ * @data_size: size of the data block to be allocated, in bytes
*
* This function allocates, initializes and returns a new &struct hpsb_packet.
- * It can be used in interrupt context. A header block is always included, its
- * size is big enough to contain all possible 1394 headers. The data block is
- * only allocated when @data_size is not zero.
+ * It can be used in interrupt context. A header block is always included and
+ * initialized with zeros. Its size is big enough to contain all possible 1394
+ * headers. The data block is only allocated if @data_size is not zero.
*
* For packets for which responses will be received the @data_size has to be big
* enough to contain the response's data block since no further allocation
@@ -135,50 +136,49 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
*/
struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
{
- struct hpsb_packet *packet = NULL;
- struct sk_buff *skb;
+ struct hpsb_packet *packet;
data_size = ((data_size + 3) & ~3);
- skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
- if (skb == NULL)
+ packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
+ if (!packet)
return NULL;
- memset(skb->data, 0, data_size + sizeof(*packet));
-
- packet = (struct hpsb_packet *)skb->data;
- packet->skb = skb;
-
- packet->header = packet->embedded_header;
packet->state = hpsb_unused;
packet->generation = -1;
INIT_LIST_HEAD(&packet->driver_list);
+ INIT_LIST_HEAD(&packet->queue);
atomic_set(&packet->refcnt, 1);
if (data_size) {
- packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
- packet->data_size = data_size;
+ packet->data = packet->embedded_data;
+ packet->allocated_data_size = data_size;
}
-
return packet;
}
-
/**
* hpsb_free_packet - free packet and data associated with it
* @packet: packet to free (is NULL safe)
*
- * This function will free packet->data and finally the packet itself.
+ * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
*/
void hpsb_free_packet(struct hpsb_packet *packet)
{
if (packet && atomic_dec_and_test(&packet->refcnt)) {
- BUG_ON(!list_empty(&packet->driver_list));
- kfree_skb(packet->skb);
+ BUG_ON(!list_empty(&packet->driver_list) ||
+ !list_empty(&packet->queue));
+ kfree(packet);
}
}
-
+/**
+ * hpsb_reset_bus - initiate bus reset on the given host
+ * @host: host controller whose bus to reset
+ * @type: one of enum reset_types
+ *
+ * Returns 1 if bus reset already in progress, 0 otherwise.
+ */
int hpsb_reset_bus(struct hpsb_host *host, int type)
{
if (!host->in_bus_reset) {
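
A minimal sketch of the allocate/send/free life cycle spelled out in the kerneldoc above, assuming a hypothetical caller (header and addressing setup is elided); on the asynchronous hpsb_send_packet() path, the completion routine installed via hpsb_set_packet_complete_task() would free the packet instead.

static int demo_blocking_transaction(struct hpsb_host *host)
{
	struct hpsb_packet *packet = hpsb_alloc_packet(16); /* room for response data */
	int ret;

	if (!packet)
		return -ENOMEM;
	packet->host = host;
	/* ... fill in header, node_id, tcode, etc. (elided) ... */
	ret = hpsb_send_packet_and_wait(packet);	/* 0 or negative errno */
	hpsb_free_packet(packet);
	return ret;
}
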
@@ -229,6 +229,14 @@ int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
return 0;
}
+/**
+ * hpsb_bus_reset - notify a bus reset to the core
+ *
+ * For host driver module usage. Safe to use in interrupt context, but quite
+ * complex; you may want to run it in the bottom half rather than the top half.
+ *
+ * Returns 1 if bus reset already in progress, 0 otherwise.
+ */
int hpsb_bus_reset(struct hpsb_host *host)
{
if (host->in_bus_reset) {
@@ -405,6 +413,14 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
}
+/**
+ * hpsb_selfid_received - hand over received selfid packet to the core
+ *
+ * For host driver module usage. Safe to use in interrupt context.
+ *
+ * The host driver should have done a successful complement check (second
+ * quadlet is complement of first) beforehand.
+ */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
if (host->in_bus_reset) {
@@ -416,6 +432,15 @@ void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
}
}
+/**
+ * hpsb_selfid_complete - notify completion of SelfID stage to the core
+ *
+ * For host driver module usage. Safe to use in interrupt context, but quite
+ * complex; you may want to run it in the bottom half rather than the top half.
+ *
+ * Notify completion of SelfID stage to the core and report new physical ID
+ * and whether host is root now.
+ */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
{
if (!host->in_bus_reset)
@@ -462,30 +487,41 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
highlevel_host_reset(host);
}
+static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED;
+/**
+ * hpsb_packet_sent - notify core of sending a packet
+ *
+ * For host driver module usage. Safe to call from within a transmit packet
+ * routine.
+ *
+ * Notify core of sending a packet. Ackcode is the ack code returned for async
+ * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
+ * for other cases (internal errors that don't justify a panic).
+ */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
int ackcode)
{
unsigned long flags;
- spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
+ spin_lock_irqsave(&pending_packets_lock, flags);
packet->ack_code = ackcode;
if (packet->no_waiter || packet->state == hpsb_complete) {
/* if packet->no_waiter, must not have a tlabel allocated */
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
hpsb_free_packet(packet);
return;
}
atomic_dec(&packet->refcnt); /* drop HC's reference */
- /* here the packet must be on the host->pending_packet_queue */
+ /* here the packet must be on the host->pending_packets queue */
if (ackcode != ACK_PENDING || !packet->expect_response) {
packet->state = hpsb_complete;
- __skb_unlink(packet->skb, &host->pending_packet_queue);
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+ list_del_init(&packet->queue);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
queue_packet_complete(packet);
return;
}
@@ -493,7 +529,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
packet->state = hpsb_pending;
packet->sendtime = jiffies;
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
mod_timer(&host->timeout, jiffies + host->timeout_interval);
}
@@ -504,9 +540,10 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
* @rootid: root whose force_root bit should get set (-1 = don't set force_root)
* @gapcnt: gap count value to set (-1 = don't set gap count)
*
- * This function sends a PHY config packet on the bus through the specified host.
+ * This function sends a PHY config packet on the bus through the specified
+ * host.
*
- * Return value: 0 for success or error number otherwise.
+ * Return value: 0 for success or negative error number otherwise.
*/
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
{
@@ -567,12 +604,16 @@ int hpsb_send_packet(struct hpsb_packet *packet)
WARN_ON(packet->no_waiter && packet->expect_response);
if (!packet->no_waiter || packet->expect_response) {
+ unsigned long flags;
+
atomic_inc(&packet->refcnt);
/* Set the initial "sendtime" to 10 seconds from now, to
prevent premature expiry. If a packet takes more than
10 seconds to hit the wire, we have bigger problems :) */
packet->sendtime = jiffies + 10 * HZ;
- skb_queue_tail(&host->pending_packet_queue, packet->skb);
+ spin_lock_irqsave(&pending_packets_lock, flags);
+ list_add_tail(&packet->queue, &host->pending_packets);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
}
if (packet->node_id == host->node_id) {
@@ -621,6 +662,12 @@ static void complete_packet(void *data)
complete((struct completion *) data);
}
+/**
+ * hpsb_send_packet_and_wait - enqueue packet, block until transaction completes
+ * @packet: packet to send
+ *
+ * Return value: 0 on success, negative errno on failure.
+ */
int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
{
struct completion done;
@@ -642,86 +689,97 @@ static void send_packet_nocare(struct hpsb_packet *packet)
}
}
+static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
+ size_t buffer_size, int tcode)
+{
+ size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;
+
+ if (unlikely(ret > buffer_size))
+ ret = buffer_size;
+
+ if (unlikely(ret + header_size != packet_size))
+ HPSB_ERR("unexpected packet size %zd (tcode %d), bug?",
+ packet_size, tcode);
+ return ret;
+}
static void handle_packet_response(struct hpsb_host *host, int tcode,
quadlet_t *data, size_t size)
{
- struct hpsb_packet *packet = NULL;
- struct sk_buff *skb;
- int tcode_match = 0;
- int tlabel;
+ struct hpsb_packet *packet;
+ int tlabel = (data[0] >> 10) & 0x3f;
+ size_t header_size;
unsigned long flags;
- tlabel = (data[0] >> 10) & 0x3f;
-
- spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
+ spin_lock_irqsave(&pending_packets_lock, flags);
- skb_queue_walk(&host->pending_packet_queue, skb) {
- packet = (struct hpsb_packet *)skb->data;
- if ((packet->tlabel == tlabel)
- && (packet->node_id == (data[1] >> 16))){
- break;
- }
-
- packet = NULL;
- }
+ list_for_each_entry(packet, &host->pending_packets, queue)
+ if (packet->tlabel == tlabel &&
+ packet->node_id == (data[1] >> 16))
+ goto found;
- if (packet == NULL) {
- HPSB_DEBUG("unsolicited response packet received - no tlabel match");
- dump_packet("contents", data, 16, -1);
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
- return;
- }
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
+ HPSB_DEBUG("unsolicited response packet received - %s",
+ "no tlabel match");
+ dump_packet("contents", data, 16, -1);
+ return;
+found:
switch (packet->tcode) {
case TCODE_WRITEQ:
case TCODE_WRITEB:
- if (tcode != TCODE_WRITE_RESPONSE)
+ if (unlikely(tcode != TCODE_WRITE_RESPONSE))
break;
- tcode_match = 1;
- memcpy(packet->header, data, 12);
- break;
+ header_size = 12;
+ size = 0;
+ goto dequeue;
+
case TCODE_READQ:
- if (tcode != TCODE_READQ_RESPONSE)
+ if (unlikely(tcode != TCODE_READQ_RESPONSE))
break;
- tcode_match = 1;
- memcpy(packet->header, data, 16);
- break;
+ header_size = 16;
+ size = 0;
+ goto dequeue;
+
case TCODE_READB:
- if (tcode != TCODE_READB_RESPONSE)
+ if (unlikely(tcode != TCODE_READB_RESPONSE))
break;
- tcode_match = 1;
- BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
- memcpy(packet->header, data, 16);
- memcpy(packet->data, data + 4, size - 16);
- break;
+ header_size = 16;
+ size = packet_size_to_data_size(size, header_size,
+ packet->allocated_data_size,
+ tcode);
+ goto dequeue;
+
case TCODE_LOCK_REQUEST:
- if (tcode != TCODE_LOCK_RESPONSE)
+ if (unlikely(tcode != TCODE_LOCK_RESPONSE))
break;
- tcode_match = 1;
- size = min((size - 16), (size_t)8);
- BUG_ON(packet->skb->len - sizeof(*packet) < size);
- memcpy(packet->header, data, 16);
- memcpy(packet->data, data + 4, size);
- break;
+ header_size = 16;
+ size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
+ header_size,
+ packet->allocated_data_size,
+ tcode);
+ goto dequeue;
}
- if (!tcode_match) {
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
- HPSB_INFO("unsolicited response packet received - tcode mismatch");
- dump_packet("contents", data, 16, -1);
- return;
- }
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
+ HPSB_DEBUG("unsolicited response packet received - %s",
+ "tcode mismatch");
+ dump_packet("contents", data, 16, -1);
+ return;
- __skb_unlink(skb, &host->pending_packet_queue);
+dequeue:
+ list_del_init(&packet->queue);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
if (packet->state == hpsb_queued) {
packet->sendtime = jiffies;
packet->ack_code = ACK_PENDING;
}
-
packet->state = hpsb_complete;
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+
+ memcpy(packet->header, data, header_size);
+ if (size)
+ memcpy(packet->data, data + 4, size);
queue_packet_complete(packet);
}
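
Worked example for packet_size_to_data_size() above (numbers made up): a 24-byte read-block response with a 16-byte header yields 8 data bytes; if the waiting packet had only allocated 4 bytes, the copy is clamped to 4 and the "unexpected packet size" warning is logged, because 4 + 16 != 24.
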
@@ -735,6 +793,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
p = hpsb_alloc_packet(dsize);
if (unlikely(p == NULL)) {
/* FIXME - send data_error response */
+ HPSB_ERR("out of memory, cannot send response packet");
return NULL;
}
@@ -784,7 +843,6 @@ static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
{
PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
- packet->header[2] = 0;
packet->header_size = 12;
packet->data_size = 0;
}
@@ -801,12 +859,9 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc
packet->data_size = length;
}
-#define PREP_REPLY_PACKET(length) \
- packet = create_reply_packet(host, data, length); \
- if (packet == NULL) break
-
static void handle_incoming_packet(struct hpsb_host *host, int tcode,
- quadlet_t *data, size_t size, int write_acked)
+ quadlet_t *data, size_t size,
+ int write_acked)
{
struct hpsb_packet *packet;
int length, rcode, extcode;
@@ -816,74 +871,72 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
u16 flags = (u16) data[0];
u64 addr;
- /* big FIXME - no error checking is done for an out of bounds length */
+ /* FIXME?
+ * Out-of-bounds lengths are left for highlevel_read|write to cap. */
switch (tcode) {
case TCODE_WRITEQ:
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, dest, data+3,
+ rcode = highlevel_write(host, source, dest, data + 3,
addr, 4, flags);
-
- if (!write_acked
- && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
- && (rcode >= 0)) {
- /* not a broadcast write, reply */
- PREP_REPLY_PACKET(0);
- fill_async_write_resp(packet, rcode);
- send_packet_nocare(packet);
- }
- break;
+ goto handle_write_request;
case TCODE_WRITEB:
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, dest, data+4,
- addr, data[3]>>16, flags);
-
- if (!write_acked
- && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
- && (rcode >= 0)) {
- /* not a broadcast write, reply */
- PREP_REPLY_PACKET(0);
+ rcode = highlevel_write(host, source, dest, data + 4,
+ addr, data[3] >> 16, flags);
+handle_write_request:
+ if (rcode < 0 || write_acked ||
+ NODEID_TO_NODE(data[0] >> 16) == NODE_MASK)
+ return;
+ /* not a broadcast write, reply */
+ packet = create_reply_packet(host, data, 0);
+ if (packet) {
fill_async_write_resp(packet, rcode);
send_packet_nocare(packet);
}
- break;
+ return;
case TCODE_READQ:
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
+ if (rcode < 0)
+ return;
- if (rcode >= 0) {
- PREP_REPLY_PACKET(0);
+ packet = create_reply_packet(host, data, 0);
+ if (packet) {
fill_async_readquad_resp(packet, rcode, buffer);
send_packet_nocare(packet);
}
- break;
+ return;
case TCODE_READB:
length = data[3] >> 16;
- PREP_REPLY_PACKET(length);
+ packet = create_reply_packet(host, data, length);
+ if (!packet)
+ return;
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
rcode = highlevel_read(host, source, packet->data, addr,
length, flags);
-
- if (rcode >= 0) {
- fill_async_readblock_resp(packet, rcode, length);
- send_packet_nocare(packet);
- } else {
+ if (rcode < 0) {
hpsb_free_packet(packet);
+ return;
}
- break;
+ fill_async_readblock_resp(packet, rcode, length);
+ send_packet_nocare(packet);
+ return;
case TCODE_LOCK_REQUEST:
length = data[3] >> 16;
extcode = data[3] & 0xffff;
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- PREP_REPLY_PACKET(8);
+ packet = create_reply_packet(host, data, 8);
+ if (!packet)
+ return;
- if ((extcode == 0) || (extcode >= 7)) {
+ if (extcode == 0 || extcode >= 7) {
/* let switch default handle error */
length = 0;
}
@@ -891,12 +944,12 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
switch (length) {
case 4:
rcode = highlevel_lock(host, source, packet->data, addr,
- data[4], 0, extcode,flags);
+ data[4], 0, extcode, flags);
fill_async_lock_resp(packet, rcode, extcode, 4);
break;
case 8:
- if ((extcode != EXTCODE_FETCH_ADD)
- && (extcode != EXTCODE_LITTLE_ADD)) {
+ if (extcode != EXTCODE_FETCH_ADD &&
+ extcode != EXTCODE_LITTLE_ADD) {
rcode = highlevel_lock(host, source,
packet->data, addr,
data[5], data[4],
@@ -920,29 +973,38 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
break;
default:
rcode = RCODE_TYPE_ERROR;
- fill_async_lock_resp(packet, rcode,
- extcode, 0);
+ fill_async_lock_resp(packet, rcode, extcode, 0);
}
- if (rcode >= 0) {
- send_packet_nocare(packet);
- } else {
+ if (rcode < 0)
hpsb_free_packet(packet);
- }
- break;
+ else
+ send_packet_nocare(packet);
+ return;
}
-
}
-#undef PREP_REPLY_PACKET
-
+/**
+ * hpsb_packet_received - hand over received packet to the core
+ *
+ * For host driver module usage.
+ *
+ * The contents of data are expected to be the full packet but with the CRCs
+ * left out (data block follows header immediately), with the header (i.e. the
+ * first four quadlets) in machine byte order and the data block in big endian.
+ * *@data can be safely overwritten after this call.
+ *
+ * If the packet is a write request, @write_acked is to be set to true if it was
+ * ack_complete'd already, false otherwise. This argument is ignored for any
+ * other packet type.
+ */
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
int write_acked)
{
int tcode;
- if (host->in_bus_reset) {
- HPSB_INFO("received packet during reset; ignoring");
+ if (unlikely(host->in_bus_reset)) {
+ HPSB_DEBUG("received packet during reset; ignoring");
return;
}
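
A minimal sketch of how a low-level host driver might hand a received packet to the core, per the kernel-doc above; example_ar_packet_done, ar_buf, ar_len and ack are placeholder names, and ACK_COMPLETE is the ack code from ieee1394.h:

static void example_ar_packet_done(struct hpsb_host *host,
				   quadlet_t *ar_buf, size_t ar_len, int ack)
{
	/* header quadlets already in machine byte order, data block left
	 * big endian, CRCs stripped before this point */
	hpsb_packet_received(host, ar_buf, ar_len, ack == ACK_COMPLETE);
}
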
@@ -976,23 +1038,27 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
break;
default:
- HPSB_NOTICE("received packet with bogus transaction code %d",
- tcode);
+ HPSB_DEBUG("received packet with bogus transaction code %d",
+ tcode);
break;
}
}
-
static void abort_requests(struct hpsb_host *host)
{
- struct hpsb_packet *packet;
- struct sk_buff *skb;
+ struct hpsb_packet *packet, *p;
+ struct list_head tmp;
+ unsigned long flags;
host->driver->devctl(host, CANCEL_REQUESTS, 0);
- while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
- packet = (struct hpsb_packet *)skb->data;
+ INIT_LIST_HEAD(&tmp);
+ spin_lock_irqsave(&pending_packets_lock, flags);
+ list_splice_init(&host->pending_packets, &tmp);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
+ list_for_each_entry_safe(packet, p, &tmp, queue) {
+ list_del_init(&packet->queue);
packet->state = hpsb_complete;
packet->ack_code = ACKX_ABORTED;
queue_packet_complete(packet);
@@ -1002,87 +1068,90 @@ static void abort_requests(struct hpsb_host *host)
void abort_timedouts(unsigned long __opaque)
{
struct hpsb_host *host = (struct hpsb_host *)__opaque;
- unsigned long flags;
- struct hpsb_packet *packet;
- struct sk_buff *skb;
- unsigned long expire;
+ struct hpsb_packet *packet, *p;
+ struct list_head tmp;
+ unsigned long flags, expire, j;
spin_lock_irqsave(&host->csr.lock, flags);
expire = host->csr.expire;
spin_unlock_irqrestore(&host->csr.lock, flags);
- /* Hold the lock around this, since we aren't dequeuing all
- * packets, just ones we need. */
- spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
-
- while (!skb_queue_empty(&host->pending_packet_queue)) {
- skb = skb_peek(&host->pending_packet_queue);
-
- packet = (struct hpsb_packet *)skb->data;
+ j = jiffies;
+ INIT_LIST_HEAD(&tmp);
+ spin_lock_irqsave(&pending_packets_lock, flags);
- if (time_before(packet->sendtime + expire, jiffies)) {
- __skb_unlink(skb, &host->pending_packet_queue);
- packet->state = hpsb_complete;
- packet->ack_code = ACKX_TIMEOUT;
- queue_packet_complete(packet);
- } else {
+ list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
+ if (time_before(packet->sendtime + expire, j))
+ list_move_tail(&packet->queue, &tmp);
+ else
/* Since packets are added to the tail, the oldest
* ones are first, always. When we get to one that
* isn't timed out, the rest aren't either. */
break;
- }
}
+ if (!list_empty(&host->pending_packets))
+ mod_timer(&host->timeout, j + host->timeout_interval);
- if (!skb_queue_empty(&host->pending_packet_queue))
- mod_timer(&host->timeout, jiffies + host->timeout_interval);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
- spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+ list_for_each_entry_safe(packet, p, &tmp, queue) {
+ list_del_init(&packet->queue);
+ packet->state = hpsb_complete;
+ packet->ack_code = ACKX_TIMEOUT;
+ queue_packet_complete(packet);
+ }
}
-
-/* Kernel thread and vars, which handles packets that are completed. Only
- * packets that have a "complete" function are sent here. This way, the
- * completion is run out of kernel context, and doesn't block the rest of
- * the stack. */
static struct task_struct *khpsbpkt_thread;
-static struct sk_buff_head hpsbpkt_queue;
+static LIST_HEAD(hpsbpkt_queue);
static void queue_packet_complete(struct hpsb_packet *packet)
{
+ unsigned long flags;
+
if (packet->no_waiter) {
hpsb_free_packet(packet);
return;
}
if (packet->complete_routine != NULL) {
- skb_queue_tail(&hpsbpkt_queue, packet->skb);
+ spin_lock_irqsave(&pending_packets_lock, flags);
+ list_add_tail(&packet->queue, &hpsbpkt_queue);
+ spin_unlock_irqrestore(&pending_packets_lock, flags);
wake_up_process(khpsbpkt_thread);
}
return;
}
+/*
+ * Kernel thread which handles packets that are completed. This way the
+ * packet's "complete" function is asynchronously run in process context.
+ * Only packets which have a "complete" function may be sent here.
+ */
static int hpsbpkt_thread(void *__hi)
{
- struct sk_buff *skb;
- struct hpsb_packet *packet;
- void (*complete_routine)(void*);
- void *complete_data;
+ struct hpsb_packet *packet, *p;
+ struct list_head tmp;
+ int may_schedule;
current->flags |= PF_NOFREEZE;
while (!kthread_should_stop()) {
- while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
- packet = (struct hpsb_packet *)skb->data;
-
- complete_routine = packet->complete_routine;
- complete_data = packet->complete_data;
- packet->complete_routine = packet->complete_data = NULL;
+ INIT_LIST_HEAD(&tmp);
+ spin_lock_irq(&pending_packets_lock);
+ list_splice_init(&hpsbpkt_queue, &tmp);
+ spin_unlock_irq(&pending_packets_lock);
- complete_routine(complete_data);
+ list_for_each_entry_safe(packet, p, &tmp, queue) {
+ list_del_init(&packet->queue);
+ packet->complete_routine(packet->complete_data);
}
set_current_state(TASK_INTERRUPTIBLE);
- if (!skb_peek(&hpsbpkt_queue))
+ spin_lock_irq(&pending_packets_lock);
+ may_schedule = list_empty(&hpsbpkt_queue);
+ spin_unlock_irq(&pending_packets_lock);
+ if (may_schedule)
schedule();
__set_current_state(TASK_RUNNING);
}
@@ -1093,8 +1162,6 @@ static int __init ieee1394_init(void)
{
int i, ret;
- skb_queue_head_init(&hpsbpkt_queue);
-
/* non-fatal error */
if (hpsb_init_config_roms()) {
HPSB_ERR("Failed to initialize some config rom entries.\n");
@@ -1268,7 +1335,6 @@ EXPORT_SYMBOL(hpsb_destroy_hostinfo);
EXPORT_SYMBOL(hpsb_set_hostinfo_key);
EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
EXPORT_SYMBOL(hpsb_set_hostinfo);
-EXPORT_SYMBOL(highlevel_host_reset);
/** nodemgr.c **/
EXPORT_SYMBOL(hpsb_node_fill_packet);
@@ -1311,11 +1377,10 @@ EXPORT_SYMBOL(hpsb_iso_wake);
EXPORT_SYMBOL(hpsb_iso_recv_flush);
/** csr1212.c **/
-EXPORT_SYMBOL(csr1212_new_directory);
EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
-EXPORT_SYMBOL(csr1212_release_keyval);
-EXPORT_SYMBOL(csr1212_read);
+EXPORT_SYMBOL(csr1212_get_keyval);
+EXPORT_SYMBOL(csr1212_new_directory);
EXPORT_SYMBOL(csr1212_parse_keyval);
-EXPORT_SYMBOL(_csr1212_read_keyval);
-EXPORT_SYMBOL(_csr1212_destroy_keyval);
+EXPORT_SYMBOL(csr1212_read);
+EXPORT_SYMBOL(csr1212_release_keyval);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index bd29d8ef5bb..ad526523d0e 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -4,7 +4,6 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/list.h>
-#include <linux/skbuff.h>
#include <linux/types.h>
#include <asm/atomic.h>
@@ -13,7 +12,7 @@
struct hpsb_packet {
/* This struct is basically read-only for hosts with the exception of
- * the data buffer contents and xnext - see below. */
+ * the data buffer contents and driver_list. */
/* This can be used for host driver internal linking.
*
@@ -49,134 +48,65 @@ struct hpsb_packet {
/* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
unsigned speed_code:2;
- /*
- * *header and *data are guaranteed to be 32-bit DMAable and may be
- * overwritten to allow in-place byte swapping. Neither of these is
- * CRCed (the sizes also don't include CRC), but contain space for at
- * least one additional quadlet to allow in-place CRCing. The memory is
- * also guaranteed to be DMA mappable.
- */
- quadlet_t *header;
- quadlet_t *data;
- size_t header_size;
- size_t data_size;
-
struct hpsb_host *host;
unsigned int generation;
atomic_t refcnt;
+ struct list_head queue;
/* Function (and possible data to pass to it) to call when this
* packet is completed. */
void (*complete_routine)(void *);
void *complete_data;
- /* XXX This is just a hack at the moment */
- struct sk_buff *skb;
-
/* Store jiffies for implementing bus timeouts. */
unsigned long sendtime;
- quadlet_t embedded_header[5];
+ /* Sizes are in bytes. *data can be DMA-mapped. */
+ size_t allocated_data_size; /* as allocated */
+ size_t data_size; /* as filled in */
+ size_t header_size; /* as filled in, not counting the CRC */
+ quadlet_t *data;
+ quadlet_t header[5];
+ quadlet_t embedded_data[0]; /* keep as last member */
};
-/* Set a task for when a packet completes */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
void (*routine)(void *), void *data);
-
static inline struct hpsb_packet *driver_packet(struct list_head *l)
{
return list_entry(l, struct hpsb_packet, driver_list);
}
-
void abort_timedouts(unsigned long __opaque);
-
struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
void hpsb_free_packet(struct hpsb_packet *packet);
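
A minimal sketch of how the reworked size fields are meant to be used by a caller, assuming the declarations above are in scope; example_fill_payload is a hypothetical helper, not part of the API:

static int example_fill_payload(struct hpsb_packet *packet,
				const void *buf, size_t len)
{
	if (len > packet->allocated_data_size)
		return -EINVAL;		/* would overflow the packet buffer */

	memcpy(packet->data, buf, len);
	packet->data_size = len;	/* "as filled in" */
	return 0;
}
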
-/*
- * Generation counter for the complete 1394 subsystem. Generation gets
- * incremented on every change in the subsystem (e.g. bus reset).
+/**
+ * get_hpsb_generation - generation counter for the complete 1394 subsystem
*
- * Use the functions, not the variable.
+ * Generation gets incremented on every change in the subsystem (notably on bus
+ * resets). Use the functions, not the variable.
*/
static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
{
return atomic_read(&host->generation);
}
-/*
- * Send a PHY configuration packet, return 0 on success, negative
- * errno on failure.
- */
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);
-
-/*
- * Queue packet for transmitting, return 0 on success, negative errno
- * on failure.
- */
int hpsb_send_packet(struct hpsb_packet *packet);
-
-/*
- * Queue packet for transmitting, and block until the transaction
- * completes. Return 0 on success, negative errno on failure.
- */
int hpsb_send_packet_and_wait(struct hpsb_packet *packet);
-
-/* Initiate bus reset on the given host. Returns 1 if bus reset already in
- * progress, 0 otherwise. */
int hpsb_reset_bus(struct hpsb_host *host, int type);
-
int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
u64 *local_time);
-/*
- * The following functions are exported for host driver module usage. All of
- * them are safe to use in interrupt contexts, although some are quite
- * complicated so you may want to run them in bottom halves instead of calling
- * them directly.
- */
-
-/* Notify a bus reset to the core. Returns 1 if bus reset already in progress,
- * 0 otherwise. */
int hpsb_bus_reset(struct hpsb_host *host);
-
-/*
- * Hand over received selfid packet to the core. Complement check (second
- * quadlet is complement of first) is expected to be done and successful.
- */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
-
-/*
- * Notify completion of SelfID stage to the core and report new physical ID
- * and whether host is root now.
- */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
-
-/*
- * Notify core of sending a packet. Ackcode is the ack code returned for async
- * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
- * for other cases (internal errors that don't justify a panic). Safe to call
- * from within a transmit packet routine.
- */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
int ackcode);
-
-/*
- * Hand over received packet to the core. The contents of data are expected to
- * be the full packet but with the CRCs left out (data block follows header
- * immediately), with the header (i.e. the first four quadlets) in machine byte
- * order and the data block in big endian. *data can be safely overwritten
- * after this call.
- *
- * If the packet is a write request, write_acked is to be set to true if it was
- * ack_complete'd already, false otherwise. This arg is ignored for any other
- * packet type.
- */
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
int write_acked);
-
/*
* CHARACTER DEVICE DISPATCHING
*
@@ -217,7 +147,9 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \
IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
-/* return the index (within a minor number block) of a file */
+/**
+ * ieee1394_file_to_instance - get the index within a minor number block
+ */
static inline unsigned char ieee1394_file_to_instance(struct file *file)
{
return file->f_path.dentry->d_inode->i_cindex;
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 0833fc9f50c..40078ce930c 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -10,11 +10,16 @@
*/
#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/hardirq.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sched.h> /* because linux/wait.h is broken if CONFIG_SMP=n */
#include <linux/wait.h>
#include <asm/bug.h>
#include <asm/errno.h>
+#include <asm/system.h>
#include "ieee1394.h"
#include "ieee1394_types.h"
@@ -32,7 +37,7 @@
#ifndef HPSB_DEBUG_TLABELS
static
#endif
-spinlock_t hpsb_tlabel_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(hpsb_tlabel_lock);
static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);
@@ -212,6 +217,15 @@ void hpsb_free_tlabel(struct hpsb_packet *packet)
wake_up_interruptible(&tlabel_wq);
}
+/**
+ * hpsb_packet_success - Make sense of the ack and reply codes
+ *
+ * Make sense of the ack and reply codes and return more convenient error codes:
+ * 0 = success. -%EBUSY = node is busy, try again. -%EAGAIN = error which can
+ * probably be resolved by a retry. -%EREMOTEIO = node suffers from an internal
+ * error. -%EACCES = this transaction is not allowed on the requested address.
+ * -%EINVAL = invalid address at node.
+ */
int hpsb_packet_success(struct hpsb_packet *packet)
{
switch (packet->ack_code) {
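
A minimal sketch of the usual calling pattern, assuming a packet prepared by one of the hpsb_make_* helpers: queue it, wait for the transaction, then let hpsb_packet_success() translate the ack/response codes into an errno value:

static int example_do_transaction(struct hpsb_packet *packet)
{
	int ret = hpsb_send_packet_and_wait(packet);

	return ret ? ret : hpsb_packet_success(packet);
}
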
@@ -364,6 +378,13 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
}
packet->host = host;
+ /* Because it is too difficult to determine all PHY speeds and link
+ * speeds here, we use S100... */
+ packet->speed_code = IEEE1394_SPEED_100;
+
+ /* ...and prevent hpsb_send_packet() from overriding it. */
+ packet->node_id = LOCAL_BUS | ALL_NODES;
+
if (hpsb_get_tlabel(packet)) {
hpsb_free_packet(packet);
return NULL;
@@ -493,6 +514,16 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
* avoid in kernel buffers for user space callers
*/
+/**
+ * hpsb_read - generic read function
+ *
+ * Recognizes the local node ID and acts accordingly. Automatically uses a
+ * quadlet read request if @length == 4 and a block read request otherwise.
+ * It does not yet support lengths that are not a multiple of 4.
+ *
+ * You must explicitly specify the @generation for which the node ID is valid,
+ * to avoid sending packets to the wrong nodes when we race with a bus reset.
+ */
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t * buffer, size_t length)
{
@@ -532,6 +563,16 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
return retval;
}
+/**
+ * hpsb_write - generic write function
+ *
+ * Recognizes the local node ID and acts accordingly. Automatically uses a
+ * quadlet write request if @length == 4 and a block write request otherwise.
+ * It does not yet support lengths that are not a multiple of 4.
+ *
+ * You must explicitly specify the @generation for which the node ID is valid,
+ * to avoid sending packets to the wrong nodes when we race with a bus reset.
+ */
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t * buffer, size_t length)
{
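
A minimal sketch of a quadlet read through this generic path, assuming the CSR_REGISTER_BASE and CSR_CONFIG_ROM constants from csr.h; the generation is sampled with get_hpsb_generation() so a racing bus reset makes the request fail instead of reaching the wrong node:

static int example_read_rom_header(struct hpsb_host *host, nodeid_t node,
				   quadlet_t *quad)
{
	unsigned int generation = get_hpsb_generation(host);

	/* length 4 selects a quadlet read request */
	return hpsb_read(host, node, generation,
			 CSR_REGISTER_BASE + CSR_CONFIG_ROM, quad, 4);
}
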
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
index c1369c41469..86b8ee692ea 100644
--- a/drivers/ieee1394/ieee1394_transactions.h
+++ b/drivers/ieee1394/ieee1394_transactions.h
@@ -27,27 +27,7 @@ struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
int length, int channel, int tag,
int sync);
-
-/*
- * hpsb_packet_success - Make sense of the ack and reply codes and
- * return more convenient error codes:
- * 0 success
- * -EBUSY node is busy, try again
- * -EAGAIN error which can probably resolved by retry
- * -EREMOTEIO node suffers from an internal error
- * -EACCES this transaction is not allowed on requested address
- * -EINVAL invalid address at node
- */
int hpsb_packet_success(struct hpsb_packet *packet);
-
-/*
- * The generic read and write functions. All recognize the local node ID
- * and act accordingly. Read and write automatically use quadlet commands if
- * length == 4 and and block commands otherwise (however, they do not yet
- * support lengths that are not a multiple of 4). You must explicitly specifiy
- * the generation for which the node ID is valid, to avoid sending packets to
- * the wrong nodes when we race with a bus reset.
- */
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index c6227e51136..07ca35c98f9 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -10,11 +10,15 @@
*/
#include <linux/pci.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include "hosts.h"
#include "iso.h"
+/**
+ * hpsb_iso_stop - stop DMA
+ */
void hpsb_iso_stop(struct hpsb_iso *iso)
{
if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
@@ -25,6 +29,9 @@ void hpsb_iso_stop(struct hpsb_iso *iso)
iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
}
+/**
+ * hpsb_iso_shutdown - deallocate buffer and DMA context
+ */
void hpsb_iso_shutdown(struct hpsb_iso *iso)
{
if (iso->flags & HPSB_ISO_DRIVER_INIT) {
@@ -130,6 +137,9 @@ static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
return NULL;
}
+/**
+ * hpsb_iso_n_ready - returns number of packets ready to send or receive
+ */
int hpsb_iso_n_ready(struct hpsb_iso *iso)
{
unsigned long flags;
@@ -142,6 +152,9 @@ int hpsb_iso_n_ready(struct hpsb_iso *iso)
return val;
}
+/**
+ * hpsb_iso_xmit_init - allocate the buffer and DMA context
+ */
struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
@@ -172,6 +185,11 @@ struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
return NULL;
}
+/**
+ * hpsb_iso_recv_init - allocate the buffer and DMA context
+ *
+ * Note, if channel = -1, multi-channel receive is enabled.
+ */
struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
@@ -199,6 +217,11 @@ struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
return NULL;
}
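
A minimal sketch of a multi-channel receive setup (channel == -1) as described below, assuming the caller supplies the dma_mode value and the completion callback; buffer sizes, the channel mask and the start parameters are only illustrative:

static struct hpsb_iso *example_recv_multichannel(struct hpsb_host *host,
				int dma_mode, void (*cb)(struct hpsb_iso *))
{
	struct hpsb_iso *iso;

	iso = hpsb_iso_recv_init(host, 64 * 1024, 256, /* channel */ -1,
				 dma_mode, /* irq_interval */ -1, cb);
	if (!iso)
		return NULL;

	/* listen on channels 1 and 3 only */
	hpsb_iso_recv_set_channel_mask(iso, (1ULL << 1) | (1ULL << 3));

	if (hpsb_iso_recv_start(iso, /* cycle */ -1, /* tag_mask */ -1,
				/* sync */ 0)) {
		hpsb_iso_shutdown(iso);
		return NULL;
	}
	return iso;
}
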
+/**
+ * hpsb_iso_recv_listen_channel - listen to a channel
+ *
+ * multi-channel only
+ */
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
{
if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
@@ -206,6 +229,11 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
}
+/**
+ * hpsb_iso_recv_unlisten_channel - stop listening to a channel
+ *
+ * multi-channel only
+ */
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
{
if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
@@ -213,6 +241,11 @@ int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
}
+/**
+ * hpsb_iso_recv_set_channel_mask - set the mask of listened-to channels
+ *
+ * multi-channel only
+ */
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
@@ -221,6 +254,12 @@ int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
(unsigned long)&mask);
}
+/**
+ * hpsb_iso_recv_flush - check for arrival of new packets
+ *
+ * check for arrival of new packets immediately (even if irq_interval
+ * has not yet been reached)
+ */
int hpsb_iso_recv_flush(struct hpsb_iso *iso)
{
if (iso->type != HPSB_ISO_RECV)
@@ -238,6 +277,9 @@ static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
return retval;
}
+/**
+ * hpsb_iso_xmit_start - start DMA
+ */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
{
if (iso->type != HPSB_ISO_XMIT)
@@ -270,6 +312,9 @@ int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
return 0;
}
+/**
+ * hpsb_iso_recv_start - start DMA
+ */
int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
int retval = 0;
@@ -306,8 +351,7 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
}
/* check to make sure the user has not supplied bogus values of offset/len
- that would cause the kernel to access memory outside the buffer */
-
+ * that would cause the kernel to access memory outside the buffer */
static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
unsigned int offset, unsigned short len,
unsigned int *out_offset,
@@ -331,6 +375,12 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
return 0;
}
+/**
+ * hpsb_iso_xmit_queue_packet - queue a packet for transmission.
+ *
+ * @offset is relative to the beginning of the DMA buffer, where the packet's
+ * data payload should already have been placed.
+ */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
u8 tag, u8 sy)
{
@@ -380,6 +430,9 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
return rv;
}
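
A minimal sketch of queueing one packet for transmission, assuming the dma_region's kernel mapping is reachable as iso->data_buf.kvirt (as in dma.h) and that the caller manages buffer offsets itself:

static int example_xmit_one(struct hpsb_iso *iso, const void *payload, u16 len)
{
	u32 offset = 0;		/* payload placed at the start of the buffer */

	memcpy(iso->data_buf.kvirt + offset, payload, len);
	return hpsb_iso_xmit_queue_packet(iso, offset, len,
					  /* tag */ 0, /* sy */ 0);
}
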
+/**
+ * hpsb_iso_xmit_sync - wait until all queued packets have been transmitted
+ */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
{
if (iso->type != HPSB_ISO_XMIT)
@@ -390,6 +443,15 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
iso->buf_packets);
}
+/**
+ * hpsb_iso_packet_sent - notify the core that a packet was sent
+ *
+ * Available to low-level drivers.
+ *
+ * Call after a packet has been transmitted to the bus (interrupt context is
+ * OK). @cycle is the _exact_ cycle the packet was sent on. @error should be
+ * non-zero if some sort of error occurred when sending the packet.
+ */
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
{
unsigned long flags;
@@ -413,6 +475,13 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
spin_unlock_irqrestore(&iso->lock, flags);
}
+/**
+ * hpsb_iso_packet_received - notify the core that a packet was received
+ *
+ * Available to low-level drivers.
+ *
+ * Call after a packet has been received (interrupt context is OK).
+ */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
u16 total_len, u16 cycle, u8 channel, u8 tag,
u8 sy)
@@ -442,6 +511,11 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
spin_unlock_irqrestore(&iso->lock, flags);
}
+/**
+ * hpsb_iso_recv_release_packets - release packets, reuse buffer
+ *
+ * @n_packets have been read out of the buffer, re-use the buffer space
+ */
int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
{
unsigned long flags;
@@ -477,6 +551,13 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
return rv;
}
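
A minimal sketch of a receive completion callback draining the buffer; example_consume_packets() is a placeholder for whatever copies the payloads out of iso->data_buf before the space is handed back:

static void example_recv_callback(struct hpsb_iso *iso)
{
	int n = hpsb_iso_n_ready(iso);

	if (n > 0) {
		example_consume_packets(iso, n);	/* hypothetical consumer */
		hpsb_iso_recv_release_packets(iso, n);
	}
}
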
+/**
+ * hpsb_iso_wake - wake waiting processes
+ *
+ * Available to low-level drivers.
+ *
+ * Call to wake waiting processes after buffer space has opened up.
+ */
void hpsb_iso_wake(struct hpsb_iso *iso)
{
wake_up_interruptible(&iso->waitq);
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
index 1210a97e868..b94e55e6eaa 100644
--- a/drivers/ieee1394/iso.h
+++ b/drivers/ieee1394/iso.h
@@ -150,8 +150,6 @@ struct hpsb_iso {
/* functions available to high-level drivers (e.g. raw1394) */
-/* allocate the buffer and DMA context */
-
struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
@@ -159,8 +157,6 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
int speed,
int irq_interval,
void (*callback)(struct hpsb_iso*));
-
-/* note: if channel = -1, multi-channel receive is enabled */
struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
@@ -168,56 +164,29 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
int dma_mode,
int irq_interval,
void (*callback)(struct hpsb_iso*));
-
-/* multi-channel only */
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
-
-/* start/stop DMA */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle,
int prebuffer);
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle,
int tag_mask, int sync);
void hpsb_iso_stop(struct hpsb_iso *iso);
-
-/* deallocate buffer and DMA context */
void hpsb_iso_shutdown(struct hpsb_iso *iso);
-
-/* queue a packet for transmission.
- * 'offset' is relative to the beginning of the DMA buffer, where the packet's
- * data payload should already have been placed. */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
u8 tag, u8 sy);
-
-/* wait until all queued packets have been transmitted to the bus */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
-
-/* N packets have been read out of the buffer, re-use the buffer space */
-int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
- unsigned int n_packets);
-
-/* check for arrival of new packets immediately (even if irq_interval
- * has not yet been reached) */
+int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
+ unsigned int n_packets);
int hpsb_iso_recv_flush(struct hpsb_iso *iso);
-
-/* returns # of packets ready to send or receive */
int hpsb_iso_n_ready(struct hpsb_iso *iso);
/* the following are callbacks available to low-level drivers */
-/* call after a packet has been transmitted to the bus (interrupt context is OK)
- * 'cycle' is the _exact_ cycle the packet was sent on
- * 'error' should be non-zero if some sort of error occurred when sending the
- * packet */
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
-
-/* call after a packet has been received (interrupt context OK) */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
u16 total_len, u16 cycle, u8 channel, u8 tag,
u8 sy);
-
-/* call to wake waiting processes after buffer space has opened up. */
void hpsb_iso_wake(struct hpsb_iso *iso);
#endif /* IEEE1394_ISO_H */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index dbeba45a031..6a1a0572275 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -16,6 +16,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/freezer.h>
#include <asm/atomic.h>
@@ -115,7 +116,7 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
{
- return (CSR1212_BE32_TO_CPU(bus_info_data[2]) >> 8) & 0x3;
+ return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
}
static struct csr1212_bus_ops nodemgr_csr_ops = {
@@ -580,7 +581,7 @@ static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
goto fail;
return;
fail:
- HPSB_ERR("Failed to add sysfs attribute for driver %s", driver->name);
+ HPSB_ERR("Failed to add sysfs attribute");
}
@@ -604,8 +605,7 @@ static void nodemgr_create_ne_dev_files(struct node_entry *ne)
goto fail;
return;
fail:
- HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
- (unsigned long long)ne->guid);
+ HPSB_ERR("Failed to add sysfs attribute");
}
@@ -619,7 +619,7 @@ static void nodemgr_create_host_dev_files(struct hpsb_host *host)
goto fail;
return;
fail:
- HPSB_ERR("Failed to add sysfs attribute for host %d", host->id);
+ HPSB_ERR("Failed to add sysfs attribute");
}
@@ -679,8 +679,7 @@ static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
}
return;
fail:
- HPSB_ERR("Failed to add sysfs attributes for unit %s",
- ud->device.bus_id);
+ HPSB_ERR("Failed to add sysfs attribute");
}
@@ -1144,13 +1143,13 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
last_key_id = kv->key.id;
}
- if (ne->vendor_name_kv &&
- device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv))
- goto fail;
- return;
-fail:
- HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
- (unsigned long long)ne->guid);
+ if (ne->vendor_name_kv) {
+ int error = device_create_file(&ne->device,
+ &dev_attr_ne_vendor_name_kv);
+
+ if (error && error != -EEXIST)
+ HPSB_ERR("Failed to add sysfs attribute");
+ }
}
#ifdef CONFIG_HOTPLUG
@@ -1738,7 +1737,19 @@ exit:
return 0;
}
-int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
+/**
+ * nodemgr_for_each_host - call a function for each IEEE 1394 host
+ * @data: an address to supply to the callback
+ * @cb: function to call for each host
+ *
+ * Iterate the hosts, calling a given function with supplied data for each host.
+ * If the callback fails on a host, i.e. if it returns a non-zero value, the
+ * iteration is stopped.
+ *
+ * Return value: 0 on success, non-zero on failure (same as returned by last run
+ * of the callback).
+ */
+int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
{
struct class_device *cdev;
struct hpsb_host *host;
@@ -1748,7 +1759,7 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
list_for_each_entry(cdev, &hpsb_host_class.children, node) {
host = container_of(cdev, struct hpsb_host, class_dev);
- if ((error = cb(host, __data)))
+ if ((error = cb(host, data)))
break;
}
up(&hpsb_host_class.sem);
@@ -1756,7 +1767,7 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
return error;
}
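
A minimal sketch of the callback contract: count the hosts by returning 0 from every invocation; a non-zero return would stop the walk and be passed back to the caller:

static int example_count_one(struct hpsb_host *host, void *data)
{
	(*(unsigned int *)data)++;
	return 0;
}

static unsigned int example_count_hosts(void)
{
	unsigned int n = 0;

	nodemgr_for_each_host(&n, example_count_one);
	return n;
}
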
-/* The following four convenience functions use a struct node_entry
+/* The following two convenience functions use a struct node_entry
* for addressing a node on the bus. They are intended for use by any
* process context, not just the nodemgr thread, so we need to be a
* little careful when reading out the node ID and generation. The
@@ -1771,12 +1782,20 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
* ID's.
*/
-void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
+/**
+ * hpsb_node_fill_packet - fill some destination information into a packet
+ * @ne: destination node
+ * @packet: packet to fill in
+ *
+ * This will fill in the given, pre-initialised hpsb_packet with the current
+ * information from the node entry (host, node ID, bus generation number).
+ */
+void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
{
- pkt->host = ne->host;
- pkt->generation = ne->generation;
+ packet->host = ne->host;
+ packet->generation = ne->generation;
barrier();
- pkt->node_id = ne->nodeid;
+ packet->node_id = ne->nodeid;
}
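
A minimal sketch of addressing a pre-built packet at a node entry, assuming the packet came from one of the hpsb_make_* helpers; the fill is done right before sending so the node ID and generation are current:

static int example_send_to_node(struct node_entry *ne,
				struct hpsb_packet *packet)
{
	hpsb_node_fill_packet(ne, packet);	/* host, node ID, generation */
	return hpsb_send_packet(packet);
}
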
int hpsb_node_write(struct node_entry *ne, u64 addr,
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 4147303ad44..e7ac683c72c 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -153,30 +153,10 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
{
return ne->generation == get_hpsb_generation(ne->host);
}
-
-/*
- * This will fill in the given, pre-initialised hpsb_packet with the current
- * information from the node entry (host, node ID, generation number). It will
- * return false if the node owning the GUID is not accessible (and not modify
- * the hpsb_packet) and return true otherwise.
- *
- * Note that packet sending may still fail in hpsb_send_packet if a bus reset
- * happens while you are trying to set up the packet (due to obsolete generation
- * number). It will at least reliably fail so that you don't accidentally and
- * unknowingly send your packet to the wrong node.
- */
-void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
-
-int hpsb_node_read(struct node_entry *ne, u64 addr,
- quadlet_t *buffer, size_t length);
+void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
int hpsb_node_write(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length);
-int hpsb_node_lock(struct node_entry *ne, u64 addr,
- int extcode, quadlet_t *data, quadlet_t arg);
-
-/* Iterate the hosts, calling a given function with supplied data for each
- * host. */
-int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *));
+int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));
int init_ieee1394_nodemgr(void);
void cleanup_ieee1394_nodemgr(void);
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 06fac0d2126..5dadfd296f7 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -507,9 +507,8 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Set up self-id dma buffer */
reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
- /* enable self-id and phys */
- reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
- OHCI1394_LinkControl_RcvPhyPkt);
+ /* enable self-id */
+ reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);
/* Set the Config ROM mapping register */
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
@@ -518,9 +517,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
ohci->max_packet_size =
1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
- /* Don't accept phy packets into AR request context */
- reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
-
/* Clear the interrupt mask */
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
@@ -617,7 +613,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
#endif
PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
- "attempting to setting max_packet_size to 512 bytes");
+ "attempting to set max_packet_size to 512 bytes");
reg_write(ohci, OHCI1394_BusOptions,
(reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
ohci->max_packet_size = 512;
@@ -2377,6 +2373,7 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
if (event & OHCI1394_postedWriteErr) {
PRINT(KERN_ERR, "physical posted write error");
/* no recovery strategy yet, had to involve protocol drivers */
+ event &= ~OHCI1394_postedWriteErr;
}
if (event & OHCI1394_cycleTooLong) {
if(printk_ratelimit())
@@ -3658,6 +3655,7 @@ static struct pci_driver ohci1394_pci_driver = {
/* essentially the only purpose of this code is to allow another
module to hook into ohci's interrupt handler */
+/* returns zero if successful, one if DMA context is locked up */
int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
{
int i=0;
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
index fa05f113f7f..f1ad539e7c1 100644
--- a/drivers/ieee1394/ohci1394.h
+++ b/drivers/ieee1394/ohci1394.h
@@ -461,9 +461,7 @@ int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet);
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet);
-
-/* returns zero if successful, one if DMA context is locked up */
-int ohci1394_stop_context (struct ti_ohci *ohci, int reg, char *msg);
+int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
struct ti_ohci *ohci1394_get_struct(int card_num);
#endif
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bb897a37d9f..c6aefd9ad0e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -938,7 +938,8 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
int header_length = req->req.misc & 0xffff;
int expect_response = req->req.misc >> 16;
- if ((header_length > req->req.length) || (header_length < 12)) {
+ if (header_length > req->req.length || header_length < 12 ||
+ header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
req->req.error = RAW1394_ERROR_INVALID_ARG;
req->req.length = 0;
queue_complete_req(req);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 4edfff46b1e..4cb6fa2bcfb 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -59,8 +59,10 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
@@ -469,19 +471,13 @@ static void sbp2util_write_doorbell(struct work_struct *work)
static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
{
struct sbp2_fwhost_info *hi = lu->hi;
- int i;
- unsigned long flags, orbs;
struct sbp2_command_info *cmd;
+ int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
- orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
-
- spin_lock_irqsave(&lu->cmd_orb_lock, flags);
for (i = 0; i < orbs; i++) {
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd) {
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
return -ENOMEM;
- }
cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
&cmd->command_orb,
sizeof(struct sbp2_command_orb),
@@ -489,11 +485,10 @@ static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
cmd->sge_dma = dma_map_single(hi->host->device.parent,
&cmd->scatter_gather_element,
sizeof(cmd->scatter_gather_element),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
INIT_LIST_HEAD(&cmd->list);
list_add_tail(&cmd->list, &lu->cmd_orb_completed);
}
- spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return 0;
}
@@ -514,7 +509,7 @@ static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
DMA_TO_DEVICE);
dma_unmap_single(host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
kfree(cmd);
}
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
@@ -757,6 +752,11 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
SBP2_ERR("failed to register lower 4GB address range");
goto failed_alloc;
}
+#else
+ if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) {
+ SBP2_ERR("failed to set 4GB DMA mask");
+ goto failed_alloc;
+ }
#endif
}
@@ -865,11 +865,8 @@ static int sbp2_start_device(struct sbp2_lu *lu)
if (!lu->login_orb)
goto alloc_fail;
- if (sbp2util_create_command_orb_pool(lu)) {
- SBP2_ERR("sbp2util_create_command_orb_pool failed!");
- sbp2_remove_device(lu);
- return -ENOMEM;
- }
+ if (sbp2util_create_command_orb_pool(lu))
+ goto alloc_fail;
/* Wait a second before trying to log in. Previously logged in
* initiators need a chance to reconnect. */
@@ -1628,7 +1625,7 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
DMA_TO_DEVICE);
dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
/* check to see if there are any previous orbs to use */
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
@@ -1794,7 +1791,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
DMA_TO_DEVICE);
dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
/* Grab SCSI command pointers and check status. */
/*
* FIXME: If the src field in the status is 1, the ORB DMA must
@@ -1926,7 +1923,7 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
DMA_TO_DEVICE);
dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
sbp2util_mark_command_completed(lu, cmd);
if (cmd->Current_SCpnt) {
cmd->Current_SCpnt->result = status << 16;
@@ -2057,7 +2054,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
dma_sync_single_for_cpu(hi->host->device.parent,
cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
sbp2util_mark_command_completed(lu, cmd);
if (cmd->Current_SCpnt) {
cmd->Current_SCpnt->result = DID_ABORT << 16;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index 9ae842329bf..44402b9d82a 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -250,15 +250,15 @@ enum sbp2_dma_types {
/* Per SCSI command */
struct sbp2_command_info {
struct list_head list;
- struct sbp2_command_orb command_orb ____cacheline_aligned;
- dma_addr_t command_orb_dma ____cacheline_aligned;
+ struct sbp2_command_orb command_orb;
+ dma_addr_t command_orb_dma;
struct scsi_cmnd *Current_SCpnt;
void (*Current_done)(struct scsi_cmnd *);
/* Also need s/g structure for each sbp2 command */
struct sbp2_unrestricted_page_table
- scatter_gather_element[SG_ALL] ____cacheline_aligned;
- dma_addr_t sge_dma ____cacheline_aligned;
+ scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
+ dma_addr_t sge_dma;
void *sge_buffer;
dma_addr_t cmd_dma;
enum sbp2_dma_types dma_type;