path: root/drivers/firewire/core-iso.c
author    Stefan Richter <stefanr@s5r6.in-berlin.de>  2009-06-20 13:23:59 +0200
committer Stefan Richter <stefanr@s5r6.in-berlin.de>  2009-06-25 19:42:36 +0200
commit    6fdc03709433ccc2005f0f593ae9d9dd04f7b485 (patch)
tree      e42e7304084b1b6420d456fbd2a5622e93327a74 /drivers/firewire/core-iso.c
parent    0c53decdd0a9f9c459ccabe0b5f79660bde5375b (diff)
firewire: core: do not DMA-map stack addresses
The DMA mapping API cannot map on-stack addresses, as explained in
Documentation/DMA-mapping.txt.

Convert the two cases of on-stack packet payload buffers in firewire-core
(payload of lock requests in the bus manager work and in iso resource
management) to slab-allocated memory.

There are a number of on-stack buffers for quadlet write or quadlet read
requests in firewire-core and firewire-sbp2. These are harmless; they are
copied to/from card driver internal DMA buffers since quadlet payloads are
inlined with packet headers.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
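For context, a minimal sketch (not part of this patch) of the difference the
commit message describes: an on-stack payload buffer must not be handed to the
DMA mapping API, whereas a slab-allocated buffer may be. The function name,
device pointer, and transaction step below are illustrative assumptions only.

/*
 * Illustrative only: contrast an on-stack buffer (invalid for DMA mapping)
 * with a slab-allocated one that dma_map_single() can handle.
 */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

static int send_lock_request(struct device *dev)
{
	__be32 *data;
	dma_addr_t bus_addr;

	/* WRONG: __be32 data[2]; on the stack cannot be DMA-mapped. */

	/* OK: slab memory is valid for the DMA mapping API. */
	data = kmalloc(2 * sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	bus_addr = dma_map_single(dev, data, 2 * sizeof(*data),
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, bus_addr)) {
		kfree(data);
		return -ENOMEM;
	}

	/* ... perform the lock transaction using bus_addr ... */

	dma_unmap_single(dev, bus_addr, 2 * sizeof(*data), DMA_BIDIRECTIONAL);
	kfree(data);
	return 0;
}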
Diffstat (limited to 'drivers/firewire/core-iso.c')
-rw-r--r--  drivers/firewire/core-iso.c  24
1 file changed, 13 insertions, 11 deletions
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 166f19c6d38..110e731f557 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop);
*/
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
- int bandwidth, bool allocate)
+ int bandwidth, bool allocate, __be32 data[2])
{
- __be32 data[2];
int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
/*
@@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
}
static int manage_channel(struct fw_card *card, int irm_id, int generation,
- u32 channels_mask, u64 offset, bool allocate)
+ u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
- __be32 data[2], c, all, old;
+ __be32 c, all, old;
int i, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
}
static void deallocate_channel(struct fw_card *card, int irm_id,
- int generation, int channel)
+ int generation, int channel, __be32 buffer[2])
{
u32 mask;
u64 offset;
@@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
- manage_channel(card, irm_id, generation, mask, offset, false);
+ manage_channel(card, irm_id, generation, mask, offset, false, buffer);
}
/**
@@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
*/
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
- bool allocate)
+ bool allocate, __be32 buffer[2])
{
u32 channels_hi = channels_mask; /* channels 31...0 */
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
@@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
- CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
+ allocate, buffer);
if (channels_lo && c < 0) {
c = manage_channel(card, irm_id, generation, channels_lo,
- CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
+ allocate, buffer);
if (c >= 0)
c += 32;
}
@@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (*bandwidth == 0)
return;
- ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+ ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
+ allocate, buffer);
if (ret < 0)
*bandwidth = 0;
if (allocate && ret < 0 && c >= 0) {
- deallocate_channel(card, irm_id, generation, c);
+ deallocate_channel(card, irm_id, generation, c, buffer);
*channel = ret;
}
}
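For callers of the changed interface, a hedged illustration of what the new
last parameter implies: the two-quadlet payload buffer used for the IRM lock
requests is now supplied by the caller from slab memory rather than living on
the callee's stack. The function and variable names below are hypothetical and
not taken from this commit.

/*
 * Hypothetical caller sketch: allocate the scratch buffer from the slab
 * and pass it through to fw_iso_resource_manage() (new signature).
 */
static int allocate_iso_resource(struct fw_card *card, int generation,
				 u64 channels_mask, int *channel, int *bandwidth)
{
	__be32 *buffer;

	buffer = kmalloc(2 * sizeof(*buffer), GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	fw_iso_resource_manage(card, generation, channels_mask,
			       channel, bandwidth, true, buffer);
	kfree(buffer);

	/* The core reports failure through *channel / *bandwidth. */
	return 0;
}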