-rw-r--r--	drivers/firewire/fw-iso.c	| 21
-rw-r--r--	drivers/firewire/fw-ohci.c	| 23
-rw-r--r--	drivers/firewire/fw-sbp2.c	| 28
3 files changed, 47 insertions, 25 deletions
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 024fab4ef99..6481e3df2c9 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -33,7 +33,7 @@ setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
 		 enum dma_data_direction direction)
 {
 	struct page *page;
-	int i;
+	int i, j;
 	void *p;
 
 	ctx->buffer_size = PAGE_ALIGN(size);
@@ -42,24 +42,33 @@ setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
 
 	ctx->buffer = vmalloc_32_user(ctx->buffer_size);
 	if (ctx->buffer == NULL)
-		return -ENOMEM;
+		goto fail_buffer_alloc;
 
 	ctx->page_count = ctx->buffer_size >> PAGE_SHIFT;
 	ctx->pages = kzalloc(ctx->page_count * sizeof(ctx->pages[0]),
 			     GFP_KERNEL);
-	if (ctx->pages == NULL) {
-		vfree(ctx->buffer);
-		return -ENOMEM;
-	}
+	if (ctx->pages == NULL)
+		goto fail_pages_alloc;
 
 	p = ctx->buffer;
 	for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) {
 		page = vmalloc_to_page(p);
 		ctx->pages[i] = dma_map_page(ctx->card->device, page,
 					     0, PAGE_SIZE, direction);
+		if (dma_mapping_error(ctx->pages[i]))
+			goto fail_mapping;
 	}
 
 	return 0;
+
+ fail_mapping:
+	for (j = 0; j < i; j++)
+		dma_unmap_page(ctx->card->device, ctx->pages[j],
+			       PAGE_SIZE, DMA_TO_DEVICE);
+ fail_pages_alloc:
+	vfree(ctx->buffer);
+ fail_buffer_alloc:
+	return -ENOMEM;
 }
 
 static void destroy_iso_buffer(struct fw_iso_context *ctx)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index e6fa3496183..4512edba6cb 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -431,7 +431,7 @@ at_context_setup_packet(struct at_context *ctx, struct list_head *list)
 					     packet->payload,
 					     packet->payload_length,
 					     DMA_TO_DEVICE);
-		if (packet->payload_bus == 0) {
+		if (dma_mapping_error(packet->payload_bus)) {
 			complete_transmission(packet, RCODE_SEND_ERROR, list);
 			return;
 		}
@@ -590,7 +590,7 @@ at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
 	ctx->descriptor_bus =
 		dma_map_single(ohci->card.device, &ctx->d,
 			       sizeof ctx->d, DMA_TO_DEVICE);
-	if (ctx->descriptor_bus == 0)
+	if (dma_mapping_error(ctx->descriptor_bus))
 		return -ENOMEM;
 
 	ctx->regs = regs;
@@ -1159,16 +1159,14 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);
 
 	ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
-	if (ctx->buffer == NULL) {
-		spin_lock_irqsave(&ohci->lock, flags);
-		*mask |= 1 << index;
-		spin_unlock_irqrestore(&ohci->lock, flags);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ctx->buffer == NULL)
+		goto buffer_alloc_failed;
 
 	ctx->buffer_bus =
 		dma_map_single(card->device, ctx->buffer,
 			       ISO_BUFFER_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->buffer_bus))
+		goto buffer_map_failed;
 
 	ctx->head_descriptor = ctx->buffer;
 	ctx->prev_descriptor = ctx->buffer;
@@ -1187,6 +1185,15 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	ctx->head_descriptor++;
 
 	return &ctx->base;
+
+ buffer_map_failed:
+	kfree(ctx->buffer);
+ buffer_alloc_failed:
+	spin_lock_irqsave(&ohci->lock, flags);
+	*mask |= 1 << index;
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	return ERR_PTR(-ENOMEM);
 }
 
 static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index fa59e59766e..2259e222586 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -411,13 +411,13 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
-	if (orb->base.request_bus == 0)
+	if (dma_mapping_error(orb->base.request_bus))
 		goto out;
 
 	orb->response_bus =
 		dma_map_single(device->card->device, &orb->response,
 			       sizeof orb->response, DMA_FROM_DEVICE);
-	if (orb->response_bus == 0)
+	if (dma_mapping_error(orb->response_bus))
 		goto out;
 
 	orb->request.response.high = 0;
@@ -963,22 +963,20 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * transfer direction not handled. */
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
-		cmd->result = DID_ERROR << 16;
-		done(cmd);
-		return 0;
+		goto fail_alloc;
 	}
 
 	orb = kzalloc(sizeof *orb, GFP_ATOMIC);
 	if (orb == NULL) {
 		fw_notify("failed to alloc orb\n");
-		cmd->result = DID_NO_CONNECT << 16;
-		done(cmd);
-		return 0;
+		goto fail_alloc;
 	}
 
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto fail_mapping;
 
 	orb->unit = unit;
 	orb->done = done;
@@ -1009,9 +1007,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 		 * could we get the scsi or blk layer to do that by
 		 * reporting our max supported block size? */
 		fw_error("command > 64k\n");
-		cmd->result = DID_ERROR << 16;
-		done(cmd);
-		return 0;
+		goto fail_bufflen;
 	} else if (cmd->request_bufflen > 0) {
 		sbp2_command_orb_map_buffer(orb);
 	}
@@ -1028,6 +1024,16 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 			  sd->command_block_agent_address + SBP2_ORB_POINTER);
 
 	return 0;
+
+ fail_bufflen:
+	dma_unmap_single(device->card->device, orb->base.request_bus,
+			 sizeof orb->request, DMA_TO_DEVICE);
+ fail_mapping:
+	kfree(orb);
+ fail_alloc:
+	cmd->result = DID_ERROR << 16;
+	done(cmd);
+	return 0;
 }
 
 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
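
The change is mechanical throughout: every dma_map_single()/dma_map_page() result is now checked with dma_mapping_error(), and each function unwinds through goto labels that release only what was acquired before the failure. As a rough illustration of that unwind pattern, here is a minimal user-space sketch in plain C; setup_context(), its malloc-backed "pages", and the sizes are hypothetical stand-ins for ctx->buffer, ctx->pages and the DMA mappings, not part of the driver code.

	#include <errno.h>
	#include <stdlib.h>

	/*
	 * Hypothetical analogy of the goto-unwind pattern the patch applies:
	 * each failure label releases exactly what was acquired before it,
	 * in reverse order, and the function exits through one error path.
	 */
	static int setup_context(size_t size, size_t page_count)
	{
		void *buffer, **pages;
		size_t i, j;

		buffer = malloc(size);
		if (buffer == NULL)
			goto fail_buffer_alloc;

		pages = calloc(page_count, sizeof(pages[0]));
		if (pages == NULL)
			goto fail_pages_alloc;

		for (i = 0; i < page_count; i++) {
			pages[i] = malloc(4096);	/* stands in for dma_map_page() */
			if (pages[i] == NULL)
				goto fail_mapping;
		}

		/* Success: the driver keeps the resources live; this demo
		 * just tears them down again before returning. */
		for (i = 0; i < page_count; i++)
			free(pages[i]);
		free(pages);
		free(buffer);
		return 0;

	 fail_mapping:
		/* Only the first 'i' per-page resources exist; release them. */
		for (j = 0; j < i; j++)
			free(pages[j]);
		free(pages);
	 fail_pages_alloc:
		free(buffer);
	 fail_buffer_alloc:
		return -ENOMEM;
	}

	int main(void)
	{
		return setup_context(64 * 1024, 16) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
	}

Falling through from one label to the next keeps the release order the exact reverse of the acquisition order, which is why the labels sit between the cleanup steps rather than in front of one combined block.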