Diffstat (limited to 'drivers/ide/ide-dma.c')
 -rw-r--r--  drivers/ide/ide-dma.c | 519 +++++-----------------------------------
 1 file changed, 66 insertions(+), 453 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index adc68275585..fffd11717b2 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -28,24 +28,13 @@
  * for supplying a Promise UDMA board & WD UDMA drive for this work!
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/ide.h>
-#include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 
-#include <asm/io.h>
-#include <asm/irq.h>
-
-static const struct drive_list_entry drive_whitelist [] = {
-
+static const struct drive_list_entry drive_whitelist[] = {
 	{ "Micropolis 2112A"	, NULL },
 	{ "CONNER CTMA 4000"	, NULL },
 	{ "CONNER CTT8000-A"	, NULL },
@@ -53,8 +42,7 @@ static const struct drive_list_entry drive_whitelist [] = {
 	{ NULL			, NULL }
 };
 
-static const struct drive_list_entry drive_blacklist [] = {
-
+static const struct drive_list_entry drive_blacklist[] = {
 	{ "WDC AC11000H"	, NULL },
 	{ "WDC AC22100H"	, NULL },
 	{ "WDC AC32500H"	, NULL },
@@ -94,11 +82,11 @@ static const struct drive_list_entry drive_blacklist [] = {
  * ide_dma_intr - IDE DMA interrupt handler
  * @drive: the drive the interrupt is for
  *
- * Handle an interrupt completing a read/write DMA transfer on an 
+ * Handle an interrupt completing a read/write DMA transfer on an
  * IDE device
  */
- 
-ide_startstop_t ide_dma_intr (ide_drive_t *drive)
+
+ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	u8 stat = 0, dma_stat = 0;
@@ -106,22 +94,21 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 	dma_stat = hwif->dma_ops->dma_end(drive);
 	stat = hwif->tp_ops->read_status(hwif);
 
-	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
+	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
 		if (!dma_stat) {
-			struct request *rq = HWGROUP(drive)->rq;
+			struct request *rq = hwif->hwgroup->rq;
 
 			task_end_request(drive, rq, stat);
 			return ide_stopped;
 		}
-		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
-		       drive->name, dma_stat);
+		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
+			drive->name, __func__, dma_stat);
 	}
 	return ide_error(drive, "dma_intr", stat);
 }
-
 EXPORT_SYMBOL_GPL(ide_dma_intr);
 
-static int ide_dma_good_drive(ide_drive_t *drive)
+int ide_dma_good_drive(ide_drive_t *drive)
 {
 	return ide_in_drive_list(drive->id, drive_whitelist);
 }
@@ -139,7 +126,7 @@ static int ide_dma_good_drive(ide_drive_t *drive)
 
 int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = HWIF(drive);
+	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
 
 	ide_map_sg(drive, rq);
@@ -152,106 +139,8 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
 			  hwif->sg_dma_direction);
 }
-
 EXPORT_SYMBOL_GPL(ide_build_sglist);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-/**
- * ide_build_dmatable - build IDE DMA table
- *
- * ide_build_dmatable() prepares a dma request. We map the command
- * to get the pci bus addresses of the buffers and then build up
- * the PRD table that the IDE layer wants to be fed. The code
- * knows about the 64K wrap bug in the CS5530.
- *
- * Returns the number of built PRD entries if all went okay,
- * returns 0 otherwise.
- *
- * May also be invoked from trm290.c
- */
-
-int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	__le32 *table = (__le32 *)hwif->dmatable_cpu;
-	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
-	unsigned int count = 0;
-	int i;
-	struct scatterlist *sg;
-
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-
-	if (!i)
-		return 0;
-
-	sg = hwif->sg_table;
-	while (i) {
-		u32 cur_addr;
-		u32 cur_len;
-
-		cur_addr = sg_dma_address(sg);
-		cur_len = sg_dma_len(sg);
-
-		/*
-		 * Fill in the dma table, without crossing any 64kB boundaries.
-		 * Most hardware requires 16-bit alignment of all blocks,
-		 * but the trm290 requires 32-bit alignment.
-		 */
-
-		while (cur_len) {
-			if (count++ >= PRD_ENTRIES) {
-				printk(KERN_ERR "%s: DMA table too small\n", drive->name);
-				goto use_pio_instead;
-			} else {
-				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);
-
-				if (bcount > cur_len)
-					bcount = cur_len;
-				*table++ = cpu_to_le32(cur_addr);
-				xcount = bcount & 0xffff;
-				if (is_trm290)
-					xcount = ((xcount >> 2) - 1) << 16;
-				if (xcount == 0x0000) {
-					/*
-					 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
-					 * but at least one (e.g. CS5530) misinterprets it as zero (!).
-					 * So here we break the 64KB entry into two 32KB entries instead.
-					 */
-					if (count++ >= PRD_ENTRIES) {
-						printk(KERN_ERR "%s: DMA table too small\n", drive->name);
-						goto use_pio_instead;
-					}
-					*table++ = cpu_to_le32(0x8000);
-					*table++ = cpu_to_le32(cur_addr + 0x8000);
-					xcount = 0x8000;
-				}
-				*table++ = cpu_to_le32(xcount);
-				cur_addr += bcount;
-				cur_len -= bcount;
-			}
-		}
-
-		sg = sg_next(sg);
-		i--;
-	}
-
-	if (count) {
-		if (!is_trm290)
-			*--table |= cpu_to_le32(0x80000000);
-		return count;
-	}
-
-	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);
-
-use_pio_instead:
-	ide_destroy_dmatable(drive);
-
-	return 0; /* revert to PIO for this request */
-}
-
-EXPORT_SYMBOL_GPL(ide_build_dmatable);
-#endif
-
 /**
  * ide_destroy_dmatable - clean up DMA mapping
  * @drive: The drive to unmap
@@ -262,146 +151,30 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable);
  * an oops as only one mapping can be live for each target at a given
  * time.
  */
- 
-void ide_destroy_dmatable (ide_drive_t *drive)
+
+void ide_destroy_dmatable(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 
 	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
 		     hwif->sg_dma_direction);
 }
-
 EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-/**
- * config_drive_for_dma - attempt to activate IDE DMA
- * @drive: the drive to place in DMA mode
- *
- * If the drive supports at least mode 2 DMA or UDMA of any kind
- * then attempt to place it into DMA mode. Drives that are known to
- * support DMA but predate the DMA properties or that are known
- * to have DMA handling bugs are also set up appropriately based
- * on the good/bad drive lists.
- */
-
-static int config_drive_for_dma (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct hd_driveid *id = drive->id;
-
-	if (drive->media != ide_disk) {
-		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
-			return 0;
-	}
-
-	/*
-	 * Enable DMA on any drive that has
-	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
-	 */
-	if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
-		return 1;
-
-	/*
-	 * Enable DMA on any drive that has mode2 DMA
-	 * (multi or single) enabled
-	 */
-	if (id->field_valid & 2)	/* regular DMA */
-		if ((id->dma_mword & 0x404) == 0x404 ||
-		    (id->dma_1word & 0x404) == 0x404)
-			return 1;
-
-	/* Consult the list of known "good" drives */
-	if (ide_dma_good_drive(drive))
-		return 1;
-
-	return 0;
-}
-
-/**
- * dma_timer_expiry - handle a DMA timeout
- * @drive: Drive that timed out
- *
- * An IDE DMA transfer timed out. In the event of an error we ask
- * the driver to resolve the problem, if a DMA transfer is still
- * in progress we continue to wait (arguably we need to add a
- * secondary 'I don't care what the drive thinks' timeout here)
- * Finally if we have an interrupt we let it complete the I/O.
- * But only one time - we clear expiry and if it's still not
- * completed after WAIT_CMD, we error and retry in PIO.
- * This can occur if an interrupt is lost or due to hang or bugs.
- */
-
-static int dma_timer_expiry (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
-		drive->name, dma_stat);
-
-	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
-		return WAIT_CMD;
-
-	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */
-
-	/* 1 dmaing, 2 error, 4 intr */
-	if (dma_stat & 2)	/* ERROR */
-		return -1;
-
-	if (dma_stat & 1)	/* DMAing */
-		return WAIT_CMD;
-
-	if (dma_stat & 4)	/* Got an Interrupt */
-		return WAIT_CMD;
-
-	return 0;	/* Status is unknown -- reset the bus */
-}
-
-/**
- * ide_dma_host_set - Enable/disable DMA on a host
- * @drive: drive to control
- *
- * Enable/disable DMA on an IDE controller following generic
- * bus-mastering IDE controller behaviour.
- */
-
-void ide_dma_host_set(ide_drive_t *drive, int on)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 unit = (drive->select.b.unit & 0x01);
-	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	if (on)
-		dma_stat |= (1 << (5 + unit));
-	else
-		dma_stat &= ~(1 << (5 + unit));
-
-	if (hwif->host_flags & IDE_HFLAG_MMIO)
-		writeb(dma_stat,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
-	else
-		outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
-}
-
-EXPORT_SYMBOL_GPL(ide_dma_host_set);
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
 /**
  * ide_dma_off_quietly - Generic DMA kill
  * @drive: drive to control
  *
- * Turn off the current DMA on this IDE controller. 
+ * Turn off the current DMA on this IDE controller.
  */
 
 void ide_dma_off_quietly(ide_drive_t *drive)
 {
-	drive->using_dma = 0;
+	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
 	ide_toggle_bounce(drive, 0);
 
 	drive->hwif->dma_ops->dma_host_set(drive, 0);
 }
-
 EXPORT_SYMBOL(ide_dma_off_quietly);
 
 /**
@@ -417,7 +190,6 @@ void ide_dma_off(ide_drive_t *drive)
 	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
 	ide_dma_off_quietly(drive);
 }
-
 EXPORT_SYMBOL(ide_dma_off);
 
 /**
@@ -429,179 +201,24 @@ EXPORT_SYMBOL(ide_dma_off);
 
 void ide_dma_on(ide_drive_t *drive)
 {
-	drive->using_dma = 1;
+	drive->dev_flags |= IDE_DFLAG_USING_DMA;
 	ide_toggle_bounce(drive, 1);
 
 	drive->hwif->dma_ops->dma_host_set(drive, 1);
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-/**
- * ide_dma_setup - begin a DMA phase
- * @drive: target device
- *
- * Build an IDE DMA PRD (IDE speak for scatter gather table)
- * and then set up the DMA transfer registers for a device
- * that follows generic IDE PCI DMA behaviour. Controllers can
- * override this function if they need to
- *
- * Returns 0 on success. If a PIO fallback is required then 1
- * is returned.
- */
-
-int ide_dma_setup(ide_drive_t *drive)
+int __ide_dma_bad_drive(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = HWGROUP(drive)->rq;
-	unsigned int reading;
-	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-	u8 dma_stat;
-
-	if (rq_data_dir(rq))
-		reading = 0;
-	else
-		reading = 1 << 3;
-
-	/* fall back to pio! */
-	if (!ide_build_dmatable(drive, rq)) {
-		ide_map_sg(drive, rq);
-		return 1;
-	}
-
-	/* PRD table */
-	if (hwif->host_flags & IDE_HFLAG_MMIO)
-		writel(hwif->dmatable_dma,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
-	else
-		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
-
-	/* specify r/w */
-	if (mmio)
-		writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-	else
-		outb(reading, hwif->dma_base + ATA_DMA_CMD);
-
-	/* read DMA status for INTR & ERROR flags */
-	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	/* clear INTR & ERROR flags */
-	if (mmio)
-		writeb(dma_stat | 6,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
-	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
-
-	drive->waiting_for_dma = 1;
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ide_dma_setup);
-
-void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
-{
-	/* issue cmd to drive */
-	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
-}
-EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
-
-void ide_dma_start(ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	u8 dma_cmd;
-
-	/* Note that this is done *after* the cmd has
-	 * been issued to the drive, as per the BM-IDE spec.
-	 * The Promise Ultra33 doesn't work correctly when
-	 * we do this part before issuing the drive cmd.
-	 */
-	if (hwif->host_flags & IDE_HFLAG_MMIO) {
-		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* start DMA */
-		writeb(dma_cmd | 1,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-	} else {
-		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
-	}
-
-	hwif->dma = 1;
-	wmb();
-}
-
-EXPORT_SYMBOL_GPL(ide_dma_start);
-
-/* returns 1 on error, 0 otherwise */
-int __ide_dma_end (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-	u8 dma_stat = 0, dma_cmd = 0;
-
-	drive->waiting_for_dma = 0;
-
-	if (mmio) {
-		/* get DMA command mode */
-		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* stop DMA */
-		writeb(dma_cmd & ~1,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-	} else {
-		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
-	}
-
-	/* get DMA status */
-	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	if (mmio)
-		/* clear the INTR & ERROR bits */
-		writeb(dma_stat | 6,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
-	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
-
-	/* purge DMA mappings */
-	ide_destroy_dmatable(drive);
-	/* verify good DMA status */
-	hwif->dma = 0;
-	wmb();
-	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
-}
-
-EXPORT_SYMBOL(__ide_dma_end);
-
-/* returns 1 if dma irq issued, 0 otherwise */
-int ide_dma_test_irq(ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	/* return 1 if INTR asserted */
-	if ((dma_stat & 4) == 4)
-		return 1;
-
-	if (!drive->waiting_for_dma)
-		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
-			drive->name, __func__);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_test_irq);
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-int __ide_dma_bad_drive (ide_drive_t *drive)
-{
-	struct hd_driveid *id = drive->id;
+	u16 *id = drive->id;
 
 	int blacklist = ide_in_drive_list(id, drive_blacklist);
 
 	if (blacklist) {
 		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
-				    drive->name, id->model);
+				    drive->name, (char *)&id[ATA_ID_PROD]);
 		return blacklist;
 	}
 	return 0;
 }
-
 EXPORT_SYMBOL(__ide_dma_bad_drive);
 
 static const u8 xfer_mode_bases[] = {
@@ -612,21 +229,21 @@ static const u8 xfer_mode_bases[] = {
 
 static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 {
-	struct hd_driveid *id = drive->id;
+	u16 *id = drive->id;
 	ide_hwif_t *hwif = drive->hwif;
 	const struct ide_port_ops *port_ops = hwif->port_ops;
 	unsigned int mask = 0;
 
-	switch(base) {
+	switch (base) {
 	case XFER_UDMA_0:
-		if ((id->field_valid & 4) == 0)
+		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
 			break;
 
 		if (port_ops && port_ops->udma_filter)
 			mask = port_ops->udma_filter(drive);
 		else
 			mask = hwif->ultra_mask;
-		mask &= id->dma_ultra;
+		mask &= id[ATA_ID_UDMA_MODES];
 
 		/*
 		 * avoid false cable warning from eighty_ninty_three()
@@ -637,19 +254,19 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 		}
 		break;
 	case XFER_MW_DMA_0:
-		if ((id->field_valid & 2) == 0)
+		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
 			break;
 		if (port_ops && port_ops->mdma_filter)
 			mask = port_ops->mdma_filter(drive);
 		else
 			mask = hwif->mwdma_mask;
-		mask &= id->dma_mword;
+		mask &= id[ATA_ID_MWDMA_MODES];
 		break;
 	case XFER_SW_DMA_0:
-		if (id->field_valid & 2) {
-			mask = id->dma_1word & hwif->swdma_mask;
-		} else if (id->tDMA) {
-			u8 mode = id->tDMA;
+		if (id[ATA_ID_FIELD_VALID] & 2) {
+			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
+		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
+			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
 
 			/*
 			 * if the mode is valid convert it to the mask
@@ -706,7 +323,8 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
 		/*
 		 * is this correct?
 		 */
-		if (ide_dma_good_drive(drive) && drive->id->eide_dma_time < 150)
+		if (ide_dma_good_drive(drive) &&
+		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
 			mode = XFER_MW_DMA_1;
 	}
 
@@ -717,7 +335,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
 
 	return mode;
 }
-
 EXPORT_SYMBOL_GPL(ide_find_dma_mode);
 
 static int ide_tune_dma(ide_drive_t *drive)
@@ -725,7 +342,8 @@ static int ide_tune_dma(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	u8 speed;
 
-	if (drive->nodma || (drive->id->capability & 1) == 0)
+	if (ata_id_has_dma(drive->id) == 0 ||
+	    (drive->dev_flags & IDE_DFLAG_NODMA))
 		return 0;
 
 	/* consult the list of known "bad" drives */
@@ -767,13 +385,15 @@ static int ide_dma_check(ide_drive_t *drive)
 
 int ide_id_dma_bug(ide_drive_t *drive)
 {
-	struct hd_driveid *id = drive->id;
+	u16 *id = drive->id;
 
-	if (id->field_valid & 4) {
-		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
+	if (id[ATA_ID_FIELD_VALID] & 4) {
+		if ((id[ATA_ID_UDMA_MODES] >> 8) &&
+		    (id[ATA_ID_MWDMA_MODES] >> 8))
 			goto err_out;
-	} else if (id->field_valid & 2) {
-		if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
+	} else if (id[ATA_ID_FIELD_VALID] & 2) {
+		if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
+		    (id[ATA_ID_SWDMA_MODES] >> 8))
 			goto err_out;
 	}
 	return 0;
@@ -823,66 +443,59 @@ void ide_check_dma_crc(ide_drive_t *drive)
 		ide_dma_on(drive);
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-void ide_dma_lost_irq (ide_drive_t *drive)
+void ide_dma_lost_irq(ide_drive_t *drive)
 {
-	printk("%s: DMA interrupt recovery\n", drive->name);
+	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
 }
+EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
 
-EXPORT_SYMBOL(ide_dma_lost_irq);
-
-void ide_dma_timeout (ide_drive_t *drive)
+void ide_dma_timeout(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = HWIF(drive);
+	ide_hwif_t *hwif = drive->hwif;
 
 	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
 
 	if (hwif->dma_ops->dma_test_irq(drive))
 		return;
 
+	ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
+
 	hwif->dma_ops->dma_end(drive);
 }
-
-EXPORT_SYMBOL(ide_dma_timeout);
+EXPORT_SYMBOL_GPL(ide_dma_timeout);
 
 void ide_release_dma_engine(ide_hwif_t *hwif)
 {
 	if (hwif->dmatable_cpu) {
-		struct pci_dev *pdev = to_pci_dev(hwif->dev);
+		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
-				    hwif->dmatable_cpu, hwif->dmatable_dma);
+		dma_free_coherent(hwif->dev, prd_size,
+				  hwif->dmatable_cpu, hwif->dmatable_dma);
 		hwif->dmatable_cpu = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
 
 int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
-	struct pci_dev *pdev = to_pci_dev(hwif->dev);
+	int prd_size;
 
-	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
-						  PRD_ENTRIES * PRD_BYTES,
-						  &hwif->dmatable_dma);
+	if (hwif->prd_max_nents == 0)
+		hwif->prd_max_nents = PRD_ENTRIES;
+	if (hwif->prd_ent_size == 0)
+		hwif->prd_ent_size = PRD_BYTES;
 
-	if (hwif->dmatable_cpu)
-		return 0;
+	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
+	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+						&hwif->dmatable_dma,
+						GFP_ATOMIC);
+	if (hwif->dmatable_cpu == NULL) {
+		printk(KERN_ERR "%s: unable to allocate PRD table\n",
 			hwif->name);
+		return -ENOMEM;
+	}
 
-	return 1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
-
-const struct ide_dma_ops sff_dma_ops = {
-	.dma_host_set		= ide_dma_host_set,
-	.dma_setup		= ide_dma_setup,
-	.dma_exec_cmd		= ide_dma_exec_cmd,
-	.dma_start		= ide_dma_start,
-	.dma_end		= __ide_dma_end,
-	.dma_test_irq		= ide_dma_test_irq,
-	.dma_timeout		= ide_dma_timeout,
-	.dma_lost_irq		= ide_dma_lost_irq,
-};
-EXPORT_SYMBOL_GPL(sff_dma_ops);
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
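
Editor's note on the removed ide_build_dmatable(): the loop deleted above implements the SFF-8038i PRD-table rule that no physical region descriptor may cross a 64 KiB boundary, and that a full 64 KiB entry (encoded as a byte count of 0x0000) must be split into two 32 KiB entries for chipsets such as the CS5530 that misread 0x0000 as zero length. The SFF-specific helpers removed here were not dropped from the kernel; upstream they moved out of this file (into drivers/ide/ide-dma-sff.c). Below is a minimal, standalone C sketch of just the splitting rule; struct prd, PRD_MAX and build_prd() are invented for illustration and are not kernel API:

#include <stdint.h>
#include <stdio.h>

#define PRD_MAX 8	/* arbitrary table size for the demo */

struct prd { uint32_t addr; uint32_t count; };

/* Returns the number of entries built, or 0 if the table would overflow
 * (the kernel code falls back to PIO in that case). */
static int build_prd(uint32_t cur_addr, uint32_t cur_len,
		     struct prd *table, int max)
{
	int count = 0;

	while (cur_len) {
		/* bytes left before the next 64 KiB boundary */
		uint32_t bcount = 0x10000 - (cur_addr & 0xffff);

		if (bcount > cur_len)
			bcount = cur_len;
		if (count >= max)
			return 0;

		if ((bcount & 0xffff) == 0) {
			/* a full 64 KiB entry would be encoded as 0x0000,
			 * which e.g. the CS5530 misreads as zero, so emit
			 * two 32 KiB entries instead */
			if (count + 2 > max)
				return 0;
			table[count++] = (struct prd){ cur_addr, 0x8000 };
			table[count++] = (struct prd){ cur_addr + 0x8000, 0x8000 };
		} else {
			table[count++] = (struct prd){ cur_addr, bcount };
		}
		cur_addr += bcount;
		cur_len -= bcount;
	}
	return count;
}

int main(void)
{
	struct prd table[PRD_MAX];
	/* a 96 KiB buffer starting 0x4000 below a 64 KiB boundary:
	 * yields one 16 KiB entry, two 32 KiB entries, one 16 KiB entry */
	int n = build_prd(0x1000c000, 96 * 1024, table, PRD_MAX);

	for (int i = 0; i < n; i++)
		printf("PRD %d: addr=0x%08x len=0x%05x\n", i,
		       (unsigned)table[i].addr, (unsigned)table[i].count);
	return 0;
}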
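Several hunks also convert drive->id from the old struct hd_driveid to the raw identify-data word array indexed by the ATA_ID_* constants from <linux/ata.h> (word 53 = ATA_ID_FIELD_VALID, word 88 = ATA_ID_UDMA_MODES, and so on). A condensed kernel-context sketch of how the converted accesses read; example_udma_selected() is an invented helper, the two id words are the ones used in the diff:

static u8 example_udma_selected(ide_drive_t *drive)
{
	u16 *id = drive->id;

	/* word 53 bit 2: word 88 (UDMA modes) contains valid data */
	if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
		return 0;

	/* word 88, high byte: the currently selected UDMA mode bits
	 * (the same test the removed config_drive_for_dma() made via
	 * id->dma_ultra) */
	return (id[ATA_ID_UDMA_MODES] >> 8) & 0x7f;
}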
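Finally, the allocation hunk trades the PCI-only pci_alloc_consistent()/pci_free_consistent() pair for the generic DMA API, so non-PCI hosts can share the code, and lets a host driver override the PRD table geometry before calling ide_allocate_dma_engine(); hosts that leave prd_max_nents and prd_ent_size at zero keep the PRD_ENTRIES * PRD_BYTES default. A hedged sketch of how a hypothetical driver might use the new fields; example_init_dma() and the chosen numbers are invented, only the hwif fields and the allocator are from the diff:

static int example_init_dma(ide_hwif_t *hwif)
{
	/* a controller with 16-byte descriptors and a 128-entry table
	 * sets its geometry before allocating */
	hwif->prd_max_nents = 128;
	hwif->prd_ent_size = 16;

	/* allocation failure now surfaces as -ENOMEM instead of 1 */
	return ide_allocate_dma_engine(hwif);
}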