/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

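/*
 * Drives on this whitelist are known to handle DMA well; the list is
 * consulted by ide_dma_good_drive() below (used e.g. for the Acorn
 * special case in ide_find_dma_mode()).
 */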
static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A"	,       NULL		},
	{ "CONNER CTMA 4000"	,       NULL		},
	{ "CONNER CTT8000-A"	,       NULL		},
	{ "ST34342A"		,	NULL		},
	{ NULL			,	NULL		}
};

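/*
 * Drives on this blacklist have broken DMA, either unconditionally or
 * for the listed firmware revision; __ide_dma_bad_drive() checks it
 * before DMA is enabled.
 */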
static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H"	,	NULL 		},
	{ "WDC AC22100H"	,	NULL 		},
	{ "WDC AC32500H"	,	NULL 		},
	{ "WDC AC33100H"	,	NULL 		},
	{ "WDC AC31600H"	,	NULL 		},
	{ "WDC AC32100H"	,	"24.09P07"	},
	{ "WDC AC23200L"	,	"21.10N21"	},
	{ "Compaq CRD-8241B"	,	NULL 		},
	{ "CRD-8400B"		,	NULL 		},
	{ "CRD-8480B",			NULL 		},
	{ "CRD-8482B",			NULL 		},
	{ "CRD-84"		,	NULL 		},
	{ "SanDisk SDP3B"	,	NULL 		},
	{ "SanDisk SDP3B-64"	,	NULL 		},
	{ "SANYO CD-ROM CRD"	,	NULL 		},
	{ "HITACHI CDR-8"	,	NULL 		},
	{ "HITACHI CDR-8335"	,	NULL 		},
	{ "HITACHI CDR-8435"	,	NULL 		},
	{ "Toshiba CD-ROM XM-6202B"	,	NULL 		},
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL 		},
	{ "CD-532E-A"		,	NULL 		},
	{ "E-IDE CD-ROM CR-840",	NULL 		},
	{ "CD-ROM Drive/F5A",	NULL 		},
	{ "WPI CDD-820",		NULL 		},
	{ "SAMSUNG CD-ROM SC-148C",	NULL 		},
	{ "SAMSUNG CD-ROM SC",	NULL 		},
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL 		},
	{ "_NEC DV5800A",               NULL            },
	{ "SAMSUNG CD-ROM SN-124",	"N001" },
	{ "Seagate STT20000A",		NULL  },
	{ "CD-ROM CDR_U200",		"1.09" },
	{ NULL			,	NULL		}

};

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0, dma_stat = 0;

	dma_stat = hwif->dma_ops->dma_end(drive);
	ide_destroy_dmatable(drive);
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
		if (!dma_stat) {
			struct ide_cmd *cmd = &hwif->cmd;

			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
				ide_finish_cmd(drive, cmd, stat);
			else
				ide_complete_rq(drive, 0,
						cmd->rq->nr_sectors << 9);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
			drive->name, __func__, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

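/* Returns non-zero if the drive is on the DMA whitelist above. */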
int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@cmd: command
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */

static int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	int i;

	ide_map_sg(drive, cmd);

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		cmd->sg_dma_direction = DMA_TO_DEVICE;
	else
		cmd->sg_dma_direction = DMA_FROM_DEVICE;

	i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
	if (i == 0)
		ide_map_sg(drive, cmd);
	else {
		cmd->orig_sg_nents = cmd->sg_nents;
		cmd->sg_nents = i;
	}

	return i;
}

/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */

void ide_destroy_dmatable(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;

	dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
		     cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 *	ide_dma_on		-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->dev_flags |= IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_ops->dma_host_set(drive, 1);
}

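/*
 * Returns non-zero (after logging a warning) if the drive is on the DMA
 * blacklist above, zero otherwise.
 */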
int __ide_dma_bad_drive(ide_drive_t *drive)
{
	u16 *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
				    drive->name, (char *)&id[ATA_ID_PROD]);
		return blacklist;
	}
	return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

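/*
 * Compute the bitmask of DMA modes, relative to @base (UDMA, MWDMA or
 * SWDMA), that both the drive's identify data and the host controller
 * (after any port-specific mode filter) support.
 */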
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	u16 *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id[ATA_ID_UDMA_MODES];

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id[ATA_ID_MWDMA_MODES];
		break;
	case XFER_SW_DMA_0:
		if (id[ATA_ID_FIELD_VALID] & 2) {
			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) &&
		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}
EXPORT_SYMBOL_GPL(ide_find_dma_mode);

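/*
 * Check that DMA may safely be enabled for the drive (identify data,
 * blacklist, consistency of the DMA words) and program the fastest DMA
 * mode supported by the drive/host pair.  If the host is flagged to
 * trust the BIOS setup, defer to config_drive_for_dma() instead.
 * Returns non-zero on success, 0 if DMA should not be used.
 */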
static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (ata_id_has_dma(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_NODMA))
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (ide_id_dma_bug(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

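/*
 * Try to enable DMA for the drive; if that fails, fall back to the best
 * PIO mode (unless the host trusts the BIOS DMA setup).  Returns 0 when
 * DMA was tuned successfully, -1 otherwise.
 */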
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	if (ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return -1;
}

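/*
 * Sanity-check the identify data: a device must not report a currently
 * selected UDMA mode and a selected MWDMA mode (or MWDMA and SWDMA) at
 * the same time.  Returns 1 and logs an error if the data is bogus.
 */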
int ide_id_dma_bug(ide_drive_t *drive)
{
	u16 *id = drive->id;

	if (id[ATA_ID_FIELD_VALID] & 4) {
		if ((id[ATA_ID_UDMA_MODES] >> 8) &&
		    (id[ATA_ID_MWDMA_MODES] >> 8))
			goto err_out;
	} else if (id[ATA_ID_FIELD_VALID] & 2) {
		if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
		    (id[ATA_ID_SWDMA_MODES] >> 8))
			goto err_out;
	}
	return 0;
err_out:
	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
	return 1;
}

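/*
 * Re-evaluate DMA for the drive: DMA is switched off first, then
 * ide_dma_check() decides whether it may be switched back on.
 * Returns 0 on success, otherwise the ide_dma_check() result.
 */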
int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}

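/*
 * Step the transfer rate down after DMA CRC errors: reset the CRC error
 * count, drop to the next lower UDMA mode (or to PIO4 for non-UDMA
 * modes), and re-enable DMA only if the new speed is still a DMA mode.
 */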
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

/*
 * Un-busy the port etc, and clear any pending DMA status.  We want to
 * retry the current request in PIO mode instead of risking tossing it
 * all away.
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)dma_ops->dma_end(drive);
		ide_destroy_dmatable(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		if (dma_ops->dma_clear)
			dma_ops->dma_clear(drive);
		printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
		if (dma_ops->dma_test_irq(drive) == 0) {
			ide_dump_status(drive, "DMA timeout",
					hwif->tp_ops->read_status(hwif));
			(void)dma_ops->dma_end(drive);
			ide_destroy_dmatable(drive);
		}
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc and make sure request is sane
	 */

	rq = hwif->rq;
	if (!rq)
		goto out;

	hwif->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

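/*
 * Free the coherent PRD (Physical Region Descriptor) table allocated by
 * ide_allocate_dma_engine(), if any.
 */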
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

		dma_free_coherent(hwif->dev, prd_size,
				  hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

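/*
 * Allocate a coherent buffer for the PRD table, using the port's PRD
 * geometry (defaulting to PRD_ENTRIES entries of PRD_BYTES each).
 */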
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	int prd_size;

	if (hwif->prd_max_nents == 0)
		hwif->prd_max_nents = PRD_ENTRIES;
	if (hwif->prd_ent_size == 0)
		hwif->prd_ent_size = PRD_BYTES;

	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
						&hwif->dmatable_dma,
						GFP_ATOMIC);
	if (hwif->dmatable_cpu == NULL) {
		printk(KERN_ERR "%s: unable to allocate PRD table\n",
			hwif->name);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

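/*
 * Prepare a command for DMA: build and map the scatter/gather list and
 * let the host driver's dma_setup() method program the transfer.
 * Returns 1 if DMA cannot be used for this command, 0 if it is ready.
 */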
int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
{
	const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;

	if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
	    (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)) ||
	    ide_build_sglist(drive, cmd) == 0)
		return 1;
	if (dma_ops->dma_setup(drive, cmd)) {
		ide_destroy_dmatable(drive);
		ide_map_sg(drive, cmd);
		return 1;
	}
	return 0;
}