/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

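/*
 * Drives treated as known-good for DMA; ide_dma_good_drive() below reports
 * a match against this list.
 */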
static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A"	,       NULL		},
	{ "CONNER CTMA 4000"	,       NULL		},
	{ "CONNER CTT8000-A"	,       NULL		},
	{ "ST34342A"		,	NULL		},
	{ NULL			,	NULL		}
};

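/*
 * Drives (optionally limited to a specific firmware revision) for which
 * (U)DMA is refused; __ide_dma_bad_drive() below reports a match against
 * this list.
 */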
static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H"	,	NULL 		},
	{ "WDC AC22100H"	,	NULL 		},
	{ "WDC AC32500H"	,	NULL 		},
	{ "WDC AC33100H"	,	NULL 		},
	{ "WDC AC31600H"	,	NULL 		},
	{ "WDC AC32100H"	,	"24.09P07"	},
	{ "WDC AC23200L"	,	"21.10N21"	},
	{ "Compaq CRD-8241B"	,	NULL 		},
	{ "CRD-8400B"		,	NULL 		},
	{ "CRD-8480B",			NULL 		},
	{ "CRD-8482B",			NULL 		},
	{ "CRD-84"		,	NULL 		},
	{ "SanDisk SDP3B"	,	NULL 		},
	{ "SanDisk SDP3B-64"	,	NULL 		},
	{ "SANYO CD-ROM CRD"	,	NULL 		},
	{ "HITACHI CDR-8"	,	NULL 		},
	{ "HITACHI CDR-8335"	,	NULL 		},
	{ "HITACHI CDR-8435"	,	NULL 		},
	{ "Toshiba CD-ROM XM-6202B"	,	NULL 		},
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL 		},
	{ "CD-532E-A"		,	NULL 		},
	{ "E-IDE CD-ROM CR-840",	NULL 		},
	{ "CD-ROM Drive/F5A",	NULL 		},
	{ "WPI CDD-820",		NULL 		},
	{ "SAMSUNG CD-ROM SC-148C",	NULL 		},
	{ "SAMSUNG CD-ROM SC",	NULL 		},
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL 		},
	{ "_NEC DV5800A",               NULL            },
	{ "SAMSUNG CD-ROM SN-124",	"N001" },
	{ "Seagate STT20000A",		NULL  },
	{ "CD-ROM CDR_U200",		"1.09" },
	{ NULL			,	NULL		}

};

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0, dma_stat = 0;

	dma_stat = hwif->dma_ops->dma_end(drive);
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
		if (!dma_stat) {
			struct ide_cmd *cmd = &hwif->cmd;

			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
				ide_finish_cmd(drive, cmd, stat);
			else
				ide_complete_rq(drive, 0,
						cmd->rq->nr_sectors << 9);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
			drive->name, __func__, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

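/**
 *	ide_dma_good_drive	-	consult the DMA whitelist
 *	@drive: IDE device
 *
 *	Returns non-zero if @drive appears in drive_whitelist[] above.
 */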
int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@cmd: command
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */

int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	int i;

	ide_map_sg(drive, cmd);

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		cmd->sg_dma_direction = DMA_TO_DEVICE;
	else
		cmd->sg_dma_direction = DMA_FROM_DEVICE;

	i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
	if (i == 0)
		ide_map_sg(drive, cmd);
	else {
		cmd->orig_sg_nents = cmd->sg_nents;
		cmd->sg_nents = i;
	}

	return i;
}

/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */

void ide_destroy_dmatable(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;

	dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
		     cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 *	ide_dma_on		-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->dev_flags |= IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_ops->dma_host_set(drive, 1);
}

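/**
 *	__ide_dma_bad_drive	-	consult the DMA blacklist
 *	@drive: IDE device
 *
 *	Check @drive against drive_blacklist[] above.  Returns non-zero
 *	(and logs a warning) if the drive is blacklisted, 0 otherwise.
 */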
int __ide_dma_bad_drive(ide_drive_t *drive)
{
	u16 *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
				    drive->name, (char *)&id[ATA_ID_PROD]);
		return blacklist;
	}
	return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

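/* DMA transfer mode classes, tried in order from fastest to slowest */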
static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

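/**
 *	ide_get_mode_mask	-	mask of supported DMA modes
 *	@drive: IDE device
 *	@base: transfer mode class (XFER_UDMA_0, XFER_MW_DMA_0 or XFER_SW_DMA_0)
 *	@req_mode: requested transfer mode
 *
 *	Combine the modes advertised in the drive's identify data with the
 *	host's mode masks (and optional ->udma_filter/->mdma_filter hooks)
 *	to build the mask of usable modes in the given class.
 */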
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	u16 *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id[ATA_ID_UDMA_MODES];

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id[ATA_ID_MWDMA_MODES];
		break;
	case XFER_SW_DMA_0:
		if (id[ATA_ID_FIELD_VALID] & 2) {
			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) &&
		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}
EXPORT_SYMBOL_GPL(ide_find_dma_mode);

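/*
 * Decide whether DMA may be used for @drive (identify data, blacklist,
 * host flags) and, unless the BIOS setup is trusted, program the best
 * available DMA mode.  Returns non-zero on success, 0 otherwise.
 */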
static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (ata_id_has_dma(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_NODMA))
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (ide_id_dma_bug(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

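/*
 * Try to set up DMA via ide_tune_dma(); if that fails, fall back to the
 * best PIO mode (unless the host trusts the BIOS DMA setup).  Returns 0
 * when DMA was configured, -1 otherwise.
 */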
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	if (ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return -1;
}

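/**
 *	ide_id_dma_bug	-	check for buggy identify data
 *	@drive: IDE device
 *
 *	Returns 1 and logs an error if the identify block marks a mode as
 *	currently selected in more than one DMA mode class, 0 otherwise.
 */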
int ide_id_dma_bug(ide_drive_t *drive)
{
	u16 *id = drive->id;

	if (id[ATA_ID_FIELD_VALID] & 4) {
		if ((id[ATA_ID_UDMA_MODES] >> 8) &&
		    (id[ATA_ID_MWDMA_MODES] >> 8))
			goto err_out;
	} else if (id[ATA_ID_FIELD_VALID] & 2) {
		if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
		    (id[ATA_ID_SWDMA_MODES] >> 8))
			goto err_out;
	}
	return 0;
err_out:
	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
	return 1;
}

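/**
 *	ide_set_dma	-	configure DMA for a device
 *	@drive: IDE device
 *
 *	Switch DMA off, re-run the DMA checks and turn DMA back on if they
 *	pass.  Returns 0 on success or a negative value when DMA could not
 *	be enabled.
 */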
int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}

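/*
 * Slow down a drive that is producing DMA CRC errors: reset the CRC count,
 * drop UDMA modes one step (or fall back to PIO for non-UDMA modes) and
 * re-enable DMA only if the new speed is still a DMA mode.
 */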
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

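/* Generic handler for a lost DMA interrupt: just log that recovery is tried. */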
void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

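/*
 * Handle a DMA timeout: if the controller does not report a pending
 * interrupt, dump the drive status and stop the DMA engine.
 */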
static void ide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	if (hwif->dma_ops->dma_test_irq(drive))
		return;

	ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));

	hwif->dma_ops->dma_end(drive);
}

/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		if (dma_ops->dma_clear)
			dma_ops->dma_clear(drive);
		ide_dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc and make sure request is sane
	 */

	rq = hwif->rq;
	if (!rq)
		goto out;

	hwif->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

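/*
 * Free the PRD (Physical Region Descriptor) table allocated by
 * ide_allocate_dma_engine(), if one is present.
 */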
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

		dma_free_coherent(hwif->dev, prd_size,
				  hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

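/*
 * Allocate coherent memory for the port's PRD table, using the default
 * PRD_ENTRIES/PRD_BYTES geometry unless the host driver has set its own.
 * Returns 0 on success or -ENOMEM on failure.
 */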
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	int prd_size;

	if (hwif->prd_max_nents == 0)
		hwif->prd_max_nents = PRD_ENTRIES;
	if (hwif->prd_ent_size == 0)
		hwif->prd_ent_size = PRD_BYTES;

	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
						&hwif->dmatable_dma,
						GFP_ATOMIC);
	if (hwif->dmatable_cpu == NULL) {
		printk(KERN_ERR "%s: unable to allocate PRD table\n",
			hwif->name);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);