ide-dma.c 13.2 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2 3
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
4 5 6
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
7
 *
L
Linus Torvalds 已提交
8
 *  May be copied or modified under the terms of the GNU General Public License
9 10
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
L
Linus Torvalds 已提交
11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
 */

/*
 *  Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
35
#include <linux/dma-mapping.h>
L
Linus Torvalds 已提交
36

B
Bartlomiej Zolnierkiewicz 已提交
37
/*
 * Devices known to work well with DMA; matched by model string from the
 * identify data (a NULL second field wildcards the firmware revision).
 * Consulted by ide_dma_good_drive() below.
 */
static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A"	,       NULL		},
	{ "CONNER CTMA 4000"	,       NULL		},
	{ "CONNER CTT8000-A"	,       NULL		},
	{ "ST34342A"		,	NULL		},
	{ NULL			,	NULL		}
};

B
Bartlomiej Zolnierkiewicz 已提交
45
/*
 * Devices with known-broken DMA; matched by model string from the
 * identify data.  A NULL second field blacklists every firmware
 * revision; a non-NULL second field blacklists only that revision.
 * Consulted by __ide_dma_bad_drive() below.
 */
static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H"	,	NULL 		},
	{ "WDC AC22100H"	,	NULL 		},
	{ "WDC AC32500H"	,	NULL 		},
	{ "WDC AC33100H"	,	NULL 		},
	{ "WDC AC31600H"	,	NULL 		},
	{ "WDC AC32100H"	,	"24.09P07"	},
	{ "WDC AC23200L"	,	"21.10N21"	},
	{ "Compaq CRD-8241B"	,	NULL 		},
	{ "CRD-8400B"		,	NULL 		},
	{ "CRD-8480B",			NULL 		},
	{ "CRD-8482B",			NULL 		},
	{ "CRD-84"		,	NULL 		},
	{ "SanDisk SDP3B"	,	NULL 		},
	{ "SanDisk SDP3B-64"	,	NULL 		},
	{ "SANYO CD-ROM CRD"	,	NULL 		},
	{ "HITACHI CDR-8"	,	NULL 		},
	{ "HITACHI CDR-8335"	,	NULL 		},
	{ "HITACHI CDR-8435"	,	NULL 		},
	{ "Toshiba CD-ROM XM-6202B"	,	NULL 		},
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL 		},
	{ "CD-532E-A"		,	NULL 		},
	{ "E-IDE CD-ROM CR-840",	NULL 		},
	{ "CD-ROM Drive/F5A",	NULL 		},
	{ "WPI CDD-820",		NULL 		},
	{ "SAMSUNG CD-ROM SC-148C",	NULL 		},
	{ "SAMSUNG CD-ROM SC",	NULL 		},
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL 		},
	{ "_NEC DV5800A",               NULL            },
	{ "SAMSUNG CD-ROM SN-124",	"N001" },
	{ "Seagate STT20000A",		NULL  },
	{ "CD-ROM CDR_U200",		"1.09" },
	{ NULL			,	NULL		}

};

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device.  Stops the DMA engine first, then reads the drive
 *	status; on a clean completion the request is ended, otherwise
 *	the error path is taken.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0, dma_stat = 0;

	/* stop the DMA engine before touching drive status */
	dma_stat = hwif->dma_ops->dma_end(drive);
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
		/* drive looks happy; DMA status decides success */
		if (!dma_stat) {
			struct request *rq = hwif->rq;

			task_end_request(drive, rq, stat);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
			drive->name, __func__, dma_stat);
	}
	/* drive error or bad DMA status: run generic error handling */
	return ide_error(drive, "dma_intr", stat);
}
EXPORT_SYMBOL_GPL(ide_dma_intr);

111
/*
 *	ide_dma_good_drive	-	DMA whitelist lookup
 *	@drive: IDE device to check
 *
 *	Returns non-zero when the device's identify data matches an entry
 *	in drive_whitelist.
 */
int ide_dma_good_drive(ide_drive_t *drive)
{
	int listed = ide_in_drive_list(drive->id, drive_whitelist);

	return listed;
}

L
Linus Torvalds 已提交
116 117 118 119 120
/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 *
 *	Returns the number of mapped entries (0 on failure); on success
 *	the original entry count is stashed for the later unmap.
 */

int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	int mapped;

	ide_map_sg(drive, rq);

	/* READ means data flows device -> memory */
	hwif->sg_dma_direction = (rq_data_dir(rq) == READ) ?
		DMA_FROM_DEVICE : DMA_TO_DEVICE;

	mapped = dma_map_sg(hwif->dev, sg, hwif->sg_nents,
			    hwif->sg_dma_direction);
	if (mapped) {
		/* remember the pre-coalesce count for dma_unmap_sg() */
		hwif->orig_sg_nents = hwif->sg_nents;
		hwif->sg_nents = mapped;
	}

	return mapped;
}
EXPORT_SYMBOL_GPL(ide_build_sglist);

/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */
B
Bartlomiej Zolnierkiewicz 已提交
160 161

void ide_destroy_dmatable(ide_drive_t *drive)
L
Linus Torvalds 已提交
162
{
163
	ide_hwif_t *hwif = drive->hwif;
L
Linus Torvalds 已提交
164

165
	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->orig_sg_nents,
166
		     hwif->sg_dma_direction);
L
Linus Torvalds 已提交
167 168 169 170
}
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

/**
171
 *	ide_dma_off_quietly	-	Generic DMA kill
L
Linus Torvalds 已提交
172 173
 *	@drive: drive to control
 *
B
Bartlomiej Zolnierkiewicz 已提交
174
 *	Turn off the current DMA on this IDE controller.
L
Linus Torvalds 已提交
175 176
 */

177
void ide_dma_off_quietly(ide_drive_t *drive)
L
Linus Torvalds 已提交
178
{
B
Bartlomiej Zolnierkiewicz 已提交
179
	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
L
Linus Torvalds 已提交
180 181
	ide_toggle_bounce(drive, 0);

182
	drive->hwif->dma_ops->dma_host_set(drive, 0);
L
Linus Torvalds 已提交
183
}
184
EXPORT_SYMBOL(ide_dma_off_quietly);
L
Linus Torvalds 已提交
185 186

/**
187
 *	ide_dma_off	-	disable DMA on a device
L
Linus Torvalds 已提交
188 189 190 191 192 193
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

194
void ide_dma_off(ide_drive_t *drive)
L
Linus Torvalds 已提交
195 196
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
197
	ide_dma_off_quietly(drive);
L
Linus Torvalds 已提交
198
}
199
EXPORT_SYMBOL(ide_dma_off);
L
Linus Torvalds 已提交
200 201

/**
202
 *	ide_dma_on		-	Enable DMA on a device
L
Linus Torvalds 已提交
203 204 205 206
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */
207 208

void ide_dma_on(ide_drive_t *drive)
L
Linus Torvalds 已提交
209
{
B
Bartlomiej Zolnierkiewicz 已提交
210
	drive->dev_flags |= IDE_DFLAG_USING_DMA;
L
Linus Torvalds 已提交
211 212
	ide_toggle_bounce(drive, 1);

213
	drive->hwif->dma_ops->dma_host_set(drive, 1);
L
Linus Torvalds 已提交
214 215
}

B
Bartlomiej Zolnierkiewicz 已提交
216
/*
 *	__ide_dma_bad_drive	-	DMA blacklist lookup
 *	@drive: IDE device to check
 *
 *	Returns non-zero (and warns) when the device's identify data
 *	matches an entry in drive_blacklist, zero otherwise.
 */
int __ide_dma_bad_drive(ide_drive_t *drive)
{
	u16 *id = drive->id;
	int rc = ide_in_drive_list(id, drive_blacklist);

	if (rc == 0)
		return 0;

	printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
			    drive->name, (char *)&id[ATA_ID_PROD]);
	return rc;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

230 231 232 233 234 235
/*
 * Base transfer-mode values, fastest class first; ide_find_dma_mode()
 * walks this array in order so UDMA wins over MWDMA over SWDMA.
 */
static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

236
/*
 *	ide_get_mode_mask	-	supported-mode mask for one DMA class
 *	@drive: IDE device
 *	@base: class base mode (one of xfer_mode_bases[])
 *	@req_mode: requested transfer mode (used to limit cable checks)
 *
 *	Compute the bitmask of modes in the given class supported by both
 *	the device (identify data, optionally filtered by the port's
 *	udma/mdma filter hooks) and the host (hwif's *_mask fields).
 */
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	u16 *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		/* identify word 53 bit 2: UDMA fields are valid */
		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id[ATA_ID_UDMA_MODES];

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			/* no 80-wire cable: clamp to UDMA2 and below */
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		/* identify word 53 bit 1: words 64-70 are valid */
		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id[ATA_ID_MWDMA_MODES];
		break;
	case XFER_SW_DMA_0:
		if (id[ATA_ID_FIELD_VALID] & 2) {
			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
			/* fall back to the obsolete single-word DMA field */
			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	/* some hosts cannot do DMA to ATAPI (non-disk) devices */
	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/* try each DMA class, fastest first; take the highest set bit */
	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) &&
		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
			mode = XFER_MW_DMA_1;
	}

	/* never exceed what the caller asked for */
	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}
EXPORT_SYMBOL_GPL(ide_find_dma_mode);
345

346
/*
 *	ide_tune_dma	-	check and program best DMA mode
 *	@drive: IDE device
 *
 *	Returns 1 when a DMA mode was successfully configured for the
 *	device, 0 when DMA cannot or should not be used.
 */
static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	/* device must advertise DMA and not be administratively disabled */
	if (ata_id_has_dma(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_NODMA))
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	/* inconsistent identify data: don't trust the device */
	if (ide_id_dma_bug(drive))
		return 0;

	/* some hosts leave mode programming to the BIOS */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	/* non-zero return from ide_set_dma_mode() means failure */
	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

376 377 378 379
/*
 *	ide_dma_check	-	decide between DMA and PIO for a device
 *	@drive: IDE device
 *
 *	Returns 0 when DMA was successfully tuned, -1 otherwise (after
 *	falling back to the best PIO mode where the host allows it).
 */
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	if (ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if ((hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) == 0)
		ide_set_max_pio(drive);

	return -1;
}

392
/*
 *	ide_id_dma_bug	-	sanity-check DMA identify data
 *	@drive: IDE device
 *
 *	Detect identify blocks that claim two DMA classes active at once
 *	(only one mode may ever be selected).  Returns 1 (and logs an
 *	error) for bad data, 0 otherwise.
 */
int ide_id_dma_bug(ide_drive_t *drive)
{
	u16 *id = drive->id;
	int bad = 0;

	if (id[ATA_ID_FIELD_VALID] & 4)
		bad = (id[ATA_ID_UDMA_MODES] >> 8) &&
		      (id[ATA_ID_MWDMA_MODES] >> 8);
	else if (id[ATA_ID_FIELD_VALID] & 2)
		bad = (id[ATA_ID_MWDMA_MODES] >> 8) &&
		      (id[ATA_ID_SWDMA_MODES] >> 8);

	if (bad)
		printk(KERN_ERR "%s: bad DMA info in identify block\n",
		       drive->name);

	return bad;
}

411 412 413 414
/*
 *	ide_set_dma	-	probe and enable DMA for a device
 *	@drive: IDE device
 *
 *	Returns 0 and turns DMA on when the drive/host pair supports it,
 *	otherwise returns the (negative) result of ide_dma_check().
 */
int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc == 0)
		ide_dma_on(drive);

	return rc;
}

432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451
/*
 *	ide_check_dma_crc	-	react to accumulated CRC errors
 *	@drive: IDE device that has been logging CRC errors
 *
 *	Step the device down one UDMA mode (or out to PIO for non-UDMA
 *	modes), then re-enable DMA only if the resulting speed is still
 *	a DMA mode.
 */
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	/* start the error count fresh for the new, slower mode */
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

452
/*
 *	ide_dma_lost_irq	-	default lost-IRQ handler for DMA
 *	@drive: IDE device whose interrupt went missing
 *
 *	Generic hook: just log the recovery attempt; hosts with real
 *	recovery needs override dma_lost_irq in their dma_ops.
 */
void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
L
Linus Torvalds 已提交
457

458
/*
 *	ide_dma_timeout	-	generic DMA timeout handler
 *	@drive: IDE device that timed out
 *
 *	If the controller reports a pending interrupt the timeout was
 *	spurious and we return without touching the transfer; otherwise
 *	dump the drive status and stop the DMA engine.
 */
void ide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	/* interrupt actually pending: nothing to clean up here */
	if (hwif->dma_ops->dma_test_irq(drive))
		return;

	ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));

	hwif->dma_ops->dma_end(drive);
}
EXPORT_SYMBOL_GPL(ide_dma_timeout);
L
Linus Torvalds 已提交
472

473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529
/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 *
 * @drive: IDE device whose DMA transfer timed out
 * @error: negative when the timeout is fatal, >= 0 for a retryable one
 *
 * Returns ide_stopped, or the result of ide_error() on the fatal path.
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc and make sure request is sane
	 */

	rq = hwif->rq;
	if (!rq)
		goto out;

	hwif->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	/* rewind the request to the start of its first bio for PIO retry */
	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

530
/*
 *	ide_release_dma_engine	-	free the PRD table
 *	@hwif: IDE port
 *
 *	Free the coherent PRD buffer allocated by ide_allocate_dma_engine()
 *	and clear the pointer; a no-op when nothing is allocated.
 */
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu == NULL)
		return;

	dma_free_coherent(hwif->dev,
			  hwif->prd_max_nents * hwif->prd_ent_size,
			  hwif->dmatable_cpu, hwif->dmatable_dma);
	hwif->dmatable_cpu = NULL;
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);
L
Linus Torvalds 已提交
541

542
/*
 *	ide_allocate_dma_engine	-	allocate the PRD table
 *	@hwif: IDE port
 *
 *	Allocate a DMA-coherent buffer for the port's PRD table, filling
 *	in default geometry where the host driver left it unset.
 *	Returns 0 on success or -ENOMEM.
 */
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	int prd_size;

	/* fall back to the generic BM-DMA PRD geometry */
	if (!hwif->prd_max_nents)
		hwif->prd_max_nents = PRD_ENTRIES;
	if (!hwif->prd_ent_size)
		hwif->prd_ent_size = PRD_BYTES;

	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
						&hwif->dmatable_dma,
						GFP_ATOMIC);
	if (!hwif->dmatable_cpu) {
		printk(KERN_ERR "%s: unable to allocate PRD table\n",
			hwif->name);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);