libata-sff.c 27.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
/*
 *  libata-bmdma.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58
/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

T
Tejun Heo 已提交
59
	iowrite8(ap->ctl, ioaddr->ctl_addr);
60 61 62 63 64 65 66
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}

67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82
u8 ata_dummy_irq_on (struct ata_port *ap) 	{ return 0; }

/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 */

u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
83
	u8 host_stat = 0, post_stat = 0, status;
84 85 86 87 88 89

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

90 91 92 93 94
	if (ap->ioaddr.bmdma_addr) {
		/* get controller status; clear intr, err bits */
		host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
			 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
95

96 97
		post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}
98 99 100 101 102 103 104 105 106
	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
			__FUNCTION__,
			host_stat, post_stat, status);
	return status;
}

/* irq_ack stub: nothing to acknowledge, report status 0 */
u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	return 0;
}

/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* update the device control register only when it changed */
	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* for LBA48 the high-order (HOB) bytes are written first */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	/* then the low-order bytes of the taskfile */
	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	/* writing the command register starts execution; ata_pause()
	 * then provides the required settle delay before the device
	 * is touched again
	 */
	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* reading the status register also acks the device interrupt
	 * (see ata_check_status) */
	tf->command = ata_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* set HOB to latch the high-order bytes of the shadow
		 * registers, then re-read them.
		 * NOTE(review): ATA_HOB is left set in the control
		 * register on exit — confirm callers expect this.
		 */
		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = ioread8(ioaddr->error_addr);
		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
	}
}

/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *      from this device
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	/* a read of the status register acknowledges the device IRQ */
	return ioread8(ap->ioaddr.status_addr);
}

/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	/* defer to the port's own check_altstatus hook when present */
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	/* unlike ata_check_status(), this does not ack the interrupt */
	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		/* non-write command: controller writes into memory */
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so it is best not to add a readb() without first
	 * testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
J
Jeff Garzik 已提交
320
 *	spin_lock_irqsave(host lock)
321 322 323
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
T
Tejun Heo 已提交
324 325 326
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
327 328
		return;

T
Tejun Heo 已提交
329
	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
330 331 332 333 334 335 336 337 338 339 340
}

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);        /* dummy read */
}

/**
 *	ata_bmdma_freeze - Freeze BMDMA controller port
 *	@ap: port to freeze
 *
 *	Freeze BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* set nIEN to mask device interrupts while frozen */
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);	/* reading status acks a pending device IRQ */
	ap->ops->irq_clear(ap);
	ap->ops->irq_on(ap);	/* clears nIEN (see ata_irq_on) */
}

/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used for constructing
 *	->error_handler callback by low level drivers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	/* only look at the active command if it actually failed */
	qc = __ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	/* flush and ack any pending device/controller interrupt */
	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* ata_eh_thaw_port() may sleep — call it outside the lock */
	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(ap))
		hardreset = sata_std_hardreset;

501 502
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
503 504 505 506 507 508 509 510 511 512 513 514
}

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
515 516
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
517 518
}

519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539
/**
 *	ata_sff_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table if the device
 *	is DMA capable SFF.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

int ata_sff_port_start(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}

540
#ifdef CONFIG_PCI
541 542 543 544

/* Check that both PCI BARs of channel @port (cmd and ctl blocks)
 * are enabled; returns 1 when usable, 0 when the channel is dead.
 */
static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int bar;

	/* each channel owns two consecutive BARs, starting at port*2 */
	for (bar = port * 2; bar < port * 2 + 2; bar++) {
		if (pci_resource_start(pdev, bar) == 0 ||
		    pci_resource_len(pdev, bar) == 0)
			return 0;
	}
	return 1;
}
J
Jeff Garzik 已提交
555

556 557 558 559 560 561 562 563 564 565 566 567
/**
 *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_init_bmdma(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* request and iomap DMA region (BAR4 holds the BMDMA registers) */
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		/* each port's BMDMA register block is 8 bytes wide */
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		/* simplex flag (bit 7 of BMDMA status) set in hardware
		 * means only one channel may DMA at a time */
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;
	}

	return 0;
}
605

606 607 608 609
/**
 *	ata_pci_init_native_host - acquire native ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_init_native_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;	/* bit i set: port i successfully set up */
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;	/* first of the port's two BARs */
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			/* -EBUSY: another driver holds the region; pin
			 * the device so the resources stay with us */
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_std_ports(&ap->ioaddr);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}

675 676 677
/**
 *	ata_pci_prepare_native_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate ATA host for @pdev, acquire all native PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_prepare_native_host(struct pci_dev *pdev,
				const struct ata_port_info * const * ppi,
				struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	/* group all devres acquisitions so they can be rolled back */
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_init_native_host(host);
	if (rc)
		goto err_out;

	/* init DMA related stuff */
	rc = ata_pci_init_bmdma(host);
	if (rc)
		goto err_bmdma;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

 err_bmdma:
	/* This is necessary because PCI and iomap resources are
	 * merged and releasing the top group won't release the
	 * acquired resources if some of those have been acquired
	 * before entering this function.
	 */
	pcim_iounmap_regions(pdev, 0xf);
 err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}

733 734 735 736 737 738 739 740
/* devres payload tracking legacy-port resources for up to two ports,
 * released by ata_legacy_release() */
struct ata_legacy_devres {
	unsigned int	mask;		/* bit i set: port i initialized */
	unsigned long	cmd_port[2];	/* reserved I/O regions (0 = none) */
	void __iomem *	cmd_addr[2];	/* ioport_map()ed command blocks */
	void __iomem *	ctl_addr[2];	/* ioport_map()ed control blocks */
	unsigned int	irq[2];		/* requested IRQs (0 = none) */
	void *		irq_dev_id[2];	/* dev_id tokens given to request_irq */
};
741

742 743 744
static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
{
	int i;
745

746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799
	for (i = 0; i < 2; i++) {
		if (!legacy_dr->irq[i])
			continue;

		free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
		legacy_dr->irq[i] = 0;
		legacy_dr->irq_dev_id[i] = NULL;
	}
}

/* devres release callback: undo everything ata_init_legacy_port and
 * ata_request_legacy_irqs acquired */
static void ata_legacy_release(struct device *gdev, void *res)
{
	struct ata_legacy_devres *legacy_dr = res;
	int port;

	ata_legacy_free_irqs(legacy_dr);

	for (port = 0; port < 2; port++) {
		if (legacy_dr->cmd_addr[port])
			ioport_unmap(legacy_dr->cmd_addr[port]);
		if (legacy_dr->ctl_addr[port])
			ioport_unmap(legacy_dr->ctl_addr[port]);
		if (legacy_dr->cmd_port[port])
			release_region(legacy_dr->cmd_port[port], 8);
	}
}

/* Reserve and iomap the legacy I/O resources of @ap (port 0 = primary,
 * port 1 = secondary), recording everything in @legacy_dr so
 * ata_legacy_release() can undo it.  Returns 0 or -errno.
 */
static int ata_init_legacy_port(struct ata_port *ap,
				struct ata_legacy_devres *legacy_dr)
{
	struct ata_host *host = ap->host;
	int port_no = ap->port_no;
	unsigned long cmd_port, ctl_port;

	if (port_no == 0) {
		cmd_port = ATA_PRIMARY_CMD;
		ctl_port = ATA_PRIMARY_CTL;
	} else {
		cmd_port = ATA_SECONDARY_CMD;
		ctl_port = ATA_SECONDARY_CTL;
	}

	/* request cmd_port */
	if (request_region(cmd_port, 8, "libata"))
		legacy_dr->cmd_port[port_no] = cmd_port;
	else {
		dev_printk(KERN_WARNING, host->dev,
			   "0x%0lX IDE port busy\n", cmd_port);
		return -EBUSY;
	}

	/* iomap cmd and ctl ports */
	legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
	legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
	if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no]) {
		dev_printk(KERN_WARNING, host->dev,
			   "failed to map cmd/ctl ports\n");
		return -ENOMEM;
	}

	/* init IO addresses */
	ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
	ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
	ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
	ata_std_ports(&ap->ioaddr);

	return 0;
}

/**
 *	ata_init_legacy_host - acquire legacy ATA resources and init ATA host
 *	@host: target ATA host
 *	@was_busy: out parameter, indicates whether any port was busy
 *
 *	Acquire legacy ATA resources for the first two ports of @host
 *	and initialize it accordingly.  Ports marked dummy are skipped
 *	and resource acquisition failure makes the port dummy.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
static int ata_init_legacy_host(struct ata_host *host, int *was_busy)
{
	struct device *gdev = host->dev;
	struct ata_legacy_devres *legacy_dr;
	int i, rc;

	if (!devres_open_group(gdev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = -ENOMEM;
	legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
				 GFP_KERNEL);
	if (!legacy_dr)
		goto err_out;
	devres_add(gdev, legacy_dr);

	for (i = 0; i < 2; i++) {
		if (ata_port_is_dummy(host->ports[i]))
			continue;

		rc = ata_init_legacy_port(host->ports[i], legacy_dr);
		if (rc == 0)
			legacy_dr->mask |= 1 << i;
		else {
			if (rc == -EBUSY)
				(*was_busy)++;
			/* failed ports are turned into dummies */
			host->ports[i]->ops = &ata_dummy_port_ops;
		}
	}

	/* NOTE(review): returns with the devres group still open on
	 * -ENODEV (no devres_release_group) — confirm intended */
	if (!legacy_dr->mask) {
		dev_printk(KERN_ERR, gdev, "no available legacy port\n");
		return -ENODEV;
	}

	devres_remove_group(gdev, NULL);
	return 0;

 err_out:
	devres_release_group(gdev, NULL);
	return rc;
}

/**
 *	ata_request_legacy_irqs - request legacy ATA IRQs
 *	@host: target ATA host
 *	@handler: array of IRQ handlers
 *	@irq_flags: array of IRQ flags
 *	@dev_id: array of IRQ dev_ids
 *
 *	Request legacy IRQs for non-dummy legacy ports in @host.  All
 *	IRQ parameters are passed as array to allow ports to have
 *	separate IRQ handlers.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_request_legacy_irqs(struct ata_host *host,
				   irq_handler_t const *handler,
				   const unsigned int *irq_flags,
				   void * const *dev_id)
{
	struct device *gdev = host->dev;
	struct ata_legacy_devres *legacy_dr;
	int i, rc;

	/* the devres must already exist from ata_init_legacy_host() */
	legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
	BUG_ON(!legacy_dr);

	for (i = 0; i < 2; i++) {
		unsigned int irq;

		/* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
		if (i == 0)
			irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
		else
			irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));

		/* skip ports that were not initialized */
		if (!(legacy_dr->mask & (1 << i)))
			continue;

		if (!handler[i]) {
			dev_printk(KERN_ERR, gdev,
				   "NULL handler specified for port %d\n", i);
			rc = -EINVAL;
			goto err_out;
		}

		rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
				 dev_id[i]);
		if (rc) {
			dev_printk(KERN_ERR, gdev,
				"irq %u request failed (errno=%d)\n", irq, rc);
			goto err_out;
		}

		/* record irq allocation in legacy_dr */
		legacy_dr->irq[i] = irq;
		legacy_dr->irq_dev_id[i] = dev_id[i];

		/* only used to print info */
		if (i == 0)
			host->irq = irq;
		else
			host->irq2 = irq;
	}

	return 0;

 err_out:
	/* roll back any IRQs already requested this call */
	ata_legacy_free_irqs(legacy_dr);
	return rc;
}
947 948 949 950

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and calls
 *	ata_device_add()
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative on errno-based value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev,
		     const struct ata_port_info * const * ppi)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi = NULL;
	struct ata_host *host = NULL;
	u8 mask;
	int legacy_mode = 0;
	int i, rc;

	DPRINTK("ENTER\n");

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++) {
		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
			pi = ppi[i];
			break;
		}
	}

	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	/* group devres acquisitions so failure rolls everything back */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	  */

	rc = pcim_enable_device(pdev);
	if (rc)
		goto err_out;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		/* prog-if bits 0/2 clear => channel(s) in compat mode */
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space. In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			rc = -EOPNOTSUPP;
			goto err_out;
		}
#endif
	}

	/* alloc and init host */
	host = ata_host_alloc_pinfo(dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	if (!legacy_mode) {
		rc = ata_pci_init_native_host(host);
		if (rc)
			goto err_out;
	} else {
		int was_busy = 0;

		rc = ata_init_legacy_host(host, &was_busy);
		/* a busy legacy port means another driver owns it; pin
		 * the device so our partial resources are kept */
		if (was_busy)
			pcim_pin_device(pdev);
		if (rc)
			goto err_out;

		/* request respective PCI regions, may fail */
		rc = pci_request_region(pdev, 1, DRV_NAME);
		rc = pci_request_region(pdev, 3, DRV_NAME);
	}

	/* init BMDMA, may fail */
	ata_pci_init_bmdma(host);
	pci_set_master(pdev);

	/* start host and request IRQ */
	rc = ata_host_start(host);
	if (rc)
		goto err_out;

	if (!legacy_mode)
		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
				      IRQF_SHARED, DRV_NAME, host);
	else {
		/* legacy mode: one IRQ per channel, same handler for both */
		irq_handler_t handler[2] = { host->ops->irq_handler,
					     host->ops->irq_handler };
		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
		void *dev_id[2] = { host, host };

		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
	}
	if (rc)
		goto err_out;

	/* register */
	rc = ata_host_register(host, pi->sht);
	if (rc)
		goto err_out;

	devres_remove_group(dev, NULL);
	return 0;

err_out:
	devres_release_group(dev, NULL);
	return rc;
}

A
Alan Cox 已提交
1095 1096 1097 1098 1099
/**
 *	ata_pci_clear_simplex	-	attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode. This implements the necessary logic to
 *	perform the task on such devices. Calling it on other devices will
 *	have -undefined- behaviour.
 */

int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	/* read the BMDMA status byte, write it back with only bits
	 * 6:5 preserved, then re-read to see whether the simplex
	 * flag (bit 7) actually cleared
	 */
	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);

	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;

	return 0;
}

1121
unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
A
Alan Cox 已提交
1122 1123 1124
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */
1125

1126
	if (adev->ap->ioaddr.bmdma_addr == 0)
A
Alan Cox 已提交
1127 1128 1129 1130
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}

1131 1132
#endif /* CONFIG_PCI */