/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ioaddr->ctl_addr)
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}

/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If your device does not (e.g. has a different
 *	status method), then you will need to provide a replacement tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
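		/* Setting the HOB bit in the device control register makes
		 * the taskfile reads below return the high order (LBA48)
		 * bytes of the shadow registers.
		 */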
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON(1);
	}
}

/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}

/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}
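
/*
 * For reference: the SFF-8038i bus master register block reached via
 * ->bmdma_addr is 8 bytes per channel (offsets and bits as defined by
 * the ATA_DMA_* constants in <linux/ata.h>):
 *
 *	+0  ATA_DMA_CMD        bit 0 ATA_DMA_START, bit 3 ATA_DMA_WR
 *	+2  ATA_DMA_STATUS     bit 1 ATA_DMA_ERR, bit 2 ATA_DMA_INTR
 *	+4  ATA_DMA_TABLE_OFS  32-bit physical address of the PRD table
 *
 * ata_bmdma_setup() below programs the table address and direction,
 * ata_bmdma_start() sets ATA_DMA_START and ata_bmdma_stop() clears it.
 */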

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so it is best not to add a readb() without
	 * first testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO.
	 */
}

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}

/**
 *	ata_noop_irq_clear - Noop placeholder for irq_clear
 *	@ap: Port associated with this ATA transaction.
 */
void ata_noop_irq_clear(struct ata_port *ap)
{
}

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);        /* dummy read */
}

/**
 *	ata_bmdma_freeze - Freeze BMDMA controller port
 *	@ap: port to freeze
 *
 *	Freeze BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ioaddr->ctl_addr)
		iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	ap->ops->irq_on(ap);
}

/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used for constructing
 *	->error_handler callback by low level drivers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATAPI_PROT_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}
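
/*
 * Illustrative sketch (not part of this file): a low level driver with
 * its own reset methods would typically build its ->error_handler on
 * top of ata_bmdma_drive_eh(), much like the stock handler below, e.g.
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, foo_softreset,
 *				   sata_std_hardreset, ata_std_postreset);
 *	}
 *
 * where foo_softreset is the driver's own method and any of the reset
 * arguments may be NULL if the hardware cannot perform that reset.
 */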

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t softreset = NULL, hardreset = NULL;

	if (ap->ioaddr.ctl_addr)
		softreset = ata_std_softreset;
	if (sata_scr_valid(&ap->link))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, softreset, hardreset,
			   ata_std_postreset);
}

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
}

/**
 *	ata_sff_port_start - Set port up for DMA.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for the PRD table if the
 *	device is a DMA-capable SFF controller.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

int ata_sff_port_start(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}

#ifdef CONFIG_PCI

static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check the PCI resources for this channel are enabled */
	port = port * 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return 0;
	}
	return 1;
}

/**
 *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_init_bmdma(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0)
		return 0;

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}

	return 0;
}

/**
 *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_init_sff_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}

/**
 *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate ATA host for @pdev, acquire all native PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_prepare_sff_host(struct pci_dev *pdev,
			     const struct ata_port_info * const * ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_init_sff_host(host);
	if (rc)
		goto err_out;

	/* init DMA related stuff */
	rc = ata_pci_init_bmdma(host);
	if (rc)
		goto err_bmdma;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

 err_bmdma:
	/* This is necessary because PCI and iomap resources are
	 * merged and releasing the top group won't release the
	 * acquired resources if some of those have been acquired
	 * before entering this function.
	 */
	pcim_iounmap_regions(pdev, 0xf);
 err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}

/**
 *	ata_pci_activate_sff_host - start SFF host, request IRQ and register it
 *	@host: target SFF ATA host
 *	@irq_handler: irq_handler used when requesting IRQ(s)
 *	@sht: scsi_host_template to use when registering the host
 *
 *	This is the counterpart of ata_host_activate() for SFF ATA
 *	hosts.  This separate helper is necessary because SFF hosts
 *	use two separate interrupts in legacy mode.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_activate_sff_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space. In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			return -EOPNOTSUPP;
		}
#endif
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
 out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *	@sht: scsi_host_template to use when registering the host
 *	@host_priv: host private_data
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function enables the PCI device, reserves its register
 *	regions, sets the DMA mask, enables bus master mode, and
 *	activates and registers the resulting ATA host.
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev,
		     const struct ata_port_info * const * ppi,
		     struct scsi_host_template *sht, void *host_priv)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi = NULL;
	struct ata_host *host = NULL;
	int i, rc;

	DPRINTK("ENTER\n");

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++) {
		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
			pi = ppi[i];
			break;
		}
	}

	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

	/* prepare and activate SFF host */
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		goto out;
	host->private_data = host_priv;

	pci_set_master(pdev);
	rc = ata_pci_activate_sff_host(host, ata_interrupt, sht);
 out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}
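
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * typical SFF PCI driver's probe routine reduces to filling in a
 * struct ata_port_info and calling ata_pci_init_one(), e.g.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= 0x1f,		(PIO 0-4)
 *		.mwdma_mask	= 0x07,		(MWDMA 0-2)
 *		.udma_mask	= 0x3f,		(UDMA 0-5)
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		return ata_pci_init_one(pdev, ppi, &foo_sht, NULL);
 *	}
 *
 * with foo_port_ops and foo_sht being the driver's ata_port_operations
 * and scsi_host_template.
 */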

/**
 *	ata_pci_clear_simplex	-	attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non-simplex mode. This implements the necessary logic to
 *	perform the task on such devices. Calling it on other devices will
 *	have -undefined- behaviour.
 */

int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
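	/* Bit 7 of the bus master status register is the simplex flag;
	 * write it back as zero, preserving only the drive DMA capable
	 * bits (5-6), to ask the controller to leave simplex mode.
	 */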
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}

unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */

	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}

#endif /* CONFIG_PCI */