scc_pata.c
/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003		Red Hat <alan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/init.h>

#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA            0x01b4

#define SCC_PATA_NAME           "scc IDE"

#define TDVHSEL_MASTER          0x00000001
#define TDVHSEL_SLAVE           0x00000004

#define MODE_JCUSFEN            0x00000080

#define CCKCTRL_ATARESET        0x00040000
#define CCKCTRL_BUFCNT          0x00020000
#define CCKCTRL_CRST            0x00010000
#define CCKCTRL_OCLKEN          0x00000100
#define CCKCTRL_ATACLKOEN       0x00000002
#define CCKCTRL_LCLKEN          0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

#define INTMASK_MSK             0xD1000012
#define INTSTS_SERROR		0x80000000
#define INTSTS_PRERR		0x40000000
#define INTSTS_RERR		0x10000000
#define INTSTS_ICERR		0x01000000
#define INTSTS_BMSINT		0x00000010
#define INTSTS_BMHE		0x00000008
#define INTSTS_IOIRQS           0x00000004
#define INTSTS_INTRQ            0x00000002
#define INTSTS_ACTEINT          0x00000001

#define ECMODE_VALUE 0x01

static struct scc_ports {
	unsigned long ctl, dma;
	struct ide_host *host;	/* for removing port from system */
} scc_ports[MAX_HWIFS];

/* PIO transfer mode  table */
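/* All timing tables below are indexed as [clock: 0 = 100MHz, 1 = 133MHz][mode number]. */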
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}    /* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}    /* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},   /* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}    /* 133MHz */
};


/* DMA transfer mode  table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},   /* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}    /* 133MHz */
};

/* JCSTWTM/JCSTWTS  */
static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},   /* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},   /* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}    /* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}    /* 133MHz */
};


static u8 scc_ide_inb(unsigned long port)
{
	u32 data = in_be32((void*)port);
	return (u8)data;
}

static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	out_be32((void *)hwif->io_ports.command_addr, cmd);
	eieio();
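	/* dummy read from the bus master block; presumably this flushes the
	 * posted MMIO write (assumption -- the same pattern is used in scc_set_irq()) */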
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

static u8 scc_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.status_addr);
}

static u8 scc_read_altstatus(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
}

static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)(hwif->dma_base + 4));
}

static void scc_set_irq(ide_hwif_t *hwif, int on)
{
	u8 ctl = ATA_DEVCTL_OBS;

	if (on == 4) { /* hack for SRST */
		ctl |= 4;
		on &= ~4;
	}

	ctl |= on ? 0 : 2;

	out_be32((void *)hwif->io_ports.ctl_addr, ctl);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

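/* The data register returns one 16-bit word per 32-bit MMIO access, so the
 * string I/O helpers below are emulated with repeated in_be32()/out_be32()
 * calls. */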
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_outb(u8 addr, unsigned long port)
{
	out_be32((void*)port, addr);
}

static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

/**
 *	scc_set_pio_mode	-	set host controller for PIO mode
 *	@drive: drive
 *	@pio: PIO mode number
 *
 *	Load the timing settings for this device mode into the
 *	controller.
 */

static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long piosht_port = ctl_base + 0x000;
	unsigned long pioct_port = ctl_base + 0x004;
	unsigned long reg;
	int offset;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1; /* 133MHz */
	} else {
		offset = 0; /* 100MHz */
	}
	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32((void __iomem *)piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32((void __iomem *)pioct_port, reg);
}

/**
 *	scc_set_dma_mode	-	set host controller for DMA mode
 *	@drive: drive
 *	@speed: DMA mode
 *
 *	Load the timing settings for this device mode into the
 *	controller.
 */

static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long mdmact_port = ctl_base + 0x008;
	unsigned long mcrcst_port = ctl_base + 0x00c;
	unsigned long sdmact_port = ctl_base + 0x010;
	unsigned long scrcst_port = ctl_base + 0x014;
	unsigned long udenvt_port = ctl_base + 0x018;
	unsigned long tdvhsel_port   = ctl_base + 0x020;
	int is_slave = (&hwif->drives[1] == drive);
	int offset, idx;
	unsigned long reg;
	unsigned long jcactsel;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1; /* 133MHz */
	} else {
		offset = 0; /* 100MHz */
	}

	idx = speed - XFER_UDMA_0;

	jcactsel = JCACTSELtbl[offset][idx];
	if (is_slave) {
		out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
		jcactsel = jcactsel << 2;
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
	} else {
		out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
	}
	reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
	out_be32((void __iomem *)udenvt_port, reg);
}

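/* Set or clear this drive's "DMA capable" flag (bit 5 for drive 0, bit 6 for
 * drive 1) in the BMIDE-style status register at dma_base + 4. */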
static void scc_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	scc_ide_outb(dma_stat, hwif->dma_base + 4);
}

/**
 *	scc_ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */

static int scc_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, reading);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
	drive->waiting_for_dma = 1;
	return 0;
}

static void scc_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd = scc_ide_inb(hwif->dma_base);

	/* start DMA */
	scc_ide_outb(dma_cmd | 1, hwif->dma_base);
	wmb();
}

static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	drive->waiting_for_dma = 0;
	/* get DMA command mode */
	dma_cmd = scc_ide_inb(hwif->dma_base);
	/* stop DMA */
	scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
	/* get DMA status */
	dma_stat = scc_ide_inb(hwif->dma_base + 4);
	/* clear the INTR & ERROR bits */
	scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	wmb();
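	/* dma_stat bits: 0 = active, 1 = error, 2 = interrupt; only 0x4
	 * (interrupt seen, no error, engine idle) counts as a clean completion */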
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

/**
 *	scc_dma_end	-	Stop DMA
 *	@drive: IDE drive
 *
 *	Check and clear INT Status register.
 *	Then call __scc_dma_end().
 */

static int scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	void __iomem *dma_base = (void __iomem *)hwif->dma_base;
	unsigned long intsts_port = hwif->dma_base + 0x014;
	u32 reg;
	int dma_stat, data_loss = 0;
	static int retry = 0;

	/* errata A308 workaround: Step5 (check data loss) */
	/* We don't check non ide_disk because it is limited to UDMA4 */
	if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	      & ATA_ERR) &&
	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
		reg = in_be32((void __iomem *)intsts_port);
		if (!(reg & INTSTS_ACTEINT)) {
			printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
			       drive->name);
			data_loss = 1;
			if (retry++) {
				struct request *rq = HWGROUP(drive)->rq;
				int unit;
				/* ERROR_RESET and drive->crc_count are needed
				 * to reduce DMA transfer mode in retry process.
				 */
				if (rq)
					rq->errors |= ERROR_RESET;
				for (unit = 0; unit < MAX_DRIVES; unit++) {
					ide_drive_t *drive = &hwif->drives[unit];
					drive->crc_count++;
				}
			}
		}
	}

	while (1) {
		reg = in_be32((void __iomem *)intsts_port);

		if (reg & INTSTS_SERROR) {
			printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_PRERR) {
			u32 maea0, maec0;
			unsigned long ctl_base = hwif->config_data;

			maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
			maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

			out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_RERR) {
			printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_ICERR) {
			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

			printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMSINT) {
			printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

			ide_do_reset(drive);
			continue;
		}

		if (reg & INTSTS_BMHE) {
			out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
			continue;
		}

		if (reg & INTSTS_ACTEINT) {
			out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
			continue;
		}

		if (reg & INTSTS_IOIRQS) {
			out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
			continue;
		}
		break;
	}

	dma_stat = __scc_dma_end(drive);
	if (data_loss)
		dma_stat |= 2; /* emulate DMA error (to retry command) */
	return dma_stat;
}

/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

	/* SCC errata A252,A308 workaround: Step4 */
	if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	     & ATA_ERR) &&
	    (int_stat & INTSTS_INTRQ))
		return 1;

	/* SCC errata A308 workaround: Step5 (polling IOIRQS) */
	if (int_stat & INTSTS_IOIRQS)
		return 1;

	return 0;
}

static u8 scc_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mask = hwif->ultra_mask;

	/* errata A308 workaround: limit non ide_disk drive to UDMA4 */
	if ((drive->media != ide_disk) && (mask & 0xE0)) {
		printk(KERN_INFO "%s: limit %s to UDMA4\n",
		       SCC_PATA_NAME, drive->name);
		mask = ATA_UDMA4;
	}

	return mask;
}

/**
 *	setup_mmio_scc	-	map CTRL/BMID region
 *	@dev: PCI device we are configuring
 *	@name: device name
 *
 */

static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	unsigned long ctl_base = pci_resource_start(dev, 0);
	unsigned long dma_base = pci_resource_start(dev, 1);
	unsigned long ctl_size = pci_resource_len(dev, 0);
	unsigned long dma_size = pci_resource_len(dev, 1);
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

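	/* reserve only BARs 0 (CTRL) and 1 (BMID); (1 << 2) - 1 == 0x3 */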
	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
		goto fail_0;

	if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	return -ENOMEM;
}

static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	int i, rc;

	memset(&hw, 0, sizeof(hw));
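	/* the nine task file registers live in the BMID region starting at
	 * offset 0x20, one register per 32-bit word */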
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;

	rc = ide_host_add(d, hws, &host);
	if (rc)
		return rc;

	ports->host = host;

	return 0;
}

/**
 *	init_setup_scc	-	set up an SCC PATA Controller
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Perform the initial set up for this device.
 */

static int __devinit init_setup_scc(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	unsigned long ctl_base;
	unsigned long dma_base;
	unsigned long cckctrl_port;
	unsigned long intmask_port;
	unsigned long mode_port;
	unsigned long ecmode_port;
	unsigned long dma_status_port;
	u32 reg = 0;
	struct scc_ports *ports;
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto end;

	rc = setup_mmio_scc(dev, d->name);
	if (rc < 0)
		goto end;

	ports = pci_get_drvdata(dev);
	ctl_base = ports->ctl;
	dma_base = ports->dma;
	cckctrl_port = ctl_base + 0xff0;
	intmask_port = dma_base + 0x010;
	mode_port = ctl_base + 0x024;
	ecmode_port = ctl_base + 0xf00;
	dma_status_port = dma_base + 0x004;

	/* controller initialization */
	reg = 0;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_ATACLKOEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_CRST;
	out_be32((void*)cckctrl_port, reg);

	for (;;) {
		reg = in_be32((void*)cckctrl_port);
		if (reg & CCKCTRL_CRST)
			break;
		udelay(5000);
	}

	reg |= CCKCTRL_ATARESET;
	out_be32((void*)cckctrl_port, reg);

	out_be32((void*)ecmode_port, ECMODE_VALUE);
	out_be32((void*)mode_port, MODE_JCUSFEN);
	out_be32((void*)intmask_port, INTMASK_MSK);

	rc = scc_ide_setup_pci_device(dev, d);

 end:
	return rc;
}

static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		out_be32((void *)io_ports->data_addr,
			 (tf->hob_data << 8) | tf->data);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		scc_ide_outb((tf->device & HIHI) | drive->select.all,
			     io_ports->device_addr);
}

static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = (u16)in_be32((void *)io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
		tf->feature = scc_ide_inb(io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal   = scc_ide_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam   = scc_ide_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah   = scc_ide_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
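		/* set the HOB bit in device control so the high-order bytes
		 * of the taskfile can be read back */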
		scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect   = scc_ide_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal    = scc_ide_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam    = scc_ide_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah    = scc_ide_inb(io_ports->lbah_addr);
	}
}

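/* PIO data transfer helpers: len is incremented first so that an odd byte
 * count still transfers the final byte as part of a full 16-bit word. */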
static void scc_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_insl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}

static void scc_output_data(ide_drive_t *drive,  struct request *rq,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_outsl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}

/**
 *	init_mmio_iops_scc	-	set up the iops for MMIO
 *	@hwif: interface to set up
 *
 */

static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct scc_ports *ports = pci_get_drvdata(dev);
	unsigned long dma_base = ports->dma;

	ide_set_hwifdata(hwif, ports);

	hwif->dma_base = dma_base;
	hwif->config_data = ports->ctl;
}

/**
 *	init_iops_scc	-	set up iops
 *	@hwif: interface to set up
 *
 *	Do the basic setup for the SCC hardware interface
 *	and then do the MMIO setup.
 */

static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	hwif->hwif_data = NULL;
	if (pci_get_drvdata(dev) == NULL)
		return;
	init_mmio_iops_scc(hwif);
}

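/* cable type is not probed; an 80-wire cable is reported unconditionally */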
static u8 scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}

/**
 *	init_hwif_scc	-	set up hwif
 *	@hwif: interface to set up
 *
 *	We do the basic set up of the interface structure. The SCC
 *	requires several custom handlers so we override the default
 *	ide DMA handlers appropriately.
 */

static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
	struct scc_ports *ports = ide_get_hwifdata(hwif);

	/* PTERADD */
	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

	if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
		hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
	else
		hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}

static const struct ide_tp_ops scc_tp_ops = {
	.exec_command		= scc_exec_command,
	.read_status		= scc_read_status,
	.read_altstatus		= scc_read_altstatus,
	.read_sff_dma_status	= scc_read_sff_dma_status,

	.set_irq		= scc_set_irq,

	.tf_load		= scc_tf_load,
	.tf_read		= scc_tf_read,

	.input_data		= scc_input_data,
	.output_data		= scc_output_data,
};

static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode		= scc_set_pio_mode,
	.set_dma_mode		= scc_set_dma_mode,
	.udma_filter		= scc_udma_filter,
	.cable_detect		= scc_cable_detect,
};

static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set		= scc_dma_host_set,
	.dma_setup		= scc_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= scc_dma_start,
	.dma_end		= scc_dma_end,
	.dma_test_irq		= scc_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
};

#define DECLARE_SCC_DEV(name_str)			\
  {							\
      .name		= name_str,			\
      .init_iops	= init_iops_scc,		\
      .init_hwif	= init_hwif_scc,		\
      .tp_ops		= &scc_tp_ops,		\
      .port_ops		= &scc_port_ops,		\
      .dma_ops		= &scc_dma_ops,			\
      .host_flags	= IDE_HFLAG_SINGLE,		\
      .pio_mask		= ATA_PIO4,			\
  }

static const struct ide_port_info scc_chipsets[] __devinitdata = {
	/* 0 */ DECLARE_SCC_DEV("sccIDE"),
};

/**
 *	scc_init_one	-	pci layer discovery entry
 *	@dev: PCI device
 *	@id: ident table entry
 *
 *	Called by the PCI code when it finds an SCC PATA controller.
 *	We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}

/**
 *	scc_remove	-	pci layer remove entry
 *	@dev: PCI device
 *
 *	Called by the PCI code when it removes an SCC PATA controller.
 */

static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host = ports->host;
	ide_hwif_t *hwif = host->ports[0];

	if (hwif->dmatable_cpu) {
		pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}

	ide_host_remove(host);

	iounmap((void*)ports->dma);
	iounmap((void*)ports->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(ports, 0, sizeof(*ports));
}

static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver driver = {
	.name = "SCC IDE",
	.id_table = scc_pci_tbl,
	.probe = scc_init_one,
	.remove = __devexit_p(scc_remove),
};

static int __init scc_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}

module_init(scc_ide_init);
/* -- No exit code?
static void scc_ide_exit(void)
{
	ide_pci_unregister_driver(&driver);
}
module_exit(scc_ide_exit);
 */


MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");