/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS		= 0x1d58,
	PCI_IRQ_MASK_OFS		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
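
/*
 * Illustration (not from the vendor docs): because the CRQB ring is
 * 1KB-aligned, bits 9:0 of crqb_dma are always zero, so the driver can
 * OR the software producer index into the low bits of the same register
 * that carries the ring base, as mv_qc_issue() below does:
 *
 *	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
 *		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 */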

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};
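
/*
 * Layout check (illustrative): 4 + 4 + 2 + (2 * 11) = 32 bytes, matching
 * the "32B" in the comment above and MV_CRQB_Q_SZ == 32 * MV_MAX_Q_DEPTH.
 */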

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
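
/*
 * Sketch (assuming the usual module_param() registration for "msi"
 * elsewhere in this file): MSI would be enabled at load time with
 *
 *	modprobe sata_mv msi=1
 */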


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
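
/*
 * Sketch of the expected call site (illustrative): run once during
 * probe, before any coherent DMA memory is allocated, e.g.:
 *
 *	rc = pci_go_64(pdev);
 *	if (rc)
 *		return rc;
 */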

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
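
/*
 * Example usage, as in mv_start_dma() below: the dummy readl() forces
 * any posted PCI write out to the chip, so the EDMA enable has landed
 * before the caller proceeds:
 *
 *	writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 */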

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
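
/*
 * Worked example (illustrative): on an 8-port, dual-HC adapter, port 6
 * yields mv_hc_from_port(6) == 6 >> 2 == 1 and
 * mv_hardport_from_port(6) == 6 & 3 == 2, i.e. the third hard port of
 * the second host controller.
 */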

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
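
/*
 * Worked example (illustrative): with req_idx == 5, the value OR'd into
 * EDMA_REQ_Q_IN_PTR_OFS is 5 << EDMA_REQ_Q_PTR_SHIFT == 0xa0, which
 * lands in the hardware's producer-index field while the upper bits
 * continue to hold the ring base (BASE_LO).
 */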

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @hpriv: host private data
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
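
/*
 * Note (with the standard libata SCR numbering, where SCR_STATUS,
 * SCR_ERROR and SCR_CONTROL are 0, 1 and 2): SCR_CONTROL maps to
 * SATA_STATUS_OFS + 8, consistent with the "ctrl, err regs follow
 * status" comment above; SCR_ACTIVE sits apart at SATA_ACTIVE_OFS.
 */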

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mv_ap_base(ap) + ofs);
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
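
/*
 * Illustration: a command scattered over two DMA segments produces two
 * 16-byte ePRDs; only the last one has EPRD_FLAG_END_OF_TBL set in
 * flags_size, which is how the EDMA engine finds the end of the table.
 */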

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
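
/*
 * Illustration: mv_qc_prep() below emits the final register write of a
 * CRQB as
 *
 *	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);
 *
 * which packs tf->command | (ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT) |
 * CRQB_CMD_CS | CRQB_CMD_LAST, stored little-endian.
 */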

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, or NULL if none
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
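
/*
 * Worked example (illustrative): HC0_IRQ_PEND is 0x1ff and HC_SHIFT is
 * 9, so the per-HC slice of the main cause register for hc == 1 is
 * irq_stat & (0x1ff << 9), i.e. bits 9-17, matching the comments next
 * to HC_MAIN_IRQ_CAUSE_OFS above.
 */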

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
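/*
 * Worked example (illustrative): for port 5, mv_hc_base_from_port()
 * returns HC1's register block and mv_hardport_from_port(5) == 1, so
 * the PHY registers resolve to hc_mmio + 2 * 0x100 = hc_mmio + 0x200.
 */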

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
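/*
 * Illustrative mapping: SCR_STATUS, SCR_ERROR and SCR_CONTROL are 0,
 * 1 and 2 in libata, so they land at byte offsets 0x0, 0x4 and 0x8 of
 * the PHY block; any other register yields the 0xffffffffU sentinel
 * that the accessors below treat as "no such register".
 */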

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
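	/* FIXME: '|= ~(1 << 0)' sets every bit except bit 0, whereas
	 * mv5_reset_bus() sets only bit 0; was '(1 << 0)' intended?
	 */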
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
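/*
 * The bounded polls above could be factored into a helper like the
 * sketch below (illustrative only; the name and delay budget are
 * assumptions, and the driver deliberately open-codes each loop):
 *
 *	static int mv_wait_bit_set(void __iomem *reg, u32 mask, int usecs)
 *	{
 *		while (usecs--) {
 *			if (readl(reg) & mask)
 *				return 0;
 *			udelay(1);
 *		}
 *		return -ETIMEDOUT;
 *	}
 */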

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: resulting device class
 *      @deadline: deadline jiffies for the operation
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2155

2156 2157
	/* Issue COMRESET via SControl */
comreset_retry:
2158
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2159
	msleep(1);
2160

2161
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2162
	msleep(20);
2163

2164
	do {
2165
		sata_scr_read(ap, SCR_STATUS, &sstatus);
2166
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2167
			break;
2168

2169
		msleep(1);
2170
	} while (time_before(jiffies, deadline));
2171

2172
	/* work around errata */
2173
	if (IS_GEN_II(hpriv) &&
2174 2175 2176
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;
J
Jeff Garzik 已提交
2177 2178

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
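/*
 * Illustrative SStatus decode for the COMRESET loop above, per the
 * SATA spec: bits 3:0 are DET, 7:4 are SPD, 11:8 are IPM.  0x113 thus
 * reads "device present, Gen1, interface active" and 0x123 the same
 * at Gen2 speed; DET == 0 (nothing attached) or DET == 3 (link
 * established) are the loop's exit conditions.
 */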

static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}

static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
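/*
 * Worked example (illustrative) of the shift/mask math above: port 5
 * sits on HC1, so shift = 5 * 2 + 1 = 11 and mask = 0x3 << 11, which
 * clears that port's "err" and "done" bits in HC_MAIN_IRQ_MASK_OFS in
 * a single read-modify-write.
 */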

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
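/*
 * Illustrative layout note: the shadow block exposes the taskfile on
 * u32 boundaries, so with ATA_REG_DATA == 0 the data register sits at
 * shd_base + 0x0, error/feature at shd_base + 0x4, and so on up the
 * taskfile to status/command at shd_base + 0x1c.
 */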

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class byte to report whether the chip
	 * presents itself as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
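/* Usage sketch (illustrative): "modprobe sata_mv msi=1" requests MSI;
 * with the default of 0 the driver stays on legacy INTx interrupts.
 */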

module_init(mv_init);
module_exit(mv_exit);