sata_mv.c 95.3 KB
Newer Older
1 2 3
/*
 * sata_mv.c - Marvell SATA support
 *
M
Mark Lord 已提交
4
 * Copyright 2008: Marvell Corporation, all rights reserved.
5
 * Copyright 2005: EMC Corporation, all rights reserved.
6
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

J
Jeff Garzik 已提交
25
/*
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is quite often not
 *       worth the latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */
J
Jeff Garzik 已提交
50

51 52 53 54 55 56 57
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
58
#include <linux/dmapool.h>
59
#include <linux/dma-mapping.h>
60
#include <linux/device.h>
S
Saeed Bishara 已提交
61 62
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
63
#include <linux/mbus.h>
64
#include <linux/bitops.h>
65
#include <scsi/scsi_host.h>
66
#include <scsi/scsi_cmnd.h>
J
Jeff Garzik 已提交
67
#include <scsi/scsi_device.h>
68 69 70
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
M
Mark Lord 已提交
71
#define DRV_VERSION	"1.25"
72 73 74 75 76 77 78 79 80 81 82 83

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
84 85 86 87 88 89
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

90
	MV_SATAHC0_REG_BASE	= 0x20000,
M
Mark Lord 已提交
91 92 93
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,
94 95 96 97 98 99

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

100 101 102 103 104 105 106 107 108
	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
109
	MV_MAX_SG_CT		= 256,
110 111
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

M
Mark Lord 已提交
112
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
113
	MV_PORT_HC_SHIFT	= 2,
M
Mark Lord 已提交
114 115 116
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
117 118 119 120

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
S
Saeed Bishara 已提交
121

122
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
123
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
M
Mark Lord 已提交
124

125
	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
126

127
	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | MV_FLAG_IRQ_COALESCE |
M
Mark Lord 已提交
128
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
129 130 131
				  ATA_FLAG_NCQ | ATA_FLAG_NO_ATAPI,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
M
Mark Lord 已提交
132

133 134
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
135
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
M
Mark Lord 已提交
136
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
137
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
138 139 140 141 142
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
143 144
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
145 146 147

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

148 149
	/* PCI interface registers */

150
	PCI_COMMAND_OFS		= 0xc00,
M
Mark Lord 已提交
151
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
152

153 154 155 156 157
	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

M
Mark Lord 已提交
158 159 160
	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

161 162 163 164
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
M
Mark Lord 已提交
165
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
166 167 168 169 170
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

171 172
	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
173 174
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

175 176
	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
M
Mark Lord 已提交
177
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
178

179 180 181 182 183
	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
M
Mark Lord 已提交
184 185
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
186 187 188 189 190
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
191 192
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
193 194 195 196 197
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
198
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
M
Mark Lord 已提交
199
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
200 201 202 203 204

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
M
Mark Lord 已提交
205 206
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
207 208 209
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
210 211
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
212 213 214 215

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
M
Mark Lord 已提交
216
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
217
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */
M
Mark Lord 已提交
218

M
Mark Lord 已提交
219
	LTMODE_OFS		= 0x30c,
M
Mark Lord 已提交
220 221
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

222
	PHY_MODE3		= 0x310,
223
	PHY_MODE4		= 0x314,
M
Mark Lord 已提交
224 225 226 227 228
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

229
	PHY_MODE2		= 0x330,
M
Mark Lord 已提交
230
	SATA_IFCTL_OFS		= 0x344,
M
Mark Lord 已提交
231
	SATA_TESTCTL_OFS	= 0x348,
M
Mark Lord 已提交
232 233
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
M
Mark Lord 已提交
234

M
Mark Lord 已提交
235 236 237
	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
M
Mark Lord 已提交
238

239
	MV5_PHY_MODE		= 0x74,
M
Mark Lord 已提交
240 241 242
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,
243 244

	MV_M2_PREAMP_MASK	= 0x7e0,
245 246 247

	/* Port registers */
	EDMA_CFG_OFS		= 0,
M
Mark Lord 已提交
248 249 250 251 252
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
M
Mark Lord 已提交
253 254
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
255 256 257

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
258 259 260 261 262 263
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
264 265
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
266
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
267
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
268 269 270 271
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
M
Mark Lord 已提交
272

273
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
M
Mark Lord 已提交
274 275 276 277 278
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

279
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
M
Mark Lord 已提交
280

281
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
M
Mark Lord 已提交
282 283 284 285 286 287
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

288
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
M
Mark Lord 已提交
289

290
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
291 292
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
M
Mark Lord 已提交
293 294 295 296

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
297
				  EDMA_ERR_LNK_CTRL_TX,
M
Mark Lord 已提交
298

299 300 301 302 303 304
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
305
				  EDMA_ERR_CRQB_PAR |
306 307 308 309 310 311 312
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
M
Mark Lord 已提交
313

314 315 316 317 318 319 320
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
321
				  EDMA_ERR_CRQB_PAR |
322 323 324
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
325

326 327 328 329 330 331 332 333 334 335 336
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

J
Jeff Garzik 已提交
337 338 339
	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
M
Mark Lord 已提交
340 341 342 343 344
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
345

M
Mark Lord 已提交
346 347 348 349
	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
350

351 352
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
353 354 355 356
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
J
Jeff Garzik 已提交
357 358 359
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
360
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
M
Mark Lord 已提交
361
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
M
Mark Lord 已提交
362
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
363

364
	/* Port private flags (pp_flags) */
J
Jeff Garzik 已提交
365
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
366
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
M
Mark Lord 已提交
367
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
M
Mark Lord 已提交
368
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
369 370
};

371 372
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
373
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
M
Mark Lord 已提交
374
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
M
Mark Lord 已提交
375
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
376

377 378 379
#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

J
Jeff Garzik 已提交
380
enum {
J
Jeff Garzik 已提交
381 382 383 384
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,
J
Jeff Garzik 已提交
385

J
Jeff Garzik 已提交
386 387 388
	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
J
Jeff Garzik 已提交
389 390
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

J
Jeff Garzik 已提交
391
	/* ditto, for response queue */
J
Jeff Garzik 已提交
392 393 394
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

395 396 397 398 399 400
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
401 402
	chip_6042,
	chip_7042,
S
Saeed Bishara 已提交
403
	chip_soc,
404 405
};

406 407
/* Command ReQuest Block: 32B */
struct mv_crqb {
M
Mark Lord 已提交
408 409 410 411
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
412
};
413

414
struct mv_crqb_iie {
M
Mark Lord 已提交
415 416 417 418 419
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
420 421
};

422 423
/* Command ResPonse Block: 8B */
struct mv_crpb {
M
Mark Lord 已提交
424 425 426
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
427 428
};

429 430
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
M
Mark Lord 已提交
431 432 433 434
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
435
};
436

437 438 439 440 441
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
442 443
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
444 445 446 447

	unsigned int		req_idx;
	unsigned int		resp_idx;

448
	u32			pp_flags;
M
Mark Lord 已提交
449
	unsigned int		delayed_eh_pmp_map;
450 451
};

452 453 454 455 456
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

457 458
struct mv_host_priv {
	u32			hp_flags;
459
	u32			main_irq_mask;
460 461
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
S
Saeed Bishara 已提交
462 463
	int			n_ports;
	void __iomem		*base;
464 465
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
466 467 468
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
469 470 471 472 473 474 475 476
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
477 478
};

479
struct mv_hw_ops {
480 481
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
482 483 484
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
485 486
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
487
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
S
Saeed Bishara 已提交
488
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
489 490
};

T
Tejun Heo 已提交
491 492 493 494
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
495 496
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
M
Mark Lord 已提交
497
static int mv_qc_defer(struct ata_queued_cmd *qc);
498
static void mv_qc_prep(struct ata_queued_cmd *qc);
499
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
500
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
501 502
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
503 504
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
505
static void mv6_dev_config(struct ata_device *dev);
506

507 508
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
509 510 511
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
512 513
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
514
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
S
Saeed Bishara 已提交
515
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
516

517 518
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
519 520 521
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
522 523
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
524
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
S
Saeed Bishara 已提交
525 526 527 528 529 530 531 532 533
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
S
Saeed Bishara 已提交
534
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
M
Mark Lord 已提交
535
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
536
			     unsigned int port_no);
M
Mark Lord 已提交
537
static int mv_stop_edma(struct ata_port *ap);
M
Mark Lord 已提交
538
static int mv_stop_edma_engine(void __iomem *port_mmio);
539
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
540

541 542 543 544 545
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
M
Mark Lord 已提交
546
static void mv_pmp_error_handler(struct ata_port *ap);
547 548
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);
549

550 551 552 553
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
554
static struct scsi_host_template mv5_sht = {
555
	ATA_BASE_SHT(DRV_NAME),
J
Jeff Garzik 已提交
556
	.sg_tablesize		= MV_MAX_SG_CT / 2,
557 558 559 560
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
561
	ATA_NCQ_SHT(DRV_NAME),
M
Mark Lord 已提交
562
	.can_queue		= MV_MAX_Q_DEPTH - 1,
J
Jeff Garzik 已提交
563
	.sg_tablesize		= MV_MAX_SG_CT / 2,
564 565 566
	.dma_boundary		= MV_DMA_BOUNDARY,
};

567 568
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,
569

M
Mark Lord 已提交
570
	.qc_defer		= mv_qc_defer,
571 572 573
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

574 575
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
576 577
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
578
	.post_internal_cmd	= ATA_OP_NULL,
579

580 581 582 583 584 585 586
	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

587 588
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
589
	.dev_config             = mv6_dev_config,
590 591 592
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

593 594 595
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
M
Mark Lord 已提交
596
	.error_handler		= mv_pmp_error_handler,
597 598
};

599 600 601
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
602 603 604
	.qc_prep		= mv_qc_prep_iie,
};

605
static const struct ata_port_info mv_port_info[] = {
606
	{  /* chip_504x */
607
		.flags		= MV_GEN_I_FLAGS,
608
		.pio_mask	= 0x1f,	/* pio0-4 */
609
		.udma_mask	= ATA_UDMA6,
610
		.port_ops	= &mv5_ops,
611 612
	},
	{  /* chip_508x */
613
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
614
		.pio_mask	= 0x1f,	/* pio0-4 */
615
		.udma_mask	= ATA_UDMA6,
616
		.port_ops	= &mv5_ops,
617
	},
618
	{  /* chip_5080 */
619
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
620
		.pio_mask	= 0x1f,	/* pio0-4 */
621
		.udma_mask	= ATA_UDMA6,
622
		.port_ops	= &mv5_ops,
623
	},
624
	{  /* chip_604x */
625
		.flags		= MV_GEN_II_FLAGS,
626
		.pio_mask	= 0x1f,	/* pio0-4 */
627
		.udma_mask	= ATA_UDMA6,
628
		.port_ops	= &mv6_ops,
629 630
	},
	{  /* chip_608x */
631
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
632
		.pio_mask	= 0x1f,	/* pio0-4 */
633
		.udma_mask	= ATA_UDMA6,
634
		.port_ops	= &mv6_ops,
635
	},
636
	{  /* chip_6042 */
637
		.flags		= MV_GEN_IIE_FLAGS,
638
		.pio_mask	= 0x1f,	/* pio0-4 */
639
		.udma_mask	= ATA_UDMA6,
640 641 642
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
643
		.flags		= MV_GEN_IIE_FLAGS,
644
		.pio_mask	= 0x1f,	/* pio0-4 */
645
		.udma_mask	= ATA_UDMA6,
646 647
		.port_ops	= &mv_iie_ops,
	},
S
Saeed Bishara 已提交
648
	{  /* chip_soc */
649
		.flags		= MV_GEN_IIE_FLAGS,
M
Mark Lord 已提交
650 651 652
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
S
Saeed Bishara 已提交
653
	},
654 655
};

656
static const struct pci_device_id mv_pci_tbl[] = {
657 658 659 660
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
661 662
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
663 664
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
665 666 667 668 669 670 671 672 673

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

674 675 676
	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

677
	/* Marvell 7042 support */
M
Morrison, Tom 已提交
678 679
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

680 681 682 683
	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

684
	{ }			/* terminate list */
685 686
};

687 688 689 690 691
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
692 693
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
694 695 696 697 698 699 700
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
701 702
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
703 704
};

S
Saeed Bishara 已提交
705 706 707 708 709 710 711 712 713
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

714 715 716 717 718 719 720 721 722 723
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

724 725 726 727 728 729 730 731 732 733
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

734 735 736 737 738 739
/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
740 741
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
742 743 744 745 746 747 748 749 750 751
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

M
Mark Lord 已提交
752 753 754 755 756
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

757 758 759 760 761 762
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

763 764
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
765
	return  mv_hc_base_from_port(base, port) +
766
		MV_SATAHC_ARBTR_REG_SZ +
767
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
768 769
}

M
Mark Lord 已提交
770 771 772 773 774 775 776 777
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

S
Saeed Bishara 已提交
778 779 780 781 782 783
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

784 785
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
S
Saeed Bishara 已提交
786
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
787 788
}

J
Jeff Garzik 已提交
789
static inline int mv_get_hc_count(unsigned long port_flags)
790
{
J
Jeff Garzik 已提交
791
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
792 793
}

794 795 796 797
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
798 799
	u32 index;

800 801 802
	/*
	 * initialize request queue
	 */
803 804
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
805

806 807
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
808
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
809
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
810
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
811 812 813 814

	/*
	 * initialize response queue
	 */
815 816
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
817

818 819
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
820
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
821
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
822 823 824
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

825 826 827 828 829 830
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

831
	old_mask = hpriv->main_irq_mask;
832
	new_mask = (old_mask & ~disable_bits) | enable_bits;
833 834
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
835
		writelfl(new_mask, hpriv->main_irq_mask_addr);
836
	}
837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}

876
/**
877
 *      mv_start_edma - Enable eDMA engine
878 879 880
 *      @base: port base address
 *      @pp: port private data
 *
881 882
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
883 884 885 886
 *
 *      LOCKING:
 *      Inherited from caller.
 */
887
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
888
			 struct mv_port_priv *pp, u8 protocol)
889
{
890 891 892 893 894
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
M
Mark Lord 已提交
895
			mv_stop_edma(ap);
896
	}
897
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
M
Mark Lord 已提交
898 899
		struct mv_host_priv *hpriv = ap->host->private_data;

900
		mv_edma_cfg(ap, want_ncq, 1);
M
Mark Lord 已提交
901

M
Mark Lord 已提交
902
		mv_set_edma_ptrs(port_mmio, hpriv, pp);
903
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
904

M
Mark Lord 已提交
905
		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
906 907
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
908 909
}

M
Mark Lord 已提交
910 911 912 913 914 915 916 917 918
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
919 920 921 922
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
M
Mark Lord 已提交
923 924 925 926 927 928 929 930 931 932
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

933
/**
M
Mark Lord 已提交
934
 *      mv_stop_edma_engine - Disable eDMA engine
M
Mark Lord 已提交
935
 *      @port_mmio: io base address
936 937 938 939
 *
 *      LOCKING:
 *      Inherited from caller.
 */
M
Mark Lord 已提交
940
static int mv_stop_edma_engine(void __iomem *port_mmio)
941
{
M
Mark Lord 已提交
942
	int i;
943

M
Mark Lord 已提交
944 945
	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
946

M
Mark Lord 已提交
947 948 949
	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
950
		if (!(reg & EDMA_EN))
M
Mark Lord 已提交
951 952
			return 0;
		udelay(10);
953
	}
M
Mark Lord 已提交
954
	return -EIO;
955 956
}

M
Mark Lord 已提交
957
static int mv_stop_edma(struct ata_port *ap)
J
Jeff Garzik 已提交
958
{
M
Mark Lord 已提交
959 960
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
J
Jeff Garzik 已提交
961

M
Mark Lord 已提交
962 963 964
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
M
Mark Lord 已提交
965
	mv_wait_for_edma_empty_idle(ap);
M
Mark Lord 已提交
966 967 968 969 970
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
J
Jeff Garzik 已提交
971 972
}

J
Jeff Garzik 已提交
973
#ifdef ATA_DEBUG
974
static void mv_dump_mem(void __iomem *start, unsigned bytes)
975
{
976 977 978 979
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
980
			printk("%08x ", readl(start + b));
981 982 983 984 985
			b += sizeof(u32);
		}
		printk("\n");
	}
}
J
Jeff Garzik 已提交
986 987
#endif

988 989 990 991 992 993 994 995
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
996 997
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
998 999 1000 1001 1002 1003 1004 1005 1006 1007
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
1008
	void __iomem *hc_base = mv_hc_base(mmio_base,
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
1022
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1035
		hc_base = mv_hc_base(mmio_base, hc);
1036 1037 1038 1039 1040
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
1041
		DPRINTK("EDMA regs (port %i):\n", p);
1042
		mv_dump_mem(port_base, 0x54);
1043
		DPRINTK("SATA regs (port %i):\n", p);
1044 1045 1046
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

T
Tejun Heo 已提交
1069
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1070 1071 1072
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

1073
	if (ofs != 0xffffffffU) {
T
Tejun Heo 已提交
1074
		*val = readl(mv_ap_base(link->ap) + ofs);
1075 1076 1077
		return 0;
	} else
		return -EINVAL;
1078 1079
}

T
Tejun Heo 已提交
1080
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1081 1082 1083
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

1084
	if (ofs != 0xffffffffU) {
T
Tejun Heo 已提交
1085
		writelfl(val, mv_ap_base(link->ap) + ofs);
1086 1087 1088
		return 0;
	} else
		return -EINVAL;
1089 1090
}

1091 1092 1093
static void mv6_dev_config(struct ata_device *adev)
{
	/*
1094 1095 1096 1097
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
1098
	 */
1099
	if (adev->flags & ATA_DFLAG_NCQ) {
M
Mark Lord 已提交
1100
		if (sata_pmp_attached(adev->link->ap)) {
1101
			adev->flags &= ~ATA_DFLAG_NCQ;
M
Mark Lord 已提交
1102 1103 1104
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
1105
	}
1106 1107
}

M
Mark Lord 已提交
1108 1109 1110 1111 1112 1113
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

M
Mark Lord 已提交
1114 1115 1116 1117 1118 1119
	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
M
Mark Lord 已提交
1120 1121 1122 1123 1124 1125
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

1126 1127 1128 1129 1130 1131 1132 1133 1134 1135
	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

M
Mark Lord 已提交
1136 1137 1138
	return ATA_DEFER_PORT;
}

M
Mark Lord 已提交
1139
static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
1140
{
M
Mark Lord 已提交
1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
1156 1157 1158 1159
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |=  FISCFG_WAIT_DEV_ERR;
1160
	}
M
Mark Lord 已提交
1161

M
Mark Lord 已提交
1162 1163
	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
1164 1165
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
M
Mark Lord 已提交
1166 1167
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
1168 1169
}

1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

1185
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1186
{
M
Mark Lord 已提交
1187
	u32 cfg;
M
Mark Lord 已提交
1188 1189 1190
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);
1191 1192

	/* set up non-NCQ EDMA configuration */
M
Mark Lord 已提交
1193
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1194
	pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN);
1195

M
Mark Lord 已提交
1196
	if (IS_GEN_I(hpriv))
1197 1198
		cfg |= (1 << 8);	/* enab config burst size mask */

1199
	else if (IS_GEN_II(hpriv)) {
1200
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1201
		mv_60x1_errata_sata25(ap, want_ncq);
1202

1203
	} else if (IS_GEN_IIE(hpriv)) {
M
Mark Lord 已提交
1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

1222
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1223 1224 1225 1226 1227
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
M
Mark Lord 已提交
1228 1229
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1230 1231
	}

1232 1233 1234
	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1235
	}
1236

1237 1238 1239
	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

1240 1241 1242 1243
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
1244
	int tag;
1245 1246 1247 1248 1249 1250 1251 1252 1253

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
1266 1267 1268
	}
}

1269 1270 1271 1272 1273 1274 1275 1276 1277 1278
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
1279 1280
static int mv_port_start(struct ata_port *ap)
{
J
Jeff Garzik 已提交
1281 1282
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
1283
	struct mv_port_priv *pp;
1284
	int tag;
1285

1286
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1287
	if (!pp)
1288
		return -ENOMEM;
1289
	ap->private_data = pp;
1290

1291 1292 1293 1294
	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1295

1296 1297 1298 1299
	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1300

1301 1302 1303
	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
1319
	return 0;
1320 1321 1322 1323

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
1324 1325
}

1326 1327 1328 1329 1330 1331 1332
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
J
Jeff Garzik 已提交
1333
 *      This routine uses the host lock to protect the DMA stop.
1334
 */
1335 1336
static void mv_port_stop(struct ata_port *ap)
{
M
Mark Lord 已提交
1337
	mv_stop_edma(ap);
M
Mark Lord 已提交
1338
	mv_enable_port_irqs(ap, 0);
1339
	mv_port_free_dma_mem(ap);
1340 1341
}

1342 1343 1344 1345 1346 1347 1348 1349 1350
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
J
Jeff Garzik 已提交
1351
static void mv_fill_sg(struct ata_queued_cmd *qc)
1352 1353
{
	struct mv_port_priv *pp = qc->ap->private_data;
1354
	struct scatterlist *sg;
J
Jeff Garzik 已提交
1355
	struct mv_sg *mv_sg, *last_sg = NULL;
T
Tejun Heo 已提交
1356
	unsigned int si;
1357

1358
	mv_sg = pp->sg_tbl[qc->tag];
T
Tejun Heo 已提交
1359
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1360 1361
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);
1362

1363 1364 1365
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;
1366

1367 1368 1369 1370 1371
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
J
Jeff Garzik 已提交
1372
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1373 1374 1375 1376

			sg_len -= len;
			addr += len;

J
Jeff Garzik 已提交
1377
			last_sg = mv_sg;
1378 1379
			mv_sg++;
		}
1380
	}
J
Jeff Garzik 已提交
1381 1382 1383

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1384 1385
}

1386
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1387
{
M
Mark Lord 已提交
1388
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1389
		(last ? CRQB_CMD_LAST : 0);
M
Mark Lord 已提交
1390
	*cmdw = cpu_to_le16(tmp);
1391 1392
}

1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
1405 1406 1407 1408
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
M
Mark Lord 已提交
1409
	__le16 *cw;
1410 1411
	struct ata_taskfile *tf;
	u16 flags = 0;
1412
	unsigned in_index;
1413

M
Mark Lord 已提交
1414 1415
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
1416
		return;
1417

1418 1419
	/* Fill in command request block
	 */
1420
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1421
		flags |= CRQB_FLAG_READ;
1422
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1423
	flags |= qc->tag << CRQB_TAG_SHIFT;
1424
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1425

1426
	/* get current queue index from software */
1427
	in_index = pp->req_idx;
1428 1429

	pp->crqb[in_index].sg_addr =
1430
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1431
	pp->crqb[in_index].sg_addr_hi =
1432
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1433
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1434

1435
	cw = &pp->crqb[in_index].ata_cmd[0];
1436 1437 1438 1439 1440 1441
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
1442 1443
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
1444
	 */
1445 1446 1447 1448 1449
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
1450
	case ATA_CMD_WRITE_FUA_EXT:
1451 1452 1453 1454
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
1455
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
1503
	unsigned in_index;
1504 1505
	u32 flags = 0;

M
Mark Lord 已提交
1506 1507
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
1508 1509
		return;

M
Mark Lord 已提交
1510
	/* Fill in Gen IIE command request block */
1511 1512 1513
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

1514
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1515
	flags |= qc->tag << CRQB_TAG_SHIFT;
1516
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1517
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1518

1519
	/* get current queue index from software */
1520
	in_index = pp->req_idx;
1521 1522

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1523 1524
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1550 1551 1552 1553
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs = DONE_IRQ | ERR_IRQ;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* drop through */
	case ATAPI_PROT_PIO:
		port_irqs = ERR_IRQ;	/* leave DONE_IRQ masked for PIO */
		/* drop through */
	default:
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_edma_cfg(ap, 0, 0);
		mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}
}
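
/*
 * Sketch of the request-queue "kick" performed above for DMA/NCQ (the
 * index value is illustrative, and the shift width of 5 is an assumption
 * for the example): EDMA_REQ_Q_IN_PTR holds the queue base address in
 * its high bits and the producer index in a small shifted field, so a
 * single write preserves the base while publishing the new index:
 *
 *	pp->req_idx = 5;
 *	in_index = 5 << EDMA_REQ_Q_PTR_SHIFT;		// 0x000000a0
 *	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, ...);
 */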

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			qc = NULL;
		else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
			qc = NULL;
	}
	return qc;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action   |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Most cases require a full reset of the chip's state machine,
 *      which also performs a COMRESET.
 *      Also, if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
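
/*
 * The response queue above is a classic single-producer ring: the EDMA
 * advances the IN pointer as CRPBs are written, the driver walks
 * resp_idx up to that index, and only then publishes the new OUT
 * pointer so the hardware can reuse the consumed slots.  An illustrative
 * wrap case, assuming a queue depth of 32: with resp_idx == 30 and
 * in_index == 1, the loop consumes entries 30, 31, and 0 before the
 * single OUT-pointer write.
 */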

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @main_irq_cause: Main interrupt cause register for the chip.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}
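
/*
 * Bit arithmetic used in mv_host_intr(), sketched with illustrative
 * values: the main cause register carries two bits per port
 * (DONE_IRQ | ERR_IRQ), so port p within an hc contributes at bit
 * (p * 2) of hc_cause, while the hc_irq_cause ack register carries a
 * (DMA_IRQ | DEV_IRQ) pair shifted by p.  With ports 0 and 2 pending:
 *
 *	hc_cause = (DONE_IRQ|ERR_IRQ) << 0 | (DONE_IRQ|ERR_IRQ) << 4;
 *	ack_irqs = (DMA_IRQ|DEV_IRQ)  << 0 | (DMA_IRQ|DEV_IRQ)  << 2;
 */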

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI:  block new interrupts while in here */
	if (using_msi)
		writel(0, hpriv->main_irq_mask_addr);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
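
/*
 * A main_irq_cause of 0xffffffff is treated as bogus above because a
 * PCI master abort (e.g. reading from a surprise-removed or hung
 * device) returns all-ones on the bus; acting on it as if every
 * interrupt bit were genuinely set would only make things worse.
 */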

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
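
/*
 * The offsets above rely on libata's SCR numbering (SCR_STATUS = 0,
 * SCR_ERROR = 1, SCR_CONTROL = 2), so on 5xxx parts the registers sit
 * at consecutive 32-bit words:
 *
 *	mv5_scr_offset(SCR_STATUS)  == 0x0
 *	mv5_scr_offset(SCR_ERROR)   == 0x4
 *	mv5_scr_offset(SCR_CONTROL) == 0x8
 */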

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}


#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
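
/*
 * In outline, the global soft reset above is a three-step dance:
 *
 *	1. Set STOP_PCI_MASTER and poll until PCI_MASTER_EMPTY,
 *	   so no DMA is in flight when the reset hits.
 *	2. Assert GLOB_SFT_RST and confirm it latched.
 *	3. Deassert GLOB_SFT_RST *and* STOP_PCI_MASTER together,
 *	   re-enabling the PCI master (a step the spec omits).
 */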

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 *  (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
					void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
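
/*
 * Illustrative note on the SStatus values tested above: the low three
 * nibbles are DET, SPD, and IPM.  0x113 and 0x123 both mean "device
 * present, phy communication established" (DET=3, IPM=1), at 1.5Gbps
 * (SPD=1) and 3.0Gbps (SPD=2) respectively; 0x121 (DET=1) means a
 * device was sensed but communication never came up, which is the
 * case the errata workaround retries at the slower speed.
 */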

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
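
/*
 * Layout sketch for the shadow block mapped above (offsets follow from
 * libata's ATA_REG_* numbering, each register widened to a 32-bit word):
 *
 *	shd_base + 0x00  data		shd_base + 0x10  lbam
 *	shd_base + 0x04  error/feature	shd_base + 0x14  lbah
 *	shd_base + 0x08  nsect		shd_base + 0x18  device
 *	shd_base + 0x0c  lbal		shd_base + 0x1c  status/command
 */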

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
							     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
							     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
							     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
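
/*
 * Note on mv_create_dma_pools() above: the dmam_pool_create() calls are
 * the managed (devres) variants, so the pools are torn down
 * automatically when the device goes away; that is why the early
 * -ENOMEM returns need no explicit cleanup.
 */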

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
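
/*
 * Sketch of the WINDOW_CTRL encoding composed above (example values
 * are illustrative): bits 31:16 hold (size - 1) of the window, bits
 * 15:8 the mbus attribute, bits 7:4 the target id, and bit 0 is
 * assumed here to be the window enable.  E.g. a 256MB chip select
 * with attribute 0x0e and target id 0:
 *
 *	((0x10000000 - 1) & 0xffff0000)		-> 0x0fff0000
 *	| (0x0e << 8) | (0 << 4) | 1		-> 0x0fff0e01
 */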

/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 *
 *      mv_platform_remove    -       unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI MSI; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
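/*
 * Prefer fully 64-bit DMA; fall back to 32-bit streaming and/or
 * consistent masks when the device or platform cannot do better.
 */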
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI device class code so we can report whether the
	 * chip presents itself as a SCSI or a RAID class device.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

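	/*
	 * Map the primary BAR; if it is already claimed, pin the device
	 * so it stays enabled even though this probe fails.
	 */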
	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
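	/* a platform driver failure unwinds the PCI registration above */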
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);