/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is quite often not
 *       worth the latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.24"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,

	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	MV_GENIIE_FLAGS		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config             = mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
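
/*
 * Worked example of the macro above: for port 5 (hosted on HC1),
 * mv_hc_from_port(5) = 1 and mv_hardport_from_port(5) = 1, so
 * shift = 1 * HC_SHIFT + 1 * 2 = 11.  ERR_IRQ for port 5 is therefore
 * bit 11, and DONE_IRQ bit 12, of main_irq_cause/main_irq_mask.
 */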

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
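
/*
 * Worked example of the address arithmetic above: for port 5,
 * mv_hc_base_from_port() yields base + 0x20000 + 1 * 0x10000 =
 * base + 0x30000; adding the 0x2000 arbiter window and
 * hardport 1 * 0x2000 gives a port base of base + 0x34000.
 */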

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
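
/*
 * Note on the layout programmed above: the in/out pointer registers
 * carry both the queue base address (1KB-aligned for CRQB, 256B for
 * CRPB) and the 5-bit ring index in the low-order bits.  For example,
 * req_idx == 3 is encoded as (3 << EDMA_REQ_Q_PTR_SHIFT) == 0x60,
 * OR'd into bits 9:5 below the EDMA_REQ_Q_BASE_LO_MASK bits.
 */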

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		if (IS_GEN_IIE(hpriv))
			writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
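
/*
 * The ordering in mv_start_dma() matters: stale EDMA and HC interrupt
 * causes are acknowledged first, the EDMA is configured for the chosen
 * protocol, the queue pointers are (re)programmed, port interrupts are
 * unmasked, and only then is EDMA_EN written to start the engine.
 */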

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}
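
/*
 * With per_loop = 5 and timeout = (15 * 1000 / per_loop), the loop
 * above polls up to 3000 times at 5 usec intervals, i.e. roughly
 * 15 msec in the worst case.
 */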

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
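
/*
 * Worst case, the loop above polls 10000 times at 10 usec intervals,
 * so mv_stop_edma_engine() gives the chip up to ~100 msec to confirm
 * that EDMA_EN has cleared before returning -EIO.
 */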

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devices */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
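
/*
 * With libata's usual SCR numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2, SCR_ACTIVE = 3), the mapping above works out to:
 * SStatus at 0x300, SError at 0x304, SControl at 0x308, and SActive
 * off on its own at 0x350.
 */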

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
{
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |=  FISCFG_WAIT_DEV_ERR;
	}

	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * but first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		if (!IS_SOC(hpriv))
			cfg |= (1 << 18);	/* enab early completion */
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
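
/*
 * Worked example of the splitting above: an SG entry with
 * (addr & 0xffff) == 0x8000 and sg_len == 0xc000 is emitted as two
 * ePRDs, one of 0x8000 bytes and one of 0x4000 bytes.  This worst-case
 * doubling is why .sg_tablesize is only MV_MAX_SG_CT / 2.
 */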

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
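
/*
 * Example of the packing above, assuming libata's usual register
 * numbering (ATA_REG_CMD == 7): mv_crqb_pack_cmd(cw, 0xca, ATA_REG_CMD, 1)
 * for a WRITE DMA command yields
 * 0xca | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST == 0x97ca,
 * stored little-endian in the CRQB.
 */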

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		static int limit_warnings = 10;
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_enable_port_irqs(ap, ERR_IRQ);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

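	/* Advance our cached request producer index (wraps at MV_MAX_Q_DEPTH) */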
	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	/* Under NCQ, several commands may be in flight at once,
	 * so there is no single "active" qc to report.
	 */
	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
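		/* Walk the PMP bitmap, clearing each bit as its link is handled */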
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	/* Bits 31:16 of SATA_TESTCTL: bitmap of PMP links with device errors */
	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
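	/* hweight16() counts the set bits, i.e. the number of failed links */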
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action   |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Most cases require a full reset of the chip's state machine,
 *      which also performs a COMRESET.
 *      Also, if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process any new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @main_irq_cause: Main interrupt cause register for the chip.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
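		/*
		 * Each port owns two bits (DONE_IRQ and ERR_IRQ) within
		 * main_irq_cause; "shift" locates them for this port.
		 */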
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);
	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}

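/* Gen-I (5xxx) maps SCR_STATUS/ERROR/CONTROL at consecutive 32-bit offsets */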
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}


#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);


	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 *  (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
					void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);


	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
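		/* SStatus 0x113/0x123: link up at 1.5/3.0 Gb/s; 0x0: no device */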
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the ATA shadow registers are memory-mapped
	 * as consecutive 32-bit words starting at the port's SHD_BLK_OFS.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

		/*
		 * enable only global host interrupts for now.
		 * The per-port interrupts get done later as ports are set up.
		 */
		mv_set_main_irq_mask(host, 0, PCI_ERR);
	}
done:
	return rc;
}

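/* Per-host DMA pools for the command request/response queues and SG tables */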
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
							     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
							     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
							     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

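
/*
 * Program the chip's MBUS address-decode windows to match the SoC's
 * DRAM chip-select layout (unused windows are cleared first).
 */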
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 *
 *      mv_platform_remove    -       unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI MSI; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

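	/*
	 * Prefer 64-bit DMA.  If a 64-bit consistent mask cannot be set,
	 * drop the consistent mask to 32 bits; if 64-bit DMA is unavailable
	 * altogether, fall back to 32-bit masks for both.
	 */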
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI device class so we can report whether the chip
	 * presents itself as a plain SCSI or as a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

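	/*
	 * Map BAR 0.  On -EBUSY the region is owned by someone else, so pin
	 * the device (keep it enabled on the error path) and fail the probe.
	 */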
	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
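	/* If MSI was requested but could not be enabled, fall back to INTx. */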
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
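	/*
	 * Register the PCI driver first; if the platform driver fails to
	 * register below, the PCI driver is unregistered again so the
	 * module does not load half-initialized.
	 */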
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);