/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);

struct bfa_ioc_hwif_s hwif_ct;

/**
 * Called from bfa_ioc_attach() to map ASIC-specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;

	ioc->ioc_hwif = &hwif_ct;
}
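
/*
 * Usage sketch (illustrative only -- the actual ASIC detection and call
 * site live in the common IOC attach path, not in this file):
 *
 *	if (<PCI device id indicates a CT/Catapult ASIC>)
 *		bfa_ioc_set_ct_hwif(ioc);
 *	else
 *		bfa_ioc_set_cb_hwif(ioc);
 */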

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/**
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/**
	 * If BIOS boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/**
	 * Use count cannot be non-zero while the chip is in uninitialized state.
	 */
	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}

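/**
 * Release the firmware usage count taken in bfa_ioc_ct_firmware_lock().
 */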
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/**
	 * Firmware lock is relevant only for CNA.
	 */
	if (!ioc->cna)
		return;

	/**
	 * If BIOS boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/**
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
	bfa_assert(usecnt > 0);

	usecnt--;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_trc(ioc, usecnt);

	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions of a heartbeat (HB) failure.
 */
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
		/* Wait for halt to take effect */
		bfa_reg_read(ioc->ioc_regs.ll_halt);
	} else {
		bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
		bfa_reg_read(ioc->ioc_regs.err_set);
	}
}

/**
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

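/**
 * Initialize the chip register address map for this PCI function.
 */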
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

	/**
	 * SRAM memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * Err set reg: used for notification of heartbeat failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/**
 * Initialize the IOC-to-port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/**
	 * For Catapult, base the port id on the personality register and IOC type
	 */
	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	bfa_reg_write(rb + FNC_PERS_REG, r32);
}

/**
 * Clean up the hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{

	if (ioc->cna) {
		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	bfa_ioc_hw_sem_release(ioc);
}



/*
 * Check the firmware state to know if pll_init has been completed already
 */
bfa_boolean_t
bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
{
	if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
	  (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
		return BFA_TRUE;

	return BFA_FALSE;
}

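/**
 * Set up the slow (APP_PLL_312) and fast (APP_PLL_425) application PLLs,
 * clear host function interrupts and release on-chip memories from reset.
 * fcmode selects native FC vs. FCoE register settings.
 */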
bfa_status_t
bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
{
	u32	pll_sclk, pll_fclk, r32;

	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);
	if (fcmode) {
		bfa_reg_write((rb + OP_MODE), 0);
		bfa_reg_write((rb + ETH_MAC_SER_REG),
				__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL);
	} else {
		bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
		bfa_reg_write((rb + ETH_MAC_SER_REG),
				__APP_EMS_REFCKBUFEN1);
	}
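	/*
	 * Mark both IOCs uninitialized and mask/clear all host function
	 * interrupts before reprogramming the PLLs.
	 */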
	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
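	/*
	 * Put both PLLs into logic soft reset, then enable them while the
	 * soft reset is still asserted.
	 */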
	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
	bfa_reg_read(rb + HOSTFN0_INT_MSK);
	bfa_os_udelay(2000);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
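	/*
	 * Release the logic soft reset now that the PLLs have had time
	 * to settle.
	 */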
	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
		__APP_PLL_312_ENABLE);
	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
		__APP_PLL_425_ENABLE);
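	/*
	 * In FCoE mode, assert the per-port PMM 1T resets while LMEM is
	 * brought out of reset; they are released again below.
	 */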
	if (!fcmode) {
		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
	}
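	/*
	 * Release PSS local memory (LMEM) from reset.
	 */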
	r32 = bfa_reg_read((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	bfa_reg_write((rb + PSS_CTL_REG), r32);
	bfa_os_udelay(1000);
	if (!fcmode) {
		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
	}

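	/*
	 * Run the eDRAM built-in self test; the status register is read
	 * back but the result is not checked here.
	 */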
	bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
	bfa_os_udelay(1000);
	r32 = bfa_reg_read((rb + MBIST_STAT_REG));
	bfa_reg_write((rb + MBIST_CTL_REG), 0);
	return BFA_STATUS_OK;
}