/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2013  LSI Corporation
 *  Copyright (c) 2013-2016  Avago Technologies
 *  Copyright (c) 2016-2018  Broadcom Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  FILE: megaraid_sas_fp.c
 *
 *  Authors: Broadcom Inc.
 *           Sumant Patro
 *           Varad Talamacki
 *           Manoj Jose
 *           Kashyap Desai <kashyap.desai@broadcom.com>
 *           Sumit Saxena <sumit.saxena@broadcom.com>
 *
 *  Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>

#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, int, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
	"threshold. Valid values are 1-128. Default: 4");


#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3

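/*
 * SPAN_ROW_SIZE/SPAN_ROW_DATA_SIZE look up the per-span row geometry used
 * for uneven-span LDs; SPAN_INVALID is the sentinel returned by the span
 * lookup helpers when a row does not fall inside any quad element.
 */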
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff

/* Prototypes */
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);

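/*
 * mega_mod64 - 64-bit modulo helper built on do_div(); needed because a
 * plain 64-bit '%' is not available on all 32-bit architectures.
 */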
u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}

/**
 * mega_div64_32 - 64-bit by 32-bit unsigned division
 * @dividend: Dividend
 * @divisor: Divisor
 *
 * Return: quotient
 **/
u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u32 remainder;
	u64 d;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n");

	d = dividend;
	remainder = do_div(d, divisor);

	return d;
}
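
/*
 * Illustrative example: mega_div64_32(1000005ULL, 8) == 125000 and
 * mega_mod64(1000005ULL, 8) == 5, i.e. the 64-bit quotient and remainder
 * computed via do_div().
 */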

struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_DRV_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

/*
 * This function will populate the driver RAID map using the firmware RAID
 * map. Three firmware layouts are handled: the dynamic descriptor-table map
 * (max_raid_mapsize), the extended 256-VD map (supportmax256vd) and the
 * legacy map.
 */
static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
	int i, j;
	u16 ld_count;
	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
	struct MR_RAID_MAP_DESC_TABLE *desc_table;


	struct MR_DRV_RAID_MAP_ALL *drv_map =
			fusion->ld_drv_map[(map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	memset(drv_map, 0, fusion->drv_map_sz);
	memset(pDrvRaidMap->ldTgtIdToLd,
	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(map_id & 1)];
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
			dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
				desc_table, fw_map_dyn->raid_map_desc_table);

		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec =
			fw_map_dyn->fp_pd_io_timeout_sec;
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
		/* point to actual data starting point*/
		raid_map_data = (void *)fw_map_dyn +
			le32_to_cpu(fw_map_dyn->desc_table_offset) +
			le32_to_cpu(fw_map_dyn->desc_table_size);

		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
				fw_map_dyn->dev_hndl_info =
				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->devHndlInfo,
					fw_map_dyn->dev_hndl_info,
					sizeof(struct MR_DEV_HANDLE_INFO) *
					le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			case RAID_MAP_DESC_TYPE_TGTID_INFO:
				fw_map_dyn->ld_tgt_id_to_ld =
					(u16 *)(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
					pDrvRaidMap->ldTgtIdToLd[j] =
						le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
				}
			break;
			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
				fw_map_dyn->ar_map_info =
					(struct MR_ARRAY_INFO *)
					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->arMapInfo,
				       fw_map_dyn->ar_map_info,
				       sizeof(struct MR_ARRAY_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			case RAID_MAP_DESC_TYPE_SPAN_INFO:
				fw_map_dyn->ld_span_map =
					(struct MR_LD_SPAN_MAP *)
					(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->ldSpanMap,
				       fw_map_dyn->ld_span_map,
				       sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			default:
				dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
					fw_map_dyn->desc_table_num_elements);
			}
			++desc_table;
		}

	} else if (instance->supportmax256vd) {
		fw_map_ext =
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
			return 1;
		}

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u16)fw_map_ext->ldTgtIdToLd[i];
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);

		/* New Raid map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
				fusion->ld_map[(map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES) {
			dev_dbg(&instance->pdev->dev,
				"LD count exposed in RAID map in not valid\n");
			return 1;
		}

		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = 0; i < ld_count; i++) {
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
		}
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
			sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
			sizeof(struct MR_DEV_HANDLE_INFO) *
			MAX_RAIDMAP_PHYSICAL_DEVICES);
	}

	return 0;
}

/*
 * This function will validate Map info data provided by FW
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID         *raid;
	u16 num_lds, i;
	u16 ld;
	u32 expected_size;

	if (MR_PopulateDrvRaidMap(instance, map_id))
		return 0;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	if (instance->max_raid_mapsize)
		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
	else if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
			le32_to_cpu(pDrvRaidMap->totalSize));
		dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
			(unsigned int)expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	if (lbInfo)
		mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	/* Convert RAID capability values to CPU endianness */
	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
		ld = MR_TargetIdToLdGet(i, drv_map);

		/* For non-existing VDs, iterate to next VD */
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;

		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);

		num_lds--;
	}

	return 1;
}

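/*
 * MR_GetSpanBlock - map a logical row of an LD to a span and a block offset.
 *
 * A row belongs to quad element 'q' of a span when
 * logStart <= row <= logEnd and (row - logStart) is a multiple of q->diff;
 * the block within the span is then
 * ((row - logStart) / diff + offsetInSpan) << stripeShift.
 * Returns the matching span number, or SPAN_INVALID if no quad matches
 * (or a quad has a zero diff).
 */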
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT    *quad;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	u32                span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64  blk, debugBlk;
					blk =  mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
					debugBlk = blk;

					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the Span block for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    row        - Row number
*    map    - LD map
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*    div_error	   - Divide error code.
*/

u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	u32    span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64  blk;
						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							 << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the row for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    Strip        - Strip
*    map    - LD map
*
* Outputs :
*
*    row         - row associated with strip
*/

static u64  get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET	*span_set;
	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
	u32		info, strip_offset, span, span_offset;
	u64		span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}

		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}


/*
******************************************************************************
*
* This routine calculates the Start Strip for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    row        - Row number
*    map    - LD map
*
* Outputs :
*
*    Strip         - Start strip associated with row
*/

static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    span, info;
	u64  strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
							le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row"
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}

/*
******************************************************************************
*
* This routine calculates the Physical Arm for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    strip      - Strip
*    map    - LD map
*
* Outputs :
*
*    Phys Arm         - Phys Arm associated with strip
*/

static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}

		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip"
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}

/* This function will return the physical arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u32    arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}


/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe using spanset
*
* Inputs :
*
*    ld   - Logical drive number
*    stripRow        - Stripe number
*    stripRef    - Reference in stripe
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*/
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32     pd, arRef, r1_alt_pd;
	u8      physArm, span;
	u64     row;
	u8	retval = true;
	u64	*pdBlock = &io_info->pdBlock;
	__le16	*pDevHandle = &io_info->devHandle;
	u8	*pPdInterface = &io_info->pd_interface;
	u32	logArm, rowMod, armQ, arm;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	/*Get row and span from io_info for Uneven Span IO.*/
	row	    = io_info->start_row;
	span	    = io_info->start_span;


	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return false;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; /* index of Q drive */
		arm = armQ + 1 + logArm; /* data always logically follows Q */
		if (arm >= SPAN_ROW_SIZE(map, ld, span)) /* handle wrap condition */
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return false;

	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes*/
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
				MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
			((instance->adapter_type == THUNDERBOLT_SERIES)  ||
			((instance->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe.
*
* Inputs :
*
*    ld   - Logical drive number
*    stripRow        - Stripe number
*    stripRef    - Reference in stripe
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*/
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32         pd, arRef, r1_alt_pd;
	u8          physArm, span;
	u64         row;
	u8	    retval = true;
	u64	    *pdBlock = &io_info->pdBlock;
	__le16	    *pDevHandle = &io_info->devHandle;
	u8	    *pPdInterface = &io_info->pd_interface;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	row =  mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return false;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else  {
		if (raid->modFactor == 0)
			return false;
		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
							  raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return false;
	}

	/* Get the array on which this span is present */
	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID) {
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes*/
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
			((instance->adapter_type == THUNDERBOLT_SERIES)  ||
			((instance->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
******************************************************************************
*
* MR_BuildRaidContext function
*
* This function will initiate command processing.  The start/end row and strip
* information is calculated then the lock is acquired.
* This function returns true on success and false if the physical drive
* parameters for a fast path capable IO could not be computed.
*/
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID  *raid;
	u32         stripSize, stripe_mask;
	u64         endLba, endStrip, endRow, start_row, start_strip;
	u64         regStart;
	u32         regSize;
	u8          num_strips, numRows;
	u16         ref_in_start_stripe, ref_in_end_stripe;
	u64         ldStartBlock;
	u32         numBlocks, ldTgtId;
	u8          isRead;
	u8	    retval = 0;
	u8	    startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;
	u16	    ld;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span	= SPAN_INVALID;
	fusion = instance->ctrl_context;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);
	/*check read ahead bit*/
	io_info->ra_capable = raid->capability.ra_capable;

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return false;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but has SPAN[0]"
				"rowDataSize = 0x%0x,"
				"but there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return false;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;


	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip         = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba              = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
	endStrip            = endLba >> raid->stripeShift;
	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
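
	/*
	 * Illustrative example (hypothetical values): with stripeShift = 7
	 * (128-block strips), ldStartBlock = 1000 and numBlocks = 400:
	 * start_strip = 7, ref_in_start_stripe = 104, endLba = 1399,
	 * endStrip = 10, ref_in_end_stripe = 119 and num_strips = 4.
	 */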

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d."
				"Send IO w/o region lock.\n",
				__func__, __LINE__);
			return false;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d"
				"for row 0x%llx,start strip %llx"
				"endSrip %llx\n", __func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return false;
		}
		io_info->start_span	= startlba_span;
		io_info->start_row	= start_row;
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart            = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize             = stripSize;

	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = false;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row*/
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* Add complete rows in the middle of the transfer */

		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeout_value =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (instance->adapter_type == INVADER_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (instance->adapter_type == THUNDERBOLT_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
	pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
	pRAID_Context->reg_lock_length    = cpu_to_le32(regSize);
	pRAID_Context->config_seq_num	= raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;


	/* Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible.*/
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = false;
		return retval;
	} else if (isRead) {
		uint stripIdx;
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
				    start_strip + stripIdx,
				    ref_in_start_stripe, io_info,
				    pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
				    start_strip + stripIdx, ref_in_start_stripe,
				    io_info, pRAID_Context, map);
			if (!retval)
				return true;
		}
	}
	return true;
}

/*
******************************************************************************
*
* This routine prepares spanset info from a valid RAID map and stores it into
* a local copy of ldSpanInfo in the per-instance data structure.
*
* Inputs :
* map    - LD map
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
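/*
 * Each LD_SPAN_SET element describes one "row set" of an uneven-span LD:
 * diff mirrors quad->diff, span_row_data_width is the sum of the
 * spanRowDataSize of every span participating at that quad depth, and the
 * log/strip/row start-end fields record the LBA, strip and row ranges the
 * set covers. get_row_from_strip(), get_strip_from_row() and
 * get_arm_from_strip() rely on these precomputed ranges.
 */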
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{
	u8   span, count;
	u32  element, span_row_width;
	u64  span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
		}
		if (span == raid->spanDepth)
			break;
	    }
	}
}

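/*
 * Load balancing is only enabled for RAID-1 LDs in OPTIMAL state; every
 * other LD gets loadBalanceFlag cleared so reads are not redirected to a
 * mirror arm.
 */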
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
	struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
			(raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}

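/*
 * megasas_get_best_arm_pd - RAID-1 read load balancing.
 *
 * Chooses between the data arm and its mirror: prefer the arm whose last
 * accessed block is closest to the new request, but switch to the other arm
 * when the preferred one has more than lb_pending_cmds extra commands
 * outstanding. If the mirror's device handle is invalid, stick with the
 * original arm.
 */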
u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
			   struct LD_LOAD_BALANCE_INFO *lbInfo,
			   struct IO_REQUEST_INFO *io_info,
			   struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	struct MR_LD_RAID  *raid;
	u16	pd1_dev_handle;
	u16     pend0, pend1, ld;
	u64     diff0, diff1;
	u8      bestArm, pd0, pd1, span, arm;
	u32     arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
		(arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */

	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);

	if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Make balance count from 16 to 4 to
		 *  keep driver in sync with Firmware
		 */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	return io_info->pd_after_lb;
}

__le16 get_updated_dev_handle(struct megasas_instance *instance,
			      struct LD_LOAD_BALANCE_INFO *lbInfo,
			      struct IO_REQUEST_INFO *io_info,
			      struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	u8 arm_pd;
	__le16 devHandle;

	/* get best new arm (PD ID) */
	arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}