/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value)	\
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/*
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT	\
	)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
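
/*
 * Illustrative sketch of how the cycle-bit helpers above are used (this
 * mirrors sci_controller_completion_queue_has_entries() below; it is not
 * additional driver logic, and process_entry() is only a placeholder name):
 *
 *	u32 get = ihost->completion_queue_get;
 *	u32 idx = NORMALIZE_GET_POINTER(get);
 *
 *	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get) ==
 *	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[idx]))
 *		process_entry(ihost->completion_queue[idx]);
 *
 * The entry at idx is valid only while its cycle bit agrees with the cycle
 * bit carried in the software get pointer; the get-pointer cycle bit is
 * toggled on queue wrap, which is how stale entries are told apart from
 * newly written ones.
 */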

/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	sm->initial_state_id    = initial_state;
	sm->previous_state_id   = initial_state;
	sm->current_state_id    = initial_state;
	sm->state_table         = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}

/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
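
/*
 * Usage sketch for the two helpers above, drawn from this file (see
 * sci_controller_construct() and sci_controller_initial_state_enter()
 * below); it is not additional driver logic:
 *
 *	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
 *	...
 *	sci_change_state(&ihost->sm, SCIC_RESET);
 *
 * Note that sci_change_state() runs the exit handler of the current state
 * (if any) before the enter handler of the next state.
 */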

static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
{
	u32 get_value = ihost->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
		return true;

	return false;
}

static bool sci_controller_isr(struct isci_host *ihost)
{
	if (sci_controller_completion_queue_has_entries(ihost)) {
		return true;
	} else {
		/*
		 * We have a spurious interrupt; it could be that we have already
		 * emptied the completion queue from a previous interrupt. */
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to be notified
		 * of an interrupt completion if we do not take this step.  We will mask
		 * then unmask the interrupts so that if another interrupt is pending
		 * after the clearing of the interrupt source we get the next interrupt message. */
		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
		writel(0, &ihost->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool sci_controller_error_isr(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending so let it through and handle
		 * in the callback */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be notified
	 * of an interrupt completion if we do not take this step.  We will mask
	 * then unmask the error interrupts so if there was another interrupt
	 * pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
	writel(0xff, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);

	return false;
}

static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
	u32 index = SCU_GET_COMPLETION_INDEX(ent);
	struct isci_request *ireq = ihost->reqs[index];

	/* Make sure that we really want to process this IO request */
	if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
	    ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
		/* Yep this is a valid io request pass it along to the
		 * io request handler
		 */
		sci_io_request_tc_completion(ireq, ent);
}

static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
	u32 index;
	struct isci_request *ireq;
	struct isci_remote_device *idev;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_command_request_type(ent)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		ireq = ihost->reqs[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
			 __func__, ent, ireq);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		idev = ihost->device_table[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
			 __func__, ent, idev);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;
	default:
		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
			 __func__, ent);
		break;
	}
}

static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
	u32 index;
	u32 frame_index;

	struct scu_unsolicited_frame_header *frame_header;
	struct isci_phy *iphy;
	struct isci_remote_device *idev;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(ent);

	frame_header = ihost->uf_control.buffers.array[frame_index].header;
	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(ent)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error will
		 *       this cause a problem? We expect the phy initialization will
		 *       fail if there is an error in the frame. */
		sci_controller_release_frame(ihost, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		result = sci_phy_frame_handler(iphy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(ent);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created.  In either case forward
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
			iphy = &ihost->phys[index];
			result = sci_phy_frame_handler(iphy, frame_index);
		} else {
			if (index < ihost->remote_node_entries)
				idev = ihost->device_table[index];
			else
				idev = NULL;

			if (idev != NULL)
				result = sci_remote_device_frame_handler(idev, frame_index);
			else
				sci_controller_release_frame(ihost, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error message
		 *       when we get this failure notification? */
	}
}

static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
	struct isci_remote_device *idev;
	struct isci_request *ireq;
	struct isci_phy *iphy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_event_type(ent)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we want to
		 *       reset the controller. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received fatal controller "
			"event  0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		ireq = ihost->reqs[index];
		sci_io_request_event_handler(ireq, ent);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(ent)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			ireq = ihost->reqs[index];
			if (ireq != NULL)
				sci_io_request_event_handler(ireq, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			idev = ihost->device_table[index];
			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
	/*
	 * direct the broadcast change event to the phy first and then let
	 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
	/*
	 * direct error counter event to the phy object since that is where
	 * we get the event notification.  This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		sci_phy_event_handler(iphy, ent);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < ihost->remote_node_entries) {
			idev = ihost->device_table[index];

			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
		} else
			dev_err(&ihost->pdev->dev,
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				ihost,
				ent,
				index);

		break;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 ent);
		break;
	}
}

static void sci_controller_process_completions(struct isci_host *ihost)
{
	u32 completion_count = 0;
	u32 ent;
	u32 get_index;
	u32 get_cycle;
	u32 event_get;
	u32 event_cycle;

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;

	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
		) {
		completion_count++;

		ent = ihost->completion_queue[get_index];

		/* increment the get pointer and check for rollover to toggle the cycle bit */
		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

		dev_dbg(&ihost->pdev->dev,
			"%s: completion queue entry:0x%08x\n",
			__func__,
			ent);

		switch (SCU_GET_COMPLETION_TYPE(ent)) {
		case SCU_COMPLETION_TYPE_TASK:
			sci_controller_task_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			sci_controller_sdma_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			sci_controller_unsolicited_frame(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
		case SCU_COMPLETION_TYPE_NOTIFY: {
			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

			sci_controller_event_completion(ihost, ent);
			break;
		}
		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 ent);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		ihost->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(ihost->completion_queue_get,
		       &ihost->smu_registers->completion_queue_get);

	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

}

static void sci_controller_error_handler(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    sci_controller_completion_queue_has_entries(ihost)) {

		sci_controller_process_completions(ihost);
		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
	} else {
		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&ihost->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &ihost->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost)) {
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (sci_controller_error_isr(ihost)) {
		spin_lock(&ihost->scic_lock);
		sci_controller_error_handler(ihost);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_error_isr(ihost))
		sci_controller_error_handler(ihost);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			"controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		 __func__, isci_host_get_state(ihost), time);

	return 1;

}

/**
 * sci_controller_get_suggested_start_timeout() - This method returns the
 *    suggested sci_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
	/* Validate the user supplied parameters. */
	if (!ihost)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval. */

	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
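
/*
 * Worked example (illustrative): assuming SCIC_SDS_SIGNATURE_FIS_TIMEOUT is
 * 25000 ms (its definition lives in host.h, not in this file) and
 * SCI_MAX_PHYS is 4, the suggested start timeout evaluates to
 * 25000 + 100 + (4 - 1) * 500 = 26600 ms.
 */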

static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

void sci_controller_disable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
}

static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&ihost->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &ihost->scu_registers->peg0.ptsg.control);
}

static void sci_controller_assign_task_entries(struct isci_host *ihost)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */

	task_assignment =
		readl(&ihost->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
		&ihost->smu_registers->task_context_assignment[0]);

}

static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	ihost->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &ihost->smu_registers->completion_queue_control);


	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &ihost->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &ihost->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid */
		ihost->completion_queue[index] = 0x80000000;
	}
}

static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		|  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}

static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
	if (ihost->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&ihost->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

static bool is_phy_starting(struct isci_phy *iphy)
{
	enum sci_phy_states state;

	state = iphy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * sci_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (sci_cb_controller_start_complete()).
 */
static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
	struct sci_oem_params *oem = &ihost->oem_parameters;
	struct isci_phy *iphy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (ihost->phy_startup_timer_pending)
		return status;

	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			iphy = &ihost->phys[index];
			state = iphy->sm.current_state_id;

			if (!phy_get_non_dummy_port(iphy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state. */
		if (is_controller_start_complete == true) {
			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
			sci_del_timer(&ihost->phy_timer);
			ihost->phy_startup_timer_pending = false;
		}
	} else {
		iphy = &ihost->phys[ihost->next_phy_to_start];

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(iphy) == NULL) {
				ihost->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode,
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return sci_controller_start_next_phy(ihost);
			}
		}

		status = sci_phy_start(iphy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&ihost->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			ihost->phy_startup_timer_pending = true;
		} else {
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
		}

		ihost->next_phy_to_start++;
	}

	return status;
}

static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
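
/*
 * Note (illustrative): the TCi pool is managed as a circular buffer of task
 * context indices.  Assuming the allocation path advances tci_head and
 * isci_tci_free() (used from sci_controller_start() below) advances
 * tci_tail, CIRC_CNT() above yields the number of task contexts currently
 * outstanding.
 */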

static enum sci_status sci_controller_start(struct isci_host *ihost,
					     u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task entries to ihost physical function */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller */
	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		result = sci_port_start(iport);
		if (result)
			return result;
	}

	sci_controller_start_next_phy(ihost);

	sci_mod_timer(&ihost->timer, timeout);

	sci_change_state(&ihost->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_start(ihost, tmo);
	sci_controller_enable_interrupts(ihost);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	sci_controller_disable_interrupts(ihost);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	struct list_head    completed_request_list;
	struct list_head    errored_request_list;
	struct list_head    *current_position;
	struct list_head    *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task     *task;
	u16 active;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&ihost->scic_lock);

	sci_controller_completion_handler(ihost);

	/* Take the lists of completed I/Os from the host. */

	list_splice_init(&ihost->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&ihost->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&ihost->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&ihost->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}

		spin_lock_irq(&ihost->scic_lock);
		isci_free_tag(ihost, request->io_tag);
		spin_unlock_irq(&ihost->scic_lock);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&ihost->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&ihost->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			isci_free_tag(ihost, request->io_tag);
			spin_unlock_irq(&ihost->scic_lock);
		}
	}

	/* the coalescence timeout doubles at each encoding step, so
	 * update it based on the ilog2 value of the outstanding requests
	 */
	active = isci_tci_active(ihost);
	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
	       &ihost->smu_registers->interrupt_coalesce_control);
}

/**
 * sci_controller_stop() - This method will stop an individual controller
 *    object.  This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&ihost->timer, timeout);
	sci_change_state(&ihost->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}

/**
 * sci_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPED:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&ihost->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			if (test_bit(IDEV_ALLOCATED, &idev->flags))
				isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	sci_controller_reset(ihost);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];
		del_timer_sync(&iport->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		del_timer_sync(&iphy->sata_timer.timer);
	}

	del_timer_sync(&ihost->port_agent.timer.timer);

	del_timer_sync(&ihost->power_control.timer.timer);

	del_timer_sync(&ihost->timer.timer);

	del_timer_sync(&ihost->phy_timer.timer);
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(struct sci_user_parameters *u)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_change_state(&ihost->sm, SCIC_RESET);
}

static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28

/**
 * sci_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000] . A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
					 u32 coalesce_number,
					 u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min      Max     Units
	 *              -----   ---      ---     -----
	 *              0       -        -       Disabled
	 *              1       13.3     20.0    ns
	 *              2       26.7     40.0
	 *              3       53.3     80.0
	 *              4       106.7    160.0
	 *              5       213.3    320.0
	 *              6       426.7    640.0
	 *              7       853.3    1280.0
	 *              8       1.7      2.6     us
	 *              9       3.4      5.1
	 *              10      6.8      10.2
	 *              11      13.7     20.5
	 *              12      27.3     41.0
	 *              13      54.6     81.9
	 *              14      109.2    163.8
	 *              15      218.5    327.7
	 *              16      436.9    655.4
	 *              17      873.8    1310.7
	 *              18      1.7      2.6     ms
	 *              19      3.5      5.2
	 *              20      7.0      10.5
	 *              21      14.0     21.0
	 *              22      28.0     41.9
	 *              23      55.9     83.9
	 *              24      111.8    167.8
	 *              25      223.7    335.5
	 *              26      447.4    671.1
	 *              27      894.8    1342.2
	 *              28      1.8      2.7     s
	 *              Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing timeout
	 * value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		      timeout_encode++) {
			if (min <= coalesce_timeout &&  max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &ihost->smu_registers->interrupt_coalesce_control);


	ihost->interrupt_coalesce_number = (u16)coalesce_number;
	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
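
/*
 * Worked example (illustrative) of the encoding search above: a requested
 * coalesce_timeout of 30 us is first scaled to 3000 ten-ns units; min/max
 * start at 85/128 (timeout_encode 7) and double each iteration until
 * 2720 <= 3000 < 4096, selecting timeout_encode 12, i.e. the 27.3 to 41.0 us
 * row of the table.
 */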


static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* set the default interrupt coalescence number and timeout value. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* disable interrupt coalescence. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = sci_phy_stop(&ihost->phys[index]);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].phy_index, phy_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;

	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		port_status = sci_port_stop(iport);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 iport->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if (ihost->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = sci_remote_device_stop(ihost->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(&ihost->pdev->dev,
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 ihost->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* Stop all of the components for this controller */
	sci_controller_stop_phys(ihost);
	sci_controller_stop_ports(ihost);
	sci_controller_stop_devices(ihost);
}

static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_reset_hardware(ihost);
	sci_change_state(&ihost->sm, SCIC_RESET);
}

static const struct sci_base_state sci_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = sci_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state  = sci_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = sci_controller_ready_state_enter,
		.exit_state  = sci_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = sci_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = sci_controller_stopping_state_enter,
		.exit_state = sci_controller_stopping_state_exit,
	},
	[SCIC_STOPPED] = {},
	[SCIC_FAILED] = {}
};

static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
{
	/* these defaults are overridden by the platform / firmware */
	u16 index;

	/* Default to APC mode. */
	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to at most one concurrent device spin-up. */
	ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	ihost->oem_parameters.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		ihost->oem_parameters.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		ihost->user_parameters.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
		ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
		ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse-based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of the SAS address
		 * hold a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF. */
		ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
		ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	ihost->user_parameters.stp_inactivity_timeout = 5;
	ihost->user_parameters.ssp_inactivity_timeout = 5;
	ihost->user_parameters.stp_max_occupancy_timeout = 5;
	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
	ihost->user_parameters.no_outbound_task_timeout = 20;
}

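/*
 * Shared timeout handler for the controller start and stop paths; both
 * operations arm the same ihost->timer, so the handler uses the current
 * state to decide which operation timed out.
 */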
static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
	struct sci_base_state_machine *sm = &ihost->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* / @todo Now what do we want to do in this case? */
		dev_err(&ihost->pdev->dev,
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

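/*
 * Construct the core controller object: the state machine, the ports
 * (including the dummy port that parks phys not yet assigned to a real
 * port), the phys, the timer, and the default user/OEM parameters.  It
 * ends by requesting a controller reset.
 */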
static enum sci_status sci_controller_construct(struct isci_host *ihost,
						void __iomem *scu_base,
						void __iomem *smu_base)
{
	u8 i;

	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);

	ihost->scu_registers = scu_base;
	ihost->smu_registers = smu_base;

	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		sci_port_construct(&ihost->ports[i], i, ihost);
	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		sci_phy_construct(&ihost->phys[i],
				  &ihost->ports[SCI_MAX_PORTS], i);
	}

	ihost->invalid_phy_mask = 0;

	sci_init_timer(&ihost->timer, controller_timeout);

	/* Initialize the User and OEM parameters to default values. */
	sci_controller_set_default_config_parameters(ihost);

	return sci_controller_reset(ihost);
}

int sci_oem_parameters_validate(struct sci_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}

static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
	u32 state = ihost->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {

		if (sci_oem_parameters_validate(&ihost->oem_parameters))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

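/*
 * Periodic power-control tick.  Each interval the granted-power count is
 * reset and up to max_concurrent_dev_spin_up waiting phys are released,
 * staggering phy spin-up over time.
 */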
static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
	struct isci_phy *iphy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->power_control.phys_granted_power = 0;

	if (ihost->power_control.phys_waiting == 0) {
		ihost->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (ihost->power_control.phys_waiting == 0)
			break;

		iphy = ihost->power_control.requesters[i];
		if (iphy == NULL)
			continue;

		if (ihost->power_control.phys_granted_power >=
		    ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
			break;

		ihost->power_control.requesters[i] = NULL;
		ihost->power_control.phys_waiting--;
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	ihost->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

void sci_controller_power_control_queue_insert(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.phys_granted_power <
	    ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		/*
		 * stop and start the power_control timer. When the timer fires,
		 * phys_granted_power will be reset to 0
		 */
		if (ihost->power_control.timer_started)
			sci_del_timer(&ihost->power_control.timer);

		sci_mod_timer(&ihost->power_control.timer,
				 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		ihost->power_control.timer_started = true;

	} else {
		/* Add the phy to the waiting list */
		ihost->power_control.requesters[iphy->phy_index] = iphy;
		ihost->power_control.phys_waiting++;
	}
}

void sci_controller_power_control_queue_remove(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.requesters[iphy->phy_index])
		ihost->power_control.phys_waiting--;

	ihost->power_control.requesters[iphy->phy_index] = NULL;
}

#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters
 */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
	const struct sci_oem_params *oem = &ihost->oem_parameters;
	struct pci_dev *pdev = ihost->pdev;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0(pdev)) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a2(pdev))
		writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
	else if (is_b0(pdev) || is_c0(pdev))
		writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0(pdev) || is_c0(pdev))
		writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a2(pdev)) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c0(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a2(pdev))
			writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_b0(pdev)) {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		} else {
			writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a2(pdev)) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a2(pdev))
			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_b0(pdev)) {
			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		} else {
			writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}

		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}

static void sci_controller_initialize_power_control(struct isci_host *ihost)
{
	sci_init_timer(&ihost->power_control.timer, power_control_timeout);

	memset(ihost->power_control.requesters, 0,
	       sizeof(ihost->power_control.requesters));

	ihost->power_control.phys_waiting = 0;
	ihost->power_control.phys_granted_power = 0;
}

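/*
 * One-time hardware initialization: program the AFE, release the soft
 * reset, poll for SCU RAM initialization to complete, size the port,
 * task-context, and remote-node tables from the reported device context
 * capacity, then initialize the phys and ports.
 */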
static enum sci_status sci_controller_initialize(struct isci_host *ihost)
{
	struct sci_base_state_machine *sm = &ihost->sm;
	enum sci_status result = SCI_FAILURE;
	unsigned long i, state, val;

	if (ihost->sm.current_state_id != SCIC_RESET) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCIC_INITIALIZING);

	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);

	ihost->next_phy_to_start = 0;
	ihost->phy_startup_timer_pending = false;

	sci_controller_initialize_power_control(ihost);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * / @todo The AFE settings are supposed to be correct for the B0 but
	 * /       presently they seem to be wrong. */
	sci_controller_afe_initialization(ihost);


	/* Take the hardware out of reset */
	writel(0, &ihost->smu_registers->soft_reset_control);

	/*
	 * / @todo Provide meaningful error code for hardware failure
	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
	for (i = 100; i >= 1; i--) {
		u32 status;

		/* Loop until the hardware reports success */
		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&ihost->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}
	if (i == 0)
		goto out;

	/*
	 * Determine what are the actual device capacities that the
	 * hardware will support */
	val = readl(&ihost->smu_registers->device_context_capacity);

	/* Record the smaller of the two capacity values */
	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);

	/*
	 * Make all PEs that are unassigned match up with the
	 * logical ports
	 */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct scu_port_task_scheduler_group_registers __iomem
			*ptsg = &ihost->scu_registers->peg0.ptsg;

		writel(i, &ptsg->protocol_engine[i]);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.pdma_configuration);

	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.cdma_configuration);

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		result = sci_phy_initialize(&ihost->phys[i],
					    &ihost->scu_registers->peg0.pe[i].tl,
					    &ihost->scu_registers->peg0.pe[i].ll);
		if (result != SCI_SUCCESS)
			goto out;
	}

	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];

		iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
		iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
		iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
	}

	result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);

 out:
	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCIC_INITIALIZED;
	else
		state = SCIC_FAILED;
	sci_change_state(sm, state);

	return result;
}

static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
					       struct sci_user_parameters *sci_parms)
{
	u32 state = ihost->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters.  If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &sci_parms->phys[index];

			if (!((user_phy->max_speed_generation <=
						SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
						SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency <
						3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->
				notify_enable_spin_up_insertion_frequency ==
						0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((sci_parms->stp_inactivity_timeout == 0) ||
		    (sci_parms->ssp_inactivity_timeout == 0) ||
		    (sci_parms->stp_max_occupancy_timeout == 0) ||
		    (sci_parms->ssp_max_occupancy_timeout == 0) ||
		    (sci_parms->no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

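/*
 * Allocate the controller's DMA-coherent regions (completion queue,
 * remote node context table, task context table, and unsolicited frame
 * buffers) and program each base address into the silicon.
 */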
static int sci_controller_mem_init(struct isci_host *ihost)
{
	struct device *dev = &ihost->pdev->dev;
	dma_addr_t dma;
	size_t size;
	int err;

	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
	ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!ihost->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);

	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
							       GFP_KERNEL);
	if (!ihost->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);

	size = ihost->task_context_entries * sizeof(struct scu_task_context);
	ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!ihost->task_context_table)
		return -ENOMEM;

	ihost->task_context_dma = dma;
	writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);

	err = sci_unsolicited_frame_control_construct(ihost);
	if (err)
		return err;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(ihost->uf_control.headers.physical_address),
		&ihost->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(ihost->uf_control.headers.physical_address),
		&ihost->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
		&ihost->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
		&ihost->scu_registers->sdma.uf_address_table_upper);

	return 0;
}

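/*
 * Top-level host bring-up: construct the core controller, apply user and
 * OEM parameters, initialize the hardware, then set up the DMA regions
 * and the per-port, per-phy, per-device, and per-request software state.
 */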
int isci_host_init(struct isci_host *ihost)
{
	int err = 0, i;
	enum sci_status status;
	struct sci_user_parameters sci_user_params;
	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);

	spin_lock_init(&ihost->state_lock);
	spin_lock_init(&ihost->scic_lock);
	init_waitqueue_head(&ihost->eventq);

	isci_host_change_state(ihost, isci_starting);

	status = sci_controller_construct(ihost, scu_base(ihost),
					  smu_base(ihost));

	if (status != SCI_SUCCESS) {
		dev_err(&ihost->pdev->dev,
			"%s: sci_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	ihost->sas_ha.dev = &ihost->pdev->dev;
	ihost->sas_ha.lldd_ha = ihost;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(&sci_user_params);
	status = sci_user_parameters_set(ihost, &sci_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&ihost->oem_parameters,
						   pci_info->orom,
						   ihost->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&ihost->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = sci_oem_parameters_set(ihost);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
				"%s: sci_oem_parameters_set failed\n",
				__func__);
		return -ENODEV;
	}

	tasklet_init(&ihost->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)ihost);

	INIT_LIST_HEAD(&ihost->requests_to_complete);
	INIT_LIST_HEAD(&ihost->requests_to_errorback);

	spin_lock_irq(&ihost->scic_lock);
	status = sci_controller_initialize(ihost);
	spin_unlock_irq(&ihost->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = sci_controller_mem_init(ihost);
	if (err)
		return err;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&ihost->ports[i], ihost, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&ihost->phys[i], ihost, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &ihost->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
	}

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq;
		dma_addr_t dma;

		ireq = dmam_alloc_coherent(&ihost->pdev->dev,
					   sizeof(struct isci_request), &dma,
					   GFP_KERNEL);
		if (!ireq)
			return -ENOMEM;

		ireq->tc = &ihost->task_context_table[i];
		ireq->owning_controller = ihost;
		spin_lock_init(&ireq->state_lock);
		ireq->request_daddr = dma;
		ireq->isci_host = ihost;
		ihost->reqs[i] = ireq;
	}

	return 0;
}

void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
			    struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
		sci_del_timer(&ihost->phy_timer);
		ihost->phy_startup_timer_pending = false;
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		sci_controller_start_next_phy(ihost);
		break;
	case SCIC_READY:
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
			      struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
	case SCIC_READY:
		ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
						   iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
	u32 index;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if ((ihost->device_table[index] != NULL) &&
		   (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
			return true;
	}

	return false;
}

void sci_controller_remote_device_stopped(struct isci_host *ihost,
					  struct isci_remote_device *idev)
{
	if (ihost->sm.current_state_id != SCIC_STOPPING) {
		dev_dbg(&ihost->pdev->dev,
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			ihost, idev,
			ihost->sm.current_state_id);
		return;
	}

	if (!sci_controller_has_remote_devices_stopping(ihost))
		sci_change_state(&ihost->sm, SCIC_STOPPED);
}

void sci_controller_post_request(struct isci_host *ihost, u32 request)
{
	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
		__func__, ihost->id, request);

	writel(request, &ihost->smu_registers->post_context_port);
}

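/*
 * Look up an active request by IO tag.  A tag packs a per-slot sequence
 * number together with a task context index (TCI); the sequence must
 * still match io_request_sequence[] for that slot, so a stale tag for a
 * recycled context safely resolves to NULL.
 */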
struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = ISCI_TAG_TCI(io_tag);

	if (task_index < ihost->task_context_entries) {
		struct isci_request *ireq = ihost->reqs[task_index];

		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
			task_sequence = ISCI_TAG_SEQ(io_tag);

			if (task_sequence == ihost->io_request_sequence[task_index])
				return ireq;
		}
	}

	return NULL;
}

/**
 * This method allocates a remote node index and reserves the remote node
 *    context space for use. This method can fail if there are no more remote
 *    node indexes available.
 * @ihost: This is the controller object which contains the set of
 *    free remote node ids
 * @idev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote
 * node indexes.
 */
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							    struct isci_remote_device *idev,
							    u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = sci_remote_device_node_count(idev);

	node_index = sci_remote_node_table_allocate_remote_node(
		&ihost->available_remote_nodes, remote_node_count
		);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		ihost->device_table[node_index] = idev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

void sci_controller_free_remote_node_context(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     u16 node_id)
{
	u32 remote_node_count = sci_remote_device_node_count(idev);

	if (ihost->device_table[node_id] == idev) {
		ihost->device_table[node_id] = NULL;

		sci_remote_node_table_release_remote_node_index(
			&ihost->available_remote_nodes, remote_node_count, node_id
			);
	}
}

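/*
 * Reassemble a device-to-host FIS for the upper layer: the first dword
 * of the FIS arrives in the frame header and the remainder in the frame
 * buffer.
 */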
void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer)
{
	/* XXX type safety? */
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
		writel(ihost->uf_control.get,
			&ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

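/*
 * Task context indexes (TCIs) are recycled through tci_pool, a circular
 * buffer indexed by tci_head/tci_tail.  A tag pairs a TCI with that
 * slot's current sequence number; isci_free_tag bumps the sequence, so
 * any stale copy of the old tag is rejected by sci_request_by_tag.
 * Illustrative usage sketch (assuming the caller serializes with
 * ihost->scic_lock):
 *
 *	u16 tag = isci_alloc_tag(ihost);
 *	if (tag != SCI_CONTROLLER_INVALID_IO_TAG) {
 *		... build and start the request using the tag ...
 *		isci_free_tag(ihost, tag);
 *	}
 */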
u16 isci_alloc_tag(struct isci_host *ihost)
{
	if (isci_tci_space(ihost)) {
		u16 tci = isci_tci_alloc(ihost);
		u8 seq = ihost->io_request_sequence[tci];

		return ISCI_TAG(seq, tci);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}

enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq == ihost->io_request_sequence[tci]) {
		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);

		isci_tci_free(ihost, tci);

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INVALID_IO_TAG;
}

enum sci_status sci_controller_start_io(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_io(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
						 struct isci_remote_device *idev,
						 struct isci_request *ireq)
{
	/* terminate an ongoing (i.e. started) core IO request.  This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_io_request_terminate(ireq);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and OR in the POST_TC_ABORT
	 * request sub-type.
	 */
	sci_controller_post_request(ihost,
				    ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

/**
 * sci_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request.  After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @ihost: The handle to the controller object for which to complete the
 *    IO request.
 * @idev: The handle to the remote device object for which to complete
 *    the IO request.
 * @ireq: the handle to the io request object to complete.
 */
enum sci_status sci_controller_complete_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	enum sci_status status;
	u16 index;

	switch (ihost->sm.current_state_id) {
	case SCIC_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCIC_READY:
		status = sci_remote_device_complete_io(ihost, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		index = ISCI_TAG_TCI(ireq->io_tag);
		clear_bit(IREQ_ACTIVE, &ireq->flags);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

}

enum sci_status sci_controller_continue_io(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

/**
 * sci_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @ihost: the handle to the controller object for which to start the task
 *    management request.
 * @idev: the handle to the remote device object for which to start
 *    the task management request.
 * @ireq: the handle to the task request object to start.
 */
enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
					       struct isci_remote_device *idev,
					       struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_task(ihost, idev, ireq);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on starting
		 * the request (to post the TC when the RNC is resumed.)
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);
		sci_controller_post_request(ihost, ireq->post_context);
		break;
	default:
		break;
	}

	return status;
}