/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value)	\
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)
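
/*
 * The capacity fields decoded above are reported by the hardware as
 * "value - 1", hence the trailing "+ 1" in each macro.  Illustrative
 * use, mirroring how the initialization code sizes its pools (the raw
 * register value here is made up):
 *
 *	u32 dcc = readl(&ihost->smu_registers->device_context_capacity);
 *
 *	smu_max_ports(dcc);		max logical ports, e.g. raw 0x3 -> 4
 *	smu_max_task_contexts(dcc);	max task contexts (TCs)
 *	smu_max_rncs(dcc);		max remote node contexts (RNCs)
 */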

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/**
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT	\
	)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
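
/*
 * Sketch of the cycle bit protocol implemented with the macros above:
 * the hardware writes each completion entry with bit 31 set to its
 * current "cycle", and toggles that cycle every time the put pointer
 * wraps around the queue.  The driver keeps the cycle it expects in
 * ihost->completion_queue_get and compares it, via
 * NORMALIZE_GET_POINTER_CYCLE_BIT() and COMPLETION_QUEUE_CYCLE_BIT(),
 * against the next entry.  A match means the entry is new; a mismatch
 * means the hardware has not yet written this slot on the current lap.
 */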

/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	sm->initial_state_id    = initial_state;
	sm->previous_state_id   = initial_state;
	sm->current_state_id    = initial_state;
	sm->state_table         = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}

/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
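
/*
 * Usage sketch: the controller embeds one of these state machines and
 * drives it with the helpers above, e.g.
 *
 *	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
 *	sci_change_state(&ihost->sm, SCIC_RESET);
 *
 * Note that no transition validation is performed here; callers are
 * trusted to request only legal next states.
 */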

static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
{
	u32 get_value = ihost->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
		return true;

	return false;
}

static bool sci_controller_isr(struct isci_host *ihost)
{
	if (sci_controller_completion_queue_has_entries(ihost)) {
		return true;
	} else {
		/*
		 * we have a spurious interrupt; it could be that we have already
		 * emptied the completion queue from a previous interrupt */
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to be
		 * notified of an interrupt completion if we do not take this step.
		 * We will mask then unmask the interrupts so if another interrupt
		 * was pending during the clearing of the interrupt source we get
		 * the next interrupt message. */
		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
		writel(0, &ihost->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool sci_controller_error_isr(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending, so let it through and
		 * handle it in the callback */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be notified
	 * of an interrupt completion if we do not take this step.  We will mask
	 * then unmask the error interrupts so if there was another interrupt
	 * pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
	writel(0xff, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);

	return false;
}

static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
	u32 index = SCU_GET_COMPLETION_INDEX(ent);
	struct isci_request *ireq = ihost->reqs[index];

	/* Make sure that we really want to process this IO request */
	if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
	    ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
		/* Yep, this is a valid io request; pass it along to the
		 * io request handler
		 */
		sci_io_request_tc_completion(ireq, ent);
}
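
/*
 * The sequence comparison above guards against stale completions: task
 * context indexes (tags) are recycled, and ISCI_TAG_SEQ() extracts a
 * per-tag sequence number that must match the host's current
 * io_request_sequence[] entry, so a late completion aimed at a previous
 * user of the same tag index is quietly dropped.
 */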

static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
	u32 index;
	struct isci_request *ireq;
	struct isci_remote_device *idev;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_command_request_type(ent)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		ireq = ihost->reqs[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
			 __func__, ent, ireq);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		idev = ihost->device_table[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
			 __func__, ent, idev);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;
	default:
		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
			 __func__, ent);
		break;
	}
}

static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
	u32 index;
	u32 frame_index;

	struct scu_unsolicited_frame_header *frame_header;
	struct isci_phy *iphy;
	struct isci_remote_device *idev;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(ent);

	frame_header = ihost->uf_control.buffers.array[frame_index].header;
	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(ent)) {
		/*
		 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
		 * /       this cause a problem? We expect the phy initialization will
		 * /       fail if there is an error in the frame. */
		sci_controller_release_frame(ihost, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		result = sci_phy_frame_handler(iphy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(ent);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created.  In either case forward
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
			iphy = &ihost->phys[index];
			result = sci_phy_frame_handler(iphy, frame_index);
		} else {
			if (index < ihost->remote_node_entries)
				idev = ihost->device_table[index];
			else
				idev = NULL;

			if (idev != NULL)
				result = sci_remote_device_frame_handler(idev, frame_index);
			else
				sci_controller_release_frame(ihost, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * / @todo Is there any reason to report some additional error message
		 * /       when we get this failure notification? */
	}
}

static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
	struct isci_remote_device *idev;
	struct isci_request *ireq;
	struct isci_phy *iphy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_event_type(ent)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* / @todo The driver did something wrong and we need to fix the condition. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * / @todo This is a hardware failure and it's likely that we want to
		 * /       reset the controller. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received fatal controller "
			"event  0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		ireq = ihost->reqs[index];
		sci_io_request_event_handler(ireq, ent);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(ent)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			ireq = ihost->reqs[index];
			if (ireq != NULL)
				sci_io_request_event_handler(ireq, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			idev = ihost->device_table[index];
			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
	/*
	 * direct the broadcast change event to the phy first and then let
	 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
	/*
	 * direct error counter event to the phy object since that is where
	 * we get the event notification.  This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		sci_phy_event_handler(iphy, ent);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < ihost->remote_node_entries) {
			idev = ihost->device_table[index];

			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
		} else
			dev_err(&ihost->pdev->dev,
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				ihost,
				ent,
				index);

		break;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 ent);
		break;
	}
}

static void sci_controller_process_completions(struct isci_host *ihost)
{
	u32 completion_count = 0;
	u32 ent;
	u32 get_index;
	u32 get_cycle;
	u32 event_get;
	u32 event_cycle;

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;

	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
		) {
		completion_count++;

		ent = ihost->completion_queue[get_index];

		/* increment the get pointer and check for rollover to toggle the cycle bit */
		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

		dev_dbg(&ihost->pdev->dev,
			"%s: completion queue entry:0x%08x\n",
			__func__,
			ent);

		switch (SCU_GET_COMPLETION_TYPE(ent)) {
		case SCU_COMPLETION_TYPE_TASK:
			sci_controller_task_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			sci_controller_sdma_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			sci_controller_unsolicited_frame(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
		case SCU_COMPLETION_TYPE_NOTIFY: {
			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

			sci_controller_event_completion(ihost, ent);
			break;
		}
		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 ent);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		ihost->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(ihost->completion_queue_get,
		       &ihost->smu_registers->completion_queue_get);

	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

}
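
/*
 * A worked example of the rollover arithmetic above (the queue size is
 * illustrative): if SCU_MAX_COMPLETION_QUEUE_ENTRIES were 512, then
 * ((get_index + 1) & 512) is non-zero only when get_index + 1 == 512,
 * i.e. exactly at the wrap point.  That bit is shifted up to the
 * cycle-bit position and XORed into get_cycle, so the expected cycle
 * toggles once per lap while (get_index + 1) & 511 wraps the index back
 * to 0.  The event pointer applies the same trick with SCU_MAX_EVENTS.
 */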

static void sci_controller_error_handler(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    sci_controller_completion_queue_has_entries(ihost)) {

		sci_controller_process_completions(ihost);
		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
	} else {
		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&ihost->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &ihost->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost)) {
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (sci_controller_error_isr(ihost)) {
		spin_lock(&ihost->scic_lock);
		sci_controller_error_handler(ihost);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_error_isr(ihost))
		sci_controller_error_handler(ihost);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			"controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		 __func__, isci_host_get_state(ihost), time);

	return 1;

}

/**
 * sci_controller_get_suggested_start_timeout() - This method returns the
 *    suggested sci_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @ihost: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
	/* Validate the user supplied parameters. */
	if (!ihost)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval. */

	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
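
/*
 * With the constants visible in this file, and assuming a
 * SCIC_SDS_SIGNATURE_FIS_TIMEOUT of 1000ms (see the header for the
 * authoritative value) with SCI_MAX_PHYS == 4, the suggestion works
 * out to:
 *
 *	1000 + 100 + ((4 - 1) * 500) = 2600 milliseconds
 */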

static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

void sci_controller_disable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
}

static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&ihost->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &ihost->scu_registers->peg0.ptsg.control);
}

static void sci_controller_assign_task_entries(struct isci_host *ihost)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */

	task_assignment =
		readl(&ihost->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
		&ihost->smu_registers->task_context_assignment[0]);

}

static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	ihost->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &ihost->smu_registers->completion_queue_control);


	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &ihost->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &ihost->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid */
		ihost->completion_queue[index] = 0x80000000;
	}
}

static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		|  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}

static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
	if (ihost->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&ihost->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

static bool is_phy_starting(struct isci_phy *iphy)
{
	enum sci_phy_states state;

	state = iphy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * sci_controller_start_next_phy - start phy
 * @ihost: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (sci_cb_controller_start_complete()).
 */
static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
	struct sci_oem_params *oem = &ihost->oem_parameters;
	struct isci_phy *iphy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (ihost->phy_startup_timer_pending)
		return status;

	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			iphy = &ihost->phys[index];
			state = iphy->sm.current_state_id;

			if (!phy_get_non_dummy_port(iphy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state. */
		if (is_controller_start_complete == true) {
			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
			sci_del_timer(&ihost->phy_timer);
			ihost->phy_startup_timer_pending = false;
		}
	} else {
		iphy = &ihost->phys[ihost->next_phy_to_start];

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(iphy) == NULL) {
				ihost->next_phy_to_start++;

				/* Caution, recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode,
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return sci_controller_start_next_phy(ihost);
			}
		}

		status = sci_phy_start(iphy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&ihost->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			ihost->phy_startup_timer_pending = true;
		} else {
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
		}

		ihost->next_phy_to_start++;
	}

	return status;
}

static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
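
/*
 * Tag accounting note: the TCi (task context index) pool is kept as a
 * circular buffer per <linux/circ_buf.h>, with allocation and free
 * advancing the head and tail indexes, so CIRC_CNT() here yields the
 * number of tags currently checked out of the pool.
 */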

static enum sci_status sci_controller_start(struct isci_host *ihost,
					     u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task entries to the host's physical function */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller */
	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		result = sci_port_start(iport);
		if (result)
			return result;
	}

	sci_controller_start_next_phy(ihost);

	sci_mod_timer(&ihost->timer, timeout);

	sci_change_state(&ihost->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_start(ihost, tmo);
	sci_controller_enable_interrupts(ihost);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	sci_controller_disable_interrupts(ihost);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	struct list_head    completed_request_list;
	struct list_head    errored_request_list;
	struct list_head    *current_position;
	struct list_head    *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task     *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&ihost->scic_lock);

	sci_controller_completion_handler(ihost);

	/* Take the lists of completed I/Os from the host. */

	list_splice_init(&ihost->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&ihost->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&ihost->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&ihost->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				* the task_done callback cannot be called.
				*/
				task->task_done(task);
			}
		}

		spin_lock_irq(&ihost->scic_lock);
		isci_free_tag(ihost, request->io_tag);
		spin_unlock_irq(&ihost->scic_lock);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&ihost->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&ihost->scic_lock);
			/* Remove the request from the remote device's list
			* of pending requests.
			*/
			list_del_init(&request->dev_node);
			isci_free_tag(ihost, request->io_tag);
			spin_unlock_irq(&ihost->scic_lock);
		}
	}

}

/**
 * sci_controller_stop() - This method will stop an individual controller
 *    object.  This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @ihost: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&ihost->timer, timeout);
	sci_change_state(&ihost->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}

/**
 * sci_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @ihost: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPED:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&ihost->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			if (test_bit(IDEV_ALLOCATED, &idev->flags))
				isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	sci_controller_reset(ihost);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];
		del_timer_sync(&iport->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		del_timer_sync(&iphy->sata_timer.timer);
	}

	del_timer_sync(&ihost->port_agent.timer.timer);

	del_timer_sync(&ihost->power_control.timer.timer);

	del_timer_sync(&ihost->timer.timer);

	del_timer_sync(&ihost->phy_timer.timer);
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
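
/*
 * A single PCI device may host more than one SCU controller; the two
 * helpers above locate a given controller's register windows by
 * stepping SCI_SCU_BAR_SIZE/SCI_SMU_BAR_SIZE bytes per controller id
 * into the mapped BARs.
 */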

static void isci_user_parameters_get(struct sci_user_parameters *u)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_change_state(&ihost->sm, SCIC_RESET);
}

static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28

/**
 * sci_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @ihost: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000] . A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
					 u32 coalesce_number,
					 u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min      Max     Units
	 *              -----   ---      ---     -----
	 *              0       -        -       Disabled
	 *              1       13.3     20.0    ns
	 *              2       26.7     40.0
	 *              3       53.3     80.0
	 *              4       106.7    160.0
	 *              5       213.3    320.0
	 *              6       426.7    640.0
	 *              7       853.3    1280.0
	 *              8       1.7      2.6     us
	 *              9       3.4      5.1
	 *              10      6.8      10.2
	 *              11      13.7     20.5
	 *              12      27.3     41.0
	 *              13      54.6     81.9
	 *              14      109.2    163.8
	 *              15      218.5    327.7
	 *              16      436.9    655.4
	 *              17      873.8    1310.7
	 *              18      1.7      2.6     ms
	 *              19      3.5      5.2
	 *              20      7.0      10.5
	 *              21      14.0     21.0
	 *              22      28.0     41.9
	 *              23      55.9     83.9
	 *              24      111.8    167.8
	 *              25      223.7    335.5
	 *              26      447.4    671.1
	 *              27      894.8    1342.2
	 *              28      1.8      2.7     s
	 *              Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing timeout
	 * value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		      timeout_encode++) {
			if (min <= coalesce_timeout &&  max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &ihost->smu_registers->interrupt_coalesce_control);


	ihost->interrupt_coalesce_number = (u16)coalesce_number;
	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
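
/*
 * Worked example of the search loop above: the ready-state entry code
 * below requests a 250us timeout.  That is first scaled to 25000
 * ten-nanosecond units; the encode-7 base range [85, 128) is then
 * doubled each iteration until 21760 <= 25000 < 32768 at encode 15
 * (the ~218.5us - 327.7us row of the table), and that encoding is what
 * gets written to the TIMER field of the coalesce control register.
 */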


static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* set the default interrupt coalescence number and timeout value. */
	sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
}

static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* disable interrupt coalescence. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = sci_phy_stop(&ihost->phys[index]);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].phy_index, phy_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;

	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		port_status = sci_port_stop(iport);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 iport->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if (ihost->device_table[index] != NULL) {
			/* / @todo What timeout value do we want to provide to this request? */
			device_status = sci_remote_device_stop(ihost->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(&ihost->pdev->dev,
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 ihost->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* Stop all of the components for this controller */
	sci_controller_stop_phys(ihost);
	sci_controller_stop_ports(ihost);
	sci_controller_stop_devices(ihost);
}

static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_reset_hardware(ihost);
	sci_change_state(&ihost->sm, SCIC_RESET);
}

static const struct sci_base_state sci_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = sci_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state  = sci_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = sci_controller_ready_state_enter,
		.exit_state  = sci_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = sci_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = sci_controller_stopping_state_enter,
		.exit_state = sci_controller_stopping_state_exit,
	},
	[SCIC_STOPPED] = {},
	[SCIC_FAILED] = {}
};

static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
{
	/* these defaults are overridden by the platform / firmware */
	u16 index;

	/* Default to APC mode. */
	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to APC mode. */
	ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	ihost->oem_parameters.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		ihost->oem_parameters.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		ihost->user_parameters.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
		ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
		ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF. */
		ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
		ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	ihost->user_parameters.stp_inactivity_timeout = 5;
	ihost->user_parameters.ssp_inactivity_timeout = 5;
	ihost->user_parameters.stp_max_occupancy_timeout = 5;
	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
	ihost->user_parameters.no_outbound_task_timeout = 20;
}

static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
	struct sci_base_state_machine *sm = &ihost->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* @todo Now what do we want to do in this case? */
		dev_err(&ihost->pdev->dev,
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static enum sci_status sci_controller_construct(struct isci_host *ihost,
						void __iomem *scu_base,
						void __iomem *smu_base)
{
	u8 i;

	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);

	ihost->scu_registers = scu_base;
	ihost->smu_registers = smu_base;

	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		sci_port_construct(&ihost->ports[i], i, ihost);
	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		sci_phy_construct(&ihost->phys[i],
				  &ihost->ports[SCI_MAX_PORTS], i);
	}

	ihost->invalid_phy_mask = 0;

	sci_init_timer(&ihost->timer, controller_timeout);

	/* Initialize the User and OEM parameters to default values. */
	sci_controller_set_default_config_parameters(ihost);

	return sci_controller_reset(ihost);
}

int sci_oem_parameters_validate(struct sci_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}
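
/*
 * Illustrative sketch (not part of the driver): a minimal OEM parameter
 * block that would pass sci_oem_parameters_validate() in manual port
 * configuration mode -- every phy needs a non-zero SAS address and at least
 * one port must claim a phy.  All field values here are hypothetical.
 */
#if 0
static void example_oem_setup(struct sci_oem_params *oem)
{
	int i;

	oem->controller.mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
	oem->controller.max_concurrent_dev_spin_up = 1;
	oem->ports[0].phy_mask = 0xf;	/* one wide port over phys 0-3 */

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		oem->phys[i].sas_address.high = 0x5FCFFFFF;
		oem->phys[i].sas_address.low = 0x1 + i;
	}

	BUG_ON(sci_oem_parameters_validate(oem) != 0);
}
#endif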

static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
	u32 state = ihost->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {

		if (sci_oem_parameters_validate(&ihost->oem_parameters))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
	struct isci_phy *iphy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->power_control.phys_granted_power = 0;

	if (ihost->power_control.phys_waiting == 0) {
		ihost->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (ihost->power_control.phys_waiting == 0)
			break;

		iphy = ihost->power_control.requesters[i];
		if (iphy == NULL)
			continue;

		if (ihost->power_control.phys_granted_power >=
		    ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
			break;

		ihost->power_control.requesters[i] = NULL;
		ihost->power_control.phys_waiting--;
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	ihost->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

void sci_controller_power_control_queue_insert(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.phys_granted_power <
	    ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		/*
		 * stop and start the power_control timer. When the timer fires, the
		 * no_of_phys_granted_power will be set to 0
		 */
		if (ihost->power_control.timer_started)
			sci_del_timer(&ihost->power_control.timer);

		sci_mod_timer(&ihost->power_control.timer,
				 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		ihost->power_control.timer_started = true;

	} else {
		/* Add the phy in the waiting list */
		ihost->power_control.requesters[iphy->phy_index] = iphy;
		ihost->power_control.phys_waiting++;
	}
}

void sci_controller_power_control_queue_remove(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.requesters[iphy->phy_index])
		ihost->power_control.phys_waiting--;

	ihost->power_control.requesters[iphy->phy_index] = NULL;
}
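
/*
 * Illustrative sketch (not part of the driver): the spin-up throttle in
 * action.  With max_concurrent_dev_spin_up == 1, the first requester is
 * granted power immediately and later requesters queue until
 * power_control_timeout() fires and drains the waiters, one timer interval
 * at a time.
 */
#if 0
static void example_spin_up_throttle(struct isci_host *ihost)
{
	/* granted right away: phys_granted_power (0) < limit (1) */
	sci_controller_power_control_queue_insert(ihost, &ihost->phys[0]);

	/* queued: the grant budget for this interval is exhausted */
	sci_controller_power_control_queue_insert(ihost, &ihost->phys[1]);

	/* if phy 1 goes away before the timer fires, drop it from the queue */
	sci_controller_power_control_queue_remove(ihost, &ihost->phys[1]);
}
#endif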

#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters
 */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
	const struct sci_oem_params *oem = &ihost->oem_parameters;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0()) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &ihost->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
	else if (is_b0() || is_c0())
		writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0() || is_c0())
		writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a0() || is_a2()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0()) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c0()) {
			/* Configure transmitter SSC parameters */
			writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_b0()) {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		} else {
			writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_b0()) {
			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		} else {
			writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}

		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
			&ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
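
/*
 * Illustrative sketch (not part of the driver): the AFE programming idiom
 * used throughout the function above -- every analog front-end register
 * write is followed by a fixed settle delay before the next access.
 */
#if 0
static void example_afe_write(void __iomem *reg, u32 val)
{
	writel(val, reg);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
#endif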

static void sci_controller_initialize_power_control(struct isci_host *ihost)
{
	sci_init_timer(&ihost->power_control.timer, power_control_timeout);

	memset(ihost->power_control.requesters, 0,
	       sizeof(ihost->power_control.requesters));

	ihost->power_control.phys_waiting = 0;
	ihost->power_control.phys_granted_power = 0;
}

static enum sci_status sci_controller_initialize(struct isci_host *ihost)
{
	struct sci_base_state_machine *sm = &ihost->sm;
	enum sci_status result = SCI_FAILURE;
	unsigned long i, state, val;

	if (ihost->sm.current_state_id != SCIC_RESET) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCIC_INITIALIZING);

	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);

	ihost->next_phy_to_start = 0;
	ihost->phy_startup_timer_pending = false;

	sci_controller_initialize_power_control(ihost);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * @todo The AFE settings are supposed to be correct for the B0 but
	 *       presently they seem to be wrong. */
	sci_controller_afe_initialization(ihost);


	/* Take the hardware out of reset */
	writel(0, &ihost->smu_registers->soft_reset_control);

	/*
	 * @todo Provide a meaningful error code for hardware failure
	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
	for (i = 100; i >= 1; i--) {
		u32 status;

		/* Loop until the hardware reports success */
		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&ihost->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}
	if (i == 0)
		goto out;

	/*
	 * Determine the actual device capacities that the
	 * hardware will support */
	val = readl(&ihost->smu_registers->device_context_capacity);

	/* Record the smaller of the two capacity values */
	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);

	/*
	 * Make all PEs that are unassigned match up with the
	 * logical ports
	 */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct scu_port_task_scheduler_group_registers __iomem
			*ptsg = &ihost->scu_registers->peg0.ptsg;

		writel(i, &ptsg->protocol_engine[i]);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.pdma_configuration);

	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.cdma_configuration);

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		result = sci_phy_initialize(&ihost->phys[i],
					    &ihost->scu_registers->peg0.pe[i].tl,
					    &ihost->scu_registers->peg0.pe[i].ll);
		if (result != SCI_SUCCESS)
			goto out;
	}

	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];

		iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
		iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
		iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
	}

	result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);

 out:
	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCIC_INITIALIZED;
	else
		state = SCIC_FAILED;
	sci_change_state(sm, state);

	return result;
}

static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
					       struct sci_user_parameters *sci_parms)
{
	u32 state = ihost->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters.  If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &sci_parms->phys[index];

			if (!((user_phy->max_speed_generation <=
						SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
						SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency <
						3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->
				notify_enable_spin_up_insertion_frequency ==
						0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((sci_parms->stp_inactivity_timeout == 0) ||
		    (sci_parms->ssp_inactivity_timeout == 0) ||
		    (sci_parms->stp_max_occupancy_timeout == 0) ||
		    (sci_parms->ssp_max_occupancy_timeout == 0) ||
		    (sci_parms->no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

static int sci_controller_mem_init(struct isci_host *ihost)
{
	struct device *dev = &ihost->pdev->dev;
	dma_addr_t dma;
	size_t size;
	int err;

	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
	ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!ihost->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);

	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
							       GFP_KERNEL);
	if (!ihost->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);

	size = ihost->task_context_entries * sizeof(struct scu_task_context);
	ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!ihost->task_context_table)
		return -ENOMEM;

	ihost->task_context_dma = dma;
	writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);

	err = sci_unsolicited_frame_control_construct(ihost);
	if (err)
		return err;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(ihost->uf_control.headers.physical_address),
		&ihost->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(ihost->uf_control.headers.physical_address),
		&ihost->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
		&ihost->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
		&ihost->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
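
/*
 * Illustrative sketch (not part of the driver): the recurring pattern in
 * sci_controller_mem_init() -- allocate a managed DMA-coherent buffer, then
 * program its 64-bit bus address into a pair of 32-bit hardware registers.
 * The register pointers here are placeholders.
 */
#if 0
static int example_dma_table(struct device *dev, size_t size,
			     void __iomem *lower, void __iomem *upper)
{
	dma_addr_t dma;
	void *cpu = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;

	writel(lower_32_bits(dma), lower);
	writel(upper_32_bits(dma), upper);
	return 0;
}
#endif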

int isci_host_init(struct isci_host *ihost)
{
	int err = 0, i;
	enum sci_status status;
	struct sci_user_parameters sci_user_params;
	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);

	spin_lock_init(&ihost->state_lock);
	spin_lock_init(&ihost->scic_lock);
	init_waitqueue_head(&ihost->eventq);

	isci_host_change_state(ihost, isci_starting);

	status = sci_controller_construct(ihost, scu_base(ihost),
					  smu_base(ihost));

	if (status != SCI_SUCCESS) {
		dev_err(&ihost->pdev->dev,
			"%s: sci_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	ihost->sas_ha.dev = &ihost->pdev->dev;
	ihost->sas_ha.lldd_ha = ihost;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(&sci_user_params);
	status = sci_user_parameters_set(ihost, &sci_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&ihost->oem_parameters,
						   pci_info->orom,
						   ihost->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&ihost->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = sci_oem_parameters_set(ihost);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
				"%s: sci_oem_parameters_set failed\n",
				__func__);
		return -ENODEV;
	}

	tasklet_init(&ihost->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)ihost);

	INIT_LIST_HEAD(&ihost->requests_to_complete);
	INIT_LIST_HEAD(&ihost->requests_to_errorback);

	spin_lock_irq(&ihost->scic_lock);
	status = sci_controller_initialize(ihost);
	spin_unlock_irq(&ihost->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = sci_controller_mem_init(ihost);
	if (err)
		return err;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&ihost->ports[i], ihost, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&ihost->phys[i], ihost, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &ihost->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
	}

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq;
		dma_addr_t dma;

		ireq = dmam_alloc_coherent(&ihost->pdev->dev,
					   sizeof(struct isci_request), &dma,
					   GFP_KERNEL);
		if (!ireq)
			return -ENOMEM;

		ireq->tc = &ihost->task_context_table[i];
		ireq->owning_controller = ihost;
		spin_lock_init(&ireq->state_lock);
		ireq->request_daddr = dma;
		ireq->isci_host = ihost;
		ihost->reqs[i] = ireq;
	}

	return 0;
}

void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
			    struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
		sci_del_timer(&ihost->phy_timer);
		ihost->phy_startup_timer_pending = false;
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		sci_controller_start_next_phy(ihost);
		break;
	case SCIC_READY:
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
			      struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
	case SCIC_READY:
		ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
						   iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
	u32 index;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if ((ihost->device_table[index] != NULL) &&
		   (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
			return true;
	}

	return false;
}

void sci_controller_remote_device_stopped(struct isci_host *ihost,
					  struct isci_remote_device *idev)
{
	if (ihost->sm.current_state_id != SCIC_STOPPING) {
		dev_dbg(&ihost->pdev->dev,
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			ihost, idev,
			ihost->sm.current_state_id);
		return;
	}

	if (!sci_controller_has_remote_devices_stopping(ihost))
		sci_change_state(&ihost->sm, SCIC_STOPPED);
}

void sci_controller_post_request(struct isci_host *ihost, u32 request)
{
	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
		__func__, ihost->id, request);

	writel(request, &ihost->smu_registers->post_context_port);
}

struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = ISCI_TAG_TCI(io_tag);

	if (task_index < ihost->task_context_entries) {
		struct isci_request *ireq = ihost->reqs[task_index];

		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
			task_sequence = ISCI_TAG_SEQ(io_tag);

			if (task_sequence == ihost->io_request_sequence[task_index])
				return ireq;
		}
	}

	return NULL;
}

/**
 * This method allocates a remote node index and reserves the remote node
 *    context space for use.  This method can fail if there are no more remote
 *    node indices available.
 * @ihost: This is the controller object which contains the set of
 *    free remote node ids
 * @idev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there is no remote
 * node index available.
 */
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							    struct isci_remote_device *idev,
							    u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = sci_remote_device_node_count(idev);

	node_index = sci_remote_node_table_allocate_remote_node(
		&ihost->available_remote_nodes, remote_node_count
		);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		ihost->device_table[node_index] = idev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
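
/*
 * Illustrative sketch (not part of the driver): pairing the remote node
 * allocate/free calls.  A SATA/STP device may consume more than one
 * contiguous remote node context (sci_remote_device_node_count()), which is
 * why both calls take the device rather than a raw count.
 */
#if 0
static void example_rnc_usage(struct isci_host *ihost,
			      struct isci_remote_device *idev)
{
	u16 node_id;

	if (sci_controller_allocate_remote_node_context(ihost, idev, &node_id)
	    == SCI_SUCCESS) {
		/* ... program node_id into the device's RNC, use it ... */
		sci_controller_free_remote_node_context(ihost, idev, node_id);
	}
}
#endif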

void sci_controller_free_remote_node_context(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     u16 node_id)
{
	u32 remote_node_count = sci_remote_device_node_count(idev);

	if (ihost->device_table[node_id] == idev) {
		ihost->device_table[node_id] = NULL;

		sci_remote_node_table_release_remote_node_index(
			&ihost->available_remote_nodes, remote_node_count, node_id
			);
	}
}

void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer)
{
	/* XXX type safety? */
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
		writel(ihost->uf_control.get,
			&ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

u16 isci_alloc_tag(struct isci_host *ihost)
{
	if (isci_tci_space(ihost)) {
		u16 tci = isci_tci_alloc(ihost);
		u8 seq = ihost->io_request_sequence[tci];

		return ISCI_TAG(seq, tci);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}

enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq == ihost->io_request_sequence[tci]) {
		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);

		isci_tci_free(ihost, tci);

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INVALID_IO_TAG;
}
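
/*
 * Illustrative sketch (not part of the driver): how an I/O tag round-trips
 * through the TCI pool above.  A tag packs a sequence number and a task
 * context index; freeing bumps the per-TCI sequence so a stale tag for the
 * same slot no longer matches in sci_request_by_tag().  The seq/tci values
 * in the comments are hypothetical.
 */
#if 0
static void example_tag_round_trip(struct isci_host *ihost)
{
	u16 tag = isci_alloc_tag(ihost);	/* e.g. seq 2, tci 5 */

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return;

	/* ... use ISCI_TAG_TCI(tag) to index the task context table ... */

	isci_free_tag(ihost, tag);	/* tci 5 now expects seq 3 */
}
#endif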

enum sci_status sci_controller_start_io(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_io(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
						 struct isci_remote_device *idev,
						 struct isci_request *ireq)
{
	/* terminate an ongoing (i.e. started) core IO request.  This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_io_request_terminate(ireq);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and or in the POST_TC_ABORT
	 * request sub-type.
	 */
	sci_controller_post_request(ihost,
				    ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

/**
 * sci_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request.  After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @ihost: The handle to the controller object for which to complete the
 *    IO request.
 * @idev: The handle to the remote device object for which to complete
 *    the IO request.
 * @ireq: the handle to the io request object to complete.
 */
enum sci_status sci_controller_complete_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	enum sci_status status;
	u16 index;

	switch (ihost->sm.current_state_id) {
	case SCIC_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCIC_READY:
		status = sci_remote_device_complete_io(ihost, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		index = ISCI_TAG_TCI(ireq->io_tag);
		clear_bit(IREQ_ACTIVE, &ireq->flags);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
		return SCI_FAILURE_INVALID_STATE;
	}
}
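
/*
 * Illustrative sketch (not part of the driver): the normal I/O round trip
 * through the controller core.  Construction of the request and the
 * completion interrupt path are elided; error handling is omitted.
 */
#if 0
static void example_io_round_trip(struct isci_host *ihost,
				  struct isci_remote_device *idev,
				  struct isci_request *ireq)
{
	if (sci_controller_start_io(ihost, idev, ireq) != SCI_SUCCESS)
		return;

	/* ... hardware posts a completion and the handler runs ... */

	if (sci_controller_complete_io(ihost, idev, ireq) == SCI_SUCCESS)
		isci_free_tag(ihost, ireq->io_tag);
}
#endif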

enum sci_status sci_controller_continue_io(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

/**
 * sci_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @ihost: the handle to the controller object for which to start the task
 *    management request.
 * @idev: the handle to the remote device object for which to start
 *    the task management request.
 * @ireq: the handle to the task request object to start.
 */
enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
					       struct isci_remote_device *idev,
					       struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_task(ihost, idev, ireq);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on starting
		 * the request (to post the TC when the RNC is resumed.)
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);
		sci_controller_post_request(ihost, ireq->post_context);
		break;
	default:
		break;
	}

	return status;
}