/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "scic_io_request.h"
#include "scic_port.h"
#include "port.h"
#include "request.h"
#include "host.h"
#include "probe_roms.h"
#include "scic_sds_controller.h"

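/**
 * isci_msix_isr() - This function is the MSI-X handler for normal I/O
 *    completions; it asks the core whether completions are pending and, if
 *    so, defers processing to the completion tasklet.
 * @vec: This parameter specifies the interrupt vector
 * @data: This parameter specifies the ISCI host object
 *
 */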
irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = ihost->core_controller;

	if (scic_sds_controller_isr(scic))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

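/**
 * isci_intx_isr() - This function is the legacy INTx handler; completions
 *    are acknowledged in the SMU interrupt status register and deferred to
 *    the completion tasklet, while error interrupts are handled immediately
 *    under scic_lock.  IRQ_NONE is returned when neither condition is
 *    asserted, since the interrupt line may be shared.
 * @vec: This parameter specifies the interrupt vector
 * @data: This parameter specifies the ISCI host object
 *
 */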
irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = ihost->core_controller;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

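/**
 * isci_error_isr() - This function is the handler for the dedicated error
 *    interrupt vector; any pending error condition is passed straight to the
 *    core's error handler.
 * @vec: This parameter specifies the interrupt vector
 * @data: This parameter specifies the ISCI host object
 *
 */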
irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = ihost->core_controller;

	if (scic_sds_controller_error_isr(scic))
		scic_sds_controller_error_handler(scic);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			"controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

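/**
 * isci_host_scan_finished() - This function serves as the SCSI layer's
 *    scan_finished callback for asynchronous scanning.  It reports the scan
 *    as done (returns 1) only once controller start is no longer pending,
 *    after flushing any discovery work queued so far.
 * @shost: This parameter specifies the SCSI host being scanned
 * @time: This parameter specifies how long the scan has been pending
 *
 */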
int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		 __func__, isci_host_get_state(ihost), time);

	return 1;

}

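/**
 * isci_host_scan_start() - This function serves as the SCSI layer's
 *    scan_start callback.  It marks controller start as pending and kicks
 *    off the core controller start with the core's suggested timeout;
 *    isci_host_start_complete() clears the pending flag once the core
 *    reports back.
 * @shost: This parameter specifies the SCSI host to be scanned
 *
 */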
void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	struct scic_sds_controller *scic = ihost->core_controller;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(scic);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(scic, tmo);
	scic_controller_enable_interrupts(scic);
	spin_unlock_irq(&ihost->scic_lock);
}

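/**
 * isci_host_stop_complete() - This function is called by the core library to
 *    indicate that the controller stop sequence has completed.  It disables
 *    interrupts, clears the stop-pending flag and wakes any thread waiting
 *    in isci_host_deinit().
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library (currently unused).
 *
 */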
void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(ihost->core_controller);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head    completed_request_list;
	struct list_head    errored_request_list;
	struct list_head    *current_position;
	struct list_head    *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task     *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(isci_host->core_controller);

	/* Take the lists of completed I/Os from the host. */

	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object. */
			isci_request_free(isci_host, request);
		}
	}

}

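/**
 * isci_host_deinit() - This function tears down the host: it stops every
 *    remote device on every port, requests a controller stop from the core,
 *    waits for isci_host_stop_complete() to signal completion, then resets
 *    the controller and destroys the host's timer list.
 * @ihost: This parameter specifies the ISCI host object
 *
 */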
void isci_host_deinit(struct isci_host *ihost)
{
	struct scic_sds_controller *scic = ihost->core_controller;
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *port = &ihost->isci_ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &port->remote_dev_list, node) {
			isci_remote_device_change_state(idev, isci_stopping);
			isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_stop(scic, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	scic_controller_reset(scic);
	isci_timer_list_destroy(ihost);
}

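/* Each controller's SCU and SMU register sets are carved out of shared PCI
 * BARs; these helpers return the ioremapped base for this host, offset by
 * its controller id.
 */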
static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

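/**
 * isci_user_parameters_get() - This function populates the core's user
 *    parameter structure from the driver-wide tunables (phy_gen, the
 *    inactivity/occupancy timeouts and the concurrent spin-up limit, defined
 *    elsewhere in the driver); the per-phy align insertion frequencies are
 *    hard-coded here rather than exported.
 * @isci_host: This parameter specifies the ISCI host object
 * @scic_user_params: This parameter specifies the core user parameters to
 *    populate.
 *
 */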
static void isci_user_parameters_get(
		struct isci_host *isci_host,
		union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

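/**
 * isci_host_init() - This function performs the one-time setup of a host
 *    instance: it allocates and constructs the core controller, applies the
 *    user and OEM parameters (preferring OEM settings found in the option
 *    ROM, when present), initializes the core, and sets up the completion
 *    tasklet, request DMA pool, ports, phys and remote device bookkeeping.
 *    It returns 0 on success or a negative errno on failure.
 * @isci_host: This parameter specifies the ISCI host object
 *
 */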
int isci_host_init(struct isci_host *isci_host)
{
	int err = 0, i;
	enum sci_status status;
	struct scic_sds_controller *controller;
	union scic_oem_parameters oem;
	union scic_user_parameters scic_user_params;
	struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);

	isci_timer_list_construct(isci_host);

	controller = scic_controller_alloc(&isci_host->pdev->dev);

	if (!controller) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_alloc failed\n",
			__func__);
		return -ENOMEM;
	}

	isci_host->core_controller = controller;
	controller->ihost = isci_host;
	spin_lock_init(&isci_host->state_lock);
	spin_lock_init(&isci_host->scic_lock);
	spin_lock_init(&isci_host->queue_lock);
	init_waitqueue_head(&isci_host->eventq);

	isci_host_change_state(isci_host, isci_starting);
	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

	status = scic_controller_construct(controller, scu_base(isci_host),
					   smu_base(isci_host));

	if (status != SCI_SUCCESS) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	isci_host->sas_ha.dev = &isci_host->pdev->dev;
	isci_host->sas_ha.lldd_ha = isci_host;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(isci_host, &scic_user_params);
	status = scic_user_parameters_set(isci_host->core_controller,
					  &scic_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	scic_oem_parameters_get(controller, &oem);

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&oem,
						   pci_info->orom,
						   isci_host->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = scic_oem_parameters_set(isci_host->core_controller, &oem);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
				"%s: scic_oem_parameters_set failed\n",
				__func__);
		return -ENODEV;
	}

	tasklet_init(&isci_host->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)isci_host);

	INIT_LIST_HEAD(&isci_host->requests_to_complete);
	INIT_LIST_HEAD(&isci_host->requests_to_errorback);

	spin_lock_irq(&isci_host->scic_lock);
	status = scic_controller_initialize(isci_host->core_controller);
	spin_unlock_irq(&isci_host->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = scic_controller_mem_init(isci_host->core_controller);
	if (err)
		return err;

	/*
	 * keep the pool alloc size around, will use it for a bounds checking
	 * when trying to convert virtual addresses to physical addresses
	 */
	isci_host->dma_pool_alloc_size = sizeof(struct isci_request) +
					 scic_io_request_get_object_size();
	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
					       isci_host->dma_pool_alloc_size,
					       SLAB_HWCACHE_ALIGN, 0);

	if (!isci_host->dma_pool)
		return -ENOMEM;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&isci_host->isci_ports[i], isci_host, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&isci_host->phys[i], isci_host, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &isci_host->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
		spin_lock_init(&idev->state_lock);
	}

	return 0;
}