/*
 * SBP2 driver (SCSI over IEEE1394)
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * The basic structure of this driver is based on the old storage driver,
 * drivers/ieee1394/sbp2.c, originally written by
 *     James Goodwin <jamesg@filanet.com>
 * with later contributions and ongoing maintenance from
 *     Ben Collins <bcollins@debian.org>,
 *     Stefan Richter <stefanr@s5r6.in-berlin.de>
 * and many others.
 */

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/system.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "fw-device.h"
#include "fw-topology.h"
#include "fw-transaction.h"

/*
 * So far only bridges from Oxford Semiconductor are known to support
 * concurrent logins. Depending on firmware, four or two concurrent logins
 * are possible on OXFW911 and newer Oxsemi bridges.
 *
 * Concurrent logins are useful together with cluster filesystems.
 */
static int sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
		 "(default = Y, use N for concurrent initiators)");

/*
 * Flags for firmware oddities
 *
 * - 128kB max transfer
 *   Limit transfer size. Necessary for some old bridges.
 *
 * - 36 byte inquiry
 *   When scsi_mod probes the device, let the inquiry command look like that
 *   from MS Windows.
 *
 * - skip mode page 8
 *   Suppress sending of mode_sense for mode page 8 if the device pretends to
 *   support the SCSI Primary Block commands instead of Reduced Block Commands.
 *
 * - fix capacity
 *   Tell sd_mod to correct the last sector number reported by read_capacity.
 *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
 *   Don't use this with devices which don't have this bug.
 *
 * - delay inquiry
 *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
 *
 * - power condition
 *   Set the power condition field in the START STOP UNIT commands sent by
 *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
 *   Some disks need this to spin down or to resume properly.
 *
 * - override internal blacklist
 *   Instead of adding to the built-in blacklist, use only the workarounds
 *   specified in the module load parameter.
 *   Useful if a blacklist entry interfered with a non-broken device.
 */
#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
#define SBP2_WORKAROUND_DELAY_INQUIRY	0x10
#define SBP2_INQUIRY_DELAY		12
#define SBP2_WORKAROUND_POWER_CONDITION	0x20
#define SBP2_WORKAROUND_OVERRIDE	0x100

static int sbp2_param_workarounds;
module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
	", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
	", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
	", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
	", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
	", set power condition in start stop unit = "
				  __stringify(SBP2_WORKAROUND_POWER_CONDITION)
	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
	", or a combination)");

/* I don't know why the SCSI stack doesn't define something like this... */
typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);

static const char sbp2_driver_name[] = "sbp2";

/*
 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
 * and one struct scsi_device per sbp2_logical_unit.
 */
struct sbp2_logical_unit {
	struct sbp2_target *tgt;
	struct list_head link;
	struct fw_address_handler address_handler;
	struct list_head orb_list;

	u64 command_block_agent_address;
	u16 lun;
	int login_id;

	/*
	 * The generation is updated once we've logged in or reconnected
	 * to the logical unit.  Thus, I/O to the device will automatically
	 * fail and get retried if it happens in a window where the device
	 * is not ready, e.g. after a bus reset but before we reconnect.
	 */
	int generation;
	int retries;
	struct delayed_work work;
	bool has_sdev;
	bool blocked;
};

/*
 * We create one struct sbp2_target per IEEE 1212 Unit Directory
 * and one struct Scsi_Host per sbp2_target.
 */
struct sbp2_target {
	struct kref kref;
	struct fw_unit *unit;
	const char *bus_id;
	struct list_head lu_list;

	u64 management_agent_address;
	u64 guid;
	int directory_id;
	int node_id;
	int address_high;
	unsigned int workarounds;
	unsigned int mgt_orb_timeout;

	int dont_block;	/* counter for each logical unit */
	int blocked;	/* ditto */
};

/*
 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
 * provided in the config rom. Most devices do provide a value, which
 * we'll use for login management orbs, but with some sane limits.
 */
#define SBP2_MIN_LOGIN_ORB_TIMEOUT	5000U	/* Timeout in ms */
#define SBP2_MAX_LOGIN_ORB_TIMEOUT	40000U	/* Timeout in ms */
#define SBP2_ORB_TIMEOUT		2000U	/* Timeout in ms */
#define SBP2_ORB_NULL			0x80000000
#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
#define SBP2_RETRY_LIMIT		0xf		/* 15 retries */
#define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */

/* Unit directory keys */
#define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
#define SBP2_CSR_FIRMWARE_REVISION	0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER	0x14
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY	0xd4

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST		0x0
#define SBP2_QUERY_LOGINS_REQUEST	0x1
#define SBP2_RECONNECT_REQUEST		0x3
#define SBP2_SET_PASSWORD_REQUEST	0x4
#define SBP2_LOGOUT_REQUEST		0x7
#define SBP2_ABORT_TASK_REQUEST		0xb
#define SBP2_ABORT_TASK_SET		0xc
#define SBP2_LOGICAL_UNIT_RESET		0xe
#define SBP2_TARGET_RESET_REQUEST	0xf

/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE		0x00
#define SBP2_AGENT_RESET		0x04
#define SBP2_ORB_POINTER		0x08
#define SBP2_DOORBELL			0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE	0x14

/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE	0x0
#define SBP2_STATUS_TRANSPORT_FAILURE	0x1
#define SBP2_STATUS_ILLEGAL_REQUEST	0x2
#define SBP2_STATUS_VENDOR_DEPENDENT	0x3

#define STATUS_GET_ORB_HIGH(v)		((v).status & 0xffff)
#define STATUS_GET_SBP_STATUS(v)	(((v).status >> 16) & 0xff)
#define STATUS_GET_LEN(v)		(((v).status >> 24) & 0x07)
#define STATUS_GET_DEAD(v)		(((v).status >> 27) & 0x01)
#define STATUS_GET_RESPONSE(v)		(((v).status >> 28) & 0x03)
#define STATUS_GET_SOURCE(v)		(((v).status >> 30) & 0x03)
#define STATUS_GET_ORB_LOW(v)		((v).orb_low)
#define STATUS_GET_DATA(v)		((v).data)

struct sbp2_status {
	u32 status;
	u32 orb_low;
	u8 data[24];
};
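
/*
 * Layout note: a status write carries at most 32 bytes.  The first two
 * quadlets form the header decoded by the STATUS_GET_* macros above; up
 * to 24 further bytes of command set-dependent data land in data[], which
 * sbp2_status_to_sense_data() below turns into SCSI sense data.
 */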

struct sbp2_pointer {
	__be32 high;
	__be32 low;
};

struct sbp2_orb {
	struct fw_transaction t;
	struct kref kref;
	dma_addr_t request_bus;
	int rcode;
	struct sbp2_pointer pointer;
	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
	struct list_head link;
};

#define MANAGEMENT_ORB_LUN(v)			((v))
#define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
#define MANAGEMENT_ORB_EXCLUSIVE(v)		((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define MANAGEMENT_ORB_NOTIFY			((1) << 31)

#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)	((v))
#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)	((v) << 16)

struct sbp2_management_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer password;
		struct sbp2_pointer response;
		__be32 misc;
		__be32 length;
		struct sbp2_pointer status_fifo;
	} request;
	__be32 response[4];
	dma_addr_t response_bus;
	struct completion done;
	struct sbp2_status status;
};

struct sbp2_login_response {
	__be32 misc;
	struct sbp2_pointer command_block_agent;
	__be32 reconnect_hold;
};
#define COMMAND_ORB_DATA_SIZE(v)	((v))
#define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
#define COMMAND_ORB_SPEED(v)		((v) << 24)
#define COMMAND_ORB_DIRECTION		((1) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define COMMAND_ORB_NOTIFY		((1) << 31)

struct sbp2_command_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer next;
		struct sbp2_pointer data_descriptor;
		__be32 misc;
		u8 command_block[12];
	} request;
	struct scsi_cmnd *cmd;
	scsi_done_fn_t done;
	struct sbp2_logical_unit *lu;

	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
	dma_addr_t page_table_bus;
};

/*
 * List of devices with known bugs.
 *
 * The firmware_revision field, masked with 0xffff00, is the best
 * indicator for the type of bridge chip of a device.  It yields a few
 * false positives but this did not break correctly behaving devices
 * so far.  We use ~0 as a wildcard, since the 24 bit values we get
 * from the config rom can never match that.
 */
static const struct {
	u32 firmware_revision;
	u32 model;
	unsigned int workarounds;
} sbp2_workarounds_table[] = {
	/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x001010,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
					  SBP2_WORKAROUND_MODE_SENSE_8 |
					  SBP2_WORKAROUND_POWER_CONDITION,
	},
	/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x000000,
		.workarounds		= SBP2_WORKAROUND_DELAY_INQUIRY |
					  SBP2_WORKAROUND_POWER_CONDITION,
	},
	/* Initio bridges, actually only needed for some older ones */ {
		.firmware_revision	= 0x000200,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
	},
	/* PL-3507 bridge with Prolific firmware */ {
		.firmware_revision	= 0x012800,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_POWER_CONDITION,
	},
	/* Symbios bridge */ {
		.firmware_revision	= 0xa0b800,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},
	/* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
		.firmware_revision	= 0x002600,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},

	/*
	 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
	 * these iPods do not feature the read_capacity bug according
	 * to one report.  Read_capacity behaviour as well as model_id
	 * could change due to Apple-supplied firmware updates though.
	 */

	/* iPod 4th generation. */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000021,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod mini */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000023,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod Photo */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x00007e,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	}
};

static void
free_orb(struct kref *kref)
{
	struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);

	kfree(orb);
}

static void
sbp2_status_write(struct fw_card *card, struct fw_request *request,
		  int tcode, int destination, int source,
		  int generation, int speed,
		  unsigned long long offset,
		  void *payload, size_t length, void *callback_data)
{
	struct sbp2_logical_unit *lu = callback_data;
	struct sbp2_orb *orb;
	struct sbp2_status status;
	size_t header_size;
	unsigned long flags;

	if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
	    length == 0 || length > sizeof(status)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	header_size = min(length, 2 * sizeof(u32));
	fw_memcpy_from_be32(&status, payload, header_size);
	if (length > header_size)
		memcpy(status.data, payload + 8, length - header_size);
	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
		fw_notify("non-orb related status write, not handled\n");
		fw_send_response(card, request, RCODE_COMPLETE);
		return;
	}

	/* Lookup the orb corresponding to this status write. */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(orb, &lu->orb_list, link) {
		if (STATUS_GET_ORB_HIGH(status) == 0 &&
		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
			orb->rcode = RCODE_COMPLETE;
			list_del(&orb->link);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&orb->link != &lu->orb_list)
		orb->callback(orb, &status);
	else
		fw_error("status write for unknown orb\n");

	kref_put(&orb->kref, free_orb);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct sbp2_orb *orb = data;
	unsigned long flags;

	/*
	 * This is a little tricky.  We can get the status write for
	 * the orb before we get this callback.  The status write
	 * handler above will assume the orb pointer transaction was
	 * successful and set the rcode to RCODE_COMPLETE for the orb.
	 * So this callback only sets the rcode if it hasn't already
	 * been set and only does the cleanup if the transaction
	 * failed and we didn't already get a status write.
	 */
	spin_lock_irqsave(&card->lock, flags);

	if (orb->rcode == -1)
		orb->rcode = rcode;
	if (orb->rcode != RCODE_COMPLETE) {
		list_del(&orb->link);
		spin_unlock_irqrestore(&card->lock, flags);
		orb->callback(orb, NULL);
	} else {
		spin_unlock_irqrestore(&card->lock, flags);
	}

	kref_put(&orb->kref, free_orb);
}

static void
sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
	      int node_id, int generation, u64 offset)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	unsigned long flags;

	orb->pointer.high = 0;
	orb->pointer.low = cpu_to_be32(orb->request_bus);

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&orb->link, &lu->orb_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	/* Take a ref for the orb list and for the transaction callback. */
	kref_get(&orb->kref);
	kref_get(&orb->kref);

	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
			node_id, generation, device->max_speed, offset,
			&orb->pointer, sizeof(orb->pointer),
			complete_transaction, orb);
}

static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct sbp2_orb *orb, *next;
	struct list_head list;
	unsigned long flags;
	int retval = -ENOENT;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&device->card->lock, flags);
	list_splice_init(&lu->orb_list, &list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	list_for_each_entry_safe(orb, next, &list, link) {
		retval = 0;
		if (fw_cancel_transaction(device->card, &orb->t) == 0)
			continue;

		orb->rcode = RCODE_CANCELLED;
		orb->callback(orb, NULL);
	}

	return retval;
}

static void
complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
	struct sbp2_management_orb *orb =
		container_of(base_orb, struct sbp2_management_orb, base);

	if (status)
		memcpy(&orb->status, status, sizeof(*status));
	complete(&orb->done);
}

static int
sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
			 int generation, int function, int lun_or_login_id,
			 void *response)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct sbp2_management_orb *orb;
	unsigned int timeout;
	int retval = -ENOMEM;

	if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
		return 0;

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL)
		return -ENOMEM;

	kref_init(&orb->base.kref);
	orb->response_bus =
		dma_map_single(device->card->device, &orb->response,
			       sizeof(orb->response), DMA_FROM_DEVICE);
	if (dma_mapping_error(device->card->device, orb->response_bus))
		goto fail_mapping_response;

	orb->request.response.high = 0;
	orb->request.response.low  = cpu_to_be32(orb->response_bus);

	orb->request.misc = cpu_to_be32(
		MANAGEMENT_ORB_NOTIFY |
		MANAGEMENT_ORB_FUNCTION(function) |
		MANAGEMENT_ORB_LUN(lun_or_login_id));
	orb->request.length = cpu_to_be32(
		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));

	orb->request.status_fifo.high =
		cpu_to_be32(lu->address_handler.offset >> 32);
	orb->request.status_fifo.low  =
		cpu_to_be32(lu->address_handler.offset);

	if (function == SBP2_LOGIN_REQUEST) {
		/* Ask for 2^2 == 4 seconds reconnect grace period */
		orb->request.misc |= cpu_to_be32(
			MANAGEMENT_ORB_RECONNECT(2) |
			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
		timeout = lu->tgt->mgt_orb_timeout;
	} else {
		timeout = SBP2_ORB_TIMEOUT;
	}

	init_completion(&orb->done);
	orb->base.callback = complete_management_orb;

	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->base.request_bus))
		goto fail_mapping_request;

	sbp2_send_orb(&orb->base, lu, node_id, generation,
		      lu->tgt->management_agent_address);

	wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));

	retval = -EIO;
	if (sbp2_cancel_orbs(lu) == 0) {
		fw_error("%s: orb reply timed out, rcode=0x%02x\n",
			 lu->tgt->bus_id, orb->base.rcode);
		goto out;
	}

	if (orb->base.rcode != RCODE_COMPLETE) {
		fw_error("%s: management write failed, rcode 0x%02x\n",
			 lu->tgt->bus_id, orb->base.rcode);
		goto out;
	}

	if (STATUS_GET_RESPONSE(orb->status) != 0 ||
	    STATUS_GET_SBP_STATUS(orb->status) != 0) {
		fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
			 STATUS_GET_RESPONSE(orb->status),
			 STATUS_GET_SBP_STATUS(orb->status));
		goto out;
	}

	retval = 0;
 out:
	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
 fail_mapping_request:
	dma_unmap_single(device->card->device, orb->response_bus,
			 sizeof(orb->response), DMA_FROM_DEVICE);
 fail_mapping_response:
	if (response)
		memcpy(response, orb->response, sizeof(orb->response));
	kref_put(&orb->base.kref, free_orb);

	return retval;
}

static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	__be32 d = 0;

	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
			   lu->tgt->node_id, lu->generation, device->max_speed,
			   lu->command_block_agent_address + SBP2_AGENT_RESET,
			   &d, sizeof(d));
}

static void
complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
				   void *payload, size_t length, void *data)
{
	kfree(data);
}

static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct fw_transaction *t;
	static __be32 d;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (t == NULL)
		return;

	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
			lu->tgt->node_id, lu->generation, device->max_speed,
			lu->command_block_agent_address + SBP2_AGENT_RESET,
			&d, sizeof(d), complete_agent_reset_write_no_wait, t);
}

static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
{
	struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card;
	unsigned long flags;

	/* serialize with comparisons of lu->generation and card->generation */
	spin_lock_irqsave(&card->lock, flags);
	lu->generation = generation;
	spin_unlock_irqrestore(&card->lock, flags);
}

static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
{
	/*
	 * We may access dont_block without taking card->lock here:
	 * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
	 * are currently serialized against each other.
	 * And a wrong result in sbp2_conditionally_block()'s access of
	 * dont_block is rather harmless; it simply misses its first chance.
	 */
	--lu->tgt->dont_block;
}

/*
 * Blocks lu->tgt if all of the following conditions are met:
 *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
 *     logical units have been finished (indicated by dont_block == 0).
 *   - lu->generation is stale.
 *
 * Note, scsi_block_requests() must be called while holding card->lock,
 * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
 * unblock the target.
 */
static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (!tgt->dont_block && !lu->blocked &&
	    lu->generation != card->generation) {
		lu->blocked = true;
		if (++tgt->blocked == 1)
			scsi_block_requests(shost);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}

/*
 * Unblocks lu->tgt as soon as all its logical units can be unblocked.
 * Note, it is harmless to run scsi_unblock_requests() outside the
 * card->lock protected section.  On the other hand, running it inside
 * the section might clash with shost->host_lock.
 */
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;
	bool unblock = false;

	spin_lock_irqsave(&card->lock, flags);
	if (lu->blocked && lu->generation == card->generation) {
		lu->blocked = false;
		unblock = --tgt->blocked == 0;
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (unblock)
		scsi_unblock_requests(shost);
}

/*
 * Prevents future blocking of tgt and unblocks it.
 * Note, it is harmless to run scsi_unblock_requests() outside the
 * card->lock protected section.  On the other hand, running it inside
 * the section might clash with shost->host_lock.
 */
static void sbp2_unblock(struct sbp2_target *tgt)
{
	struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	++tgt->dont_block;
	spin_unlock_irqrestore(&card->lock, flags);

	scsi_unblock_requests(shost);
}

static int sbp2_lun2int(u16 lun)
{
	struct scsi_lun eight_bytes_lun;

	memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
	eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
	eight_bytes_lun.scsi_lun[1] = lun & 0xff;

	return scsilun_to_int(&eight_bytes_lun);
}
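
/*
 * For small flat LUNs the round trip above is effectively the identity,
 * e.g. sbp2_lun2int(1) == 1; the detour through struct scsi_lun exists to
 * encode the 16-bit SBP-2 LUN the way the SCSI midlayer expects.
 */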

static void sbp2_release_target(struct kref *kref)
{
	struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
	struct sbp2_logical_unit *lu, *next;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	struct scsi_device *sdev;
	struct fw_device *device = fw_device(tgt->unit->device.parent);

	/* prevent deadlocks */
	sbp2_unblock(tgt);

	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
		sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		}
		sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);

		fw_core_remove_address_handler(&lu->address_handler);
		list_del(&lu->link);
		kfree(lu);
	}
	scsi_remove_host(shost);
	fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);

	fw_unit_put(tgt->unit);
	scsi_host_put(shost);
	fw_device_put(device);
}

static struct workqueue_struct *sbp2_wq;

/*
 * Always get the target's kref when scheduling work on one of its units.
 * Each workqueue job is responsible for calling sbp2_target_put() upon return.
 */
static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
{
	if (queue_delayed_work(sbp2_wq, &lu->work, delay))
		kref_get(&lu->tgt->kref);
}

static void sbp2_target_put(struct sbp2_target *tgt)
{
	kref_put(&tgt->kref, sbp2_release_target);
}

/*
 * Write retransmit retry values into the BUSY_TIMEOUT register.
 * - The single-phase retry protocol is supported by all SBP-2 devices, but the
 *   default retry_limit value is 0 (i.e. never retry transmission). We write a
 *   saner value after logging into the device.
 * - The dual-phase retry protocol is optional to implement, and if not
 *   supported, writes to the dual-phase portion of the register will be
 *   ignored. We try to write the original 1394-1995 default here.
 * - In the case of devices that are also SBP-3-compliant, all writes are
 *   ignored, as the register is read-only, but contains single-phase retry of
 *   15, which is what we're trying to set for all SBP-2 devices anyway, so this
 *   write attempt is safe and yields more consistent behavior for all devices.
 *
 * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
 * and section 6.4 of the SBP-3 spec for further details.
 */
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);

	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
			   lu->tgt->node_id, lu->generation, device->max_speed,
			   CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
			   &d, sizeof(d));
}
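
/*
 * Worked example of the value written above: SBP2_CYCLE_LIMIT is 0xc8 << 12
 * and SBP2_RETRY_LIMIT is 0xf, so d is cpu_to_be32(0x000c800f), i.e. the
 * dual-phase retry period is limited to 200 cycles of 125us each and the
 * single-phase retry limit is 15.
 */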

static void sbp2_reconnect(struct work_struct *work);

static void sbp2_login(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = fw_device(tgt->unit->device.parent);
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct sbp2_login_response response;
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		goto out;

	generation    = device->generation;
	smp_rmb();    /* node_id must not be older than generation */
	node_id       = device->node_id;
	local_node_id = device->card->node_id;

	/* If this is a re-login attempt, log out, or we might be rejected. */
	if (lu->has_sdev)
		sbp2_send_management_orb(lu, device->node_id, generation,
				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);

	if (sbp2_send_management_orb(lu, node_id, generation,
				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
		if (lu->retries++ < 5) {
			sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
		} else {
			fw_error("%s: failed to login to LUN %04x\n",
				 tgt->bus_id, lu->lun);
			/* Let any waiting I/O fail from now on. */
			sbp2_unblock(lu->tgt);
		}
		goto out;
	}

	tgt->node_id	  = node_id;
	tgt->address_high = local_node_id << 16;
	sbp2_set_generation(lu, generation);

	lu->command_block_agent_address =
		((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
		      << 32) | be32_to_cpu(response.command_block_agent.low);
	lu->login_id = be32_to_cpu(response.misc) & 0xffff;

	fw_notify("%s: logged in to LUN %04x (%d retries)\n",
		  tgt->bus_id, lu->lun, lu->retries);

	/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
	sbp2_set_busy_timeout(lu);

	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
	sbp2_agent_reset(lu);

	/* This was a re-login. */
	if (lu->has_sdev) {
		sbp2_cancel_orbs(lu);
		sbp2_conditionally_unblock(lu);
		goto out;
	}

	if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
		ssleep(SBP2_INQUIRY_DELAY);

	shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
	/*
	 * FIXME:  We are unable to perform reconnects while in sbp2_login().
	 * Therefore __scsi_add_device() will get into trouble if a bus reset
	 * happens in parallel.  It will either fail or leave us with an
	 * unusable sdev.  As a workaround we check for this and retry the
	 * whole login and SCSI probing.
	 */

	/* Reported error during __scsi_add_device() */
	if (IS_ERR(sdev))
		goto out_logout_login;

	/* Unreported error during __scsi_add_device() */
	smp_rmb(); /* get current card generation */
	if (generation != device->card->generation) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
		goto out_logout_login;
	}

	/* No error during __scsi_add_device() */
	lu->has_sdev = true;
	scsi_device_put(sdev);
	sbp2_allow_block(lu);
	goto out;

 out_logout_login:
	smp_rmb(); /* generation may have changed */
	generation = device->generation;
	smp_rmb(); /* node_id must not be older than generation */

	sbp2_send_management_orb(lu, device->node_id, generation,
				 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
	/*
	 * If a bus reset happened, sbp2_update will have requeued
	 * lu->work already.  Reset the work from reconnect to login.
	 */
	PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
 out:
	sbp2_target_put(tgt);
}

static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
	struct sbp2_logical_unit *lu;

	lu = kmalloc(sizeof(*lu), GFP_KERNEL);
	if (!lu)
		return -ENOMEM;

	lu->address_handler.length           = 0x100;
	lu->address_handler.address_callback = sbp2_status_write;
	lu->address_handler.callback_data    = lu;

	if (fw_core_add_address_handler(&lu->address_handler,
					&fw_high_memory_region) < 0) {
		kfree(lu);
		return -ENOMEM;
	}

	lu->tgt      = tgt;
	lu->lun      = lun_entry & 0xffff;
	lu->retries  = 0;
	lu->has_sdev = false;
	lu->blocked  = false;
	++tgt->dont_block;
	INIT_LIST_HEAD(&lu->orb_list);
	INIT_DELAYED_WORK(&lu->work, sbp2_login);

	list_add_tail(&lu->link, &tgt->lu_list);
	return 0;
}

static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
		    sbp2_add_logical_unit(tgt, value) < 0)
			return -ENOMEM;
	return 0;
}

static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
			      u32 *model, u32 *firmware_revision)
{
	struct fw_csr_iterator ci;
	int key, value;
	unsigned int timeout;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {

		case CSR_DEPENDENT_INFO | CSR_OFFSET:
			tgt->management_agent_address =
					CSR_REGISTER_BASE + 4 * value;
			break;

		case CSR_DIRECTORY_ID:
			tgt->directory_id = value;
			break;

		case CSR_MODEL:
			*model = value;
			break;

		case SBP2_CSR_FIRMWARE_REVISION:
			*firmware_revision = value;
			break;

		case SBP2_CSR_UNIT_CHARACTERISTICS:
			/* the timeout value is stored in 500ms units */
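			/* e.g. a field value of 0x14 (20) yields 20 * 500 = 10000 ms */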
			timeout = ((unsigned int) value >> 8 & 0xff) * 500;
			timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
			tgt->mgt_orb_timeout =
				  min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);

			if (timeout > tgt->mgt_orb_timeout)
				fw_notify("%s: config rom contains %ds "
					  "management ORB timeout, limiting "
					  "to %ds\n", tgt->bus_id,
					  timeout / 1000,
					  tgt->mgt_orb_timeout / 1000);
			break;

		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
			if (sbp2_add_logical_unit(tgt, value) < 0)
				return -ENOMEM;
			break;

		case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
			/* Adjust for the increment in the iterator */
			if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
				return -ENOMEM;
			break;
		}
	}
	return 0;
}

static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
				  u32 firmware_revision)
{
	int i;
	unsigned int w = sbp2_param_workarounds;

	if (w)
		fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
			  "if you need the workarounds parameter for %s\n",
			  tgt->bus_id);

	if (w & SBP2_WORKAROUND_OVERRIDE)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {

		if (sbp2_workarounds_table[i].firmware_revision !=
		    (firmware_revision & 0xffffff00))
			continue;

		if (sbp2_workarounds_table[i].model != model &&
		    sbp2_workarounds_table[i].model != ~0)
			continue;

		w |= sbp2_workarounds_table[i].workarounds;
		break;
	}
 out:
	if (w)
		fw_notify("Workarounds for %s: 0x%x "
			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
			  tgt->bus_id, w, firmware_revision, model);
	tgt->workarounds = w;
}

static struct scsi_host_template scsi_driver_template;

static int sbp2_probe(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device = fw_device(unit->device.parent);
	struct sbp2_target *tgt;
	struct sbp2_logical_unit *lu;
	struct Scsi_Host *shost;
	u32 model, firmware_revision;

	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
	if (shost == NULL)
		return -ENOMEM;

	tgt = (struct sbp2_target *)shost->hostdata;
	unit->device.driver_data = tgt;
	tgt->unit = unit;
	kref_init(&tgt->kref);
	INIT_LIST_HEAD(&tgt->lu_list);
	tgt->bus_id = unit->device.bus_id;
	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];

	if (fw_device_enable_phys_dma(device) < 0)
		goto fail_shost_put;

	if (scsi_add_host(shost, &unit->device) < 0)
		goto fail_shost_put;

	fw_device_get(device);
	fw_unit_get(unit);

	/* Initialize to values that won't match anything in our table. */
	firmware_revision = 0xff000000;
	model = 0xff000000;

	/* implicit directory ID */
	tgt->directory_id = ((unit->directory - device->config_rom) * 4
			     + CSR_CONFIG_ROM) & 0xffffff;

	if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
			       &firmware_revision) < 0)
		goto fail_tgt_put;

	sbp2_init_workarounds(tgt, model, firmware_revision);

	/* Do the login in a workqueue so we can easily reschedule retries. */
	list_for_each_entry(lu, &tgt->lu_list, link)
		sbp2_queue_work(lu, 0);
	return 0;

 fail_tgt_put:
	sbp2_target_put(tgt);
	return -ENOMEM;

 fail_shost_put:
	scsi_host_put(shost);
	return -ENOMEM;
}

static int sbp2_remove(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
	struct sbp2_target *tgt = unit->device.driver_data;

	sbp2_target_put(tgt);
	return 0;
}

static void sbp2_reconnect(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = fw_device(tgt->unit->device.parent);
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		goto out;

	generation    = device->generation;
	smp_rmb();    /* node_id must not be older than generation */
	node_id       = device->node_id;
	local_node_id = device->card->node_id;

	if (sbp2_send_management_orb(lu, node_id, generation,
				     SBP2_RECONNECT_REQUEST,
				     lu->login_id, NULL) < 0) {
		/*
		 * If reconnect was impossible even though we are in the
		 * current generation, fall back and try to log in again.
		 *
		 * We could check for "Function rejected" status, but
		 * looking at the bus generation is simpler and more general.
		 */
		smp_rmb(); /* get current card generation */
		if (generation == device->card->generation ||
		    lu->retries++ >= 5) {
			fw_error("%s: failed to reconnect\n", tgt->bus_id);
			lu->retries = 0;
			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
		}
		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
		goto out;
	}

	tgt->node_id      = node_id;
	tgt->address_high = local_node_id << 16;
	sbp2_set_generation(lu, generation);

	fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
		  tgt->bus_id, lu->lun, lu->retries);

	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);
	sbp2_conditionally_unblock(lu);
 out:
	sbp2_target_put(tgt);
}

static void sbp2_update(struct fw_unit *unit)
{
	struct sbp2_target *tgt = unit->device.driver_data;
	struct sbp2_logical_unit *lu;

	fw_device_enable_phys_dma(fw_device(unit->device.parent));

	/*
	 * Fw-core serializes sbp2_update() against sbp2_remove().
	 * Iteration over tgt->lu_list is therefore safe here.
	 */
	list_for_each_entry(lu, &tgt->lu_list, link) {
		sbp2_conditionally_block(lu);
		lu->retries = 0;
		sbp2_queue_work(lu, 0);
	}
}

#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
#define SBP2_SW_VERSION_ENTRY	0x00010483

static const struct fw_device_id sbp2_id_table[] = {
	{
		.match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
		.version      = SBP2_SW_VERSION_ENTRY,
	},
	{ }
};

static struct fw_driver sbp2_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = sbp2_driver_name,
		.bus    = &fw_bus_type,
		.probe  = sbp2_probe,
		.remove = sbp2_remove,
	},
	.update   = sbp2_update,
	.id_table = sbp2_id_table,
};

static unsigned int
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
	int sam_status;

	sense_data[0] = 0x70;
	sense_data[1] = 0x0;
	sense_data[2] = sbp2_status[1];
	sense_data[3] = sbp2_status[4];
	sense_data[4] = sbp2_status[5];
	sense_data[5] = sbp2_status[6];
	sense_data[6] = sbp2_status[7];
	sense_data[7] = 10;
	sense_data[8] = sbp2_status[8];
	sense_data[9] = sbp2_status[9];
	sense_data[10] = sbp2_status[10];
	sense_data[11] = sbp2_status[11];
	sense_data[12] = sbp2_status[2];
	sense_data[13] = sbp2_status[3];
	sense_data[14] = sbp2_status[12];
	sense_data[15] = sbp2_status[13];

	sam_status = sbp2_status[0] & 0x3f;

	switch (sam_status) {
	case SAM_STAT_GOOD:
	case SAM_STAT_CHECK_CONDITION:
	case SAM_STAT_CONDITION_MET:
	case SAM_STAT_BUSY:
	case SAM_STAT_RESERVATION_CONFLICT:
	case SAM_STAT_COMMAND_TERMINATED:
		return DID_OK << 16 | sam_status;

	default:
		return DID_ERROR << 16;
	}
}

static void
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
	struct sbp2_command_orb *orb =
		container_of(base_orb, struct sbp2_command_orb, base);
	struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
	int result;

	if (status != NULL) {
		if (STATUS_GET_DEAD(*status))
			sbp2_agent_reset_no_wait(orb->lu);

		switch (STATUS_GET_RESPONSE(*status)) {
		case SBP2_STATUS_REQUEST_COMPLETE:
			result = DID_OK << 16;
			break;
		case SBP2_STATUS_TRANSPORT_FAILURE:
			result = DID_BUS_BUSY << 16;
			break;
		case SBP2_STATUS_ILLEGAL_REQUEST:
		case SBP2_STATUS_VENDOR_DEPENDENT:
		default:
			result = DID_ERROR << 16;
			break;
		}

		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
							   orb->cmd->sense_buffer);
	} else {
		/*
		 * If the orb completes with status == NULL, something
		 * went wrong, typically a bus reset happened mid-orb
		 * or when sending the write (less likely).
		 */
		result = DID_BUS_BUSY << 16;
		sbp2_conditionally_block(orb->lu);
	}

	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);

	if (scsi_sg_count(orb->cmd) > 0)
		dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
			     scsi_sg_count(orb->cmd),
			     orb->cmd->sc_data_direction);

	if (orb->page_table_bus != 0)
		dma_unmap_single(device->card->device, orb->page_table_bus,
				 sizeof(orb->page_table), DMA_TO_DEVICE);

	orb->cmd->result = result;
	orb->done(orb->cmd);
}

static int
sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
		     struct sbp2_logical_unit *lu)
{
	struct scatterlist *sg;
	int sg_len, l, i, j, count;
	dma_addr_t sg_addr;

	sg = scsi_sglist(orb->cmd);
	count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
			   orb->cmd->sc_data_direction);
	if (count == 0)
		goto fail;

	/*
	 * Handle the special case where there is only one element in
	 * the scatter list by converting it to an immediate block
	 * request. This is also a workaround for broken devices such
	 * as the second generation iPod which doesn't support page
	 * tables.
	 */
	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
		orb->request.data_descriptor.high =
			cpu_to_be32(lu->tgt->address_high);
		orb->request.data_descriptor.low  =
			cpu_to_be32(sg_dma_address(sg));
		orb->request.misc |=
			cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
		return 0;
	}

	/*
	 * Convert the scatterlist to an sbp2 page table.  If any
	 * scatterlist entries are too big for sbp2, we split them as we
	 * go.  Even if we ask the block I/O layer to not give us sg
	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
	 * during DMA mapping, and Linux currently doesn't prevent this.
	 */
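	/*
	 * For example, a single 0x12000 byte element becomes two page table
	 * entries here: 0xf000 (SBP2_MAX_SG_ELEMENT_LENGTH) plus 0x3000.
	 */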
	for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
		sg_len = sg_dma_len(sg);
		sg_addr = sg_dma_address(sg);
		while (sg_len) {
			/* FIXME: This won't get us out of the pinch. */
			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
				fw_error("page table overflow\n");
				goto fail_page_table;
			}
			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
			orb->page_table[j].low = cpu_to_be32(sg_addr);
			orb->page_table[j].high = cpu_to_be32(l << 16);
			sg_addr += l;
			sg_len -= l;
			j++;
		}
	}

	orb->page_table_bus =
		dma_map_single(device->card->device, orb->page_table,
			       sizeof(orb->page_table), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->page_table_bus))
		goto fail_page_table;

	/*
	 * The data_descriptor pointer is the one case where we need
	 * to fill in the node ID part of the address.  All other
	 * pointers assume that the data referenced reside on the
	 * initiator (i.e. us), but data_descriptor can refer to data
	 * on other nodes so we need to put our ID in descriptor.high.
	 */
	orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
	orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
	orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
					 COMMAND_ORB_DATA_SIZE(j));

	return 0;

 fail_page_table:
	dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
		     orb->cmd->sc_data_direction);
 fail:
	return -ENOMEM;
}

/* SCSI stack integration */

static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct sbp2_command_orb *orb;
	unsigned int max_payload;
	int retval = SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Bidirectional commands are not yet implemented, and unknown
	 * transfer direction not handled.
	 */
	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL) {
		fw_notify("failed to alloc orb\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Initialize rcode to something not RCODE_COMPLETE. */
	orb->base.rcode = -1;
	kref_init(&orb->base.kref);

	orb->lu   = lu;
	orb->done = done;
	orb->cmd  = cmd;

	orb->request.next.high   = cpu_to_be32(SBP2_ORB_NULL);
	/*
	 * At speed 100 we can do 512 bytes per packet, at speed 200,
	 * 1024 bytes per packet etc.  The SBP-2 max_payload field
	 * specifies the max payload size as 2 ^ (max_payload + 2), so
	 * if we set this to max_speed + 7, we get the right value.
	 */
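	/*
	 * Worked example: at S400, device->max_speed is 2, so max_payload is
	 * at most 2 + 7 = 9, i.e. 2 ^ (9 + 2) = 2048 bytes per packet,
	 * further capped by the card's max_receive limit in the min() below.
	 */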
	max_payload = min(device->max_speed + 7,
			  device->card->max_receive - 1);
	orb->request.misc = cpu_to_be32(
		COMMAND_ORB_MAX_PAYLOAD(max_payload) |
		COMMAND_ORB_SPEED(device->max_speed) |
		COMMAND_ORB_NOTIFY);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);

	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
		goto out;

	memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);

	orb->base.callback = complete_command_orb;
	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->base.request_bus))
		goto out;

	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
		      lu->command_block_agent_address + SBP2_ORB_POINTER);
	retval = 0;
 out:
	kref_put(&orb->base.kref, free_orb);
	return retval;
}

static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
	struct sbp2_logical_unit *lu = sdev->hostdata;

	/* (Re-)Adding logical units via the SCSI stack is not supported. */
	if (!lu)
		return -ENOSYS;

	sdev->allow_restart = 1;

	/* SBP-2 requires quadlet alignment of the data buffers. */
	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);

	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
		sdev->inquiry_len = 36;

	return 0;
}

static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{
	struct sbp2_logical_unit *lu = sdev->hostdata;

	sdev->use_10_for_rw = 1;

	if (sbp2_param_exclusive_login)
		sdev->manage_start_stop = 1;

	if (sdev->type == TYPE_ROM)
		sdev->use_10_for_ms = 1;

	if (sdev->type == TYPE_DISK &&
	    lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
		sdev->skip_ms_page_8 = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
		sdev->fix_capacity = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
		sdev->start_stop_pwr_cond = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);

	return 0;
}

/*
 * Called by scsi stack when something has really gone wrong.  Usually
 * called when a command has timed-out for some reason.
 */
static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;

	fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);

	return SUCCESS;
}

/*
 * Format of /sys/bus/scsi/devices/.../ieee1394_id:
 * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
 *
 * This is the concatenation of target port identifier and logical unit
 * identifier as per SAM-2...SAM-4 annex A.
 */
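/* e.g. "0001020304050607:000424:0000" -- hypothetical values, for illustration */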
static ssize_t
sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct sbp2_logical_unit *lu;

	if (!sdev)
		return 0;

	lu = sdev->hostdata;

	return sprintf(buf, "%016llx:%06x:%04x\n",
			(unsigned long long)lu->tgt->guid,
			lu->tgt->directory_id, lu->lun);
}

static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);

static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
	&dev_attr_ieee1394_id,
	NULL
};

static struct scsi_host_template scsi_driver_template = {
	.module			= THIS_MODULE,
	.name			= "SBP-2 IEEE-1394",
	.proc_name		= sbp2_driver_name,
	.queuecommand		= sbp2_scsi_queuecommand,
	.slave_alloc		= sbp2_scsi_slave_alloc,
	.slave_configure	= sbp2_scsi_slave_configure,
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.cmd_per_lun		= 1,
	.can_queue		= 1,
	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
#endif

static int __init sbp2_init(void)
{
	sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
	if (!sbp2_wq)
		return -ENOMEM;

	return driver_register(&sbp2_driver.driver);
}

static void __exit sbp2_cleanup(void)
{
	driver_unregister(&sbp2_driver.driver);
	destroy_workqueue(sbp2_wq);
}

module_init(sbp2_init);
module_exit(sbp2_cleanup);