/* drivers/firewire/fw-sbp2.c */
/*
 * SBP2 driver (SCSI over IEEE1394)
3
 *
4
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

21 22
/*
 * The basic structure of this driver is based on the old storage driver,
23 24 25 26 27 28 29 30
 * drivers/ieee1394/sbp2.c, originally written by
 *     James Goodwin <jamesg@filanet.com>
 * with later contributions and ongoing maintenance from
 *     Ben Collins <bcollins@debian.org>,
 *     Stefan Richter <stefanr@s5r6.in-berlin.de>
 * and many others.
 */

31 32
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/system.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

55 56 57 58 59 60 61 62 63 64 65 66
/*
 * So far only bridges from Oxford Semiconductor are known to support
 * concurrent logins. Depending on firmware, four or two concurrent logins
 * are possible on OXFW911 and newer Oxsemi bridges.
 *
 * Concurrent logins are useful together with cluster filesystems.
 */
static int sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
		 "(default = Y, use N for concurrent initiators)");

67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85
/*
 * Flags for firmware oddities
 *
 * - 128kB max transfer
 *   Limit transfer size. Necessary for some old bridges.
 *
 * - 36 byte inquiry
 *   When scsi_mod probes the device, let the inquiry command look like that
 *   from MS Windows.
 *
 * - skip mode page 8
 *   Suppress sending of mode_sense for mode page 8 if the device pretends to
 *   support the SCSI Primary Block commands instead of Reduced Block Commands.
 *
 * - fix capacity
 *   Tell sd_mod to correct the last sector number reported by read_capacity.
 *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
 *   Don't use this with devices which don't have this bug.
 *
86 87 88
 * - delay inquiry
 *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
 *
89 90 91 92 93 94 95 96 97
 * - override internal blacklist
 *   Instead of adding to the built-in blacklist, use only the workarounds
 *   specified in the module load parameter.
 *   Useful if a blacklist entry interfered with a non-broken device.
 */
#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
98 99
#define SBP2_WORKAROUND_DELAY_INQUIRY	0x10
#define SBP2_INQUIRY_DELAY		12
100 101 102 103 104 105 106 107 108
#define SBP2_WORKAROUND_OVERRIDE	0x100

static int sbp2_param_workarounds;
module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
	", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
	", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
	", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
109
	", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
110 111 112
	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
	", or a combination)");

113
/* I don't know why the SCSI stack doesn't define something like this... */
114
typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
115 116 117

static const char sbp2_driver_name[] = "sbp2";

118 119 120 121 122 123 124 125
/*
 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
 * and one struct scsi_device per sbp2_logical_unit.
 */
struct sbp2_logical_unit {
	struct sbp2_target *tgt;	/* back pointer to the owning target */
	struct list_head link;		/* entry in tgt->lu_list */
	struct scsi_device *sdev;	/* NULL until the first login succeeded */
	struct fw_address_handler address_handler; /* receives status writes */
	struct list_head orb_list;	/* ORBs sent but not yet completed */

	u64 command_block_agent_address; /* taken from the login response */
	u16 lun;
	int login_id;			/* taken from the login response */

	/*
	 * The generation is updated once we've logged in or reconnected
	 * to the logical unit.  Thus, I/O to the device will automatically
	 * fail and get retried if it happens in a window where the device
	 * is not ready, e.g. after a bus reset but before we reconnect.
	 */
	int generation;
	int retries;			/* login/reconnect attempts so far */
	struct delayed_work work;	/* runs sbp2_login or sbp2_reconnect */
};

144 145 146 147 148 149 150
/*
 * We create one struct sbp2_target per IEEE 1212 Unit Directory
 * and one struct Scsi_Host per sbp2_target.
 */
struct sbp2_target {
	struct kref kref;		/* released via sbp2_release_target() */
	struct fw_unit *unit;
	const char *bus_id;		/* device name, used in log messages */
	struct list_head lu_list;	/* all logical units of this target */

	u64 management_agent_address;	/* parsed from the unit directory */
	int directory_id;
	int node_id;			/* updated on each login/reconnect */
	int address_high;		/* local node ID << 16 */
	unsigned int workarounds;	/* SBP2_WORKAROUND_* bits */
	unsigned int mgt_orb_timeout;	/* management ORB timeout in ms */
};

162 163
/*
 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
164 165
 * provided in the config rom. Most devices do provide a value, which
 * we'll use for login management orbs, but with some sane limits.
166
 */
167 168
#define SBP2_MIN_LOGIN_ORB_TIMEOUT	5000U	/* Timeout in ms */
#define SBP2_MAX_LOGIN_ORB_TIMEOUT	40000U	/* Timeout in ms */
169
#define SBP2_ORB_TIMEOUT		2000U	/* Timeout in ms */
170
#define SBP2_ORB_NULL			0x80000000
171
#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
172 173 174 175 176

#define SBP2_DIRECTION_TO_MEDIA		0x0
#define SBP2_DIRECTION_FROM_MEDIA	0x1

/* Unit directory keys */
177
#define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
178 179 180
#define SBP2_CSR_FIRMWARE_REVISION	0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER	0x14
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY	0xd4
181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST		0x0
#define SBP2_QUERY_LOGINS_REQUEST	0x1
#define SBP2_RECONNECT_REQUEST		0x3
#define SBP2_SET_PASSWORD_REQUEST	0x4
#define SBP2_LOGOUT_REQUEST		0x7
#define SBP2_ABORT_TASK_REQUEST		0xb
#define SBP2_ABORT_TASK_SET		0xc
#define SBP2_LOGICAL_UNIT_RESET		0xe
#define SBP2_TARGET_RESET_REQUEST	0xf

/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE		0x00
#define SBP2_AGENT_RESET		0x04
#define SBP2_ORB_POINTER		0x08
#define SBP2_DOORBELL			0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE	0x14

/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE	0x0
#define SBP2_STATUS_TRANSPORT_FAILURE	0x1
#define SBP2_STATUS_ILLEGAL_REQUEST	0x2
#define SBP2_STATUS_VENDOR_DEPENDENT	0x3

206 207 208 209 210 211 212 213
#define STATUS_GET_ORB_HIGH(v)		((v).status & 0xffff)
#define STATUS_GET_SBP_STATUS(v)	(((v).status >> 16) & 0xff)
#define STATUS_GET_LEN(v)		(((v).status >> 24) & 0x07)
#define STATUS_GET_DEAD(v)		(((v).status >> 27) & 0x01)
#define STATUS_GET_RESPONSE(v)		(((v).status >> 28) & 0x03)
#define STATUS_GET_SOURCE(v)		(((v).status >> 30) & 0x03)
#define STATUS_GET_ORB_LOW(v)		((v).orb_low)
#define STATUS_GET_DATA(v)		((v).data)
214 215 216 217 218 219 220 221 222 223 224 225 226 227

struct sbp2_status {
	u32 status;
	u32 orb_low;
	u8 data[24];
};

struct sbp2_pointer {
	u32 high;
	u32 low;
};

/* Common part of management and command ORBs. */
struct sbp2_orb {
	struct fw_transaction t;	/* the ORB pointer write transaction */
	struct kref kref;		/* last put frees the orb (free_orb) */
	dma_addr_t request_bus;		/* bus address of the mapped request */
	int rcode;			/* complete_transaction() treats -1 as "not set yet" */
	struct sbp2_pointer pointer;
	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
	struct list_head link;		/* entry in lu->orb_list */
};

236 237 238
#define MANAGEMENT_ORB_LUN(v)			((v))
#define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
239
#define MANAGEMENT_ORB_EXCLUSIVE(v)		((v) ? 1 << 28 : 0)
240 241
#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define MANAGEMENT_ORB_NOTIFY			((1) << 31)
242

243 244
#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)	((v))
#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)	((v) << 16)
245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260

struct sbp2_management_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer password;
		struct sbp2_pointer response;
		u32 misc;
		u32 length;
		struct sbp2_pointer status_fifo;
	} request;
	__be32 response[4];
	dma_addr_t response_bus;
	struct completion done;
	struct sbp2_status status;
};

261 262
#define LOGIN_RESPONSE_GET_LOGIN_ID(v)	((v).misc & 0xffff)
#define LOGIN_RESPONSE_GET_LENGTH(v)	(((v).misc >> 16) & 0xffff)
263 264 265 266 267 268

struct sbp2_login_response {
	u32 misc;
	struct sbp2_pointer command_block_agent;
	u32 reconnect_hold;
};
269 270 271 272 273 274 275 276
#define COMMAND_ORB_DATA_SIZE(v)	((v))
#define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
#define COMMAND_ORB_SPEED(v)		((v) << 24)
#define COMMAND_ORB_DIRECTION(v)	((v) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define COMMAND_ORB_NOTIFY		((1) << 31)
277 278 279 280 281 282 283 284 285 286 287

struct sbp2_command_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer next;
		struct sbp2_pointer data_descriptor;
		u32 misc;
		u8 command_block[12];
	} request;
	struct scsi_cmnd *cmd;
	scsi_done_fn_t done;
288
	struct sbp2_logical_unit *lu;
289

290
	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
291 292 293 294 295 296 297 298 299 300 301 302 303 304 305
	dma_addr_t page_table_bus;
};

/*
 * List of devices with known bugs.
 *
 * The firmware_revision field, masked with 0xffff00, is the best
 * indicator for the type of bridge chip of a device.  It yields a few
 * false positives but this did not break correctly behaving devices
 * so far.  We use ~0 as a wildcard, since the 24 bit values we get
 * from the config rom can never match that.
 */
static const struct {
	u32 firmware_revision;
	u32 model;
	unsigned int workarounds;	/* SBP2_WORKAROUND_* bits to apply */
} sbp2_workarounds_table[] = {
	/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x001010,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
					  SBP2_WORKAROUND_MODE_SENSE_8,
	},
	/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x000000,
		.workarounds		= SBP2_WORKAROUND_DELAY_INQUIRY,
	},
	/* Initio bridges, actually only needed for some older ones */ {
		.firmware_revision	= 0x000200,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
	},
	/* Symbios bridge */ {
		.firmware_revision	= 0xa0b800,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},

	/*
	 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
	 * these iPods do not feature the read_capacity bug according
	 * to one report.  Read_capacity behaviour as well as model_id
	 * could change due to Apple-supplied firmware updates though.
	 */

	/* iPod 4th generation. */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000021,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod mini */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000023,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod Photo */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x00007e,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	}
};

354 355 356 357 358 359 360 361
/* kref release callback: the last reference to an ORB is gone, free it. */
static void free_orb(struct kref *kref)
{
	kfree(container_of(kref, struct sbp2_orb, kref));
}

362 363 364 365 366 367 368
/*
 * Address handler for the logical unit's status FIFO.  The target posts
 * a status block here when an ORB completes; we match it to the pending
 * ORB by its bus address and invoke the ORB's completion callback.
 */
static void
sbp2_status_write(struct fw_card *card, struct fw_request *request,
		  int tcode, int destination, int source,
		  int generation, int speed,
		  unsigned long long offset,
		  void *payload, size_t length, void *callback_data)
{
	struct sbp2_logical_unit *lu = callback_data;
	struct sbp2_orb *orb;
	struct sbp2_status status;
	size_t header_size;
	unsigned long flags;

	/* Only block writes of a plausible status-block size are valid. */
	if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
	    length == 0 || length > sizeof(status)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	header_size = min(length, 2 * sizeof(u32));
	fw_memcpy_from_be32(&status, payload, header_size);
	if (length > header_size)
		memcpy(status.data, payload + 8, length - header_size);
	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
		fw_notify("non-orb related status write, not handled\n");
		fw_send_response(card, request, RCODE_COMPLETE);
		return;
	}

	/* Lookup the orb corresponding to this status write. */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(orb, &lu->orb_list, link) {
		if (STATUS_GET_ORB_HIGH(status) == 0 &&
		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
			orb->rcode = RCODE_COMPLETE;
			list_del(&orb->link);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&orb->link != &lu->orb_list) {
		orb->callback(orb, &status);
		/* Drop the orb list reference taken in sbp2_send_orb(). */
		kref_put(&orb->kref, free_orb);
	} else {
		/*
		 * No match: don't touch orb at all.  After the loop ran to
		 * completion, orb is a bogus container_of() of the list
		 * head; the previous unconditional kref_put() dereferenced
		 * that bogus pointer.
		 */
		fw_error("status write for unknown orb\n");
	}

	fw_send_response(card, request, RCODE_COMPLETE);
}

/* Completion callback for the ORB pointer write started in sbp2_send_orb(). */
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct sbp2_orb *orb = data;
	unsigned long flags;

	/*
	 * This is a little tricky.  We can get the status write for
	 * the orb before we get this callback.  The status write
	 * handler above will assume the orb pointer transaction was
	 * successful and set the rcode to RCODE_COMPLETE for the orb.
	 * So this callback only sets the rcode if it hasn't already
	 * been set and only does the cleanup if the transaction
	 * failed and we didn't already get a status write.
	 */
	spin_lock_irqsave(&card->lock, flags);

	if (orb->rcode == -1)
		orb->rcode = rcode;
	if (orb->rcode != RCODE_COMPLETE) {
		list_del(&orb->link);
		spin_unlock_irqrestore(&card->lock, flags);

		orb->callback(orb, NULL);
		/*
		 * The orb was just unlinked and no status write will ever
		 * match it, so the orb list reference taken in
		 * sbp2_send_orb() must be dropped here too; previously it
		 * leaked (together with the orb itself) on this path.
		 */
		kref_put(&orb->kref, free_orb);
	} else {
		spin_unlock_irqrestore(&card->lock, flags);
	}

	/* Drop the transaction callback reference. */
	kref_put(&orb->kref, free_orb);
}

static void
445
sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
446 447
	      int node_id, int generation, u64 offset)
{
448
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
449 450 451 452
	unsigned long flags;

	orb->pointer.high = 0;
	orb->pointer.low = orb->request_bus;
453
	fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
454 455

	spin_lock_irqsave(&device->card->lock, flags);
456
	list_add_tail(&orb->link, &lu->orb_list);
457 458
	spin_unlock_irqrestore(&device->card->lock, flags);

459 460 461 462
	/* Take a ref for the orb list and for the transaction callback. */
	kref_get(&orb->kref);
	kref_get(&orb->kref);

463
	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
464
			node_id, generation, device->max_speed, offset,
465
			&orb->pointer, sizeof(orb->pointer),
466 467 468
			complete_transaction, orb);
}

469
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
470
{
471
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
472 473 474
	struct sbp2_orb *orb, *next;
	struct list_head list;
	unsigned long flags;
475
	int retval = -ENOENT;
476 477 478

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&device->card->lock, flags);
479
	list_splice_init(&lu->orb_list, &list);
480 481 482
	spin_unlock_irqrestore(&device->card->lock, flags);

	list_for_each_entry_safe(orb, next, &list, link) {
483
		retval = 0;
484 485 486
		if (fw_cancel_transaction(device->card, &orb->t) == 0)
			continue;

487 488 489 490
		orb->rcode = RCODE_CANCELLED;
		orb->callback(orb, NULL);
	}

491
	return retval;
492 493
}

494 495 496 497
static void
complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
	struct sbp2_management_orb *orb =
498
		container_of(base_orb, struct sbp2_management_orb, base);
499 500

	if (status)
501
		memcpy(&orb->status, status, sizeof(*status));
502 503 504 505
	complete(&orb->done);
}

/*
 * Send a management ORB (login, logout, reconnect, ...) to the target's
 * management agent and wait, with a timeout, for its completion status.
 *
 * @lu:              logical unit the request concerns
 * @node_id:         node ID of the target
 * @generation:      bus generation node_id belongs to
 * @function:        one of the SBP2_*_REQUEST opcodes
 * @lun_or_login_id: LUN for login requests, login ID otherwise
 * @response:        if non-NULL, receives the byte-swapped response buffer
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int
sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
			 int generation, int function, int lun_or_login_id,
			 void *response)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct sbp2_management_orb *orb;
	unsigned int timeout;
	int retval = -ENOMEM;

	/* A logout to a device that is already gone is a successful no-op. */
	if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
		return 0;

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL)
		return -ENOMEM;

	kref_init(&orb->base.kref);
	orb->response_bus =
		dma_map_single(device->card->device, &orb->response,
			       sizeof(orb->response), DMA_FROM_DEVICE);
	if (dma_mapping_error(orb->response_bus))
		goto fail_mapping_response;

	orb->request.response.high    = 0;
	orb->request.response.low     = orb->response_bus;

	orb->request.misc =
		MANAGEMENT_ORB_NOTIFY |
		MANAGEMENT_ORB_FUNCTION(function) |
		MANAGEMENT_ORB_LUN(lun_or_login_id);
	orb->request.length =
		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));

	/* Tell the target where to write the status block. */
	orb->request.status_fifo.high = lu->address_handler.offset >> 32;
	orb->request.status_fifo.low  = lu->address_handler.offset;

	if (function == SBP2_LOGIN_REQUEST) {
		/* Ask for 2^2 == 4 seconds reconnect grace period */
		orb->request.misc |=
			MANAGEMENT_ORB_RECONNECT(2) |
			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login);
		timeout = lu->tgt->mgt_orb_timeout;
	} else {
		timeout = SBP2_ORB_TIMEOUT;
	}

	/* Byte-swap the request in place before it is DMA-mapped. */
	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));

	init_completion(&orb->done);
	orb->base.callback = complete_management_orb;

	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(orb->base.request_bus))
		goto fail_mapping_request;

	sbp2_send_orb(&orb->base, lu, node_id, generation,
		      lu->tgt->management_agent_address);

	wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));

	retval = -EIO;
	/* sbp2_cancel_orbs() == 0 means the orb was still pending: timeout. */
	if (sbp2_cancel_orbs(lu) == 0) {
		fw_error("%s: orb reply timed out, rcode=0x%02x\n",
			 lu->tgt->bus_id, orb->base.rcode);
		goto out;
	}

	if (orb->base.rcode != RCODE_COMPLETE) {
		fw_error("%s: management write failed, rcode 0x%02x\n",
			 lu->tgt->bus_id, orb->base.rcode);
		goto out;
	}

	if (STATUS_GET_RESPONSE(orb->status) != 0 ||
	    STATUS_GET_SBP_STATUS(orb->status) != 0) {
		fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
			 STATUS_GET_RESPONSE(orb->status),
			 STATUS_GET_SBP_STATUS(orb->status));
		goto out;
	}

	retval = 0;
 out:
	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
 fail_mapping_request:
	dma_unmap_single(device->card->device, orb->response_bus,
			 sizeof(orb->response), DMA_FROM_DEVICE);
 fail_mapping_response:
	/* Copy the response out even on error: callers may want partial info. */
	if (response)
		fw_memcpy_from_be32(response,
				    orb->response, sizeof(orb->response));
	kref_put(&orb->base.kref, free_orb);

	return retval;
}

/* Completion hook for sbp2_agent_reset(): just wake up the waiter. */
static void
complete_agent_reset_write(struct fw_card *card, int rcode,
			   void *payload, size_t length, void *done)
{
	complete(done);
}

static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	DECLARE_COMPLETION_ONSTACK(done);
	struct fw_transaction t;
	static u32 z;
618

619 620 621 622 623
	fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
			lu->tgt->node_id, lu->generation, device->max_speed,
			lu->command_block_agent_address + SBP2_AGENT_RESET,
			&z, sizeof(z), complete_agent_reset_write, &done);
	wait_for_completion(&done);
624 625
}

626 627 628 629 630 631 632 633
/* data is the heap-allocated fw_transaction; nobody waits, so free it here. */
static void
complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
				   void *payload, size_t length, void *data)
{
	kfree(data);
}

static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
634
{
635
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
636
	struct fw_transaction *t;
637
	static u32 z;
638

639
	t = kmalloc(sizeof(*t), GFP_ATOMIC);
640
	if (t == NULL)
641
		return;
642 643

	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
644 645
			lu->tgt->node_id, lu->generation, device->max_speed,
			lu->command_block_agent_address + SBP2_AGENT_RESET,
646
			&z, sizeof(z), complete_agent_reset_write_no_wait, t);
647 648
}

649
/*
 * kref release function for the target: runs when the last
 * sbp2_target_put() drops the refcount to zero.  Logs out and frees all
 * logical units, then tears down the SCSI host.
 */
static void sbp2_release_target(struct kref *kref)
{
	struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
	struct sbp2_logical_unit *lu, *next;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);

	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
		if (lu->sdev)
			scsi_remove_device(lu->sdev);

		/* Best effort: the orb path logs errors on failure. */
		sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);

		fw_core_remove_address_handler(&lu->address_handler);
		list_del(&lu->link);
		kfree(lu);
	}
	scsi_remove_host(shost);
	fw_notify("released %s\n", tgt->bus_id);

	/* Drop the unit reference taken in sbp2_probe(). */
	put_device(&tgt->unit->device);
	scsi_host_put(shost);
}

674 675
static struct workqueue_struct *sbp2_wq;

676 677 678 679 680 681 682 683 684 685 686 687 688 689 690
/*
 * Always get the target's kref when scheduling work on one its units.
 * Each workqueue job is responsible to call sbp2_target_put() upon return.
 */
static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
{
	if (queue_delayed_work(sbp2_wq, &lu->work, delay))
		kref_get(&lu->tgt->kref);
}

/* Drop one target reference; the last put runs sbp2_release_target(). */
static void sbp2_target_put(struct sbp2_target *tgt)
{
	kref_put(&tgt->kref, sbp2_release_target);
}

691 692
static void sbp2_reconnect(struct work_struct *work);

693 694
/*
 * Workqueue job: log in to one logical unit and, on the first successful
 * login, register a scsi_device for it.  Reschedules itself with a delay
 * on failure (up to 5 retries).  Must drop the target reference taken by
 * sbp2_queue_work() before returning.
 */
static void sbp2_login(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = fw_device(tgt->unit->device.parent);
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct scsi_lun eight_bytes_lun;
	struct sbp2_login_response response;
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		goto out;

	/* Snapshot generation and node IDs consistently. */
	generation    = device->generation;
	smp_rmb();    /* node_id must not be older than generation */
	node_id       = device->node_id;
	local_node_id = device->card->node_id;

	if (sbp2_send_management_orb(lu, node_id, generation,
				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
		if (lu->retries++ < 5)
			sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
		else
			fw_error("%s: failed to login to LUN %04x\n",
				 tgt->bus_id, lu->lun);
		goto out;
	}

	lu->generation    = generation;
	tgt->node_id	  = node_id;
	tgt->address_high = local_node_id << 16;

	/* Get command block agent offset and login id. */
	lu->command_block_agent_address =
		((u64) (response.command_block_agent.high & 0xffff) << 32) |
		response.command_block_agent.low;
	lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);

	fw_notify("%s: logged in to LUN %04x (%d retries)\n",
		  tgt->bus_id, lu->lun, lu->retries);

#if 0
	/* FIXME: The linux1394 sbp2 does this last step. */
	sbp2_set_busy_timeout(scsi_id);
#endif

	/* Future bus resets should reconnect, not re-login. */
	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
	sbp2_agent_reset(lu);

	/* This was a re-login. */
	if (lu->sdev) {
		sbp2_cancel_orbs(lu);
		goto out;
	}

	if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
		ssleep(SBP2_INQUIRY_DELAY);

	/* Encode the 16-bit LUN in SAM format for the SCSI midlayer. */
	memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
	eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff;
	eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff;
	shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);

	sdev = __scsi_add_device(shost, 0, 0,
				 scsilun_to_int(&eight_bytes_lun), lu);
	if (IS_ERR(sdev)) {
		smp_rmb(); /* generation may have changed */
		generation = device->generation;
		smp_rmb(); /* node_id must not be older than generation */

		sbp2_send_management_orb(lu, device->node_id, generation,
				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
		/*
		 * Set this back to sbp2_login so we fall back and
		 * retry login on bus reset.
		 */
		PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
	} else {
		lu->sdev = sdev;
		scsi_device_put(sdev);
	}
 out:
	sbp2_target_put(tgt);
}
779

780
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
781
{
782
	struct sbp2_logical_unit *lu;
783

784 785 786
	lu = kmalloc(sizeof(*lu), GFP_KERNEL);
	if (!lu)
		return -ENOMEM;
787

788 789 790
	lu->address_handler.length           = 0x100;
	lu->address_handler.address_callback = sbp2_status_write;
	lu->address_handler.callback_data    = lu;
791

792 793 794 795 796
	if (fw_core_add_address_handler(&lu->address_handler,
					&fw_high_memory_region) < 0) {
		kfree(lu);
		return -ENOMEM;
	}
797

798 799 800 801 802 803
	lu->tgt  = tgt;
	lu->sdev = NULL;
	lu->lun  = lun_entry & 0xffff;
	lu->retries = 0;
	INIT_LIST_HEAD(&lu->orb_list);
	INIT_DELAYED_WORK(&lu->work, sbp2_login);
804

805 806 807
	list_add_tail(&lu->link, &tgt->lu_list);
	return 0;
}
808

809 810 811 812
/* Walk an SBP-2 logical unit directory and register each LUN entry found. */
static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != SBP2_CSR_LOGICAL_UNIT_NUMBER)
			continue;
		if (sbp2_add_logical_unit(tgt, value) < 0)
			return -ENOMEM;
	}
	return 0;
}

/*
 * Parse the unit directory: pick up the management agent address,
 * directory ID, model and firmware revision, the management ORB timeout,
 * and create a logical unit for every LUN entry or LUN directory.
 * Returns 0 on success, -ENOMEM if a logical unit could not be added.
 */
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
			      u32 *model, u32 *firmware_revision)
{
	struct fw_csr_iterator ci;
	int key, value;
	unsigned int timeout;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {

		case CSR_DEPENDENT_INFO | CSR_OFFSET:
			tgt->management_agent_address =
					CSR_REGISTER_BASE + 4 * value;
			break;

		case CSR_DIRECTORY_ID:
			tgt->directory_id = value;
			break;

		case CSR_MODEL:
			*model = value;
			break;

		case SBP2_CSR_FIRMWARE_REVISION:
			*firmware_revision = value;
			break;

		case SBP2_CSR_UNIT_CHARACTERISTICS:
			/* the timeout value is stored in 500ms units */
			timeout = ((unsigned int) value >> 8 & 0xff) * 500;
			timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
			tgt->mgt_orb_timeout =
				  min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);

			if (timeout > tgt->mgt_orb_timeout)
				fw_notify("%s: config rom contains %ds "
					  "management ORB timeout, limiting "
					  "to %ds\n", tgt->bus_id,
					  timeout / 1000,
					  tgt->mgt_orb_timeout / 1000);
			break;

		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
			if (sbp2_add_logical_unit(tgt, value) < 0)
				return -ENOMEM;
			break;

		case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
			/* ci.p + value points at the sub-directory. */
			if (sbp2_scan_logical_unit_dir(tgt, ci.p + value) < 0)
				return -ENOMEM;
			break;
		}
	}
	return 0;
}

/*
 * Combine the workarounds requested via the module parameter with the
 * built-in blacklist entry matching this device's firmware revision and
 * model, and store the result in tgt->workarounds.
 */
static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
				  u32 firmware_revision)
{
	unsigned int w = sbp2_param_workarounds;
	int i;

	if (w)
		fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
			  "if you need the workarounds parameter for %s\n",
			  tgt->bus_id);

	/* The module parameter may bypass the built-in list entirely. */
	if (w & SBP2_WORKAROUND_OVERRIDE)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {

		if (sbp2_workarounds_table[i].firmware_revision !=
		    (firmware_revision & 0xffffff00))
			continue;

		/* model == ~0 is the table's wildcard entry. */
		if (sbp2_workarounds_table[i].model != model &&
		    sbp2_workarounds_table[i].model != ~0)
			continue;

		w |= sbp2_workarounds_table[i].workarounds;
		break;
	}
 out:
	if (w)
		fw_notify("Workarounds for %s: 0x%x "
			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
			  tgt->bus_id, w, firmware_revision, model);
	tgt->workarounds = w;
}

static struct scsi_host_template scsi_driver_template;

/*
 * Driver probe: allocate one Scsi_Host (with an embedded sbp2_target),
 * scan the unit directory for logical units, and kick off the initial
 * logins via the workqueue.  Returns 0 on success, -ENOMEM on failure.
 */
static int sbp2_probe(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device = fw_device(unit->device.parent);
	struct sbp2_target *tgt;
	struct sbp2_logical_unit *lu;
	struct Scsi_Host *shost;
	u32 model, firmware_revision;

	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
	if (shost == NULL)
		return -ENOMEM;

	/* The target lives in the host's hostdata area. */
	tgt = (struct sbp2_target *)shost->hostdata;
	unit->device.driver_data = tgt;
	tgt->unit = unit;
	kref_init(&tgt->kref);
	INIT_LIST_HEAD(&tgt->lu_list);
	tgt->bus_id = unit->device.bus_id;

	if (fw_device_enable_phys_dma(device) < 0)
		goto fail_shost_put;

	if (scsi_add_host(shost, &unit->device) < 0)
		goto fail_shost_put;

	/* Initialize to values that won't match anything in our table. */
	firmware_revision = 0xff000000;
	model = 0xff000000;

	/* implicit directory ID */
	tgt->directory_id = ((unit->directory - device->config_rom) * 4
			     + CSR_CONFIG_ROM) & 0xffffff;

	if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
			       &firmware_revision) < 0)
		goto fail_tgt_put;

	sbp2_init_workarounds(tgt, model, firmware_revision);

	/* Dropped again in sbp2_release_target(). */
	get_device(&unit->device);

	/* Do the login in a workqueue so we can easily reschedule retries. */
	list_for_each_entry(lu, &tgt->lu_list, link)
		sbp2_queue_work(lu, 0);
	return 0;

 fail_tgt_put:
	/* Releasing the kref tears down everything set up so far. */
	sbp2_target_put(tgt);
	return -ENOMEM;

 fail_shost_put:
	scsi_host_put(shost);
	return -ENOMEM;
}

static int sbp2_remove(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
975
	struct sbp2_target *tgt = unit->device.driver_data;
976

977
	sbp2_target_put(tgt);
978 979 980 981 982
	return 0;
}

static void sbp2_reconnect(struct work_struct *work)
{
983 984
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
985 986
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = fw_device(tgt->unit->device.parent);
987 988
	int generation, node_id, local_node_id;

989 990 991
	if (fw_device_is_shutdown(device))
		goto out;

992
	generation    = device->generation;
993
	smp_rmb();    /* node_id must not be older than generation */
994 995
	node_id       = device->node_id;
	local_node_id = device->card->node_id;
996

997
	if (sbp2_send_management_orb(lu, node_id, generation,
998
				     SBP2_RECONNECT_REQUEST,
999 1000
				     lu->login_id, NULL) < 0) {
		if (lu->retries++ >= 5) {
1001
			fw_error("%s: failed to reconnect\n", tgt->bus_id);
1002
			/* Fall back and try to log in again. */
1003 1004
			lu->retries = 0;
			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
1005
		}
1006 1007
		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
		goto out;
1008
	}
1009

1010 1011 1012
	lu->generation    = generation;
	tgt->node_id      = node_id;
	tgt->address_high = local_node_id << 16;
1013

1014 1015
	fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
		  tgt->bus_id, lu->lun, lu->retries);
1016 1017 1018

	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);
1019
 out:
1020
	sbp2_target_put(tgt);
1021 1022 1023 1024
}

static void sbp2_update(struct fw_unit *unit)
{
1025 1026
	struct sbp2_target *tgt = unit->device.driver_data;
	struct sbp2_logical_unit *lu;
1027

1028 1029 1030 1031 1032 1033 1034 1035
	fw_device_enable_phys_dma(fw_device(unit->device.parent));

	/*
	 * Fw-core serializes sbp2_update() against sbp2_remove().
	 * Iteration over tgt->lu_list is therefore safe here.
	 */
	list_for_each_entry(lu, &tgt->lu_list, link) {
		lu->retries = 0;
1036
		sbp2_queue_work(lu, 0);
1037
	}
1038 1039 1040 1041 1042
}

#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
#define SBP2_SW_VERSION_ENTRY	0x00010483

1043
static const struct fw_device_id sbp2_id_table[] = {
1044 1045 1046
	{
		.match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
1047
		.version      = SBP2_SW_VERSION_ENTRY,
1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
	},
	{ }
};

/* Driver glue for the fw bus: probe/remove plus the bus-reset hook. */
static struct fw_driver sbp2_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = sbp2_driver_name,
		.bus    = &fw_bus_type,
		.probe  = sbp2_probe,
		.remove = sbp2_remove,
	},
	.update   = sbp2_update,
	.id_table = sbp2_id_table,
};

1064 1065
static unsigned int
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1066
{
1067 1068
	int sam_status;

1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
	sense_data[0] = 0x70;
	sense_data[1] = 0x0;
	sense_data[2] = sbp2_status[1];
	sense_data[3] = sbp2_status[4];
	sense_data[4] = sbp2_status[5];
	sense_data[5] = sbp2_status[6];
	sense_data[6] = sbp2_status[7];
	sense_data[7] = 10;
	sense_data[8] = sbp2_status[8];
	sense_data[9] = sbp2_status[9];
	sense_data[10] = sbp2_status[10];
	sense_data[11] = sbp2_status[11];
	sense_data[12] = sbp2_status[2];
	sense_data[13] = sbp2_status[3];
	sense_data[14] = sbp2_status[12];
	sense_data[15] = sbp2_status[13];

1086
	sam_status = sbp2_status[0] & 0x3f;
1087

1088 1089
	switch (sam_status) {
	case SAM_STAT_GOOD:
1090 1091
	case SAM_STAT_CHECK_CONDITION:
	case SAM_STAT_CONDITION_MET:
1092
	case SAM_STAT_BUSY:
1093 1094
	case SAM_STAT_RESERVATION_CONFLICT:
	case SAM_STAT_COMMAND_TERMINATED:
1095 1096
		return DID_OK << 16 | sam_status;

1097
	default:
1098
		return DID_ERROR << 16;
1099 1100 1101 1102 1103 1104
	}
}

static void
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
1105 1106
	struct sbp2_command_orb *orb =
		container_of(base_orb, struct sbp2_command_orb, base);
1107
	struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
1108 1109 1110
	int result;

	if (status != NULL) {
1111
		if (STATUS_GET_DEAD(*status))
1112
			sbp2_agent_reset_no_wait(orb->lu);
1113

1114
		switch (STATUS_GET_RESPONSE(*status)) {
1115
		case SBP2_STATUS_REQUEST_COMPLETE:
1116
			result = DID_OK << 16;
1117 1118
			break;
		case SBP2_STATUS_TRANSPORT_FAILURE:
1119
			result = DID_BUS_BUSY << 16;
1120 1121 1122 1123
			break;
		case SBP2_STATUS_ILLEGAL_REQUEST:
		case SBP2_STATUS_VENDOR_DEPENDENT:
		default:
1124
			result = DID_ERROR << 16;
1125 1126 1127
			break;
		}

1128 1129
		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
1130 1131
							   orb->cmd->sense_buffer);
	} else {
1132 1133
		/*
		 * If the orb completes with status == NULL, something
1134
		 * went wrong, typically a bus reset happened mid-orb
1135 1136
		 * or when sending the write (less likely).
		 */
1137
		result = DID_BUS_BUSY << 16;
1138 1139 1140
	}

	dma_unmap_single(device->card->device, orb->base.request_bus,
1141
			 sizeof(orb->request), DMA_TO_DEVICE);
1142

1143 1144 1145
	if (scsi_sg_count(orb->cmd) > 0)
		dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
			     scsi_sg_count(orb->cmd),
1146 1147 1148 1149
			     orb->cmd->sc_data_direction);

	if (orb->page_table_bus != 0)
		dma_unmap_single(device->card->device, orb->page_table_bus,
1150
				 sizeof(orb->page_table), DMA_TO_DEVICE);
1151

1152
	orb->cmd->result = result;
1153 1154 1155
	orb->done(orb->cmd);
}

1156 1157 1158
static int
sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
		     struct sbp2_logical_unit *lu)
1159 1160 1161 1162 1163
{
	struct scatterlist *sg;
	int sg_len, l, i, j, count;
	dma_addr_t sg_addr;

1164 1165
	sg = scsi_sglist(orb->cmd);
	count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
1166
			   orb->cmd->sc_data_direction);
1167 1168
	if (count == 0)
		goto fail;
1169

1170 1171
	/*
	 * Handle the special case where there is only one element in
1172 1173 1174
	 * the scatter list by converting it to an immediate block
	 * request. This is also a workaround for broken devices such
	 * as the second generation iPod which doesn't support page
1175 1176
	 * tables.
	 */
1177
	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
1178
		orb->request.data_descriptor.high = lu->tgt->address_high;
1179
		orb->request.data_descriptor.low  = sg_dma_address(sg);
1180
		orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
1181
		return 0;
1182 1183
	}

1184 1185
	/*
	 * Convert the scatterlist to an sbp2 page table.  If any
1186 1187 1188 1189
	 * scatterlist entries are too big for sbp2, we split them as we
	 * go.  Even if we ask the block I/O layer to not give us sg
	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
	 * during DMA mapping, and Linux currently doesn't prevent this.
1190
	 */
1191 1192 1193
	for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
		sg_len = sg_dma_len(sg);
		sg_addr = sg_dma_address(sg);
1194
		while (sg_len) {
1195 1196 1197 1198 1199
			/* FIXME: This won't get us out of the pinch. */
			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
				fw_error("page table overflow\n");
				goto fail_page_table;
			}
1200 1201 1202 1203 1204 1205 1206 1207 1208
			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
			orb->page_table[j].low = sg_addr;
			orb->page_table[j].high = (l << 16);
			sg_addr += l;
			sg_len -= l;
			j++;
		}
	}

1209 1210 1211 1212 1213 1214 1215
	fw_memcpy_to_be32(orb->page_table, orb->page_table,
			  sizeof(orb->page_table[0]) * j);
	orb->page_table_bus =
		dma_map_single(device->card->device, orb->page_table,
			       sizeof(orb->page_table), DMA_TO_DEVICE);
	if (dma_mapping_error(orb->page_table_bus))
		goto fail_page_table;
1216

1217 1218
	/*
	 * The data_descriptor pointer is the one case where we need
1219 1220 1221
	 * to fill in the node ID part of the address.  All other
	 * pointers assume that the data referenced reside on the
	 * initiator (i.e. us), but data_descriptor can refer to data
1222 1223
	 * on other nodes so we need to put our ID in descriptor.high.
	 */
1224
	orb->request.data_descriptor.high = lu->tgt->address_high;
1225 1226
	orb->request.data_descriptor.low  = orb->page_table_bus;
	orb->request.misc |=
1227 1228
		COMMAND_ORB_PAGE_TABLE_PRESENT |
		COMMAND_ORB_DATA_SIZE(j);
1229

1230 1231 1232
	return 0;

 fail_page_table:
1233
	dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
1234 1235 1236
		     orb->cmd->sc_data_direction);
 fail:
	return -ENOMEM;
1237 1238 1239 1240 1241 1242
}

/* SCSI stack integration */

/*
 * SCSI midlayer entry point: build a command ORB for 'cmd', DMA-map
 * the request and (if present) its data, and send the ORB to the
 * target's command block agent.  Returns 0 when the command has been
 * accepted (completion comes later through complete_command_orb()),
 * or SCSI_MLQUEUE_HOST_BUSY to have the midlayer retry.
 */
static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct sbp2_command_orb *orb;
	unsigned int max_payload;
	int retval = SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Bidirectional commands are not yet implemented, and unknown
	 * transfer direction not handled.
	 */
	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL) {
		fw_notify("failed to alloc orb\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Initialize rcode to something not RCODE_COMPLETE. */
	orb->base.rcode = -1;
	kref_init(&orb->base.kref);

	orb->lu   = lu;
	orb->done = done;
	orb->cmd  = cmd;

	orb->request.next.high   = SBP2_ORB_NULL;
	orb->request.next.low    = 0x0;
	/*
	 * At speed 100 we can do 512 bytes per packet, at speed 200,
	 * 1024 bytes per packet etc.  The SBP-2 max_payload field
	 * specifies the max payload size as 2 ^ (max_payload + 2), so
	 * if we set this to max_speed + 7, we get the right value.
	 */
	max_payload = min(device->max_speed + 7,
			  device->card->max_receive - 1);
	orb->request.misc =
		COMMAND_ORB_MAX_PAYLOAD(max_payload) |
		COMMAND_ORB_SPEED(device->max_speed) |
		COMMAND_ORB_NOTIFY;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		orb->request.misc |=
			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		orb->request.misc |=
			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);

	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
		goto out;

	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));

	memset(orb->request.command_block,
	       0, sizeof(orb->request.command_block));
	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));

	orb->base.callback = complete_command_orb;
	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(orb->base.request_bus)) {
		/*
		 * complete_command_orb() will never run for this orb;
		 * undo the scatterlist/page table mappings here so they
		 * don't leak.
		 */
		if (scsi_sg_count(cmd) > 0)
			dma_unmap_sg(device->card->device, scsi_sglist(cmd),
				     scsi_sg_count(cmd),
				     cmd->sc_data_direction);
		if (orb->page_table_bus != 0)
			dma_unmap_single(device->card->device,
					 orb->page_table_bus,
					 sizeof(orb->page_table),
					 DMA_TO_DEVICE);
		goto out;
	}

	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
		      lu->command_block_agent_address + SBP2_ORB_POINTER);
	retval = 0;
 out:
	kref_put(&orb->base.kref, free_orb);
	return retval;
}

1320 1321
static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
1322
	struct sbp2_logical_unit *lu = sdev->hostdata;
1323 1324 1325

	sdev->allow_restart = 1;

1326 1327 1328 1329 1330 1331
	/*
	 * Update the dma alignment (minimum alignment requirements for
	 * start and end of DMA transfers) to be a sector
	 */
	blk_queue_update_dma_alignment(sdev->request_queue, 511);

1332
	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1333
		sdev->inquiry_len = 36;
1334

1335 1336 1337
	return 0;
}

1338 1339
static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{
1340
	struct sbp2_logical_unit *lu = sdev->hostdata;
1341

1342 1343 1344 1345
	sdev->use_10_for_rw = 1;

	if (sdev->type == TYPE_ROM)
		sdev->use_10_for_ms = 1;
1346

1347
	if (sdev->type == TYPE_DISK &&
1348
	    lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
1349
		sdev->skip_ms_page_8 = 1;
1350 1351

	if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
1352
		sdev->fix_capacity = 1;
1353 1354

	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
1355
		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
1356

1357 1358 1359 1360 1361 1362 1363 1364 1365
	return 0;
}

/*
 * Called by scsi stack when something has really gone wrong.  Usually
 * called when a command has timed-out for some reason.
 */
static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
1366
	struct sbp2_logical_unit *lu = cmd->device->hostdata;
1367

1368
	fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
1369 1370
	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);
1371 1372 1373 1374

	return SUCCESS;
}

1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386
/*
 * Format of /sys/bus/scsi/devices/.../ieee1394_id:
 * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
 *
 * This is the concatenation of target port identifier and logical unit
 * identifier as per SAM-2...SAM-4 annex A.
 */
static ssize_t
sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
1387
	struct sbp2_logical_unit *lu;
1388 1389 1390 1391 1392
	struct fw_device *device;

	if (!sdev)
		return 0;

1393 1394
	lu = sdev->hostdata;
	device = fw_device(lu->tgt->unit->device.parent);
1395 1396 1397

	return sprintf(buf, "%08x%08x:%06x:%04x\n",
			device->config_rom[3], device->config_rom[4],
1398
			lu->tgt->directory_id, lu->lun);
1399 1400 1401 1402 1403 1404 1405 1406 1407
}

/* Read-only sysfs attribute exposing the SAM-style unit identifier. */
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);

static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
	&dev_attr_ieee1394_id,
	NULL
};

1408 1409 1410
static struct scsi_host_template scsi_driver_template = {
	.module			= THIS_MODULE,
	.name			= "SBP-2 IEEE-1394",
1411
	.proc_name		= sbp2_driver_name,
1412
	.queuecommand		= sbp2_scsi_queuecommand,
1413
	.slave_alloc		= sbp2_scsi_slave_alloc,
1414 1415 1416 1417 1418
	.slave_configure	= sbp2_scsi_slave_configure,
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
1419 1420
	.cmd_per_lun		= 1,
	.can_queue		= 1,
1421
	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
1422 1423 1424 1425 1426 1427 1428
};

/* Module metadata and the device table used for module autoloading. */
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

1429 1430 1431 1432 1433
/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
#endif

1434 1435
static int __init sbp2_init(void)
{
1436 1437 1438 1439
	sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
	if (!sbp2_wq)
		return -ENOMEM;

1440 1441 1442 1443 1444 1445
	return driver_register(&sbp2_driver.driver);
}

/* Module exit: unregister the driver, then tear down the workqueue. */
static void __exit sbp2_cleanup(void)
{
	driver_unregister(&sbp2_driver.driver);
	destroy_workqueue(sbp2_wq);
}

module_init(sbp2_init);
module_exit(sbp2_cleanup);