/*
 * SBP2 driver (SCSI over IEEE1394)
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * The basic structure of this driver is based on the old storage driver,
 * drivers/ieee1394/sbp2.c, originally written by
 *     James Goodwin <jamesg@filanet.com>
 * with later contributions and ongoing maintenance from
 *     Ben Collins <bcollins@debian.org>,
 *     Stefan Richter <stefanr@s5r6.in-berlin.de>
 * and many others.
 */

#include <linux/blkdev.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/system.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/*
 * So far only bridges from Oxford Semiconductor are known to support
 * concurrent logins. Depending on firmware, four or two concurrent logins
 * are possible on OXFW911 and newer Oxsemi bridges.
 *
 * Concurrent logins are useful together with cluster filesystems.
 */
static int sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
		 "(default = Y, use N for concurrent initiators)");

/*
 * Flags for firmware oddities
 *
 * - 128kB max transfer
 *   Limit transfer size. Necessary for some old bridges.
 *
 * - 36 byte inquiry
 *   When scsi_mod probes the device, let the inquiry command look like that
 *   from MS Windows.
 *
 * - skip mode page 8
 *   Suppress sending of mode_sense for mode page 8 if the device pretends to
 *   support the SCSI Primary Block commands instead of Reduced Block Commands.
 *
 * - fix capacity
 *   Tell sd_mod to correct the last sector number reported by read_capacity.
 *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
 *   Don't use this with devices which don't have this bug.
 *
 * - delay inquiry
 *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
 *
 * - power condition
 *   Set the power condition field in the START STOP UNIT commands sent by
 *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
 *   Some disks need this to spin down or to resume properly.
 *
 * - override internal blacklist
 *   Instead of adding to the built-in blacklist, use only the workarounds
 *   specified in the module load parameter.
 *   Useful if a blacklist entry interfered with a non-broken device.
 */
#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
#define SBP2_WORKAROUND_DELAY_INQUIRY	0x10
#define SBP2_INQUIRY_DELAY		12
#define SBP2_WORKAROUND_POWER_CONDITION	0x20
#define SBP2_WORKAROUND_OVERRIDE	0x100

static int sbp2_param_workarounds;
module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
	", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
	", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
	", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
	", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
	", set power condition in start stop unit = "
				  __stringify(SBP2_WORKAROUND_POWER_CONDITION)
	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
	", or a combination)");

static const char sbp2_driver_name[] = "sbp2";

/*
 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
 * and one struct scsi_device per sbp2_logical_unit.
 */
struct sbp2_logical_unit {
	struct sbp2_target *tgt;
	struct list_head link;
	struct fw_address_handler address_handler;
	struct list_head orb_list;

	u64 command_block_agent_address;
	u16 lun;
	int login_id;

	/*
	 * The generation is updated once we've logged in or reconnected
	 * to the logical unit.  Thus, I/O to the device will automatically
	 * fail and get retried if it happens in a window where the device
	 * is not ready, e.g. after a bus reset but before we reconnect.
	 */
	int generation;
	int retries;
	struct delayed_work work;
	bool has_sdev;
	bool blocked;
};

static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
{
	queue_delayed_work(fw_workqueue, &lu->work, delay);
}

/*
 * We create one struct sbp2_target per IEEE 1212 Unit Directory
 * and one struct Scsi_Host per sbp2_target.
 */
struct sbp2_target {
	struct fw_unit *unit;
	const char *bus_id;
	struct list_head lu_list;

	u64 management_agent_address;
	u64 guid;
	int directory_id;
	int node_id;
	int address_high;
	unsigned int workarounds;
	unsigned int mgt_orb_timeout;
	unsigned int max_payload;

	int dont_block;	/* counter for each logical unit */
	int blocked;	/* ditto */
};

static struct fw_device *target_device(struct sbp2_target *tgt)
{
	return fw_parent_device(tgt->unit);
}

/* Impossible login_id, to detect logout attempt before successful login */
#define INVALID_LOGIN_ID 0x10000

#define SBP2_ORB_TIMEOUT		2000U		/* Timeout in ms */
#define SBP2_ORB_NULL			0x80000000
#define SBP2_RETRY_LIMIT		0xf		/* 15 retries */
#define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */

/*
 * There is no transport protocol limit to the CDB length, but we implement
 * a fixed length only.  16 bytes is enough for disks larger than 2 TB.
 */
#define SBP2_MAX_CDB_SIZE		16

/*
 * The default maximum s/g segment size of a FireWire controller is
 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
 * be quadlet-aligned, we set the length limit to 0xffff & ~3.
 */
#define SBP2_MAX_SEG_SIZE		0xfffc

/* Unit directory keys */
#define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
#define SBP2_CSR_FIRMWARE_REVISION	0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER	0x14
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY	0xd4

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST		0x0
#define SBP2_QUERY_LOGINS_REQUEST	0x1
#define SBP2_RECONNECT_REQUEST		0x3
#define SBP2_SET_PASSWORD_REQUEST	0x4
#define SBP2_LOGOUT_REQUEST		0x7
#define SBP2_ABORT_TASK_REQUEST		0xb
#define SBP2_ABORT_TASK_SET		0xc
#define SBP2_LOGICAL_UNIT_RESET		0xe
#define SBP2_TARGET_RESET_REQUEST	0xf

/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE		0x00
#define SBP2_AGENT_RESET		0x04
#define SBP2_ORB_POINTER		0x08
#define SBP2_DOORBELL			0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE	0x14

/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE	0x0
#define SBP2_STATUS_TRANSPORT_FAILURE	0x1
#define SBP2_STATUS_ILLEGAL_REQUEST	0x2
#define SBP2_STATUS_VENDOR_DEPENDENT	0x3

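/*
 * Accessors for the first quadlet of an SBP-2 status block:  src is in
 * bits 30-31, resp in 28-29, dead in bit 27, len in 24-26, sbp_status
 * in 16-23, and the high 16 bits of the ORB offset in bits 0-15.
 */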
#define STATUS_GET_ORB_HIGH(v)		((v).status & 0xffff)
#define STATUS_GET_SBP_STATUS(v)	(((v).status >> 16) & 0xff)
#define STATUS_GET_LEN(v)		(((v).status >> 24) & 0x07)
#define STATUS_GET_DEAD(v)		(((v).status >> 27) & 0x01)
#define STATUS_GET_RESPONSE(v)		(((v).status >> 28) & 0x03)
#define STATUS_GET_SOURCE(v)		(((v).status >> 30) & 0x03)
#define STATUS_GET_ORB_LOW(v)		((v).orb_low)
#define STATUS_GET_DATA(v)		((v).data)

struct sbp2_status {
	u32 status;
	u32 orb_low;
	u8 data[24];
};

struct sbp2_pointer {
	__be32 high;
	__be32 low;
};

struct sbp2_orb {
	struct fw_transaction t;
	struct kref kref;
	dma_addr_t request_bus;
	int rcode;
	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
	struct list_head link;
};

#define MANAGEMENT_ORB_LUN(v)			((v))
#define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
#define MANAGEMENT_ORB_EXCLUSIVE(v)		((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define MANAGEMENT_ORB_NOTIFY			((1) << 31)

#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)	((v))
#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)	((v) << 16)

struct sbp2_management_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer password;
		struct sbp2_pointer response;
		__be32 misc;
		__be32 length;
		struct sbp2_pointer status_fifo;
	} request;
	__be32 response[4];
	dma_addr_t response_bus;
	struct completion done;
	struct sbp2_status status;
};

struct sbp2_login_response {
	__be32 misc;
	struct sbp2_pointer command_block_agent;
	__be32 reconnect_hold;
};
#define COMMAND_ORB_DATA_SIZE(v)	((v))
#define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
#define COMMAND_ORB_SPEED(v)		((v) << 24)
#define COMMAND_ORB_DIRECTION		((1) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define COMMAND_ORB_NOTIFY		((1) << 31)

struct sbp2_command_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer next;
		struct sbp2_pointer data_descriptor;
		__be32 misc;
		u8 command_block[SBP2_MAX_CDB_SIZE];
	} request;
	struct scsi_cmnd *cmd;
	struct sbp2_logical_unit *lu;

	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
	dma_addr_t page_table_bus;
};

#define SBP2_ROM_VALUE_WILDCARD ~0         /* match all */
#define SBP2_ROM_VALUE_MISSING  0xff000000 /* not present in the unit dir. */

/*
 * List of devices with known bugs.
 *
 * The firmware_revision field, masked with 0xffff00, is the best
 * indicator for the type of bridge chip of a device.  It yields a few
 * false positives but this did not break correctly behaving devices
 * so far.
 */
static const struct {
	u32 firmware_revision;
	u32 model;
	unsigned int workarounds;
} sbp2_workarounds_table[] = {
	/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x001010,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
					  SBP2_WORKAROUND_MODE_SENSE_8 |
					  SBP2_WORKAROUND_POWER_CONDITION,
	},
	/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x000000,
		.workarounds		= SBP2_WORKAROUND_POWER_CONDITION,
	},
	/* Initio bridges, actually only needed for some older ones */ {
		.firmware_revision	= 0x000200,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
	},
	/* PL-3507 bridge with Prolific firmware */ {
		.firmware_revision	= 0x012800,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_POWER_CONDITION,
	},
	/* Symbios bridge */ {
		.firmware_revision	= 0xa0b800,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},
	/* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
		.firmware_revision	= 0x002600,
		.model			= SBP2_ROM_VALUE_WILDCARD,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},
	/*
	 * iPod 2nd generation: needs 128k max transfer size workaround
	 * iPod 3rd generation: needs fix capacity workaround
	 */
	{
		.firmware_revision	= 0x0a2700,
		.model			= 0x000000,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS |
					  SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod 4th generation */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000021,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod mini */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000022,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod mini */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000023,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod Photo */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x00007e,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	}
};

static void free_orb(struct kref *kref)
{
	struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);

	kfree(orb);
}

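/*
 * Address handler for the logical unit's status FIFO.  The target
 * writes a status block here when an ORB completes; we match it to a
 * pending ORB by bus address and run that ORB's completion callback.
 */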
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
			      int tcode, int destination, int source,
			      int generation, unsigned long long offset,
			      void *payload, size_t length, void *callback_data)
{
	struct sbp2_logical_unit *lu = callback_data;
	struct sbp2_orb *orb;
	struct sbp2_status status;
	unsigned long flags;

	if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
	    length < 8 || length > sizeof(status)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	status.status  = be32_to_cpup(payload);
	status.orb_low = be32_to_cpup(payload + 4);
	memset(status.data, 0, sizeof(status.data));
	if (length > 8)
		memcpy(status.data, payload + 8, length - 8);

	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
		fw_notify("non-orb related status write, not handled\n");
		fw_send_response(card, request, RCODE_COMPLETE);
		return;
	}

	/* Look up the orb corresponding to this status write. */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(orb, &lu->orb_list, link) {
		if (STATUS_GET_ORB_HIGH(status) == 0 &&
		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
			orb->rcode = RCODE_COMPLETE;
			list_del(&orb->link);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&orb->link != &lu->orb_list) {
		orb->callback(orb, &status);
		kref_put(&orb->kref, free_orb); /* orb callback reference */
	} else {
		fw_error("status write for unknown orb\n");
	}

	fw_send_response(card, request, RCODE_COMPLETE);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct sbp2_orb *orb = data;
	unsigned long flags;

	/*
	 * This is a little tricky.  We can get the status write for
	 * the orb before we get this callback.  The status write
	 * handler above will assume the orb pointer transaction was
	 * successful and set the rcode to RCODE_COMPLETE for the orb.
	 * So this callback only sets the rcode if it hasn't already
	 * been set and only does the cleanup if the transaction
	 * failed and we didn't already get a status write.
	 */
	spin_lock_irqsave(&card->lock, flags);

	if (orb->rcode == -1)
		orb->rcode = rcode;
	if (orb->rcode != RCODE_COMPLETE) {
		list_del(&orb->link);
		spin_unlock_irqrestore(&card->lock, flags);

		orb->callback(orb, NULL);
		kref_put(&orb->kref, free_orb); /* orb callback reference */
	} else {
		spin_unlock_irqrestore(&card->lock, flags);
	}

	kref_put(&orb->kref, free_orb); /* transaction callback reference */
}

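/*
 * Queue an ORB on lu->orb_list and write its bus address to the agent
 * register at @offset.  Two ORB references are taken: one for the
 * transaction callback and one for the status write callback.
 */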
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
			  int node_id, int generation, u64 offset)
{
	struct fw_device *device = target_device(lu->tgt);
	struct sbp2_pointer orb_pointer;
	unsigned long flags;

	orb_pointer.high = 0;
	orb_pointer.low = cpu_to_be32(orb->request_bus);

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&orb->link, &lu->orb_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	kref_get(&orb->kref); /* transaction callback reference */
	kref_get(&orb->kref); /* orb callback reference */

	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
			node_id, generation, device->max_speed, offset,
			&orb_pointer, 8, complete_transaction, orb);
}

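/*
 * Cancel all ORBs still pending on this logical unit.  Returns 0 if at
 * least one ORB was pending, -ENOENT if the list was already empty.
 */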
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_device(lu->tgt);
	struct sbp2_orb *orb, *next;
	struct list_head list;
	unsigned long flags;
	int retval = -ENOENT;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&device->card->lock, flags);
	list_splice_init(&lu->orb_list, &list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	list_for_each_entry_safe(orb, next, &list, link) {
		retval = 0;
		if (fw_cancel_transaction(device->card, &orb->t) == 0)
			continue;

		orb->rcode = RCODE_CANCELLED;
		orb->callback(orb, NULL);
		kref_put(&orb->kref, free_orb); /* orb callback reference */
	}

	return retval;
}

static void complete_management_orb(struct sbp2_orb *base_orb,
				    struct sbp2_status *status)
{
	struct sbp2_management_orb *orb =
		container_of(base_orb, struct sbp2_management_orb, base);

	if (status)
		memcpy(&orb->status, status, sizeof(*status));
	complete(&orb->done);
}

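/*
 * Build a management ORB (login, logout, reconnect, ...), send it to
 * the management agent and wait for the status write.  If @response is
 * non-NULL, it receives the target's response buffer.
 */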
static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
				    int generation, int function,
				    int lun_or_login_id, void *response)
{
	struct fw_device *device = target_device(lu->tgt);
	struct sbp2_management_orb *orb;
	unsigned int timeout;
	int retval = -ENOMEM;

	if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
		return 0;

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL)
		return -ENOMEM;

	kref_init(&orb->base.kref);
	orb->response_bus =
		dma_map_single(device->card->device, &orb->response,
			       sizeof(orb->response), DMA_FROM_DEVICE);
	if (dma_mapping_error(device->card->device, orb->response_bus))
		goto fail_mapping_response;

	orb->request.response.high = 0;
	orb->request.response.low  = cpu_to_be32(orb->response_bus);

	orb->request.misc = cpu_to_be32(
		MANAGEMENT_ORB_NOTIFY |
		MANAGEMENT_ORB_FUNCTION(function) |
		MANAGEMENT_ORB_LUN(lun_or_login_id));
	orb->request.length = cpu_to_be32(
		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));

	orb->request.status_fifo.high =
		cpu_to_be32(lu->address_handler.offset >> 32);
	orb->request.status_fifo.low  =
		cpu_to_be32(lu->address_handler.offset);

	if (function == SBP2_LOGIN_REQUEST) {
		/* Ask for 2^2 == 4 seconds reconnect grace period */
		orb->request.misc |= cpu_to_be32(
			MANAGEMENT_ORB_RECONNECT(2) |
			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
		timeout = lu->tgt->mgt_orb_timeout;
	} else {
		timeout = SBP2_ORB_TIMEOUT;
	}

	init_completion(&orb->done);
	orb->base.callback = complete_management_orb;

	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->base.request_bus))
		goto fail_mapping_request;

	sbp2_send_orb(&orb->base, lu, node_id, generation,
		      lu->tgt->management_agent_address);

	wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));

	retval = -EIO;
	if (sbp2_cancel_orbs(lu) == 0) {
		fw_error("%s: orb reply timed out, rcode=0x%02x\n",
			 lu->tgt->bus_id, orb->base.rcode);
		goto out;
	}

	if (orb->base.rcode != RCODE_COMPLETE) {
		fw_error("%s: management write failed, rcode 0x%02x\n",
			 lu->tgt->bus_id, orb->base.rcode);
		goto out;
	}

	if (STATUS_GET_RESPONSE(orb->status) != 0 ||
	    STATUS_GET_SBP_STATUS(orb->status) != 0) {
		fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
			 STATUS_GET_RESPONSE(orb->status),
			 STATUS_GET_SBP_STATUS(orb->status));
		goto out;
	}

	retval = 0;
 out:
	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
 fail_mapping_request:
	dma_unmap_single(device->card->device, orb->response_bus,
			 sizeof(orb->response), DMA_FROM_DEVICE);
 fail_mapping_response:
	if (response)
		memcpy(response, orb->response, sizeof(orb->response));
	kref_put(&orb->base.kref, free_orb);

	return retval;
}

static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_device(lu->tgt);
	__be32 d = 0;

	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
			   lu->tgt->node_id, lu->generation, device->max_speed,
			   lu->command_block_agent_address + SBP2_AGENT_RESET,
			   &d, 4);
}

static void complete_agent_reset_write_no_wait(struct fw_card *card,
		int rcode, void *payload, size_t length, void *data)
{
	kfree(data);
}

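/*
 * Like sbp2_agent_reset(), but asynchronous, so that it can be used
 * from the ORB completion path; the quadlet write is fire-and-forget.
 */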
static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_device(lu->tgt);
	struct fw_transaction *t;
	static __be32 d;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (t == NULL)
		return;

	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
			lu->tgt->node_id, lu->generation, device->max_speed,
			lu->command_block_agent_address + SBP2_AGENT_RESET,
			&d, 4, complete_agent_reset_write_no_wait, t);
}

static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
{
	/*
	 * We may access dont_block without taking card->lock here:
	 * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
	 * are currently serialized against each other.
	 * And a wrong result in sbp2_conditionally_block()'s access of
	 * dont_block is rather harmless, it simply misses its first chance.
	 */
	--lu->tgt->dont_block;
}

/*
 * Blocks lu->tgt if all of the following conditions are met:
 *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
 *     logical units have been finished (indicated by dont_block == 0).
 *   - lu->generation is stale.
 *
 * Note, scsi_block_requests() must be called while holding card->lock,
 * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
 * unblock the target.
 */
static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = target_device(tgt)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (!tgt->dont_block && !lu->blocked &&
	    lu->generation != card->generation) {
		lu->blocked = true;
		if (++tgt->blocked == 1)
			scsi_block_requests(shost);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}

/*
 * Unblocks lu->tgt as soon as all its logical units can be unblocked.
 * Note, it is harmless to run scsi_unblock_requests() outside the
 * card->lock protected section.  On the other hand, running it inside
 * the section might clash with shost->host_lock.
 */
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = target_device(tgt)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;
	bool unblock = false;

	spin_lock_irqsave(&card->lock, flags);
	if (lu->blocked && lu->generation == card->generation) {
		lu->blocked = false;
		unblock = --tgt->blocked == 0;
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (unblock)
		scsi_unblock_requests(shost);
}

/*
 * Prevents future blocking of tgt and unblocks it.
 * Note, it is harmless to run scsi_unblock_requests() outside the
 * card->lock protected section.  On the other hand, running it inside
 * the section might clash with shost->host_lock.
 */
static void sbp2_unblock(struct sbp2_target *tgt)
{
	struct fw_card *card = target_device(tgt)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	++tgt->dont_block;
	spin_unlock_irqrestore(&card->lock, flags);

	scsi_unblock_requests(shost);
}

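/*
 * Convert the 16-bit LUN from an SBP-2 logical unit number entry to
 * the flat LUN integer used by the SCSI core.
 */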
static int sbp2_lun2int(u16 lun)
{
	struct scsi_lun eight_bytes_lun;

	memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
	eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
	eight_bytes_lun.scsi_lun[1] = lun & 0xff;

	return scsilun_to_int(&eight_bytes_lun);
}

/*
 * Write retransmit retry values into the BUSY_TIMEOUT register.
 * - The single-phase retry protocol is supported by all SBP-2 devices, but the
 *   default retry_limit value is 0 (i.e. never retry transmission). We write a
 *   saner value after logging into the device.
 * - The dual-phase retry protocol is optional to implement, and if not
 *   supported, writes to the dual-phase portion of the register will be
 *   ignored. We try to write the original 1394-1995 default here.
 * - In the case of devices that are also SBP-3-compliant, all writes are
 *   ignored, as the register is read-only, but contains single-phase retry of
 *   15, which is what we're trying to set for all SBP-2 devices anyway, so this
 *   write attempt is safe and yields more consistent behavior for all devices.
 *
 * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
 * and section 6.4 of the SBP-3 spec for further details.
 */
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
	struct fw_device *device = target_device(lu->tgt);
	__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);

	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
			   lu->tgt->node_id, lu->generation, device->max_speed,
			   CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4);
}

static void sbp2_reconnect(struct work_struct *work);

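/*
 * Runs on fw_workqueue.  Logs in (or logs in again after a bus reset)
 * and, on first success, registers the logical unit with the SCSI
 * core.  Transient failures are retried by requeueing the work.
 */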
static void sbp2_login(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = target_device(tgt);
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct sbp2_login_response response;
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		return;

	generation    = device->generation;
	smp_rmb();    /* node IDs must not be older than generation */
	node_id       = device->node_id;
	local_node_id = device->card->node_id;

	/* If this is a re-login attempt, log out, or we might be rejected. */
	if (lu->has_sdev)
		sbp2_send_management_orb(lu, device->node_id, generation,
				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);

	if (sbp2_send_management_orb(lu, node_id, generation,
				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
		if (lu->retries++ < 5) {
			sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
		} else {
			fw_error("%s: failed to login to LUN %04x\n",
				 tgt->bus_id, lu->lun);
			/* Let any waiting I/O fail from now on. */
			sbp2_unblock(lu->tgt);
		}
		return;
	}

	tgt->node_id	  = node_id;
	tgt->address_high = local_node_id << 16;
	smp_wmb();	  /* node IDs must not be older than generation */
	lu->generation	  = generation;

	lu->command_block_agent_address =
		((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
		      << 32) | be32_to_cpu(response.command_block_agent.low);
	lu->login_id = be32_to_cpu(response.misc) & 0xffff;

	fw_notify("%s: logged in to LUN %04x (%d retries)\n",
		  tgt->bus_id, lu->lun, lu->retries);

	/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
	sbp2_set_busy_timeout(lu);

	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
	sbp2_agent_reset(lu);

	/* This was a re-login. */
	if (lu->has_sdev) {
		sbp2_cancel_orbs(lu);
		sbp2_conditionally_unblock(lu);

		return;
	}

	if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
		ssleep(SBP2_INQUIRY_DELAY);

	shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
	/*
	 * FIXME:  We are unable to perform reconnects while in sbp2_login().
	 * Therefore __scsi_add_device() will get into trouble if a bus reset
	 * happens in parallel.  It will either fail or leave us with an
	 * unusable sdev.  As a workaround we check for this and retry the
	 * whole login and SCSI probing.
	 */

	/* Reported error during __scsi_add_device() */
	if (IS_ERR(sdev))
		goto out_logout_login;

	/* Unreported error during __scsi_add_device() */
	smp_rmb(); /* get current card generation */
	if (generation != device->card->generation) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
		goto out_logout_login;
	}

	/* No error during __scsi_add_device() */
	lu->has_sdev = true;
	scsi_device_put(sdev);
	sbp2_allow_block(lu);

	return;

 out_logout_login:
	smp_rmb(); /* generation may have changed */
	generation = device->generation;
	smp_rmb(); /* node_id must not be older than generation */

	sbp2_send_management_orb(lu, device->node_id, generation,
				 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
	/*
	 * If a bus reset happened, sbp2_update will have requeued
	 * lu->work already.  Reset the work from reconnect to login.
	 */
	PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
}

static void sbp2_reconnect(struct work_struct *work)
{
	struct sbp2_logical_unit *lu =
		container_of(work, struct sbp2_logical_unit, work.work);
	struct sbp2_target *tgt = lu->tgt;
	struct fw_device *device = target_device(tgt);
	int generation, node_id, local_node_id;

	if (fw_device_is_shutdown(device))
		return;

	generation    = device->generation;
	smp_rmb();    /* node IDs must not be older than generation */
	node_id       = device->node_id;
	local_node_id = device->card->node_id;

	if (sbp2_send_management_orb(lu, node_id, generation,
				     SBP2_RECONNECT_REQUEST,
				     lu->login_id, NULL) < 0) {
		/*
		 * If reconnect was impossible even though we are in the
		 * current generation, fall back and try to log in again.
		 *
		 * We could check for "Function rejected" status, but
		 * looking at the bus generation is simpler and more general.
		 */
		smp_rmb(); /* get current card generation */
		if (generation == device->card->generation ||
		    lu->retries++ >= 5) {
			fw_error("%s: failed to reconnect\n", tgt->bus_id);
			lu->retries = 0;
			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
		}
		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));

		return;
	}

	tgt->node_id      = node_id;
	tgt->address_high = local_node_id << 16;
	smp_wmb();	  /* node IDs must not be older than generation */
	lu->generation	  = generation;

	fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
		  tgt->bus_id, lu->lun, lu->retries);

	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);
	sbp2_conditionally_unblock(lu);
}

static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
	struct sbp2_logical_unit *lu;

	lu = kmalloc(sizeof(*lu), GFP_KERNEL);
	if (!lu)
		return -ENOMEM;

	lu->address_handler.length           = 0x100;
	lu->address_handler.address_callback = sbp2_status_write;
	lu->address_handler.callback_data    = lu;

	if (fw_core_add_address_handler(&lu->address_handler,
					&fw_high_memory_region) < 0) {
		kfree(lu);
		return -ENOMEM;
	}

	lu->tgt      = tgt;
	lu->lun      = lun_entry & 0xffff;
	lu->login_id = INVALID_LOGIN_ID;
	lu->retries  = 0;
	lu->has_sdev = false;
	lu->blocked  = false;
	++tgt->dont_block;
	INIT_LIST_HEAD(&lu->orb_list);
	INIT_DELAYED_WORK(&lu->work, sbp2_login);

	list_add_tail(&lu->link, &tgt->lu_list);
	return 0;
}

static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
				      const u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
		    sbp2_add_logical_unit(tgt, value) < 0)
			return -ENOMEM;
	return 0;
}

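/*
 * Scan the unit directory in the config ROM for the management agent
 * offset, directory ID, model, firmware revision, management ORB
 * timeout, and any logical unit entries or directories.
 */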
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
			      u32 *model, u32 *firmware_revision)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {

		case CSR_DEPENDENT_INFO | CSR_OFFSET:
			tgt->management_agent_address =
					CSR_REGISTER_BASE + 4 * value;
			break;

		case CSR_DIRECTORY_ID:
			tgt->directory_id = value;
			break;

		case CSR_MODEL:
			*model = value;
			break;

		case SBP2_CSR_FIRMWARE_REVISION:
			*firmware_revision = value;
			break;

		case SBP2_CSR_UNIT_CHARACTERISTICS:
			/* the timeout value is stored in 500ms units */
			tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
			break;

		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
			if (sbp2_add_logical_unit(tgt, value) < 0)
				return -ENOMEM;
			break;

		case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
			/* Adjust for the increment in the iterator */
			if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
				return -ENOMEM;
			break;
		}
	}
	return 0;
}

/*
 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
 * provided in the config rom. Most devices do provide a value, which
 * we'll use for login management orbs, but with some sane limits.
 */
static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
{
	unsigned int timeout = tgt->mgt_orb_timeout;

	if (timeout > 40000)
		fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
			  tgt->bus_id, timeout / 1000);

	tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
}

static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
				  u32 firmware_revision)
{
	int i;
	unsigned int w = sbp2_param_workarounds;

	if (w)
		fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
			  "if you need the workarounds parameter for %s\n",
			  tgt->bus_id);

	if (w & SBP2_WORKAROUND_OVERRIDE)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {

		if (sbp2_workarounds_table[i].firmware_revision !=
		    (firmware_revision & 0xffffff00))
			continue;

		if (sbp2_workarounds_table[i].model != model &&
		    sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
			continue;

		w |= sbp2_workarounds_table[i].workarounds;
		break;
	}
 out:
	if (w)
		fw_notify("Workarounds for %s: 0x%x "
			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
			  tgt->bus_id, w, firmware_revision, model);
	tgt->workarounds = w;
}

static struct scsi_host_template scsi_driver_template;
static int sbp2_remove(struct device *dev);

static int sbp2_probe(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device = fw_parent_device(unit);
	struct sbp2_target *tgt;
	struct sbp2_logical_unit *lu;
	struct Scsi_Host *shost;
	u32 model, firmware_revision;

	if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
		BUG_ON(dma_set_max_seg_size(device->card->device,
					    SBP2_MAX_SEG_SIZE));

	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
	if (shost == NULL)
		return -ENOMEM;

	tgt = (struct sbp2_target *)shost->hostdata;
	dev_set_drvdata(&unit->device, tgt);
	tgt->unit = unit;
	INIT_LIST_HEAD(&tgt->lu_list);
	tgt->bus_id = dev_name(&unit->device);
	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];

	if (fw_device_enable_phys_dma(device) < 0)
		goto fail_shost_put;

	shost->max_cmd_len = SBP2_MAX_CDB_SIZE;

	if (scsi_add_host(shost, &unit->device) < 0)
		goto fail_shost_put;

	/* implicit directory ID */
	tgt->directory_id = ((unit->directory - device->config_rom) * 4
			     + CSR_CONFIG_ROM) & 0xffffff;

	firmware_revision = SBP2_ROM_VALUE_MISSING;
	model		  = SBP2_ROM_VALUE_MISSING;

	if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
			       &firmware_revision) < 0)
		goto fail_remove;

	sbp2_clamp_management_orb_timeout(tgt);
	sbp2_init_workarounds(tgt, model, firmware_revision);

	/*
	 * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
	 * and so on up to 4096 bytes.  The SBP-2 max_payload field
	 * specifies the max payload size as 2 ^ (max_payload + 2), so
	 * if we set this to max_speed + 7, we get the right value.
	 */
	tgt->max_payload = min3(device->max_speed + 7, 10U,
				device->card->max_receive - 1);

	/* Do the login in a workqueue so we can easily reschedule retries. */
	list_for_each_entry(lu, &tgt->lu_list, link)
		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));

	return 0;

 fail_remove:
	sbp2_remove(dev);
	return -ENOMEM;

 fail_shost_put:
	scsi_host_put(shost);
	return -ENOMEM;
}

static void sbp2_update(struct fw_unit *unit)
{
	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
	struct sbp2_logical_unit *lu;

	fw_device_enable_phys_dma(fw_parent_device(unit));

	/*
	 * Fw-core serializes sbp2_update() against sbp2_remove().
	 * Iteration over tgt->lu_list is therefore safe here.
	 */
	list_for_each_entry(lu, &tgt->lu_list, link) {
		sbp2_conditionally_block(lu);
		lu->retries = 0;
		sbp2_queue_work(lu, 0);
	}
}

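/* fw-core serializes this against sbp2_update(), see above */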
static int sbp2_remove(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device = fw_parent_device(unit);
	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
	struct sbp2_logical_unit *lu, *next;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	struct scsi_device *sdev;

	/* prevent deadlocks */
	sbp2_unblock(tgt);

	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
		cancel_delayed_work_sync(&lu->work);
		sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		}
		if (lu->login_id != INVALID_LOGIN_ID) {
			int generation, node_id;
			/*
			 * tgt->node_id may be obsolete here if we failed
			 * during initial login or after a bus reset where
			 * the topology changed.
			 */
			generation = device->generation;
			smp_rmb(); /* node_id vs. generation */
			node_id    = device->node_id;
			sbp2_send_management_orb(lu, node_id, generation,
						 SBP2_LOGOUT_REQUEST,
						 lu->login_id, NULL);
		}
		fw_core_remove_address_handler(&lu->address_handler);
		list_del(&lu->link);
		kfree(lu);
	}
	scsi_remove_host(shost);
	fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);

	scsi_host_put(shost);
	return 0;
}

#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
#define SBP2_SW_VERSION_ENTRY	0x00010483

static const struct ieee1394_device_id sbp2_id_table[] = {
	{
		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
				IEEE1394_MATCH_VERSION,
		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
		.version      = SBP2_SW_VERSION_ENTRY,
	},
	{ }
};

static struct fw_driver sbp2_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = sbp2_driver_name,
		.bus    = &fw_bus_type,
		.probe  = sbp2_probe,
		.remove = sbp2_remove,
	},
	.update   = sbp2_update,
	.id_table = sbp2_id_table,
};

static void sbp2_unmap_scatterlist(struct device *card_device,
				   struct sbp2_command_orb *orb)
{
	if (scsi_sg_count(orb->cmd))
		dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
			     scsi_sg_count(orb->cmd),
			     orb->cmd->sc_data_direction);

	if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
		dma_unmap_single(card_device, orb->page_table_bus,
				 sizeof(orb->page_table), DMA_TO_DEVICE);
}

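/*
 * Copy the sense data of an SBP-2 status block into the fixed-format
 * sense buffer expected by the SCSI core and fold the SAM status into
 * the SCSI result; unexpected status codes map to DID_ERROR.
 */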
static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
	int sam_status;

	sense_data[0] = 0x70;
	sense_data[1] = 0x0;
	sense_data[2] = sbp2_status[1];
	sense_data[3] = sbp2_status[4];
	sense_data[4] = sbp2_status[5];
	sense_data[5] = sbp2_status[6];
	sense_data[6] = sbp2_status[7];
	sense_data[7] = 10;
	sense_data[8] = sbp2_status[8];
	sense_data[9] = sbp2_status[9];
	sense_data[10] = sbp2_status[10];
	sense_data[11] = sbp2_status[11];
	sense_data[12] = sbp2_status[2];
	sense_data[13] = sbp2_status[3];
	sense_data[14] = sbp2_status[12];
	sense_data[15] = sbp2_status[13];

	sam_status = sbp2_status[0] & 0x3f;

	switch (sam_status) {
	case SAM_STAT_GOOD:
	case SAM_STAT_CHECK_CONDITION:
	case SAM_STAT_CONDITION_MET:
	case SAM_STAT_BUSY:
	case SAM_STAT_RESERVATION_CONFLICT:
	case SAM_STAT_COMMAND_TERMINATED:
		return DID_OK << 16 | sam_status;

	default:
		return DID_ERROR << 16;
	}
}

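/*
 * Completion callback for command ORBs: translate the SBP-2 status
 * into a SCSI result, unmap the DMA buffers and complete the command.
 */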
static void complete_command_orb(struct sbp2_orb *base_orb,
				 struct sbp2_status *status)
{
	struct sbp2_command_orb *orb =
		container_of(base_orb, struct sbp2_command_orb, base);
	struct fw_device *device = target_device(orb->lu->tgt);
	int result;

	if (status != NULL) {
		if (STATUS_GET_DEAD(*status))
			sbp2_agent_reset_no_wait(orb->lu);

		switch (STATUS_GET_RESPONSE(*status)) {
		case SBP2_STATUS_REQUEST_COMPLETE:
			result = DID_OK << 16;
			break;
		case SBP2_STATUS_TRANSPORT_FAILURE:
			result = DID_BUS_BUSY << 16;
			break;
		case SBP2_STATUS_ILLEGAL_REQUEST:
		case SBP2_STATUS_VENDOR_DEPENDENT:
		default:
			result = DID_ERROR << 16;
			break;
		}

		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
							   orb->cmd->sense_buffer);
	} else {
		/*
		 * If the orb completes with status == NULL, something
		 * went wrong, typically a bus reset happened mid-orb
		 * or when sending the write (less likely).
		 */
		result = DID_BUS_BUSY << 16;
		sbp2_conditionally_block(orb->lu);
	}

	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
	sbp2_unmap_scatterlist(device->card->device, orb);

	orb->cmd->result = result;
	orb->cmd->scsi_done(orb->cmd);
}

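/*
 * Map the command's scatter/gather list for DMA and describe it in the
 * ORB, either as a single immediate data descriptor or through an
 * unrestricted page table.
 */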
static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
		struct fw_device *device, struct sbp2_logical_unit *lu)
{
	struct scatterlist *sg = scsi_sglist(orb->cmd);
	int i, n;

	n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
		       orb->cmd->sc_data_direction);
	if (n == 0)
		goto fail;

	/*
	 * Handle the special case where there is only one element in
	 * the scatter list by converting it to an immediate block
	 * request. This is also a workaround for broken devices such
	 * as the second generation iPod which doesn't support page
	 * tables.
	 */
	if (n == 1) {
		orb->request.data_descriptor.high =
			cpu_to_be32(lu->tgt->address_high);
		orb->request.data_descriptor.low  =
			cpu_to_be32(sg_dma_address(sg));
		orb->request.misc |=
			cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
		return 0;
	}

	for_each_sg(sg, sg, n, i) {
		orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
		orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
	}

	orb->page_table_bus =
		dma_map_single(device->card->device, orb->page_table,
			       sizeof(orb->page_table), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->page_table_bus))
		goto fail_page_table;

	/*
	 * The data_descriptor pointer is the one case where we need
	 * to fill in the node ID part of the address.  All other
	 * pointers assume that the data referenced reside on the
	 * initiator (i.e. us), but data_descriptor can refer to data
	 * on other nodes so we need to put our ID in descriptor.high.
	 */
	orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
	orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
	orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
					 COMMAND_ORB_DATA_SIZE(n));

	return 0;

 fail_page_table:
	dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
		     scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
 fail:
	return -ENOMEM;
}

/* SCSI stack integration */

static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *cmd)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;
	struct fw_device *device = target_device(lu->tgt);
	struct sbp2_command_orb *orb;
	int generation, retval = SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Bidirectional commands are not yet implemented, and an unknown
	 * transfer direction is not handled.
	 */
	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
		cmd->result = DID_ERROR << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL) {
		fw_notify("failed to alloc orb\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Initialize rcode to something not RCODE_COMPLETE. */
	orb->base.rcode = -1;
	kref_init(&orb->base.kref);
	orb->lu = lu;
	orb->cmd = cmd;
	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
	orb->request.misc = cpu_to_be32(
		COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
		COMMAND_ORB_SPEED(device->max_speed) |
		COMMAND_ORB_NOTIFY);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);

	generation = device->generation;
	smp_rmb();    /* sbp2_map_scatterlist looks at tgt->address_high */

	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
		goto out;

	memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);

	orb->base.callback = complete_command_orb;
	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
		sbp2_unmap_scatterlist(device->card->device, orb);
		goto out;
	}

	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
		      lu->command_block_agent_address + SBP2_ORB_POINTER);
	retval = 0;
 out:
	kref_put(&orb->base.kref, free_orb);
	return retval;
}

static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
	struct sbp2_logical_unit *lu = sdev->hostdata;

	/* (Re-)Adding logical units via the SCSI stack is not supported. */
	if (!lu)
		return -ENOSYS;

	sdev->allow_restart = 1;

	/* SBP-2 requires quadlet alignment of the data buffers. */
	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);

	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
		sdev->inquiry_len = 36;

	return 0;
}

static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{
	struct sbp2_logical_unit *lu = sdev->hostdata;

	sdev->use_10_for_rw = 1;

	if (sbp2_param_exclusive_login)
		sdev->manage_start_stop = 1;

	if (sdev->type == TYPE_ROM)
		sdev->use_10_for_ms = 1;

	if (sdev->type == TYPE_DISK &&
	    lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
		sdev->skip_ms_page_8 = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
		sdev->fix_capacity = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
		sdev->start_stop_pwr_cond = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
		blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);

	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);

	return 0;
}

/*
 * Called by scsi stack when something has really gone wrong.  Usually
 * called when a command has timed-out for some reason.
 */
static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;

	fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);

	return SUCCESS;
}

/*
 * Format of /sys/bus/scsi/devices/.../ieee1394_id:
 * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
 *
 * This is the concatenation of target port identifier and logical unit
 * identifier as per SAM-2...SAM-4 annex A.
 */
static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct sbp2_logical_unit *lu;

	if (!sdev)
		return 0;

	lu = sdev->hostdata;

	return sprintf(buf, "%016llx:%06x:%04x\n",
			(unsigned long long)lu->tgt->guid,
			lu->tgt->directory_id, lu->lun);
}

static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);

static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
	&dev_attr_ieee1394_id,
	NULL
};

static struct scsi_host_template scsi_driver_template = {
	.module			= THIS_MODULE,
	.name			= "SBP-2 IEEE-1394",
	.proc_name		= sbp2_driver_name,
	.queuecommand		= sbp2_scsi_queuecommand,
	.slave_alloc		= sbp2_scsi_slave_alloc,
	.slave_configure	= sbp2_scsi_slave_configure,
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.cmd_per_lun		= 1,
	.can_queue		= 1,
	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
#endif

static int __init sbp2_init(void)
{
	return driver_register(&sbp2_driver.driver);
}

static void __exit sbp2_cleanup(void)
{
	driver_unregister(&sbp2_driver.driver);
}

module_init(sbp2_init);
module_exit(sbp2_cleanup);