Commit c9976797 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (55 commits)
  ieee1394: sbp2: code formatting around work_struct stuff
  ieee1394: nodemgr: remove a kcalloc
  ieee1394: conditionally export ieee1394_bus_type
  ieee1394: Consolidate driver registering
  ieee1394: sbp2: convert from PCI DMA to generic DMA
  ieee1394: nodemgr: spaces to tabs
  ieee1394: nodemgr: fix deadlock in shutdown
  ieee1394: nodemgr: remove duplicate assignment
  sbp2: make 1bit bitfield unsigned
  ieee1394: schedule *_oui sysfs attributes for removal
  ieee1394: schedule unused symbol exports for removal
  ieee1394: dv1394: schedule for feature removal
  ieee1394: raw1394: defer feature removal of old isoch interface
  ieee1394: ohci1394: call PMac code in shutdown only for proper machines
  ieee1394: ohci1394: reformat PPC_PMAC platform code
  ieee1394: ohci1394: add PPC_PMAC platform code to driver probe
  ieee1394: sbp2: wrap two functions into one
  ieee1394: sbp2: update comment on things to do
  ieee1394: sbp2: use list_move_tail()
  ieee1394: sbp2: more concise names for types and variables
  ...
......@@ -30,11 +30,39 @@ Who: Adrian Bunk <bunk@stusta.de>
---------------------------
What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
When: November 2006
Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
more efficient. You should really be using libraw1394 for raw1394
access anyway.
Who: Jody McIntyre <scjody@modernduck.com>
When: June 2007
Why: Deprecated in favour of the more efficient and robust rawiso interface.
Affected are applications which use the deprecated part of libraw1394
(raw1394_iso_write, raw1394_start_iso_write, raw1394_start_iso_rcv,
raw1394_stop_iso_rcv) or bypass libraw1394.
Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
---------------------------
What: dv1394 driver (CONFIG_IEEE1394_DV1394)
When: June 2007
Why: Replaced by raw1394 + userspace libraries, notably libiec61883. This
shift of application support has been indicated on www.linux1394.org
and developers' mailing lists for quite some time. Major applications
have been converted, with the exception of ffmpeg and hence xine.
Piped output of dvgrab2 is a partial equivalent to dv1394.
Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
---------------------------
What: ieee1394 core's unused exports (CONFIG_IEEE1394_EXPORT_FULL_API)
When: January 2007
Why: There are no projects known to use these exported symbols, except
dfg1394 (uses one symbol whose functionality is core-internal now).
Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
---------------------------
What: ieee1394's *_oui sysfs attributes (CONFIG_IEEE1394_OUI_DB)
When: January 2007
Files: drivers/ieee1394/: oui.db, oui2c.sh
Why: big size, little value
Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
---------------------------
......
......@@ -36,7 +36,7 @@ config IEEE1394_VERBOSEDEBUG
else says N.
config IEEE1394_OUI_DB
bool "OUI Database built-in"
bool "OUI Database built-in (deprecated)"
depends on IEEE1394
help
If you say Y here, then an OUI list (vendor unique ID's) will be
......@@ -67,16 +67,11 @@ config IEEE1394_CONFIG_ROM_IP1394
eth1394 option below.
config IEEE1394_EXPORT_FULL_API
bool "Export all symbols of ieee1394's API"
bool "Export all symbols of ieee1394's API (deprecated)"
depends on IEEE1394
default n
help
Export all symbols of ieee1394's driver programming interface, even
those that are not currently used by the standard IEEE 1394 drivers.
This option does not affect the interface to userspace applications.
Say Y here if you want to compile externally developed drivers that
make extended use of ieee1394's API. It is otherwise safe to say N.
This option will be removed soon. Don't worry, say N.
comment "Device Drivers"
depends on IEEE1394
......@@ -125,7 +120,7 @@ comment "SBP-2 support (for storage devices) requires SCSI"
config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
depends on IEEE1394 && SCSI && (PCI || BROKEN)
depends on IEEE1394 && SCSI
help
This option enables you to use SBP-2 devices connected to an IEEE
1394 bus. SBP-2 devices include storage devices like harddisks and
......@@ -161,17 +156,12 @@ config IEEE1394_ETH1394
MCAP, therefore multicast support is significantly limited.
config IEEE1394_DV1394
tristate "OHCI-DV I/O support"
tristate "OHCI-DV I/O support (deprecated)"
depends on IEEE1394 && IEEE1394_OHCI1394
help
This driver allows you to transmit and receive DV (digital video)
streams on an OHCI-1394 card using a simple frame-oriented
interface.
The user-space API for dv1394 is documented in dv1394.h.
To compile this driver as a module, say M here: the
module will be called dv1394.
The dv1394 driver will be removed from Linux in a future release.
Its functionality is now provided by raw1394 together with libraries
such as libiec61883.
config IEEE1394_RAWIO
tristate "Raw IEEE1394 I/O support"
......
......@@ -3,8 +3,11 @@
#
ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
highlevel.o csr.o nodemgr.o oui.o dma.o iso.o \
highlevel.o csr.o nodemgr.o dma.o iso.o \
csr1212.o config_roms.o
ifdef CONFIG_IEEE1394_OUI_DB
ieee1394-objs += oui.o
endif
obj-$(CONFIG_IEEE1394) += ieee1394.o
obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
......
......@@ -158,12 +158,10 @@ static void host_reset(struct hpsb_host *host)
*/
static inline void calculate_expire(struct csr_control *csr)
{
unsigned long usecs =
(csr->split_timeout_hi & 0x07) * USEC_PER_SEC +
(csr->split_timeout_lo >> 19) * 125L;
csr->expire = usecs_to_jiffies(usecs > 100000L ? usecs : 100000L);
unsigned int usecs = (csr->split_timeout_hi & 7) * 1000000 +
(csr->split_timeout_lo >> 19) * 125;
csr->expire = usecs_to_jiffies(usecs > 100000 ? usecs : 100000);
HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
}
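
For illustration, here is a minimal user-space sketch of the same arithmetic. The register layout (seconds in bits 2:0 of SPLIT_TIMEOUT_HI, 125 us units in bits 31:19 of SPLIT_TIMEOUT_LO) and the 100 ms floor are taken from the function above; the sample register values are made up for the example.

	#include <stdio.h>

	/* Mirror of calculate_expire(): seconds live in bits 2:0 of
	 * SPLIT_TIMEOUT_HI, the fraction in bits 31:19 of SPLIT_TIMEOUT_LO
	 * counts 125 us units.  Anything below 100 ms is clamped to the
	 * 100 ms minimum. */
	static unsigned int split_timeout_usecs(unsigned int hi, unsigned int lo)
	{
		unsigned int usecs = (hi & 7) * 1000000 + (lo >> 19) * 125;

		return usecs > 100000 ? usecs : 100000;
	}

	int main(void)
	{
		/* hypothetical register values: 0 s + 800 * 125 us = 100 ms */
		printf("%u us\n", split_timeout_usecs(0, 800u << 19));
		/* 2 s + 0 us = 2000000 us */
		printf("%u us\n", split_timeout_usecs(2, 0));
		return 0;
	}
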
......
......@@ -1536,27 +1536,20 @@ static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count
static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct video_card *video;
struct video_card *video = file_to_video_card(file);
unsigned long flags;
int ret = -EINVAL;
void __user *argp = (void __user *)arg;
DECLARE_WAITQUEUE(wait, current);
lock_kernel();
video = file_to_video_card(file);
/* serialize this to prevent multi-threaded mayhem */
if (file->f_flags & O_NONBLOCK) {
if (!mutex_trylock(&video->mtx)) {
unlock_kernel();
if (!mutex_trylock(&video->mtx))
return -EAGAIN;
}
} else {
if (mutex_lock_interruptible(&video->mtx)) {
unlock_kernel();
if (mutex_lock_interruptible(&video->mtx))
return -ERESTARTSYS;
}
}
switch(cmd)
......@@ -1780,7 +1773,6 @@ static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
out:
mutex_unlock(&video->mtx);
unlock_kernel();
return ret;
}
......@@ -2188,12 +2180,8 @@ static struct ieee1394_device_id dv1394_id_table[] = {
MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
static struct hpsb_protocol_driver dv1394_driver = {
.name = "DV/1394 Driver",
.name = "dv1394",
.id_table = dv1394_id_table,
.driver = {
.name = "dv1394",
.bus = &ieee1394_bus_type,
},
};
......@@ -2587,6 +2575,10 @@ static int __init dv1394_init_module(void)
{
int ret;
printk(KERN_WARNING
"WARNING: The dv1394 driver is unsupported and will be removed "
"from Linux soon. Use raw1394 instead.\n");
cdev_init(&dv1394_cdev, &dv1394_fops);
dv1394_cdev.owner = THIS_MODULE;
kobject_set_name(&dv1394_cdev.kobj, "dv1394");
......
......@@ -474,12 +474,10 @@ static struct ieee1394_device_id eth1394_id_table[] = {
MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
static struct hpsb_protocol_driver eth1394_proto_driver = {
.name = "IPv4 over 1394 Driver",
.name = ETH1394_DRIVER_NAME,
.id_table = eth1394_id_table,
.update = eth1394_update,
.driver = {
.name = ETH1394_DRIVER_NAME,
.bus = &ieee1394_bus_type,
.probe = eth1394_probe,
.remove = eth1394_remove,
},
......
......@@ -24,7 +24,6 @@ struct hpsb_address_serve {
/* Only the following structures are of interest to actual highlevel drivers. */
struct hpsb_highlevel {
struct module *owner;
const char *name;
/* Any of the following pointers can legally be NULL, except for
......
......@@ -44,9 +44,10 @@ static void delayed_reset_bus(struct work_struct *work)
CSR_SET_BUS_INFO_GENERATION(host->csr.rom, generation);
if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
/* CSR image creation failed, reset generation field and do not
* issue a bus reset. */
CSR_SET_BUS_INFO_GENERATION(host->csr.rom, host->csr.generation);
/* CSR image creation failed.
* Reset generation field and do not issue a bus reset. */
CSR_SET_BUS_INFO_GENERATION(host->csr.rom,
host->csr.generation);
return;
}
......@@ -54,7 +55,8 @@ static void delayed_reset_bus(struct work_struct *work)
host->update_config_rom = 0;
if (host->driver->set_hw_config_rom)
host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
host->driver->set_hw_config_rom(host,
host->csr.rom->bus_info_data);
host->csr.gen_timestamp[host->csr.generation] = jiffies;
hpsb_reset_bus(host, SHORT_RESET);
......@@ -70,7 +72,8 @@ static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
return -1;
}
static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
unsigned long arg)
{
return -1;
}
......@@ -128,10 +131,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
return NULL;
h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
if (!h->csr.rom) {
kfree(h);
return NULL;
}
if (!h->csr.rom)
goto fail;
h->hostdata = h + 1;
h->driver = drv;
......@@ -151,16 +152,15 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
init_timer(&h->timeout);
h->timeout.data = (unsigned long) h;
h->timeout.function = abort_timedouts;
h->timeout_interval = HZ / 20; // 50ms by default
h->timeout_interval = HZ / 20; /* 50ms, half of minimum SPLIT_TIMEOUT */
h->topology_map = h->csr.topology_map + 3;
h->speed_map = (u8 *)(h->csr.speed_map + 2);
mutex_lock(&host_num_alloc);
while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
hostnum++;
mutex_unlock(&host_num_alloc);
h->id = hostnum;
memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
......@@ -171,13 +171,19 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
h->class_dev.class = &hpsb_host_class;
snprintf(h->class_dev.class_id, BUS_ID_SIZE, "fw-host%d", h->id);
device_register(&h->device);
class_device_register(&h->class_dev);
if (device_register(&h->device))
goto fail;
if (class_device_register(&h->class_dev)) {
device_unregister(&h->device);
goto fail;
}
get_device(&h->device);
mutex_unlock(&host_num_alloc);
return h;
fail:
kfree(h);
return NULL;
}
int hpsb_add_host(struct hpsb_host *host)
......@@ -229,7 +235,8 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
/* Wait 60 seconds from the last time this generation number was
* used. */
reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
reset_delay =
(60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
else
/* Wait 1 second in case some other code wants to change the
* Config ROM in the near future. */
......
......@@ -1237,10 +1237,10 @@ EXPORT_SYMBOL(highlevel_remove_host);
/** nodemgr.c **/
EXPORT_SYMBOL(hpsb_node_fill_packet);
EXPORT_SYMBOL(hpsb_node_write);
EXPORT_SYMBOL(hpsb_register_protocol);
EXPORT_SYMBOL(__hpsb_register_protocol);
EXPORT_SYMBOL(hpsb_unregister_protocol);
EXPORT_SYMBOL(ieee1394_bus_type);
#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
EXPORT_SYMBOL(ieee1394_bus_type);
EXPORT_SYMBOL(nodemgr_for_each_host);
#endif
......
......@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/freezer.h>
#include <asm/atomic.h>
......@@ -67,7 +68,7 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
{
quadlet_t q;
u8 i, *speed, old_speed, good_speed;
int ret;
int error;
speed = &(ci->host->speed[NODEID_TO_NODE(ci->nodeid)]);
old_speed = *speed;
......@@ -79,9 +80,9 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
* just finished its initialization. */
for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
*speed = i;
ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
&q, sizeof(quadlet_t));
if (ret)
error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
&q, sizeof(quadlet_t));
if (error)
break;
*buffer = q;
good_speed = i;
......@@ -95,19 +96,19 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
return 0;
}
*speed = old_speed;
return ret;
return error;
}
static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
void *buffer, void *__ci)
void *buffer, void *__ci)
{
struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
int i, ret;
int i, error;
for (i = 1; ; i++) {
ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
buffer, length);
if (!ret) {
error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
buffer, length);
if (!error) {
ci->speed_unverified = 0;
break;
}
......@@ -118,14 +119,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
/* The ieee1394_core guessed the node's speed capability from
* the self ID. Check whether a lower speed works. */
if (ci->speed_unverified && length == sizeof(quadlet_t)) {
ret = nodemgr_check_speed(ci, addr, buffer);
if (!ret)
error = nodemgr_check_speed(ci, addr, buffer);
if (!error)
break;
}
if (msleep_interruptible(334))
return -EINTR;
}
return ret;
return error;
}
static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
......@@ -260,9 +261,20 @@ static struct device nodemgr_dev_template_ne = {
.release = nodemgr_release_ne,
};
/* This dummy driver prevents the host devices from being scanned. We have no
* useful drivers for them yet, and there would be a deadlock possible if the
* driver core scans the host device while the host's low-level driver (i.e.
* the host's parent device) is being removed. */
static struct device_driver nodemgr_mid_layer_driver = {
.bus = &ieee1394_bus_type,
.name = "nodemgr",
.owner = THIS_MODULE,
};
struct device nodemgr_dev_template_host = {
.bus = &ieee1394_bus_type,
.release = nodemgr_release_host,
.driver = &nodemgr_mid_layer_driver,
};
......@@ -307,8 +319,8 @@ static ssize_t fw_drv_show_##field (struct device_driver *drv, char *buf) \
return sprintf(buf, format_string, (type)driver->field);\
} \
static struct driver_attribute driver_attr_drv_##field = { \
.attr = {.name = __stringify(field), .mode = S_IRUGO }, \
.show = fw_drv_show_##field, \
.attr = {.name = __stringify(field), .mode = S_IRUGO }, \
.show = fw_drv_show_##field, \
};
......@@ -362,7 +374,7 @@ static ssize_t fw_show_ne_tlabels_mask(struct device *dev,
#endif
spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
return sprintf(buf, "0x%016llx\n", tm);
return sprintf(buf, "0x%016llx\n", (unsigned long long)tm);
}
static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
#endif /* HPSB_DEBUG_TLABELS */
......@@ -374,11 +386,11 @@ static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute
int state = simple_strtoul(buf, NULL, 10);
if (state == 1) {
down_write(&dev->bus->subsys.rwsem);
device_release_driver(dev);
ud->ignore_driver = 1;
up_write(&dev->bus->subsys.rwsem);
} else if (!state)
down_write(&ieee1394_bus_type.subsys.rwsem);
device_release_driver(dev);
up_write(&ieee1394_bus_type.subsys.rwsem);
} else if (state == 0)
ud->ignore_driver = 0;
return count;
......@@ -413,11 +425,14 @@ static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf)
static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node);
static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count)
static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf,
size_t count)
{
int error = 0;
if (simple_strtoul(buf, NULL, 10) == 1)
bus_rescan_devices(&ieee1394_bus_type);
return count;
error = bus_rescan_devices(&ieee1394_bus_type);
return error ? error : count;
}
static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
{
......@@ -433,7 +448,7 @@ static ssize_t fw_set_ignore_drivers(struct bus_type *bus, const char *buf, size
if (state == 1)
ignore_drivers = 1;
else if (!state)
else if (state == 0)
ignore_drivers = 0;
return count;
......@@ -526,7 +541,7 @@ static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
int length = 0;
char *scratch = buf;
driver = container_of(drv, struct hpsb_protocol_driver, driver);
driver = container_of(drv, struct hpsb_protocol_driver, driver);
for (id = driver->id_table; id->match_flags != 0; id++) {
int need_coma = 0;
......@@ -583,7 +598,11 @@ static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
int i;
for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
driver_create_file(drv, fw_drv_attrs[i]);
if (driver_create_file(drv, fw_drv_attrs[i]))
goto fail;
return;
fail:
HPSB_ERR("Failed to add sysfs attribute for driver %s", driver->name);
}
......@@ -603,7 +622,12 @@ static void nodemgr_create_ne_dev_files(struct node_entry *ne)
int i;
for (i = 0; i < ARRAY_SIZE(fw_ne_attrs); i++)
device_create_file(dev, fw_ne_attrs[i]);
if (device_create_file(dev, fw_ne_attrs[i]))
goto fail;
return;
fail:
HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
(unsigned long long)ne->guid);
}
......@@ -613,11 +637,16 @@ static void nodemgr_create_host_dev_files(struct hpsb_host *host)
int i;
for (i = 0; i < ARRAY_SIZE(fw_host_attrs); i++)
device_create_file(dev, fw_host_attrs[i]);
if (device_create_file(dev, fw_host_attrs[i]))
goto fail;
return;
fail:
HPSB_ERR("Failed to add sysfs attribute for host %d", host->id);
}
static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid);
static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
nodeid_t nodeid);
static void nodemgr_update_host_dev_links(struct hpsb_host *host)
{
......@@ -628,12 +657,18 @@ static void nodemgr_update_host_dev_links(struct hpsb_host *host)
sysfs_remove_link(&dev->kobj, "busmgr_id");
sysfs_remove_link(&dev->kobj, "host_id");
if ((ne = find_entry_by_nodeid(host, host->irm_id)))
sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id");
if ((ne = find_entry_by_nodeid(host, host->busmgr_id)))
sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id");
if ((ne = find_entry_by_nodeid(host, host->node_id)))
sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id");
if ((ne = find_entry_by_nodeid(host, host->irm_id)) &&
sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id"))
goto fail;
if ((ne = find_entry_by_nodeid(host, host->busmgr_id)) &&
sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id"))
goto fail;
if ((ne = find_entry_by_nodeid(host, host->node_id)) &&
sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id"))
goto fail;
return;
fail:
HPSB_ERR("Failed to update sysfs attributes for host %d", host->id);
}
static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
......@@ -642,32 +677,39 @@ static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
int i;
for (i = 0; i < ARRAY_SIZE(fw_ud_attrs); i++)
device_create_file(dev, fw_ud_attrs[i]);
if (device_create_file(dev, fw_ud_attrs[i]))
goto fail;
if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
device_create_file(dev, &dev_attr_ud_specifier_id);
if (device_create_file(dev, &dev_attr_ud_specifier_id))
goto fail;
if (ud->flags & UNIT_DIRECTORY_VERSION)
device_create_file(dev, &dev_attr_ud_version);
if (device_create_file(dev, &dev_attr_ud_version))
goto fail;
if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
device_create_file(dev, &dev_attr_ud_vendor_id);
if (ud->vendor_name_kv)
device_create_file(dev, &dev_attr_ud_vendor_name_kv);
if (device_create_file(dev, &dev_attr_ud_vendor_id))
goto fail;
if (ud->vendor_name_kv &&
device_create_file(dev, &dev_attr_ud_vendor_name_kv))
goto fail;
}
if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
device_create_file(dev, &dev_attr_ud_model_id);
if (ud->model_name_kv)
device_create_file(dev, &dev_attr_ud_model_name_kv);
if (device_create_file(dev, &dev_attr_ud_model_id))
goto fail;
if (ud->model_name_kv &&
device_create_file(dev, &dev_attr_ud_model_name_kv))
goto fail;
}
return;
fail:
HPSB_ERR("Failed to add sysfs attributes for unit %s",
ud->device.bus_id);
}
static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
{
struct hpsb_protocol_driver *driver;
struct unit_directory *ud;
struct hpsb_protocol_driver *driver;
struct unit_directory *ud;
struct ieee1394_device_id *id;
/* We only match unit directories */
......@@ -675,55 +717,77 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
return 0;
ud = container_of(dev, struct unit_directory, device);
driver = container_of(drv, struct hpsb_protocol_driver, driver);
if (ud->ne->in_limbo || ud->ignore_driver)
return 0;
for (id = driver->id_table; id->match_flags != 0; id++) {
if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
id->vendor_id != ud->vendor_id)
continue;
/* We only match drivers of type hpsb_protocol_driver */
if (drv == &nodemgr_mid_layer_driver)
return 0;
if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
id->model_id != ud->model_id)
continue;
driver = container_of(drv, struct hpsb_protocol_driver, driver);
for (id = driver->id_table; id->match_flags != 0; id++) {
if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
id->vendor_id != ud->vendor_id)
continue;
if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
id->specifier_id != ud->specifier_id)
continue;
if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
id->model_id != ud->model_id)
continue;
if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
id->version != ud->version)
continue;
if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
id->specifier_id != ud->specifier_id)
continue;
if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
id->version != ud->version)
continue;
return 1;
}
}
return 0;
}
static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
static void nodemgr_remove_uds(struct node_entry *ne)
{
struct class_device *cdev, *next;
struct unit_directory *ud;
list_for_each_entry_safe(cdev, next, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
if (ud->ne != ne)
continue;
struct class_device *cdev;
struct unit_directory *tmp, *ud;
/* Iteration over nodemgr_ud_class.children has to be protected by
* nodemgr_ud_class.sem, but class_device_unregister() will eventually
* take nodemgr_ud_class.sem too. Therefore pick out one ud at a time,
* release the semaphore, and then unregister the ud. Since this code
* may be called from other contexts besides the knodemgrds, protect the
* gap after release of the semaphore by nodemgr_serialize_remove_uds.
*/
mutex_lock(&nodemgr_serialize_remove_uds);
for (;;) {
ud = NULL;
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
tmp = container_of(cdev, struct unit_directory,
class_dev);
if (tmp->ne == ne) {
ud = tmp;
break;
}
}
up(&nodemgr_ud_class.sem);
if (ud == NULL)
break;
class_device_unregister(&ud->class_dev);
device_unregister(&ud->device);
}
mutex_unlock(&nodemgr_serialize_remove_uds);
}
static void nodemgr_remove_ne(struct node_entry *ne)
{
struct device *dev = &ne->device;
struct device *dev;
dev = get_device(&ne->device);
if (!dev)
......@@ -748,7 +812,7 @@ static int __nodemgr_remove_host_dev(struct device *dev, void *data)
static void nodemgr_remove_host_dev(struct device *dev)
{
device_for_each_child(dev, NULL, __nodemgr_remove_host_dev);
WARN_ON(device_for_each_child(dev, NULL, __nodemgr_remove_host_dev));
sysfs_remove_link(&dev->kobj, "irm_id");
sysfs_remove_link(&dev->kobj, "busmgr_id");
sysfs_remove_link(&dev->kobj, "host_id");
......@@ -762,16 +826,16 @@ static void nodemgr_update_bus_options(struct node_entry *ne)
#endif
quadlet_t busoptions = be32_to_cpu(ne->csr->bus_info_data[2]);
ne->busopt.irmc = (busoptions >> 31) & 1;
ne->busopt.cmc = (busoptions >> 30) & 1;
ne->busopt.isc = (busoptions >> 29) & 1;
ne->busopt.bmc = (busoptions >> 28) & 1;
ne->busopt.pmc = (busoptions >> 27) & 1;
ne->busopt.cyc_clk_acc = (busoptions >> 16) & 0xff;
ne->busopt.max_rec = 1 << (((busoptions >> 12) & 0xf) + 1);
ne->busopt.irmc = (busoptions >> 31) & 1;
ne->busopt.cmc = (busoptions >> 30) & 1;
ne->busopt.isc = (busoptions >> 29) & 1;
ne->busopt.bmc = (busoptions >> 28) & 1;
ne->busopt.pmc = (busoptions >> 27) & 1;
ne->busopt.cyc_clk_acc = (busoptions >> 16) & 0xff;
ne->busopt.max_rec = 1 << (((busoptions >> 12) & 0xf) + 1);
ne->busopt.max_rom = (busoptions >> 8) & 0x3;
ne->busopt.generation = (busoptions >> 4) & 0xf;
ne->busopt.lnkspd = busoptions & 0x7;
ne->busopt.generation = (busoptions >> 4) & 0xf;
ne->busopt.lnkspd = busoptions & 0x7;
HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
"cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
......@@ -792,7 +856,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
ne = kzalloc(sizeof(*ne), GFP_KERNEL);
if (!ne)
return NULL;
goto fail_alloc;
ne->host = host;
ne->nodeid = nodeid;
......@@ -815,12 +879,15 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
snprintf(ne->class_dev.class_id, BUS_ID_SIZE, "%016Lx",
(unsigned long long)(ne->guid));
device_register(&ne->device);
class_device_register(&ne->class_dev);
if (device_register(&ne->device))
goto fail_devreg;
if (class_device_register(&ne->class_dev))
goto fail_classdevreg;
get_device(&ne->device);
if (ne->guid_vendor_oui)
device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui);
if (ne->guid_vendor_oui &&
device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui))
goto fail_addoiu;
nodemgr_create_ne_dev_files(ne);
nodemgr_update_bus_options(ne);
......@@ -830,17 +897,28 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
return ne;
fail_addoiu:
put_device(&ne->device);
fail_classdevreg:
device_unregister(&ne->device);
fail_devreg:
kfree(ne);
fail_alloc:
HPSB_ERR("Failed to create node ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
return NULL;
}
static struct node_entry *find_entry_by_guid(u64 guid)
{
struct class *class = &nodemgr_ne_class;
struct class_device *cdev;
struct node_entry *ne, *ret_ne = NULL;
down_read(&class->subsys.rwsem);
list_for_each_entry(cdev, &class->children, node) {
down(&nodemgr_ne_class.sem);
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
if (ne->guid == guid) {
......@@ -848,20 +926,20 @@ static struct node_entry *find_entry_by_guid(u64 guid)
break;
}
}
up_read(&class->subsys.rwsem);
up(&nodemgr_ne_class.sem);
return ret_ne;
return ret_ne;
}
static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid)
static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
nodeid_t nodeid)
{
struct class *class = &nodemgr_ne_class;
struct class_device *cdev;
struct node_entry *ne, *ret_ne = NULL;
down_read(&class->subsys.rwsem);
list_for_each_entry(cdev, &class->children, node) {
down(&nodemgr_ne_class.sem);
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
if (ne->host == host && ne->nodeid == nodeid) {
......@@ -869,7 +947,7 @@ static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t
break;
}
}
up_read(&class->subsys.rwsem);
up(&nodemgr_ne_class.sem);
return ret_ne;
}
......@@ -891,13 +969,25 @@ static void nodemgr_register_device(struct node_entry *ne,
snprintf(ud->class_dev.class_id, BUS_ID_SIZE, "%s-%u",
ne->device.bus_id, ud->id);
device_register(&ud->device);
class_device_register(&ud->class_dev);
if (device_register(&ud->device))
goto fail_devreg;
if (class_device_register(&ud->class_dev))
goto fail_classdevreg;
get_device(&ud->device);
if (ud->vendor_oui)
device_create_file(&ud->device, &dev_attr_ud_vendor_oui);
if (ud->vendor_oui &&
device_create_file(&ud->device, &dev_attr_ud_vendor_oui))
goto fail_addoui;
nodemgr_create_ud_dev_files(ud);
return;
fail_addoui:
put_device(&ud->device);
fail_classdevreg:
device_unregister(&ud->device);
fail_devreg:
HPSB_ERR("Failed to create unit %s", ud->device.bus_id);
}
......@@ -977,10 +1067,9 @@ static struct unit_directory *nodemgr_process_unit_directory
/* Logical Unit Number */
if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
ud_child = kmalloc(sizeof(*ud_child), GFP_KERNEL);
ud_child = kmemdup(ud, sizeof(*ud_child), GFP_KERNEL);
if (!ud_child)
goto unit_directory_error;
memcpy(ud_child, ud, sizeof(*ud_child));
nodemgr_register_device(ne, ud_child, &ne->device);
ud_child = NULL;
......@@ -1094,10 +1183,16 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
last_key_id = kv->key.id;
}
if (ne->vendor_oui)
device_create_file(&ne->device, &dev_attr_ne_vendor_oui);
if (ne->vendor_name_kv)
device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv);
if (ne->vendor_oui &&
device_create_file(&ne->device, &dev_attr_ne_vendor_oui))
goto fail;
if (ne->vendor_name_kv &&
device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv))
goto fail;
return;
fail:
HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
(unsigned long long)ne->guid);
}
#ifdef CONFIG_HOTPLUG
......@@ -1161,16 +1256,20 @@ static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
#endif /* CONFIG_HOTPLUG */
int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
int __hpsb_register_protocol(struct hpsb_protocol_driver *drv,
struct module *owner)
{
int ret;
int error;
/* This will cause a probe for devices */
ret = driver_register(&driver->driver);
if (!ret)
nodemgr_create_drv_files(driver);
drv->driver.bus = &ieee1394_bus_type;
drv->driver.owner = owner;
drv->driver.name = drv->name;
return ret;
/* This will cause a probe for devices */
error = driver_register(&drv->driver);
if (!error)
nodemgr_create_drv_files(drv);
return error;
}
void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
......@@ -1298,26 +1397,25 @@ static void nodemgr_node_scan_one(struct host_info *hi,
static void nodemgr_node_scan(struct host_info *hi, int generation)
{
int count;
struct hpsb_host *host = hi->host;
struct selfid *sid = (struct selfid *)host->topology_map;
nodeid_t nodeid = LOCAL_BUS;
int count;
struct hpsb_host *host = hi->host;
struct selfid *sid = (struct selfid *)host->topology_map;
nodeid_t nodeid = LOCAL_BUS;
/* Scan each node on the bus */
for (count = host->selfid_count; count; count--, sid++) {
if (sid->extended)
continue;
/* Scan each node on the bus */
for (count = host->selfid_count; count; count--, sid++) {
if (sid->extended)
continue;
if (!sid->link_active) {
nodeid++;
continue;
}
nodemgr_node_scan_one(hi, nodeid++, generation);
}
if (!sid->link_active) {
nodeid++;
continue;
}
nodemgr_node_scan_one(hi, nodeid++, generation);
}
}
/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */
static void nodemgr_suspend_ne(struct node_entry *ne)
{
struct class_device *cdev;
......@@ -1327,21 +1425,22 @@ static void nodemgr_suspend_ne(struct node_entry *ne)
NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
ne->in_limbo = 1;
device_create_file(&ne->device, &dev_attr_ne_in_limbo);
WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
down_write(&ne->device.bus->subsys.rwsem);
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
if (ud->ne != ne)
continue;
down_write(&ieee1394_bus_type.subsys.rwsem);
if (ud->device.driver &&
(!ud->device.driver->suspend ||
ud->device.driver->suspend(&ud->device, PMSG_SUSPEND)))
device_release_driver(&ud->device);
up_write(&ieee1394_bus_type.subsys.rwsem);
}
up_write(&ne->device.bus->subsys.rwsem);
up(&nodemgr_ud_class.sem);
}
......@@ -1353,45 +1452,47 @@ static void nodemgr_resume_ne(struct node_entry *ne)
ne->in_limbo = 0;
device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
down_read(&nodemgr_ud_class.subsys.rwsem);
down_read(&ne->device.bus->subsys.rwsem);
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
if (ud->ne != ne)
continue;
down_read(&ieee1394_bus_type.subsys.rwsem);
if (ud->device.driver && ud->device.driver->resume)
ud->device.driver->resume(&ud->device);
up_read(&ieee1394_bus_type.subsys.rwsem);
}
up_read(&ne->device.bus->subsys.rwsem);
up_read(&nodemgr_ud_class.subsys.rwsem);
up(&nodemgr_ud_class.sem);
HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
}
/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */
static void nodemgr_update_pdrv(struct node_entry *ne)
{
struct unit_directory *ud;
struct hpsb_protocol_driver *pdrv;
struct class_device *cdev;
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
if (ud->ne != ne || !ud->device.driver)
if (ud->ne != ne)
continue;
pdrv = container_of(ud->device.driver, struct hpsb_protocol_driver, driver);
if (pdrv->update && pdrv->update(ud)) {
down_write(&ud->device.bus->subsys.rwsem);
device_release_driver(&ud->device);
up_write(&ud->device.bus->subsys.rwsem);
down_write(&ieee1394_bus_type.subsys.rwsem);
if (ud->device.driver) {
pdrv = container_of(ud->device.driver,
struct hpsb_protocol_driver,
driver);
if (pdrv->update && pdrv->update(ud))
device_release_driver(&ud->device);
}
up_write(&ieee1394_bus_type.subsys.rwsem);
}
up(&nodemgr_ud_class.sem);
}
......@@ -1405,7 +1506,7 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
{
const u64 bc_addr = (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL);
quadlet_t bc_remote, bc_local;
int ret;
int error;
if (!ne->host->is_irm || ne->generation != generation ||
ne->nodeid == ne->host->node_id)
......@@ -1414,16 +1515,14 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
bc_local = cpu_to_be32(ne->host->csr.broadcast_channel);
/* Check if the register is implemented and 1394a compliant. */
ret = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote,
sizeof(bc_remote));
if (!ret && bc_remote & cpu_to_be32(0x80000000) &&
error = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote,
sizeof(bc_remote));
if (!error && bc_remote & cpu_to_be32(0x80000000) &&
bc_remote != bc_local)
hpsb_node_write(ne, bc_addr, &bc_local, sizeof(bc_local));
}
/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader because the
* calls to nodemgr_update_pdrv() and nodemgr_suspend_ne() here require it. */
static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation)
{
struct device *dev;
......@@ -1456,7 +1555,6 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
static void nodemgr_node_probe(struct host_info *hi, int generation)
{
struct hpsb_host *host = hi->host;
struct class *class = &nodemgr_ne_class;
struct class_device *cdev;
struct node_entry *ne;
......@@ -1469,18 +1567,18 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
* while probes are time-consuming. (Well, those probes need some
* improvement...) */
down_read(&class->subsys.rwsem);
list_for_each_entry(cdev, &class->children, node) {
down(&nodemgr_ne_class.sem);
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
if (!ne->needs_probe)
nodemgr_probe_ne(hi, ne, generation);
}
list_for_each_entry(cdev, &class->children, node) {
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
if (ne->needs_probe)
nodemgr_probe_ne(hi, ne, generation);
}
up_read(&class->subsys.rwsem);
up(&nodemgr_ne_class.sem);
/* If we had a bus reset while we were scanning the bus, it is
......@@ -1498,15 +1596,14 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
* just removed. */
if (generation == get_hpsb_generation(host))
bus_rescan_devices(&ieee1394_bus_type);
return;
if (bus_rescan_devices(&ieee1394_bus_type))
HPSB_DEBUG("bus_rescan_devices had an error");
}
static int nodemgr_send_resume_packet(struct hpsb_host *host)
{
struct hpsb_packet *packet;
int ret = 1;
int error = -ENOMEM;
packet = hpsb_make_phypacket(host,
EXTPHYPACKET_TYPE_RESUME |
......@@ -1514,12 +1611,12 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host)
if (packet) {
packet->no_waiter = 1;
packet->generation = get_hpsb_generation(host);
ret = hpsb_send_packet(packet);
error = hpsb_send_packet(packet);
}
if (ret)
if (error)
HPSB_WARN("fw-host%d: Failed to broadcast resume packet",
host->id);
return ret;
return error;
}
/* Perform a few high-level IRM responsibilities. */
......@@ -1692,19 +1789,18 @@ static int nodemgr_host_thread(void *__hi)
int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
{
struct class *class = &hpsb_host_class;
struct class_device *cdev;
struct hpsb_host *host;
int error = 0;
down_read(&class->subsys.rwsem);
list_for_each_entry(cdev, &class->children, node) {
down(&hpsb_host_class.sem);
list_for_each_entry(cdev, &hpsb_host_class.children, node) {
host = container_of(cdev, struct hpsb_host, class_dev);
if ((error = cb(host, __data)))
break;
}
up_read(&class->subsys.rwsem);
up(&hpsb_host_class.sem);
return error;
}
......@@ -1726,10 +1822,10 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
{
pkt->host = ne->host;
pkt->generation = ne->generation;
pkt->host = ne->host;
pkt->generation = ne->generation;
barrier();
pkt->node_id = ne->nodeid;
pkt->node_id = ne->nodeid;
}
int hpsb_node_write(struct node_entry *ne, u64 addr,
......@@ -1789,26 +1885,25 @@ static struct hpsb_highlevel nodemgr_highlevel = {
int init_ieee1394_nodemgr(void)
{
int ret;
int error;
ret = class_register(&nodemgr_ne_class);
if (ret < 0)
return ret;
error = class_register(&nodemgr_ne_class);
if (error)
return error;
ret = class_register(&nodemgr_ud_class);
if (ret < 0) {
error = class_register(&nodemgr_ud_class);
if (error) {
class_unregister(&nodemgr_ne_class);
return ret;
return error;
}
error = driver_register(&nodemgr_mid_layer_driver);
hpsb_register_highlevel(&nodemgr_highlevel);
return 0;
}
void cleanup_ieee1394_nodemgr(void)
{
hpsb_unregister_highlevel(&nodemgr_highlevel);
hpsb_unregister_highlevel(&nodemgr_highlevel);
class_unregister(&nodemgr_ud_class);
class_unregister(&nodemgr_ne_class);
......
......@@ -144,7 +144,12 @@ struct hpsb_protocol_driver {
struct device_driver driver;
};
int hpsb_register_protocol(struct hpsb_protocol_driver *driver);
int __hpsb_register_protocol(struct hpsb_protocol_driver *, struct module *);
static inline int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
{
return __hpsb_register_protocol(driver, THIS_MODULE);
}
void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver);
static inline int hpsb_node_entry_valid(struct node_entry *ne)
......
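
As context for the consolidated registration (see also the dv1394, eth1394, raw1394 and sbp2 hunks, which drop their .driver sub-struct initializers): a protocol driver now fills in only the hpsb_protocol_driver fields, and __hpsb_register_protocol() supplies the .bus, .owner and .name of the embedded device_driver. Below is a hedged, minimal sketch of such a caller; the module, its ID values and the include locations are illustrative assumptions, not part of this commit.

	#include <linux/module.h>
	#include "ieee1394_hotplug.h"	/* assumed header for ieee1394_device_id */
	#include "nodemgr.h"		/* assumed header for hpsb_protocol_driver */

	static struct ieee1394_device_id example_id_table[] = {
		{
			.match_flags  = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
			.specifier_id = 0x012345,	/* placeholder value */
			.version      = 0x000001,	/* placeholder value */
		},
		{}
	};
	MODULE_DEVICE_TABLE(ieee1394, example_id_table);

	/* Only .name, .id_table and (optionally) .update are filled in;
	 * bus, owner and driver name are set by __hpsb_register_protocol(). */
	static struct hpsb_protocol_driver example_driver = {
		.name     = "example1394",
		.id_table = example_id_table,
	};

	static int __init example_init(void)
	{
		/* expands to __hpsb_register_protocol(&example_driver, THIS_MODULE) */
		return hpsb_register_protocol(&example_driver);
	}

	static void __exit example_exit(void)
	{
		hpsb_unregister_protocol(&example_driver);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
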
......@@ -468,7 +468,6 @@ static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
char irq_buf[16];
quadlet_t buf;
int num_ports, i;
......@@ -586,11 +585,10 @@ static void ohci_initialize(struct ti_ohci *ohci)
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
buf = reg_read(ohci, OHCI1394_Version);
sprintf (irq_buf, "%d", ohci->dev->irq);
PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
"MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
(unsigned long long)pci_resource_start(ohci->dev, 0),
(unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
ohci->max_packet_size,
......@@ -3217,6 +3215,18 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
struct ti_ohci *ohci; /* shortcut to currently handled device */
resource_size_t ohci_base;
#ifdef CONFIG_PPC_PMAC
/* Necessary on some machines if ohci1394 was loaded/ unloaded before */
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
}
#endif /* CONFIG_PPC_PMAC */
if (pci_enable_device(dev))
FAIL(-ENXIO, "Failed to enable OHCI hardware");
pci_set_master(dev);
......@@ -3505,17 +3515,14 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
#endif
#ifdef CONFIG_PPC_PMAC
/* On UniNorth, power down the cable and turn off the chip
* clock when the module is removed to save power on
* laptops. Turning it back ON is done by the arch code when
* pci_enable_device() is called */
{
struct device_node* of_node;
/* On UniNorth, power down the cable and turn off the chip clock
* to save power on laptops */
if (machine_is(powermac)) {
struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
of_node = pci_device_to_OF_node(ohci->dev);
if (of_node) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
}
}
#endif /* CONFIG_PPC_PMAC */
......@@ -3529,59 +3536,102 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *of_node;
/* Re-enable 1394 */
of_node = pci_device_to_OF_node (pdev);
if (of_node)
pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
}
#endif /* CONFIG_PPC_PMAC */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return pci_enable_device(pdev);
}
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
int err;
struct ti_ohci *ohci = pci_get_drvdata(pdev);
printk(KERN_INFO "%s does not fully support suspend and resume yet\n",
OHCI1394_DRIVER_NAME);
if (!ohci) {
printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
OHCI1394_DRIVER_NAME);
return -ENXIO;
}
DBGMSG("suspend called");
/* Clear the async DMA contexts and stop using the controller */
hpsb_bus_reset(ohci->host);
/* See ohci1394_pci_remove() for comments on this sequence */
reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
reg_write(ohci, OHCI1394_BusOptions,
(reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
0x00ff0000);
reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
ohci_soft_reset(ohci);
err = pci_save_state(pdev);
if (err) {
printk(KERN_ERR "%s: pci_save_state failed with %d\n",
OHCI1394_DRIVER_NAME, err);
PRINT(KERN_ERR, "pci_save_state failed with %d", err);
return err;
}
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
#ifdef OHCI1394_DEBUG
if (err)
printk(KERN_DEBUG "%s: pci_set_power_state failed with %d\n",
OHCI1394_DRIVER_NAME, err);
#endif /* OHCI1394_DEBUG */
DBGMSG("pci_set_power_state failed with %d", err);
/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *of_node;
struct device_node *ofn = pci_device_to_OF_node(pdev);
/* Disable 1394 */
of_node = pci_device_to_OF_node (pdev);
if (of_node)
pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
if (ofn)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
}
#endif /* CONFIG_PPC_PMAC */
return 0;
}
static int ohci1394_pci_resume(struct pci_dev *pdev)
{
int err;
struct ti_ohci *ohci = pci_get_drvdata(pdev);
if (!ohci) {
printk(KERN_ERR "%s: tried to resume nonexisting host\n",
OHCI1394_DRIVER_NAME);
return -ENXIO;
}
DBGMSG("resume called");
/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(pdev);
if (ofn)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
#endif /* CONFIG_PPC_PMAC */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
return err;
}
/* See ohci1394_pci_probe() for comments on this sequence */
ohci_soft_reset(ohci);
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
mdelay(50);
ohci_initialize(ohci);
return 0;
}
#endif /* CONFIG_PM */
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
......
......@@ -1428,10 +1428,9 @@ static int __devinit add_card(struct pci_dev *dev,
struct i2c_algo_bit_data i2c_adapter_data;
error = -ENOMEM;
i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL);
i2c_ad = kmemdup(&bit_ops, sizeof(*i2c_ad), GFP_KERNEL);
if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
i2c_adapter_data = bit_data;
i2c_ad->algo_data = &i2c_adapter_data;
i2c_adapter_data.data = lynx;
......
......@@ -27,12 +27,12 @@ struct file_info {
struct hpsb_host *host;
struct list_head req_pending;
struct list_head req_complete;
struct list_head req_pending; /* protected by reqlists_lock */
struct list_head req_complete; /* protected by reqlists_lock */
spinlock_t reqlists_lock;
wait_queue_head_t wait_complete;
struct list_head addr_list;
struct list_head addr_list; /* protected by host_info_lock */
u8 __user *fcp_buffer;
......@@ -63,7 +63,7 @@ struct arm_addr {
u8 client_transactions;
u64 recvb;
u16 rec_length;
u8 *addr_space_buffer; /* accessed by read/write/lock */
u8 *addr_space_buffer; /* accessed by read/write/lock requests */
};
struct pending_request {
......@@ -79,7 +79,7 @@ struct pending_request {
struct host_info {
struct list_head list;
struct hpsb_host *host;
struct list_head file_info_list;
struct list_head file_info_list; /* protected by host_info_lock */
};
#endif /* IEEE1394_RAW1394_PRIVATE_H */
......@@ -99,6 +99,21 @@ static struct hpsb_address_ops arm_ops = {
static void queue_complete_cb(struct pending_request *req);
#include <asm/current.h>
static void print_old_iso_deprecation(void)
{
static pid_t p;
if (p == current->pid)
return;
p = current->pid;
printk(KERN_WARNING "raw1394: WARNING - Program \"%s\" uses unsupported"
" isochronous request types which will be removed in a next"
" kernel release\n", current->comm);
printk(KERN_WARNING "raw1394: Update your software to use libraw1394's"
" newer interface\n");
}
static struct pending_request *__alloc_pending_request(gfp_t flags)
{
struct pending_request *req;
......@@ -2292,6 +2307,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
return sizeof(struct raw1394_request);
case RAW1394_REQ_ISO_SEND:
print_old_iso_deprecation();
return handle_iso_send(fi, req, node);
case RAW1394_REQ_ARM_REGISTER:
......@@ -2310,6 +2326,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
return reset_notification(fi, req);
case RAW1394_REQ_ISO_LISTEN:
print_old_iso_deprecation();
handle_iso_listen(fi, req);
return sizeof(struct raw1394_request);
......@@ -2970,12 +2987,8 @@ static struct ieee1394_device_id raw1394_id_table[] = {
MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
static struct hpsb_protocol_driver raw1394_driver = {
.name = "raw1394 Driver",
.name = "raw1394",
.id_table = raw1394_id_table,
.driver = {
.name = "raw1394",
.bus = &ieee1394_bus_type,
},
};
/******************************************************************************/
......
......@@ -29,13 +29,26 @@
* driver. It also registers as a SCSI lower-level driver in order to accept
* SCSI commands for transport using SBP-2.
*
* You may access any attached SBP-2 storage devices as if they were SCSI
* devices (e.g. mount /dev/sda1, fdisk, mkfs, etc.).
* You may access any attached SBP-2 (usually storage devices) as regular
* SCSI devices. E.g. mount /dev/sda1, fdisk, mkfs, etc..
*
* Current Issues:
* See http://www.t10.org/drafts.htm#sbp2 for the final draft of the SBP-2
* specification and for where to purchase the official standard.
*
* - Error Handling: SCSI aborts and bus reset requests are handled somewhat
* but the code needs additional debugging.
* TODO:
* - look into possible improvements of the SCSI error handlers
* - handle Unit_Characteristics.mgt_ORB_timeout and .ORB_size
* - handle Logical_Unit_Number.ordered
* - handle src == 1 in status blocks
* - reimplement the DMA mapping in absence of physical DMA so that
* bus_to_virt is no longer required
* - debug the handling of absent physical DMA
* - replace CONFIG_IEEE1394_SBP2_PHYS_DMA by automatic detection
* (this is easy but depends on the previous two TODO items)
* - make the parameter serialize_io configurable per device
* - move all requests to fetch agent registers into non-atomic context,
* replace all usages of sbp2util_node_write_no_wait by true transactions
* Grep for inline FIXME comments below.
*/
#include <linux/blkdev.h>
......@@ -49,7 +62,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
......@@ -98,20 +110,20 @@
* (probably due to PCI latency/throughput issues with the part). You can
* bump down the speed if you are running into problems.
*/
static int max_speed = IEEE1394_SPEED_MAX;
module_param(max_speed, int, 0644);
MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)");
static int sbp2_max_speed = IEEE1394_SPEED_MAX;
module_param_named(max_speed, sbp2_max_speed, int, 0644);
MODULE_PARM_DESC(max_speed, "Force max speed "
"(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)");
/*
* Set serialize_io to 1 if you'd like only one scsi command sent
* down to us at a time (debugging). This might be necessary for very
* badly behaved sbp2 devices.
*
* TODO: Make this configurable per device.
*/
static int serialize_io = 1;
module_param(serialize_io, int, 0444);
MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)");
static int sbp2_serialize_io = 1;
module_param_named(serialize_io, sbp2_serialize_io, int, 0444);
MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers "
"(default = 1, faster = 0)");
/*
* Bump up max_sectors if you'd like to support very large sized
......@@ -121,10 +133,10 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
* the Oxsemi sbp2 chipsets have no problems supporting very large
* transfer sizes.
*/
static int max_sectors = SBP2_MAX_SECTORS;
module_param(max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
__stringify(SBP2_MAX_SECTORS) ")");
static int sbp2_max_sectors = SBP2_MAX_SECTORS;
module_param_named(max_sectors, sbp2_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
"(default = " __stringify(SBP2_MAX_SECTORS) ")");
/*
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should
......@@ -139,9 +151,10 @@ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
* concurrent logins. Depending on firmware, four or two concurrent logins
* are possible on OXFW911 and newer Oxsemi bridges.
*/
static int exclusive_login = 1;
module_param(exclusive_login, int, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
static int sbp2_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_exclusive_login, int, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
"(default = 1)");
/*
* If any of the following workarounds is required for your device to work,
......@@ -179,123 +192,123 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
", or a combination)");
/*
* Export information about protocols/devices supported by this driver.
*/
static struct ieee1394_device_id sbp2_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
.version = SBP2_SW_VERSION_ENTRY & 0xffffff},
{}
};
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
/*
* Debug levels, configured via kernel config, or enable here.
*/
#define CONFIG_IEEE1394_SBP2_DEBUG 0
/* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
/* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
/* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
/* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
/* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
#define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
static u32 global_outstanding_command_orbs = 0;
#define outstanding_orb_incr global_outstanding_command_orbs++
#define outstanding_orb_decr global_outstanding_command_orbs--
#else
#define SBP2_ORB_DEBUG(fmt, args...) do {} while (0)
#define outstanding_orb_incr do {} while (0)
#define outstanding_orb_decr do {} while (0)
#endif
#ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
#define SBP2_DMA_ALLOC(fmt, args...) \
HPSB_ERR("sbp2(%s)alloc(%d): "fmt, __FUNCTION__, \
++global_outstanding_dmas, ## args)
#define SBP2_DMA_FREE(fmt, args...) \
HPSB_ERR("sbp2(%s)free(%d): "fmt, __FUNCTION__, \
--global_outstanding_dmas, ## args)
static u32 global_outstanding_dmas = 0;
#else
#define SBP2_DMA_ALLOC(fmt, args...) do {} while (0)
#define SBP2_DMA_FREE(fmt, args...) do {} while (0)
#endif
#if CONFIG_IEEE1394_SBP2_DEBUG >= 2
#define SBP2_DEBUG(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
#define SBP2_INFO(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
#elif CONFIG_IEEE1394_SBP2_DEBUG == 1
#define SBP2_DEBUG(fmt, args...) HPSB_DEBUG("sbp2: "fmt, ## args)
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
#else
#define SBP2_DEBUG(fmt, args...) do {} while (0)
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
#endif
#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
#define SBP2_DEBUG_ENTER() SBP2_DEBUG("%s", __FUNCTION__)
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
/*
* Globals
*/
static void sbp2scsi_complete_all_commands(struct sbp2_lu *, u32);
static void sbp2scsi_complete_command(struct sbp2_lu *, u32, struct scsi_cmnd *,
void (*)(struct scsi_cmnd *));
static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *);
static int sbp2_start_device(struct sbp2_lu *);
static void sbp2_remove_device(struct sbp2_lu *);
static int sbp2_login_device(struct sbp2_lu *);
static int sbp2_reconnect_device(struct sbp2_lu *);
static int sbp2_logout_device(struct sbp2_lu *);
static void sbp2_host_reset(struct hpsb_host *);
static int sbp2_handle_status_write(struct hpsb_host *, int, int, quadlet_t *,
u64, size_t, u16);
static int sbp2_agent_reset(struct sbp2_lu *, int);
static void sbp2_parse_unit_directory(struct sbp2_lu *,
struct unit_directory *);
static int sbp2_set_busy_timeout(struct sbp2_lu *);
static int sbp2_max_speed_and_size(struct sbp2_lu *);
static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
u32 status);
static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
u32 scsi_status, struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *));
static struct scsi_host_template scsi_driver_template;
static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
static void sbp2_host_reset(struct hpsb_host *host);
static int sbp2_probe(struct device *dev);
static int sbp2_remove(struct device *dev);
static int sbp2_update(struct unit_directory *ud);
static struct hpsb_highlevel sbp2_highlevel = {
.name = SBP2_DEVICE_NAME,
.host_reset = sbp2_host_reset,
.name = SBP2_DEVICE_NAME,
.host_reset = sbp2_host_reset,
};
static struct hpsb_address_ops sbp2_ops = {
.write = sbp2_handle_status_write
.write = sbp2_handle_status_write
};
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
u64, size_t, u16);
static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
size_t, u16);
static struct hpsb_address_ops sbp2_physdma_ops = {
.read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write,
.read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write,
};
#endif
/*
* Interface to driver core and IEEE 1394 core
*/
static struct ieee1394_device_id sbp2_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
.version = SBP2_SW_VERSION_ENTRY & 0xffffff},
{}
};
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
static int sbp2_probe(struct device *);
static int sbp2_remove(struct device *);
static int sbp2_update(struct unit_directory *);
static struct hpsb_protocol_driver sbp2_driver = {
.name = "SBP2 Driver",
.name = SBP2_DEVICE_NAME,
.id_table = sbp2_id_table,
.update = sbp2_update,
.driver = {
.name = SBP2_DEVICE_NAME,
.bus = &ieee1394_bus_type,
.probe = sbp2_probe,
.remove = sbp2_remove,
},
};
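/*
 * Sketch of how this driver is hooked into the ieee1394 core (an assumption
 * for illustration; the real registration happens in the module init code
 * further down, and hpsb_register_protocol() is assumed here as added by the
 * driver-registration consolidation in this series): the highlevel provides
 * the host_reset hook, the protocol driver provides the id_table and
 * probe/remove callbacks.
 */
#if 0	/* example only */
static int __init sbp2_example_register(void)
{
	hpsb_register_highlevel(&sbp2_highlevel);
	return hpsb_register_protocol(&sbp2_driver);
}
#endif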
/*
* Interface to SCSI core
*/
static int sbp2scsi_queuecommand(struct scsi_cmnd *,
void (*)(struct scsi_cmnd *));
static int sbp2scsi_abort(struct scsi_cmnd *);
static int sbp2scsi_reset(struct scsi_cmnd *);
static int sbp2scsi_slave_alloc(struct scsi_device *);
static int sbp2scsi_slave_configure(struct scsi_device *);
static void sbp2scsi_slave_destroy(struct scsi_device *);
static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *,
struct device_attribute *, char *);
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
&dev_attr_ieee1394_id,
NULL
};
static struct scsi_host_template sbp2_shost_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = SBP2_DEVICE_NAME,
.queuecommand = sbp2scsi_queuecommand,
.eh_abort_handler = sbp2scsi_abort,
.eh_device_reset_handler = sbp2scsi_reset,
.slave_alloc = sbp2scsi_slave_alloc,
.slave_configure = sbp2scsi_slave_configure,
.slave_destroy = sbp2scsi_slave_destroy,
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.cmd_per_lun = SBP2_MAX_CMDS,
.can_queue = SBP2_MAX_CMDS,
.emulated = 1,
.sdev_attrs = sbp2_sysfs_sdev_attrs,
};
/*
* List of devices with known bugs.
*
......@@ -363,8 +376,6 @@ static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
for (length = (length >> 2); length--; )
temp[length] = be32_to_cpu(temp[length]);
return;
}
/*
......@@ -376,8 +387,6 @@ static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
for (length = (length >> 2); length--; )
temp[length] = cpu_to_be32(temp[length]);
return;
}
#else /* BIG_ENDIAN */
/* Why waste the cpu cycles? */
......@@ -385,344 +394,246 @@ static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
#define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0)
#endif
#ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
/*
* Debug packet dump routine. Length is in bytes.
*/
static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
u32 dump_phys_addr)
{
int i;
unsigned char *dump = buffer;
if (!dump || !length || !dump_name)
return;
if (dump_phys_addr)
printk("[%s, 0x%x]", dump_name, dump_phys_addr);
else
printk("[%s]", dump_name);
for (i = 0; i < length; i++) {
if (i > 0x3f) {
printk("\n ...");
break;
}
if ((i & 0x3) == 0)
printk(" ");
if ((i & 0xf) == 0)
printk("\n ");
printk("%02x ", (int)dump[i]);
}
printk("\n");
return;
}
#else
#define sbp2util_packet_dump(w,x,y,z) do {} while (0)
#endif
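/*
 * Usage sketch: with CONFIG_IEEE1394_SBP2_PACKET_DUMP enabled, an ORB can be
 * dumped just before it is handed to the target, for example:
 */
#if 0	/* example only */
	sbp2util_packet_dump(lu->login_orb, sizeof(struct sbp2_login_orb),
			     "sbp2 login orb", lu->login_orb_dma);
#endif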
static DECLARE_WAIT_QUEUE_HEAD(access_wq);
static DECLARE_WAIT_QUEUE_HEAD(sbp2_access_wq);
/*
* Waits for completion of an SBP-2 access request.
* Returns nonzero if timed out or prematurely interrupted.
*/
static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id,
int timeout)
static int sbp2util_access_timeout(struct sbp2_lu *lu, int timeout)
{
long leftover = wait_event_interruptible_timeout(
access_wq, scsi_id->access_complete, timeout);
long leftover;
scsi_id->access_complete = 0;
leftover = wait_event_interruptible_timeout(
sbp2_access_wq, lu->access_complete, timeout);
lu->access_complete = 0;
return leftover <= 0;
}
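/*
 * Sketch of the wake-up side (for illustration; the real code lives in the
 * status write handler): the status FIFO handler marks the access complete
 * and wakes the waiter above.
 */
#if 0	/* example only */
static void sbp2_example_complete_access(struct sbp2_lu *lu)
{
	lu->access_complete = 1;
	wake_up(&sbp2_access_wq);
}
#endif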
/* Frees an allocated packet */
static void sbp2_free_packet(struct hpsb_packet *packet)
static void sbp2_free_packet(void *packet)
{
hpsb_free_tlabel(packet);
hpsb_free_packet(packet);
}
/* This is much like hpsb_node_write(), except it ignores the response
* subaction and returns immediately. Can be used from interrupts.
/*
* This is much like hpsb_node_write(), except it ignores the response
* subaction and returns immediately. Can be used from atomic context.
*/
static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length)
quadlet_t *buf, size_t len)
{
struct hpsb_packet *packet;
packet = hpsb_make_writepacket(ne->host, ne->nodeid,
addr, buffer, length);
packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buf, len);
if (!packet)
return -ENOMEM;
hpsb_set_packet_complete_task(packet,
(void (*)(void *))sbp2_free_packet,
packet);
hpsb_set_packet_complete_task(packet, sbp2_free_packet, packet);
hpsb_node_fill_packet(ne, packet);
if (hpsb_send_packet(packet) < 0) {
sbp2_free_packet(packet);
return -EIO;
}
return 0;
}
static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
u64 offset, quadlet_t *data, size_t len)
static void sbp2util_notify_fetch_agent(struct sbp2_lu *lu, u64 offset,
quadlet_t *data, size_t len)
{
/*
* There is a small window after a bus reset within which the node
* entry's generation is current but the reconnect wasn't completed.
*/
if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET))
/* There is a small window after a bus reset within which the node
* entry's generation is current but the reconnect wasn't completed. */
if (unlikely(atomic_read(&lu->state) == SBP2LU_STATE_IN_RESET))
return;
if (hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr + offset,
if (hpsb_node_write(lu->ne, lu->command_block_agent_addr + offset,
data, len))
SBP2_ERR("sbp2util_notify_fetch_agent failed.");
/*
* Now accept new SCSI commands, unless a bus reset happened during
* hpsb_node_write.
*/
if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET))
scsi_unblock_requests(scsi_id->scsi_host);
/* Now accept new SCSI commands, unless a bus reset happened during

* hpsb_node_write. */
if (likely(atomic_read(&lu->state) != SBP2LU_STATE_IN_RESET))
scsi_unblock_requests(lu->shost);
}
static void sbp2util_write_orb_pointer(struct work_struct *work)
{
struct scsi_id_instance_data *scsi_id =
container_of(work, struct scsi_id_instance_data,
protocol_work.work);
struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
quadlet_t data[2];
data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
data[1] = scsi_id->last_orb_dma;
data[0] = ORB_SET_NODE_ID(lu->hi->host->node_id);
data[1] = lu->last_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
sbp2util_notify_fetch_agent(lu, SBP2_ORB_POINTER_OFFSET, data, 8);
}
static void sbp2util_write_doorbell(struct work_struct *work)
{
struct scsi_id_instance_data *scsi_id =
container_of(work, struct scsi_id_instance_data,
protocol_work.work);
sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
sbp2util_notify_fetch_agent(lu, SBP2_DOORBELL_OFFSET, NULL, 4);
}
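/*
 * Sketch (an assumption about the command submission path) of how the two
 * work items above are queued: the ORB pointer write or the doorbell is
 * prepared on lu->protocol_work and handed to the shared workqueue, since
 * hpsb_node_write() sleeps and must not be called from atomic context.
 */
#if 0	/* example only */
static void sbp2_example_kick_fetch_agent(struct sbp2_lu *lu, int use_doorbell)
{
	if (use_doorbell)
		PREPARE_WORK(&lu->protocol_work, sbp2util_write_doorbell);
	else
		PREPARE_WORK(&lu->protocol_work, sbp2util_write_orb_pointer);
	schedule_work(&lu->protocol_work);
}
#endif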
/*
* This function is called to create a pool of command orbs used for
* command processing. It is called when a new sbp2 device is detected.
*/
static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
int i;
unsigned long flags, orbs;
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
for (i = 0; i < orbs; i++) {
command = kzalloc(sizeof(*command), GFP_ATOMIC);
if (!command) {
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock,
flags);
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd) {
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return -ENOMEM;
}
command->command_orb_dma =
pci_map_single(hi->host->pdev, &command->command_orb,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
SBP2_DMA_ALLOC("single command orb DMA");
command->sge_dma =
pci_map_single(hi->host->pdev,
&command->scatter_gather_element,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
SBP2_DMA_ALLOC("scatter_gather_element");
INIT_LIST_HEAD(&command->list);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
cmd->command_orb_dma = dma_map_single(&hi->host->device,
&cmd->command_orb,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
cmd->sge_dma = dma_map_single(&hi->host->device,
&cmd->scatter_gather_element,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
INIT_LIST_HEAD(&cmd->list);
list_add_tail(&cmd->list, &lu->cmd_orb_completed);
}
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return 0;
}
/*
* This function is called to delete a pool of command orbs.
*/
static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
{
struct hpsb_host *host = scsi_id->hi->host;
struct hpsb_host *host = lu->hi->host;
struct list_head *lh, *next;
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
command = list_entry(lh, struct sbp2_command_info, list);
/* Release our generic DMA's */
pci_unmap_single(host->pdev, command->command_orb_dma,
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
if (!list_empty(&lu->cmd_orb_completed))
list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
cmd = list_entry(lh, struct sbp2_command_info, list);
dma_unmap_single(&host->device, cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
SBP2_DMA_FREE("single command orb DMA");
pci_unmap_single(host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
SBP2_DMA_FREE("scatter_gather_element");
kfree(command);
DMA_TO_DEVICE);
dma_unmap_single(&host->device, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
kfree(cmd);
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return;
}
/*
* This function finds the sbp2_command for a given outstanding command
* orb. Only looks at the in-use list.
* Finds the sbp2_command for a given outstanding command ORB.
* Only looks at the in-use list.
*/
static struct sbp2_command_info *sbp2util_find_command_for_orb(
struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
struct sbp2_lu *lu, dma_addr_t orb)
{
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
if (command->command_orb_dma == orb) {
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return command;
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
if (!list_empty(&lu->cmd_orb_inuse))
list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
if (cmd->command_orb_dma == orb) {
spin_unlock_irqrestore(
&lu->cmd_orb_lock, flags);
return cmd;
}
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return NULL;
}
/*
* This function finds the sbp2_command for a given outstanding SCpnt.
* Only looks at the inuse list.
* Must be called with scsi_id->sbp2_command_orb_lock held.
* Finds the sbp2_command for a given outstanding SCpnt.
* Only looks at the in-use list.
* Must be called with lu->cmd_orb_lock held.
*/
static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
struct scsi_id_instance_data *scsi_id, void *SCpnt)
struct sbp2_lu *lu, void *SCpnt)
{
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
if (!list_empty(&scsi_id->sbp2_command_orb_inuse))
list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list)
if (command->Current_SCpnt == SCpnt)
return command;
if (!list_empty(&lu->cmd_orb_inuse))
list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
if (cmd->Current_SCpnt == SCpnt)
return cmd;
return NULL;
}
/*
* This function allocates a command orb used to send a scsi command.
*/
static struct sbp2_command_info *sbp2util_allocate_command_orb(
struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *Current_SCpnt,
void (*Current_done)(struct scsi_cmnd *))
struct sbp2_lu *lu,
struct scsi_cmnd *Current_SCpnt,
void (*Current_done)(struct scsi_cmnd *))
{
struct list_head *lh;
struct sbp2_command_info *command = NULL;
struct sbp2_command_info *cmd = NULL;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
lh = scsi_id->sbp2_command_orb_completed.next;
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
if (!list_empty(&lu->cmd_orb_completed)) {
lh = lu->cmd_orb_completed.next;
list_del(lh);
command = list_entry(lh, struct sbp2_command_info, list);
command->Current_done = Current_done;
command->Current_SCpnt = Current_SCpnt;
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
} else {
cmd = list_entry(lh, struct sbp2_command_info, list);
cmd->Current_done = Current_done;
cmd->Current_SCpnt = Current_SCpnt;
list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
} else
SBP2_ERR("%s: no orbs available", __FUNCTION__);
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return command;
}
/* Free our DMA's */
static void sbp2util_free_command_dma(struct sbp2_command_info *command)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
struct hpsb_host *host;
if (!scsi_id) {
SBP2_ERR("%s: scsi_id == NULL", __FUNCTION__);
return;
}
host = scsi_id->ud->ne->host;
if (command->cmd_dma) {
if (command->dma_type == CMD_DMA_SINGLE) {
pci_unmap_single(host->pdev, command->cmd_dma,
command->dma_size, command->dma_dir);
SBP2_DMA_FREE("single bulk");
} else if (command->dma_type == CMD_DMA_PAGE) {
pci_unmap_page(host->pdev, command->cmd_dma,
command->dma_size, command->dma_dir);
SBP2_DMA_FREE("single page");
} /* XXX: Check for CMD_DMA_NONE bug */
command->dma_type = CMD_DMA_NONE;
command->cmd_dma = 0;
}
if (command->sge_buffer) {
pci_unmap_sg(host->pdev, command->sge_buffer,
command->dma_size, command->dma_dir);
SBP2_DMA_FREE("scatter list");
command->sge_buffer = NULL;
}
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return cmd;
}
/*
* This function moves a command to the completed orb list.
* Must be called with scsi_id->sbp2_command_orb_lock held.
* Unmaps the DMAs of a command and moves the command to the completed ORB list.
* Must be called with lu->cmd_orb_lock held.
*/
static void sbp2util_mark_command_completed(
struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command)
static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
struct sbp2_command_info *cmd)
{
list_del(&command->list);
sbp2util_free_command_dma(command);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
struct hpsb_host *host = lu->ud->ne->host;
if (cmd->cmd_dma) {
if (cmd->dma_type == CMD_DMA_SINGLE)
dma_unmap_single(&host->device, cmd->cmd_dma,
cmd->dma_size, cmd->dma_dir);
else if (cmd->dma_type == CMD_DMA_PAGE)
dma_unmap_page(&host->device, cmd->cmd_dma,
cmd->dma_size, cmd->dma_dir);
/* XXX: Check for CMD_DMA_NONE bug */
cmd->dma_type = CMD_DMA_NONE;
cmd->cmd_dma = 0;
}
if (cmd->sge_buffer) {
dma_unmap_sg(&host->device, cmd->sge_buffer,
cmd->dma_size, cmd->dma_dir);
cmd->sge_buffer = NULL;
}
list_move_tail(&cmd->list, &lu->cmd_orb_completed);
}
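/*
 * Illustrative lifecycle of a command ORB using the helpers above (a sketch,
 * not the actual queuecommand path): take an ORB from the completed pool,
 * use it, then return it under the same lock.
 */
#if 0	/* example only */
static void sbp2_example_orb_lifecycle(struct sbp2_lu *lu,
				       struct scsi_cmnd *SCpnt,
				       void (*done)(struct scsi_cmnd *))
{
	struct sbp2_command_info *cmd;
	unsigned long flags;

	cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
	if (!cmd)
		return;		/* pool exhausted */
	/* ... fill in cmd->command_orb and hand it to the fetch agent ... */
	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
	sbp2util_mark_command_completed(lu, cmd);
	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
}
#endif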
/*
* Is scsi_id valid? Is the 1394 node still present?
* Is lu valid? Is the 1394 node still present?
*/
static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id)
static inline int sbp2util_node_is_available(struct sbp2_lu *lu)
{
return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
return lu && lu->ne && !lu->ne->in_limbo;
}
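/*
 * Usage sketch (an assumption; the actual check sits in the SCSI entry
 * points): commands for a node that dropped off the bus are completed
 * immediately with DID_NO_CONNECT instead of being queued.
 */
#if 0	/* example only */
	if (unlikely(!sbp2util_node_is_available(lu))) {
		SCpnt->result = DID_NO_CONNECT << 16;
		done(SCpnt);
		return 0;
	}
#endif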
/*********************************************
* IEEE-1394 core driver stack related section
*********************************************/
static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
static int sbp2_probe(struct device *dev)
{
struct unit_directory *ud;
struct scsi_id_instance_data *scsi_id;
SBP2_DEBUG_ENTER();
struct sbp2_lu *lu;
ud = container_of(dev, struct unit_directory, device);
......@@ -731,67 +642,58 @@ static int sbp2_probe(struct device *dev)
if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
return -ENODEV;
scsi_id = sbp2_alloc_device(ud);
if (!scsi_id)
lu = sbp2_alloc_device(ud);
if (!lu)
return -ENOMEM;
sbp2_parse_unit_directory(scsi_id, ud);
return sbp2_start_device(scsi_id);
sbp2_parse_unit_directory(lu, ud);
return sbp2_start_device(lu);
}
static int sbp2_remove(struct device *dev)
{
struct unit_directory *ud;
struct scsi_id_instance_data *scsi_id;
struct sbp2_lu *lu;
struct scsi_device *sdev;
SBP2_DEBUG_ENTER();
ud = container_of(dev, struct unit_directory, device);
scsi_id = ud->device.driver_data;
if (!scsi_id)
lu = ud->device.driver_data;
if (!lu)
return 0;
if (scsi_id->scsi_host) {
if (lu->shost) {
/* Get rid of enqueued commands if there is no chance to
* send them. */
if (!sbp2util_node_is_available(scsi_id))
sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
/* scsi_remove_device() will trigger shutdown functions of SCSI
if (!sbp2util_node_is_available(lu))
sbp2scsi_complete_all_commands(lu, DID_NO_CONNECT);
/* scsi_remove_device() may trigger shutdown functions of SCSI
* highlevel drivers which would deadlock if blocked. */
atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN);
scsi_unblock_requests(scsi_id->scsi_host);
atomic_set(&lu->state, SBP2LU_STATE_IN_SHUTDOWN);
scsi_unblock_requests(lu->shost);
}
sdev = scsi_id->sdev;
sdev = lu->sdev;
if (sdev) {
scsi_id->sdev = NULL;
lu->sdev = NULL;
scsi_remove_device(sdev);
}
sbp2_logout_device(scsi_id);
sbp2_remove_device(scsi_id);
sbp2_logout_device(lu);
sbp2_remove_device(lu);
return 0;
}
static int sbp2_update(struct unit_directory *ud)
{
struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
SBP2_DEBUG_ENTER();
struct sbp2_lu *lu = ud->device.driver_data;
if (sbp2_reconnect_device(scsi_id)) {
if (sbp2_reconnect_device(lu)) {
/* Reconnect has failed. Perhaps we didn't reconnect fast
* enough. Try a regular login, but first log out just in
* case of any weirdness. */
sbp2_logout_device(lu);
/*
* Ok, reconnect has failed. Perhaps we didn't
* reconnect fast enough. Try doing a regular login, but
* first do a logout just in case of any weirdness.
*/
sbp2_logout_device(scsi_id);
if (sbp2_login_device(scsi_id)) {
if (sbp2_login_device(lu)) {
/* Login failed too, just fail, and the backend
* will call our sbp2_remove for us */
SBP2_ERR("Failed to reconnect to sbp2 device!");
......@@ -799,69 +701,59 @@ static int sbp2_update(struct unit_directory *ud)
}
}
/* Set max retries to something large on the device. */
sbp2_set_busy_timeout(scsi_id);
sbp2_set_busy_timeout(lu);
sbp2_agent_reset(lu, 1);
sbp2_max_speed_and_size(lu);
/* Do a SBP-2 fetch agent reset. */
sbp2_agent_reset(scsi_id, 1);
/* Get the max speed and packet size that we can use. */
sbp2_max_speed_and_size(scsi_id);
/* Complete any pending commands with busy (so they get
* retried) and remove them from our queue
*/
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
/* Complete any pending commands with busy (so they get retried)
* and remove them from our queue. */
sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
/* Accept new commands unless there was another bus reset in the
* meantime. */
if (hpsb_node_entry_valid(scsi_id->ne)) {
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
scsi_unblock_requests(scsi_id->scsi_host);
if (hpsb_node_entry_valid(lu->ne)) {
atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
scsi_unblock_requests(lu->shost);
}
return 0;
}
/* This function is called by sbp2_probe for each new device. We now
* allocate one scsi host for each scsi_id (unit directory). */
static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)
static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
{
struct sbp2scsi_host_info *hi;
struct Scsi_Host *scsi_host = NULL;
struct scsi_id_instance_data *scsi_id = NULL;
struct sbp2_fwhost_info *hi;
struct Scsi_Host *shost = NULL;
struct sbp2_lu *lu = NULL;
SBP2_DEBUG_ENTER();
scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL);
if (!scsi_id) {
SBP2_ERR("failed to create scsi_id");
lu = kzalloc(sizeof(*lu), GFP_KERNEL);
if (!lu) {
SBP2_ERR("failed to create lu");
goto failed_alloc;
}
scsi_id->ne = ud->ne;
scsi_id->ud = ud;
scsi_id->speed_code = IEEE1394_SPEED_100;
scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock);
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
lu->ne = ud->ne;
lu->ud = ud;
lu->speed_code = IEEE1394_SPEED_100;
lu->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
lu->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
INIT_LIST_HEAD(&lu->cmd_orb_inuse);
INIT_LIST_HEAD(&lu->cmd_orb_completed);
INIT_LIST_HEAD(&lu->lu_list);
spin_lock_init(&lu->cmd_orb_lock);
atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
INIT_WORK(&lu->protocol_work, NULL);
ud->device.driver_data = scsi_id;
ud->device.driver_data = lu;
hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
if (!hi) {
hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi));
hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host,
sizeof(*hi));
if (!hi) {
SBP2_ERR("failed to allocate hostinfo");
goto failed_alloc;
}
SBP2_DEBUG("sbp2_alloc_device: allocated hostinfo");
hi->host = ud->ne->host;
INIT_LIST_HEAD(&hi->scsi_ids);
INIT_LIST_HEAD(&hi->logical_units);
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
/* Handle data movement if physical dma is not
......@@ -881,9 +773,9 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
goto failed_alloc;
}
scsi_id->hi = hi;
lu->hi = hi;
list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
list_add_tail(&lu->lu_list, &hi->logical_units);
/* Register the status FIFO address range. We could use the same FIFO
* for targets at different nodes. However we need different FIFOs per
......@@ -893,302 +785,214 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
* then be performed as unified transactions. This slightly reduces
* bandwidth usage, and some Prolific based devices seem to require it.
*/
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
lu->status_fifo_addr = hpsb_allocate_and_register_addrspace(
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
if (scsi_id->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
if (lu->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
}
/* Register our host with the SCSI stack. */
scsi_host = scsi_host_alloc(&scsi_driver_template,
sizeof(unsigned long));
if (!scsi_host) {
shost = scsi_host_alloc(&sbp2_shost_template, sizeof(unsigned long));
if (!shost) {
SBP2_ERR("failed to register scsi host");
goto failed_alloc;
}
scsi_host->hostdata[0] = (unsigned long)scsi_id;
shost->hostdata[0] = (unsigned long)lu;
if (!scsi_add_host(scsi_host, &ud->device)) {
scsi_id->scsi_host = scsi_host;
return scsi_id;
if (!scsi_add_host(shost, &ud->device)) {
lu->shost = shost;
return lu;
}
SBP2_ERR("failed to add scsi host");
scsi_host_put(scsi_host);
scsi_host_put(shost);
failed_alloc:
sbp2_remove_device(scsi_id);
sbp2_remove_device(lu);
return NULL;
}
static void sbp2_host_reset(struct hpsb_host *host)
{
struct sbp2scsi_host_info *hi;
struct scsi_id_instance_data *scsi_id;
struct sbp2_fwhost_info *hi;
struct sbp2_lu *lu;
hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
if (!hi)
return;
list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list)
if (likely(atomic_read(&scsi_id->state) !=
list_for_each_entry(lu, &hi->logical_units, lu_list)
if (likely(atomic_read(&lu->state) !=
SBP2LU_STATE_IN_SHUTDOWN)) {
atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET);
scsi_block_requests(scsi_id->scsi_host);
atomic_set(&lu->state, SBP2LU_STATE_IN_RESET);
scsi_block_requests(lu->shost);
}
}
/*
* This function is where we first pull the node unique ids, and then
* allocate memory and register a SBP-2 device.
*/
static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_start_device(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
int error;
SBP2_DEBUG_ENTER();
/* Login FIFO DMA */
scsi_id->login_response =
pci_alloc_consistent(hi->host->pdev,
lu->login_response = dma_alloc_coherent(&hi->host->device,
sizeof(struct sbp2_login_response),
&scsi_id->login_response_dma);
if (!scsi_id->login_response)
&lu->login_response_dma, GFP_KERNEL);
if (!lu->login_response)
goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
/* Query logins ORB DMA */
scsi_id->query_logins_orb =
pci_alloc_consistent(hi->host->pdev,
lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
sizeof(struct sbp2_query_logins_orb),
&scsi_id->query_logins_orb_dma);
if (!scsi_id->query_logins_orb)
&lu->query_logins_orb_dma, GFP_KERNEL);
if (!lu->query_logins_orb)
goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for query logins ORB");
/* Query logins response DMA */
scsi_id->query_logins_response =
pci_alloc_consistent(hi->host->pdev,
lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
sizeof(struct sbp2_query_logins_response),
&scsi_id->query_logins_response_dma);
if (!scsi_id->query_logins_response)
&lu->query_logins_response_dma, GFP_KERNEL);
if (!lu->query_logins_response)
goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for query logins response");
/* Reconnect ORB DMA */
scsi_id->reconnect_orb =
pci_alloc_consistent(hi->host->pdev,
lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
sizeof(struct sbp2_reconnect_orb),
&scsi_id->reconnect_orb_dma);
if (!scsi_id->reconnect_orb)
&lu->reconnect_orb_dma, GFP_KERNEL);
if (!lu->reconnect_orb)
goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
/* Logout ORB DMA */
scsi_id->logout_orb =
pci_alloc_consistent(hi->host->pdev,
lu->logout_orb = dma_alloc_coherent(&hi->host->device,
sizeof(struct sbp2_logout_orb),
&scsi_id->logout_orb_dma);
if (!scsi_id->logout_orb)
&lu->logout_orb_dma, GFP_KERNEL);
if (!lu->logout_orb)
goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
/* Login ORB DMA */
scsi_id->login_orb =
pci_alloc_consistent(hi->host->pdev,
lu->login_orb = dma_alloc_coherent(&hi->host->device,
sizeof(struct sbp2_login_orb),
&scsi_id->login_orb_dma);
if (!scsi_id->login_orb)
&lu->login_orb_dma, GFP_KERNEL);
if (!lu->login_orb)
goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for login ORB");
SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
/*
* Create our command orb pool
*/
if (sbp2util_create_command_orb_pool(scsi_id)) {
if (sbp2util_create_command_orb_pool(lu)) {
SBP2_ERR("sbp2util_create_command_orb_pool failed!");
sbp2_remove_device(scsi_id);
sbp2_remove_device(lu);
return -ENOMEM;
}
/* Schedule a timeout here. The reason is that we may be so close
* to a bus reset, that the device is not available for logins.
* This can happen when the bus reset is caused by the host
* connected to the sbp2 device being removed. That host would
* have a certain amount of time to relogin before the sbp2 device
* allows someone else to login instead. One second makes sense. */
/* Wait a second before trying to log in. Previously logged in
* initiators need a chance to reconnect. */
if (msleep_interruptible(1000)) {
sbp2_remove_device(scsi_id);
sbp2_remove_device(lu);
return -EINTR;
}
/*
* Login to the sbp-2 device
*/
if (sbp2_login_device(scsi_id)) {
/* Login failed, just remove the device. */
sbp2_remove_device(scsi_id);
if (sbp2_login_device(lu)) {
sbp2_remove_device(lu);
return -EBUSY;
}
/*
* Set max retries to something large on the device
*/
sbp2_set_busy_timeout(scsi_id);
/*
* Do a SBP-2 fetch agent reset
*/
sbp2_agent_reset(scsi_id, 1);
sbp2_set_busy_timeout(lu);
sbp2_agent_reset(lu, 1);
sbp2_max_speed_and_size(lu);
/*
* Get the max speed and packet size that we can use
*/
sbp2_max_speed_and_size(scsi_id);
/* Add this device to the scsi layer now */
error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
if (error) {
SBP2_ERR("scsi_add_device failed");
sbp2_logout_device(scsi_id);
sbp2_remove_device(scsi_id);
sbp2_logout_device(lu);
sbp2_remove_device(lu);
return error;
}
return 0;
alloc_fail:
SBP2_ERR("Could not allocate memory for scsi_id");
sbp2_remove_device(scsi_id);
SBP2_ERR("Could not allocate memory for lu");
sbp2_remove_device(lu);
return -ENOMEM;
}
/*
* This function removes an sbp2 device from the sbp2scsi_host_info struct.
*/
static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
static void sbp2_remove_device(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi;
SBP2_DEBUG_ENTER();
struct sbp2_fwhost_info *hi;
if (!scsi_id)
if (!lu)
return;
hi = scsi_id->hi;
hi = lu->hi;
/* This will remove our scsi device as well */
if (scsi_id->scsi_host) {
scsi_remove_host(scsi_id->scsi_host);
scsi_host_put(scsi_id->scsi_host);
if (lu->shost) {
scsi_remove_host(lu->shost);
scsi_host_put(lu->shost);
}
flush_scheduled_work();
sbp2util_remove_command_orb_pool(scsi_id);
sbp2util_remove_command_orb_pool(lu);
list_del(&scsi_id->scsi_list);
list_del(&lu->lu_list);
if (scsi_id->login_response) {
pci_free_consistent(hi->host->pdev,
if (lu->login_response)
dma_free_coherent(&hi->host->device,
sizeof(struct sbp2_login_response),
scsi_id->login_response,
scsi_id->login_response_dma);
SBP2_DMA_FREE("single login FIFO");
}
if (scsi_id->login_orb) {
pci_free_consistent(hi->host->pdev,
lu->login_response,
lu->login_response_dma);
if (lu->login_orb)
dma_free_coherent(&hi->host->device,
sizeof(struct sbp2_login_orb),
scsi_id->login_orb,
scsi_id->login_orb_dma);
SBP2_DMA_FREE("single login ORB");
}
if (scsi_id->reconnect_orb) {
pci_free_consistent(hi->host->pdev,
lu->login_orb,
lu->login_orb_dma);
if (lu->reconnect_orb)
dma_free_coherent(&hi->host->device,
sizeof(struct sbp2_reconnect_orb),
scsi_id->reconnect_orb,
scsi_id->reconnect_orb_dma);
SBP2_DMA_FREE("single reconnect orb");
}
if (scsi_id->logout_orb) {
pci_free_consistent(hi->host->pdev,
lu->reconnect_orb,
lu->reconnect_orb_dma);
if (lu->logout_orb)
dma_free_coherent(&hi->host->device,
sizeof(struct sbp2_logout_orb),
scsi_id->logout_orb,
scsi_id->logout_orb_dma);
SBP2_DMA_FREE("single logout orb");
}
if (scsi_id->query_logins_orb) {
pci_free_consistent(hi->host->pdev,
lu->logout_orb,
lu->logout_orb_dma);
if (lu->query_logins_orb)
dma_free_coherent(&hi->host->device,
sizeof(struct sbp2_query_logins_orb),
scsi_id->query_logins_orb,
scsi_id->query_logins_orb_dma);
SBP2_DMA_FREE("single query logins orb");
}
if (scsi_id->query_logins_response) {
pci_free_consistent(hi->host->pdev,
lu->query_logins_orb,
lu->query_logins_orb_dma);
if (lu->query_logins_response)
dma_free_coherent(&hi->host->device,
sizeof(struct sbp2_query_logins_response),
scsi_id->query_logins_response,
scsi_id->query_logins_response_dma);
SBP2_DMA_FREE("single query logins data");
}
lu->query_logins_response,
lu->query_logins_response_dma);
if (scsi_id->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
if (lu->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
scsi_id->status_fifo_addr);
lu->status_fifo_addr);
scsi_id->ud->device.driver_data = NULL;
lu->ud->device.driver_data = NULL;
if (hi)
module_put(hi->host->driver->owner);
SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
kfree(scsi_id);
kfree(lu);
}
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
/*
* This function deals with physical dma write requests (for adapters that do not support
* physical dma in hardware). Mostly just here for debugging...
* Deal with write requests on adapters which do not support physical DMA or
* have it switched off.
*/
static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
int destid, quadlet_t *data, u64 addr,
size_t length, u16 flags)
{
/*
* Manually put the data in the right place.
*/
memcpy(bus_to_virt((u32) addr), data, length);
sbp2util_packet_dump(data, length, "sbp2 phys dma write by device",
(u32) addr);
return RCODE_COMPLETE;
}
/*
* This function deals with physical dma read requests (for adapters that do not support
* physical dma in hardware). Mostly just here for debugging...
* Deal with read requests on adapters which do not support physical DMA or
* have it switched off.
*/
static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
quadlet_t *data, u64 addr, size_t length,
u16 flags)
{
/*
* Grab data from memory and send a read response.
*/
memcpy(data, bus_to_virt((u32) addr), length);
sbp2util_packet_dump(data, length, "sbp2 phys dma read by device",
(u32) addr);
return RCODE_COMPLETE;
}
#endif
......@@ -1197,74 +1001,69 @@ static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
* SBP-2 protocol related section
**************************************/
/*
* This function queries the device for the maximum concurrent logins it
* supports.
*/
static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
static int sbp2_query_logins(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
quadlet_t data[2];
int max_logins;
int active_logins;
SBP2_DEBUG_ENTER();
scsi_id->query_logins_orb->reserved1 = 0x0;
scsi_id->query_logins_orb->reserved2 = 0x0;
scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
lu->query_logins_orb->reserved1 = 0x0;
lu->query_logins_orb->reserved2 = 0x0;
scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
lu->query_logins_orb->query_response_lo = lu->query_logins_response_dma;
lu->query_logins_orb->query_response_hi =
ORB_SET_NODE_ID(hi->host->node_id);
lu->query_logins_orb->lun_misc =
ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
lu->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
lu->query_logins_orb->lun_misc |= ORB_SET_LUN(lu->lun);
scsi_id->query_logins_orb->reserved_resp_length =
ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
lu->query_logins_orb->reserved_resp_length =
ORB_SET_QUERY_LOGINS_RESP_LENGTH(
sizeof(struct sbp2_query_logins_response));
scsi_id->query_logins_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->query_logins_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
lu->query_logins_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
lu->query_logins_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
sbp2util_cpu_to_be32_buffer(lu->query_logins_orb,
sizeof(struct sbp2_query_logins_orb));
sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb),
"sbp2 query logins orb", scsi_id->query_logins_orb_dma);
memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
memset(lu->query_logins_response, 0,
sizeof(struct sbp2_query_logins_response));
data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->query_logins_orb_dma;
data[1] = lu->query_logins_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
if (sbp2util_access_timeout(scsi_id, 2*HZ)) {
if (sbp2util_access_timeout(lu, 2*HZ)) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out");
return -EIO;
}
if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
if (lu->status_block.ORB_offset_lo != lu->query_logins_orb_dma) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out");
return -EIO;
}
if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
SBP2_INFO("Error querying logins to SBP-2 device - failed");
return -EIO;
}
sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
SBP2_DEBUG("length_max_logins = %x",
(unsigned int)scsi_id->query_logins_response->length_max_logins);
sbp2util_cpu_to_be32_buffer(lu->query_logins_response,
sizeof(struct sbp2_query_logins_response));
max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
max_logins = RESPONSE_GET_MAX_LOGINS(
lu->query_logins_response->length_max_logins);
SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
active_logins = RESPONSE_GET_ACTIVE_LOGINS(
lu->query_logins_response->length_max_logins);
SBP2_INFO("Number of active logins: %d", active_logins);
if (active_logins >= max_logins) {
......@@ -1274,332 +1073,231 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
return 0;
}
/*
* This function is called in order to log in to a particular SBP-2 device,
* after a bus reset.
*/
static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_login_device(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
quadlet_t data[2];
SBP2_DEBUG_ENTER();
if (!scsi_id->login_orb) {
SBP2_DEBUG("%s: login_orb not alloc'd!", __FUNCTION__);
if (!lu->login_orb)
return -EIO;
}
if (!exclusive_login) {
if (sbp2_query_logins(scsi_id)) {
SBP2_INFO("Device does not support any more concurrent logins");
return -EIO;
}
if (!sbp2_exclusive_login && sbp2_query_logins(lu)) {
SBP2_INFO("Device does not support any more concurrent logins");
return -EIO;
}
/* Set-up login ORB, assume no password */
scsi_id->login_orb->password_hi = 0;
scsi_id->login_orb->password_lo = 0;
/* assume no password */
lu->login_orb->password_hi = 0;
lu->login_orb->password_lo = 0;
scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
lu->login_orb->login_response_lo = lu->login_response_dma;
lu->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
lu->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
/* one second reconnect time */
lu->login_orb->lun_misc |= ORB_SET_RECONNECT(0);
lu->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login);
lu->login_orb->lun_misc |= ORB_SET_NOTIFY(1);
lu->login_orb->lun_misc |= ORB_SET_LUN(lu->lun);
scsi_id->login_orb->passwd_resp_lengths =
lu->login_orb->passwd_resp_lengths =
ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
scsi_id->login_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->login_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
lu->login_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
lu->login_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
sbp2util_cpu_to_be32_buffer(lu->login_orb,
sizeof(struct sbp2_login_orb));
sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
"sbp2 login orb", scsi_id->login_orb_dma);
memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
memset(lu->login_response, 0, sizeof(struct sbp2_login_response));
data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->login_orb_dma;
data[1] = lu->login_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
/*
* Wait for login status (up to 20 seconds)...
*/
if (sbp2util_access_timeout(scsi_id, 20*HZ)) {
/* wait up to 20 seconds for login status */
if (sbp2util_access_timeout(lu, 20*HZ)) {
SBP2_ERR("Error logging into SBP-2 device - timed out");
return -EIO;
}
/*
* Sanity. Make sure status returned matches login orb.
*/
if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
/* make sure that the returned status matches the login ORB */
if (lu->status_block.ORB_offset_lo != lu->login_orb_dma) {
SBP2_ERR("Error logging into SBP-2 device - timed out");
return -EIO;
}
if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
SBP2_ERR("Error logging into SBP-2 device - failed");
return -EIO;
}
/*
* Byte swap the login response, for use when reconnecting or
* logging out.
*/
sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response));
/*
* Grab our command block agent address from the login response.
*/
SBP2_DEBUG("command_block_agent_hi = %x",
(unsigned int)scsi_id->login_response->command_block_agent_hi);
SBP2_DEBUG("command_block_agent_lo = %x",
(unsigned int)scsi_id->login_response->command_block_agent_lo);
scsi_id->sbp2_command_block_agent_addr =
((u64)scsi_id->login_response->command_block_agent_hi) << 32;
scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
sbp2util_cpu_to_be32_buffer(lu->login_response,
sizeof(struct sbp2_login_response));
lu->command_block_agent_addr =
((u64)lu->login_response->command_block_agent_hi) << 32;
lu->command_block_agent_addr |=
((u64)lu->login_response->command_block_agent_lo);
lu->command_block_agent_addr &= 0x0000ffffffffffffULL;
SBP2_INFO("Logged into SBP-2 device");
return 0;
}
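/*
 * Worked example (hypothetical values) of how the command block agent
 * address is assembled above from the two login response quadlets:
 */
#if 0	/* example only */
	/* command_block_agent_hi = 0x0000ffff, command_block_agent_lo = 0xf0100000:
	 *   ((u64)0x0000ffff << 32 | 0xf0100000) & 0x0000ffffffffffffULL
	 *   == 0x0000fffff0100000, i.e. a 48-bit CSR offset on the target. */
	u64 addr = ((u64)0x0000ffff << 32 | 0xf0100000) & 0x0000ffffffffffffULL;
#endif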
/*
* This function is called in order to log out from a particular SBP-2
* device, usually called during driver unload.
*/
static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_logout_device(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
quadlet_t data[2];
int error;
SBP2_DEBUG_ENTER();
/*
* Set-up logout ORB
*/
scsi_id->logout_orb->reserved1 = 0x0;
scsi_id->logout_orb->reserved2 = 0x0;
scsi_id->logout_orb->reserved3 = 0x0;
scsi_id->logout_orb->reserved4 = 0x0;
scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
/* Notify us when complete */
scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
lu->logout_orb->reserved1 = 0x0;
lu->logout_orb->reserved2 = 0x0;
lu->logout_orb->reserved3 = 0x0;
lu->logout_orb->reserved4 = 0x0;
scsi_id->logout_orb->reserved5 = 0x0;
scsi_id->logout_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->logout_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
lu->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
lu->logout_orb->login_ID_misc |=
ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
lu->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
/*
* Byte swap ORB if necessary
*/
sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
lu->logout_orb->reserved5 = 0x0;
lu->logout_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
lu->logout_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
"sbp2 logout orb", scsi_id->logout_orb_dma);
sbp2util_cpu_to_be32_buffer(lu->logout_orb,
sizeof(struct sbp2_logout_orb));
/*
* Ok, let's write to the target's management agent register
*/
data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->logout_orb_dma;
data[1] = lu->logout_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
error = hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_management_agent_addr, data, 8);
error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
if (error)
return error;
/* Wait for device to logout...1 second. */
if (sbp2util_access_timeout(scsi_id, HZ))
/* wait up to 1 second for the device to complete logout */
if (sbp2util_access_timeout(lu, HZ))
return -EIO;
SBP2_INFO("Logged out of SBP-2 device");
return 0;
}
/*
* This function is called in order to reconnect to a particular SBP-2
* device, after a bus reset.
*/
static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_reconnect_device(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
quadlet_t data[2];
int error;
SBP2_DEBUG_ENTER();
lu->reconnect_orb->reserved1 = 0x0;
lu->reconnect_orb->reserved2 = 0x0;
lu->reconnect_orb->reserved3 = 0x0;
lu->reconnect_orb->reserved4 = 0x0;
/*
* Set-up reconnect ORB
*/
scsi_id->reconnect_orb->reserved1 = 0x0;
scsi_id->reconnect_orb->reserved2 = 0x0;
scsi_id->reconnect_orb->reserved3 = 0x0;
scsi_id->reconnect_orb->reserved4 = 0x0;
scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
scsi_id->reconnect_orb->login_ID_misc |=
ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
lu->reconnect_orb->login_ID_misc =
ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
lu->reconnect_orb->login_ID_misc |=
ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
lu->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
/* Notify us when complete */
scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
lu->reconnect_orb->reserved5 = 0x0;
lu->reconnect_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
lu->reconnect_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
scsi_id->reconnect_orb->reserved5 = 0x0;
scsi_id->reconnect_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->reconnect_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
*/
sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
"sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
sbp2util_cpu_to_be32_buffer(lu->reconnect_orb,
sizeof(struct sbp2_reconnect_orb));
data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->reconnect_orb_dma;
data[1] = lu->reconnect_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
error = hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_management_agent_addr, data, 8);
error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
if (error)
return error;
/*
* Wait for reconnect status (up to 1 second)...
*/
if (sbp2util_access_timeout(scsi_id, HZ)) {
/* wait up to 1 second for reconnect status */
if (sbp2util_access_timeout(lu, HZ)) {
SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
return -EIO;
}
/*
* Sanity. Make sure status returned matches reconnect orb.
*/
if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
/* make sure that the returned status matches the reconnect ORB */
if (lu->status_block.ORB_offset_lo != lu->reconnect_orb_dma) {
SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
return -EIO;
}
if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
SBP2_ERR("Error reconnecting to SBP-2 device - failed");
return -EIO;
}
HPSB_DEBUG("Reconnected to SBP-2 device");
SBP2_INFO("Reconnected to SBP-2 device");
return 0;
}
/*
* This function is called in order to set the busy timeout (number of
* retries to attempt) on the sbp2 device.
* Set the target node's Single Phase Retry limit. Affects the target's retry
* behaviour if our node is too busy to accept requests.
*/
static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
{
quadlet_t data;
SBP2_DEBUG_ENTER();
data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
SBP2_ERR("%s error", __FUNCTION__);
return 0;
}
/*
* This function is called to parse sbp2 device's config rom unit
* directory. Used to determine things like sbp2 management agent offset,
* and command set used (SCSI or RBC).
*/
static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
struct unit_directory *ud)
{
struct csr1212_keyval *kv;
struct csr1212_dentry *dentry;
u64 management_agent_addr;
u32 command_set_spec_id, command_set, unit_characteristics,
firmware_revision;
u32 unit_characteristics, firmware_revision;
unsigned workarounds;
int i;
SBP2_DEBUG_ENTER();
management_agent_addr = 0;
unit_characteristics = 0;
firmware_revision = 0;
management_agent_addr = 0x0;
command_set_spec_id = 0x0;
command_set = 0x0;
unit_characteristics = 0x0;
firmware_revision = 0x0;
/* Handle different fields in the unit directory, based on keys */
csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
switch (kv->key.id) {
case CSR1212_KV_ID_DEPENDENT_INFO:
if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
/* Save off the management agent address */
if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET)
management_agent_addr =
CSR1212_REGISTER_SPACE_BASE +
(kv->value.csr_offset << 2);
SBP2_DEBUG("sbp2_management_agent_addr = %x",
(unsigned int)management_agent_addr);
} else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
scsi_id->sbp2_lun =
ORB_SET_LUN(kv->value.immediate);
}
break;
case SBP2_COMMAND_SET_SPEC_ID_KEY:
/* Command spec organization */
command_set_spec_id = kv->value.immediate;
SBP2_DEBUG("sbp2_command_set_spec_id = %x",
(unsigned int)command_set_spec_id);
break;
case SBP2_COMMAND_SET_KEY:
/* Command set used by sbp2 device */
command_set = kv->value.immediate;
SBP2_DEBUG("sbp2_command_set = %x",
(unsigned int)command_set);
else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
lu->lun = ORB_SET_LUN(kv->value.immediate);
break;
case SBP2_UNIT_CHARACTERISTICS_KEY:
/*
* Unit characteristics (ORB related stuff
* that I'm not yet paying attention to)
*/
/* FIXME: This is ignored so far.
* See SBP-2 clause 7.4.8. */
unit_characteristics = kv->value.immediate;
SBP2_DEBUG("sbp2_unit_characteristics = %x",
(unsigned int)unit_characteristics);
break;
case SBP2_FIRMWARE_REVISION_KEY:
/* Firmware revision */
firmware_revision = kv->value.immediate;
SBP2_DEBUG("sbp2_firmware_revision = %x",
(unsigned int)firmware_revision);
break;
default:
/* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
* Its "ordered" bit has consequences for command ORB
* list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
break;
}
}
......@@ -1631,28 +1329,24 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
/* We would need one SCSI host template for each target to adjust
* max_sectors on the fly, therefore warn only. */
if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
(max_sectors * 512) > (128 * 1024))
SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
(sbp2_max_sectors * 512) > (128 * 1024))
SBP2_INFO("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
"max transfer size. WARNING: Current max_sectors "
"setting is larger than 128KB (%d sectors)",
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
max_sectors);
sbp2_max_sectors);
/* If this is a logical unit directory entry, process the parent
* to get the values. */
if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
struct unit_directory *parent_ud =
container_of(ud->device.parent, struct unit_directory, device);
sbp2_parse_unit_directory(scsi_id, parent_ud);
struct unit_directory *parent_ud = container_of(
ud->device.parent, struct unit_directory, device);
sbp2_parse_unit_directory(lu, parent_ud);
} else {
scsi_id->sbp2_management_agent_addr = management_agent_addr;
scsi_id->sbp2_command_set_spec_id = command_set_spec_id;
scsi_id->sbp2_command_set = command_set;
scsi_id->sbp2_unit_characteristics = unit_characteristics;
scsi_id->sbp2_firmware_revision = firmware_revision;
scsi_id->workarounds = workarounds;
lu->management_agent_addr = management_agent_addr;
lu->workarounds = workarounds;
if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun);
lu->lun = ORB_SET_LUN(ud->lun);
}
}
......@@ -1667,133 +1361,114 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
* the speed that it needs to use, and the max_rec the host supports, and
* it takes care of the rest.
*/
static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
static int sbp2_max_speed_and_size(struct sbp2_lu *lu)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
u8 payload;
SBP2_DEBUG_ENTER();
scsi_id->speed_code =
hi->host->speed[NODEID_TO_NODE(scsi_id->ne->nodeid)];
lu->speed_code = hi->host->speed[NODEID_TO_NODE(lu->ne->nodeid)];
/* Bump down our speed if the user requested it */
if (scsi_id->speed_code > max_speed) {
scsi_id->speed_code = max_speed;
SBP2_ERR("Forcing SBP-2 max speed down to %s",
hpsb_speedto_str[scsi_id->speed_code]);
if (lu->speed_code > sbp2_max_speed) {
lu->speed_code = sbp2_max_speed;
SBP2_INFO("Reducing speed to %s",
hpsb_speedto_str[sbp2_max_speed]);
}
/* Payload size is the lesser of what our speed supports and what
* our host supports. */
payload = min(sbp2_speedto_max_payload[scsi_id->speed_code],
payload = min(sbp2_speedto_max_payload[lu->speed_code],
(u8) (hi->host->csr.max_rec - 1));
/* If physical DMA is off, work around limitation in ohci1394:
* packet size must not exceed PAGE_SIZE */
if (scsi_id->ne->host->low_addr_space < (1ULL << 32))
if (lu->ne->host->low_addr_space < (1ULL << 32))
while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
payload)
payload--;
HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
hpsb_speedto_str[scsi_id->speed_code],
SBP2_PAYLOAD_TO_BYTES(payload));
SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
NODE_BUS_ARGS(hi->host, lu->ne->nodeid),
hpsb_speedto_str[lu->speed_code],
SBP2_PAYLOAD_TO_BYTES(payload));
scsi_id->max_payload_size = payload;
lu->max_payload_size = payload;
return 0;
}
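/*
 * Worked example (hypothetical host, and assuming SBP2_PAYLOAD_TO_BYTES(p)
 * expands to 1 << (p + 2) as in sbp2.h): at S400 the table above yields 0x9;
 * with a host max_rec of 10 the minimum stays 0x9, i.e. 2048-byte packets.
 */
#if 0	/* example only */
	u8 example_payload = min(sbp2_speedto_max_payload[IEEE1394_SPEED_400],
				 (u8)(10 - 1));
	/* example_payload == 0x9, SBP2_PAYLOAD_TO_BYTES(0x9) == 2048 */
#endif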
/*
* This function is called in order to perform an SBP-2 agent reset.
*/
static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
{
quadlet_t data;
u64 addr;
int retval;
unsigned long flags;
SBP2_DEBUG_ENTER();
cancel_delayed_work(&scsi_id->protocol_work);
/* flush lu->protocol_work */
if (wait)
flush_scheduled_work();
data = ntohl(SBP2_AGENT_RESET_DATA);
addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
addr = lu->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
if (wait)
retval = hpsb_node_write(scsi_id->ne, addr, &data, 4);
retval = hpsb_node_write(lu->ne, addr, &data, 4);
else
retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4);
retval = sbp2util_node_write_no_wait(lu->ne, addr, &data, 4);
if (retval < 0) {
SBP2_ERR("hpsb_node_write failed.\n");
return -EIO;
}
/*
* Need to make sure orb pointer is written on next command
*/
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
scsi_id->last_orb = NULL;
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
/* make sure that the ORB_POINTER is written on next command */
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
lu->last_orb = NULL;
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return 0;
}
static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
struct sbp2scsi_host_info *hi,
struct sbp2_command_info *command,
struct sbp2_fwhost_info *hi,
struct sbp2_command_info *cmd,
unsigned int scsi_use_sg,
struct scatterlist *sgpnt,
u32 orb_direction,
enum dma_data_direction dma_dir)
{
command->dma_dir = dma_dir;
cmd->dma_dir = dma_dir;
orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
orb->misc |= ORB_SET_DIRECTION(orb_direction);
/* Special case if only one element (and less than 64KB in size) */
/* special case if only one element (and less than 64KB in size) */
if ((scsi_use_sg == 1) &&
(sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
SBP2_DEBUG("Only one s/g element");
command->dma_size = sgpnt[0].length;
command->dma_type = CMD_DMA_PAGE;
command->cmd_dma = pci_map_page(hi->host->pdev,
sgpnt[0].page,
sgpnt[0].offset,
command->dma_size,
command->dma_dir);
SBP2_DMA_ALLOC("single page scatter element");
cmd->dma_size = sgpnt[0].length;
cmd->dma_type = CMD_DMA_PAGE;
cmd->cmd_dma = dma_map_page(&hi->host->device,
sgpnt[0].page, sgpnt[0].offset,
cmd->dma_size, cmd->dma_dir);
orb->data_descriptor_lo = command->cmd_dma;
orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
orb->data_descriptor_lo = cmd->cmd_dma;
orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
} else {
struct sbp2_unrestricted_page_table *sg_element =
&command->scatter_gather_element[0];
&cmd->scatter_gather_element[0];
u32 sg_count, sg_len;
dma_addr_t sg_addr;
int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
dma_dir);
SBP2_DMA_ALLOC("scatter list");
command->dma_size = scsi_use_sg;
command->sge_buffer = sgpnt;
cmd->dma_size = scsi_use_sg;
cmd->sge_buffer = sgpnt;
/* use page tables (s/g) */
orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
orb->data_descriptor_lo = command->sge_dma;
orb->data_descriptor_lo = cmd->sge_dma;
/*
* Loop through and fill out our sbp-2 page tables
* (and split up anything too large)
*/
/* loop through and fill out our SBP-2 page tables
* (and split up anything too large) */
for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
sg_len = sg_dma_len(sgpnt);
sg_addr = sg_dma_address(sgpnt);
......@@ -1813,70 +1488,53 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
}
}
/* Number of page table (s/g) elements */
orb->misc |= ORB_SET_DATA_SIZE(sg_count);
sbp2util_packet_dump(sg_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
"sbp2 s/g list", command->sge_dma);
/* Byte swap page tables if necessary */
sbp2util_cpu_to_be32_buffer(sg_element,
(sizeof(struct sbp2_unrestricted_page_table)) *
sg_count);
(sizeof(struct sbp2_unrestricted_page_table)) *
sg_count);
}
}
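This hunk is part of the conversion from the PCI DMA API to the generic DMA API: pci_map_page()/pci_map_sg() on hi->host->pdev become dma_map_page()/dma_map_sg() on &hi->host->device, and the PCI_DMA_* constants become enum dma_data_direction values. A minimal sketch of the map/sync/unmap pairing this API expects; dev, buf and len are placeholders and error handling is omitted.

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Illustrative only: generic DMA API round trip for a streaming buffer. */
static void example_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* CPU stores to buf are finished; hand ownership to the device. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* ... device reads the buffer here ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}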
static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
struct sbp2scsi_host_info *hi,
struct sbp2_command_info *command,
struct sbp2_fwhost_info *hi,
struct sbp2_command_info *cmd,
struct scatterlist *sgpnt,
u32 orb_direction,
unsigned int scsi_request_bufflen,
void *scsi_request_buffer,
enum dma_data_direction dma_dir)
{
command->dma_dir = dma_dir;
command->dma_size = scsi_request_bufflen;
command->dma_type = CMD_DMA_SINGLE;
command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
command->dma_size, command->dma_dir);
cmd->dma_dir = dma_dir;
cmd->dma_size = scsi_request_bufflen;
cmd->dma_type = CMD_DMA_SINGLE;
cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
cmd->dma_size, cmd->dma_dir);
orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
orb->misc |= ORB_SET_DIRECTION(orb_direction);
SBP2_DMA_ALLOC("single bulk");
/*
* Handle case where we get a command w/o s/g enabled (but
* check for transfers larger than 64K)
*/
/* handle case where we get a command w/o s/g enabled
* (but check for transfers larger than 64K) */
if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
orb->data_descriptor_lo = command->cmd_dma;
orb->data_descriptor_lo = cmd->cmd_dma;
orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
} else {
/* The buffer is too large. Turn this into page tables. */
struct sbp2_unrestricted_page_table *sg_element =
&command->scatter_gather_element[0];
&cmd->scatter_gather_element[0];
u32 sg_count, sg_len;
dma_addr_t sg_addr;
/*
* Need to turn this into page tables, since the
* buffer is too large.
*/
orb->data_descriptor_lo = command->sge_dma;
/* Use page tables (s/g) */
orb->data_descriptor_lo = cmd->sge_dma;
orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
/*
* fill out our sbp-2 page tables (and split up
* the large buffer)
*/
/* fill out our SBP-2 page tables; split up the large buffer */
sg_count = 0;
sg_len = scsi_request_bufflen;
sg_addr = command->cmd_dma;
sg_addr = cmd->cmd_dma;
while (sg_len) {
sg_element[sg_count].segment_base_lo = sg_addr;
if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
......@@ -1892,50 +1550,40 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
sg_count++;
}
/* Number of page table (s/g) elements */
orb->misc |= ORB_SET_DATA_SIZE(sg_count);
sbp2util_packet_dump(sg_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
"sbp2 s/g list", command->sge_dma);
/* Byte swap page tables if necessary */
sbp2util_cpu_to_be32_buffer(sg_element,
(sizeof(struct sbp2_unrestricted_page_table)) *
sg_count);
(sizeof(struct sbp2_unrestricted_page_table)) *
sg_count);
}
}
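Both preparation paths split any contiguous run larger than SBP2_MAX_SG_ELEMENT_LENGTH (0xf000, i.e. 61440 bytes) across several page-table segments. A small worked sketch of that splitting; the helper is illustrative only.

/* Illustrative only: how many SBP-2 page-table segments a buffer needs when
 * each segment may cover at most 0xf000 bytes. */
static unsigned int example_segment_count(unsigned int bufflen)
{
	unsigned int count = 0;

	while (bufflen) {
		unsigned int seg = bufflen > 0xf000 ? 0xf000 : bufflen;

		bufflen -= seg;
		count++;
	}
	/* e.g. a 100 KiB (102400 byte) buffer splits into two segments
	 * of 61440 and 40960 bytes */
	return count;
}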
/*
* This function is called to create the actual command orb and s/g list
* out of the scsi command itself.
*/
static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command,
static void sbp2_create_command_orb(struct sbp2_lu *lu,
struct sbp2_command_info *cmd,
unchar *scsi_cmd,
unsigned int scsi_use_sg,
unsigned int scsi_request_bufflen,
void *scsi_request_buffer,
enum dma_data_direction dma_dir)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
struct sbp2_command_orb *command_orb = &command->command_orb;
struct sbp2_command_orb *orb = &cmd->command_orb;
u32 orb_direction;
/*
* Set-up our command ORB..
* Set-up our command ORB.
*
* NOTE: We're doing unrestricted page tables (s/g), as this is
* best performance (at least with the devices I have). This means
* that data_size becomes the number of s/g elements, and
* page_size should be zero (for unrestricted).
*/
command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
command_orb->next_ORB_lo = 0x0;
command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
orb->next_ORB_lo = 0x0;
orb->misc = ORB_SET_MAX_PAYLOAD(lu->max_payload_size);
orb->misc |= ORB_SET_SPEED(lu->speed_code);
orb->misc |= ORB_SET_NOTIFY(1);
if (dma_dir == DMA_NONE)
orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
......@@ -1944,66 +1592,51 @@ static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
else {
SBP2_WARN("Falling back to DMA_NONE");
SBP2_INFO("Falling back to DMA_NONE");
orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
}
/* Set-up our pagetable stuff */
/* set up our page table stuff */
if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
SBP2_DEBUG("No data transfer");
command_orb->data_descriptor_hi = 0x0;
command_orb->data_descriptor_lo = 0x0;
command_orb->misc |= ORB_SET_DIRECTION(1);
} else if (scsi_use_sg) {
SBP2_DEBUG("Use scatter/gather");
sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg,
sgpnt, orb_direction, dma_dir);
} else {
SBP2_DEBUG("No scatter/gather");
sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt,
orb_direction, scsi_request_bufflen,
orb->data_descriptor_hi = 0x0;
orb->data_descriptor_lo = 0x0;
orb->misc |= ORB_SET_DIRECTION(1);
} else if (scsi_use_sg)
sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
orb_direction, dma_dir);
else
sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
scsi_request_bufflen,
scsi_request_buffer, dma_dir);
}
/* Byte swap command ORB if necessary */
sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
/* Put our scsi command in the command ORB */
memset(command_orb->cdb, 0, 12);
memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
memset(orb->cdb, 0, 12);
memcpy(orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
}
/*
* This function is called in order to begin a regular SBP-2 command.
*/
static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command)
static void sbp2_link_orb_command(struct sbp2_lu *lu,
struct sbp2_command_info *cmd)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_command_orb *command_orb = &command->command_orb;
struct sbp2_fwhost_info *hi = lu->hi;
struct sbp2_command_orb *last_orb;
dma_addr_t last_orb_dma;
u64 addr = scsi_id->sbp2_command_block_agent_addr;
u64 addr = lu->command_block_agent_addr;
quadlet_t data[2];
size_t length;
unsigned long flags;
outstanding_orb_incr;
SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
command_orb, global_outstanding_command_orbs);
pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
/*
* Check to see if there are any previous orbs to use
*/
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
last_orb = scsi_id->last_orb;
last_orb_dma = scsi_id->last_orb_dma;
dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
/* check to see if there are any previous orbs to use */
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
last_orb = lu->last_orb;
last_orb_dma = lu->last_orb_dma;
if (!last_orb) {
/*
* last_orb == NULL means: We know that the target's fetch agent
......@@ -2011,7 +1644,7 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
*/
addr += SBP2_ORB_POINTER_OFFSET;
data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = command->command_orb_dma;
data[1] = cmd->command_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
length = 8;
} else {
......@@ -2022,27 +1655,25 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
* The target's fetch agent may or may not have read this
* previous ORB yet.
*/
pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma);
dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
wmb();
/* Tells hardware that this pointer is valid */
last_orb->next_ORB_hi = 0;
pci_dma_sync_single_for_device(hi->host->pdev, last_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
dma_sync_single_for_device(&hi->host->device, last_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
addr += SBP2_DOORBELL_OFFSET;
data[0] = 0;
length = 4;
}
scsi_id->last_orb = command_orb;
scsi_id->last_orb_dma = command->command_orb_dma;
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
lu->last_orb = &cmd->command_orb;
lu->last_orb_dma = cmd->command_orb_dma;
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
SBP2_ORB_DEBUG("write to %s register, command orb %p",
last_orb ? "DOORBELL" : "ORB_POINTER", command_orb);
if (sbp2util_node_write_no_wait(scsi_id->ne, addr, data, length)) {
if (sbp2util_node_write_no_wait(lu->ne, addr, data, length)) {
/*
* sbp2util_node_write_no_wait failed. We certainly ran out
* of transaction labels, perhaps just because there were no
......@@ -2051,51 +1682,29 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
* the workqueue job will sleep until it is guaranteed to get a tlabel.
* We do not accept new commands until the job is over.
*/
scsi_block_requests(scsi_id->scsi_host);
PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
scsi_block_requests(lu->shost);
PREPARE_WORK(&lu->protocol_work,
last_orb ? sbp2util_write_doorbell:
sbp2util_write_orb_pointer);
schedule_delayed_work(&scsi_id->protocol_work, 0);
schedule_work(&lu->protocol_work);
}
}
/*
* This function is called in order to begin a regular SBP-2 command.
*/
static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt,
static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
unchar *cmd = (unchar *) SCpnt->cmnd;
unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
unsigned int request_bufflen = SCpnt->request_bufflen;
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
SBP2_DEBUG_ENTER();
SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
/*
* Allocate a command orb and s/g structure
*/
command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
if (!command) {
cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
if (!cmd)
return -EIO;
}
/*
* Now actually fill in the command orb and sbp2 s/g list
*/
sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg,
request_bufflen, SCpnt->request_buffer,
SCpnt->sc_data_direction);
sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
"sbp2 command orb", command->command_orb_dma);
/*
* Link up the orb, and ring the doorbell if needed
*/
sbp2_link_orb_command(scsi_id, command);
sbp2_link_orb_command(lu, cmd);
return 0;
}
......@@ -2103,13 +1712,10 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
/*
* Translates SBP-2 status into SCSI sense data for check conditions
*/
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
unchar *sense_data)
{
SBP2_DEBUG_ENTER();
/*
* Ok, it's pretty ugly... ;-)
*/
/* OK, it's pretty ugly... ;-) */
sense_data[0] = 0x70;
sense_data[1] = 0x0;
sense_data[2] = sbp2_status[9];
......@@ -2127,28 +1733,21 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
sense_data[14] = sbp2_status[20];
sense_data[15] = sbp2_status[21];
return sbp2_status[8] & 0x3f; /* return scsi status */
return sbp2_status[8] & 0x3f;
}
/*
* This function deals with status writes from the SBP-2 device
*/
static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
int destid, quadlet_t *data, u64 addr,
size_t length, u16 fl)
{
struct sbp2scsi_host_info *hi;
struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
struct sbp2_fwhost_info *hi;
struct sbp2_lu *lu = NULL, *lu_tmp;
struct scsi_cmnd *SCpnt = NULL;
struct sbp2_status_block *sb;
u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
unsigned long flags;
SBP2_DEBUG_ENTER();
sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
SBP2_ERR("Wrong size of status block");
return RCODE_ADDRESS_ERROR;
......@@ -2162,131 +1761,97 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
SBP2_ERR("host info is NULL - this is bad!");
return RCODE_ADDRESS_ERROR;
}
/*
* Find our scsi_id structure by looking at the status fifo address
* written to by the sbp2 device.
*/
list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
if (scsi_id_tmp->ne->nodeid == nodeid &&
scsi_id_tmp->status_fifo_addr == addr) {
scsi_id = scsi_id_tmp;
/* Find the unit which wrote the status. */
list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
if (lu_tmp->ne->nodeid == nodeid &&
lu_tmp->status_fifo_addr == addr) {
lu = lu_tmp;
break;
}
}
if (unlikely(!scsi_id)) {
SBP2_ERR("scsi_id is NULL - device is gone?");
if (unlikely(!lu)) {
SBP2_ERR("lu is NULL - device is gone?");
return RCODE_ADDRESS_ERROR;
}
/*
* Put response into scsi_id status fifo buffer. The first two bytes
/* Put response into lu status fifo buffer. The first two bytes
* come in big endian bit order. Often the target writes only a
* truncated status block, minimally the first two quadlets. The rest
* is implied to be zeros.
*/
sb = &scsi_id->status_block;
* is implied to be zeros. */
sb = &lu->status_block;
memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
memcpy(sb, data, length);
sbp2util_be32_to_cpu_buffer(sb, 8);
/*
* Ignore unsolicited status. Handle command ORB status.
*/
/* Ignore unsolicited status. Handle command ORB status. */
if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
command = NULL;
cmd = NULL;
else
command = sbp2util_find_command_for_orb(scsi_id,
sb->ORB_offset_lo);
if (command) {
SBP2_DEBUG("Found status for command ORB");
pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
outstanding_orb_decr;
/*
* Matched status with command, now grab scsi command pointers
* and check status.
*/
cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
if (cmd) {
dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
/* Grab SCSI command pointers and check status. */
/*
* FIXME: If the src field in the status is 1, the ORB DMA must
* not be reused until status for a subsequent ORB is received.
*/
SCpnt = command->Current_SCpnt;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
sbp2util_mark_command_completed(scsi_id, command);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
SCpnt = cmd->Current_SCpnt;
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
sbp2util_mark_command_completed(lu, cmd);
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
if (SCpnt) {
u32 h = sb->ORB_offset_hi_misc;
u32 r = STATUS_GET_RESP(h);
if (r != RESP_STATUS_REQUEST_COMPLETE) {
SBP2_WARN("resp 0x%x, sbp_status 0x%x",
SBP2_INFO("resp 0x%x, sbp_status 0x%x",
r, STATUS_GET_SBP_STATUS(h));
scsi_status =
r == RESP_STATUS_TRANSPORT_FAILURE ?
SBP2_SCSI_STATUS_BUSY :
SBP2_SCSI_STATUS_COMMAND_TERMINATED;
}
/*
* See if the target stored any scsi status information.
*/
if (STATUS_GET_LEN(h) > 1) {
SBP2_DEBUG("CHECK CONDITION");
if (STATUS_GET_LEN(h) > 1)
scsi_status = sbp2_status_to_sense_data(
(unchar *)sb, SCpnt->sense_buffer);
}
/*
* Check to see if the dead bit is set. If so, we'll
* have to initiate a fetch agent reset.
*/
if (STATUS_TEST_DEAD(h)) {
SBP2_DEBUG("Dead bit set - "
"initiating fetch agent reset");
sbp2_agent_reset(scsi_id, 0);
}
SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
if (STATUS_TEST_DEAD(h))
sbp2_agent_reset(lu, 0);
}
/*
* Check here to see if there are no commands in-use. If there
/* Check here to see if there are no commands in-use. If there
* are none, we know that the fetch agent left the active state
* _and_ that we did not reactivate it yet. Therefore clear
* last_orb so that next time we write directly to the
* ORB_POINTER register. That way the fetch agent does not need
* to refetch the next_ORB.
*/
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (list_empty(&scsi_id->sbp2_command_orb_inuse))
scsi_id->last_orb = NULL;
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
* to refetch the next_ORB. */
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
if (list_empty(&lu->cmd_orb_inuse))
lu->last_orb = NULL;
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
} else {
/*
* It's probably a login/logout/reconnect status.
*/
if ((sb->ORB_offset_lo == scsi_id->reconnect_orb_dma) ||
(sb->ORB_offset_lo == scsi_id->login_orb_dma) ||
(sb->ORB_offset_lo == scsi_id->query_logins_orb_dma) ||
(sb->ORB_offset_lo == scsi_id->logout_orb_dma)) {
scsi_id->access_complete = 1;
wake_up_interruptible(&access_wq);
/* It's probably status after a management request. */
if ((sb->ORB_offset_lo == lu->reconnect_orb_dma) ||
(sb->ORB_offset_lo == lu->login_orb_dma) ||
(sb->ORB_offset_lo == lu->query_logins_orb_dma) ||
(sb->ORB_offset_lo == lu->logout_orb_dma)) {
lu->access_complete = 1;
wake_up_interruptible(&sbp2_access_wq);
}
}
if (SCpnt) {
SBP2_DEBUG("Completing SCSI command");
sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt,
command->Current_done);
SBP2_ORB_DEBUG("command orb completed");
}
if (SCpnt)
sbp2scsi_complete_command(lu, scsi_status, SCpnt,
cmd->Current_done);
return RCODE_COMPLETE;
}
......@@ -2294,77 +1859,57 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
* SCSI interface related section
**************************************/
/*
* This routine is the main request entry routine for doing I/O. It is
* called from the scsi stack directly.
*/
static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
struct sbp2scsi_host_info *hi;
struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
struct sbp2_fwhost_info *hi;
int result = DID_NO_CONNECT << 16;
SBP2_DEBUG_ENTER();
#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
scsi_print_command(SCpnt);
#endif
if (!sbp2util_node_is_available(scsi_id))
if (unlikely(!sbp2util_node_is_available(lu)))
goto done;
hi = scsi_id->hi;
hi = lu->hi;
if (!hi) {
SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
if (unlikely(!hi)) {
SBP2_ERR("sbp2_fwhost_info is NULL - this is bad!");
goto done;
}
/*
* Until we handle multiple luns, just return selection time-out
* to any IO directed at non-zero LUNs
*/
if (SCpnt->device->lun)
/* Multiple units are currently represented to the SCSI core as separate
* targets, not as one target with multiple LUs. Therefore return
* selection time-out to any IO directed at non-zero LUNs. */
if (unlikely(SCpnt->device->lun))
goto done;
/*
* Check for request sense command, and handle it here
* (autorequest sense)
*/
/* handle the request sense command here (auto-request sense) */
if (SCpnt->cmnd[0] == REQUEST_SENSE) {
SBP2_DEBUG("REQUEST_SENSE");
memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
memcpy(SCpnt->request_buffer, SCpnt->sense_buffer,
SCpnt->request_bufflen);
memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
sbp2scsi_complete_command(lu, SBP2_SCSI_STATUS_GOOD, SCpnt,
done);
return 0;
}
/*
* Check to see if we are in the middle of a bus reset.
*/
if (!hpsb_node_entry_valid(scsi_id->ne)) {
if (unlikely(!hpsb_node_entry_valid(lu->ne))) {
SBP2_ERR("Bus reset in progress - rejecting command");
result = DID_BUS_BUSY << 16;
goto done;
}
/*
* Bidirectional commands are not yet implemented,
* and unknown transfer direction not handled.
*/
if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) {
/* Bidirectional commands are not yet implemented,
* and unknown transfer direction not handled. */
if (unlikely(SCpnt->sc_data_direction == DMA_BIDIRECTIONAL)) {
SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
result = DID_ERROR << 16;
goto done;
}
/*
* Try and send our SCSI command
*/
if (sbp2_send_command(scsi_id, SCpnt, done)) {
if (sbp2_send_command(lu, SCpnt, done)) {
SBP2_ERR("Error sending SCSI command");
sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
sbp2scsi_complete_command(lu,
SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
SCpnt, done);
}
return 0;
......@@ -2375,75 +1920,46 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
return 0;
}
/*
* This function is called in order to complete all outstanding SBP-2
* commands (in case of resets, etc.).
*/
static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
u32 status)
static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_fwhost_info *hi = lu->hi;
struct list_head *lh;
struct sbp2_command_info *command;
struct sbp2_command_info *cmd;
unsigned long flags;
SBP2_DEBUG_ENTER();
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
SBP2_DEBUG("Found pending command to complete");
lh = scsi_id->sbp2_command_orb_inuse.next;
command = list_entry(lh, struct sbp2_command_info, list);
pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(scsi_id, command);
if (command->Current_SCpnt) {
command->Current_SCpnt->result = status << 16;
command->Current_done(command->Current_SCpnt);
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
while (!list_empty(&lu->cmd_orb_inuse)) {
lh = lu->cmd_orb_inuse.next;
cmd = list_entry(lh, struct sbp2_command_info, list);
dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(lu, cmd);
if (cmd->Current_SCpnt) {
cmd->Current_SCpnt->result = status << 16;
cmd->Current_done(cmd->Current_SCpnt);
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return;
}
/*
* This function is called in order to complete a regular SBP-2 command.
*
* This can be called in interrupt context.
* Complete a regular SCSI command. Can be called in atomic context.
*/
static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
u32 scsi_status, struct scsi_cmnd *SCpnt,
static void sbp2scsi_complete_command(struct sbp2_lu *lu, u32 scsi_status,
struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
SBP2_DEBUG_ENTER();
/*
* Sanity
*/
if (!SCpnt) {
SBP2_ERR("SCpnt is NULL");
return;
}
/*
* If a bus reset is in progress and there was an error, don't
* complete the command, just let it get retried at the end of the
* bus reset.
*/
if (!hpsb_node_entry_valid(scsi_id->ne)
&& (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
SBP2_ERR("Bus reset in progress - retry command later");
return;
}
/*
* Switch on scsi status
*/
switch (scsi_status) {
case SBP2_SCSI_STATUS_GOOD:
SCpnt->result = DID_OK << 16;
......@@ -2455,12 +1971,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
break;
case SBP2_SCSI_STATUS_CHECK_CONDITION:
SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16;
#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
scsi_print_command(SCpnt);
scsi_print_sense(SBP2_DEVICE_NAME, SCpnt);
#endif
break;
case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
......@@ -2482,118 +1993,88 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
SCpnt->result = DID_ERROR << 16;
}
/*
* If a bus reset is in progress and there was an error, complete
* the command as busy so that it will get retried.
*/
if (!hpsb_node_entry_valid(scsi_id->ne)
/* If a bus reset is in progress and there was an error, complete
* the command as busy so that it will get retried. */
if (!hpsb_node_entry_valid(lu->ne)
&& (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
SBP2_ERR("Completing command with busy (bus reset)");
SCpnt->result = DID_BUS_BUSY << 16;
}
/*
* If a unit attention occurs, return busy status so it gets
* retried... it could have happened because of a 1394 bus reset
* or hot-plug...
* XXX DID_BUS_BUSY is actually a bad idea because it will defy
* the scsi layer's retry logic.
*/
#if 0
if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
(SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
SBP2_DEBUG("UNIT ATTENTION - return busy");
SCpnt->result = DID_BUS_BUSY << 16;
}
#endif
/*
* Tell scsi stack that we're done with this command
*/
/* Tell the SCSI stack that we're done with this command. */
done(SCpnt);
}
static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)sdev->host->hostdata[0];
struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
scsi_id->sdev = sdev;
lu->sdev = sdev;
sdev->allow_restart = 1;
if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
return 0;
}
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)sdev->host->hostdata[0];
struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
sdev->use_10_for_rw = 1;
if (sdev->type == TYPE_DISK &&
scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
sdev->skip_ms_page_8 = 1;
if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
sdev->fix_capacity = 1;
return 0;
}
static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
{
((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
((struct sbp2_lu *)sdev->host->hostdata[0])->sdev = NULL;
return;
}
/*
* Called by scsi stack when something has really gone wrong. Usually
* called when a command has timed-out for some reason.
* Called by scsi stack when something has really gone wrong.
* Usually called when a command has timed-out for some reason.
*/
static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_command_info *command;
struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
struct sbp2_fwhost_info *hi = lu->hi;
struct sbp2_command_info *cmd;
unsigned long flags;
SBP2_ERR("aborting sbp2 command");
SBP2_INFO("aborting sbp2 command");
scsi_print_command(SCpnt);
if (sbp2util_node_is_available(scsi_id)) {
/*
* Right now, just return any matching command structures
* to the free pool.
*/
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
if (command) {
SBP2_DEBUG("Found command to abort");
pci_dma_sync_single_for_cpu(hi->host->pdev,
command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
pci_dma_sync_single_for_cpu(hi->host->pdev,
command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(scsi_id, command);
if (command->Current_SCpnt) {
command->Current_SCpnt->result = DID_ABORT << 16;
command->Current_done(command->Current_SCpnt);
if (sbp2util_node_is_available(lu)) {
sbp2_agent_reset(lu, 1);
/* Return a matching command structure to the free pool. */
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
if (cmd) {
dma_sync_single_for_cpu(&hi->host->device,
cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(lu, cmd);
if (cmd->Current_SCpnt) {
cmd->Current_SCpnt->result = DID_ABORT << 16;
cmd->Current_done(cmd->Current_SCpnt);
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
/*
* Initiate a fetch agent reset.
*/
sbp2_agent_reset(scsi_id, 1);
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
}
return SUCCESS;
......@@ -2604,14 +2085,13 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
*/
static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
SBP2_ERR("reset requested");
SBP2_INFO("reset requested");
if (sbp2util_node_is_available(scsi_id)) {
SBP2_ERR("Generating sbp2 fetch agent reset");
sbp2_agent_reset(scsi_id, 1);
if (sbp2util_node_is_available(lu)) {
SBP2_INFO("generating sbp2 fetch agent reset");
sbp2_agent_reset(lu, 1);
}
return SUCCESS;
......@@ -2622,90 +2102,50 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
char *buf)
{
struct scsi_device *sdev;
struct scsi_id_instance_data *scsi_id;
int lun;
struct sbp2_lu *lu;
if (!(sdev = to_scsi_device(dev)))
return 0;
if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
if (!(lu = (struct sbp2_lu *)sdev->host->hostdata[0]))
return 0;
lun = ORB_SET_LUN(scsi_id->sbp2_lun);
return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
scsi_id->ud->id, lun);
return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)lu->ne->guid,
lu->ud->id, ORB_SET_LUN(lu->lun));
}
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
&dev_attr_ieee1394_id,
NULL
};
MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
MODULE_LICENSE("GPL");
/* SCSI host template */
static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = SBP2_DEVICE_NAME,
.queuecommand = sbp2scsi_queuecommand,
.eh_abort_handler = sbp2scsi_abort,
.eh_device_reset_handler = sbp2scsi_reset,
.slave_alloc = sbp2scsi_slave_alloc,
.slave_configure = sbp2scsi_slave_configure,
.slave_destroy = sbp2scsi_slave_destroy,
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.cmd_per_lun = SBP2_MAX_CMDS,
.can_queue = SBP2_MAX_CMDS,
.emulated = 1,
.sdev_attrs = sbp2_sysfs_sdev_attrs,
};
static int sbp2_module_init(void)
{
int ret;
SBP2_DEBUG_ENTER();
/* Module load debug option to force one command at a time (serializing I/O) */
if (serialize_io) {
SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
SBP2_INFO("Try serialize_io=0 for better performance");
scsi_driver_template.can_queue = 1;
scsi_driver_template.cmd_per_lun = 1;
if (sbp2_serialize_io) {
sbp2_shost_template.can_queue = 1;
sbp2_shost_template.cmd_per_lun = 1;
}
if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
(max_sectors * 512) > (128 * 1024))
max_sectors = 128 * 1024 / 512;
scsi_driver_template.max_sectors = max_sectors;
(sbp2_max_sectors * 512) > (128 * 1024))
sbp2_max_sectors = 128 * 1024 / 512;
sbp2_shost_template.max_sectors = sbp2_max_sectors;
/* Register our high level driver with 1394 stack */
hpsb_register_highlevel(&sbp2_highlevel);
ret = hpsb_register_protocol(&sbp2_driver);
if (ret) {
SBP2_ERR("Failed to register protocol");
hpsb_unregister_highlevel(&sbp2_highlevel);
return ret;
}
return 0;
}
static void __exit sbp2_module_exit(void)
{
SBP2_DEBUG_ENTER();
hpsb_unregister_protocol(&sbp2_driver);
hpsb_unregister_highlevel(&sbp2_highlevel);
}
......
......@@ -25,25 +25,25 @@
#define SBP2_DEVICE_NAME "sbp2"
/*
* SBP2 specific structures and defines
* SBP-2 specific definitions
*/
#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31)
#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
#define ORB_SET_STATUS_FIFO_HI(value, id) (value >> 32 | ORB_SET_NODE_ID(id))
#define ORB_SET_STATUS_FIFO_LO(value) (value & 0xffffffff)
#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
#define ORB_SET_NULL_PTR(v) (((v) & 0x1) << 31)
#define ORB_SET_NOTIFY(v) (((v) & 0x1) << 31)
#define ORB_SET_RQ_FMT(v) (((v) & 0x3) << 29)
#define ORB_SET_NODE_ID(v) (((v) & 0xffff) << 16)
#define ORB_SET_STATUS_FIFO_HI(v, id) ((v) >> 32 | ORB_SET_NODE_ID(id))
#define ORB_SET_STATUS_FIFO_LO(v) ((v) & 0xffffffff)
#define ORB_SET_DATA_SIZE(v) ((v) & 0xffff)
#define ORB_SET_PAGE_SIZE(v) (((v) & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(v) (((v) & 0x1) << 19)
#define ORB_SET_MAX_PAYLOAD(v) (((v) & 0xf) << 20)
#define ORB_SET_SPEED(v) (((v) & 0x7) << 24)
#define ORB_SET_DIRECTION(v) (((v) & 0x1) << 27)
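The rewrite of these helpers only adds parentheses around the macro arguments so that compound expressions expand safely; field positions and widths are unchanged. As a brief illustration of how they compose the first quadlet of a command ORB, here is a sketch with made-up payload and speed codes, not driver code.

/* Illustrative only: build the 'misc' quadlet of a command ORB. */
static u32 example_command_orb_misc(u32 max_payload_code, u32 speed_code)
{
	u32 misc = 0;

	misc |= ORB_SET_MAX_PAYLOAD(max_payload_code);	/* bits 20..23 */
	misc |= ORB_SET_SPEED(speed_code);		/* bits 24..26 */
	misc |= ORB_SET_DIRECTION(ORB_DIRECTION_READ_FROM_MEDIA); /* bit 27 */
	misc |= ORB_SET_NOTIFY(1);			/* bit 31: ask for status */

	return misc;
}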
struct sbp2_command_orb {
u32 next_ORB_hi;
......@@ -64,12 +64,12 @@ struct sbp2_command_orb {
#define SBP2_LOGICAL_UNIT_RESET 0xe
#define SBP2_TARGET_RESET_REQUEST 0xf
#define ORB_SET_LUN(value) (value & 0xffff)
#define ORB_SET_FUNCTION(value) ((value & 0xf) << 16)
#define ORB_SET_RECONNECT(value) ((value & 0xf) << 20)
#define ORB_SET_EXCLUSIVE(value) ((value & 0x1) << 28)
#define ORB_SET_LOGIN_RESP_LENGTH(value) (value & 0xffff)
#define ORB_SET_PASSWD_LENGTH(value) ((value & 0xffff) << 16)
#define ORB_SET_LUN(v) ((v) & 0xffff)
#define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16)
#define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20)
#define ORB_SET_EXCLUSIVE(v) (((v) & 0x1) << 28)
#define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff)
#define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16)
struct sbp2_login_orb {
u32 password_hi;
......@@ -82,9 +82,9 @@ struct sbp2_login_orb {
u32 status_fifo_lo;
} __attribute__((packed));
#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
#define RESPONSE_GET_LENGTH(value) ((value >> 16) & 0xffff)
#define RESPONSE_GET_RECONNECT_HOLD(value) (value & 0xffff)
#define RESPONSE_GET_LOGIN_ID(v) ((v) & 0xffff)
#define RESPONSE_GET_LENGTH(v) (((v) >> 16) & 0xffff)
#define RESPONSE_GET_RECONNECT_HOLD(v) ((v) & 0xffff)
struct sbp2_login_response {
u32 length_login_ID;
......@@ -93,9 +93,8 @@ struct sbp2_login_response {
u32 reconnect_hold;
} __attribute__((packed));
#define ORB_SET_LOGIN_ID(value) (value & 0xffff)
#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(value) (value & 0xffff)
#define ORB_SET_LOGIN_ID(v) ((v) & 0xffff)
#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(v) ((v) & 0xffff)
struct sbp2_query_logins_orb {
u32 reserved1;
......@@ -108,8 +107,8 @@ struct sbp2_query_logins_orb {
u32 status_fifo_lo;
} __attribute__((packed));
#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff)
#define RESPONSE_GET_ACTIVE_LOGINS(value) ((RESPONSE_GET_LENGTH(value) - 4) / 12)
#define RESPONSE_GET_MAX_LOGINS(v) ((v) & 0xffff)
#define RESPONSE_GET_ACTIVE_LOGINS(v) ((RESPONSE_GET_LENGTH((v)) - 4) / 12)
struct sbp2_query_logins_response {
u32 length_max_logins;
......@@ -140,8 +139,8 @@ struct sbp2_logout_orb {
u32 status_fifo_lo;
} __attribute__((packed));
#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
#define PAGE_TABLE_SET_SEGMENT_LENGTH(value) ((value & 0xffff) << 16)
#define PAGE_TABLE_SET_SEGMENT_BASE_HI(v) ((v) & 0xffff)
#define PAGE_TABLE_SET_SEGMENT_LENGTH(v) (((v) & 0xffff) << 16)
struct sbp2_unrestricted_page_table {
u32 length_segment_base_hi;
......@@ -171,23 +170,14 @@ struct sbp2_unrestricted_page_table {
#define SFMT_DEFERRED_ERROR 0x1
#define SFMT_VENDOR_DEPENDENT_STATUS 0x3
#define SBP2_SCSI_STATUS_GOOD 0x0
#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
#define SBP2_SCSI_STATUS_BUSY 0x8
#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
#define STATUS_GET_SRC(value) (((value) >> 30) & 0x3)
#define STATUS_GET_RESP(value) (((value) >> 28) & 0x3)
#define STATUS_GET_LEN(value) (((value) >> 24) & 0x7)
#define STATUS_GET_SBP_STATUS(value) (((value) >> 16) & 0xff)
#define STATUS_GET_ORB_OFFSET_HI(value) ((value) & 0x0000ffff)
#define STATUS_TEST_DEAD(value) ((value) & 0x08000000)
#define STATUS_GET_SRC(v) (((v) >> 30) & 0x3)
#define STATUS_GET_RESP(v) (((v) >> 28) & 0x3)
#define STATUS_GET_LEN(v) (((v) >> 24) & 0x7)
#define STATUS_GET_SBP_STATUS(v) (((v) >> 16) & 0xff)
#define STATUS_GET_ORB_OFFSET_HI(v) ((v) & 0x0000ffff)
#define STATUS_TEST_DEAD(v) ((v) & 0x08000000)
/* test 'resp' | 'dead' | 'sbp2_status' */
#define STATUS_TEST_RDS(value) ((value) & 0x38ff0000)
#define STATUS_TEST_RDS(v) ((v) & 0x38ff0000)
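These helpers extract fields from the first quadlet of a status block once it has been converted to CPU byte order. A short decoding sketch, illustrative only; h would be sb->ORB_offset_hi_misc as used in sbp2_handle_status_write().

/* Illustrative only: pull the interesting fields out of a status quadlet. */
static void example_decode_status(u32 h)
{
	u32 resp       = STATUS_GET_RESP(h);       /* compare with RESP_STATUS_* */
	u32 sbp_status = STATUS_GET_SBP_STATUS(h);
	u32 len        = STATUS_GET_LEN(h);        /* driver treats > 1 as "sense data present" */
	int dead       = STATUS_TEST_DEAD(h) != 0; /* fetch agent needs a reset */

	(void)resp; (void)sbp_status; (void)len; (void)dead;
}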
struct sbp2_status_block {
u32 ORB_offset_hi_misc;
......@@ -195,66 +185,70 @@ struct sbp2_status_block {
u8 command_set_dependent[24];
} __attribute__((packed));
/*
* Miscellaneous SBP2 related config rom defines
* SBP2 related configuration ROM definitions
*/
#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
#define SBP2_CSR_OFFSET_KEY 0x54
#define SBP2_UNIT_SPEC_ID_KEY 0x12
#define SBP2_UNIT_SW_VERSION_KEY 0x13
#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
#define SBP2_COMMAND_SET_KEY 0x39
#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
#define SBP2_FIRMWARE_REVISION_KEY 0x3c
#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
#define SBP2_CSR_OFFSET_KEY 0x54
#define SBP2_UNIT_SPEC_ID_KEY 0x12
#define SBP2_UNIT_SW_VERSION_KEY 0x13
#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
#define SBP2_COMMAND_SET_KEY 0x39
#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
#define SBP2_FIRMWARE_REVISION_KEY 0x3c
#define SBP2_AGENT_STATE_OFFSET 0x00ULL
#define SBP2_AGENT_RESET_OFFSET 0x04ULL
#define SBP2_ORB_POINTER_OFFSET 0x08ULL
#define SBP2_DOORBELL_OFFSET 0x10ULL
#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
#define SBP2_AGENT_STATE_OFFSET 0x00ULL
#define SBP2_AGENT_RESET_OFFSET 0x04ULL
#define SBP2_ORB_POINTER_OFFSET 0x08ULL
#define SBP2_DOORBELL_OFFSET 0x10ULL
#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
#define SBP2_BUSY_TIMEOUT_VALUE 0xf
#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
/* biggest possible value for Single Phase Retry count is 0xf */
#define SBP2_BUSY_TIMEOUT_VALUE 0xf
#define SBP2_AGENT_RESET_DATA 0xf
#define SBP2_AGENT_RESET_DATA 0xf
/*
* Unit spec id and sw version entry for SBP-2 devices
*/
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
/*
* SCSI specific stuff
* SCSI specific definitions
*/
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2_MAX_SECTORS 255
/* There is no real limitation of the queue depth (i.e. length of the linked
* list of command ORBs) at the target. The chosen depth is merely an
* implementation detail of the sbp2 driver. */
#define SBP2_MAX_CMDS 8
#define SBP2_SCSI_STATUS_GOOD 0x0
#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
#define SBP2_SCSI_STATUS_BUSY 0x8
#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
/* Flags for detected oddities and brokenness */
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
#define SBP2_WORKAROUND_OVERRIDE 0x100
/* This is the two dma types we use for cmd_dma below */
enum cmd_dma_types {
/*
* Representations of commands and devices
*/
enum sbp2_dma_types {
CMD_DMA_NONE,
CMD_DMA_PAGE,
CMD_DMA_SINGLE
};
/*
* Encapsulates all the info necessary for an outstanding command.
*/
/* Per SCSI command */
struct sbp2_command_info {
struct list_head list;
struct sbp2_command_orb command_orb ____cacheline_aligned;
dma_addr_t command_orb_dma ____cacheline_aligned;
......@@ -262,25 +256,25 @@ struct sbp2_command_info {
void (*Current_done)(struct scsi_cmnd *);
/* Also need s/g structure for each sbp2 command */
struct sbp2_unrestricted_page_table scatter_gather_element[SG_ALL] ____cacheline_aligned;
struct sbp2_unrestricted_page_table
scatter_gather_element[SG_ALL] ____cacheline_aligned;
dma_addr_t sge_dma ____cacheline_aligned;
void *sge_buffer;
dma_addr_t cmd_dma;
enum cmd_dma_types dma_type;
enum sbp2_dma_types dma_type;
unsigned long dma_size;
int dma_dir;
enum dma_data_direction dma_dir;
};
struct sbp2scsi_host_info;
/* Per FireWire host */
struct sbp2_fwhost_info {
struct hpsb_host *host;
struct list_head logical_units;
};
/*
* Information needed on a per scsi id basis (one for each sbp2 device)
*/
struct scsi_id_instance_data {
/*
* Various sbp2 specific structures
*/
/* Per logical unit */
struct sbp2_lu {
/* Operation request blocks */
struct sbp2_command_orb *last_orb;
dma_addr_t last_orb_dma;
struct sbp2_login_orb *login_orb;
......@@ -297,116 +291,59 @@ struct scsi_id_instance_data {
dma_addr_t logout_orb_dma;
struct sbp2_status_block status_block;
/*
* Stuff we need to know about the sbp2 device itself
*/
u64 sbp2_management_agent_addr;
u64 sbp2_command_block_agent_addr;
/* How to talk to the unit */
u64 management_agent_addr;
u64 command_block_agent_addr;
u32 speed_code;
u32 max_payload_size;
u16 lun;
/*
* Values pulled from the device's unit directory
*/
u32 sbp2_command_set_spec_id;
u32 sbp2_command_set;
u32 sbp2_unit_characteristics;
u32 sbp2_lun;
u32 sbp2_firmware_revision;
/*
* Address for the device to write status blocks to
*/
/* Address for the unit to write status blocks to */
u64 status_fifo_addr;
/*
* Waitqueue flag for logins, reconnects, logouts, query logins
*/
int access_complete:1;
/* Waitqueue flag for logins, reconnects, logouts, query logins */
unsigned int access_complete:1;
/*
* Pool of command orbs, so we can have more than one overlapped command per id
*/
spinlock_t sbp2_command_orb_lock;
struct list_head sbp2_command_orb_inuse;
struct list_head sbp2_command_orb_completed;
/* Pool of command ORBs for this logical unit */
spinlock_t cmd_orb_lock;
struct list_head cmd_orb_inuse;
struct list_head cmd_orb_completed;
struct list_head scsi_list;
/* Backlink to FireWire host; list of units attached to the host */
struct sbp2_fwhost_info *hi;
struct list_head lu_list;
/* Node entry, as retrieved from NodeMgr entries */
/* IEEE 1394 core's device representations */
struct node_entry *ne;
struct unit_directory *ud;
/* A backlink to our host_info */
struct sbp2scsi_host_info *hi;
/* SCSI related pointers */
/* SCSI core's device representations */
struct scsi_device *sdev;
struct Scsi_Host *scsi_host;
struct Scsi_Host *shost;
/* Device specific workarounds/brokenness */
unsigned workarounds;
/* Connection state */
atomic_t state;
struct delayed_work protocol_work;
/* For deferred requests to the fetch agent */
struct work_struct protocol_work;
};
/* For use in scsi_id_instance_data.state */
/* For use in sbp2_lu.state */
enum sbp2lu_state_types {
SBP2LU_STATE_RUNNING, /* all normal */
SBP2LU_STATE_IN_RESET, /* between bus reset and reconnect */
SBP2LU_STATE_IN_SHUTDOWN /* when sbp2_remove was called */
};
/* Sbp2 host data structure (one per IEEE1394 host) */
struct sbp2scsi_host_info {
struct hpsb_host *host; /* IEEE1394 host */
struct list_head scsi_ids; /* List of scsi ids on this host */
};
/*
* Function prototypes
*/
/*
* Various utility prototypes
*/
static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id);
static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id);
static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt);
static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *Current_SCpnt,
void (*Current_done)(struct scsi_cmnd *));
static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command);
static int sbp2_start_device(struct scsi_id_instance_data *scsi_id);
static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id);
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
u64 addr, size_t length, u16 flags);
static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
u64 addr, size_t length, u16 flags);
#endif
/*
* SBP-2 protocol related prototypes
*/
static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id);
static int sbp2_login_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
quadlet_t *data, u64 addr, size_t length, u16 flags);
static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
unchar *sense_data);
static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
struct unit_directory *ud);
static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id);
static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id);
/* For use in sbp2_lu.workarounds and in the corresponding
* module load parameter */
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
#define SBP2_WORKAROUND_OVERRIDE 0x100
#endif /* SBP2_H */
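These bits are meant to be OR-ed together, both in lu->workarounds and, assuming the sbp2 module exposes them through a 'workarounds' load parameter as the comment suggests, on the modprobe command line. A hedged example with a made-up combination:

/* Illustrative only: enable two quirks for a hypothetical bridge. */
u32 example_quirks = SBP2_WORKAROUND_INQUIRY_36 |	/* 0x2 */
		     SBP2_WORKAROUND_MODE_SENSE_8;	/* 0x4 */

/* On the command line this would correspond to something like
 *	modprobe sbp2 workarounds=0x6
 * if the module parameter is indeed named 'workarounds'. */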
......@@ -714,8 +714,8 @@ static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
return ret;
}
static int __video1394_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
static long video1394_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct ti_ohci *ohci = ctx->ohci;
......@@ -884,13 +884,14 @@ static int __video1394_ioctl(struct file *file,
struct dma_iso_ctx *d;
int next_prg;
if (copy_from_user(&v, argp, sizeof(v)))
if (unlikely(copy_from_user(&v, argp, sizeof(v))))
return -EFAULT;
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
if (d == NULL) return -EFAULT;
if (unlikely(d == NULL))
return -EFAULT;
if ((v.buffer<0) || (v.buffer>=d->num_desc - 1)) {
if (unlikely((v.buffer<0) || (v.buffer>=d->num_desc - 1))) {
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d out of range",v.buffer);
return -EINVAL;
......@@ -898,7 +899,7 @@ static int __video1394_ioctl(struct file *file,
spin_lock_irqsave(&d->lock,flags);
if (d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED) {
if (unlikely(d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED)) {
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
......@@ -949,13 +950,14 @@ static int __video1394_ioctl(struct file *file,
struct dma_iso_ctx *d;
int i = 0;
if (copy_from_user(&v, argp, sizeof(v)))
if (unlikely(copy_from_user(&v, argp, sizeof(v))))
return -EFAULT;
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
if (d == NULL) return -EFAULT;
if (unlikely(d == NULL))
return -EFAULT;
if ((v.buffer<0) || (v.buffer>d->num_desc - 1)) {
if (unlikely((v.buffer<0) || (v.buffer>d->num_desc - 1))) {
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d out of range",v.buffer);
return -EINVAL;
......@@ -1008,7 +1010,7 @@ static int __video1394_ioctl(struct file *file,
spin_unlock_irqrestore(&d->lock, flags);
v.buffer=i;
if (copy_to_user(argp, &v, sizeof(v)))
if (unlikely(copy_to_user(argp, &v, sizeof(v))))
return -EFAULT;
return 0;
......@@ -1156,15 +1158,6 @@ static int __video1394_ioctl(struct file *file,
}
}
static long video1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int err;
lock_kernel();
err = __video1394_ioctl(file, cmd, arg);
unlock_kernel();
return err;
}
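With the lock_kernel()/unlock_kernel() wrapper removed, video1394_ioctl() has the prototype of an unlocked ioctl handler and is called without the Big Kernel Lock. A sketch of how such a handler is typically wired into file_operations; the field list here is a placeholder, not a verbatim copy of video1394's fops.

#include <linux/fs.h>
#include <linux/module.h>

/* Illustrative only: an unlocked ioctl handler is hooked up via
 * .unlocked_ioctl, so no BKL is taken on entry. */
static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video1394_ioctl,
	.mmap		= video1394_mmap,
	.poll		= video1394_poll,
	.release	= video1394_release,
};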
/*
* This maps the vmalloced and reserved buffer to user space.
*
......@@ -1177,17 +1170,14 @@ static long video1394_ioctl(struct file *file, unsigned int cmd, unsigned long a
static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
int res = -EINVAL;
lock_kernel();
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ctx->ohci->host->id,
"Current iso context not set");
} else
res = dma_region_mmap(&ctx->current_ctx->dma, file, vma);
unlock_kernel();
return -EINVAL;
}
return res;
return dma_region_mmap(&ctx->current_ctx->dma, file, vma);
}
static unsigned int video1394_poll(struct file *file, poll_table *pt)
......@@ -1198,14 +1188,12 @@ static unsigned int video1394_poll(struct file *file, poll_table *pt)
struct dma_iso_ctx *d;
int i;
lock_kernel();
ctx = file->private_data;
d = ctx->current_ctx;
if (d == NULL) {
PRINT(KERN_ERR, ctx->ohci->host->id,
"Current iso context not set");
mask = POLLERR;
goto done;
return POLLERR;
}
poll_wait(file, &d->waitq, pt);
......@@ -1218,8 +1206,6 @@ static unsigned int video1394_poll(struct file *file, poll_table *pt)
}
}
spin_unlock_irqrestore(&d->lock, flags);
done:
unlock_kernel();
return mask;
}
......@@ -1255,7 +1241,6 @@ static int video1394_release(struct inode *inode, struct file *file)
struct list_head *lh, *next;
u64 mask;
lock_kernel();
list_for_each_safe(lh, next, &ctx->context_list) {
struct dma_iso_ctx *d;
d = list_entry(lh, struct dma_iso_ctx, link);
......@@ -1276,7 +1261,6 @@ static int video1394_release(struct inode *inode, struct file *file)
kfree(ctx);
file->private_data = NULL;
unlock_kernel();
return 0;
}
......@@ -1324,12 +1308,8 @@ static struct ieee1394_device_id video1394_id_table[] = {
MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
static struct hpsb_protocol_driver video1394_driver = {
.name = "1394 Digital Camera Driver",
.name = VIDEO1394_DRIVER_NAME,
.id_table = video1394_id_table,
.driver = {
.name = VIDEO1394_DRIVER_NAME,
.bus = &ieee1394_bus_type,
},
};
......