Commit 2d530569 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (82 commits)
  firewire: core: add forgotten dummy driver methods, remove unused ones
  firewire: add isochronous multichannel reception
  firewire: core: small clarifications in core-cdev
  firewire: core: remove unused code
  firewire: ohci: release channel in error path
  firewire: ohci: use memory barriers to order descriptor updates
  tools/firewire: nosy-dump: increment program version
  tools/firewire: nosy-dump: remove unused code
  tools/firewire: nosy-dump: use linux/firewire-constants.h
  tools/firewire: nosy-dump: break up a deeply nested function
  tools/firewire: nosy-dump: make some symbols static or const
  tools/firewire: nosy-dump: change to kernel coding style
  tools/firewire: nosy-dump: work around segfault in decode_fcp
  tools/firewire: nosy-dump: fix it on x86-64
  tools/firewire: add userspace front-end of nosy
  firewire: nosy: note ioctls in ioctl-number.txt
  firewire: nosy: use generic printk macros
  firewire: nosy: endianess fixes and annotations
  firewire: nosy: annotate __user pointers and __iomem pointers
  firewire: nosy: fix device shutdown with active client
  ...
......@@ -79,6 +79,7 @@ Code Seq#(hex) Include File Comments
0x22 all scsi/sg.h
'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem
'$' 00-0F linux/perf_counter.h, linux/perf_event.h
'&' 00-07 drivers/firewire/nosy-user.h
'1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl
<ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
'2' 01-04 linux/i2o.h
......
......@@ -2324,6 +2324,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
S: Maintained
F: drivers/firewire/
F: include/linux/firewire*.h
F: tools/firewire/
FIRMWARE LOADER (request_firmware)
S: Orphan
......
......@@ -66,4 +66,28 @@ config FIREWIRE_NET
source "drivers/ieee1394/Kconfig"
config FIREWIRE_NOSY
tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
depends on PCI
help
Nosy is an IEEE 1394 packet sniffer that is used for protocol
analysis and in development of IEEE 1394 drivers, applications,
or firmwares.
This driver lets you use a Texas Instruments PCILynx 1394 to PCI
link layer controller TSB12LV21/A/B as a low-budget bus analyzer.
PCILynx is a nowadays very rare IEEE 1394 controller which is
not OHCI 1394 compliant.
The following cards are known to be based on PCILynx or PCILynx-2:
IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
(PCI card), Newer Technology FireWire 2 Go (CardBus card),
Apple Power Mac G3 blue & white (onboard controller).
To compile this driver as a module, say M here: The module will be
called nosy. Source code of a userspace interface to nosy, called
nosy-dump, can be found in tools/firewire/ of the kernel sources.
If unsure, say N.
endmenu
......@@ -12,3 +12,4 @@ obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
......@@ -204,17 +204,62 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
static int reset_bus(struct fw_card *card, bool short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
return card->driver->update_phy_reg(card, reg, 0, bit);
}
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
/* We don't try hard to sort out requests of long vs. short resets. */
card->br_short = short_reset;
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
if (!schedule_delayed_work(&card->br_work,
delayed ? DIV_ROUND_UP(HZ, 100) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
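The fw_card_get()/fw_card_put() pairing around schedule_delayed_work() is the usual pattern for pinning an object to a pending work item: a reference is taken up front and dropped again if the work was already queued, so the pending br_work always holds exactly one card reference, which br_work() releases when it runs. A caller-side sketch (illustrative only, mirroring the call sites elsewhere in this commit):

	/* Request a short bus reset right away: */
	fw_schedule_bus_reset(card, false, true);

	/* ...or defer it by about 10 ms (DIV_ROUND_UP(HZ, 100)) so that a
	 * burst of requests is combined into a single reset: */
	fw_schedule_bus_reset(card, true, true);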
static void br_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, br_work.work);
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
if (!schedule_delayed_work(&card->br_work, 2 * HZ))
fw_card_put(card);
return;
}
fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
FW_PHY_CONFIG_CURRENT_GAP_COUNT);
reset_bus(card, card->br_short);
fw_card_put(card);
}
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
&bandwidth, true, card->bm_transaction_data);
if (channel == 31) {
if (!card->broadcast_channel_allocated) {
fw_iso_resource_manage(card, generation, 1ULL << 31,
&channel, &bandwidth, true,
card->bm_transaction_data);
if (channel != 31) {
fw_notify("failed to allocate broadcast channel\n");
return;
}
card->broadcast_channel_allocated = true;
device_for_each_child(card->device, (void *)(long)generation,
fw_device_set_broadcast_channel);
}
device_for_each_child(card->device, (void *)(long)generation,
fw_device_set_broadcast_channel);
}
static const char gap_count_table[] = {
......@@ -224,27 +269,26 @@ static const char gap_count_table[] = {
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
if (!schedule_delayed_work(&card->work, delay))
if (!schedule_delayed_work(&card->bm_work, delay))
fw_card_put(card);
}
static void fw_card_bm_work(struct work_struct *work)
static void bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
unsigned long flags;
int root_id, new_root_id, irm_id, local_id;
int root_id, new_root_id, irm_id, bm_id, local_id;
int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;
spin_lock_irqsave(&card->lock, flags);
spin_lock_irq(&card->lock);
if (card->local_node == NULL) {
spin_unlock_irqrestore(&card->lock, flags);
spin_unlock_irq(&card->lock);
goto out_put_card;
}
......@@ -267,7 +311,8 @@ static void fw_card_bm_work(struct work_struct *work)
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if (is_next_generation(generation, card->bm_generation) ||
if ((is_next_generation(generation, card->bm_generation) &&
!card->bm_abdicate) ||
(card->bm_generation != generation && grace)) {
/*
* This first step is to figure out who is IRM and
......@@ -298,21 +343,26 @@ static void fw_card_bm_work(struct work_struct *work)
card->bm_transaction_data[0] = cpu_to_be32(0x3f);
card->bm_transaction_data[1] = cpu_to_be32(local_id);
spin_unlock_irqrestore(&card->lock, flags);
spin_unlock_irq(&card->lock);
rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
card->bm_transaction_data,
sizeof(card->bm_transaction_data));
card->bm_transaction_data, 8);
if (rcode == RCODE_GENERATION)
/* Another bus reset, BM work has been rescheduled. */
goto out;
if (rcode == RCODE_COMPLETE &&
card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
bm_id = be32_to_cpu(card->bm_transaction_data[0]);
spin_lock_irq(&card->lock);
if (rcode == RCODE_COMPLETE && generation == card->generation)
card->bm_node_id =
bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
spin_unlock_irq(&card->lock);
if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
......@@ -320,7 +370,17 @@ static void fw_card_bm_work(struct work_struct *work)
goto out;
}
spin_lock_irqsave(&card->lock, flags);
if (rcode == RCODE_SEND_ERROR) {
/*
* We have been unable to send the lock request due to
* some local problem. Let's try again later and hope
* that the problem has gone away by then.
*/
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
spin_lock_irq(&card->lock);
if (rcode != RCODE_COMPLETE) {
/*
......@@ -339,7 +399,7 @@ static void fw_card_bm_work(struct work_struct *work)
* We weren't BM in the last generation, and the last
* bus reset is less than 125ms ago. Reschedule this job.
*/
spin_unlock_irqrestore(&card->lock, flags);
spin_unlock_irq(&card->lock);
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
......@@ -362,14 +422,12 @@ static void fw_card_bm_work(struct work_struct *work)
* If we haven't probed this device yet, bail out now
* and let's try again once that's done.
*/
spin_unlock_irqrestore(&card->lock, flags);
spin_unlock_irq(&card->lock);
goto out;
} else if (root_device_is_cmc) {
/*
* FIXME: I suppose we should set the cmstr bit in the
* STATE_CLEAR register of this node, as described in
* 1394-1995, 8.4.2.6. Also, send out a force root
* packet for this node.
* We will send out a force root packet for this
* node as part of the gap count optimization.
*/
new_root_id = root_id;
} else {
......@@ -402,19 +460,33 @@ static void fw_card_bm_work(struct work_struct *work)
(card->gap_count != gap_count || new_root_id != root_id))
do_reset = true;
spin_unlock_irqrestore(&card->lock, flags);
spin_unlock_irq(&card->lock);
if (do_reset) {
fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
card->index, new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
fw_core_initiate_bus_reset(card, 1);
reset_bus(card, true);
/* Will allocate broadcast channel after the reset. */
} else {
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
goto out;
}
if (root_device_is_cmc) {
/*
* Make sure that the cycle master sends cycle start packets.
*/
card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
root_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_STATE_SET,
card->bm_transaction_data, 4);
if (rcode == RCODE_GENERATION)
goto out;
}
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
out:
fw_node_put(root_node);
out_put_card:
......@@ -432,17 +504,23 @@ void fw_card_initialize(struct fw_card *card,
card->device = device;
card->current_tlabel = 0;
card->tlabel_mask = 0;
card->split_timeout_hi = 0;
card->split_timeout_lo = 800 << 19;
card->split_timeout_cycles = 800;
card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
INIT_LIST_HEAD(&card->phy_receiver_list);
spin_lock_init(&card->lock);
card->local_node = NULL;
INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
INIT_DELAYED_WORK(&card->br_work, br_work);
INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
......@@ -468,20 +546,22 @@ int fw_card_add(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_add);
/*
* The next few functions implement a dummy driver that is used once a card
* driver shuts down an fw_card. This allows the driver to cleanly unload,
* as all IO to the card will be handled (and failed) by the dummy driver
* instead of calling into the module. Only functions for iso context
* shutdown still need to be provided by the card driver.
*
* .read/write_csr() should never be called anymore after the dummy driver
* was bound since they are only used within request handler context.
* .set_config_rom() is never called since the card is taken out of card_list
* before switching to the dummy driver.
*/
static int dummy_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
static int dummy_read_phy_reg(struct fw_card *card, int address)
{
BUG();
return -1;
return -ENODEV;
}
static int dummy_update_phy_reg(struct fw_card *card, int address,
......@@ -490,25 +570,14 @@ static int dummy_update_phy_reg(struct fw_card *card, int address,
return -ENODEV;
}
static int dummy_set_config_rom(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
/*
* We take the card out of card_list before setting the dummy
* driver, so this should never get called.
*/
BUG();
return -1;
}
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
packet->callback(packet, card, RCODE_CANCELLED);
}
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
packet->callback(packet, card, RCODE_CANCELLED);
}
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
......@@ -522,14 +591,40 @@ static int dummy_enable_phys_dma(struct fw_card *card,
return -ENODEV;
}
static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
return ERR_PTR(-ENODEV);
}
static int dummy_start_iso(struct fw_iso_context *ctx,
s32 cycle, u32 sync, u32 tags)
{
return -ENODEV;
}
static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
return -ENODEV;
}
static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
struct fw_iso_buffer *buffer, unsigned long payload)
{
return -ENODEV;
}
static const struct fw_card_driver dummy_driver_template = {
.enable = dummy_enable,
.update_phy_reg = dummy_update_phy_reg,
.set_config_rom = dummy_set_config_rom,
.send_request = dummy_send_request,
.cancel_packet = dummy_cancel_packet,
.send_response = dummy_send_response,
.enable_phys_dma = dummy_enable_phys_dma,
.read_phy_reg = dummy_read_phy_reg,
.update_phy_reg = dummy_update_phy_reg,
.send_request = dummy_send_request,
.send_response = dummy_send_response,
.cancel_packet = dummy_cancel_packet,
.enable_phys_dma = dummy_enable_phys_dma,
.allocate_iso_context = dummy_allocate_iso_context,
.start_iso = dummy_start_iso,
.set_iso_channels = dummy_set_iso_channels,
.queue_iso = dummy_queue_iso,
};
void fw_card_release(struct kref *kref)
......@@ -545,7 +640,7 @@ void fw_core_remove_card(struct fw_card *card)
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_core_initiate_bus_reset(card, 1);
fw_schedule_bus_reset(card, false, true);
mutex_lock(&card_mutex);
list_del_init(&card->link);
......@@ -565,12 +660,3 @@ void fw_core_remove_card(struct fw_card *card)
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
return card->driver->update_phy_reg(card, reg, 0, bit);
}
EXPORT_SYMBOL(fw_core_initiate_bus_reset);
This diff is collapsed.
......@@ -107,11 +107,11 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
}
/**
* fw_csr_string - reads a string from the configuration ROM
* @directory: e.g. root directory or unit directory
* @key: the key of the preceding directory entry
* @buf: where to put the string
* @size: size of @buf, in bytes
* fw_csr_string() - reads a string from the configuration ROM
* @directory: e.g. root directory or unit directory
* @key: the key of the preceding directory entry
* @buf: where to put the string
* @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
......@@ -1136,6 +1136,7 @@ static void fw_device_refresh(struct work_struct *work)
goto give_up;
}
fw_device_cdev_update(device);
create_units(device);
/* Userspace may want to re-read attributes. */
......
......@@ -118,6 +118,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
int i;
dma_addr_t address;
ssize_t offset;
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
offset = (ssize_t)completed - (ssize_t)address;
if (offset > 0 && offset <= PAGE_SIZE)
return (i << PAGE_SHIFT) + offset;
}
return 0;
}
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
......@@ -134,7 +151,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->channel = channel;
ctx->speed = speed;
ctx->header_size = header_size;
ctx->callback = callback;
ctx->callback.sc = callback;
ctx->callback_data = callback_data;
return ctx;
......@@ -143,9 +160,7 @@ EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
struct fw_card *card = ctx->card;
card->driver->free_iso_context(ctx);
ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
......@@ -156,14 +171,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
}
EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
return ctx->card->driver->set_iso_channels(ctx, channels);
}
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct fw_card *card = ctx->card;
return card->driver->queue_iso(ctx, packet, buffer, payload);
return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
......@@ -279,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
}
/**
* fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
* fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
......
......@@ -174,12 +174,7 @@ static inline struct fw_node *fw_node(struct list_head *l)
return list_entry(l, struct fw_node, link);
}
/**
* build_tree - Build the tree representation of the topology
* @self_ids: array of self IDs to create the tree from
* @self_id_count: the length of the self_ids array
* @local_id: the node ID of the local node
*
/*
* This function builds the tree representation of the topology given
* by the self IDs from the latest bus reset. During the construction
* of the tree, the function checks that the self IDs are valid and
......@@ -420,11 +415,10 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
}
}
/**
* update_tree - compare the old topology tree for card with the new
* one specified by root. Queue the nodes and mark them as either
* found, lost or updated. Update the nodes in the card topology tree
* as we go.
/*
* Compare the old topology tree for card with the new one specified by root.
* Queue the nodes and mark them as either found, lost or updated.
* Update the nodes in the card topology tree as we go.
*/
static void update_tree(struct fw_card *card, struct fw_node *root)
{
......@@ -524,7 +518,7 @@ static void update_topology_map(struct fw_card *card,
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
int self_id_count, u32 *self_ids)
int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
unsigned long flags;
......@@ -543,7 +537,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
spin_lock_irqsave(&card->lock, flags);
card->broadcast_channel_allocated = false;
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using
......@@ -552,6 +546,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
smp_wmb();
card->generation = generation;
card->reset_jiffies = jiffies;
card->bm_node_id = 0xffff;
card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count);
......
......@@ -246,7 +246,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
break;
default:
WARN(1, KERN_ERR "wrong tcode %d", tcode);
WARN(1, "wrong tcode %d", tcode);
}
common:
packet->speed = speed;
......@@ -273,43 +273,52 @@ static int allocate_tlabel(struct fw_card *card)
}
/**
* This function provides low-level access to the IEEE1394 transaction
* logic. Most C programs would use either fw_read(), fw_write() or
* fw_lock() instead - those function are convenience wrappers for
* this function. The fw_send_request() function is primarily
* provided as a flexible, one-stop entry point for languages bindings
* and protocol bindings.
* fw_send_request() - submit a request packet for transmission
* @card: interface to send the request at
* @t: transaction instance to which the request belongs
* @tcode: transaction code
* @destination_id: destination node ID, consisting of bus_ID and phy_ID
* @generation: bus generation in which request and response are valid
* @speed: transmission speed
* @offset: 48bit wide offset into destination's address space
* @payload: data payload for the request subaction
* @length: length of the payload, in bytes
* @callback: function to be called when the transaction is completed
* @callback_data: data to be passed to the transaction completion callback
*
* FIXME: Document this function further, in particular the possible
* values for rcode in the callback. In short, we map ACK_COMPLETE to
* RCODE_COMPLETE, internal errors set errno and set rcode to
* RCODE_SEND_ERROR (which is out of range for standard ieee1394
* rcodes). All other rcodes are forwarded unchanged. For all
* errors, payload is NULL, length is 0.
* Submit a request packet into the asynchronous request transmission queue.
* Can be called from atomic context. If you prefer a blocking API, use
* fw_run_transaction() in a context that can sleep.
*
* Can not expect the callback to be called before the function
* returns, though this does happen in some cases (ACK_COMPLETE and
* errors).
* In case of lock requests, specify one of the firewire-core specific %TCODE_
* constants instead of %TCODE_LOCK_REQUEST in @tcode.
*
* The payload is only used for write requests and must not be freed
* until the callback has been called.
* Make sure that the value in @destination_id is not older than the one in
* @generation. Otherwise the request is in danger to be sent to a wrong node.
*
* @param card the card from which to send the request
* @param tcode the tcode for this transaction. Do not use
* TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
* etc. to specify tcode and ext_tcode.
* @param node_id the destination node ID (bus ID and PHY ID concatenated)
* @param generation the generation for which node_id is valid
* @param speed the speed to use for sending the request
* @param offset the 48 bit offset on the destination node
* @param payload the data payload for the request subaction
* @param length the length in bytes of the data to read
* @param callback function to be called when the transaction is completed
* @param callback_data pointer to arbitrary data, which will be
* passed to the callback
*
* In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
* In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
* needs to synthesize @destination_id with fw_stream_packet_destination_id().
* It will contain tag, channel, and sy data instead of a node ID then.
*
* The payload buffer at @data is going to be DMA-mapped except in case of
* quadlet-sized payload or of local (loopback) requests. Hence make sure that
* the buffer complies with the restrictions for DMA-mapped memory. The
* @payload must not be freed before the @callback is called.
*
* In case of request types without payload, @data is NULL and @length is 0.
*
* After the transaction is completed successfully or unsuccessfully, the
* @callback will be called. Among its parameters is the response code which
* is either one of the rcodes per IEEE 1394 or, in case of internal errors,
* the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
* specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
* %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
* generation, or missing ACK respectively.
*
* Note some timing corner cases: fw_send_request() may complete much earlier
* than when the request packet actually hits the wire. On the other hand,
* transaction completion and hence execution of @callback may happen even
* before fw_send_request() returns.
*/
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int destination_id, int generation, int speed,
......@@ -339,7 +348,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
setup_timer(&t->split_timeout_timer,
split_transaction_timeout_callback, (unsigned long)t);
/* FIXME: start this timer later, relative to t->timestamp */
mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
mod_timer(&t->split_timeout_timer,
jiffies + card->split_timeout_jiffies);
t->callback = callback;
t->callback_data = callback_data;
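As a usage illustration of the interface documented above (not part of this commit, and assuming the fw_send_request() prototype and fw_transaction_callback_t from include/linux/firewire.h of this kernel generation), an asynchronous quadlet write driven to completion could look like this:

	struct example_ctx {
		struct completion done;
		int rcode;
	};

	static void example_write_done(struct fw_card *card, int rcode,
				       void *payload, size_t length, void *data)
	{
		struct example_ctx *ctx = data;

		ctx->rcode = rcode;	/* RCODE_COMPLETE, RCODE_SEND_ERROR, ... */
		complete(&ctx->done);
	}

	static int example_write_quadlet(struct fw_device *device,
					 unsigned long long offset, __be32 value)
	{
		struct fw_transaction t;	/* must stay valid until the callback ran */
		struct example_ctx ctx;

		init_completion(&ctx.done);
		/* Quadlet-sized payloads are not DMA-mapped (see above), so a
		 * stack buffer is fine here. */
		fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
				device->node_id, device->generation,
				device->max_speed, offset, &value, sizeof(value),
				example_write_done, &ctx);
		wait_for_completion(&ctx.done);

		return ctx.rcode == RCODE_COMPLETE ? 0 : -EIO;
	}

Waiting like this is of course what fw_run_transaction() below already does; fw_send_request() is for callers that cannot sleep or that want to keep several transactions in flight.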
......@@ -374,9 +384,11 @@ static void transaction_callback(struct fw_card *card, int rcode,
}
/**
* fw_run_transaction - send request and sleep until transaction is completed
* fw_run_transaction() - send request and sleep until transaction is completed
*
* Returns the RCODE.
* Returns the RCODE. See fw_send_request() for parameter documentation.
* Unlike fw_send_request(), @data points to the payload of the request or/and
* to the payload of the response.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
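For reference, a blocking caller of this wrapper looks like the lock transaction issued from bm_work() earlier in this commit; a minimal read sketch (illustrative only, with node_id/generation being the target node ID and the current bus generation):

	__be32 data;
	int rcode;

	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				   node_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_STATE_CLEAR,
				   &data, 4);
	if (rcode == RCODE_COMPLETE)
		pr_info("STATE_CLEAR: %08x\n", be32_to_cpu(data));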
......@@ -417,9 +429,21 @@ void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
PHY_CONFIG_ROOT_ID(node_id) |
PHY_CONFIG_GAP_COUNT(gap_count);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);
if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
data |= PHY_CONFIG_ROOT_ID(node_id);
if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
gap_count = card->driver->read_phy_reg(card, 1);
if (gap_count < 0)
return;
gap_count &= 63;
if (gap_count == 63)
return;
}
data |= PHY_CONFIG_GAP_COUNT(gap_count);
mutex_lock(&phy_config_mutex);
......@@ -494,9 +518,9 @@ static bool is_in_fcp_region(u64 offset, size_t length)
}
/**
* fw_core_add_address_handler - register for incoming requests
* @handler: callback
* @region: region in the IEEE 1212 node space address range
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
* @region: region in the IEEE 1212 node space address range
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
......@@ -519,8 +543,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
region->end & 0xffff000000000003ULL ||
region->start >= region->end ||
region->end > 0x0001000000000000ULL ||
handler->length & 3 ||
handler->length == 0)
return -EINVAL;
......@@ -551,7 +575,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
EXPORT_SYMBOL(fw_core_add_address_handler);
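A minimal registration sketch for this interface (illustrative only; it assumes the struct fw_address_handler and struct fw_address_region definitions from include/linux/firewire.h and uses the address_callback signature as changed by this commit, i.e. without the speed parameter; the region itself is an arbitrary example):

	static void example_request(struct fw_card *card, struct fw_request *request,
				    int tcode, int destination, int source,
				    int generation, unsigned long long offset,
				    void *payload, size_t length, void *callback_data)
	{
		/* Accept writes into the region, reject everything else. */
		if (tcode == TCODE_WRITE_QUADLET_REQUEST ||
		    tcode == TCODE_WRITE_BLOCK_REQUEST)
			fw_send_response(card, request, RCODE_COMPLETE);
		else
			fw_send_response(card, request, RCODE_TYPE_ERROR);
	}

	/* start, end and handler->length must be quadlet aligned, and the
	 * region must lie below 0x0001000000000000, as checked above. */
	static const struct fw_address_region example_region =
		{ .start = 0x000100000000ULL, .end = 0x000100000100ULL, };

	static struct fw_address_handler example_handler = {
		.length           = 0x100,
		.address_callback = example_request,
	};

	/* somewhere in driver initialization: */
	int ret = fw_core_add_address_handler(&example_handler, &example_region);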
/**
* fw_core_remove_address_handler - unregister an address handler
* fw_core_remove_address_handler() - unregister an address handler
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
......@@ -580,6 +604,41 @@ static void free_response_callback(struct fw_packet *packet,
kfree(request);
}
int fw_get_response_length(struct fw_request *r)
{
int tcode, ext_tcode, data_length;
tcode = HEADER_GET_TCODE(r->request_header[0]);
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
return 0;
case TCODE_READ_QUADLET_REQUEST:
return 4;
case TCODE_READ_BLOCK_REQUEST:
data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
return data_length;
case TCODE_LOCK_REQUEST:
ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
switch (ext_tcode) {
case EXTCODE_FETCH_ADD:
case EXTCODE_LITTLE_ADD:
return data_length;
default:
return data_length / 2;
}
default:
WARN(1, "wrong tcode %d", tcode);
return 0;
}
}
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length)
{
......@@ -631,18 +690,35 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
break;
default:
WARN(1, KERN_ERR "wrong tcode %d", tcode);
WARN(1, "wrong tcode %d", tcode);
}
response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);
static struct fw_request *allocate_request(struct fw_packet *p)
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
{
unsigned int cycles;
u32 timestamp;
cycles = card->split_timeout_cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
timestamp += (cycles / 8000) << 13;
timestamp |= cycles % 8000;
return timestamp;
}
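The timestamp keeps the format of the OHCI-supplied request timestamp: the low 13 bits count isochronous cycles (0..7999, 8000 per second), the bits above count seconds. A worked example with the default split_timeout_cycles of 800 (100 ms) preloaded in fw_card_initialize():

	u32 ts = (2 << 13) | 7500;	/* request timestamped at 2 s, cycle 7500 */
	u32 deadline = compute_split_timeout_timestamp(card, ts);
	/* cycles = 800 + 7500 = 8300, i.e. one second of carry:
	 * deadline == (3 << 13) | 300 */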
static struct fw_request *allocate_request(struct fw_card *card,
struct fw_packet *p)
{
struct fw_request *request;
u32 *data, length;
int request_tcode, t;
int request_tcode;
request_tcode = HEADER_GET_TCODE(p->header[0]);
switch (request_tcode) {
......@@ -677,14 +753,9 @@ static struct fw_request *allocate_request(struct fw_packet *p)
if (request == NULL)
return NULL;
t = (p->timestamp & 0x1fff) + 4000;
if (t >= 8000)
t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
else
t = (p->timestamp & ~0x1fff) + t;
request->response.speed = p->speed;
request->response.timestamp = t;
request->response.timestamp =
compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
......@@ -713,7 +784,8 @@ void fw_send_response(struct fw_card *card,
if (rcode == RCODE_COMPLETE)
fw_fill_response(&request->response, request->request_header,
rcode, request->data, request->length);
rcode, request->data,
fw_get_response_length(request));
else
fw_fill_response(&request->response, request->request_header,
rcode, NULL, 0);
......@@ -731,9 +803,11 @@ static void handle_exclusive_region_request(struct fw_card *card,
unsigned long flags;
int tcode, destination, source;
tcode = HEADER_GET_TCODE(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
tcode = HEADER_GET_TCODE(p->header[0]);
if (tcode == TCODE_LOCK_REQUEST)
tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
spin_lock_irqsave(&address_handler_lock, flags);
handler = lookup_enclosing_address_handler(&address_handler_list,
......@@ -753,7 +827,7 @@ static void handle_exclusive_region_request(struct fw_card *card,
else
handler->address_callback(card, request,
tcode, destination, source,
p->generation, p->speed, offset,
p->generation, offset,
request->data, request->length,
handler->callback_data);
}
......@@ -791,8 +865,8 @@ static void handle_fcp_region_request(struct fw_card *card,
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, NULL, tcode,
destination, source,
p->generation, p->speed,
offset, request->data,
p->generation, offset,
request->data,
request->length,
handler->callback_data);
}
......@@ -809,7 +883,12 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
return;
request = allocate_request(p);
if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
fw_cdev_handle_phy_packet(card, p);
return;
}
request = allocate_request(card, p);
if (request == NULL) {
/* FIXME: send statically allocated busy packet. */
return;
......@@ -832,13 +911,12 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
unsigned long flags;
u32 *data;
size_t data_length;
int tcode, tlabel, destination, source, rcode;
int tcode, tlabel, source, rcode;
tcode = HEADER_GET_TCODE(p->header[0]);
tlabel = HEADER_GET_TLABEL(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
rcode = HEADER_GET_RCODE(p->header[1]);
tcode = HEADER_GET_TCODE(p->header[0]);
tlabel = HEADER_GET_TLABEL(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
......@@ -903,8 +981,8 @@ static const struct fw_address_region topology_map_region =
static void handle_topology_map(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
int speed, unsigned long long offset,
void *payload, size_t length, void *callback_data)
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
int start;
......@@ -933,19 +1011,97 @@ static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
{
unsigned int cycles;
cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
cycles = max(cycles, 800u); /* minimum as per the spec */
cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
card->split_timeout_cycles = cycles;
card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}
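Per the CSR layout used here, SPLIT_TIMEOUT_HI holds whole seconds and the top 13 bits of SPLIT_TIMEOUT_LO hold the cycle count, hence the clamp to 800..24000 cycles (100 ms..3 s). A worked example with the defaults from fw_card_initialize():

	/* split_timeout_hi = 0, split_timeout_lo = 800 << 19 */
	cycles = 0 * 8000 + ((800 << 19) >> 19);	/* = 800 */
	/* split_timeout_cycles  = 800
	 * split_timeout_jiffies = DIV_ROUND_UP(800 * HZ, 8000) = HZ / 10 (100 ms),
	 * matching the value preloaded in fw_card_initialize(). */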
static void handle_registers(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
int speed, unsigned long long offset,
void *payload, size_t length, void *callback_data)
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
unsigned long flags;
switch (reg) {
case CSR_PRIORITY_BUDGET:
if (!card->priority_budget_implemented) {
rcode = RCODE_ADDRESS_ERROR;
break;
}
/* else fall through */
case CSR_NODE_IDS:
/*
* per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
* and 9.6, but interoperable with IEEE 1394.1-2004 bridges
*/
/* fall through */
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
case CSR_CYCLE_TIME:
if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
*data = cpu_to_be32(card->driver->get_cycle_time(card));
case CSR_BUS_TIME:
case CSR_BUSY_TIMEOUT:
if (tcode == TCODE_READ_QUADLET_REQUEST)
*data = cpu_to_be32(card->driver->read_csr(card, reg));
else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->driver->write_csr(card, reg, be32_to_cpu(*data));
else
rcode = RCODE_TYPE_ERROR;
break;
case CSR_RESET_START:
if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->driver->write_csr(card, CSR_STATE_CLEAR,
CSR_STATE_BIT_ABDICATE);
else
rcode = RCODE_TYPE_ERROR;
break;
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
spin_lock_irqsave(&card->lock, flags);
card->split_timeout_hi = be32_to_cpu(*data) & 7;
update_split_timeout(card);
spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
break;
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
spin_lock_irqsave(&card->lock, flags);
card->split_timeout_lo =
be32_to_cpu(*data) & 0xfff80000;
update_split_timeout(card);
spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
break;
case CSR_MAINT_UTILITY:
if (tcode == TCODE_READ_QUADLET_REQUEST)
*data = card->maint_utility_register;
else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->maint_utility_register = *data;
else
rcode = RCODE_TYPE_ERROR;
break;
......@@ -975,12 +1131,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
BUG();
break;
case CSR_BUSY_TIMEOUT:
/* FIXME: Implement this. */
case CSR_BUS_TIME:
/* Useless without initialization by the bus manager. */
default:
rcode = RCODE_ADDRESS_ERROR;
break;
......
......@@ -38,6 +38,9 @@ struct fw_packet;
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30)
#define CSR_STATE_BIT_CMSTR (1 << 8)
#define CSR_STATE_BIT_ABDICATE (1 << 10)
struct fw_card_driver {
/*
* Enable the given card with the given initial config rom.
......@@ -48,6 +51,7 @@ struct fw_card_driver {
int (*enable)(struct fw_card *card,
const __be32 *config_rom, size_t length);
int (*read_phy_reg)(struct fw_card *card, int address);
int (*update_phy_reg)(struct fw_card *card, int address,
int clear_bits, int set_bits);
......@@ -75,7 +79,8 @@ struct fw_card_driver {
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
u32 (*get_cycle_time)(struct fw_card *card);
u32 (*read_csr)(struct fw_card *card, int csr_offset);
void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
......@@ -85,6 +90,8 @@ struct fw_card_driver {
int (*start_iso)(struct fw_iso_context *ctx,
s32 cycle, u32 sync, u32 tags);
int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
int (*queue_iso)(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
......@@ -98,8 +105,8 @@ void fw_card_initialize(struct fw_card *card,
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
static inline struct fw_card *fw_card_get(struct fw_card *card)
......@@ -123,6 +130,7 @@ extern const struct file_operations fw_device_ops;
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
......@@ -192,7 +200,7 @@ static inline void fw_node_put(struct fw_node *node)
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids);
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
/*
......@@ -209,6 +217,7 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe)
#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
......@@ -218,9 +227,18 @@ static inline bool is_next_generation(int new_generation, int old_generation)
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
#define FW_PHY_CONFIG_NO_NODE_ID -1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
static inline bool is_ping_packet(u32 *data)
{
return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
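Following the PHY configuration packet layout (only the phy_ID/root_ID field in bits 29..24 may be set; R, T and gap_count must be zero, and the second quadlet is the bitwise inverse of the first), a ping looks like this (illustrative only):

	u32 ping[2] = { 0x05000000, ~0x05000000 };	/* ping to phy_ID 5 */
	/* is_ping_packet(ping) is true: the first quadlet has bits 31..30 and
	 * 23..0 clear, and the second quadlet is its inverse (0xfaffffff). */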
#endif /* _FIREWIRE_CORE_H */
......@@ -806,8 +806,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int tcode, int destination, int source, int generation,
int speed, unsigned long long offset, void *payload,
size_t length, void *callback_data)
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
struct fwnet_device *dev = callback_data;
int rcode;
......
#ifndef __nosy_user_h
#define __nosy_user_h
#include <linux/ioctl.h>
#include <linux/types.h>
#define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats)
#define NOSY_IOC_START _IO('&', 1)
#define NOSY_IOC_STOP _IO('&', 2)
#define NOSY_IOC_FILTER _IOW('&', 2, __u32)
struct nosy_stats {
__u32 total_packet_count;
__u32 lost_packet_count;
};
/*
* Format of packets returned from the kernel driver:
*
* quadlet with timestamp (microseconds, CPU endian)
* quadlet-padded packet data... (little endian)
* quadlet with ack (little endian)
*/
#endif /* __nosy_user_h */
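For context, a minimal userspace consumer of this interface might look as follows (illustrative only and not part of this commit; the real front-end is nosy-dump under tools/firewire/, and the /dev/nosy node name follows from the misc device registered by the driver below):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include "nosy-user.h"	/* the header above */

	int main(void)
	{
		uint32_t buf[4096];
		ssize_t length;
		int i, fd;

		fd = open("/dev/nosy", O_RDONLY);
		if (fd < 0 || ioctl(fd, NOSY_IOC_START) < 0)
			return 1;

		for (i = 0; i < 16; i++) {
			/* Each read() returns one record in the format described
			 * above: timestamp quadlet, quadlet-padded packet data,
			 * ack quadlet. */
			length = read(fd, buf, sizeof(buf));
			if (length < 4)
				break;
			printf("%u us: %zd bytes\n", (unsigned)buf[0], length);
		}

		ioctl(fd, NOSY_IOC_STOP);
		close(fd);
		return 0;
	}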
/*
* nosy - Snoop mode driver for TI PCILynx 1394 controllers
* Copyright (C) 2002-2007 Kristian Høgsberg
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include "nosy.h"
#include "nosy-user.h"
#define TCODE_PHY_PACKET 0x10
#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
static char driver_name[] = KBUILD_MODNAME;
/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
__le32 next;
__le32 async_error_next;
u32 user_data;
__le32 pcl_status;
__le32 remaining_transfer_count;
__le32 next_data_buffer;
struct {
__le32 control;
__le32 pointer;
} buffer[13];
};
struct packet {
unsigned int length;
char data[0];
};
struct packet_buffer {
char *data;
size_t capacity;
long total_packet_count, lost_packet_count;
atomic_t size;
struct packet *head, *tail;
wait_queue_head_t wait;
};
struct pcilynx {
struct pci_dev *pci_device;
__iomem char *registers;
struct pcl *rcv_start_pcl, *rcv_pcl;
__le32 *rcv_buffer;
dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;
spinlock_t client_list_lock;
struct list_head client_list;
struct miscdevice misc;
struct list_head link;
struct kref kref;
};
static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
kref_get(&lynx->kref);
return lynx;
}
static void
lynx_release(struct kref *kref)
{
kfree(container_of(kref, struct pcilynx, kref));
}
static inline void
lynx_put(struct pcilynx *lynx)
{
kref_put(&lynx->kref, lynx_release);
}
struct client {
struct pcilynx *lynx;
u32 tcode_mask;
struct packet_buffer buffer;
struct list_head link;
};
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);
static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
buffer->data = kmalloc(capacity, GFP_KERNEL);
if (buffer->data == NULL)
return -ENOMEM;
buffer->head = (struct packet *) buffer->data;
buffer->tail = (struct packet *) buffer->data;
buffer->capacity = capacity;
buffer->lost_packet_count = 0;
atomic_set(&buffer->size, 0);
init_waitqueue_head(&buffer->wait);
return 0;
}
static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
kfree(buffer->data);
}
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
struct packet_buffer *buffer = &client->buffer;
size_t length;
char *end;
if (wait_event_interruptible(buffer->wait,
atomic_read(&buffer->size) > 0) ||
list_empty(&client->lynx->link))
return -ERESTARTSYS;
if (atomic_read(&buffer->size) == 0)
return -ENODEV;
/* FIXME: Check length <= user_length. */
end = buffer->data + buffer->capacity;
length = buffer->head->length;
if (&buffer->head->data[length] < end) {
if (copy_to_user(data, buffer->head->data, length))
return -EFAULT;
buffer->head = (struct packet *) &buffer->head->data[length];
} else {
size_t split = end - buffer->head->data;
if (copy_to_user(data, buffer->head->data, split))
return -EFAULT;
if (copy_to_user(data + split, buffer->data, length - split))
return -EFAULT;
buffer->head = (struct packet *) &buffer->data[length - split];
}
/*
* Decrease buffer->size as the last thing, since this is what
* keeps the interrupt from overwriting the packet we are
* retrieving from the buffer.
*/
atomic_sub(sizeof(struct packet) + length, &buffer->size);
return length;
}
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
char *end;
buffer->total_packet_count++;
if (buffer->capacity <
atomic_read(&buffer->size) + sizeof(struct packet) + length) {
buffer->lost_packet_count++;
return;
}
end = buffer->data + buffer->capacity;
buffer->tail->length = length;
if (&buffer->tail->data[length] < end) {
memcpy(buffer->tail->data, data, length);
buffer->tail = (struct packet *) &buffer->tail->data[length];
} else {
size_t split = end - buffer->tail->data;
memcpy(buffer->tail->data, data, split);
memcpy(buffer->data, data + split, length - split);
buffer->tail = (struct packet *) &buffer->data[length - split];
}
/* Finally, adjust buffer size and wake up userspace reader. */
atomic_add(sizeof(struct packet) + length, &buffer->size);
wake_up_interruptible(&buffer->wait);
}
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
writel(data, lynx->registers + offset);
}
static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
return readl(lynx->registers + offset);
}
static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}
/*
* Maybe the pcl programs could be set up to just append data instead
* of using a whole packet.
*/
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
int dmachan)
{
reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
if (addr > 15) {
dev_err(&lynx->pci_device->dev,
"PHY register address %d out of range\n", addr);
return -1;
}
if (val > 0xff) {
dev_err(&lynx->pci_device->dev,
"PHY register value %d out of range\n", val);
return -1;
}
reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));
return 0;
}
static int
nosy_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct client *client;
struct pcilynx *tmp, *lynx = NULL;
mutex_lock(&card_mutex);
list_for_each_entry(tmp, &card_list, link)
if (tmp->misc.minor == minor) {
lynx = lynx_get(tmp);
break;
}
mutex_unlock(&card_mutex);
if (lynx == NULL)
return -ENODEV;
client = kmalloc(sizeof *client, GFP_KERNEL);
if (client == NULL)
goto fail;
client->tcode_mask = ~0;
client->lynx = lynx;
INIT_LIST_HEAD(&client->link);
if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
goto fail;
file->private_data = client;
return 0;
fail:
kfree(client);
lynx_put(lynx);
return -ENOMEM;
}
static int
nosy_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
struct pcilynx *lynx = client->lynx;
spin_lock_irq(&lynx->client_list_lock);
list_del_init(&client->link);
spin_unlock_irq(&lynx->client_list_lock);
packet_buffer_destroy(&client->buffer);
kfree(client);
lynx_put(lynx);
return 0;
}
static unsigned int
nosy_poll(struct file *file, poll_table *pt)
{
struct client *client = file->private_data;
unsigned int ret = 0;
poll_wait(file, &client->buffer.wait, pt);
if (atomic_read(&client->buffer.size) > 0)
ret = POLLIN | POLLRDNORM;
if (list_empty(&client->lynx->link))
ret |= POLLHUP;
return ret;
}
static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
struct client *client = file->private_data;
return packet_buffer_get(client, buffer, count);
}
static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
spinlock_t *client_list_lock = &client->lynx->client_list_lock;
struct nosy_stats stats;
switch (cmd) {
case NOSY_IOC_GET_STATS:
spin_lock_irq(client_list_lock);
stats.total_packet_count = client->buffer.total_packet_count;
stats.lost_packet_count = client->buffer.lost_packet_count;
spin_unlock_irq(client_list_lock);
if (copy_to_user((void __user *) arg, &stats, sizeof stats))
return -EFAULT;
else
return 0;
case NOSY_IOC_START:
spin_lock_irq(client_list_lock);
list_add_tail(&client->link, &client->lynx->client_list);
spin_unlock_irq(client_list_lock);
return 0;
case NOSY_IOC_STOP:
spin_lock_irq(client_list_lock);
list_del_init(&client->link);
spin_unlock_irq(client_list_lock);
return 0;
case NOSY_IOC_FILTER:
spin_lock_irq(client_list_lock);
client->tcode_mask = arg;
spin_unlock_irq(client_list_lock);
return 0;
default:
return -EINVAL;
/* Flush buffer, configure filter. */
}
}
static const struct file_operations nosy_ops = {
.owner = THIS_MODULE,
.read = nosy_read,
.unlocked_ioctl = nosy_ioctl,
.poll = nosy_poll,
.open = nosy_open,
.release = nosy_release,
};
#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
static void
packet_irq_handler(struct pcilynx *lynx)
{
struct client *client;
u32 tcode_mask, tcode;
size_t length;
struct timeval tv;
/* FIXME: Also report rcv_speed. */
length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
do_gettimeofday(&tv);
lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;
if (length == PHY_PACKET_SIZE)
tcode_mask = 1 << TCODE_PHY_PACKET;
else
tcode_mask = 1 << tcode;
spin_lock(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
if (client->tcode_mask & tcode_mask)
packet_buffer_put(&client->buffer,
lynx->rcv_buffer, length + 4);
spin_unlock(&lynx->client_list_lock);
}
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
struct client *client;
struct timeval tv;
do_gettimeofday(&tv);
spin_lock(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
packet_buffer_put(&client->buffer, &tv.tv_usec, 4);
spin_unlock(&lynx->client_list_lock);
}
static irqreturn_t
irq_handler(int irq, void *device)
{
struct pcilynx *lynx = device;
u32 pci_int_status;
pci_int_status = reg_read(lynx, PCI_INT_STATUS);
if (pci_int_status == ~0)
/* Card was ejected. */
return IRQ_NONE;
if ((pci_int_status & PCI_INT_INT_PEND) == 0)
/* Not our interrupt, bail out quickly. */
return IRQ_NONE;
if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
u32 link_int_status;
link_int_status = reg_read(lynx, LINK_INT_STATUS);
reg_write(lynx, LINK_INT_STATUS, link_int_status);
if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
bus_reset_irq_handler(lynx);
}
/* Clear the PCI_INT_STATUS register only after clearing the
* LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
* be set again immediately. */
reg_write(lynx, PCI_INT_STATUS, pci_int_status);
if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
packet_irq_handler(lynx);
run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
}
return IRQ_HANDLED;
}
static void
remove_card(struct pci_dev *dev)
{
struct pcilynx *lynx = pci_get_drvdata(dev);
struct client *client;
mutex_lock(&card_mutex);
list_del_init(&lynx->link);
misc_deregister(&lynx->misc);
mutex_unlock(&card_mutex);
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
spin_lock_irq(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
wake_up_interruptible(&client->buffer.wait);
spin_unlock_irq(&lynx->client_list_lock);
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_pcl, lynx->rcv_pcl_bus);
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
pci_disable_device(dev);
lynx_put(lynx);
}
#define RCV_BUFFER_SIZE (16 * 1024)
static int __devinit
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
struct pcilynx *lynx;
u32 p, end;
int ret, i;
if (pci_set_dma_mask(dev, 0xffffffff)) {
dev_err(&dev->dev,
"DMA address limits not supported for PCILynx hardware\n");
return -ENXIO;
}
if (pci_enable_device(dev)) {
dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
return -ENXIO;
}
pci_set_master(dev);
lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
if (lynx == NULL) {
dev_err(&dev->dev, "Failed to allocate control structure\n");
ret = -ENOMEM;
goto fail_disable;
}
lynx->pci_device = dev;
pci_set_drvdata(dev, lynx);
spin_lock_init(&lynx->client_list_lock);
INIT_LIST_HEAD(&lynx->client_list);
kref_init(&lynx->kref);
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_pcl_bus);
lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
if (lynx->rcv_start_pcl == NULL ||
lynx->rcv_pcl == NULL ||
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
goto fail_deallocate;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);
lynx->rcv_pcl->buffer[0].control =
cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
lynx->rcv_pcl->buffer[0].pointer =
cpu_to_le32(lynx->rcv_buffer_bus + 4);
p = lynx->rcv_buffer_bus + 2048;
end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
for (i = 1; p < end; i++, p += 2048) {
lynx->rcv_pcl->buffer[i].control =
cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
}
lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
/* Fix buggy cards with autoboot pin not tied low: */
reg_write(lynx, DMA0_CHAN_CTRL, 0);
reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
#if 0
/* now, looking for PHY register set */
if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
lynx->phyic.reg_1394a = 1;
PRINT(KERN_INFO, lynx->id,
"found 1394a conform PHY (using extended register set)");
lynx->phyic.vendor = get_phy_vendorid(lynx);
lynx->phyic.product = get_phy_productid(lynx);
} else {
lynx->phyic.reg_1394a = 0;
PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
}
#endif
/* Setup the general receive FIFO max size. */
reg_write(lynx, FIFO_SIZES, 255);
reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
reg_write(lynx, LINK_INT_ENABLE,
LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
/* Disable the L flag in self ID packets. */
set_phy_reg(lynx, 4, 0);
/* Put this baby into snoop mode */
reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
driver_name, lynx)) {
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
goto fail_deallocate;
}
lynx->misc.parent = &dev->dev;
lynx->misc.minor = MISC_DYNAMIC_MINOR;
lynx->misc.name = "nosy";
lynx->misc.fops = &nosy_ops;
mutex_lock(&card_mutex);
ret = misc_register(&lynx->misc);
if (ret) {
dev_err(&dev->dev, "Failed to register misc char device\n");
mutex_unlock(&card_mutex);
goto fail_free_irq;
}
list_add_tail(&lynx->link, &card_list);
mutex_unlock(&card_mutex);
dev_info(&dev->dev,
"Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
return 0;
fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
fail_deallocate:
if (lynx->rcv_start_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
if (lynx->rcv_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_pcl, lynx->rcv_pcl_bus);
if (lynx->rcv_buffer)
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
kfree(lynx);
fail_disable:
pci_disable_device(dev);
return ret;
}
static struct pci_device_id pci_table[] __devinitdata = {
{
.vendor = PCI_VENDOR_ID_TI,
.device = PCI_DEVICE_ID_TI_PCILYNX,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ } /* Terminating entry */
};
static struct pci_driver lynx_pci_driver = {
.name = driver_name,
.id_table = pci_table,
.probe = add_card,
.remove = remove_card,
};
MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pci_table);
static int __init nosy_init(void)
{
return pci_register_driver(&lynx_pci_driver);
}
static void __exit nosy_cleanup(void)
{
pci_unregister_driver(&lynx_pci_driver);
pr_info("Unloaded %s\n", driver_name);
}
module_init(nosy_init);
module_exit(nosy_cleanup);
/*
* Chip register definitions for PCILynx chipset. Based on pcilynx.h
* from the Linux 1394 drivers, but modified a bit so the names here
* match the specification exactly (even though they have weird names,
* like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent
* reject" etc.)
*/
#define PCILYNX_MAX_REGISTER 0xfff
#define PCILYNX_MAX_MEMORY 0xffff
#define PCI_LATENCY_CACHELINE 0x0c
#define MISC_CONTROL 0x40
#define MISC_CONTROL_SWRESET (1<<0)
#define SERIAL_EEPROM_CONTROL 0x44
#define PCI_INT_STATUS 0x48
#define PCI_INT_ENABLE 0x4c
/* status and enable have identical bit numbers */
#define PCI_INT_INT_PEND (1<<31)
#define PCI_INT_FRC_INT (1<<30)
#define PCI_INT_SLV_ADR_PERR (1<<28)
#define PCI_INT_SLV_DAT_PERR (1<<27)
#define PCI_INT_MST_DAT_PERR (1<<26)
#define PCI_INT_MST_DEV_TO (1<<25)
#define PCI_INT_INT_SLV_TO (1<<23)
#define PCI_INT_AUX_TO (1<<18)
#define PCI_INT_AUX_INT (1<<17)
#define PCI_INT_P1394_INT (1<<16)
#define PCI_INT_DMA4_PCL (1<<9)
#define PCI_INT_DMA4_HLT (1<<8)
#define PCI_INT_DMA3_PCL (1<<7)
#define PCI_INT_DMA3_HLT (1<<6)
#define PCI_INT_DMA2_PCL (1<<5)
#define PCI_INT_DMA2_HLT (1<<4)
#define PCI_INT_DMA1_PCL (1<<3)
#define PCI_INT_DMA1_HLT (1<<2)
#define PCI_INT_DMA0_PCL (1<<1)
#define PCI_INT_DMA0_HLT (1<<0)
/* all DMA interrupts combined: */
#define PCI_INT_DMA_ALL 0x3ff
#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
#define LBUS_ADDR 0xb4
#define LBUS_ADDR_SEL_RAM (0x0<<16)
#define LBUS_ADDR_SEL_ROM (0x1<<16)
#define LBUS_ADDR_SEL_AUX (0x2<<16)
#define LBUS_ADDR_SEL_ZV (0x3<<16)
#define GPIO_CTRL_A 0xb8
#define GPIO_CTRL_B 0xbc
#define GPIO_DATA_BASE 0xc0
#define DMA_BREG(base, chan) (base + chan * 0x20)
#define DMA_SREG(base, chan) (base + chan * 0x10)
#define PCL_NEXT_INVALID (1<<0)
/* transfer commands */
#define PCL_CMD_RCV (0x1<<24)
#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
#define PCL_CMD_XMT (0x2<<24)
#define PCL_CMD_UNFXMT (0xc<<24)
#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
/* aux commands */
#define PCL_CMD_NOP (0x0<<24)
#define PCL_CMD_LOAD (0x3<<24)
#define PCL_CMD_STOREQ (0x4<<24)
#define PCL_CMD_STORED (0xb<<24)
#define PCL_CMD_STORE0 (0x5<<24)
#define PCL_CMD_STORE1 (0x6<<24)
#define PCL_CMD_COMPARE (0xe<<24)
#define PCL_CMD_SWAP_COMPARE (0xf<<24)
#define PCL_CMD_ADD (0xd<<24)
#define PCL_CMD_BRANCH (0x7<<24)
/* BRANCH condition codes */
#define PCL_COND_DMARDY_SET (0x1<<20)
#define PCL_COND_DMARDY_CLEAR (0x2<<20)
#define PCL_GEN_INTR (1<<19)
#define PCL_LAST_BUFF (1<<18)
#define PCL_LAST_CMD (PCL_LAST_BUFF)
#define PCL_WAITSTAT (1<<17)
#define PCL_BIGENDIAN (1<<16)
#define PCL_ISOMODE (1<<12)
#define DMA0_PREV_PCL 0x100
#define DMA1_PREV_PCL 0x120
#define DMA2_PREV_PCL 0x140
#define DMA3_PREV_PCL 0x160
#define DMA4_PREV_PCL 0x180
#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
#define DMA0_CURRENT_PCL 0x104
#define DMA1_CURRENT_PCL 0x124
#define DMA2_CURRENT_PCL 0x144
#define DMA3_CURRENT_PCL 0x164
#define DMA4_CURRENT_PCL 0x184
#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
#define DMA0_CHAN_STAT 0x10c
#define DMA1_CHAN_STAT 0x12c
#define DMA2_CHAN_STAT 0x14c
#define DMA3_CHAN_STAT 0x16c
#define DMA4_CHAN_STAT 0x18c
#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
/* CHAN_STATUS registers share bits */
#define DMA_CHAN_STAT_SELFID (1<<31)
#define DMA_CHAN_STAT_ISOPKT (1<<30)
#define DMA_CHAN_STAT_PCIERR (1<<29)
#define DMA_CHAN_STAT_PKTERR (1<<28)
#define DMA_CHAN_STAT_PKTCMPL (1<<27)
#define DMA_CHAN_STAT_SPECIALACK (1<<14)
#define DMA0_CHAN_CTRL 0x110
#define DMA1_CHAN_CTRL 0x130
#define DMA2_CHAN_CTRL 0x150
#define DMA3_CHAN_CTRL 0x170
#define DMA4_CHAN_CTRL 0x190
#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
/* CHAN_CTRL registers share bits */
#define DMA_CHAN_CTRL_ENABLE (1<<31)
#define DMA_CHAN_CTRL_BUSY (1<<30)
#define DMA_CHAN_CTRL_LINK (1<<29)
#define DMA0_READY 0x114
#define DMA1_READY 0x134
#define DMA2_READY 0x154
#define DMA3_READY 0x174
#define DMA4_READY 0x194
#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
#define DMA_GLOBAL_REGISTER 0x908
#define FIFO_SIZES 0xa00
#define FIFO_CONTROL 0xa10
#define FIFO_CONTROL_GRF_FLUSH (1<<4)
#define FIFO_CONTROL_ITF_FLUSH (1<<3)
#define FIFO_CONTROL_ATF_FLUSH (1<<2)
#define FIFO_XMIT_THRESHOLD 0xa14
#define DMA0_WORD0_CMP_VALUE 0xb00
#define DMA1_WORD0_CMP_VALUE 0xb10
#define DMA2_WORD0_CMP_VALUE 0xb20
#define DMA3_WORD0_CMP_VALUE 0xb30
#define DMA4_WORD0_CMP_VALUE 0xb40
#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
#define DMA0_WORD0_CMP_ENABLE 0xb04
#define DMA1_WORD0_CMP_ENABLE 0xb14
#define DMA2_WORD0_CMP_ENABLE 0xb24
#define DMA3_WORD0_CMP_ENABLE 0xb34
#define DMA4_WORD0_CMP_ENABLE 0xb44
#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan))
#define DMA0_WORD1_CMP_VALUE 0xb08
#define DMA1_WORD1_CMP_VALUE 0xb18
#define DMA2_WORD1_CMP_VALUE 0xb28
#define DMA3_WORD1_CMP_VALUE 0xb38
#define DMA4_WORD1_CMP_VALUE 0xb48
#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
#define DMA0_WORD1_CMP_ENABLE 0xb0c
#define DMA1_WORD1_CMP_ENABLE 0xb1c
#define DMA2_WORD1_CMP_ENABLE 0xb2c
#define DMA3_WORD1_CMP_ENABLE 0xb3c
#define DMA4_WORD1_CMP_ENABLE 0xb4c
#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan))
/* word 1 compare enable flags */
#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
#define LINK_ID 0xf00
#define LINK_ID_BUS(id) (id<<22)
#define LINK_ID_NODE(id) (id<<16)
#define LINK_CONTROL 0xf04
#define LINK_CONTROL_BUSY (1<<29)
#define LINK_CONTROL_TX_ISO_EN (1<<26)
#define LINK_CONTROL_RX_ISO_EN (1<<25)
#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
#define LINK_CONTROL_RESET_TX (1<<21)
#define LINK_CONTROL_RESET_RX (1<<20)
#define LINK_CONTROL_CYCMASTER (1<<11)
#define LINK_CONTROL_CYCSOURCE (1<<10)
#define LINK_CONTROL_CYCTIMEREN (1<<9)
#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
#define CYCLE_TIMER 0xf08
#define LINK_PHY 0xf0c
#define LINK_PHY_READ (1<<31)
#define LINK_PHY_WRITE (1<<30)
#define LINK_PHY_ADDR(addr) (addr<<24)
#define LINK_PHY_WDATA(data) (data<<16)
#define LINK_PHY_RADDR(addr) (addr<<8)
#define LINK_INT_STATUS 0xf14
#define LINK_INT_ENABLE 0xf18
/* status and enable have identical bit numbers */
#define LINK_INT_LINK_INT (1<<31)
#define LINK_INT_PHY_TIME_OUT (1<<30)
#define LINK_INT_PHY_REG_RCVD (1<<29)
#define LINK_INT_PHY_BUSRESET (1<<28)
#define LINK_INT_TX_RDY (1<<26)
#define LINK_INT_RX_DATA_RDY (1<<25)
#define LINK_INT_IT_STUCK (1<<20)
#define LINK_INT_AT_STUCK (1<<19)
#define LINK_INT_SNTRJ (1<<17)
#define LINK_INT_HDR_ERR (1<<16)
#define LINK_INT_TC_ERR (1<<15)
#define LINK_INT_CYC_SEC (1<<11)
#define LINK_INT_CYC_STRT (1<<10)
#define LINK_INT_CYC_DONE (1<<9)
#define LINK_INT_CYC_PEND (1<<8)
#define LINK_INT_CYC_LOST (1<<7)
#define LINK_INT_CYC_ARB_FAILED (1<<6)
#define LINK_INT_GRF_OVER_FLOW (1<<5)
#define LINK_INT_ITF_UNDER_FLOW (1<<4)
#define LINK_INT_ATF_UNDER_FLOW (1<<3)
#define LINK_INT_IARB_FAILED (1<<0)
This diff is collapsed.
......@@ -60,6 +60,7 @@
#define OHCI1394_LinkControl_cycleSource (1 << 22)
#define OHCI1394_NodeID 0x0E8
#define OHCI1394_NodeID_idValid 0x80000000
#define OHCI1394_NodeID_root 0x40000000
#define OHCI1394_NodeID_nodeNumber 0x0000003f
#define OHCI1394_NodeID_busNumber 0x0000ffc0
#define OHCI1394_PhyControl 0x0EC
......
......@@ -410,8 +410,7 @@ static void free_orb(struct kref *kref)
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
unsigned long long offset,
int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
......@@ -508,8 +507,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
&orb->pointer, sizeof(orb->pointer),
complete_transaction, orb);
&orb->pointer, 8, complete_transaction, orb);
}
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
......@@ -654,7 +652,7 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
&d, sizeof(d));
&d, 4);
}
static void complete_agent_reset_write_no_wait(struct fw_card *card,
......@@ -676,7 +674,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
&d, sizeof(d), complete_agent_reset_write_no_wait, t);
&d, 4, complete_agent_reset_write_no_wait, t);
}
static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
......@@ -866,8 +864,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
&d, sizeof(d));
CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4);
}
static void sbp2_reconnect(struct work_struct *work);
......
......@@ -172,7 +172,7 @@ static DEFINE_SPINLOCK(dv1394_cards_lock);
static inline struct video_card* file_to_video_card(struct file *file)
{
return (struct video_card*) file->private_data;
return file->private_data;
}
/*** FRAME METHODS *********************************************************/
......@@ -610,7 +610,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
} else {
u32 transmit_sec, transmit_cyc;
u32 ts_cyc, ts_off;
u32 ts_cyc;
/* DMA is stopped, so this is the very first frame */
video->active_frame = this_frame;
......@@ -636,7 +636,6 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
transmit_sec += transmit_cyc/8000;
transmit_cyc %= 8000;
ts_off = ct_off;
ts_cyc = transmit_cyc + 3;
ts_cyc %= 8000;
......@@ -1784,7 +1783,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
struct video_card *video = NULL;
if (file->private_data) {
video = (struct video_card*) file->private_data;
video = file->private_data;
} else {
/* look up the card by ID */
......@@ -2004,7 +2003,7 @@ static void ir_tasklet_func(unsigned long data)
int sof=0; /* start-of-frame flag */
struct frame *f;
u16 packet_length, packet_time;
u16 packet_length;
int i, dbc=0;
struct DMA_descriptor_block *block = NULL;
u16 xferstatus;
......@@ -2024,11 +2023,6 @@ static void ir_tasklet_func(unsigned long data)
sizeof(struct packet));
packet_length = le16_to_cpu(p->data_length);
packet_time = le16_to_cpu(p->timestamp);
irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
packet_time, packet_length,
p->data[0], p->data[1]);
/* get the descriptor based on packet_buffer cursor */
f = video->frames[video->current_packet / MAX_PACKETS];
......@@ -2320,7 +2314,6 @@ static void dv1394_add_host(struct hpsb_host *host)
static void dv1394_host_reset(struct hpsb_host *host)
{
struct ti_ohci *ohci;
struct video_card *video = NULL, *tmp_vid;
unsigned long flags;
......@@ -2328,9 +2321,6 @@ static void dv1394_host_reset(struct hpsb_host *host)
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
return;
ohci = (struct ti_ohci *)host->hostdata;
/* find the corresponding video_cards */
spin_lock_irqsave(&dv1394_cards_lock, flags);
list_for_each_entry(tmp_vid, &dv1394_cards, list) {
......
......@@ -1258,7 +1258,6 @@ static void ether1394_iso(struct hpsb_iso *iso)
char *buf;
struct eth1394_host_info *hi;
struct net_device *dev;
struct eth1394_priv *priv;
unsigned int len;
u32 specifier_id;
u16 source_id;
......@@ -1288,8 +1287,6 @@ static void ether1394_iso(struct hpsb_iso *iso)
(be32_to_cpu(data[1]) & 0xff000000) >> 24;
source_id = be32_to_cpu(data[0]) >> 16;
priv = netdev_priv(dev);
if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
|| specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
/* This packet is not for us */
......
......@@ -440,7 +440,7 @@ static struct pending_request *next_complete_req(struct file_info *fi)
static ssize_t raw1394_read(struct file *file, char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
struct file_info *fi = (struct file_info *)file->private_data;
struct file_info *fi = file->private_data;
struct pending_request *req;
ssize_t ret;
......@@ -1015,7 +1015,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
struct arm_addr *arm_addr = NULL;
struct arm_request *arm_req = NULL;
struct arm_response *arm_resp = NULL;
int found = 0, size = 0, rcode = -1, length_conflict = 0;
int found = 0, size = 0, rcode = -1;
struct arm_request_response *arm_req_resp = NULL;
DBGMSG("arm_write called by node: %X "
......@@ -1054,7 +1054,6 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
}
if (arm_addr->rec_length < length) {
DBGMSG("arm_write blocklength too big -> rcode_data_error");
length_conflict = 1;
rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
}
if (rcode == -1) {
......@@ -2245,7 +2244,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
static ssize_t raw1394_write(struct file *file, const char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
struct file_info *fi = (struct file_info *)file->private_data;
struct file_info *fi = file->private_data;
struct pending_request *req;
ssize_t retval = -EBADFD;
......
......@@ -1350,12 +1350,11 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
struct csr1212_keyval *kv;
struct csr1212_dentry *dentry;
u64 management_agent_addr;
u32 unit_characteristics, firmware_revision, model;
u32 firmware_revision, model;
unsigned workarounds;
int i;
management_agent_addr = 0;
unit_characteristics = 0;
firmware_revision = SBP2_ROM_VALUE_MISSING;
model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
ud->model_id : SBP2_ROM_VALUE_MISSING;
......@@ -1372,17 +1371,15 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
lu->lun = ORB_SET_LUN(kv->value.immediate);
break;
case SBP2_UNIT_CHARACTERISTICS_KEY:
/* FIXME: This is ignored so far.
* See SBP-2 clause 7.4.8. */
unit_characteristics = kv->value.immediate;
break;
case SBP2_FIRMWARE_REVISION_KEY:
firmware_revision = kv->value.immediate;
break;
default:
/* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY
* mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */
/* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
* Its "ordered" bit has consequences for command ORB
* list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
......
......@@ -720,7 +720,7 @@ static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
static long video1394_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct file_ctx *ctx = file->private_data;
struct ti_ohci *ohci = ctx->ohci;
unsigned long flags;
void __user *argp = (void __user *)arg;
......@@ -1045,14 +1045,9 @@ static long video1394_ioctl(struct file *file,
if (get_user(qv, &p->packet_sizes))
return -EFAULT;
psizes = kmalloc(buf_size, GFP_KERNEL);
if (!psizes)
return -ENOMEM;
if (copy_from_user(psizes, qv, buf_size)) {
kfree(psizes);
return -EFAULT;
}
psizes = memdup_user(qv, buf_size);
if (IS_ERR(psizes))
return PTR_ERR(psizes);
}
spin_lock_irqsave(&d->lock,flags);
......@@ -1177,7 +1172,7 @@ static long video1394_ioctl(struct file *file,
static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct file_ctx *ctx = file->private_data;
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ctx->ohci->host->id,
......@@ -1244,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
static int video1394_release(struct inode *inode, struct file *file)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct file_ctx *ctx = file->private_data;
struct ti_ohci *ohci = ctx->ohci;
struct list_head *lh, *next;
u64 mask;
......
......@@ -194,8 +194,8 @@ static const struct firedtv_backend backend = {
static void handle_fcp(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
int speed, unsigned long long offset,
void *payload, size_t length, void *callback_data)
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
struct firedtv *f, *fdtv = NULL;
struct fw_device *device;
......
This diff is collapsed.
......@@ -32,11 +32,13 @@
#define CSR_CYCLE_TIME 0x200
#define CSR_BUS_TIME 0x204
#define CSR_BUSY_TIMEOUT 0x210
#define CSR_PRIORITY_BUDGET 0x218
#define CSR_BUS_MANAGER_ID 0x21c
#define CSR_BANDWIDTH_AVAILABLE 0x220
#define CSR_CHANNELS_AVAILABLE 0x224
#define CSR_CHANNELS_AVAILABLE_HI 0x224
#define CSR_CHANNELS_AVAILABLE_LO 0x228
#define CSR_MAINT_UTILITY 0x230
#define CSR_BROADCAST_CHANNEL 0x234
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
......@@ -89,6 +91,11 @@ struct fw_card {
struct list_head transaction_list;
unsigned long reset_jiffies;
u32 split_timeout_hi;
u32 split_timeout_lo;
unsigned int split_timeout_cycles;
unsigned int split_timeout_jiffies;
unsigned long long guid;
unsigned max_receive;
int link_speed;
......@@ -104,18 +111,28 @@ struct fw_card {
bool beta_repeaters_present;
int index;
struct list_head link;
/* Work struct for BM duties. */
struct delayed_work work;
struct list_head phy_receiver_list;
struct delayed_work br_work; /* bus reset job */
bool br_short;
struct delayed_work bm_work; /* bus manager job */
int bm_retries;
int bm_generation;
__be32 bm_transaction_data[2];
int bm_node_id;
bool bm_abdicate;
bool priority_budget_implemented; /* controller feature */
bool broadcast_channel_auto_allocated; /* controller feature */
bool broadcast_channel_allocated;
u32 broadcast_channel;
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 maint_utility_register;
};
struct fw_attribute_group {
......@@ -252,7 +269,7 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
int generation,
unsigned long long offset,
void *data, size_t length,
void *callback_data);
......@@ -269,10 +286,10 @@ struct fw_packet {
u32 timestamp;
/*
* This callback is called when the packet transmission has
* completed; for successful transmission, the status code is
* the ack received from the destination, otherwise it's a
* negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
* This callback is called when the packet transmission has completed.
* For successful transmission, the status code is the ack received
* from the destination. Otherwise it is one of the juju-specific
* rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
* The callback can be called from tasklet context and thus
* must never block.
*/
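/*
 * A minimal sketch (not part of this commit) of a completion callback that
 * follows the comment above.  The fw_packet_callback_t signature used here
 * is assumed from context rather than quoted from this hunk, and the handler
 * body is purely illustrative.
 */
static void example_packet_done(struct fw_packet *packet,
				struct fw_card *card, int status)
{
	/* Called from tasklet context, so it must not sleep. */
	if (status == ACK_COMPLETE || status == ACK_PENDING)
		return;	/* the target acknowledged the transmission */

	/* Otherwise status is one of the rcodes listed above. */
	pr_debug("packet failed, status %d\n", status);
}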
......@@ -355,17 +372,19 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc);
* scatter-gather streaming (e.g. assembling video frame automatically).
*/
struct fw_iso_packet {
u16 payload_length; /* Length of indirect payload. */
u32 interrupt:1; /* Generate interrupt on this packet */
u32 skip:1; /* Set to not send packet at all. */
u32 tag:2;
u32 sy:4;
u32 header_length:8; /* Length of immediate header. */
u32 header[0];
u16 payload_length; /* Length of indirect payload */
u32 interrupt:1; /* Generate interrupt on this packet */
u32 skip:1; /* tx: Set to not send packet at all */
/* rx: Sync bit, wait for matching sy */
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
u32 header_length:8; /* Length of immediate header */
u32 header[0]; /* tx: Top of 1394 isoch. data_block */
};
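/*
 * A minimal sketch (not part of this commit) of filling in a transmit packet
 * with the fields defined above; the payload length and tag are arbitrary
 * example values.  The packet would then be queued against a mapped
 * fw_iso_buffer.
 */
static void example_fill_iso_packet(struct fw_iso_packet *p)
{
	p->payload_length = 488;	/* bytes taken from the indirect payload buffer */
	p->interrupt = 1;		/* complete this packet with an interrupt */
	p->skip = 0;			/* actually transmit it */
	p->tag = 1;			/* tx: tag field of the packet header */
	p->sy = 0;			/* tx: sy field of the packet header */
	p->header_length = 0;		/* no immediate header quadlets */
}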
#define FW_ISO_CONTEXT_TRANSMIT 0
#define FW_ISO_CONTEXT_RECEIVE 1
#define FW_ISO_CONTEXT_TRANSMIT 0
#define FW_ISO_CONTEXT_RECEIVE 1
#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2
#define FW_ISO_CONTEXT_MATCH_TAG0 1
#define FW_ISO_CONTEXT_MATCH_TAG1 2
......@@ -389,24 +408,31 @@ struct fw_iso_buffer {
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
u32 cycle, size_t header_length,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
struct fw_iso_context {
struct fw_card *card;
int type;
int channel;
int speed;
size_t header_size;
fw_iso_callback_t callback;
union {
fw_iso_callback_t sc;
fw_iso_mc_callback_t mc;
} callback;
void *callback_data;
};
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
......
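/*
 * A minimal sketch (not part of this commit) of how the multichannel
 * reception declarations above fit together.  The chosen channel, the zero
 * channel/speed/header_size arguments, and the teardown handling are
 * assumptions for illustration; requires <linux/firewire.h>.
 */
static void example_mc_complete(struct fw_iso_context *ctx,
				dma_addr_t completed, void *data)
{
	/*
	 * 'completed' is the bus address up to which the controller has
	 * written; fw_iso_buffer_lookup() maps it back to an offset in the
	 * client's fw_iso_buffer.
	 */
}

static struct fw_iso_context *example_listen_multichannel(struct fw_card *card)
{
	u64 channels = 1ULL << 13;	/* example: listen on channel 13 */
	struct fw_iso_context *ctx;

	/* The cast feeds the mc member of the callback union above. */
	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0,
				    (fw_iso_callback_t)example_mc_complete,
				    NULL);
	if (IS_ERR(ctx))
		return ctx;

	if (fw_iso_context_set_channels(ctx, &channels) < 0) {
		/* teardown with fw_iso_context_destroy() omitted here */
	}

	return ctx;
}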
prefix = /usr
nosy-dump-version = 0.4
CC = gcc
all : nosy-dump
nosy-dump : CFLAGS = -Wall -O2 -g
nosy-dump : CPPFLAGS = -DVERSION=\"$(nosy-dump-version)\" -I../../drivers/firewire
nosy-dump : LDFLAGS = -g
nosy-dump : LDLIBS = -lpopt
nosy-dump : nosy-dump.o decode-fcp.o
clean :
rm -rf *.o nosy-dump
install :
install nosy-dump $(prefix)/bin/nosy-dump
Four more diffs in this commit are collapsed.