Commit b85d9905 authored by Doug Ledford

staging/rdma: remove deprecated ipath driver

This driver was moved to staging for eventual deletion.  Time
to complete that task.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent e581d111
MAINTAINERS
@@ -5795,12 +5795,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S: Maintained
 F: net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IPATH DRIVER
-M: Mike Marciniszyn <infinipath@intel.com>
-L: linux-rdma@vger.kernel.org
-S: Maintained
-F: drivers/staging/rdma/ipath/
-
 IPMI SUBSYSTEM
 M: Corey Minyard <minyard@acm.org>
 L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
drivers/staging/rdma/Kconfig
@@ -24,6 +24,4 @@ if STAGING_RDMA
 source "drivers/staging/rdma/hfi1/Kconfig"
-source "drivers/staging/rdma/ipath/Kconfig"
 endif

drivers/staging/rdma/Makefile
 # Entries for RDMA_STAGING tree
 obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
-obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
drivers/staging/rdma/ipath/Kconfig (deleted)

config INFINIBAND_IPATH
	tristate "QLogic HTX HCA support"
	depends on 64BIT && NET && HT_IRQ
	---help---
	This is a driver for the deprecated QLogic Hyper-Transport
	IB host channel adapter (model QHT7140),
	including InfiniBand verbs support. This driver allows these
	devices to be used with both kernel upper level protocols such
	as IP-over-InfiniBand as well as with userspace applications
	(in conjunction with InfiniBand userspace access).
	For QLogic PCIe QLE-based cards, use the QIB driver instead.

	If you have this hardware, you will need to boot with PAT
	disabled on your x86-64 system; use the nopat kernel parameter.

	Note that this driver will soon be removed entirely from the kernel.
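Usage note (boot-loader specifics assumed, not part of the Kconfig text): on an x86-64 system the PAT requirement above is met by adding the parameter to the kernel command line, for example in /etc/default/grub:

	GRUB_CMDLINE_LINUX="nopat"

then regenerating the boot configuration; the active setting can be verified afterwards with cat /proc/cmdline.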
drivers/staging/rdma/ipath/Makefile (deleted)

ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-DIPATH_KERN_TYPE=0
obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
ib_ipath-y := \
ipath_cq.o \
ipath_diag.o \
ipath_dma.o \
ipath_driver.o \
ipath_eeprom.o \
ipath_file_ops.o \
ipath_fs.o \
ipath_init_chip.o \
ipath_intr.o \
ipath_keys.o \
ipath_mad.o \
ipath_mmap.o \
ipath_mr.o \
ipath_qp.o \
ipath_rc.o \
ipath_ruc.o \
ipath_sdma.o \
ipath_srq.o \
ipath_stats.o \
ipath_sysfs.o \
ipath_uc.o \
ipath_ud.o \
ipath_user_pages.o \
ipath_user_sdma.o \
ipath_verbs_mcast.o \
ipath_verbs.o
ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
drivers/staging/rdma/ipath/TODO (deleted)

The ipath driver has been moved to staging in preparation for its removal in a
few releases. The driver will be deleted during the 4.6 merge window.
Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
Cc: linux-rdma@vger.kernel.org
(diff collapsed)
drivers/staging/rdma/ipath/ipath_cq.c (deleted)

/*
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ipath_verbs.h"
/**
* ipath_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
* @solicited: true if @entry is a solicited entry
*
* This may be called with qp->s_lock held.
*/
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
struct ipath_cq_wc *wc;
unsigned long flags;
u32 head;
u32 next;
spin_lock_irqsave(&cq->lock, flags);
/*
* Note that the head pointer might be writable by user processes.
* Take care to verify it is a sane value.
*/
wc = cq->queue;
head = wc->head;
if (head >= (unsigned) cq->ibcq.cqe) {
head = cq->ibcq.cqe;
next = 0;
} else
next = head + 1;
if (unlikely(next == wc->tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) {
struct ib_event ev;
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
ev.event = IB_EVENT_CQ_ERR;
cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
}
return;
}
if (cq->ip) {
wc->uqueue[head].wr_id = entry->wr_id;
wc->uqueue[head].status = entry->status;
wc->uqueue[head].opcode = entry->opcode;
wc->uqueue[head].vendor_err = entry->vendor_err;
wc->uqueue[head].byte_len = entry->byte_len;
wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
wc->uqueue[head].qp_num = entry->qp->qp_num;
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
wc->uqueue[head].pkey_index = entry->pkey_index;
wc->uqueue[head].slid = entry->slid;
wc->uqueue[head].sl = entry->sl;
wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
wc->uqueue[head].port_num = entry->port_num;
/* Make sure entry is written before the head index. */
smp_wmb();
} else
wc->kqueue[head] = *entry;
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = IB_CQ_NONE;
cq->triggered++;
/*
* This will cause send_complete() to be called in
* another thread.
*/
tasklet_hi_schedule(&cq->comptask);
}
spin_unlock_irqrestore(&cq->lock, flags);
if (entry->status != IB_WC_SUCCESS)
to_idev(cq->ibcq.device)->n_wqe_errs++;
}
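The head/next arithmetic above is a fixed-size ring with cqe + 1 slots, where head == tail means empty, so one slot is deliberately never filled in order to distinguish "full" from "empty". A standalone sketch of that indexing (a hypothetical simplification; the real queue lives in struct ipath_cq_wc and may be shared with user space):

#include <stdbool.h>

struct ring {
	unsigned head;	/* next slot the producer writes */
	unsigned tail;	/* next slot the consumer reads */
	unsigned cqe;	/* capacity; the backing array has cqe + 1 slots */
};

static bool ring_push(struct ring *r)
{
	unsigned next = (r->head >= r->cqe) ? 0 : r->head + 1;

	if (next == r->tail)
		return false;	/* full: mirrors the IB_EVENT_CQ_ERR path above */
	/* ... write the new entry at slot r->head here ... */
	r->head = next;
	return true;
}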
/**
* ipath_poll_cq - poll for work completion entries
* @ibcq: the completion queue to poll
* @num_entries: the maximum number of entries to return
* @entry: pointer to array where work completions are placed
*
* Returns the number of completion entries polled.
*
* This may be called from interrupt context. Also called by ib_poll_cq()
* in the generic verbs code.
*/
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
struct ipath_cq *cq = to_icq(ibcq);
struct ipath_cq_wc *wc;
unsigned long flags;
int npolled;
u32 tail;
/* The kernel can only poll a kernel completion queue */
if (cq->ip) {
npolled = -EINVAL;
goto bail;
}
spin_lock_irqsave(&cq->lock, flags);
wc = cq->queue;
tail = wc->tail;
if (tail > (u32) cq->ibcq.cqe)
tail = (u32) cq->ibcq.cqe;
for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
if (tail == wc->head)
break;
/* The kernel doesn't need a RMB since it has the lock. */
*entry = wc->kqueue[tail];
if (tail >= cq->ibcq.cqe)
tail = 0;
else
tail++;
}
wc->tail = tail;
spin_unlock_irqrestore(&cq->lock, flags);
bail:
return npolled;
}
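Since this routine backs ib_poll_cq(), a kernel consumer drains completions in a loop until the ring is empty. A minimal sketch of such a caller (surrounding context and error handling assumed):

	struct ib_wc wc[4];
	int i, n;

	while ((n = ib_poll_cq(ibcq, 4, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_warn("wr_id %llu failed with status %d\n",
					(unsigned long long)wc[i].wr_id,
					wc[i].status);
		}
	}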
static void send_complete(unsigned long data)
{
struct ipath_cq *cq = (struct ipath_cq *)data;
/*
* The completion handler will most likely rearm the notification
* and poll for all pending entries. If a new completion entry
* is added while we are in this routine, tasklet_hi_schedule()
* won't call us again until we return so we check triggered to
* see if we need to call the handler again.
*/
for (;;) {
u8 triggered = cq->triggered;
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq->triggered == triggered)
return;
}
}
/**
* ipath_create_cq - create a completion queue
* @ibdev: the device this completion queue is attached to
* @attr: creation attributes
* @context: unused by the InfiniPath driver
* @udata: unused by the InfiniPath driver
*
* Returns a pointer to the completion queue or negative errno values
* for failure.
*
* Called by ib_create_cq() in the generic verbs code.
*/
struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
struct ipath_ibdev *dev = to_idev(ibdev);
struct ipath_cq *cq;
struct ipath_cq_wc *wc;
struct ib_cq *ret;
u32 sz;
if (attr->flags)
return ERR_PTR(-EINVAL);
if (entries < 1 || entries > ib_ipath_max_cqes) {
ret = ERR_PTR(-EINVAL);
goto done;
}
/* Allocate the completion queue structure. */
cq = kmalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
ret = ERR_PTR(-ENOMEM);
goto done;
}
/*
* Allocate the completion queue entries and head/tail pointers.
* This is allocated separately so that it can be resized and
* also mapped into user space.
* We need to use vmalloc() in order to support mmap and large
* numbers of entries.
*/
sz = sizeof(*wc);
if (udata && udata->outlen >= sizeof(__u64))
sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
else
sz += sizeof(struct ib_wc) * (entries + 1);
wc = vmalloc_user(sz);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
}
/*
* Return the address of the WC as the offset to mmap.
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
if (!cq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
}
err = ib_copy_to_udata(udata, &cq->ip->offset,
sizeof(cq->ip->offset));
if (err) {
ret = ERR_PTR(err);
goto bail_ip;
}
} else
cq->ip = NULL;
spin_lock(&dev->n_cqs_lock);
if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
spin_unlock(&dev->n_cqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
}
dev->n_cqs_allocated++;
spin_unlock(&dev->n_cqs_lock);
if (cq->ip) {
spin_lock_irq(&dev->pending_lock);
list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}
/*
* ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
* The number of entries should be >= the number requested or return
* an error.
*/
cq->ibcq.cqe = entries;
cq->notify = IB_CQ_NONE;
cq->triggered = 0;
spin_lock_init(&cq->lock);
tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
wc->head = 0;
wc->tail = 0;
cq->queue = wc;
ret = &cq->ibcq;
goto done;
bail_ip:
kfree(cq->ip);
bail_wc:
vfree(wc);
bail_cq:
kfree(cq);
done:
return ret;
}
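As a worked example of the sizing above (numbers assumed): a user-mapped CQ created with entries = 128 allocates sz = sizeof(struct ipath_cq_wc) + 129 * sizeof(struct ib_uverbs_wc). The extra slot is the ring-buffer sentinel noted earlier (head == tail must mean empty, not full), and vmalloc_user() is used because the region is page-granular and may later be handed to user space through ipath_mmap().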
/**
* ipath_destroy_cq - destroy a completion queue
* @ibcq: the completion queue to destroy.
*
* Returns 0 for success.
*
* Called by ib_destroy_cq() in the generic verbs code.
*/
int ipath_destroy_cq(struct ib_cq *ibcq)
{
struct ipath_ibdev *dev = to_idev(ibcq->device);
struct ipath_cq *cq = to_icq(ibcq);
tasklet_kill(&cq->comptask);
spin_lock(&dev->n_cqs_lock);
dev->n_cqs_allocated--;
spin_unlock(&dev->n_cqs_lock);
if (cq->ip)
kref_put(&cq->ip->ref, ipath_release_mmap_info);
else
vfree(cq->queue);
kfree(cq);
return 0;
}
/**
* ipath_req_notify_cq - change the notification type for a completion queue
* @ibcq: the completion queue
* @notify_flags: the type of notification to request
*
* Returns 0 for success.
*
* This may be called from interrupt context. Also called by
* ib_req_notify_cq() in the generic verbs code.
*/
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
struct ipath_cq *cq = to_icq(ibcq);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&cq->lock, flags);
/*
* Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
* any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
*/
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
cq->queue->head != cq->queue->tail)
ret = 1;
spin_unlock_irqrestore(&cq->lock, flags);
return ret;
}
/**
* ipath_resize_cq - change the size of the CQ
* @ibcq: the completion queue
*
* Returns 0 for success.
*/
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
struct ipath_cq *cq = to_icq(ibcq);
struct ipath_cq_wc *old_wc;
struct ipath_cq_wc *wc;
u32 head, tail, n;
int ret;
u32 sz;
if (cqe < 1 || cqe > ib_ipath_max_cqes) {
ret = -EINVAL;
goto bail;
}
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
sz = sizeof(*wc);
if (udata && udata->outlen >= sizeof(__u64))
sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
else
sz += sizeof(struct ib_wc) * (cqe + 1);
wc = vmalloc_user(sz);
if (!wc) {
ret = -ENOMEM;
goto bail;
}
/* Check that we can write the offset to mmap. */
if (udata && udata->outlen >= sizeof(__u64)) {
__u64 offset = 0;
ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (ret)
goto bail_free;
}
spin_lock_irq(&cq->lock);
/*
* Make sure head and tail are sane since they
* might be user writable.
*/
old_wc = cq->queue;
head = old_wc->head;
if (head > (u32) cq->ibcq.cqe)
head = (u32) cq->ibcq.cqe;
tail = old_wc->tail;
if (tail > (u32) cq->ibcq.cqe)
tail = (u32) cq->ibcq.cqe;
if (head < tail)
n = cq->ibcq.cqe + 1 + head - tail;
else
n = head - tail;
if (unlikely((u32)cqe < n)) {
ret = -EINVAL;
goto bail_unlock;
}
for (n = 0; tail != head; n++) {
if (cq->ip)
wc->uqueue[n] = old_wc->uqueue[tail];
else
wc->kqueue[n] = old_wc->kqueue[tail];
if (tail == (u32) cq->ibcq.cqe)
tail = 0;
else
tail++;
}
cq->ibcq.cqe = cqe;
wc->head = n;
wc->tail = 0;
cq->queue = wc;
spin_unlock_irq(&cq->lock);
vfree(old_wc);
if (cq->ip) {
struct ipath_ibdev *dev = to_idev(ibcq->device);
struct ipath_mmap_info *ip = cq->ip;
ipath_update_mmap_info(dev, ip, sz, wc);
/*
* Return the offset to mmap.
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
ret = ib_copy_to_udata(udata, &ip->offset,
sizeof(ip->offset));
if (ret)
goto bail;
}
spin_lock_irq(&dev->pending_lock);
if (list_empty(&ip->pending_mmaps))
list_add(&ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}
ret = 0;
goto bail;
bail_unlock:
spin_unlock_irq(&cq->lock);
bail_free:
vfree(wc);
bail:
return ret;
}
drivers/staging/rdma/ipath/ipath_debug.h (deleted)

/*
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _IPATH_DEBUG_H
#define _IPATH_DEBUG_H
#ifndef _IPATH_DEBUGGING /* debugging enabled or not */
#define _IPATH_DEBUGGING 1
#endif
#if _IPATH_DEBUGGING
/*
* Mask values for debugging. The scheme allows us to compile out any
* of the debug tracing stuff, and if compiled in, to enable or disable
* dynamically. This can be set at modprobe time also:
* modprobe infinipath.ko infinipath_debug=7
*/
#define __IPATH_INFO 0x1 /* generic low verbosity stuff */
#define __IPATH_DBG 0x2 /* generic debug */
#define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */
/* leave some low verbosity spots open */
#define __IPATH_VERBDBG 0x40 /* very verbose debug */
#define __IPATH_PKTDBG 0x80 /* print packet data */
/* print process startup (init)/exit messages */
#define __IPATH_PROCDBG 0x100
/* print mmap/fault stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x200
#define __IPATH_ERRPKTDBG 0x400
#define __IPATH_USER_SEND 0x1000 /* use user mode send */
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
#define __IPATH_LINKVERBDBG 0x200000 /* very verbose linkchange debug */
#else /* _IPATH_DEBUGGING */
/*
* define all of these even with debugging off, for the few places that do
* if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
* compiler eliminate the code
*/
#define __IPATH_INFO 0x0 /* generic low verbosity stuff */
#define __IPATH_DBG 0x0 /* generic debug */
#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
#define __IPATH_VERBDBG 0x0 /* very verbose debug */
#define __IPATH_PKTDBG 0x0 /* print packet data */
#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
/* print mmap/fault stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) gen debug */
#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings */
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors */
#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump */
#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) table dump */
#define __IPATH_LINKVERBDBG 0x0 /* very verbose linkchange debug */
#endif /* _IPATH_DEBUGGING */
#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
#endif /* _IPATH_DEBUG_H */
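These masks are independent bit flags, so they combine with bitwise OR. For example, enabling generic info, generic debug, and packet dumps (values taken from the enabled definitions above):

	infinipath_debug = __IPATH_INFO | __IPATH_DBG | __IPATH_PKTDBG
	                 = 0x1 | 0x2 | 0x80
	                 = 0x83

which, following the modprobe form quoted in the header comment, would be passed as infinipath_debug=0x83 at module load time.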
drivers/staging/rdma/ipath/ipath_diag.c (deleted)

/*
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file contains support for diagnostic functions. It is accessed by
* opening the ipath_diag device, normally minor number 129. Diagnostic use
* of the InfiniPath chip may render the chip or board unusable until the
* driver is unloaded, or in some cases, until the system is rebooted.
*
* Accesses to the chip through this interface are not equivalent to
* going through the /sys/bus/pci resource mmap interface.
*/
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <asm/uaccess.h>
#include "ipath_kernel.h"
#include "ipath_common.h"
int ipath_diag_inuse;
static int diag_set_link;
static int ipath_diag_open(struct inode *in, struct file *fp);
static int ipath_diag_release(struct inode *in, struct file *fp);
static ssize_t ipath_diag_read(struct file *fp, char __user *data,
size_t count, loff_t *off);
static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
size_t count, loff_t *off);
static const struct file_operations diag_file_ops = {
.owner = THIS_MODULE,
.write = ipath_diag_write,
.read = ipath_diag_read,
.open = ipath_diag_open,
.release = ipath_diag_release,
.llseek = default_llseek,
};
static ssize_t ipath_diagpkt_write(struct file *fp,
const char __user *data,
size_t count, loff_t *off);
static const struct file_operations diagpkt_file_ops = {
.owner = THIS_MODULE,
.write = ipath_diagpkt_write,
.llseek = noop_llseek,
};
static atomic_t diagpkt_count = ATOMIC_INIT(0);
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_dev;
int ipath_diag_add(struct ipath_devdata *dd)
{
char name[16];
int ret = 0;
if (atomic_inc_return(&diagpkt_count) == 1) {
ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
"ipath_diagpkt", &diagpkt_file_ops,
&diagpkt_cdev, &diagpkt_dev);
if (ret) {
ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
"device: %d", ret);
goto done;
}
}
snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
&diag_file_ops, &dd->diag_cdev,
&dd->diag_dev);
if (ret)
ipath_dev_err(dd, "Couldn't create %s device: %d",
name, ret);
done:
return ret;
}
void ipath_diag_remove(struct ipath_devdata *dd)
{
if (atomic_dec_and_test(&diagpkt_count))
ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
}
/**
* ipath_read_umem64 - read a 64-bit quantity from the chip into user space
* @dd: the infinipath device
* @uaddr: the location to store the data in user memory
* @caddr: the source chip address (full pointer, not offset)
* @count: number of bytes to copy (multiple of 32 bits)
*
* This function also localizes all chip memory accesses.
* The copy should be written such that we read full cacheline packets
* from the chip. This is usually used for a single qword
*
* NOTE: This assumes the chip address is 64-bit aligned.
*/
static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
const void __iomem *caddr, size_t count)
{
const u64 __iomem *reg_addr = caddr;
const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
int ret;
/* not very efficient, but it works for now */
if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
while (reg_addr < reg_end) {
u64 data = readq(reg_addr);
if (copy_to_user(uaddr, &data, sizeof(u64))) {
ret = -EFAULT;
goto bail;
}
reg_addr++;
uaddr += sizeof(u64);
}
ret = 0;
bail:
return ret;
}
/**
* ipath_write_umem64 - write a 64-bit quantity to the chip from user space
* @dd: the infinipath device
* @caddr: the destination chip address (full pointer, not offset)
* @uaddr: the source of the data in user memory
* @count: the number of bytes to copy (multiple of 32 bits)
*
* This is usually used for a single qword
* NOTE: This assumes the chip address is 64-bit aligned.
*/
static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
const void __user *uaddr, size_t count)
{
u64 __iomem *reg_addr = caddr;
const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
int ret;
/* not very efficient, but it works for now */
if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
while (reg_addr < reg_end) {
u64 data;
if (copy_from_user(&data, uaddr, sizeof(data))) {
ret = -EFAULT;
goto bail;
}
writeq(data, reg_addr);
reg_addr++;
uaddr += sizeof(u64);
}
ret = 0;
bail:
return ret;
}
/**
* ipath_read_umem32 - read a 32-bit quantity from the chip into user space
* @dd: the infinipath device
* @uaddr: the location to store the data in user memory
* @caddr: the source chip address (full pointer, not offset)
* @count: number of bytes to copy
*
* read 32 bit values, not 64 bit; for memories that only
* support 32 bit reads; usually a single dword.
*/
static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
const void __iomem *caddr, size_t count)
{
const u32 __iomem *reg_addr = caddr;
const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
int ret;
if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
reg_end > (u32 __iomem *) dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
/* not very efficient, but it works for now */
while (reg_addr < reg_end) {
u32 data = readl(reg_addr);
if (copy_to_user(uaddr, &data, sizeof(data))) {
ret = -EFAULT;
goto bail;
}
reg_addr++;
uaddr += sizeof(u32);
}
ret = 0;
bail:
return ret;
}
/**
* ipath_write_umem32 - write a 32-bit quantity to the chip from user space
* @dd: the infinipath device
* @caddr: the destination chip address (full pointer, not offset)
* @uaddr: the source of the data in user memory
* @count: number of bytes to copy
*
* write 32 bit values, not 64 bit; for memories that only
* support 32 bit write; usually a single dword.
*/
static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
const void __user *uaddr, size_t count)
{
u32 __iomem *reg_addr = caddr;
const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
int ret;
if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
reg_end > (u32 __iomem *) dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
while (reg_addr < reg_end) {
u32 data;
if (copy_from_user(&data, uaddr, sizeof(data))) {
ret = -EFAULT;
goto bail;
}
writel(data, reg_addr);
reg_addr++;
uaddr += sizeof(u32);
}
ret = 0;
bail:
return ret;
}
static int ipath_diag_open(struct inode *in, struct file *fp)
{
int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
struct ipath_devdata *dd;
int ret;
mutex_lock(&ipath_mutex);
if (ipath_diag_inuse) {
ret = -EBUSY;
goto bail;
}
dd = ipath_lookup(unit);
if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
!dd->ipath_kregbase) {
ret = -ENODEV;
goto bail;
}
fp->private_data = dd;
ipath_diag_inuse = -2;
diag_set_link = 0;
ret = 0;
/* Only expose a way to reset the device if we
make it into diag mode. */
ipath_expose_reset(&dd->pcidev->dev);
bail:
mutex_unlock(&ipath_mutex);
return ret;
}
/**
* ipath_diagpkt_write - write an IB packet
* @fp: the diag data device file pointer
* @data: ipath_diag_pkt structure saying where to get the packet
* @count: size of data to write
* @off: unused by this code
*/
static ssize_t ipath_diagpkt_write(struct file *fp,
const char __user *data,
size_t count, loff_t *off)
{
u32 __iomem *piobuf;
u32 plen, pbufn, maxlen_reserve;
struct ipath_diag_pkt odp;
struct ipath_diag_xpkt dp;
u32 *tmpbuf = NULL;
struct ipath_devdata *dd;
ssize_t ret = 0;
u64 val;
u32 l_state, lt_state; /* LinkState, LinkTrainingState */
if (count == sizeof(dp)) {
if (copy_from_user(&dp, data, sizeof(dp))) {
ret = -EFAULT;
goto bail;
}
} else if (count == sizeof(odp)) {
if (copy_from_user(&odp, data, sizeof(odp))) {
ret = -EFAULT;
goto bail;
}
dp.len = odp.len;
dp.unit = odp.unit;
dp.data = odp.data;
dp.pbc_wd = 0;
} else {
ret = -EINVAL;
goto bail;
}
/* send count must be an exact number of dwords */
if (dp.len & 3) {
ret = -EINVAL;
goto bail;
}
plen = dp.len >> 2;
dd = ipath_lookup(dp.unit);
if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
!dd->ipath_kregbase) {
ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
dp.unit);
ret = -ENODEV;
goto bail;
}
if (ipath_diag_inuse && !diag_set_link &&
!(dd->ipath_flags & IPATH_LINKACTIVE)) {
diag_set_link = 1;
ipath_cdbg(VERBOSE, "Trying to set to set link active for "
"diag pkt\n");
ipath_set_linkstate(dd, IPATH_IB_LINKARM);
ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
}
if (!(dd->ipath_flags & IPATH_INITTED)) {
/* no hardware, freeze, etc. */
ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
ret = -ENODEV;
goto bail;
}
/*
* Want to skip check for l_state if using custom PBC,
* because we might be trying to force an SM packet out.
* First cut: skip _all_ state checking in that case.
*/
val = ipath_ib_state(dd, dd->ipath_lastibcstat);
lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
(val != dd->ib_init && val != dd->ib_arm &&
val != dd->ib_active))) {
ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
dd->ipath_unit, (unsigned long long) val);
ret = -EINVAL;
goto bail;
}
/*
* need total length before first word written, plus 2 Dwords. One Dword
* is for padding so we get the full user data when not aligned on
* a word boundary. The other Dword is to make sure we have room for the
* ICRC which gets tacked on later.
*/
maxlen_reserve = 2 * sizeof(u32);
if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
dp.len, dd->ipath_ibmaxlen);
ret = -EINVAL;
goto bail;
}
plen = sizeof(u32) + dp.len;
tmpbuf = vmalloc(plen);
if (!tmpbuf) {
dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
"failing\n");
ret = -ENOMEM;
goto bail;
}
if (copy_from_user(tmpbuf,
(const void __user *) (unsigned long) dp.data,
dp.len)) {
ret = -EFAULT;
goto bail;
}
plen >>= 2; /* in dwords */
piobuf = ipath_getpiobuf(dd, plen, &pbufn);
if (!piobuf) {
ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
dd->ipath_unit);
ret = -EBUSY;
goto bail;
}
/* disarm it just to be extra sure */
ipath_disarm_piobufs(dd, pbufn, 1);
if (ipath_debug & __IPATH_PKTDBG)
ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
dd->ipath_unit, plen - 1, pbufn);
if (dp.pbc_wd == 0)
dp.pbc_wd = plen;
writeq(dp.pbc_wd, piobuf);
/*
* Copy all but the trigger word, then flush, so it's written
* to the chip before the trigger word, then write the trigger word,
* then flush again, so the packet is sent.
*/
if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
ipath_flush_wc();
__iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
ipath_flush_wc();
__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
} else
__iowrite32_copy(piobuf + 2, tmpbuf, plen);
ipath_flush_wc();
ret = sizeof(dp);
bail:
vfree(tmpbuf);
return ret;
}
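To make the length bookkeeping concrete (a worked example, not from the source): for dp.len = 64 payload bytes, the dword check passes (64 & 3 == 0), the staging buffer is plen = sizeof(u32) + 64 = 68 bytes, and plen >>= 2 gives 17 dwords; with dp.pbc_wd left at zero, the PBC word therefore defaults to 17. The payload copy starts at piobuf + 2 because the preceding writeq() of the PBC occupies the first two 32-bit words of the PIO buffer.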
static int ipath_diag_release(struct inode *in, struct file *fp)
{
mutex_lock(&ipath_mutex);
ipath_diag_inuse = 0;
fp->private_data = NULL;
mutex_unlock(&ipath_mutex);
return 0;
}
static ssize_t ipath_diag_read(struct file *fp, char __user *data,
size_t count, loff_t *off)
{
struct ipath_devdata *dd = fp->private_data;
void __iomem *kreg_base;
ssize_t ret;
kreg_base = dd->ipath_kregbase;
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
/* address or length is not 32-bit aligned, hence invalid */
ret = -EINVAL;
else if (ipath_diag_inuse < 1 && (*off || count != 8))
ret = -EINVAL; /* prevent cat /dev/ipath_diag* */
else if ((count % 8) || (*off % 8))
/* address or length not 64-bit aligned; do 32-bit reads */
ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
else
ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
if (ret >= 0) {
*off += count;
ret = count;
if (ipath_diag_inuse == -2)
ipath_diag_inuse++;
}
return ret;
}
static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
size_t count, loff_t *off)
{
struct ipath_devdata *dd = fp->private_data;
void __iomem *kreg_base;
ssize_t ret;
kreg_base = dd->ipath_kregbase;
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
/* address or length is not 32-bit aligned, hence invalid */
ret = -EINVAL;
else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
ipath_diag_inuse == -2) /* read qw off 0, write qw off 0 */
ret = -EINVAL; /* before any other write allowed */
else if ((count % 8) || (*off % 8))
/* address or length not 64-bit aligned; do 32-bit writes */
ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
else
ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
if (ret >= 0) {
*off += count;
ret = count;
if (ipath_diag_inuse == -1)
ipath_diag_inuse = 1; /* all read/write OK now */
}
return ret;
}
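The ipath_diag_inuse values (-2 after open, -1 after the first read, 1 after the first write) enforce a fixed handshake: the first operation must read one qword at offset 0, and the second must write one qword at offset 0, before arbitrary offsets are accepted. A hypothetical userspace sketch of that handshake (device node path assumed):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int diag_open_handshake(void)
{
	uint64_t qw;
	int fd = open("/dev/ipath_diag0", O_RDWR);

	if (fd < 0)
		return -1;
	/* read then write one qword at offset 0, as required above */
	if (pread(fd, &qw, sizeof(qw), 0) != sizeof(qw) ||
	    pwrite(fd, &qw, sizeof(qw), 0) != sizeof(qw)) {
		close(fd);
		return -1;
	}
	return fd;	/* subsequent pread()/pwrite() at any aligned offset */
}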
drivers/staging/rdma/ipath/ipath_dma.c (deleted)

/*
 * Copyright (c) 2006 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <rdma/ib_verbs.h>
#include "ipath_verbs.h"
#define BAD_DMA_ADDRESS ((u64) 0)
/*
* The following functions implement driver specific replacements
* for the ib_dma_*() functions.
*
* These functions return kernel virtual addresses instead of
* device bus addresses since the driver uses the CPU to copy
* data instead of using hardware DMA.
*/
static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
{
return dma_addr == BAD_DMA_ADDRESS;
}
static u64 ipath_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
return (u64) cpu_addr;
}
static void ipath_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}
static u64 ipath_dma_map_page(struct ib_device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
u64 addr;
BUG_ON(!valid_dma_direction(direction));
if (offset + size > PAGE_SIZE) {
addr = BAD_DMA_ADDRESS;
goto done;
}
addr = (u64) page_address(page);
if (addr)
addr += offset;
/* TODO: handle highmem pages */
done:
return addr;
}
static void ipath_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction)
{
struct scatterlist *sg;
u64 addr;
int i;
int ret = nents;
BUG_ON(!valid_dma_direction(direction));
for_each_sg(sgl, sg, nents, i) {
addr = (u64) page_address(sg_page(sg));
/* TODO: handle highmem pages */
if (!addr) {
ret = 0;
break;
}
sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
}
return ret;
}
static void ipath_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}
static void ipath_sync_single_for_cpu(struct ib_device *dev,
u64 addr,
size_t size,
enum dma_data_direction dir)
{
}
static void ipath_sync_single_for_device(struct ib_device *dev,
u64 addr,
size_t size,
enum dma_data_direction dir)
{
}
static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
u64 *dma_handle, gfp_t flag)
{
struct page *p;
void *addr = NULL;
p = alloc_pages(flag, get_order(size));
if (p)
addr = page_address(p);
if (dma_handle)
*dma_handle = (u64) addr;
return addr;
}
static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
void *cpu_addr, u64 dma_handle)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}
struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
.mapping_error = ipath_mapping_error,
.map_single = ipath_dma_map_single,
.unmap_single = ipath_dma_unmap_single,
.map_page = ipath_dma_map_page,
.unmap_page = ipath_dma_unmap_page,
.map_sg = ipath_map_sg,
.unmap_sg = ipath_unmap_sg,
.sync_single_for_cpu = ipath_sync_single_for_cpu,
.sync_single_for_device = ipath_sync_single_for_device,
.alloc_coherent = ipath_dma_alloc_coherent,
.free_coherent = ipath_dma_free_coherent
};
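The verbs core reaches these callbacks through wrappers such as ib_dma_map_single(); a simplified sketch of that dispatch as it existed in this era of the rdma core (paraphrased, not verbatim):

static inline u64 sketch_ib_dma_map_single(struct ib_device *dev,
					   void *cpu_addr, size_t size,
					   enum dma_data_direction dir)
{
	if (dev->dma_ops)	/* ipath installs ipath_dma_mapping_ops here */
		return dev->dma_ops->map_single(dev, cpu_addr, size, dir);
	return dma_map_single(dev->dma_device, cpu_addr, size, dir);
}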
(3 file diffs collapsed)
drivers/staging/rdma/ipath/ipath_fs.c (deleted)

/*
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include "ipath_kernel.h"
#define IPATHFS_MAGIC 0x726a77
static struct super_block *ipath_super;
static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
struct inode *inode = new_inode(dir->i_sb);
if (!inode) {
error = -EPERM;
goto bail;
}
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
}
inode->i_fop = fops;
d_instantiate(dentry, inode);
error = 0;
bail:
return error;
}
static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
int error;
inode_lock(d_inode(parent));
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
error = ipathfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
inode_unlock(d_inode(parent));
return error;
}
static ssize_t atomic_stats_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
sizeof ipath_stats);
}
static const struct file_operations atomic_stats_ops = {
.read = atomic_stats_read,
.llseek = default_llseek,
};
static ssize_t atomic_counters_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct infinipath_counters counters;
struct ipath_devdata *dd;
dd = file_inode(file)->i_private;
dd->ipath_f_read_counters(dd, &counters);
return simple_read_from_buffer(buf, count, ppos, &counters,
sizeof counters);
}
static const struct file_operations atomic_counters_ops = {
.read = atomic_counters_read,
.llseek = default_llseek,
};
static ssize_t flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct ipath_devdata *dd;
ssize_t ret;
loff_t pos;
char *tmp;
pos = *ppos;
if (pos < 0) {
ret = -EINVAL;
goto bail;
}
if (pos >= sizeof(struct ipath_flash)) {
ret = 0;
goto bail;
}
if (count > sizeof(struct ipath_flash) - pos)
count = sizeof(struct ipath_flash) - pos;
tmp = kmalloc(count, GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
goto bail;
}
dd = file_inode(file)->i_private;
if (ipath_eeprom_read(dd, pos, tmp, count)) {
ipath_dev_err(dd, "failed to read from flash\n");
ret = -ENXIO;
goto bail_tmp;
}
if (copy_to_user(buf, tmp, count)) {
ret = -EFAULT;
goto bail_tmp;
}
*ppos = pos + count;
ret = count;
bail_tmp:
kfree(tmp);
bail:
return ret;
}
static ssize_t flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ipath_devdata *dd;
ssize_t ret;
loff_t pos;
char *tmp;
pos = *ppos;
if (pos != 0) {
ret = -EINVAL;
goto bail;
}
if (count != sizeof(struct ipath_flash)) {
ret = -EINVAL;
goto bail;
}
tmp = memdup_user(buf, count);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
dd = file_inode(file)->i_private;
if (ipath_eeprom_write(dd, pos, tmp, count)) {
ret = -ENXIO;
ipath_dev_err(dd, "failed to write to flash\n");
goto bail_tmp;
}
*ppos = pos + count;
ret = count;
bail_tmp:
kfree(tmp);
bail:
return ret;
}
static const struct file_operations flash_ops = {
.read = flash_read,
.write = flash_write,
.llseek = default_llseek,
};
static int create_device_files(struct super_block *sb,
struct ipath_devdata *dd)
{
struct dentry *dir, *tmp;
char unit[10];
int ret;
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
&simple_dir_operations, dd);
if (ret) {
printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
goto bail;
}
ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
&atomic_counters_ops, dd);
if (ret) {
printk(KERN_ERR "create_file(%s/atomic_counters) "
"failed: %d\n", unit, ret);
goto bail;
}
ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
&flash_ops, dd);
if (ret) {
printk(KERN_ERR "create_file(%s/flash) "
"failed: %d\n", unit, ret);
goto bail;
}
bail:
return ret;
}
static int remove_file(struct dentry *parent, char *name)
{
struct dentry *tmp;
int ret;
tmp = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
goto bail;
}
spin_lock(&tmp->d_lock);
if (simple_positive(tmp)) {
dget_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
simple_unlink(d_inode(parent), tmp);
} else
spin_unlock(&tmp->d_lock);
ret = 0;
bail:
/*
* We don't expect clients to care about the return value, but
* it's there if they need it.
*/
return ret;
}
static int remove_device_files(struct super_block *sb,
struct ipath_devdata *dd)
{
struct dentry *dir, *root;
char unit[10];
int ret;
root = dget(sb->s_root);
inode_lock(d_inode(root));
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
dir = lookup_one_len(unit, root, strlen(unit));
if (IS_ERR(dir)) {
ret = PTR_ERR(dir);
printk(KERN_ERR "Lookup of %s failed\n", unit);
goto bail;
}
remove_file(dir, "flash");
remove_file(dir, "atomic_counters");
d_delete(dir);
ret = simple_rmdir(d_inode(root), dir);
bail:
inode_unlock(d_inode(root));
dput(root);
return ret;
}
static int ipathfs_fill_super(struct super_block *sb, void *data,
int silent)
{
struct ipath_devdata *dd, *tmp;
unsigned long flags;
int ret;
static struct tree_descr files[] = {
[2] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
{""},
};
ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
if (ret) {
printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
goto bail;
}
spin_lock_irqsave(&ipath_devs_lock, flags);
list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
spin_unlock_irqrestore(&ipath_devs_lock, flags);
ret = create_device_files(sb, dd);
if (ret)
goto bail;
spin_lock_irqsave(&ipath_devs_lock, flags);
}
spin_unlock_irqrestore(&ipath_devs_lock, flags);
bail:
return ret;
}
static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct dentry *ret;
ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
if (!IS_ERR(ret))
ipath_super = ret->d_sb;
return ret;
}
static void ipathfs_kill_super(struct super_block *s)
{
kill_litter_super(s);
ipath_super = NULL;
}
int ipathfs_add_device(struct ipath_devdata *dd)
{
int ret;
if (ipath_super == NULL) {
ret = 0;
goto bail;
}
ret = create_device_files(ipath_super, dd);
bail:
return ret;
}
int ipathfs_remove_device(struct ipath_devdata *dd)
{
int ret;
if (ipath_super == NULL) {
ret = 0;
goto bail;
}
ret = remove_device_files(ipath_super, dd);
bail:
return ret;
}
static struct file_system_type ipathfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
.mount = ipathfs_mount,
.kill_sb = ipathfs_kill_super,
};
MODULE_ALIAS_FS("ipathfs");
int __init ipath_init_ipathfs(void)
{
return register_filesystem(&ipathfs_fs_type);
}
void __exit ipath_exit_ipathfs(void)
{
unregister_filesystem(&ipathfs_fs_type);
}
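Usage note (mount point assumed): once register_filesystem() has run, the pseudo-filesystem is mounted in the usual way, e.g.

	mount -t ipathfs none /ipathfs

which exposes atomic_stats at the root, plus one two-digit per-unit directory containing atomic_counters and flash, as created by create_device_files() above.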
(20 file diffs collapsed)
drivers/staging/rdma/ipath/ipath_user_sdma.h (deleted)

/*
* Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/device.h>
struct ipath_user_sdma_queue;
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
int ipath_user_sdma_writev(struct ipath_devdata *dd,
struct ipath_user_sdma_queue *pq,
const struct iovec *iov,
unsigned long dim);
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
struct ipath_user_sdma_queue *pq);
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
struct ipath_user_sdma_queue *pq);
u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
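The declarations above imply the queue's lifecycle; a minimal sketch of an assumed caller (variable names and error handling hypothetical, not from the source):

	struct ipath_user_sdma_queue *pq;

	pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, dd->ipath_unit,
					  port, subport);
	if (!pq)
		return -ENOMEM;

	ret = ipath_user_sdma_writev(dd, pq, iov, dim); /* queue the packets */
	/* ... later, on context teardown: */
	ipath_user_sdma_queue_drain(dd, pq);	/* wait out in-flight descriptors */
	ipath_user_sdma_queue_destroy(pq);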
(4 file diffs collapsed)