Commit f5e741b7 authored by Doug Ledford

staging/rdma: remove deprecated amso1100 driver

The driver was placed in staging for eventual removal; it is time
to complete that task.
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent 1c5e0809
@@ -686,13 +686,6 @@ M: Michael Hanselmann <linux-kernel@hansmi.ch>
S: Supported
F: drivers/macintosh/ams/
AMSO1100 RNIC DRIVER
M: Tom Tucker <tom@opengridcomputing.com>
M: Steve Wise <swise@opengridcomputing.com>
L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/hw/amso1100/
ANALOG DEVICES INC AD9389B DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@@ -22,8 +22,6 @@ menuconfig STAGING_RDMA
# Please keep entries in alphabetic order
if STAGING_RDMA
source "drivers/staging/rdma/amso1100/Kconfig"
source "drivers/staging/rdma/ehca/Kconfig"
source "drivers/staging/rdma/hfi1/Kconfig"
# Entries for RDMA_STAGING tree
obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
config INFINIBAND_AMSO1100
tristate "Ammasso 1100 HCA support"
depends on PCI && INET
---help---
This is a low-level driver for the Ammasso 1100 host
channel adapter (HCA).
config INFINIBAND_AMSO1100_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_AMSO1100
default n
---help---
This option causes the amso1100 driver to produce a bunch of
debug messages. Select this if you are developing the driver
or trying to diagnose a problem.
7/2015
The amso1100 driver has been deprecated and moved to drivers/staging.
It will be removed in the 4.6 merge window.
This diff is collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __C2_H
#define __C2_H
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include "c2_provider.h"
#include "c2_mq.h"
#include "c2_status.h"
#define DRV_NAME "c2"
#define DRV_VERSION "1.1"
#define PFX DRV_NAME ": "
#define BAR_0 0
#define BAR_2 2
#define BAR_4 4
#define RX_BUF_SIZE (1536 + 8)
#define ETH_JUMBO_MTU 9000
#define C2_MAGIC "CEPHEUS"
#define C2_VERSION 4
#define C2_IVN (18 & 0x7fffffff)
#define C2_REG0_SIZE (16 * 1024)
#define C2_REG2_SIZE (2 * 1024 * 1024)
#define C2_REG4_SIZE (256 * 1024 * 1024)
#define C2_NUM_TX_DESC 341
#define C2_NUM_RX_DESC 256
#define C2_PCI_REGS_OFFSET (0x10000)
#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
#define C2_RXP_HRXDQ_SIZE (4096)
#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
#define C2_TXP_HTXDQ_SIZE (4096)
#define C2_TX_TIMEOUT (6*HZ)
/* CEPHEUS */
static const u8 c2_magic[] = {
0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
};
enum adapter_pci_regs {
C2_REGS_MAGIC = 0x0000,
C2_REGS_VERS = 0x0008,
C2_REGS_IVN = 0x000C,
C2_REGS_PCI_WINSIZE = 0x0010,
C2_REGS_Q0_QSIZE = 0x0014,
C2_REGS_Q0_MSGSIZE = 0x0018,
C2_REGS_Q0_POOLSTART = 0x001C,
C2_REGS_Q0_SHARED = 0x0020,
C2_REGS_Q1_QSIZE = 0x0024,
C2_REGS_Q1_MSGSIZE = 0x0028,
C2_REGS_Q1_SHARED = 0x0030,
C2_REGS_Q2_QSIZE = 0x0034,
C2_REGS_Q2_MSGSIZE = 0x0038,
C2_REGS_Q2_SHARED = 0x0040,
C2_REGS_ENADDR = 0x004C,
C2_REGS_RDMA_ENADDR = 0x0054,
C2_REGS_HRX_CUR = 0x006C,
};
struct c2_adapter_pci_regs {
char reg_magic[8];
u32 version;
u32 ivn;
u32 pci_window_size;
u32 q0_q_size;
u32 q0_msg_size;
u32 q0_pool_start;
u32 q0_shared;
u32 q1_q_size;
u32 q1_msg_size;
u32 q1_pool_start;
u32 q1_shared;
u32 q2_q_size;
u32 q2_msg_size;
u32 q2_pool_start;
u32 q2_shared;
u32 log_start;
u32 log_size;
u8 host_enaddr[8];
u8 rdma_enaddr[8];
u32 crash_entry;
u32 crash_ready[2];
u32 fw_txd_cur;
u32 fw_hrxd_cur;
u32 fw_rxd_cur;
};
enum pci_regs {
C2_HISR = 0x0000,
C2_DISR = 0x0004,
C2_HIMR = 0x0008,
C2_DIMR = 0x000C,
C2_NISR0 = 0x0010,
C2_NISR1 = 0x0014,
C2_NIMR0 = 0x0018,
C2_NIMR1 = 0x001C,
C2_IDIS = 0x0020,
};
enum {
C2_PCI_HRX_INT = 1 << 8,
C2_PCI_HTX_INT = 1 << 17,
C2_PCI_HRX_QUI = 1 << 31,
};
/*
* Cepheus registers in BAR0.
*/
struct c2_pci_regs {
u32 hostisr;
u32 dmaisr;
u32 hostimr;
u32 dmaimr;
u32 netisr0;
u32 netisr1;
u32 netimr0;
u32 netimr1;
u32 int_disable;
};
/* TXP flags */
enum c2_txp_flags {
TXP_HTXD_DONE = 0,
TXP_HTXD_READY = 1 << 0,
TXP_HTXD_UNINIT = 1 << 1,
};
/* RXP flags */
enum c2_rxp_flags {
RXP_HRXD_UNINIT = 0,
RXP_HRXD_READY = 1 << 0,
RXP_HRXD_DONE = 1 << 1,
};
/* RXP status */
enum c2_rxp_status {
RXP_HRXD_ZERO = 0,
RXP_HRXD_OK = 1 << 0,
RXP_HRXD_BUF_OV = 1 << 1,
};
/* TXP descriptor fields */
enum txp_desc {
C2_TXP_FLAGS = 0x0000,
C2_TXP_LEN = 0x0002,
C2_TXP_ADDR = 0x0004,
};
/* RXP descriptor fields */
enum rxp_desc {
C2_RXP_FLAGS = 0x0000,
C2_RXP_STATUS = 0x0002,
C2_RXP_COUNT = 0x0004,
C2_RXP_LEN = 0x0006,
C2_RXP_ADDR = 0x0008,
};
struct c2_txp_desc {
u16 flags;
u16 len;
u64 addr;
} __attribute__ ((packed));
struct c2_rxp_desc {
u16 flags;
u16 status;
u16 count;
u16 len;
u64 addr;
} __attribute__ ((packed));
struct c2_rxp_hdr {
u16 flags;
u16 status;
u16 len;
u16 rsvd;
} __attribute__ ((packed));
struct c2_tx_desc {
u32 len;
u32 status;
dma_addr_t next_offset;
};
struct c2_rx_desc {
u32 len;
u32 status;
dma_addr_t next_offset;
};
struct c2_alloc {
u32 last;
u32 max;
spinlock_t lock;
unsigned long *table;
};
struct c2_array {
struct {
void **page;
int used;
} *page_list;
};
/*
* The MQ shared pointer pool is organized as a linked list of
* chunks. Each chunk contains a linked list of free shared pointers
* that can be allocated to a given user mode client.
*
*/
struct sp_chunk {
struct sp_chunk *next;
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
u16 head;
u16 shared_ptr[0];
};
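/*
 * For illustration: in a freshly allocated chunk the free list has
 * head == 0 and shared_ptr[i] == i + 1 for every free slot, with the
 * last slot set to 0xFFFF as the end-of-list marker; see
 * c2_alloc_mqsp_chunk() for the code that builds this layout.
 */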
struct c2_pd_table {
u32 last;
u32 max;
spinlock_t lock;
unsigned long *table;
};
struct c2_qp_table {
struct idr idr;
spinlock_t lock;
};
struct c2_element {
struct c2_element *next;
void *ht_desc; /* host descriptor */
void __iomem *hw_desc; /* hardware descriptor */
struct sk_buff *skb;
dma_addr_t mapaddr;
u32 maplen;
};
struct c2_ring {
struct c2_element *to_clean;
struct c2_element *to_use;
struct c2_element *start;
unsigned long count;
};
struct c2_dev {
struct ib_device ibdev;
void __iomem *regs;
void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
void __iomem *mmio_rxp_ring;
spinlock_t lock;
struct pci_dev *pcidev;
struct net_device *netdev;
struct net_device *pseudo_netdev;
unsigned int cur_tx;
unsigned int cur_rx;
u32 adapter_handle;
int device_cap_flags;
void __iomem *kva; /* KVA device memory */
unsigned long pa; /* PA device memory */
void **qptr_array;
struct kmem_cache *host_msg_cache;
struct list_head cca_link; /* adapter list */
struct list_head eh_wakeup_list; /* event wakeup list */
wait_queue_head_t req_vq_wo;
/* Cached RNIC properties */
struct ib_device_attr props;
struct c2_pd_table pd_table;
struct c2_qp_table qp_table;
int ports; /* num of GigE ports */
int devnum;
spinlock_t vqlock; /* sync vbs req MQ */
/* Verbs Queues */
struct c2_mq req_vq; /* Verbs Request MQ */
struct c2_mq rep_vq; /* Verbs Reply MQ */
struct c2_mq aeq; /* Async Events MQ */
/* Kernel client MQs */
struct sp_chunk *kern_mqsp_pool;
/* Device updates these values when posting messages to a host
* target queue */
u16 req_vq_shared;
u16 rep_vq_shared;
u16 aeq_shared;
u16 irq_claimed;
/*
* Shared host target pages for user-accessible MQs.
*/
int hthead; /* index of first free entry */
void *htpages; /* kernel vaddr */
int htlen; /* length of htpages memory */
void *htuva; /* user mapped vaddr */
spinlock_t htlock; /* serialize allocation */
u64 adapter_hint_uva; /* access to the activity FIFO */
// spinlock_t aeq_lock;
// spinlock_t rnic_lock;
__be16 *hint_count;
dma_addr_t hint_count_dma;
u16 hints_read;
int init; /* TRUE if it's ready */
char ae_cache_name[16];
char vq_cache_name[16];
};
struct c2_port {
u32 msg_enable;
struct c2_dev *c2dev;
struct net_device *netdev;
spinlock_t tx_lock;
u32 tx_avail;
struct c2_ring tx_ring;
struct c2_ring rx_ring;
void *mem; /* PCI memory for host rings */
dma_addr_t dma;
unsigned long mem_size;
u32 rx_buf_size;
};
/*
* Activity FIFO registers in BAR0.
*/
#define PCI_BAR0_HOST_HINT 0x100
#define PCI_BAR0_ADAPTER_HINT 0x2000
/*
* Ammasso PCI vendor id and Cepheus PCI device id.
*/
#define CQ_ARMED 0x01
#define CQ_WAIT_FOR_DMA 0x80
/*
* The format of a hint is as follows:
* Lower 16 bits are the count of hints for the queue.
* Next 15 bits are the qp_index
* Upper most bit depends on who reads it:
* If read by producer, then it means Full (1) or Not-Full (0)
* If read by consumer, then it means Empty (1) or Not-Empty (0)
*/
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
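/*
 * Worked example (illustrative values): C2_HINT_MAKE(3, 2) yields
 * 0x00030002, and C2_HINT_GET_INDEX()/C2_HINT_GET_COUNT() recover the
 * qp_index of 3 and the hint count of 2 from that value.
 */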
/*
* The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
* struct.
*/
#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
#ifndef readq
static inline u64 readq(const void __iomem * addr)
{
u64 ret = readl(addr + 4);
ret <<= 32;
ret |= readl(addr);
return ret;
}
#endif
#ifndef writeq
static inline void __raw_writeq(u64 val, void __iomem * addr)
{
__raw_writel((u32) (val), addr);
__raw_writel((u32) (val >> 32), (addr + 4));
}
#endif
#define C2_SET_CUR_RX(c2dev, cur_rx) \
__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
#define C2_GET_CUR_RX(c2dev) \
be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
{
return container_of(ibdev, struct c2_dev, ibdev);
}
static inline int c2_errno(void *reply)
{
switch (c2_wr_get_result(reply)) {
case C2_OK:
return 0;
case CCERR_NO_BUFS:
case CCERR_INSUFFICIENT_RESOURCES:
case CCERR_ZERO_RDMA_READ_RESOURCES:
return -ENOMEM;
case CCERR_MR_IN_USE:
case CCERR_QP_IN_USE:
return -EBUSY;
case CCERR_ADDR_IN_USE:
return -EADDRINUSE;
case CCERR_ADDR_NOT_AVAIL:
return -EADDRNOTAVAIL;
case CCERR_CONN_RESET:
return -ECONNRESET;
case CCERR_NOT_IMPLEMENTED:
case CCERR_INVALID_WQE:
return -ENOSYS;
case CCERR_QP_NOT_PRIVILEGED:
return -EPERM;
case CCERR_STACK_ERROR:
return -EPROTO;
case CCERR_ACCESS_VIOLATION:
case CCERR_BASE_AND_BOUNDS_VIOLATION:
return -EFAULT;
case CCERR_STAG_STATE_NOT_INVALID:
case CCERR_INVALID_ADDRESS:
case CCERR_INVALID_CQ:
case CCERR_INVALID_EP:
case CCERR_INVALID_MODIFIER:
case CCERR_INVALID_MTU:
case CCERR_INVALID_PD_ID:
case CCERR_INVALID_QP:
case CCERR_INVALID_RNIC:
case CCERR_INVALID_STAG:
return -EINVAL;
default:
return -EAGAIN;
}
}
/* Device */
int c2_register_device(struct c2_dev *c2dev);
void c2_unregister_device(struct c2_dev *c2dev);
int c2_rnic_init(struct c2_dev *c2dev);
void c2_rnic_term(struct c2_dev *c2dev);
void c2_rnic_interrupt(struct c2_dev *c2dev);
int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
/* QPs */
int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
struct ib_qp_attr *attr, int attr_mask);
int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
int ord, int ird);
int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
struct ib_send_wr **bad_wr);
int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
struct ib_recv_wr **bad_wr);
void c2_init_qp_table(struct c2_dev *c2dev);
void c2_cleanup_qp_table(struct c2_dev *c2dev);
void c2_set_qp_state(struct c2_qp *, int);
struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
/* PDs */
int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
int c2_init_pd_table(struct c2_dev *c2dev);
void c2_cleanup_pd_table(struct c2_dev *c2dev);
/* CQs */
int c2_init_cq(struct c2_dev *c2dev, int entries,
struct c2_ucontext *ctx, struct c2_cq *cq);
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
/* CM */
int c2_llp_connect(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *iw_param);
int c2_llp_accept(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *iw_param);
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
u8 pdata_len);
int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
int c2_llp_service_destroy(struct iw_cm_id *cm_id);
/* MM */
int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
int page_size, int pbl_depth, u32 length,
u32 off, u64 *va, enum c2_acf acf,
struct c2_mr *mr);
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
/* AE */
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
/* MQSP Allocator */
int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
struct sp_chunk **root);
void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
dma_addr_t *dma_addr, gfp_t gfp_mask);
void c2_free_mqsp(__be16* mqsp);
#endif
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include <rdma/iw_cm.h>
#include "c2_status.h"
#include "c2_ae.h"
static int c2_convert_cm_status(u32 c2_status)
{
switch (c2_status) {
case C2_CONN_STATUS_SUCCESS:
return 0;
case C2_CONN_STATUS_REJECTED:
return -ENETRESET;
case C2_CONN_STATUS_REFUSED:
return -ECONNREFUSED;
case C2_CONN_STATUS_TIMEDOUT:
return -ETIMEDOUT;
case C2_CONN_STATUS_NETUNREACH:
return -ENETUNREACH;
case C2_CONN_STATUS_HOSTUNREACH:
return -EHOSTUNREACH;
case C2_CONN_STATUS_INVALID_RNIC:
return -EINVAL;
case C2_CONN_STATUS_INVALID_QP:
return -EINVAL;
case C2_CONN_STATUS_INVALID_QP_STATE:
return -EINVAL;
case C2_CONN_STATUS_ADDR_NOT_AVAIL:
return -EADDRNOTAVAIL;
default:
printk(KERN_ERR PFX
"%s - Unable to convert CM status: %d\n",
__func__, c2_status);
return -EIO;
}
}
static const char* to_event_str(int event)
{
static const char* event_str[] = {
"CCAE_REMOTE_SHUTDOWN",
"CCAE_ACTIVE_CONNECT_RESULTS",
"CCAE_CONNECTION_REQUEST",
"CCAE_LLP_CLOSE_COMPLETE",
"CCAE_TERMINATE_MESSAGE_RECEIVED",
"CCAE_LLP_CONNECTION_RESET",
"CCAE_LLP_CONNECTION_LOST",
"CCAE_LLP_SEGMENT_SIZE_INVALID",
"CCAE_LLP_INVALID_CRC",
"CCAE_LLP_BAD_FPDU",
"CCAE_INVALID_DDP_VERSION",
"CCAE_INVALID_RDMA_VERSION",
"CCAE_UNEXPECTED_OPCODE",
"CCAE_INVALID_DDP_QUEUE_NUMBER",
"CCAE_RDMA_READ_NOT_ENABLED",
"CCAE_RDMA_WRITE_NOT_ENABLED",
"CCAE_RDMA_READ_TOO_SMALL",
"CCAE_NO_L_BIT",
"CCAE_TAGGED_INVALID_STAG",
"CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
"CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
"CCAE_TAGGED_INVALID_PD",
"CCAE_WRAP_ERROR",
"CCAE_BAD_CLOSE",
"CCAE_BAD_LLP_CLOSE",
"CCAE_INVALID_MSN_RANGE",
"CCAE_INVALID_MSN_GAP",
"CCAE_IRRQ_OVERFLOW",
"CCAE_IRRQ_MSN_GAP",
"CCAE_IRRQ_MSN_RANGE",
"CCAE_IRRQ_INVALID_STAG",
"CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
"CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
"CCAE_IRRQ_INVALID_PD",
"CCAE_IRRQ_WRAP_ERROR",
"CCAE_CQ_SQ_COMPLETION_OVERFLOW",
"CCAE_CQ_RQ_COMPLETION_ERROR",
"CCAE_QP_SRQ_WQE_ERROR",
"CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
"CCAE_CQ_OVERFLOW",
"CCAE_CQ_OPERATION_ERROR",
"CCAE_SRQ_LIMIT_REACHED",
"CCAE_QP_RQ_LIMIT_REACHED",
"CCAE_SRQ_CATASTROPHIC_ERROR",
"CCAE_RNIC_CATASTROPHIC_ERROR"
};
if (event < CCAE_REMOTE_SHUTDOWN ||
event > CCAE_RNIC_CATASTROPHIC_ERROR)
return "<invalid event>";
event -= CCAE_REMOTE_SHUTDOWN;
return event_str[event];
}
static const char *to_qp_state_str(int state)
{
switch (state) {
case C2_QP_STATE_IDLE:
return "C2_QP_STATE_IDLE";
case C2_QP_STATE_CONNECTING:
return "C2_QP_STATE_CONNECTING";
case C2_QP_STATE_RTS:
return "C2_QP_STATE_RTS";
case C2_QP_STATE_CLOSING:
return "C2_QP_STATE_CLOSING";
case C2_QP_STATE_TERMINATE:
return "C2_QP_STATE_TERMINATE";
case C2_QP_STATE_ERROR:
return "C2_QP_STATE_ERROR";
default:
return "<invalid QP state>";
}
}
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
{
struct c2_mq *mq = c2dev->qptr_array[mq_index];
union c2wr *wr;
void *resource_user_context;
struct iw_cm_event cm_event;
struct ib_event ib_event;
enum c2_resource_indicator resource_indicator;
enum c2_event_id event_id;
unsigned long flags;
int status;
struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
/*
* retrieve the message
*/
wr = c2_mq_consume(mq);
if (!wr)
return;
memset(&ib_event, 0, sizeof(ib_event));
memset(&cm_event, 0, sizeof(cm_event));
event_id = c2_wr_get_id(wr);
resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
resource_user_context =
(void *) (unsigned long) wr->ae.ae_generic.user_context;
status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
pr_debug("event received c2_dev=%p, event_id=%d, "
"resource_indicator=%d, user_context=%p, status = %d\n",
c2dev, event_id, resource_indicator, resource_user_context,
status);
switch (resource_indicator) {
case C2_RES_IND_QP:{
struct c2_qp *qp = resource_user_context;
struct iw_cm_id *cm_id = qp->cm_id;
struct c2wr_ae_active_connect_results *res;
if (!cm_id) {
pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
qp);
goto ignore_it;
}
pr_debug("%s: event = %s, user_context=%llx, "
"resource_type=%x, "
"resource=%x, qp_state=%s\n",
__func__,
to_event_str(event_id),
(unsigned long long) wr->ae.ae_generic.user_context,
be32_to_cpu(wr->ae.ae_generic.resource_type),
be32_to_cpu(wr->ae.ae_generic.resource),
to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
switch (event_id) {
case CCAE_ACTIVE_CONNECT_RESULTS:
res = &wr->ae.ae_active_connect_results;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
laddr->sin_addr.s_addr = res->laddr;
raddr->sin_addr.s_addr = res->raddr;
laddr->sin_port = res->lport;
raddr->sin_port = res->rport;
if (status == 0) {
cm_event.private_data_len =
be32_to_cpu(res->private_data_length);
cm_event.private_data = res->private_data;
} else {
spin_lock_irqsave(&qp->lock, flags);
if (qp->cm_id) {
qp->cm_id->rem_ref(qp->cm_id);
qp->cm_id = NULL;
}
spin_unlock_irqrestore(&qp->lock, flags);
cm_event.private_data_len = 0;
cm_event.private_data = NULL;
}
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
break;
case CCAE_TERMINATE_MESSAGE_RECEIVED:
case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
ib_event.device = &c2dev->ibdev;
ib_event.element.qp = &qp->ibqp;
ib_event.event = IB_EVENT_QP_REQ_ERR;
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&ib_event,
qp->ibqp.
qp_context);
break;
case CCAE_BAD_CLOSE:
case CCAE_LLP_CLOSE_COMPLETE:
case CCAE_LLP_CONNECTION_RESET:
case CCAE_LLP_CONNECTION_LOST:
BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
spin_lock_irqsave(&qp->lock, flags);
if (qp->cm_id) {
qp->cm_id->rem_ref(qp->cm_id);
qp->cm_id = NULL;
}
spin_unlock_irqrestore(&qp->lock, flags);
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = 0;
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
break;
default:
BUG_ON(1);
pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
"CM_ID=%p\n",
__func__, __LINE__,
event_id, qp, cm_id);
break;
}
break;
}
case C2_RES_IND_EP:{
struct c2wr_ae_connection_request *req =
&wr->ae.ae_connection_request;
struct iw_cm_id *cm_id =
resource_user_context;
pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
if (event_id != CCAE_CONNECTION_REQUEST) {
pr_debug("%s: Invalid event_id: %d\n",
__func__, event_id);
break;
}
cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
laddr->sin_addr.s_addr = req->laddr;
raddr->sin_addr.s_addr = req->raddr;
laddr->sin_port = req->lport;
raddr->sin_port = req->rport;
cm_event.private_data_len =
be32_to_cpu(req->private_data_length);
cm_event.private_data = req->private_data;
/*
* Until ird/ord negotiation via MPAv2 support is added, send
* max supported values
*/
cm_event.ird = cm_event.ord = 128;
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
break;
}
case C2_RES_IND_CQ:{
struct c2_cq *cq =
resource_user_context;
pr_debug("IB_EVENT_CQ_ERR\n");
ib_event.device = &c2dev->ibdev;
ib_event.element.cq = &cq->ibcq;
ib_event.event = IB_EVENT_CQ_ERR;
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&ib_event,
cq->ibcq.cq_context);
break;
}
default:
printk("Bad resource indicator = %d\n",
resource_indicator);
break;
}
ignore_it:
c2_mq_free(mq);
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_AE_H_
#define _C2_AE_H_
/*
* WARNING: If you change this file, also bump C2_IVN_BASE
* in common/include/clustercore/c2_ivn.h.
*/
/*
* Asynchronous Event Identifiers
*
* These start at 0x80 only so it's obvious from inspection that
* they are not work-request statuses. This isn't critical.
*
* NOTE: these event id's must fit in eight bits.
*/
enum c2_event_id {
CCAE_REMOTE_SHUTDOWN = 0x80,
CCAE_ACTIVE_CONNECT_RESULTS,
CCAE_CONNECTION_REQUEST,
CCAE_LLP_CLOSE_COMPLETE,
CCAE_TERMINATE_MESSAGE_RECEIVED,
CCAE_LLP_CONNECTION_RESET,
CCAE_LLP_CONNECTION_LOST,
CCAE_LLP_SEGMENT_SIZE_INVALID,
CCAE_LLP_INVALID_CRC,
CCAE_LLP_BAD_FPDU,
CCAE_INVALID_DDP_VERSION,
CCAE_INVALID_RDMA_VERSION,
CCAE_UNEXPECTED_OPCODE,
CCAE_INVALID_DDP_QUEUE_NUMBER,
CCAE_RDMA_READ_NOT_ENABLED,
CCAE_RDMA_WRITE_NOT_ENABLED,
CCAE_RDMA_READ_TOO_SMALL,
CCAE_NO_L_BIT,
CCAE_TAGGED_INVALID_STAG,
CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
CCAE_TAGGED_INVALID_PD,
CCAE_WRAP_ERROR,
CCAE_BAD_CLOSE,
CCAE_BAD_LLP_CLOSE,
CCAE_INVALID_MSN_RANGE,
CCAE_INVALID_MSN_GAP,
CCAE_IRRQ_OVERFLOW,
CCAE_IRRQ_MSN_GAP,
CCAE_IRRQ_MSN_RANGE,
CCAE_IRRQ_INVALID_STAG,
CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
CCAE_IRRQ_INVALID_PD,
CCAE_IRRQ_WRAP_ERROR,
CCAE_CQ_SQ_COMPLETION_OVERFLOW,
CCAE_CQ_RQ_COMPLETION_ERROR,
CCAE_QP_SRQ_WQE_ERROR,
CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
CCAE_CQ_OVERFLOW,
CCAE_CQ_OPERATION_ERROR,
CCAE_SRQ_LIMIT_REACHED,
CCAE_QP_RQ_LIMIT_REACHED,
CCAE_SRQ_CATASTROPHIC_ERROR,
CCAE_RNIC_CATASTROPHIC_ERROR
/* WARNING If you add more id's, make sure their values fit in eight bits. */
};
/*
* Resource Indicators and Identifiers
*/
enum c2_resource_indicator {
C2_RES_IND_QP = 1,
C2_RES_IND_EP,
C2_RES_IND_CQ,
C2_RES_IND_SRQ,
};
#endif /* _C2_AE_H_ */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/bitmap.h>
#include "c2.h"
static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
struct sp_chunk **head)
{
int i;
struct sp_chunk *new_head;
dma_addr_t dma_addr;
new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
&dma_addr, gfp_mask);
if (new_head == NULL)
return -ENOMEM;
new_head->dma_addr = dma_addr;
dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
new_head->head = 0;
/* build list where each index is the next free slot */
for (i = 0;
i < (PAGE_SIZE - sizeof(struct sp_chunk) -
sizeof(u16)) / sizeof(u16) - 1;
i++) {
new_head->shared_ptr[i] = i + 1;
}
/* terminate list */
new_head->shared_ptr[i] = 0xFFFF;
*head = new_head;
return 0;
}
int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
struct sp_chunk **root)
{
return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
}
void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
{
struct sp_chunk *next;
while (root) {
next = root->next;
dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
dma_unmap_addr(root, mapping));
root = next;
}
}
__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
dma_addr_t *dma_addr, gfp_t gfp_mask)
{
u16 mqsp;
while (head) {
mqsp = head->head;
if (mqsp != 0xFFFF) {
head->head = head->shared_ptr[mqsp];
break;
} else if (head->next == NULL) {
if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
0) {
head = head->next;
mqsp = head->head;
head->head = head->shared_ptr[mqsp];
break;
} else
return NULL;
} else
head = head->next;
}
if (head) {
*dma_addr = head->dma_addr +
((unsigned long) &(head->shared_ptr[mqsp]) -
(unsigned long) head);
pr_debug("%s addr %p dma_addr %llx\n", __func__,
&(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
return (__force __be16 *) &(head->shared_ptr[mqsp]);
}
return NULL;
}
void c2_free_mqsp(__be16 *mqsp)
{
struct sp_chunk *head;
u16 idx;
/* The chunk containing this ptr begins at the page boundary */
head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
/* Link head to new mqsp */
*mqsp = (__force __be16) head->head;
/* Compute the shared_ptr index */
idx = (offset_in_page(mqsp)) >> 1;
idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
/* Point this index at the head */
head->shared_ptr[idx] = head->head;
/* Point head at this index */
head->head = idx;
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/slab.h>
#include "c2.h"
#include "c2_wr.h"
#include "c2_vq.h"
#include <rdma/iw_cm.h>
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
struct c2_dev *c2dev = to_c2dev(cm_id->device);
struct ib_qp *ibqp;
struct c2_qp *qp;
struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
struct c2_vq_req *vq_req;
int err;
struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
if (cm_id->remote_addr.ss_family != AF_INET)
return -ENOSYS;
ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
if (!ibqp)
return -EINVAL;
qp = to_c2qp(ibqp);
/* Associate QP <--> CM_ID */
cm_id->provider_data = qp;
cm_id->add_ref(cm_id);
qp->cm_id = cm_id;
/*
* only support the max private_data length
*/
if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
err = -EINVAL;
goto bail0;
}
/*
* Set the rdma read limits
*/
err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
if (err)
goto bail0;
/*
* Create and send a WR_QP_CONNECT...
*/
wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
if (!wr) {
err = -ENOMEM;
goto bail0;
}
vq_req = vq_req_alloc(c2dev);
if (!vq_req) {
err = -ENOMEM;
goto bail1;
}
c2_wr_set_id(wr, CCWR_QP_CONNECT);
wr->hdr.context = 0;
wr->rnic_handle = c2dev->adapter_handle;
wr->qp_handle = qp->adapter_handle;
wr->remote_addr = raddr->sin_addr.s_addr;
wr->remote_port = raddr->sin_port;
/*
* Move any private data from the caller's buf into
* the WR.
*/
if (iw_param->private_data) {
wr->private_data_length =
cpu_to_be32(iw_param->private_data_len);
memcpy(&wr->private_data[0], iw_param->private_data,
iw_param->private_data_len);
} else
wr->private_data_length = 0;
/*
* Send WR to adapter. NOTE: There is no synch reply from
* the adapter.
*/
err = vq_send_wr(c2dev, (union c2wr *) wr);
vq_req_free(c2dev, vq_req);
bail1:
kfree(wr);
bail0:
if (err) {
/*
* If we fail, release reference on QP and
* disassociate QP from CM_ID
*/
cm_id->provider_data = NULL;
qp->cm_id = NULL;
cm_id->rem_ref(cm_id);
}
return err;
}
int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
{
struct c2_dev *c2dev;
struct c2wr_ep_listen_create_req wr;
struct c2wr_ep_listen_create_rep *reply;
struct c2_vq_req *vq_req;
int err;
struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
if (cm_id->local_addr.ss_family != AF_INET)
return -ENOSYS;
c2dev = to_c2dev(cm_id->device);
if (c2dev == NULL)
return -EINVAL;
/*
* Allocate verbs request.
*/
vq_req = vq_req_alloc(c2dev);
if (!vq_req)
return -ENOMEM;
/*
* Build the WR
*/
c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
wr.hdr.context = (u64) (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
wr.local_addr = laddr->sin_addr.s_addr;
wr.local_port = laddr->sin_port;
wr.backlog = cpu_to_be32(backlog);
wr.user_context = (u64) (unsigned long) cm_id;
/*
* Reference the request struct. Dereferenced in the int handler.
*/
vq_req_get(c2dev, vq_req);
/*
* Send WR to adapter
*/
err = vq_send_wr(c2dev, (union c2wr *) & wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail0;
}
/*
* Wait for reply from adapter
*/
err = vq_wait_for_reply(c2dev, vq_req);
if (err)
goto bail0;
/*
* Process reply
*/
reply =
(struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
if (!reply) {
err = -ENOMEM;
goto bail1;
}
if ((err = c2_errno(reply)) != 0)
goto bail1;
/*
* Keep the adapter handle. Used in subsequent destroy
*/
cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
/*
* free vq stuff
*/
vq_repbuf_free(c2dev, reply);
vq_req_free(c2dev, vq_req);
return 0;
bail1:
vq_repbuf_free(c2dev, reply);
bail0:
vq_req_free(c2dev, vq_req);
return err;
}
int c2_llp_service_destroy(struct iw_cm_id *cm_id)
{
struct c2_dev *c2dev;
struct c2wr_ep_listen_destroy_req wr;
struct c2wr_ep_listen_destroy_rep *reply;
struct c2_vq_req *vq_req;
int err;
c2dev = to_c2dev(cm_id->device);
if (c2dev == NULL)
return -EINVAL;
/*
* Allocate verbs request.
*/
vq_req = vq_req_alloc(c2dev);
if (!vq_req)
return -ENOMEM;
/*
* Build the WR
*/
c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
wr.hdr.context = (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
/*
* reference the request struct. dereferenced in the int handler.
*/
vq_req_get(c2dev, vq_req);
/*
* Send WR to adapter
*/
err = vq_send_wr(c2dev, (union c2wr *) & wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail0;
}
/*
* Wait for reply from adapter
*/
err = vq_wait_for_reply(c2dev, vq_req);
if (err)
goto bail0;
/*
* Process reply
*/
reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
if (!reply) {
err = -ENOMEM;
goto bail0;
}
vq_repbuf_free(c2dev, reply);
bail0:
vq_req_free(c2dev, vq_req);
return err;
}
int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
struct c2_dev *c2dev = to_c2dev(cm_id->device);
struct c2_qp *qp;
struct ib_qp *ibqp;
struct c2wr_cr_accept_req *wr; /* variable length WR */
struct c2_vq_req *vq_req;
struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
int err;
ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
if (!ibqp)
return -EINVAL;
qp = to_c2qp(ibqp);
/* Set the RDMA read limits */
err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
if (err)
goto bail0;
/* Allocate verbs request. */
vq_req = vq_req_alloc(c2dev);
if (!vq_req) {
err = -ENOMEM;
goto bail0;
}
vq_req->qp = qp;
vq_req->cm_id = cm_id;
vq_req->event = IW_CM_EVENT_ESTABLISHED;
wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
if (!wr) {
err = -ENOMEM;
goto bail1;
}
/* Build the WR */
c2_wr_set_id(wr, CCWR_CR_ACCEPT);
wr->hdr.context = (unsigned long) vq_req;
wr->rnic_handle = c2dev->adapter_handle;
wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
wr->qp_handle = qp->adapter_handle;
/* Replace the cr_handle with the QP after accept */
cm_id->provider_data = qp;
cm_id->add_ref(cm_id);
qp->cm_id = cm_id;
cm_id->provider_data = qp;
/* Validate private_data length */
if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
err = -EINVAL;
goto bail1;
}
if (iw_param->private_data) {
wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
memcpy(&wr->private_data[0],
iw_param->private_data, iw_param->private_data_len);
} else
wr->private_data_length = 0;
/* Reference the request struct. Dereferenced in the int handler. */
vq_req_get(c2dev, vq_req);
/* Send WR to adapter */
err = vq_send_wr(c2dev, (union c2wr *) wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail1;
}
/* Wait for reply from adapter */
err = vq_wait_for_reply(c2dev, vq_req);
if (err)
goto bail1;
/* Check that reply is present */
reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
if (!reply) {
err = -ENOMEM;
goto bail1;
}
err = c2_errno(reply);
vq_repbuf_free(c2dev, reply);
if (!err)
c2_set_qp_state(qp, C2_QP_STATE_RTS);
bail1:
kfree(wr);
vq_req_free(c2dev, vq_req);
bail0:
if (err) {
/*
* If we fail, release reference on QP and
* disassociate QP from CM_ID
*/
cm_id->provider_data = NULL;
qp->cm_id = NULL;
cm_id->rem_ref(cm_id);
}
return err;
}
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
struct c2_dev *c2dev;
struct c2wr_cr_reject_req wr;
struct c2_vq_req *vq_req;
struct c2wr_cr_reject_rep *reply;
int err;
c2dev = to_c2dev(cm_id->device);
/*
* Allocate verbs request.
*/
vq_req = vq_req_alloc(c2dev);
if (!vq_req)
return -ENOMEM;
/*
* Build the WR
*/
c2_wr_set_id(&wr, CCWR_CR_REJECT);
wr.hdr.context = (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
/*
* reference the request struct. dereferenced in the int handler.
*/
vq_req_get(c2dev, vq_req);
/*
* Send WR to adapter
*/
err = vq_send_wr(c2dev, (union c2wr *) & wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail0;
}
/*
* Wait for reply from adapter
*/
err = vq_wait_for_reply(c2dev, vq_req);
if (err)
goto bail0;
/*
* Process reply
*/
reply = (struct c2wr_cr_reject_rep *) (unsigned long)
vq_req->reply_msg;
if (!reply) {
err = -ENOMEM;
goto bail0;
}
err = c2_errno(reply);
/*
* free vq stuff
*/
vq_repbuf_free(c2dev, reply);
bail0:
vq_req_free(c2dev, vq_req);
return err;
}
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/gfp.h>
#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"
#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
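/*
 * C2_CQ_MSG_SIZE rounds sizeof(struct c2wr_ce) up to the next multiple
 * of 32 bytes; e.g. a hypothetical 40-byte c2wr_ce would yield a
 * 64-byte CQ message size.
 */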
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
struct c2_cq *cq;
unsigned long flags;
spin_lock_irqsave(&c2dev->lock, flags);
cq = c2dev->qptr_array[cqn];
if (!cq) {
spin_unlock_irqrestore(&c2dev->lock, flags);
return NULL;
}
atomic_inc(&cq->refcount);
spin_unlock_irqrestore(&c2dev->lock, flags);
return cq;
}
static void c2_cq_put(struct c2_cq *cq)
{
if (atomic_dec_and_test(&cq->refcount))
wake_up(&cq->wait);
}
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
struct c2_cq *cq;
cq = c2_cq_get(c2dev, mq_index);
if (!cq) {
printk("discarding events on destroyed CQN=%d\n", mq_index);
return;
}
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
c2_cq_put(cq);
}
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
struct c2_cq *cq;
struct c2_mq *q;
cq = c2_cq_get(c2dev, mq_index);
if (!cq)
return;
spin_lock_irq(&cq->lock);
q = &cq->mq;
if (q && !c2_mq_empty(q)) {
u16 priv = q->priv;
struct c2wr_ce *msg;
while (priv != be16_to_cpu(*q->shared)) {
msg = (struct c2wr_ce *)
(q->msg_pool.host + priv * q->msg_size);
if (msg->qp_user_context == (u64) (unsigned long) qp) {
msg->qp_user_context = (u64) 0;
}
priv = (priv + 1) % q->q_size;
}
}
spin_unlock_irq(&cq->lock);
c2_cq_put(cq);
}
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
switch (status) {
case C2_OK:
return IB_WC_SUCCESS;
case CCERR_FLUSHED:
return IB_WC_WR_FLUSH_ERR;
case CCERR_BASE_AND_BOUNDS_VIOLATION:
return IB_WC_LOC_PROT_ERR;
case CCERR_ACCESS_VIOLATION:
return IB_WC_LOC_ACCESS_ERR;
case CCERR_TOTAL_LENGTH_TOO_BIG:
return IB_WC_LOC_LEN_ERR;
case CCERR_INVALID_WINDOW:
return IB_WC_MW_BIND_ERR;
default:
return IB_WC_GENERAL_ERR;
}
}
static inline int c2_poll_one(struct c2_dev *c2dev,
struct c2_cq *cq, struct ib_wc *entry)
{
struct c2wr_ce *ce;
struct c2_qp *qp;
int is_recv = 0;
ce = c2_mq_consume(&cq->mq);
if (!ce) {
return -EAGAIN;
}
/*
* if the qp returned is null then this qp has already
* been freed and we are unable to process the completion.
* try pulling the next message
*/
while ((qp =
(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
c2_mq_free(&cq->mq);
ce = c2_mq_consume(&cq->mq);
if (!ce)
return -EAGAIN;
}
entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
entry->wr_id = ce->hdr.context;
entry->qp = &qp->ibqp;
entry->wc_flags = 0;
entry->slid = 0;
entry->sl = 0;
entry->src_qp = 0;
entry->dlid_path_bits = 0;
entry->pkey_index = 0;
switch (c2_wr_get_id(ce)) {
case C2_WR_TYPE_SEND:
entry->opcode = IB_WC_SEND;
break;
case C2_WR_TYPE_RDMA_WRITE:
entry->opcode = IB_WC_RDMA_WRITE;
break;
case C2_WR_TYPE_RDMA_READ:
entry->opcode = IB_WC_RDMA_READ;
break;
case C2_WR_TYPE_RECV:
entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
entry->opcode = IB_WC_RECV;
is_recv = 1;
break;
default:
break;
}
/* consume the WQEs */
if (is_recv)
c2_mq_lconsume(&qp->rq_mq, 1);
else
c2_mq_lconsume(&qp->sq_mq,
be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
/* free the message */
c2_mq_free(&cq->mq);
return 0;
}
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
struct c2_dev *c2dev = to_c2dev(ibcq->device);
struct c2_cq *cq = to_c2cq(ibcq);
unsigned long flags;
int npolled, err;
spin_lock_irqsave(&cq->lock, flags);
for (npolled = 0; npolled < num_entries; ++npolled) {
err = c2_poll_one(c2dev, cq, entry + npolled);
if (err)
break;
}
spin_unlock_irqrestore(&cq->lock, flags);
return npolled;
}
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
struct c2_mq_shared __iomem *shared;
struct c2_cq *cq;
unsigned long flags;
int ret = 0;
cq = to_c2cq(ibcq);
shared = cq->mq.peer;
if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
else
return -EINVAL;
writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
/*
* Now read back shared->armed to make the PCI
* write synchronous. This is necessary for
* correct cq notification semantics.
*/
readb(&shared->armed);
if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
spin_lock_irqsave(&cq->lock, flags);
ret = !c2_mq_empty(&cq->mq);
spin_unlock_irqrestore(&cq->lock, flags);
}
return ret;
}
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
size_t q_size, size_t msg_size)
{
u8 *pool_start;
if (q_size > SIZE_MAX / msg_size)
return -EINVAL;
pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
&mq->host_dma, GFP_KERNEL);
if (!pool_start)
return -ENOMEM;
c2_mq_rep_init(mq,
0, /* index (currently unknown) */
q_size,
msg_size,
pool_start,
NULL, /* peer (currently unknown) */
C2_MQ_HOST_TARGET);
dma_unmap_addr_set(mq, mapping, mq->host_dma);
return 0;
}
int c2_init_cq(struct c2_dev *c2dev, int entries,
struct c2_ucontext *ctx, struct c2_cq *cq)
{
struct c2wr_cq_create_req wr;
struct c2wr_cq_create_rep *reply;
unsigned long peer_pa;
struct c2_vq_req *vq_req;
int err;
might_sleep();
cq->ibcq.cqe = entries - 1;
cq->is_kernel = !ctx;
/* Allocate a shared pointer */
cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
&cq->mq.shared_dma, GFP_KERNEL);
if (!cq->mq.shared)
return -ENOMEM;
/* Allocate pages for the message pool */
err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
if (err)
goto bail0;
vq_req = vq_req_alloc(c2dev);
if (!vq_req) {
err = -ENOMEM;
goto bail1;
}
memset(&wr, 0, sizeof(wr));
c2_wr_set_id(&wr, CCWR_CQ_CREATE);
wr.hdr.context = (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
wr.msg_size = cpu_to_be32(cq->mq.msg_size);
wr.depth = cpu_to_be32(cq->mq.q_size);
wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
wr.user_context = (u64) (unsigned long) (cq);
vq_req_get(c2dev, vq_req);
err = vq_send_wr(c2dev, (union c2wr *) & wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail2;
}
err = vq_wait_for_reply(c2dev, vq_req);
if (err)
goto bail2;
reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
if (!reply) {
err = -ENOMEM;
goto bail2;
}
if ((err = c2_errno(reply)) != 0)
goto bail3;
cq->adapter_handle = reply->cq_handle;
cq->mq.index = be32_to_cpu(reply->mq_index);
peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
if (!cq->mq.peer) {
err = -ENOMEM;
goto bail3;
}
vq_repbuf_free(c2dev, reply);
vq_req_free(c2dev, vq_req);
spin_lock_init(&cq->lock);
atomic_set(&cq->refcount, 1);
init_waitqueue_head(&cq->wait);
/*
* Use the MQ index allocated by the adapter to
* store the CQ in the qptr_array
*/
cq->cqn = cq->mq.index;
c2dev->qptr_array[cq->cqn] = cq;
return 0;
bail3:
vq_repbuf_free(c2dev, reply);
bail2:
vq_req_free(c2dev, vq_req);
bail1:
c2_free_cq_buf(c2dev, &cq->mq);
bail0:
c2_free_mqsp(cq->mq.shared);
return err;
}
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
int err;
struct c2_vq_req *vq_req;
struct c2wr_cq_destroy_req wr;
struct c2wr_cq_destroy_rep *reply;
might_sleep();
/* Clear CQ from the qptr array */
spin_lock_irq(&c2dev->lock);
c2dev->qptr_array[cq->mq.index] = NULL;
atomic_dec(&cq->refcount);
spin_unlock_irq(&c2dev->lock);
wait_event(cq->wait, !atomic_read(&cq->refcount));
vq_req = vq_req_alloc(c2dev);
if (!vq_req) {
goto bail0;
}
memset(&wr, 0, sizeof(wr));
c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
wr.hdr.context = (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
wr.cq_handle = cq->adapter_handle;
vq_req_get(c2dev, vq_req);
err = vq_send_wr(c2dev, (union c2wr *) & wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail1;
}
err = vq_wait_for_reply(c2dev, vq_req);
if (err)
goto bail1;
reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
if (reply)
vq_repbuf_free(c2dev, reply);
bail1:
vq_req_free(c2dev, vq_req);
bail0:
if (cq->is_kernel) {
c2_free_cq_buf(c2dev, &cq->mq);
}
return;
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include <rdma/iw_cm.h>
#include "c2_vq.h"
static void handle_mq(struct c2_dev *c2dev, u32 index);
static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
/*
* Handle RNIC interrupts
*/
void c2_rnic_interrupt(struct c2_dev *c2dev)
{
unsigned int mq_index;
while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
if (mq_index & 0x80000000) {
break;
}
c2dev->hints_read++;
handle_mq(c2dev, mq_index);
}
}
/*
* Top level MQ handler
*/
static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
{
if (c2dev->qptr_array[mq_index] == NULL) {
pr_debug("handle_mq: stray activity for mq_index=%d\n",
mq_index);
return;
}
switch (mq_index) {
case (0):
/*
* An index of 0 in the activity queue
* indicates the req vq now has messages
* available...
*
* Wake up any waiters waiting on req VQ
* message availability.
*/
wake_up(&c2dev->req_vq_wo);
break;
case (1):
handle_vq(c2dev, mq_index);
break;
case (2):
/* We have to purge the VQ in case there are pending
* accept reply requests that would result in the
* generation of an ESTABLISHED event. If we don't
* generate these first, a CLOSE event could end up
* being delivered before the ESTABLISHED event.
*/
handle_vq(c2dev, 1);
c2_ae_event(c2dev, mq_index);
break;
default:
/* There is no event synchronization between CQ events
* and AE or CM events. In fact, CQE could be
* delivered for all of the I/O up to and including the
* FLUSH for a peer disconnect prior to the ESTABLISHED
* event being delivered to the app. The reason for this
* is that CM events are delivered on a thread, while AE
* and CQ events are delivered on interrupt context.
*/
c2_cq_event(c2dev, mq_index);
break;
}
return;
}
/*
* Handles verbs WR replies.
*/
static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
{
void *adapter_msg, *reply_msg;
struct c2wr_hdr *host_msg;
struct c2wr_hdr tmp;
struct c2_mq *reply_vq;
struct c2_vq_req *req;
struct iw_cm_event cm_event;
int err;
reply_vq = c2dev->qptr_array[mq_index];
/*
* get next msg from mq_index into adapter_msg.
* don't free it yet.
*/
adapter_msg = c2_mq_consume(reply_vq);
if (adapter_msg == NULL) {
return;
}
host_msg = vq_repbuf_alloc(c2dev);
/*
* If we can't get a host buffer, then we'll still
* wake up the waiter; we just won't give him the msg.
* It is assumed the waiter will deal with this...
*/
if (!host_msg) {
pr_debug("handle_vq: no repbufs!\n");
/*
* just copy the WR header into a local variable.
* this allows us to still demux on the context
*/
host_msg = &tmp;
memcpy(host_msg, adapter_msg, sizeof(tmp));
reply_msg = NULL;
} else {
memcpy(host_msg, adapter_msg, reply_vq->msg_size);
reply_msg = host_msg;
}
/*
* consume the msg from the MQ
*/
c2_mq_free(reply_vq);
/*
* wakeup the waiter.
*/
req = (struct c2_vq_req *) (unsigned long) host_msg->context;
if (req == NULL) {
/*
* We should never get here, as the adapter should
* never send us a reply that we're not expecting.
*/
if (reply_msg != NULL)
vq_repbuf_free(c2dev, host_msg);
pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
return;
}
if (reply_msg)
err = c2_errno(reply_msg);
else
err = -ENOMEM;
if (!err) switch (req->event) {
case IW_CM_EVENT_ESTABLISHED:
c2_set_qp_state(req->qp,
C2_QP_STATE_RTS);
/*
* Until ird/ord negotiation via MPAv2 support is added, send
* max supported values
*/
cm_event.ird = cm_event.ord = 128;
case IW_CM_EVENT_CLOSE:
/*
* Move the QP to RTS if this is
* the established event
*/
cm_event.event = req->event;
cm_event.status = 0;
cm_event.local_addr = req->cm_id->local_addr;
cm_event.remote_addr = req->cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
req->cm_id->event_handler(req->cm_id, &cm_event);
break;
default:
break;
}
req->reply_msg = (u64) (unsigned long) (reply_msg);
atomic_set(&req->reply_ready, 1);
wake_up(&req->wait_object);
/*
* If the request was cancelled, then this put will
* free the vq_req memory...and reply_msg!!!
*/
vq_req_put(c2dev, req);
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include "c2.h"
#include "c2_vq.h"
#define PBL_VIRT 1
#define PBL_PHYS 2
/*
* Send all the PBL messages to convey the remainder of the PBL.
* Wait for the adapter's reply on the last one.
* This is indicated by setting the MEM_PBL_COMPLETE in the flags.
*
* NOTE: vq_req is _not_ freed by this function. The VQ Host
* Reply buffer _is_ freed by this function.
*/
static int
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
unsigned long va, u32 pbl_depth,
struct c2_vq_req *vq_req, int pbl_type)
{
u32 pbe_count; /* amt that fits in a PBL msg */
u32 count; /* amt in this PBL MSG. */
struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */
struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */
int err, pbl_virt, pbl_index, i;
switch (pbl_type) {
case PBL_VIRT:
pbl_virt = 1;
break;
case PBL_PHYS:
pbl_virt = 0;
break;
default:
return -EINVAL;
}
pbe_count = (c2dev->req_vq.msg_size -
sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
if (!wr) {
return -ENOMEM;
}
c2_wr_set_id(wr, CCWR_NSMR_PBL);
/*
* Only the last PBL message will generate a reply from the adapter,
* so by default set the context to 0, indicating that no kernel verbs
* handler is blocked awaiting this reply. The last message overrides
* this below.
*/
wr->hdr.context = 0;
wr->rnic_handle = c2dev->adapter_handle;
wr->stag_index = stag_index; /* already swapped */
wr->flags = 0;
pbl_index = 0;
while (pbl_depth) {
count = min(pbe_count, pbl_depth);
wr->addrs_length = cpu_to_be32(count);
/*
* If this is the last message, then reference the
* vq request struct because we're going to wait for a reply.
* Also mark this PBL msg as the last one.
*/
if (count == pbl_depth) {
/*
* reference the request struct. dereferenced in the
* int handler.
*/
vq_req_get(c2dev, vq_req);
wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
/*
* This is the last PBL message.
* Set the context to our VQ Request Object so we can
* wait for the reply.
*/
wr->hdr.context = (unsigned long) vq_req;
}
/*
* If pbl_virt is set then va is a virtual address
* that describes a virtually contiguous memory
* allocation. The wr needs the start of each virtual page
* to be converted to the corresponding physical address
* of the page. If pbl_virt is not set then va is an array
* of physical addresses and there is no conversion to do.
* Just fill in the wr with what is in the array.
*/
for (i = 0; i < count; i++) {
if (pbl_virt) {
/* virtual case: only advance va; paddrs[i] is not filled in here */
va += PAGE_SIZE;
} else {
wr->paddrs[i] =
cpu_to_be64(((u64 *)va)[pbl_index + i]);
}
}
/*
* Send WR to adapter
*/
err = vq_send_wr(c2dev, (union c2wr *) wr);
if (err) {
if (count <= pbe_count) {
vq_req_put(c2dev, vq_req);
}
goto bail0;
}
pbl_depth -= count;
pbl_index += count;
}
/*
* Now wait for the reply...
*/
err = vq_wait_for_reply(c2dev, vq_req);
if (err) {
goto bail0;
}
/*
* Process reply
*/
reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
if (!reply) {
err = -ENOMEM;
goto bail0;
}
err = c2_errno(reply);
vq_repbuf_free(c2dev, reply);
bail0:
kfree(wr);
return err;
}
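/*
* Worked example (illustrative numbers only): if the request VQ message
* size leaves room for pbe_count == 32 PBEs per CCWR_NSMR_PBL message and
* pbl_depth == 100 entries remain, the loop above sends messages carrying
* 32, 32, 32 and finally 4 PBEs. Only the last, 4-entry message has
* MEM_PBL_COMPLETE set and hdr.context pointing at vq_req, so only that
* message generates a reply that wakes the caller.
*/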
#define C2_PBL_MAX_DEPTH 131072
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
int page_size, int pbl_depth, u32 length,
u32 offset, u64 *va, enum c2_acf acf,
struct c2_mr *mr)
{
struct c2_vq_req *vq_req;
struct c2wr_nsmr_register_req *wr;
struct c2wr_nsmr_register_rep *reply;
u16 flags;
int i, pbe_count, count;
int err;
if (!va || !length || !addr_list || !pbl_depth)
return -EINTR;
/*
* Verify PBL depth is within rnic max
*/
if (pbl_depth > C2_PBL_MAX_DEPTH) {
return -EINTR;
}
/*
* allocate verbs request object
*/
vq_req = vq_req_alloc(c2dev);
if (!vq_req)
return -ENOMEM;
wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
if (!wr) {
err = -ENOMEM;
goto bail0;
}
/*
* build the WR
*/
c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
wr->hdr.context = (unsigned long) vq_req;
wr->rnic_handle = c2dev->adapter_handle;
flags = (acf | MEM_VA_BASED | MEM_REMOTE);
/*
* compute how many pbes can fit in the message
*/
pbe_count = (c2dev->req_vq.msg_size -
sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
if (pbl_depth <= pbe_count) {
flags |= MEM_PBL_COMPLETE;
}
wr->flags = cpu_to_be16(flags);
wr->stag_key = 0; /* stag_key */
wr->va = cpu_to_be64(*va);
wr->pd_id = mr->pd->pd_id;
wr->pbe_size = cpu_to_be32(page_size);
wr->length = cpu_to_be32(length);
wr->pbl_depth = cpu_to_be32(pbl_depth);
wr->fbo = cpu_to_be32(offset);
count = min(pbl_depth, pbe_count);
wr->addrs_length = cpu_to_be32(count);
/*
* fill out the PBL for this message
*/
for (i = 0; i < count; i++) {
wr->paddrs[i] = cpu_to_be64(addr_list[i]);
}
/*
* reference the request struct
*/
vq_req_get(c2dev, vq_req);
/*
* send the WR to the adapter
*/
err = vq_send_wr(c2dev, (union c2wr *) wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail1;
}
/*
* wait for reply from adapter
*/
err = vq_wait_for_reply(c2dev, vq_req);
if (err) {
goto bail1;
}
/*
* process reply
*/
reply =
(struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
if (!reply) {
err = -ENOMEM;
goto bail1;
}
err = c2_errno(reply);
if (err)
goto bail2;
/* *p_pb_entries = be32_to_cpu(reply->pbl_depth); */
mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
vq_repbuf_free(c2dev, reply);
/*
* if there are still more PBEs we need to send them to
* the adapter and wait for a reply on the final one.
* reuse vq_req for this purpose.
*/
pbl_depth -= count;
if (pbl_depth) {
vq_req->reply_msg = (unsigned long) NULL;
atomic_set(&vq_req->reply_ready, 0);
err = send_pbl_messages(c2dev,
cpu_to_be32(mr->ibmr.lkey),
(unsigned long) &addr_list[i],
pbl_depth, vq_req, PBL_PHYS);
if (err) {
goto bail1;
}
}
vq_req_free(c2dev, vq_req);
kfree(wr);
return err;
bail2:
vq_repbuf_free(c2dev, reply);
bail1:
kfree(wr);
bail0:
vq_req_free(c2dev, vq_req);
return err;
}
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
struct c2_vq_req *vq_req; /* verbs request object */
struct c2wr_stag_dealloc_req wr; /* work request */
struct c2wr_stag_dealloc_rep *reply; /* WR reply */
int err;
/*
* allocate verbs request object
*/
vq_req = vq_req_alloc(c2dev);
if (!vq_req) {
return -ENOMEM;
}
/*
* Build the WR
*/
c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
wr.hdr.context = (u64) (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
wr.stag_index = cpu_to_be32(stag_index);
/*
* reference the request struct. dereferenced in the int handler.
*/
vq_req_get(c2dev, vq_req);
/*
* Send WR to adapter
*/
err = vq_send_wr(c2dev, (union c2wr *) &wr);
if (err) {
vq_req_put(c2dev, vq_req);
goto bail0;
}
/*
* Wait for reply from adapter
*/
err = vq_wait_for_reply(c2dev, vq_req);
if (err) {
goto bail0;
}
/*
* Process reply
*/
reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
if (!reply) {
err = -ENOMEM;
goto bail0;
}
err = c2_errno(reply);
vq_repbuf_free(c2dev, reply);
bail0:
vq_req_free(c2dev, vq_req);
return err;
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include "c2_mq.h"
void *c2_mq_alloc(struct c2_mq *q)
{
BUG_ON(q->magic != C2_MQ_MAGIC);
BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
if (c2_mq_full(q)) {
return NULL;
} else {
#ifdef DEBUG
struct c2wr_hdr *m =
(struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
#ifdef CCMSGMAGIC
BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
m->magic = cpu_to_be32(CCWR_MAGIC);
#endif
return m;
#else
return q->msg_pool.host + q->priv * q->msg_size;
#endif
}
}
void c2_mq_produce(struct c2_mq *q)
{
BUG_ON(q->magic != C2_MQ_MAGIC);
BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
if (!c2_mq_full(q)) {
q->priv = (q->priv + 1) % q->q_size;
q->hint_count++;
/* Update peer's offset. */
__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
}
}
void *c2_mq_consume(struct c2_mq *q)
{
BUG_ON(q->magic != C2_MQ_MAGIC);
BUG_ON(q->type != C2_MQ_HOST_TARGET);
if (c2_mq_empty(q)) {
return NULL;
} else {
#ifdef DEBUG
struct c2wr_hdr *m = (struct c2wr_hdr *)
(q->msg_pool.host + q->priv * q->msg_size);
#ifdef CCMSGMAGIC
BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
#endif
return m;
#else
return q->msg_pool.host + q->priv * q->msg_size;
#endif
}
}
void c2_mq_free(struct c2_mq *q)
{
BUG_ON(q->magic != C2_MQ_MAGIC);
BUG_ON(q->type != C2_MQ_HOST_TARGET);
if (!c2_mq_empty(q)) {
#ifdef CCMSGMAGIC
{
struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
(q->msg_pool.adapter + q->priv * q->msg_size);
__raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
}
#endif
q->priv = (q->priv + 1) % q->q_size;
/* Update peer's offset. */
__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
}
}
void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
{
BUG_ON(q->magic != C2_MQ_MAGIC);
BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
while (wqe_count--) {
BUG_ON(c2_mq_empty(q));
*q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
}
}
#if 0
u32 c2_mq_count(struct c2_mq *q)
{
s32 count;
if (q->type == C2_MQ_HOST_TARGET)
count = be16_to_cpu(*q->shared) - q->priv;
else
count = q->priv - be16_to_cpu(*q->shared);
if (count < 0)
count += q->q_size;
return (u32) count;
}
#endif /* 0 */
void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
{
BUG_ON(!q->shared);
/* This code assumes the byte swapping has already been done! */
q->index = index;
q->q_size = q_size;
q->msg_size = msg_size;
q->msg_pool.adapter = pool_start;
q->peer = (struct c2_mq_shared __iomem *) peer;
q->magic = C2_MQ_MAGIC;
q->type = type;
q->priv = 0;
q->hint_count = 0;
return;
}
void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 *pool_start, u16 __iomem *peer, u32 type)
{
BUG_ON(!q->shared);
/* This code assumes the byte swapping has already been done! */
q->index = index;
q->q_size = q_size;
q->msg_size = msg_size;
q->msg_pool.host = pool_start;
q->peer = (struct c2_mq_shared __iomem *) peer;
q->magic = C2_MQ_MAGIC;
q->type = type;
q->priv = 0;
q->hint_count = 0;
return;
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_MQ_H_
#define _C2_MQ_H_
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "c2_wr.h"
enum c2_shared_regs {
C2_SHARED_ARMED = 0x10,
C2_SHARED_NOTIFY = 0x18,
C2_SHARED_SHARED = 0x40,
};
struct c2_mq_shared {
u16 unused1;
u8 armed;
u8 notification_type;
u32 unused2;
u16 shared;
/* Pad to 64 bytes. */
u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
};
enum c2_mq_type {
C2_MQ_HOST_TARGET = 1,
C2_MQ_ADAPTER_TARGET = 2,
};
/*
* c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
* c2_user_mq_t (which is the same format) is for user-mode MQs...
*/
#define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */
struct c2_mq {
u32 magic;
union {
u8 *host;
u8 __iomem *adapter;
} msg_pool;
dma_addr_t host_dma;
DEFINE_DMA_UNMAP_ADDR(mapping);
u16 hint_count;
u16 priv;
struct c2_mq_shared __iomem *peer;
__be16 *shared;
dma_addr_t shared_dma;
u32 q_size;
u32 msg_size;
u32 index;
enum c2_mq_type type;
};
static inline int c2_mq_empty(struct c2_mq *q)
{
return q->priv == be16_to_cpu(*q->shared);
}
static inline int c2_mq_full(struct c2_mq *q)
{
return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
}
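/*
* Example (illustration only): with q_size == 8, *q->shared == 2 and
* q->priv == 1, c2_mq_full() is true because (2 + 8 - 1) % 8 == 1.
* One slot is always left unused to distinguish full from empty, so a
* full ring holds q_size - 1 messages; c2_mq_empty() is true only when
* priv catches up to *q->shared.
*/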
void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
void *c2_mq_alloc(struct c2_mq *q);
void c2_mq_produce(struct c2_mq *q);
void *c2_mq_consume(struct c2_mq *q);
void c2_mq_free(struct c2_mq *q);
void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 *pool_start, u16 __iomem *peer, u32 type);
#endif /* _C2_MQ_H_ */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include "c2.h"
#include "c2_provider.h"
int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
{
u32 obj;
int ret = 0;
spin_lock(&c2dev->pd_table.lock);
obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
c2dev->pd_table.last);
if (obj >= c2dev->pd_table.max)
obj = find_first_zero_bit(c2dev->pd_table.table,
c2dev->pd_table.max);
if (obj < c2dev->pd_table.max) {
pd->pd_id = obj;
__set_bit(obj, c2dev->pd_table.table);
c2dev->pd_table.last = obj+1;
if (c2dev->pd_table.last >= c2dev->pd_table.max)
c2dev->pd_table.last = 0;
} else
ret = -ENOMEM;
spin_unlock(&c2dev->pd_table.lock);
return ret;
}
void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
{
spin_lock(&c2dev->pd_table.lock);
__clear_bit(pd->pd_id, c2dev->pd_table.table);
spin_unlock(&c2dev->pd_table.lock);
}
int c2_init_pd_table(struct c2_dev *c2dev)
{
c2dev->pd_table.last = 0;
c2dev->pd_table.max = c2dev->props.max_pd;
spin_lock_init(&c2dev->pd_table.lock);
c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
sizeof(long), GFP_KERNEL);
if (!c2dev->pd_table.table)
return -ENOMEM;
bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
return 0;
}
void c2_cleanup_pd_table(struct c2_dev *c2dev)
{
kfree(c2dev->pd_table.table);
}
This diff has been collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef C2_PROVIDER_H
#define C2_PROVIDER_H
#include <linux/inetdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include "c2_mq.h"
#include <rdma/iw_cm.h>
#define C2_MPT_FLAG_ATOMIC (1 << 14)
#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13)
#define C2_MPT_FLAG_REMOTE_READ (1 << 12)
#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11)
#define C2_MPT_FLAG_LOCAL_READ (1 << 10)
struct c2_buf_list {
void *buf;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
/* The user context keeps track of objects allocated for a
* particular user-mode client. */
struct c2_ucontext {
struct ib_ucontext ibucontext;
};
struct c2_mtt;
/* All objects associated with a PD are kept in the
* associated user context if present.
*/
struct c2_pd {
struct ib_pd ibpd;
u32 pd_id;
};
struct c2_mr {
struct ib_mr ibmr;
struct c2_pd *pd;
struct ib_umem *umem;
};
struct c2_av;
enum c2_ah_type {
C2_AH_ON_HCA,
C2_AH_PCI_POOL,
C2_AH_KMALLOC
};
struct c2_ah {
struct ib_ah ibah;
};
struct c2_cq {
struct ib_cq ibcq;
spinlock_t lock;
atomic_t refcount;
int cqn;
int is_kernel;
wait_queue_head_t wait;
u32 adapter_handle;
struct c2_mq mq;
};
struct c2_wq {
spinlock_t lock;
};
struct iw_cm_id;
struct c2_qp {
struct ib_qp ibqp;
struct iw_cm_id *cm_id;
spinlock_t lock;
atomic_t refcount;
wait_queue_head_t wait;
int qpn;
u32 adapter_handle;
u32 send_sgl_depth;
u32 recv_sgl_depth;
u32 rdma_write_sgl_depth;
u8 state;
struct c2_mq sq_mq;
struct c2_mq rq_mq;
};
struct c2_cr_query_attrs {
u32 local_addr;
u32 remote_addr;
u16 local_port;
u16 remote_port;
};
static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct c2_pd, ibpd);
}
static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct c2_ucontext, ibucontext);
}
static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct c2_mr, ibmr);
}
static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
{
return container_of(ibah, struct c2_ah, ibah);
}
static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct c2_cq, ibcq);
}
static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct c2_qp, ibqp);
}
static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
{
struct in_device *ind;
int ret = 0;
ind = in_dev_get(netdev);
if (!ind)
return 0;
for_ifa(ind) {
if (ifa->ifa_address == addr) {
ret = 1;
break;
}
}
endfor_ifa(ind);
in_dev_put(ind);
return ret;
}
#endif /* C2_PROVIDER_H */
This diff has been collapsed.
This diff has been collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_STATUS_H_
#define _C2_STATUS_H_
/*
* Verbs Status Codes
*/
enum c2_status {
C2_OK = 0, /* This must be zero */
CCERR_INSUFFICIENT_RESOURCES = 1,
CCERR_INVALID_MODIFIER = 2,
CCERR_INVALID_MODE = 3,
CCERR_IN_USE = 4,
CCERR_INVALID_RNIC = 5,
CCERR_INTERRUPTED_OPERATION = 6,
CCERR_INVALID_EH = 7,
CCERR_INVALID_CQ = 8,
CCERR_CQ_EMPTY = 9,
CCERR_NOT_IMPLEMENTED = 10,
CCERR_CQ_DEPTH_TOO_SMALL = 11,
CCERR_PD_IN_USE = 12,
CCERR_INVALID_PD = 13,
CCERR_INVALID_SRQ = 14,
CCERR_INVALID_ADDRESS = 15,
CCERR_INVALID_NETMASK = 16,
CCERR_INVALID_QP = 17,
CCERR_INVALID_QP_STATE = 18,
CCERR_TOO_MANY_WRS_POSTED = 19,
CCERR_INVALID_WR_TYPE = 20,
CCERR_INVALID_SGL_LENGTH = 21,
CCERR_INVALID_SQ_DEPTH = 22,
CCERR_INVALID_RQ_DEPTH = 23,
CCERR_INVALID_ORD = 24,
CCERR_INVALID_IRD = 25,
CCERR_QP_ATTR_CANNOT_CHANGE = 26,
CCERR_INVALID_STAG = 27,
CCERR_QP_IN_USE = 28,
CCERR_OUTSTANDING_WRS = 29,
CCERR_STAG_IN_USE = 30,
CCERR_INVALID_STAG_INDEX = 31,
CCERR_INVALID_SGL_FORMAT = 32,
CCERR_ADAPTER_TIMEOUT = 33,
CCERR_INVALID_CQ_DEPTH = 34,
CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
CCERR_INVALID_EP = 36,
CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
CCERR_FLUSHED = 38,
CCERR_INVALID_WQE = 39,
CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
CCERR_REMOTE_TERMINATION_ERROR = 41,
CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
CCERR_ACCESS_VIOLATION = 43,
CCERR_INVALID_PD_ID = 44,
CCERR_WRAP_ERROR = 45,
CCERR_INV_STAG_ACCESS_ERROR = 46,
CCERR_ZERO_RDMA_READ_RESOURCES = 47,
CCERR_QP_NOT_PRIVILEGED = 48,
CCERR_STAG_STATE_NOT_INVALID = 49,
CCERR_INVALID_PAGE_SIZE = 50,
CCERR_INVALID_BUFFER_SIZE = 51,
CCERR_INVALID_PBE = 52,
CCERR_INVALID_FBO = 53,
CCERR_INVALID_LENGTH = 54,
CCERR_INVALID_ACCESS_RIGHTS = 55,
CCERR_PBL_TOO_BIG = 56,
CCERR_INVALID_VA = 57,
CCERR_INVALID_REGION = 58,
CCERR_INVALID_WINDOW = 59,
CCERR_TOTAL_LENGTH_TOO_BIG = 60,
CCERR_INVALID_QP_ID = 61,
CCERR_ADDR_IN_USE = 62,
CCERR_ADDR_NOT_AVAIL = 63,
CCERR_NET_DOWN = 64,
CCERR_NET_UNREACHABLE = 65,
CCERR_CONN_ABORTED = 66,
CCERR_CONN_RESET = 67,
CCERR_NO_BUFS = 68,
CCERR_CONN_TIMEDOUT = 69,
CCERR_CONN_REFUSED = 70,
CCERR_HOST_UNREACHABLE = 71,
CCERR_INVALID_SEND_SGL_DEPTH = 72,
CCERR_INVALID_RECV_SGL_DEPTH = 73,
CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
CCERR_INSUFFICIENT_PRIVILEGES = 75,
CCERR_STACK_ERROR = 76,
CCERR_INVALID_VERSION = 77,
CCERR_INVALID_MTU = 78,
CCERR_INVALID_IMAGE = 79,
CCERR_PENDING = 98, /* not an error; used internally by adapter */
CCERR_DEFER = 99, /* not an error; used internally by adapter */
CCERR_FAILED_WRITE = 100,
CCERR_FAILED_ERASE = 101,
CCERR_FAILED_VERIFICATION = 102,
CCERR_NOT_FOUND = 103,
};
/*
* CCAE_ACTIVE_CONNECT_RESULTS status result codes.
*/
enum c2_connect_status {
C2_CONN_STATUS_SUCCESS = C2_OK,
C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
};
/*
* Flash programming status codes.
*/
enum c2_flash_status {
C2_FLASH_STATUS_SUCCESS = 0x0000,
C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
C2_FLASH_STATUS_ECLBS = 0x0400,
C2_FLASH_STATUS_PSLBS = 0x0800,
C2_FLASH_STATUS_VPENS = 0x1000,
};
#endif /* _C2_STATUS_H_ */
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef C2_USER_H
#define C2_USER_H
#include <linux/types.h>
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
struct c2_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 uarc_size;
};
struct c2_alloc_pd_resp {
__u32 pdn;
__u32 reserved;
};
struct c2_create_cq {
__u32 lkey;
__u32 pdn;
__u64 arm_db_page;
__u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
struct c2_create_cq_resp {
__u32 cqn;
__u32 reserved;
};
struct c2_create_qp {
__u32 lkey;
__u32 reserved;
__u64 sq_db_page;
__u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
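/*
* Layout check (illustration only): every field above is a fixed-width
* __u32 or __u64 and each __u64 sits at an 8-byte offset, so these
* structs have the same size and layout for 32-bit and 64-bit user
* processes, as the comment at the top of this file requires.
*/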
#endif /* C2_USER_H */
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "c2_vq.h"
#include "c2_provider.h"
/*
* Verbs Request Objects:
*
* VQ Request Objects are allocated by the kernel verbs handlers.
* They contain a wait object, a refcnt, an atomic bool indicating that the
* adapter has replied, and a copy of the verb reply work request.
* A pointer to the VQ Request Object is passed down in the context
* field of the work request message, and reflected back by the adapter
* in the verbs reply message. The function handle_vq() in the interrupt
* path will use this pointer to:
* 1) append a copy of the verbs reply message
* 2) mark that the reply is ready
* 3) wake up the kernel verbs handler blocked awaiting the reply.
*
*
* The kernel verbs handlers do a "get" to put a 2nd reference on the
* VQ Request object. If the kernel verbs handler exits before the adapter
* can respond, this extra reference will keep the VQ Request object around
* until the adapter's reply can be processed. The reason we need this is
* because a pointer to this object is stuffed into the context field of
* the verbs work request message, and reflected back in the reply message.
* It is used in the interrupt handler (handle_vq()) to wake up the appropriate
* kernel verb handler that is blocked awaiting the verb reply.
* So handle_vq() will do a "put" on the object when it's done accessing it.
* NOTE: If we guarantee that the kernel verb handler will never bail before
* getting the reply, then we don't need these refcnts.
*
*
* VQ Request objects are freed by the kernel verbs handlers only
* after the verb has been processed, or when the adapter fails and
* does not reply.
*
*
* Verbs Reply Buffers:
*
* VQ Reply bufs are local host memory copies of an
* outstanding Verb Request reply
* message. They are always allocated by the kernel verbs handlers, and _may_ be
* freed by either the kernel verbs handler -or- the interrupt handler. The
* kernel verbs handler _must_ free the repbuf, then free the vq request object
* in that order.
*/
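/*
* Typical caller pattern (sketch only; see e.g. c2_stag_dealloc()):
*
*	vq_req = vq_req_alloc(c2dev);
*	...build the work request, wr.hdr.context = (u64) (unsigned long) vq_req...
*	vq_req_get(c2dev, vq_req);
*	err = vq_send_wr(c2dev, (union c2wr *) &wr);
*	if (err) {
*		vq_req_put(c2dev, vq_req);
*		goto out;
*	}
*	err = vq_wait_for_reply(c2dev, vq_req);
*	if (!err) {
*		reply = (void *) (unsigned long) vq_req->reply_msg;
*		...inspect the reply, then...
*		vq_repbuf_free(c2dev, reply);
*	}
* out:
*	vq_req_free(c2dev, vq_req);
*/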
int vq_init(struct c2_dev *c2dev)
{
sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
(char) ('0' + c2dev->devnum));
c2dev->host_msg_cache =
kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (c2dev->host_msg_cache == NULL) {
return -ENOMEM;
}
return 0;
}
void vq_term(struct c2_dev *c2dev)
{
kmem_cache_destroy(c2dev->host_msg_cache);
}
/* vq_req_alloc - allocate a VQ Request Object and initialize it.
* The refcnt is set to 1.
*/
struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
{
struct c2_vq_req *r;
r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
if (r) {
init_waitqueue_head(&r->wait_object);
r->reply_msg = 0;
r->event = 0;
r->cm_id = NULL;
r->qp = NULL;
atomic_set(&r->refcnt, 1);
atomic_set(&r->reply_ready, 0);
}
return r;
}
/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler
* has already freed the VQ Reply Buffer if it existed.
*/
void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
{
r->reply_msg = 0;
if (atomic_dec_and_test(&r->refcnt)) {
kfree(r);
}
}
/* vq_req_get - reference a VQ Request Object. Done
* only in the kernel verbs handlers.
*/
void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
{
atomic_inc(&r->refcnt);
}
/* vq_req_put - dereference and potentially free a VQ Request Object.
*
* This is only called by handle_vq() on the
* interrupt when it is done processing
* a verb reply message. If the associated
* kernel verbs handler has already bailed,
* then this put will actually free the VQ
* Request object _and_ the VQ Reply Buffer
* if it exists.
*/
void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
{
if (atomic_dec_and_test(&r->refcnt)) {
if (r->reply_msg != 0)
vq_repbuf_free(c2dev,
(void *) (unsigned long) r->reply_msg);
kfree(r);
}
}
/*
* vq_repbuf_alloc - allocate a VQ Reply Buffer.
*/
void *vq_repbuf_alloc(struct c2_dev *c2dev)
{
return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
}
/*
* vq_send_wr - post a verbs request message to the Verbs Request Queue.
* If a message is not available in the MQ, then block until one is available.
* NOTE: handle_mq() on the interrupt context will wake up threads blocked here.
* When the adapter drains the Verbs Request Queue,
* it inserts MQ index 0 into the
* adapter->host activity FIFO and interrupts the host.
*/
int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
{
void *msg;
wait_queue_t __wait;
/*
* grab adapter vq lock
*/
spin_lock(&c2dev->vqlock);
/*
* allocate msg
*/
msg = c2_mq_alloc(&c2dev->req_vq);
/*
* If we cannot get a msg, then we'll wait.
* When a message is available, the int handler will wake_up()
* any waiters.
*/
while (msg == NULL) {
pr_debug("%s:%d no available msg in VQ, waiting...\n",
__func__, __LINE__);
init_waitqueue_entry(&__wait, current);
add_wait_queue(&c2dev->req_vq_wo, &__wait);
spin_unlock(&c2dev->vqlock);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (!c2_mq_full(&c2dev->req_vq)) {
break;
}
if (!signal_pending(current)) {
schedule_timeout(1 * HZ); /* 1 second... */
continue;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&c2dev->req_vq_wo, &__wait);
return -EINTR;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&c2dev->req_vq_wo, &__wait);
spin_lock(&c2dev->vqlock);
msg = c2_mq_alloc(&c2dev->req_vq);
}
/*
* copy wr into adapter msg
*/
memcpy(msg, wr, c2dev->req_vq.msg_size);
/*
* post msg
*/
c2_mq_produce(&c2dev->req_vq);
/*
* release adapter vq lock
*/
spin_unlock(&c2dev->vqlock);
return 0;
}
/*
* vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
*/
int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
{
if (!wait_event_timeout(req->wait_object,
atomic_read(&req->reply_ready),
60*HZ))
return -ETIMEDOUT;
return 0;
}
/*
* vq_repbuf_free - Free a Verbs Reply Buffer.
*/
void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
{
kmem_cache_free(c2dev->host_msg_cache, reply);
}
This diff has been collapsed.
This diff has been collapsed.