Commit cd5e25d9 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ipath: kbuild infrastructure
  IB/ipath: infiniband verbs support
  IB/ipath: misc infiniband code, part 2
  IB/ipath: misc infiniband code, part 1
  IB/ipath: infiniband RC protocol support
  IB/ipath: infiniband UC and UD protocol support
  IB/ipath: infiniband header files
  IB/ipath: layering interfaces used by higher-level driver code
  IB/ipath: support for userspace apps using core driver
  IB/ipath: sysfs and ipathfs support for core driver
  IB/ipath: misc driver support code
  IB/ipath: chip initialisation code, and diag support
  IB/ipath: support for PCI Express devices
  IB/ipath: support for HyperTransport devices
  IB/ipath: core driver header files
  IB/ipath: core device driver
......
@@ -1451,6 +1451,12 @@ P: Juanjo Ciarlante
M: jjciarla@raiz.uncu.edu.ar
S: Maintained
IPATH DRIVER:
P: Bryan O'Sullivan
M: support@pathscale.com
L: openib-general@openib.org
S: Supported
IPX NETWORK LAYER
P: Arnaldo Carvalho de Melo
M: acme@conectiva.com.br
......
......
@@ -69,6 +69,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_IPATH_CORE) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
......
......
@@ -30,6 +30,7 @@ config INFINIBAND_USER_ACCESS
<http://www.openib.org>.
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
......
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_IPATH_CORE) += hw/ipath/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
config IPATH_CORE
tristate "PathScale InfiniPath Driver"
depends on 64BIT && PCI_MSI && NET
---help---
This is a low-level driver for PathScale InfiniPath host channel
adapters (HCAs) based on the HT-400 and PE-800 chips.
config INFINIBAND_IPATH
tristate "PathScale InfiniPath Verbs Driver"
depends on IPATH_CORE && INFINIBAND
---help---
This is a driver that provides InfiniBand verbs support for
PathScale InfiniPath host channel adapters (HCAs). This
allows these devices to be used with both kernel upper level
protocols such as IP-over-InfiniBand as well as with userspace
applications (in conjunction with InfiniBand userspace access).
EXTRA_CFLAGS += -DIPATH_IDSTR='"PathScale kernel.org driver"' \
-DIPATH_KERN_TYPE=0
obj-$(CONFIG_IPATH_CORE) += ipath_core.o
obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
ipath_core-y := \
ipath_diag.o \
ipath_driver.o \
ipath_eeprom.o \
ipath_file_ops.o \
ipath_fs.o \
ipath_ht400.o \
ipath_init_chip.o \
ipath_intr.o \
ipath_layer.o \
ipath_pe800.o \
ipath_stats.o \
ipath_sysfs.o \
ipath_user_pages.o
ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-y := \
ipath_cq.o \
ipath_keys.o \
ipath_mad.o \
ipath_mr.o \
ipath_qp.o \
ipath_rc.o \
ipath_ruc.o \
ipath_srq.o \
ipath_uc.o \
ipath_ud.o \
ipath_verbs.o \
ipath_verbs_mcast.o
/*
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _IPATH_COMMON_H
#define _IPATH_COMMON_H
/*
* This file contains defines, structures, etc. that are used
* to communicate between kernel and user code.
*/
/* This is the IEEE-assigned OUI for PathScale, Inc. */
#define IPATH_SRC_OUI_1 0x00
#define IPATH_SRC_OUI_2 0x11
#define IPATH_SRC_OUI_3 0x75
/* version of protocol header (known to chip also). In the long run,
* we should be able to generate and accept a range of version numbers;
* for now we only accept one, and it's compiled in.
*/
#define IPS_PROTO_VERSION 2
/*
* These are compile time constants that you may want to enable or disable
* if you are trying to debug problems with code or performance.
* IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
* fastpath code
* IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
* traced in fastpath code
* _IPATH_TRACING define as 0 if you want to remove all tracing in a
* compilation unit
* _IPATH_DEBUGGING define as 0 if you want to remove debug prints
*/
/*
* The value in the BTH QP field that InfiniPath uses to differentiate
* an infinipath protocol IB packet vs standard IB transport
*/
#define IPATH_KD_QP 0x656b79
/*
* valid states passed to ipath_set_linkstate() user call
*/
#define IPATH_IB_LINKDOWN 0
#define IPATH_IB_LINKARM 1
#define IPATH_IB_LINKACTIVE 2
#define IPATH_IB_LINKINIT 3
#define IPATH_IB_LINKDOWN_SLEEP 4
#define IPATH_IB_LINKDOWN_DISABLE 5
/*
* stats maintained by the driver. For now, at least, this is global
* to all minor devices.
*/
struct infinipath_stats {
/* number of interrupts taken */
__u64 sps_ints;
/* number of interrupts for errors */
__u64 sps_errints;
/* number of errors from chip (not incl. packet errors or CRC) */
__u64 sps_errs;
/* number of packet errors from chip other than CRC */
__u64 sps_pkterrs;
/* number of packets with CRC errors (ICRC and VCRC) */
__u64 sps_crcerrs;
/* number of hardware errors reported (parity, etc.) */
__u64 sps_hwerrs;
/* number of times IB link changed state unexpectedly */
__u64 sps_iblink;
/* no longer used; left for compatibility */
__u64 sps_unused3;
/* number of kernel (port0) packets received */
__u64 sps_port0pkts;
/* number of "ethernet" packets sent by driver */
__u64 sps_ether_spkts;
/* number of "ethernet" packets received by driver */
__u64 sps_ether_rpkts;
/* number of SMA packets sent by driver */
__u64 sps_sma_spkts;
/* number of SMA packets received by driver */
__u64 sps_sma_rpkts;
/* number of times all ports rcvhdrq was full and packet dropped */
__u64 sps_hdrqfull;
/* number of times all ports egrtid was full and packet dropped */
__u64 sps_etidfull;
/*
* number of times we tried to send from driver, but no pio buffers
* avail
*/
__u64 sps_nopiobufs;
/* number of ports currently open */
__u64 sps_ports;
/* list of pkeys (other than default) accepted (0 means not set) */
__u16 sps_pkeys[4];
/* lids for up to 4 infinipaths, indexed by infinipath # */
__u16 sps_lid[4];
/* number of user ports per chip (not IB ports) */
__u32 sps_nports;
/* not our interrupt, or already handled */
__u32 sps_nullintr;
/* max number of packets handled per receive call */
__u32 sps_maxpkts_call;
/* avg number of packets handled per receive call */
__u32 sps_avgpkts_call;
/* total number of pages locked */
__u64 sps_pagelocks;
/* total number of pages unlocked */
__u64 sps_pageunlocks;
/*
* Number of packets dropped in kernel other than errors (ether
* packets if ipath not configured, sma/mad, etc.)
*/
__u64 sps_krdrops;
/* mlids for up to 4 infinipaths, indexed by infinipath # */
__u16 sps_mlid[4];
/* pad for future growth */
__u64 __sps_pad[45];
};
/*
* These are the status bits readable (in ascii form, 64bit value)
* from the "status" sysfs file.
*/
#define IPATH_STATUS_INITTED 0x1 /* basic initialization done */
#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
/* Device has been disabled via admin request */
#define IPATH_STATUS_ADMIN_DISABLED 0x4
#define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */
#define IPATH_STATUS_SMA 0x10 /* user SMA running */
/* Chip has been found and initted */
#define IPATH_STATUS_CHIP_PRESENT 0x20
/* IB link is at ACTIVE, usable for data traffic */
#define IPATH_STATUS_IB_READY 0x40
/* link is configured, LID, MTU, etc. have been set */
#define IPATH_STATUS_IB_CONF 0x80
/* no link established, probably no cable */
#define IPATH_STATUS_IB_NOCABLE 0x100
/* A Fatal hardware error has occurred. */
#define IPATH_STATUS_HWERROR 0x200
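Since the status word is exported in ASCII through sysfs, user code can parse it and test the bits above directly. The following is an illustrative standalone sketch, not part of this commit; the sysfs path and the hexadecimal formatting are assumptions.

/* Illustrative sketch only -- the path below is an assumed placeholder. */
#include <stdio.h>
#include <stdlib.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed to be on the include path */

int main(void)
{
        char buf[64];
        unsigned long long status;
        FILE *f = fopen("/sys/class/infiniband/ipath0/status", "r"); /* assumed */

        if (!f || !fgets(buf, sizeof(buf), f)) {
                perror("status");
                return 1;
        }
        fclose(f);
        status = strtoull(buf, NULL, 16);       /* assuming hex formatting */

        if (status & IPATH_STATUS_IB_READY)
                printf("IB link ACTIVE, usable for data traffic\n");
        if (status & IPATH_STATUS_IB_NOCABLE)
                printf("no link established, probably no cable\n");
        return 0;
}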
/*
* The list of usermode accessible registers. Also see Reg_* later in file.
*/
typedef enum _ipath_ureg {
/* (RO) DMA RcvHdr to be used next. */
ur_rcvhdrtail = 0,
/* (RW) RcvHdr entry to be processed next by host. */
ur_rcvhdrhead = 1,
/* (RO) Index of next Eager index to use. */
ur_rcvegrindextail = 2,
/* (RW) Eager TID to be processed next */
ur_rcvegrindexhead = 3,
/* For internal use only; max register number. */
_IPATH_UregMax
} ipath_ureg;
/* bit values for spi_runtime_flags */
#define IPATH_RUNTIME_HT 0x1
#define IPATH_RUNTIME_PCIE 0x2
#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
#define IPATH_RUNTIME_RCVHDR_COPY 0x8
/*
* This structure is returned by ipath_userinit() immediately after
* open to get implementation-specific info, and info specific to this
* instance.
*
* This struct must have explicit pad fields where type sizes
* may result in different alignments between 32 and 64 bit
* programs, since the 64 bit kernel requires the user code
* to have matching offsets
*/
struct ipath_base_info {
/* version of hardware, for feature checking. */
__u32 spi_hw_version;
/* version of software, for feature checking. */
__u32 spi_sw_version;
/* InfiniPath port assigned, goes into sent packets */
__u32 spi_port;
/*
* IB MTU, packets IB data must be less than this.
* The MTU is in bytes, and will be a multiple of 4 bytes.
*/
__u32 spi_mtu;
/*
* Size of a PIO buffer. Any given packet's total size must be less
* than this (in words). Included is the starting control word, so
* if 513 is returned, then total pkt size is 512 words or less.
*/
__u32 spi_piosize;
/* size of the TID cache in infinipath, in entries */
__u32 spi_tidcnt;
/* size of the TID Eager list in infinipath, in entries */
__u32 spi_tidegrcnt;
/* size of a single receive header queue entry. */
__u32 spi_rcvhdrent_size;
/*
* Count of receive header queue entries allocated.
* This may be less than the spu_rcvhdrcnt passed in.
*/
__u32 spi_rcvhdr_cnt;
/* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
__u32 spi_runtime_flags;
/* address where receive buffer queue is mapped into */
__u64 spi_rcvhdr_base;
/* user program. */
/* base address of eager TID receive buffers. */
__u64 spi_rcv_egrbufs;
/* Allocated by initialization code, not by protocol. */
/*
* Size of each TID buffer in host memory, starting at
* spi_rcv_egrbufs. The buffers are virtually contiguous.
*/
__u32 spi_rcv_egrbufsize;
/*
* The special QP (queue pair) value that identifies an infinipath
* protocol packet from standard IB packets. More, probably much
* more, to be added.
*/
__u32 spi_qpair;
/*
* User register base for init code, not to be used directly by
* protocol or applications.
*/
__u64 __spi_uregbase;
/*
* Maximum buffer size in bytes that can be used in a single TID
* entry (assuming the buffer is aligned to this boundary). This is
* the minimum of what the hardware and software support.  Guaranteed
* to be a power of 2.
*/
__u32 spi_tid_maxsize;
/*
* alignment of each pio send buffer (byte count
* to add to spi_piobufbase to get to second buffer)
*/
__u32 spi_pioalign;
/*
* The index of the first pio buffer available to this process;
* needed to do lookup in spi_pioavailaddr; not added to
* spi_piobufbase.
*/
__u32 spi_pioindex;
/* number of buffers mapped for this process */
__u32 spi_piocnt;
/*
* Base address of writeonly pio buffers for this process.
* Each buffer has spi_piosize words, and is aligned on spi_pioalign
* boundaries. spi_piocnt buffers are mapped from this address
*/
__u64 spi_piobufbase;
/*
* Base address of readonly memory copy of the pioavail registers.
* There are 2 bits for each buffer.
*/
__u64 spi_pioavailaddr;
/*
* Address where driver updates a copy of the interface and driver
* status (IPATH_STATUS_*) as a 64 bit value. It's followed by a
* string indicating hardware error, if there was one.
*/
__u64 spi_status;
/* number of chip ports available to user processes */
__u32 spi_nports;
/* unit number of chip we are using */
__u32 spi_unit;
/* num bufs in each contiguous set */
__u32 spi_rcv_egrperchunk;
/* size in bytes of each contiguous set */
__u32 spi_rcv_egrchunksize;
/* total size of mmap to cover full rcvegrbuffers */
__u32 spi_rcv_egrbuftotlen;
} __attribute__ ((aligned(8)));
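Because this structure is shared across the user/kernel ABI, 32-bit and 64-bit user builds must agree on every field offset. One simple audit, sketched below (illustrative only, not part of this commit), is to dump the offsets with offsetof() from both builds and diff the output.

/* Illustrative sketch only: dump a few offsets so 32-bit and 64-bit
 * builds of user code can be diffed against each other. */
#include <stdio.h>
#include <stddef.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed include path */

int main(void)
{
        printf("sizeof(struct ipath_base_info) = %zu\n",
               sizeof(struct ipath_base_info));
        printf("spi_mtu         at %zu\n",
               offsetof(struct ipath_base_info, spi_mtu));
        printf("spi_rcvhdr_base at %zu\n",
               offsetof(struct ipath_base_info, spi_rcvhdr_base));
        printf("spi_piobufbase  at %zu\n",
               offsetof(struct ipath_base_info, spi_piobufbase));
        printf("spi_status      at %zu\n",
               offsetof(struct ipath_base_info, spi_status));
        return 0;
}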
/*
* This version number is given to the driver by the user code during
* initialization in the spu_userversion field of ipath_user_info, so
* the driver can check for compatibility with user code.
*
* The major version changes when data structures
* change in an incompatible way. The driver must be the same or higher
* for initialization to succeed. In some cases, a higher version
* driver will not interoperate with older software, and initialization
* will return an error.
*/
#define IPATH_USER_SWMAJOR 1
/*
* Minor version differences are always compatible
* within a major version; however, if user software is newer
* than driver software, some new features and/or structure fields
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference
*/
#define IPATH_USER_SWMINOR 2
#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
#define IPATH_KERN_TYPE 0
/*
* Similarly, this is the kernel version going back to the user. It's
* slightly different, in that we want to tell if the driver was built as
* part of a PathScale release, or from the driver from OpenIB, kernel.org,
* or a standard distribution, for support reasons. The high bit is 0 for
* non-PathScale, and 1 for PathScale-built/supplied.
*
* It's returned by the driver to the user code during initialization in the
* spi_sw_version field of ipath_base_info, so the user code can in turn
* check for compatibility with the kernel.
*/
#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
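User code can split the spi_sw_version word back into its parts with the same layout. The sketch below is illustrative only; the compatibility policy it applies is one plausible choice, not something this header mandates.

/* Illustrative sketch only: decompose a spi_sw_version word using the
 * layout defined above (bit 31 = build type, bits 30:16 = major,
 * bits 15:0 = minor). */
#include <stdio.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed include path */

static int ipath_version_compatible(__u32 spi_sw_version)
{
        unsigned kern_type = spi_sw_version >> 31;
        unsigned major = (spi_sw_version >> 16) & 0x7fff;
        unsigned minor = spi_sw_version & 0xffff;

        printf("driver sw version %u.%u (%s build)\n", major, minor,
               kern_type ? "PathScale" : "non-PathScale");
        /* one plausible policy: require the same major version */
        return major == IPATH_USER_SWMAJOR;
}

int main(void)
{
        /* demo against the value a kernel.org build of this header reports */
        return ipath_version_compatible(IPATH_KERN_SWVERSION) ? 0 : 1;
}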
/*
* This structure is passed to ipath_userinit() to tell the driver where
* user code buffers are, sizes, etc. The offsets and sizes of the
* fields must remain unchanged, for binary compatibility. It can
* be extended if needed, provided userversion is changed so user code can tell.
*/
struct ipath_user_info {
/*
* version of user software, to detect compatibility issues.
* Should be set to IPATH_USER_SWVERSION.
*/
__u32 spu_userversion;
/* desired number of receive header queue entries */
__u32 spu_rcvhdrcnt;
/* size of struct base_info to write to */
__u32 spu_base_info_size;
/*
* number of words in KD protocol header.
* This tells InfiniPath how many words to copy to rcvhdrq. If 0,
* kernel uses a default. Once set, attempts to set any other value
* are an error (EAGAIN) until driver is reloaded.
*/
__u32 spu_rcvhdrsize;
/*
* cache line aligned (64 byte) user address to
* which the rcvhdrtail register will be written by infinipath
* whenever it changes, so that no chip registers are read in
* the performance path.
*/
__u64 spu_rcvhdraddr;
/*
* address of struct base_info to write to
*/
__u64 spu_base_info;
} __attribute__ ((aligned(8)));
/* User commands. */
#define IPATH_CMD_MIN 16
#define IPATH_CMD_USER_INIT 16 /* set up userspace */
#define IPATH_CMD_PORT_INFO 17 /* find out what resources we got */
#define IPATH_CMD_RECV_CTRL 18 /* control receipt of packets */
#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
#define IPATH_CMD_MAX 21
struct ipath_port_info {
__u32 num_active; /* number of active units */
__u32 unit; /* unit (chip) assigned to caller */
__u32 port; /* port on unit assigned to caller */
};
struct ipath_tid_info {
__u32 tidcnt;
/* make structure same size in 32 and 64 bit */
__u32 tid__unused;
/* virtual address of first page in transfer */
__u64 tidvaddr;
/* pointer (same size 32/64 bit) to __u16 tid array */
__u64 tidlist;
/*
* pointer (same size 32/64 bit) to bitmap of TIDs used
* for this call; checked for being large enough at open
*/
__u64 tidmap;
};
struct ipath_cmd {
__u32 type; /* command type */
union {
struct ipath_tid_info tid_info;
struct ipath_user_info user_info;
/* address in userspace of struct ipath_port_info to
write result to */
__u64 port_info;
/* enable/disable receipt of packets */
__u32 recv_ctrl;
/* partition key to set */
__u16 part_key;
} cmd;
};
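As a rough illustration of how these pieces fit together, the sketch below builds an IPATH_CMD_USER_INIT request. It assumes that commands are submitted by write()ing a struct ipath_cmd to the device file descriptor and that the node is /dev/ipath; neither detail is stated in this header, so both are assumptions.

/* Illustrative sketch only.  The /dev/ipath node name and the write()
 * submission path are assumptions, not facts taken from this header. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed include path */

int main(void)
{
        struct ipath_base_info base;
        struct ipath_cmd cmd;
        int fd = open("/dev/ipath", O_RDWR);    /* assumed node name */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&cmd, 0, sizeof(cmd));
        cmd.type = IPATH_CMD_USER_INIT;
        cmd.cmd.user_info.spu_userversion = IPATH_USER_SWVERSION;
        cmd.cmd.user_info.spu_rcvhdrcnt = 64;   /* desired rcvhdrq entries */
        cmd.cmd.user_info.spu_rcvhdrsize = 0;   /* 0: driver default */
        /* a real program must point spu_rcvhdraddr at a 64-byte aligned
         * user buffer; left at 0 here to keep the sketch short */
        cmd.cmd.user_info.spu_base_info_size = sizeof(base);
        cmd.cmd.user_info.spu_base_info = (__u64) (unsigned long) &base;

        if (write(fd, &cmd, sizeof(cmd)) < 0)
                perror("IPATH_CMD_USER_INIT");
        close(fd);
        return 0;
}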
struct ipath_iovec {
/* Pointer to data, but same size 32 and 64 bit */
__u64 iov_base;
/*
* Length of data; don't need 64 bits, but want
* ipath_sendpkt to remain same size as before 32 bit changes, so...
*/
__u64 iov_len;
};
/*
* Describes a single packet for send. Each packet can have one or more
* buffers, but the total length (exclusive of IB headers) must be less
* than the MTU, and if using the PIO method, entire packet length,
* including IB headers, must be less than the ipath_piosize value (words).
* Use of this necessitates including sys/uio.h
*/
struct __ipath_sendpkt {
__u32 sps_flags; /* flags for packet (TBD) */
__u32 sps_cnt; /* number of entries to use in sps_iov */
/* array of iov's describing packet. TEMPORARY */
struct ipath_iovec sps_iov[4];
};
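A minimal sketch of filling this descriptor from two user buffers follows; how the filled descriptor is handed to the driver is outside this header and is not shown.

/* Illustrative sketch only: populate the descriptor above from two
 * user buffers.  The submission path to the driver is not shown. */
#include <string.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed include path */

static void fill_sendpkt(struct __ipath_sendpkt *pkt,
                         const void *hdr, __u64 hdrlen,
                         const void *payload, __u64 paylen)
{
        memset(pkt, 0, sizeof(*pkt));
        pkt->sps_flags = 0;             /* flags are TBD per the comment */
        pkt->sps_cnt = 2;               /* two sps_iov entries in use */
        pkt->sps_iov[0].iov_base = (__u64) (unsigned long) hdr;
        pkt->sps_iov[0].iov_len = hdrlen;
        pkt->sps_iov[1].iov_base = (__u64) (unsigned long) payload;
        pkt->sps_iov[1].iov_len = paylen;
}

int main(void)
{
        struct __ipath_sendpkt pkt;
        char hdr[40], payload[256];

        fill_sendpkt(&pkt, hdr, sizeof(hdr), payload, sizeof(payload));
        return pkt.sps_cnt == 2 ? 0 : 1;
}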
/* Passed into SMA special file's ->read and ->write methods. */
struct ipath_sma_pkt
{
__u32 unit; /* unit on which to send packet */
__u64 data; /* address of payload in userspace */
__u32 len; /* length of payload */
};
/*
* Data layout in I2C flash (for GUID, etc.)
* All fields are little-endian binary unless otherwise stated
*/
#define IPATH_FLASH_VERSION 1
struct ipath_flash {
/* flash layout version (IPATH_FLASH_VERSION) */
__u8 if_fversion;
/* checksum protecting if_length bytes */
__u8 if_csum;
/*
* valid length (in use, protected by if_csum), including
* if_fversion and if_csum themselves
*/
__u8 if_length;
/* the GUID, in network order */
__u8 if_guid[8];
/* number of GUIDs to use, starting from if_guid */
__u8 if_numguid;
/* the board serial number, in ASCII */
char if_serial[12];
/* board mfg date (YYYYMMDD ASCII) */
char if_mfgdate[8];
/* last board rework/test date (YYYYMMDD ASCII) */
char if_testdate[8];
/* logging of error counts, TBD */
__u8 if_errcntp[4];
/* powered on hours, updated at driver unload */
__u8 if_powerhour[2];
/* ASCII free-form comment field */
char if_comment[32];
/* 78 bytes used, min flash size is 128 bytes */
__u8 if_future[50];
};
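The checksum convention here (one's complement of the sum of the first if_length bytes, with if_csum's own contribution backed out) matches the driver's flash_csum() later in this commit. An illustrative standalone sketch for stamping and verifying a flash image:

/* Illustrative sketch only: stamp and verify if_csum the same way the
 * driver's flash_csum() (later in this commit) does -- sum the first
 * if_length bytes, back out if_csum's own contribution, complement. */
#include <stdio.h>
#include <stddef.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed include path */

static __u8 flash_csum_of(const struct ipath_flash *ifp)
{
        const __u8 *ip = (const __u8 *) ifp;
        __u8 csum = 0;
        int len;

        for (len = 0; len < ifp->if_length; len++)
                csum += ip[len];
        csum -= ifp->if_csum;
        return (__u8) ~csum;
}

int main(void)
{
        struct ipath_flash ifp = { 0 };

        ifp.if_fversion = IPATH_FLASH_VERSION;
        ifp.if_length = offsetof(struct ipath_flash, if_future); /* 78 bytes */

        ifp.if_csum = flash_csum_of(&ifp);              /* stamp */
        printf("flash checksum %s\n",
               flash_csum_of(&ifp) == ifp.if_csum ? "ok" : "bad");
        return 0;
}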
/*
* These are the counters implemented in the chip, and are listed in order.
* The InterCaps naming is taken straight from the chip spec.
*/
struct infinipath_counters {
__u64 LBIntCnt;
__u64 LBFlowStallCnt;
__u64 Reserved1;
__u64 TxUnsupVLErrCnt;
__u64 TxDataPktCnt;
__u64 TxFlowPktCnt;
__u64 TxDwordCnt;
__u64 TxLenErrCnt;
__u64 TxMaxMinLenErrCnt;
__u64 TxUnderrunCnt;
__u64 TxFlowStallCnt;
__u64 TxDroppedPktCnt;
__u64 RxDroppedPktCnt;
__u64 RxDataPktCnt;
__u64 RxFlowPktCnt;
__u64 RxDwordCnt;
__u64 RxLenErrCnt;
__u64 RxMaxMinLenErrCnt;
__u64 RxICRCErrCnt;
__u64 RxVCRCErrCnt;
__u64 RxFlowCtrlErrCnt;
__u64 RxBadFormatCnt;
__u64 RxLinkProblemCnt;
__u64 RxEBPCnt;
__u64 RxLPCRCErrCnt;
__u64 RxBufOvflCnt;
__u64 RxTIDFullErrCnt;
__u64 RxTIDValidErrCnt;
__u64 RxPKeyMismatchCnt;
__u64 RxP0HdrEgrOvflCnt;
__u64 RxP1HdrEgrOvflCnt;
__u64 RxP2HdrEgrOvflCnt;
__u64 RxP3HdrEgrOvflCnt;
__u64 RxP4HdrEgrOvflCnt;
__u64 RxP5HdrEgrOvflCnt;
__u64 RxP6HdrEgrOvflCnt;
__u64 RxP7HdrEgrOvflCnt;
__u64 RxP8HdrEgrOvflCnt;
__u64 Reserved6;
__u64 Reserved7;
__u64 IBStatusChangeCnt;
__u64 IBLinkErrRecoveryCnt;
__u64 IBLinkDownedCnt;
__u64 IBSymbolErrCnt;
};
/*
* The next set of defines are for packet headers, and chip register
* and memory bits that are visible to and/or used by user-mode software.
* The other bits that are used only by the driver or diags are in
* ipath_registers.h
*/
/* RcvHdrFlags bits */
#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
#define INFINIPATH_RHF_LENGTH_SHIFT 0
#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
#define INFINIPATH_RHF_H_ICRCERR 0x80000000
#define INFINIPATH_RHF_H_VCRCERR 0x40000000
#define INFINIPATH_RHF_H_PARITYERR 0x20000000
#define INFINIPATH_RHF_H_LENERR 0x10000000
#define INFINIPATH_RHF_H_MTUERR 0x08000000
#define INFINIPATH_RHF_H_IHDRERR 0x04000000
#define INFINIPATH_RHF_H_TIDERR 0x02000000
#define INFINIPATH_RHF_H_MKERR 0x01000000
#define INFINIPATH_RHF_H_IBERR 0x00800000
#define INFINIPATH_RHF_L_SWA 0x00008000
#define INFINIPATH_RHF_L_SWB 0x00004000
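A receive-header-flags word is decoded with these mask/shift pairs; a short illustrative sketch (not part of this commit):

/* Illustrative sketch only: extract fields from a receive-header-flags
 * word with the masks and shifts defined above. */
#include <stdio.h>
#include <linux/types.h>
#include "ipath_common.h"       /* assumed include path */

int main(void)
{
        __u32 rhf = 0x00120844;  /* made-up example value */
        __u32 len = (rhf >> INFINIPATH_RHF_LENGTH_SHIFT) &
                INFINIPATH_RHF_LENGTH_MASK;
        __u32 type = (rhf >> INFINIPATH_RHF_RCVTYPE_SHIFT) &
                INFINIPATH_RHF_RCVTYPE_MASK;
        __u32 egr = (rhf >> INFINIPATH_RHF_EGRINDEX_SHIFT) &
                INFINIPATH_RHF_EGRINDEX_MASK;

        printf("length %u, rcvtype %u, egrindex %u%s\n", len, type, egr,
               (rhf & INFINIPATH_RHF_H_ICRCERR) ? ", ICRC error" : "");
        return 0;
}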
/* infinipath header fields */
#define INFINIPATH_I_VERS_MASK 0xF
#define INFINIPATH_I_VERS_SHIFT 28
#define INFINIPATH_I_PORT_MASK 0xF
#define INFINIPATH_I_PORT_SHIFT 24
#define INFINIPATH_I_TID_MASK 0x7FF
#define INFINIPATH_I_TID_SHIFT 13
#define INFINIPATH_I_OFFSET_MASK 0x1FFF
#define INFINIPATH_I_OFFSET_SHIFT 0
/* K_PktFlags bits */
#define INFINIPATH_KPF_INTR 0x1
/* SendPIO per-buffer control */
#define INFINIPATH_SP_LENGTHP1_MASK 0x3FF
#define INFINIPATH_SP_LENGTHP1_SHIFT 0
#define INFINIPATH_SP_INTR 0x80000000
#define INFINIPATH_SP_TEST 0x40000000
#define INFINIPATH_SP_TESTEBP 0x20000000
/* SendPIOAvail bits */
#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
#endif /* _IPATH_COMMON_H */
/*
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/err.h>
#include <linux/vmalloc.h>
#include "ipath_verbs.h"
/**
* ipath_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
* @solicited: true if @entry is a solicited entry
*
* This may be called with either the qp->s_lock or the qp->r_rq.lock held.
*/
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
unsigned long flags;
u32 next;
spin_lock_irqsave(&cq->lock, flags);
if (cq->head == cq->ibcq.cqe)
next = 0;
else
next = cq->head + 1;
if (unlikely(next == cq->tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) {
struct ib_event ev;
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
ev.event = IB_EVENT_CQ_ERR;
cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
}
return;
}
cq->queue[cq->head] = *entry;
cq->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = IB_CQ_NONE;
cq->triggered++;
/*
* This will cause send_complete() to be called in
* another thread.
*/
tasklet_hi_schedule(&cq->comptask);
}
spin_unlock_irqrestore(&cq->lock, flags);
if (entry->status != IB_WC_SUCCESS)
to_idev(cq->ibcq.device)->n_wqe_errs++;
}
/**
* ipath_poll_cq - poll for work completion entries
* @ibcq: the completion queue to poll
* @num_entries: the maximum number of entries to return
* @entry: pointer to array where work completions are placed
*
* Returns the number of completion entries polled.
*
* This may be called from interrupt context. Also called by ib_poll_cq()
* in the generic verbs code.
*/
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
struct ipath_cq *cq = to_icq(ibcq);
unsigned long flags;
int npolled;
spin_lock_irqsave(&cq->lock, flags);
for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
if (cq->tail == cq->head)
break;
*entry = cq->queue[cq->tail];
if (cq->tail == cq->ibcq.cqe)
cq->tail = 0;
else
cq->tail++;
}
spin_unlock_irqrestore(&cq->lock, flags);
return npolled;
}
static void send_complete(unsigned long data)
{
struct ipath_cq *cq = (struct ipath_cq *)data;
/*
* The completion handler will most likely rearm the notification
* and poll for all pending entries. If a new completion entry
* is added while we are in this routine, tasklet_hi_schedule()
* won't call us again until we return so we check triggered to
* see if we need to call the handler again.
*/
for (;;) {
u8 triggered = cq->triggered;
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq->triggered == triggered)
return;
}
}
/**
* ipath_create_cq - create a completion queue
* @ibdev: the device this completion queue is attached to
* @entries: the minimum size of the completion queue
* @context: unused by the InfiniPath driver
* @udata: unused by the InfiniPath driver
*
* Returns a pointer to the completion queue or negative errno values
* for failure.
*
* Called by ib_create_cq() in the generic verbs code.
*/
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct ipath_cq *cq;
struct ib_wc *wc;
struct ib_cq *ret;
/* Allocate the completion queue structure itself. */
cq = kmalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
wc = vmalloc(sizeof(*wc) * (entries + 1));
if (!wc) {
kfree(cq);
ret = ERR_PTR(-ENOMEM);
goto bail;
}
/*
* ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
* The number of entries should be >= the number requested or return
* an error.
*/
cq->ibcq.cqe = entries;
cq->notify = IB_CQ_NONE;
cq->triggered = 0;
spin_lock_init(&cq->lock);
tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
cq->head = 0;
cq->tail = 0;
cq->queue = wc;
ret = &cq->ibcq;
bail:
return ret;
}
/**
* ipath_destroy_cq - destroy a completion queue
* @ibcq: the completion queue to destroy.
*
* Returns 0 for success.
*
* Called by ib_destroy_cq() in the generic verbs code.
*/
int ipath_destroy_cq(struct ib_cq *ibcq)
{
struct ipath_cq *cq = to_icq(ibcq);
tasklet_kill(&cq->comptask);
vfree(cq->queue);
kfree(cq);
return 0;
}
/**
* ipath_req_notify_cq - change the notification type for a completion queue
* @ibcq: the completion queue
* @notify: the type of notification to request
*
* Returns 0 for success.
*
* This may be called from interrupt context. Also called by
* ib_req_notify_cq() in the generic verbs code.
*/
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
struct ipath_cq *cq = to_icq(ibcq);
unsigned long flags;
spin_lock_irqsave(&cq->lock, flags);
/*
* Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
* any other transitions.
*/
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = notify;
spin_unlock_irqrestore(&cq->lock, flags);
return 0;
}
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
struct ipath_cq *cq = to_icq(ibcq);
struct ib_wc *wc, *old_wc;
u32 n;
int ret;
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
wc = vmalloc(sizeof(*wc) * (cqe + 1));
if (!wc) {
ret = -ENOMEM;
goto bail;
}
spin_lock_irq(&cq->lock);
if (cq->head < cq->tail)
n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
else
n = cq->head - cq->tail;
if (unlikely((u32)cqe < n)) {
spin_unlock_irq(&cq->lock);
vfree(wc);
ret = -EOVERFLOW;
goto bail;
}
for (n = 0; cq->tail != cq->head; n++) {
wc[n] = cq->queue[cq->tail];
if (cq->tail == cq->ibcq.cqe)
cq->tail = 0;
else
cq->tail++;
}
cq->ibcq.cqe = cqe;
cq->head = n;
cq->tail = 0;
old_wc = cq->queue;
cq->queue = wc;
spin_unlock_irq(&cq->lock);
vfree(old_wc);
ret = 0;
bail:
return ret;
}
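The completion queue above is a ring of ibcq.cqe + 1 slots indexed by head and tail: head == tail means empty, and advancing head onto tail would overflow. The wrap and occupancy arithmetic, pulled out into an illustrative standalone sketch:

/* Illustrative sketch only: the head/tail ring arithmetic used by
 * ipath_cq_enter(), ipath_poll_cq() and ipath_resize_cq() above. */
#include <stdio.h>

static unsigned cq_next(unsigned idx, unsigned cqe)
{
        return idx == cqe ? 0 : idx + 1;        /* ring has cqe + 1 slots */
}

static unsigned cq_count(unsigned head, unsigned tail, unsigned cqe)
{
        /* same expression ipath_resize_cq() uses to count queued entries */
        return head >= tail ? head - tail : cqe + 1 + head - tail;
}

int main(void)
{
        unsigned cqe = 7, head = 2, tail = 6;

        printf("%u entries queued, next head slot %u\n",
               cq_count(head, tail, cqe), cq_next(head, cqe));
        return 0;
}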
/*
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _IPATH_DEBUG_H
#define _IPATH_DEBUG_H
#ifndef _IPATH_DEBUGGING /* debugging enabled or not */
#define _IPATH_DEBUGGING 1
#endif
#if _IPATH_DEBUGGING
/*
* Mask values for debugging. The scheme allows us to compile out any
* of the debug tracing stuff, and if compiled in, to enable or disable
* dynamically. This can be set at modprobe time also:
* modprobe infinipath.ko infinipath_debug=7
*/
#define __IPATH_INFO 0x1 /* generic low verbosity stuff */
#define __IPATH_DBG 0x2 /* generic debug */
#define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */
/* leave some low verbosity spots open */
#define __IPATH_VERBDBG 0x40 /* very verbose debug */
#define __IPATH_PKTDBG 0x80 /* print packet data */
/* print process startup (init)/exit messages */
#define __IPATH_PROCDBG 0x100
/* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x200
#define __IPATH_USER_SEND 0x1000 /* use user mode send */
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
#define __IPATH_SMADBG 0x8000 /* sma packet debug */
#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */
#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */
#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */
#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */
#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */
#else /* _IPATH_DEBUGGING */
/*
* define all of these even with debugging off, for the few places that do
* if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
* compiler eliminate the code
*/
#define __IPATH_INFO 0x0 /* generic low verbosity stuff */
#define __IPATH_DBG 0x0 /* generic debug */
#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
#define __IPATH_VERBDBG 0x0 /* very verbose debug */
#define __IPATH_PKTDBG 0x0 /* print packet data */
#define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */
/* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
#define __IPATH_SMADBG 0x0 /* sma packet debug */
#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) general debug on */
#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) table dump on */
#endif /* _IPATH_DEBUGGING */
#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
#endif /* _IPATH_DEBUG_H */
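Each print site in the driver tests a module-parameter mask against one of these bits. The driver's real ipath_dbg()/ipath_cdbg() macros live in ipath_kernel.h and are not shown here, so the following is only a generic illustration of the pattern.

/* Illustrative sketch only: a print site gated on a mask of the
 * __IPATH_* bits above.  The driver's real macros (ipath_dbg,
 * ipath_cdbg) are defined in ipath_kernel.h, not here. */
#include <stdio.h>
#include "ipath_debug.h"        /* assumed include path */

static unsigned infinipath_debug = __IPATH_INFO | __IPATH_DBG;

#define sketch_dbg(which, fmt, ...)                                     \
        do {                                                            \
                if (infinipath_debug & __IPATH_##which)                 \
                        fprintf(stderr, "ipath: " fmt, ##__VA_ARGS__);  \
        } while (0)

int main(void)
{
        sketch_dbg(DBG, "generic debug enabled, mask 0x%x\n",
                   infinipath_debug);
        sketch_dbg(PKTDBG, "compiled in, but masked off here\n");
        return 0;
}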
/*
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file contains support for diagnostic functions. It is accessed by
* opening the ipath_diag device, normally minor number 129. Diagnostic use
* of the InfiniPath chip may render the chip or board unusable until the
* driver is unloaded, or in some cases, until the system is rebooted.
*
* Accesses to the chip through this interface are not similar to going
* through the /sys/bus/pci resource mmap interface.
*/
#include <linux/pci.h>
#include <asm/uaccess.h>
#include "ipath_common.h"
#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"
int ipath_diag_inuse;
static int diag_set_link;
static int ipath_diag_open(struct inode *in, struct file *fp);
static int ipath_diag_release(struct inode *in, struct file *fp);
static ssize_t ipath_diag_read(struct file *fp, char __user *data,
size_t count, loff_t *off);
static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
size_t count, loff_t *off);
static struct file_operations diag_file_ops = {
.owner = THIS_MODULE,
.write = ipath_diag_write,
.read = ipath_diag_read,
.open = ipath_diag_open,
.release = ipath_diag_release
};
static struct cdev *diag_cdev;
static struct class_device *diag_class_dev;
int ipath_diag_init(void)
{
return ipath_cdev_init(IPATH_DIAG_MINOR, "ipath_diag",
&diag_file_ops, &diag_cdev, &diag_class_dev);
}
void ipath_diag_cleanup(void)
{
ipath_cdev_cleanup(&diag_cdev, &diag_class_dev);
}
/**
* ipath_read_umem64 - read a 64-bit quantity from the chip into user space
* @dd: the infinipath device
* @uaddr: the location to store the data in user memory
* @caddr: the source chip address (full pointer, not offset)
* @count: number of bytes to copy (multiple of 32 bits)
*
* This function also localizes all chip memory accesses.
* The copy should be written such that we read full cacheline packets
* from the chip. This is usually used for a single qword
*
* NOTE: This assumes the chip address is 64-bit aligned.
*/
static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
const void __iomem *caddr, size_t count)
{
const u64 __iomem *reg_addr = caddr;
const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
int ret;
/* not very efficient, but it works for now */
if (reg_addr < dd->ipath_kregbase ||
reg_end > dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
while (reg_addr < reg_end) {
u64 data = readq(reg_addr);
if (copy_to_user(uaddr, &data, sizeof(u64))) {
ret = -EFAULT;
goto bail;
}
reg_addr++;
uaddr++;
}
ret = 0;
bail:
return ret;
}
/**
* ipath_write_umem64 - write a 64-bit quantity to the chip from user space
* @dd: the infinipath device
* @caddr: the destination chip address (full pointer, not offset)
* @uaddr: the source of the data in user memory
* @count: the number of bytes to copy (multiple of 32 bits)
*
* This is usually used for a single qword
* NOTE: This assumes the chip address is 64-bit aligned.
*/
static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
const void __user *uaddr, size_t count)
{
u64 __iomem *reg_addr = caddr;
const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
int ret;
/* not very efficient, but it works for now */
if (reg_addr < dd->ipath_kregbase ||
reg_end > dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
while (reg_addr < reg_end) {
u64 data;
if (copy_from_user(&data, uaddr, sizeof(data))) {
ret = -EFAULT;
goto bail;
}
writeq(data, reg_addr);
reg_addr++;
uaddr++;
}
ret = 0;
bail:
return ret;
}
/**
* ipath_read_umem32 - read a 32-bit quantity from the chip into user space
* @dd: the infinipath device
* @uaddr: the location to store the data in user memory
* @caddr: the source chip address (full pointer, not offset)
* @count: number of bytes to copy
*
* read 32 bit values, not 64 bit; for memories that only
* support 32 bit reads; usually a single dword.
*/
static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
const void __iomem *caddr, size_t count)
{
const u32 __iomem *reg_addr = caddr;
const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
int ret;
if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
reg_end > (u32 __iomem *) dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
/* not very efficient, but it works for now */
while (reg_addr < reg_end) {
u32 data = readl(reg_addr);
if (copy_to_user(uaddr, &data, sizeof(data))) {
ret = -EFAULT;
goto bail;
}
reg_addr++;
uaddr++;
}
ret = 0;
bail:
return ret;
}
/**
* ipath_write_umem32 - write a 32-bit quantity to the chip from user space
* @dd: the infinipath device
* @caddr: the destination chip address (full pointer, not offset)
* @uaddr: the source of the data in user memory
* @count: number of bytes to copy
*
* write 32 bit values, not 64 bit; for memories that only
* support 32 bit write; usually a single dword.
*/
static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
const void __user *uaddr, size_t count)
{
u32 __iomem *reg_addr = caddr;
const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
int ret;
if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
reg_end > (u32 __iomem *) dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
while (reg_addr < reg_end) {
u32 data;
if (copy_from_user(&data, uaddr, sizeof(data))) {
ret = -EFAULT;
goto bail;
}
writel(data, reg_addr);
reg_addr++;
uaddr++;
}
ret = 0;
bail:
return ret;
}
static int ipath_diag_open(struct inode *in, struct file *fp)
{
struct ipath_devdata *dd;
int unit = 0; /* XXX this is bogus */
unsigned long flags;
int ret;
dd = ipath_lookup(unit);
mutex_lock(&ipath_mutex);
spin_lock_irqsave(&ipath_devs_lock, flags);
if (ipath_diag_inuse) {
ret = -EBUSY;
goto bail;
}
list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
/*
* we need at least one infinipath device to be present
* (don't use INITTED, because we want to be able to open
* even if device is in freeze mode, which cleared INITTED).
* There is a small amount of risk to this, which is why we
* also verify kregbase is set.
*/
if (!(dd->ipath_flags & IPATH_PRESENT) ||
!dd->ipath_kregbase)
continue;
ipath_diag_inuse = 1;
diag_set_link = 0;
ret = 0;
goto bail;
}
ret = -ENODEV;
bail:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
mutex_unlock(&ipath_mutex);
/* Only expose a way to reset the device if we
make it into diag mode. */
if (ret == 0)
ipath_expose_reset(&dd->pcidev->dev);
return ret;
}
static int ipath_diag_release(struct inode *i, struct file *f)
{
mutex_lock(&ipath_mutex);
ipath_diag_inuse = 0;
mutex_unlock(&ipath_mutex);
return 0;
}
static ssize_t ipath_diag_read(struct file *fp, char __user *data,
size_t count, loff_t *off)
{
int unit = 0; /* XXX provide for reads on other units some day */
struct ipath_devdata *dd;
void __iomem *kreg_base;
ssize_t ret;
dd = ipath_lookup(unit);
if (!dd) {
ret = -ENODEV;
goto bail;
}
kreg_base = dd->ipath_kregbase;
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
/* address or length is not 32-bit aligned, hence invalid */
ret = -EINVAL;
else if ((count % 8) || (*off % 8))
/* address or length not 64-bit aligned; do 32-bit reads */
ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
else
ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
if (ret >= 0) {
*off += count;
ret = count;
}
bail:
return ret;
}
static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
size_t count, loff_t *off)
{
int unit = 0; /* XXX this is bogus */
struct ipath_devdata *dd;
void __iomem *kreg_base;
ssize_t ret;
dd = ipath_lookup(unit);
if (!dd) {
ret = -ENODEV;
goto bail;
}
kreg_base = dd->ipath_kregbase;
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
/* address or length is not 32-bit aligned, hence invalid */
ret = -EINVAL;
else if ((count % 8) || (*off % 8))
/* address or length not 64-bit aligned; do 32-bit writes */
ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
else
ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
if (ret >= 0) {
*off += count;
ret = count;
}
bail:
return ret;
}
void ipath_diag_bringup_link(struct ipath_devdata *dd)
{
if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE))
return;
diag_set_link = 1;
ipath_cdbg(VERBOSE, "Trying to set to set link active for "
"diag pkt\n");
ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
}
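From userspace, the alignment rules in ipath_diag_read()/ipath_diag_write() above mean transfers should use 4-byte (ideally 8-byte) aligned offsets and lengths. An illustrative sketch follows; the device node path is an assumption, since the driver only registers a chardev named "ipath_diag" and leaves node creation to udev.

/* Illustrative sketch only.  ipath_diag_read() above rejects offsets
 * and lengths that are not 4-byte aligned, and uses 64-bit chip reads
 * when both are 8-byte aligned.  The /dev/ipath_diag node name is an
 * assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/ipath_diag", O_RDONLY);     /* assumed node name */

        if (fd < 0) {
                perror("/dev/ipath_diag");
                return 1;
        }
        /* arbitrary example: 8-byte aligned offset and length, so this
         * takes the 64-bit read path */
        if (pread(fd, &val, sizeof(val), 0x18) == (ssize_t) sizeof(val))
                printf("chip offset 0x18: 0x%016llx\n",
                       (unsigned long long) val);
        close(fd);
        return 0;
}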
This diff has been collapsed.
/*
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include "ipath_kernel.h"
/*
* InfiniPath I2C driver for a serial eeprom. This is not a generic
* I2C interface. For a start, the device we're using (Atmel AT24C11)
* doesn't work like a regular I2C device. It looks like one
* electrically, but not logically. Normal I2C devices have a single
* 7-bit or 10-bit I2C address that they respond to. Valid 7-bit
* addresses range from 0x03 to 0x77. Addresses 0x00 to 0x02 and 0x78
* to 0x7F are special reserved addresses (e.g. 0x00 is the "general
* call" address.) The Atmel device, on the other hand, responds to ALL
* 7-bit addresses. It's designed to be the only device on a given I2C
* bus. A 7-bit address corresponds to the memory address within the
* Atmel device itself.
*
* Also, the timing requirements mean more than simple software
* bitbanging, with readbacks from chip to ensure timing (simple udelay
* is not enough).
*
* This all means that accessing the device is specialized enough
* that using the standard kernel I2C bitbanging interface would be
* impossible. For example, the core I2C eeprom driver expects to find
* a device at one or more of a limited set of addresses only. It doesn't
* allow writing to an eeprom. It also doesn't provide any means of
* accessing eeprom contents from within the kernel, only via sysfs.
*/
enum i2c_type {
i2c_line_scl = 0,
i2c_line_sda
};
enum i2c_state {
i2c_line_low = 0,
i2c_line_high
};
#define READ_CMD 1
#define WRITE_CMD 0
static int eeprom_init;
/*
* The gpioval manipulation really should be protected by spinlocks
* or be converted to use atomic operations.
*/
/**
* i2c_gpio_set - set a GPIO line
* @dd: the infinipath device
* @line: the line to set
* @new_line_state: the state to set
*
* Returns 0 if the line was set to the new state successfully, non-zero
* on error.
*/
static int i2c_gpio_set(struct ipath_devdata *dd,
enum i2c_type line,
enum i2c_state new_line_state)
{
u64 read_val, write_val, mask, *gpioval;
gpioval = &dd->ipath_gpio_out;
read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
if (line == i2c_line_scl)
mask = ipath_gpio_scl;
else
mask = ipath_gpio_sda;
if (new_line_state == i2c_line_high)
/* tri-state the output rather than force high */
write_val = read_val & ~mask;
else
/* config line to be an output */
write_val = read_val | mask;
ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
/* set high and verify */
if (new_line_state == i2c_line_high)
write_val = 0x1UL;
else
write_val = 0x0UL;
if (line == i2c_line_scl) {
write_val <<= ipath_gpio_scl_num;
*gpioval = *gpioval & ~(1UL << ipath_gpio_scl_num);
*gpioval |= write_val;
} else {
write_val <<= ipath_gpio_sda_num;
*gpioval = *gpioval & ~(1UL << ipath_gpio_sda_num);
*gpioval |= write_val;
}
ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
return 0;
}
/**
* i2c_gpio_get - get a GPIO line state
* @dd: the infinipath device
* @line: the line to get
* @curr_statep: where to put the line state
*
* Returns 0 if the line state was read successfully, non-zero
* on error.  curr_statep is not set on error.
*/
static int i2c_gpio_get(struct ipath_devdata *dd,
enum i2c_type line,
enum i2c_state *curr_statep)
{
u64 read_val, write_val, mask;
int ret;
/* check args */
if (curr_statep == NULL) {
ret = 1;
goto bail;
}
read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
/* config line to be an input */
if (line == i2c_line_scl)
mask = ipath_gpio_scl;
else
mask = ipath_gpio_sda;
write_val = read_val & ~mask;
ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
if (read_val & mask)
*curr_statep = i2c_line_high;
else
*curr_statep = i2c_line_low;
ret = 0;
bail:
return ret;
}
/**
* i2c_wait_for_writes - wait for a write
* @dd: the infinipath device
*
* We use this instead of udelay directly, so we can make sure
* that previous register writes have been flushed all the way
* to the chip. Since we are delaying anyway, the cost doesn't
* hurt, and makes the bit twiddling more regular
*/
static void i2c_wait_for_writes(struct ipath_devdata *dd)
{
(void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
}
static void scl_out(struct ipath_devdata *dd, u8 bit)
{
i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
i2c_wait_for_writes(dd);
}
static void sda_out(struct ipath_devdata *dd, u8 bit)
{
i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
i2c_wait_for_writes(dd);
}
static u8 sda_in(struct ipath_devdata *dd, int wait)
{
enum i2c_state bit;
if (i2c_gpio_get(dd, i2c_line_sda, &bit))
ipath_dbg("get bit failed!\n");
if (wait)
i2c_wait_for_writes(dd);
return bit == i2c_line_high ? 1U : 0;
}
/**
* i2c_ackrcv - see if ack following write is true
* @dd: the infinipath device
*/
static int i2c_ackrcv(struct ipath_devdata *dd)
{
u8 ack_received;
/* AT ENTRY SCL = LOW */
/* change direction, ignore data */
ack_received = sda_in(dd, 1);
scl_out(dd, i2c_line_high);
ack_received = sda_in(dd, 1) == 0;
scl_out(dd, i2c_line_low);
return ack_received;
}
/**
* wr_byte - write a byte, one bit at a time
* @dd: the infinipath device
* @data: the byte to write
*
* Returns 0 if we got the following ack, otherwise 1
*/
static int wr_byte(struct ipath_devdata *dd, u8 data)
{
int bit_cntr;
u8 bit;
for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
bit = (data >> bit_cntr) & 1;
sda_out(dd, bit);
scl_out(dd, i2c_line_high);
scl_out(dd, i2c_line_low);
}
return (!i2c_ackrcv(dd)) ? 1 : 0;
}
static void send_ack(struct ipath_devdata *dd)
{
sda_out(dd, i2c_line_low);
scl_out(dd, i2c_line_high);
scl_out(dd, i2c_line_low);
sda_out(dd, i2c_line_high);
}
/**
* i2c_startcmd - transmit the start condition, followed by address/cmd
* @dd: the infinipath device
* @offset_dir: direction byte
*
* (both clock/data high, clock high, data low while clock is high)
*/
static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
{
int res;
/* issue start sequence */
sda_out(dd, i2c_line_high);
scl_out(dd, i2c_line_high);
sda_out(dd, i2c_line_low);
scl_out(dd, i2c_line_low);
/* issue length and direction byte */
res = wr_byte(dd, offset_dir);
if (res)
ipath_cdbg(VERBOSE, "No ack to complete start\n");
return res;
}
/**
* stop_cmd - transmit the stop condition
* @dd: the infinipath device
*
* (both clock/data low, clock high, data high while clock is high)
*/
static void stop_cmd(struct ipath_devdata *dd)
{
scl_out(dd, i2c_line_low);
sda_out(dd, i2c_line_low);
scl_out(dd, i2c_line_high);
sda_out(dd, i2c_line_high);
udelay(2);
}
/**
* eeprom_reset - reset I2C communication
* @dd: the infinipath device
*/
static int eeprom_reset(struct ipath_devdata *dd)
{
int clock_cycles_left = 9;
u64 *gpioval = &dd->ipath_gpio_out;
int ret;
eeprom_init = 1;
*gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
"is %llx\n", (unsigned long long) *gpioval);
/*
* This is to get the i2c into a known state, by first going low,
* then tristate sda (and then tristate scl as first thing
* in loop)
*/
scl_out(dd, i2c_line_low);
sda_out(dd, i2c_line_high);
while (clock_cycles_left--) {
scl_out(dd, i2c_line_high);
if (sda_in(dd, 0)) {
sda_out(dd, i2c_line_low);
scl_out(dd, i2c_line_low);
ret = 0;
goto bail;
}
scl_out(dd, i2c_line_low);
}
ret = 1;
bail:
return ret;
}
/**
* ipath_eeprom_read - receives bytes from the eeprom via I2C
* @dd: the infinipath device
* @eeprom_offset: address to read from
* @buffer: where to store result
* @len: number of bytes to receive
*/
int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
void *buffer, int len)
{
/* compiler complains unless initialized */
u8 single_byte = 0;
int bit_cntr;
int ret;
if (!eeprom_init)
eeprom_reset(dd);
eeprom_offset = (eeprom_offset << 1) | READ_CMD;
if (i2c_startcmd(dd, eeprom_offset)) {
ipath_dbg("Failed startcmd\n");
stop_cmd(dd);
ret = 1;
goto bail;
}
/*
* eeprom keeps clocking data out as long as we ack, automatically
* incrementing the address.
*/
while (len-- > 0) {
/* get data */
single_byte = 0;
for (bit_cntr = 8; bit_cntr; bit_cntr--) {
u8 bit;
scl_out(dd, i2c_line_high);
bit = sda_in(dd, 0);
single_byte |= bit << (bit_cntr - 1);
scl_out(dd, i2c_line_low);
}
/* send ack if not the last byte */
if (len)
send_ack(dd);
*((u8 *) buffer) = single_byte;
buffer++;
}
stop_cmd(dd);
ret = 0;
bail:
return ret;
}
/**
* ipath_eeprom_write - writes data to the eeprom via I2C
* @dd: the infinipath device
* @eeprom_offset: where to place data
* @buffer: data to write
* @len: number of bytes to write
*/
int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
const void *buffer, int len)
{
u8 single_byte;
int sub_len;
const u8 *bp = buffer;
int max_wait_time, i;
int ret;
if (!eeprom_init)
eeprom_reset(dd);
while (len > 0) {
if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) {
ipath_dbg("Failed to start cmd offset %u\n",
eeprom_offset);
goto failed_write;
}
sub_len = min(len, 4);
eeprom_offset += sub_len;
len -= sub_len;
for (i = 0; i < sub_len; i++) {
if (wr_byte(dd, *bp++)) {
ipath_dbg("no ack after byte %u/%u (%u "
"total remain)\n", i, sub_len,
len + sub_len - i);
goto failed_write;
}
}
stop_cmd(dd);
/*
* wait for write complete by waiting for a successful
* read (the chip replies with a zero after the write
* cmd completes, and before it writes to the eeprom).
* The startcmd for the read will fail the ack until
* the writes have completed. We do this inline to avoid
* the debug prints that are in the real read routine
* if the startcmd fails.
*/
max_wait_time = 100;
while (i2c_startcmd(dd, READ_CMD)) {
stop_cmd(dd);
if (!--max_wait_time) {
ipath_dbg("Did not get successful read to "
"complete write\n");
goto failed_write;
}
}
/* now read the zero byte */
for (i = single_byte = 0; i < 8; i++) {
u8 bit;
scl_out(dd, i2c_line_high);
bit = sda_in(dd, 0);
scl_out(dd, i2c_line_low);
single_byte <<= 1;
single_byte |= bit;
}
stop_cmd(dd);
}
ret = 0;
goto bail;
failed_write:
stop_cmd(dd);
ret = 1;
bail:
return ret;
}
static u8 flash_csum(struct ipath_flash *ifp, int adjust)
{
u8 *ip = (u8 *) ifp;
u8 csum = 0, len;
for (len = 0; len < ifp->if_length; len++)
csum += *ip++;
csum -= ifp->if_csum;
csum = ~csum;
if (adjust)
ifp->if_csum = csum;
return csum;
}
/**
* ipath_get_guid - get the GUID from the i2c device
* @dd: the infinipath device
*
* When we add the multi-chip support, we will probably have to add
* the ability to use the number of guids field, and get the guid from
* the first chip's flash, to use for all of them.
*/
void ipath_get_guid(struct ipath_devdata *dd)
{
void *buf;
struct ipath_flash *ifp;
__be64 guid;
int len;
u8 csum, *bguid;
int t = dd->ipath_unit;
struct ipath_devdata *dd0 = ipath_lookup(0);
if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
u8 *bguid, oguid;
dd->ipath_guid = dd0->ipath_guid;
bguid = (u8 *) & dd->ipath_guid;
oguid = bguid[7];
bguid[7] += t;
if (oguid > bguid[7]) {
if (bguid[6] == 0xff) {
if (bguid[5] == 0xff) {
ipath_dev_err(
dd,
"Can't set %s GUID from "
"base, wraps to OUI!\n",
ipath_get_unit_name(t));
dd->ipath_guid = 0;
goto bail;
}
bguid[5]++;
}
bguid[6]++;
}
dd->ipath_nguid = 1;
ipath_dbg("nguid %u, so adding %u to device 0 guid, "
"for %llx\n",
dd0->ipath_nguid, t,
(unsigned long long) be64_to_cpu(dd->ipath_guid));
goto bail;
}
len = offsetof(struct ipath_flash, if_future);
buf = vmalloc(len);
if (!buf) {
ipath_dev_err(dd, "Couldn't allocate memory to read %u "
"bytes from eeprom for GUID\n", len);
goto bail;
}
if (ipath_eeprom_read(dd, 0, buf, len)) {
ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
goto done;
}
ifp = (struct ipath_flash *)buf;
csum = flash_csum(ifp, 0);
if (csum != ifp->if_csum) {
dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
"0x%x, not 0x%x\n", csum, ifp->if_csum);
goto done;
}
if (*(__be64 *) ifp->if_guid == 0ULL ||
*(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
ipath_dev_err(dd, "Invalid GUID %llx from flash; "
"ignoring\n",
*(unsigned long long *) ifp->if_guid);
/* don't allow GUID if all 0 or all 1's */
goto done;
}
/* complain, but allow it */
if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
"default, probably not correct!\n",
*(unsigned long long *) ifp->if_guid);
bguid = ifp->if_guid;
if (!bguid[0] && !bguid[1] && !bguid[2]) {
/* original incorrect GUID format in flash; fix in
* core copy, by shifting up 2 octets; don't need to
* change top octet, since both it and shifted are
* 0.. */
bguid[1] = bguid[3];
bguid[2] = bguid[4];
bguid[3] = bguid[4] = 0;
guid = *(__be64 *) ifp->if_guid;
ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
"shifting 2 octets\n");
} else
guid = *(__be64 *) ifp->if_guid;
dd->ipath_guid = guid;
dd->ipath_nguid = ifp->if_numguid;
memcpy(dd->ipath_serial, ifp->if_serial,
sizeof(ifp->if_serial));
ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
(unsigned long long) be64_to_cpu(dd->ipath_guid));
done:
vfree(buf);
bail:;
}
/*
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pci.h>
#include "ipath_kernel.h"
#define IPATHFS_MAGIC 0x726a77
static struct super_block *ipath_super;
static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
int mode, struct file_operations *fops,
void *data)
{
int error;
struct inode *inode = new_inode(dir->i_sb);
if (!inode) {
error = -EPERM;
goto bail;
}
inode->i_mode = mode;
inode->i_uid = 0;
inode->i_gid = 0;
inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->u.generic_ip = data;
if ((mode & S_IFMT) == S_IFDIR) {
inode->i_op = &simple_dir_inode_operations;
inode->i_nlink++;
dir->i_nlink++;
}
inode->i_fop = fops;
d_instantiate(dentry, inode);
error = 0;
bail:
return error;
}
static int create_file(const char *name, mode_t mode,
struct dentry *parent, struct dentry **dentry,
struct file_operations *fops, void *data)
{
int error;
*dentry = NULL;
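	/* lookup_one_len() requires the parent directory's i_mutex */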
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(*dentry))
error = ipathfs_mknod(parent->d_inode, *dentry,
mode, fops, data);
else
		error = PTR_ERR(*dentry);
mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
static ssize_t atomic_stats_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
sizeof ipath_stats);
}
static struct file_operations atomic_stats_ops = {
.read = atomic_stats_read,
};
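
/* number of 64-bit values exported through each per-device atomic_counters file */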
#define NUM_COUNTERS (sizeof(struct infinipath_counters) / sizeof(u64))
static ssize_t atomic_counters_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
u64 counters[NUM_COUNTERS];
u16 i;
struct ipath_devdata *dd;
dd = file->f_dentry->d_inode->u.generic_ip;
for (i = 0; i < NUM_COUNTERS; i++)
counters[i] = ipath_snap_cntr(dd, i);
return simple_read_from_buffer(buf, count, ppos, counters,
sizeof counters);
}
static struct file_operations atomic_counters_ops = {
.read = atomic_counters_read,
};
static ssize_t atomic_node_info_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
u32 nodeinfo[10];
struct ipath_devdata *dd;
u64 guid;
dd = file->f_dentry->d_inode->u.generic_ip;
guid = be64_to_cpu(dd->ipath_guid);
nodeinfo[0] = /* BaseVersion is SMA */
/* ClassVersion is SMA */
(1 << 8) /* NodeType */
| (1 << 0); /* NumPorts */
nodeinfo[1] = (u32) (guid >> 32);
nodeinfo[2] = (u32) (guid & 0xffffffff);
/* PortGUID == SystemImageGUID for us */
nodeinfo[3] = nodeinfo[1];
/* PortGUID == SystemImageGUID for us */
nodeinfo[4] = nodeinfo[2];
/* PortGUID == NodeGUID for us */
nodeinfo[5] = nodeinfo[3];
/* PortGUID == NodeGUID for us */
nodeinfo[6] = nodeinfo[4];
nodeinfo[7] = (4 << 16) /* we support 4 pkeys */
| (dd->ipath_deviceid << 0);
/* our chip version as 16 bits major, 16 bits minor */
nodeinfo[8] = dd->ipath_minrev | (dd->ipath_majrev << 16);
nodeinfo[9] = (dd->ipath_unit << 24) | (dd->ipath_vendorid << 0);
return simple_read_from_buffer(buf, count, ppos, nodeinfo,
sizeof nodeinfo);
}
static struct file_operations atomic_node_info_ops = {
.read = atomic_node_info_read,
};
static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
u32 portinfo[13];
u32 tmp, tmp2;
struct ipath_devdata *dd;
dd = file->f_dentry->d_inode->u.generic_ip;
	/* memset so we only have to fill in the non-zero fields */
memset(portinfo, 0, sizeof portinfo);
/*
* Notimpl yet M_Key (64)
* Notimpl yet GID (64)
*/
portinfo[4] = (dd->ipath_lid << 16);
	/*
	 * Notimpl yet SMLID (should we store this in the driver, in case
	 * SMA dies?)
	 * CapabilityMask is 0, we don't support any of these
	 * DiagCode is 0; we don't store any diag info for now
	 * Notimpl yet M_KeyLeasePeriod (we don't support M_Key)
	 */
/* LocalPortNum is whichever port number they ask for */
portinfo[7] = (dd->ipath_unit << 24)
/* LinkWidthEnabled */
| (2 << 16)
/* LinkWidthSupported (really 2, but not IB valid) */
| (3 << 8)
/* LinkWidthActive */
| (2 << 0);
tmp = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
tmp2 = 5;
if (tmp == IPATH_IBSTATE_INIT)
tmp = 2;
else if (tmp == IPATH_IBSTATE_ARM)
tmp = 3;
else if (tmp == IPATH_IBSTATE_ACTIVE)
tmp = 4;
else {
tmp = 0; /* down */
tmp2 = tmp & 0xf;
}
portinfo[8] = (1 << 28) /* LinkSpeedSupported */
| (tmp << 24) /* PortState */
| (tmp2 << 20) /* PortPhysicalState */
| (2 << 16)
/* LinkDownDefaultState */
/* M_KeyProtectBits == 0 */
/* NotImpl yet LMC == 0 (we can support all values) */
| (1 << 4) /* LinkSpeedActive */
| (1 << 0); /* LinkSpeedEnabled */
switch (dd->ipath_ibmtu) {
case 4096:
tmp = 5;
break;
case 2048:
tmp = 4;
break;
case 1024:
tmp = 3;
break;
case 512:
tmp = 2;
break;
case 256:
tmp = 1;
break;
default: /* oops, something is wrong */
ipath_dbg("Problem, ipath_ibmtu 0x%x not a valid IB MTU, "
"treat as 2048\n", dd->ipath_ibmtu);
tmp = 4;
break;
}
portinfo[9] = (tmp << 28)
/* NeighborMTU */
/* Notimpl MasterSMSL */
| (1 << 20)
/* VLCap */
/* Notimpl InitType (actually, an SMA decision) */
/* VLHighLimit is 0 (only one VL) */
; /* VLArbitrationHighCap is 0 (only one VL) */
portinfo[10] = /* VLArbitrationLowCap is 0 (only one VL) */
/* InitTypeReply is SMA decision */
(5 << 16) /* MTUCap 4096 */
| (7 << 13) /* VLStallCount */
| (0x1f << 8) /* HOQLife */
| (1 << 4)
/* OperationalVLs 0 */
/* PartitionEnforcementInbound */
/* PartitionEnforcementOutbound not enforced */
/* FilterRawinbound not enforced */
; /* FilterRawOutbound not enforced */
/* M_KeyViolations are not counted by hardware, SMA can count */
tmp = ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
/* P_KeyViolations are counted by hardware. */
portinfo[11] = ((tmp & 0xffff) << 0);
portinfo[12] =
/* Q_KeyViolations are not counted by hardware */
(1 << 8)
/* GUIDCap */
/* SubnetTimeOut handled by SMA */
/* RespTimeValue handled by SMA */
;
/* LocalPhyErrors are programmed to max */
portinfo[12] |= (0xf << 20)
| (0xf << 16) /* OverRunErrors are programmed to max */
;
return simple_read_from_buffer(buf, count, ppos, portinfo,
sizeof portinfo);
}
static struct file_operations atomic_port_info_ops = {
.read = atomic_port_info_read,
};
static ssize_t flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct ipath_devdata *dd;
ssize_t ret;
loff_t pos;
char *tmp;
pos = *ppos;
	if (pos < 0) {
ret = -EINVAL;
goto bail;
}
if (pos >= sizeof(struct ipath_flash)) {
ret = 0;
goto bail;
}
if (count > sizeof(struct ipath_flash) - pos)
count = sizeof(struct ipath_flash) - pos;
tmp = kmalloc(count, GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
goto bail;
}
dd = file->f_dentry->d_inode->u.generic_ip;
if (ipath_eeprom_read(dd, pos, tmp, count)) {
ipath_dev_err(dd, "failed to read from flash\n");
ret = -ENXIO;
goto bail_tmp;
}
if (copy_to_user(buf, tmp, count)) {
ret = -EFAULT;
goto bail_tmp;
}
*ppos = pos + count;
ret = count;
bail_tmp:
kfree(tmp);
bail:
return ret;
}
static ssize_t flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ipath_devdata *dd;
ssize_t ret;
loff_t pos;
char *tmp;
pos = *ppos;
	if (pos < 0) {
ret = -EINVAL;
goto bail;
}
if (pos >= sizeof(struct ipath_flash)) {
ret = 0;
goto bail;
}
if (count > sizeof(struct ipath_flash) - pos)
count = sizeof(struct ipath_flash) - pos;
tmp = kmalloc(count, GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
goto bail;
}
if (copy_from_user(tmp, buf, count)) {
ret = -EFAULT;
goto bail_tmp;
}
dd = file->f_dentry->d_inode->u.generic_ip;
if (ipath_eeprom_write(dd, pos, tmp, count)) {
ret = -ENXIO;
ipath_dev_err(dd, "failed to write to flash\n");
goto bail_tmp;
}
*ppos = pos + count;
ret = count;
bail_tmp:
kfree(tmp);
bail:
return ret;
}
static struct file_operations flash_ops = {
.read = flash_read,
.write = flash_write,
};
static int create_device_files(struct super_block *sb,
struct ipath_devdata *dd)
{
struct dentry *dir, *tmp;
char unit[10];
int ret;
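	/* each device gets a directory named by its two-digit unit number */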
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
(struct file_operations *) &simple_dir_operations,
dd);
if (ret) {
printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
goto bail;
}
ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
&atomic_counters_ops, dd);
if (ret) {
printk(KERN_ERR "create_file(%s/atomic_counters) "
"failed: %d\n", unit, ret);
goto bail;
}
ret = create_file("node_info", S_IFREG|S_IRUGO, dir, &tmp,
&atomic_node_info_ops, dd);
if (ret) {
printk(KERN_ERR "create_file(%s/node_info) "
"failed: %d\n", unit, ret);
goto bail;
}
ret = create_file("port_info", S_IFREG|S_IRUGO, dir, &tmp,
&atomic_port_info_ops, dd);
if (ret) {
printk(KERN_ERR "create_file(%s/port_info) "
"failed: %d\n", unit, ret);
goto bail;
}
ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
&flash_ops, dd);
if (ret) {
printk(KERN_ERR "create_file(%s/flash) "
"failed: %d\n", unit, ret);
goto bail;
}
bail:
return ret;
}
static void remove_file(struct dentry *parent, char *name)
{
struct dentry *tmp;
tmp = lookup_one_len(name, parent, strlen(name));
spin_lock(&dcache_lock);
spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
dget_locked(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
spin_unlock(&dcache_lock);
simple_unlink(parent->d_inode, tmp);
} else {
spin_unlock(&tmp->d_lock);
spin_unlock(&dcache_lock);
}
}
static int remove_device_files(struct super_block *sb,
struct ipath_devdata *dd)
{
struct dentry *dir, *root;
char unit[10];
int ret;
root = dget(sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
dir = lookup_one_len(unit, root, strlen(unit));
if (IS_ERR(dir)) {
ret = PTR_ERR(dir);
printk(KERN_ERR "Lookup of %s failed\n", unit);
goto bail;
}
remove_file(dir, "flash");
remove_file(dir, "port_info");
remove_file(dir, "node_info");
remove_file(dir, "atomic_counters");
d_delete(dir);
ret = simple_rmdir(root->d_inode, dir);
bail:
mutex_unlock(&root->d_inode->i_mutex);
dput(root);
return ret;
}
static int ipathfs_fill_super(struct super_block *sb, void *data,
int silent)
{
struct ipath_devdata *dd, *tmp;
unsigned long flags;
int ret;
static struct tree_descr files[] = {
[1] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
{""},
};
ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
if (ret) {
printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
goto bail;
}
spin_lock_irqsave(&ipath_devs_lock, flags);
list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
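		/*
		 * Drop the spinlock around create_device_files(); it takes
		 * a mutex and allocates memory, so it may sleep.
		 */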
spin_unlock_irqrestore(&ipath_devs_lock, flags);
ret = create_device_files(sb, dd);
if (ret) {
deactivate_super(sb);
goto bail;
}
spin_lock_irqsave(&ipath_devs_lock, flags);
}
spin_unlock_irqrestore(&ipath_devs_lock, flags);
bail:
return ret;
}
static struct super_block *ipathfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
ipath_super = get_sb_single(fs_type, flags, data,
ipathfs_fill_super);
return ipath_super;
}
static void ipathfs_kill_super(struct super_block *s)
{
kill_litter_super(s);
ipath_super = NULL;
}
int ipathfs_add_device(struct ipath_devdata *dd)
{
int ret;
if (ipath_super == NULL) {
ret = 0;
goto bail;
}
ret = create_device_files(ipath_super, dd);
bail:
return ret;
}
int ipathfs_remove_device(struct ipath_devdata *dd)
{
int ret;
if (ipath_super == NULL) {
ret = 0;
goto bail;
}
ret = remove_device_files(ipath_super, dd);
bail:
return ret;
}
static struct file_system_type ipathfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
.get_sb = ipathfs_get_sb,
.kill_sb = ipathfs_kill_super,
};
int __init ipath_init_ipathfs(void)
{
return register_filesystem(&ipathfs_fs_type);
}
void __exit ipath_exit_ipathfs(void)
{
unregister_filesystem(&ipathfs_fs_type);
}
/*
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <asm/io.h>
#include "ipath_verbs.h"
/**
* ipath_alloc_lkey - allocate an lkey
* @rkt: lkey table in which to allocate the lkey
* @mr: memory region that this lkey protects
*
* Returns 1 if successful, otherwise returns 0.
*/
int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
{
unsigned long flags;
u32 r;
u32 n;
int ret;
spin_lock_irqsave(&rkt->lock, flags);
/* Find the next available LKEY */
r = n = rkt->next;
for (;;) {
if (rkt->table[r] == NULL)
break;
r = (r + 1) & (rkt->max - 1);
if (r == n) {
spin_unlock_irqrestore(&rkt->lock, flags);
_VERBS_INFO("LKEY table full\n");
ret = 0;
goto bail;
}
}
rkt->next = (r + 1) & (rkt->max - 1);
/*
* Make sure lkey is never zero which is reserved to indicate an
* unrestricted LKEY.
*/
rkt->gen++;
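	/*
	 * The key layout: the table index sits in the top
	 * ib_ipath_lkey_table_size bits, and the generation count is
	 * placed above bit 8, so reusing a table slot still produces a
	 * different lkey value.
	 */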
mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
<< 8);
if (mr->lkey == 0) {
mr->lkey |= 1 << 8;
rkt->gen++;
}
rkt->table[r] = mr;
spin_unlock_irqrestore(&rkt->lock, flags);
ret = 1;
bail:
return ret;
}
/**
* ipath_free_lkey - free an lkey
* @rkt: table from which to free the lkey
* @lkey: lkey id to free
*/
void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
{
unsigned long flags;
u32 r;
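	/*
	 * An lkey of zero is reserved to mean a direct physical address
	 * and is never stored in the table, so there is nothing to free.
	 */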
if (lkey == 0)
return;
r = lkey >> (32 - ib_ipath_lkey_table_size);
spin_lock_irqsave(&rkt->lock, flags);
rkt->table[r] = NULL;
spin_unlock_irqrestore(&rkt->lock, flags);
}
/**
* ipath_lkey_ok - check IB SGE for validity and initialize
* @rkt: table containing lkey to check SGE against
* @isge: outgoing internal SGE
* @sge: SGE to check
* @acc: access flags
*
* Return 1 if valid and successful, otherwise returns 0.
*
* Check the IB SGE for validity and initialize our internal version
* of it.
*/
int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
struct ib_sge *sge, int acc)
{
struct ipath_mregion *mr;
size_t off;
int ret;
/*
* We use LKEY == zero to mean a physical kmalloc() address.
* This is a bit of a hack since we rely on dma_map_single()
* being reversible by calling bus_to_virt().
*/
if (sge->lkey == 0) {
isge->mr = NULL;
isge->vaddr = bus_to_virt(sge->addr);
isge->length = sge->length;
isge->sge_length = sge->length;
ret = 1;
goto bail;
}
spin_lock(&rkt->lock);
mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
spin_unlock(&rkt->lock);
if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
ret = 0;
goto bail;
}
off = sge->addr - mr->user_base;
if (unlikely(sge->addr < mr->user_base ||
off + sge->length > mr->length ||
(mr->access_flags & acc) != acc)) {
ret = 0;
goto bail;
}
off += mr->offset;
isge->mr = mr;
isge->m = 0;
isge->n = 0;
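	/*
	 * Walk the region's segment map (IPATH_SEGSZ segments per map
	 * entry) until the remaining offset falls within one segment.
	 */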
while (off >= mr->map[isge->m]->segs[isge->n].length) {
off -= mr->map[isge->m]->segs[isge->n].length;
isge->n++;
if (isge->n >= IPATH_SEGSZ) {
isge->m++;
isge->n = 0;
}
}
isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off;
isge->length = mr->map[isge->m]->segs[isge->n].length - off;
isge->sge_length = sge->length;
ret = 1;
bail:
return ret;
}
/**
* ipath_rkey_ok - check the IB virtual address, length, and RKEY
* @dev: infiniband device
* @ss: SGE state
* @len: length of data
* @vaddr: virtual address to place data
* @rkey: rkey to check
* @acc: access flags
*
* Return 1 if successful, otherwise 0.
*
* The QP r_rq.lock should be held.
*/
int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
u32 len, u64 vaddr, u32 rkey, int acc)
{
struct ipath_lkey_table *rkt = &dev->lk_table;
struct ipath_sge *sge = &ss->sge;
struct ipath_mregion *mr;
size_t off;
int ret;
spin_lock(&rkt->lock);
mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
spin_unlock(&rkt->lock);
if (unlikely(mr == NULL || mr->lkey != rkey)) {
ret = 0;
goto bail;
}
off = vaddr - mr->iova;
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0)) {
ret = 0;
goto bail;
}
off += mr->offset;
sge->mr = mr;
sge->m = 0;
sge->n = 0;
while (off >= mr->map[sge->m]->segs[sge->n].length) {
off -= mr->map[sge->m]->segs[sge->n].length;
sge->n++;
if (sge->n >= IPATH_SEGSZ) {
sge->m++;
sge->n = 0;
}
}
sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off;
sge->length = mr->map[sge->m]->segs[sge->n].length - off;
sge->sge_length = len;
ss->sg_list = NULL;
ss->num_sge = 1;
ret = 1;
bail:
return ret;
}