Commit 0625bef6 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Increase DMA max_segment_size on Mellanox hardware
  IB/mad: Improve an error message so error code is included
  RDMA/nes: Don't print success message at level KERN_ERR
  RDMA/addr: Fix return of uninitialized ret value
  IB/srp: try to use larger FMR sizes to cover our mappings
  IB/srp: add support for indirect tables that don't fit in SRP_CMD
  IB/srp: rework mapping engine to use multiple FMR entries
  IB/srp: allow sg_tablesize to be set for each target
  IB/srp: move IB CM setup completion into its own function
  IB/srp: always avoid non-zero offsets into an FMR
@@ -204,7 +204,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
         /* If the device does ARP internally, return 'done' */
         if (rt->dst.dev->flags & IFF_NOARP) {
-                rdma_copy_addr(addr, rt->dst.dev, NULL);
+                ret = rdma_copy_addr(addr, rt->dst.dev, NULL);
                 goto put;
         }
...
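
The point of the addr4_resolve() change above is that `ret` is what the function hands back to its caller, and the IFF_NOARP early exit previously jumped to `put` without ever assigning it. A minimal stand-alone C sketch of that bug pattern and its fix; the helper below is only a stand-in for rdma_copy_addr(), not the kernel code:

#include <stdio.h>

/* Stand-in for a helper that returns a status, like rdma_copy_addr(). */
static int copy_addr(void)
{
        return 0;
}

static int resolve(int noarp)
{
        int ret;

        if (noarp) {
                ret = copy_addr();      /* without this assignment, ret is indeterminate */
                goto put;
        }

        ret = copy_addr();
put:
        return ret;
}

int main(void)
{
        printf("resolve(1) = %d\n", resolve(1));
        return 0;
}
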
@@ -101,7 +101,8 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
         agent = port_priv->agent[qpn];
         ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
         if (IS_ERR(ah)) {
-                printk(KERN_ERR SPFX "ib_create_ah_from_wc error\n");
+                printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
+                       PTR_ERR(ah));
                 return;
         }
...
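
For context on the agent.c change above: ib_create_ah_from_wc() reports failure by returning an errno value encoded into the pointer itself, which is why the log line can print PTR_ERR(ah). Below is a small user-space sketch of that ERR_PTR/IS_ERR/PTR_ERR convention, using simplified stand-ins rather than the kernel's actual macros:

#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)
{
        return (void *)error;                   /* encode -errno in the pointer */
}

static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)
{
        return (long)ptr;                       /* recover the -errno value */
}

int main(void)
{
        void *ah = err_ptr(-12);                /* e.g. a constructor failing with -ENOMEM */

        if (is_err(ah))
                printf("create_ah error %ld\n", ptr_err(ah));
        return 0;
}

Logging the decoded value, as the patch does, turns an opaque failure report into one that says why address-handle creation failed.
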
@@ -1043,6 +1043,9 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
                 }
         }
 
+        /* We can handle large RDMA requests, so allow larger segments. */
+        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
         mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
         if (!mdev) {
                 dev_err(&pdev->dev, "Device struct alloc failed, "
...
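
The dma_set_max_seg_size() call above (and the matching mlx4 change at the end of this merge) records in the device's DMA parameters that the HCA can accept DMA segments of up to 1 GB, rather than the conservative 64 KB default, so layers that build scatterlists can merge entries into much larger segments. Consumers read the limit back with dma_get_max_seg_size(); a minimal hedged sketch of that read side, with an illustrative function name:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative only: clamp a requested segment length to whatever the
 * device advertised via dma_set_max_seg_size() in its probe routine. */
static unsigned int example_clamp_seg(struct pci_dev *pdev, unsigned int want)
{
        unsigned int max = dma_get_max_seg_size(&pdev->dev);

        return want > max ? max : want;
}
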
@@ -694,7 +694,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
         nesdev->netdev_count++;
         nesdev->nesadapter->netdev_count++;
 
-        printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
+        printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n",
                 pci_name(pcidev));
         return 0;
...
This diff has been collapsed.
@@ -69,9 +69,13 @@ enum {
         SRP_TAG_NO_REQ          = ~0U,
         SRP_TAG_TSK_MGMT        = 1U << 31,
 
-        SRP_FMR_SIZE            = 256,
+        SRP_FMR_SIZE            = 512,
+        SRP_FMR_MIN_SIZE        = 128,
         SRP_FMR_POOL_SIZE       = 1024,
-        SRP_FMR_DIRTY_SIZE      = SRP_FMR_POOL_SIZE / 4
+        SRP_FMR_DIRTY_SIZE      = SRP_FMR_POOL_SIZE / 4,
+
+        SRP_MAP_ALLOW_FMR       = 0,
+        SRP_MAP_NO_FMR          = 1,
 };
 
 enum srp_target_state {
@@ -93,9 +97,9 @@ struct srp_device {
         struct ib_pd           *pd;
         struct ib_mr           *mr;
         struct ib_fmr_pool     *fmr_pool;
-        int                     fmr_page_shift;
-        int                     fmr_page_size;
         u64                     fmr_page_mask;
+        int                     fmr_page_size;
+        int                     fmr_max_size;
 };
 
 struct srp_host {
@@ -112,7 +116,11 @@ struct srp_request {
         struct list_head        list;
         struct scsi_cmnd       *scmnd;
         struct srp_iu          *cmd;
-        struct ib_pool_fmr     *fmr;
+        struct ib_pool_fmr    **fmr_list;
+        u64                    *map_page;
+        struct srp_direct_buf  *indirect_desc;
+        dma_addr_t              indirect_dma_addr;
+        short                   nfmr;
         short                   index;
 };
@@ -130,6 +138,10 @@ struct srp_target_port {
         u32                     lkey;
         u32                     rkey;
         enum srp_target_state   state;
+        unsigned int            max_iu_len;
+        unsigned int            cmd_sg_cnt;
+        unsigned int            indirect_size;
+        bool                    allow_ext_sg;
 
         /* Everything above this point is used in the hot path of
          * command processing. Try to keep them packed into cachelines.
@@ -144,6 +156,7 @@ struct srp_target_port {
         struct Scsi_Host       *scsi_host;
         char                    target_name[32];
         unsigned int            scsi_id;
+        unsigned int            sg_tablesize;
 
         struct ib_sa_path_rec   path;
         __be16                  orig_dgid[8];
@@ -179,4 +192,19 @@ struct srp_iu {
         enum dma_data_direction direction;
 };
 
+struct srp_map_state {
+        struct ib_pool_fmr    **next_fmr;
+        struct srp_direct_buf  *desc;
+        u64                    *pages;
+        dma_addr_t              base_dma_addr;
+        u32                     fmr_len;
+        u32                     total_len;
+        unsigned int            npages;
+        unsigned int            nfmr;
+        unsigned int            ndesc;
+        struct scatterlist     *unmapped_sg;
+        int                     unmapped_index;
+        dma_addr_t              unmapped_addr;
+};
+
 #endif /* IB_SRP_H */
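
The new struct srp_map_state collects the bookkeeping the reworked SRP mapping engine carries while walking a scatterlist: the next slot in the per-request fmr_list, the page addresses accumulated so far, the descriptor being built, and running length counters. The ib_srp.c side of the rework is in the collapsed diff above, so the following is only a rough sketch of how such a state might be flushed into one pooled FMR; the function name and exact flow are assumptions for illustration, not the driver's code, and it assumes the usual <rdma/ib_fmr_pool.h> helpers plus the header shown here:

#include <linux/err.h>
#include <rdma/ib_fmr_pool.h>
#include "ib_srp.h"

/* Hypothetical helper: turn the pages accumulated in a srp_map_state into
 * one FMR mapping from the pool and emit a descriptor for it. */
static int example_flush_fmr(struct srp_device *dev, struct srp_map_state *state)
{
        struct ib_pool_fmr *fmr;

        if (!state->npages)
                return 0;                       /* nothing accumulated yet */

        fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
                                   state->npages, state->base_dma_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->next_fmr++ = fmr;               /* remembered for later unmapping */
        state->nfmr++;

        state->desc->va  = cpu_to_be64(state->base_dma_addr);
        state->desc->key = cpu_to_be32(fmr->fmr->rkey);
        state->desc->len = cpu_to_be32(state->fmr_len);
        state->desc++;
        state->ndesc++;

        state->total_len += state->fmr_len;
        state->npages  = 0;
        state->fmr_len = 0;
        return 0;
}

The point of the indirection is that one SCSI command can now consume several FMR entries (tracked via fmr_list/nfmr) instead of being limited to a single mapping, which is what the "rework mapping engine to use multiple FMR entries" and "add support for indirect tables that don't fit in SRP_CMD" commits in the list above are about.
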
@@ -1109,6 +1109,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                 }
         }
 
+        /* Allow large DMA segments, up to the firmware limit of 1 GB */
+        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
         priv = kzalloc(sizeof *priv, GFP_KERNEL);
         if (!priv) {
                 dev_err(&pdev->dev, "Device struct alloc failed, "
...