Commit e04abfa2 authored by Roland Dreier

Merge branches 'mlx5', 'qib' and 'srp' into for-next

@@ -171,7 +171,7 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 	int c;

 	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
-		return -EPERM;
+		return -EFAULT;

 	c = order2idx(dev, ent->order);
 	lbuf[sizeof(lbuf) - 1] = 0;
@@ -208,7 +208,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
 		return err;

 	if (copy_to_user(buf, lbuf, err))
-		return -EPERM;
+		return -EFAULT;

 	*pos += err;
@@ -233,7 +233,7 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
 	int c;

 	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
-		return -EPERM;
+		return -EFAULT;

 	c = order2idx(dev, ent->order);
 	lbuf[sizeof(lbuf) - 1] = 0;
@@ -270,7 +270,7 @@ static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
 		return err;

 	if (copy_to_user(buf, lbuf, err))
-		return -EPERM;
+		return -EFAULT;

 	*pos += err;
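The debugfs handlers touched by this series now return -EFAULT instead of -EPERM when copy_from_user()/copy_to_user() fail: those helpers return the number of bytes they could not copy, and a non-zero result means the user buffer faulted, not that permission was denied. A minimal kernel-style sketch of the resulting write-handler idiom (example_write and its buffer size are illustrative, not from this patch):

static ssize_t example_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        char lbuf[20];

        if (count >= sizeof(lbuf))
                return -EINVAL;         /* input too large for our buffer */
        if (copy_from_user(lbuf, buf, count))
                return -EFAULT;         /* user buffer was unreadable */
        lbuf[count] = '\0';             /* NUL-terminate before parsing */

        /* ... parse lbuf ... */
        return count;
}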
......
@@ -1348,7 +1348,7 @@ static inline int __qib_sdma_running(struct qib_pportdata *ppd)
 	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
 }
 int qib_sdma_running(struct qib_pportdata *);
+void dump_sdma_state(struct qib_pportdata *ppd);
 void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
 void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
......
@@ -83,6 +83,7 @@ static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
 static void serdes_7322_los_enable(struct qib_pportdata *, int);
 static int serdes_7322_init_old(struct qib_pportdata *);
 static int serdes_7322_init_new(struct qib_pportdata *);
+static void dump_sdma_7322_state(struct qib_pportdata *);

 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
@@ -652,6 +653,7 @@ struct qib_chippport_specific {
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+	char sdmamsgbuf[192]; /* for per-port sdma error messages */
 };

 static struct {
@@ -1601,6 +1603,15 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)

 	spin_lock_irqsave(&ppd->sdma_lock, flags);

+	if (errs != QIB_E_P_SDMAHALT) {
+		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
+		qib_dev_porterr(dd, ppd->port,
+			"SDMA %s 0x%016llx %s\n",
+			qib_sdma_state_names[ppd->sdma_state.current_state],
+			errs, ppd->cpspec->sdmamsgbuf);
+		dump_sdma_7322_state(ppd);
+	}
+
 	switch (ppd->sdma_state.current_state) {
 	case qib_sdma_state_s00_hw_down:
 		break;
@@ -2156,6 +2167,29 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
 		qib_dev_err(dd, "%s hardware error\n", msg);

+	if (hwerrs &
+		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
+		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
+		int pidx = 0;
+		int err;
+		unsigned long flags;
+		struct qib_pportdata *ppd = dd->pport;
+
+		for (; pidx < dd->num_pports; ++pidx, ppd++) {
+			err = 0;
+			if (pidx == 0 && (hwerrs &
+				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
+				err++;
+			if (pidx == 1 && (hwerrs &
+				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
+				err++;
+			if (err) {
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				dump_sdma_7322_state(ppd);
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	if (isfatal && !dd->diag_client) {
 		qib_dev_err(dd,
 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
@@ -6753,6 +6787,86 @@ static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
 }

+/*
+ * sdma_lock should be acquired before calling this routine
+ */
+static void dump_sdma_7322_state(struct qib_pportdata *ppd)
+{
+	u64 reg, reg1, reg2;
+
+	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmastatus: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_sendctrl);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA sendctrl: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmabase);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmabase: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
+	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
+	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
+		reg, reg1, reg2);
+
+	/* get bufuse bits, clear them, and print them again if non-zero */
+	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
+	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
+	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
+	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
+	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
+	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
+	/* 0 and 1 should always be zero, so print as short form */
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
+		reg, reg1, reg2);
+	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
+	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
+	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
+	/* 0 and 1 should always be zero, so print as short form */
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
+		reg, reg1, reg2);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmatail);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmatail: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmahead);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmahead: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmalengen: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmadesccnt: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
+
+	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
+
+	dump_sdma_state(ppd);
+}
+
 static struct sdma_set_state_action sdma_7322_action_table[] = {
 	[qib_sdma_state_s00_hw_down] = {
 		.go_s99_running_tofalse = 1,
......
@@ -1350,7 +1350,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
 	if (dd->pageshadow) {
 		struct page **tmpp = dd->pageshadow;
 		dma_addr_t *tmpd = dd->physshadow;
-		int i, cnt = 0;
+		int i;

 		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
 			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
@@ -1363,13 +1363,13 @@ static void cleanup_device_data(struct qib_devdata *dd)
 					PAGE_SIZE, PCI_DMA_FROMDEVICE);
 				qib_release_user_pages(&tmpp[i], 1);
 				tmpp[i] = NULL;
-				cnt++;
 			}
 		}

-		tmpp = dd->pageshadow;
 		dd->pageshadow = NULL;
 		vfree(tmpp);
+		dd->physshadow = NULL;
+		vfree(tmpd);
 	}

 	/*
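The cleanup hunk above plugs a leak: dd->physshadow, the vmalloc'ed array of DMA addresses, was never freed on teardown, and the cnt counter was computed but unused. A minimal kernel-style sketch of the pairing the fix restores (the struct and function names here are illustrative only, not from the driver):

struct shadow_arrays {
        struct page **pageshadow;       /* vmalloc'ed array of page pointers */
        dma_addr_t *physshadow;         /* vmalloc'ed array of DMA addresses */
};

static void cleanup_shadow_arrays(struct shadow_arrays *s)
{
        struct page **tmpp = s->pageshadow;
        dma_addr_t *tmpd = s->physshadow;

        /* clear the published pointers first, then free both arrays */
        s->pageshadow = NULL;
        vfree(tmpp);
        s->physshadow = NULL;
        vfree(tmpd);
}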
......
@@ -708,6 +708,62 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 	return ret;
 }

+/*
+ * sdma_lock should be acquired before calling this routine
+ */
+void dump_sdma_state(struct qib_pportdata *ppd)
+{
+	struct qib_sdma_desc *descq;
+	struct qib_sdma_txreq *txp, *txpnext;
+	__le64 *descqp;
+	u64 desc[2];
+	dma_addr_t addr;
+	u16 gen, dwlen, dwoffset;
+	u16 head, tail, cnt;
+
+	head = ppd->sdma_descq_head;
+	tail = ppd->sdma_descq_tail;
+	cnt = qib_sdma_descq_freecnt(ppd);
+	descq = ppd->sdma_descq;
+
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA ppd->sdma_descq_head: %u\n", head);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA ppd->sdma_descq_tail: %u\n", tail);
+	qib_dev_porterr(ppd->dd, ppd->port,
+		"SDMA sdma_descq_freecnt: %u\n", cnt);
+
+	/* print info for each entry in the descriptor queue */
+	while (head != tail) {
+		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };
+
+		descqp = &descq[head].qw[0];
+		desc[0] = le64_to_cpu(descqp[0]);
+		desc[1] = le64_to_cpu(descqp[1]);
+		flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
+		flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
+		flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
+		flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
+		flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
+		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
+		gen = (desc[0] >> 30) & 3ULL;
+		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
+		dwoffset = (desc[0] & 0x7ffULL) << 2;
+		qib_dev_porterr(ppd->dd, ppd->port,
+			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
+			 head, flags, addr, gen, dwlen, dwoffset);
+		if (++head == ppd->sdma_descq_cnt)
+			head = 0;
+	}
+
+	/* print dma descriptor indices from the TX requests */
+	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
+				 list)
+		qib_dev_porterr(ppd->dd, ppd->port,
+			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
+			txp->start_idx, txp->next_descq_idx);
+}
+
 void qib_sdma_process_event(struct qib_pportdata *ppd,
 	enum qib_sdma_events event)
 {
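The while loop in dump_sdma_state() walks the descriptor queue from head to tail, wrapping the index when it reaches the ring size. A standalone sketch of just that traversal, with made-up values for illustration:

#include <stdio.h>

#define DESCQ_CNT 8     /* hypothetical ring size */

int main(void)
{
        unsigned int head = 6, tail = 2;        /* tail has wrapped past the end */

        while (head != tail) {
                printf("would dump descriptor %u\n", head);
                if (++head == DESCQ_CNT)        /* wrap back to the start of the ring */
                        head = 0;
        }
        return 0;       /* prints descriptors 6, 7, 0, 1 */
}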
......
@@ -1753,8 +1753,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 	if (!req || !srp_claim_req(target, req, scmnd))
 		return FAILED;
 	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
-			      SRP_TSK_ABORT_TASK) == 0 ||
-	    target->transport_offline)
+			      SRP_TSK_ABORT_TASK) == 0)
 		ret = SUCCESS;
 	else if (target->transport_offline)
 		ret = FAST_IO_FAIL;
......
@@ -693,7 +693,7 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf,
 		return -ENOMEM;

 	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
-		return -EPERM;
+		return -EFAULT;

 	lbuf[sizeof(lbuf) - 1] = 0;
@@ -889,7 +889,7 @@ static ssize_t data_write(struct file *filp, const char __user *buf,
 		return -ENOMEM;

 	if (copy_from_user(ptr, buf, count)) {
-		err = -EPERM;
+		err = -EFAULT;
 		goto out;
 	}
 	dbg->in_msg = ptr;
@@ -919,7 +919,7 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
 	copy = min_t(int, count, dbg->outlen);
 	if (copy_to_user(buf, dbg->out_msg, copy))
-		return -EPERM;
+		return -EFAULT;

 	*pos += copy;
@@ -949,7 +949,7 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
 		return err;

 	if (copy_to_user(buf, &outlen, err))
-		return -EPERM;
+		return -EFAULT;

 	*pos += err;
@@ -974,7 +974,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 	dbg->outlen = 0;

 	if (copy_from_user(outlen_str, buf, count))
-		return -EPERM;
+		return -EFAULT;

 	outlen_str[7] = 0;
......
@@ -148,7 +148,6 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
 	struct mlx5_cmd_stats *stats;
 	u64 field = 0;
 	int ret;
-	int err;
 	char tbuf[22];

 	if (*pos)
@@ -161,9 +160,8 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
 	spin_unlock(&stats->lock);
 	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
 	if (ret > 0) {
-		err = copy_to_user(buf, tbuf, ret);
-		if (err)
-			return err;
+		if (copy_to_user(buf, tbuf, ret))
+			return -EFAULT;
 	}

 	*pos += ret;
@@ -418,7 +416,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
 	char tbuf[18];
 	u64 field;
 	int ret;
-	int err;

 	if (*pos)
 		return 0;
@@ -445,9 +442,8 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,

 	ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
 	if (ret > 0) {
-		err = copy_to_user(buf, tbuf, ret);
-		if (err)
-			return err;
+		if (copy_to_user(buf, tbuf, ret))
+			return -EFAULT;
 	}

 	*pos += ret;
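Besides the EPERM/EFAULT mix-up elsewhere in the series, the two hunks above also fix a sign bug: copy_to_user() returns the number of bytes it could not copy, never a negative errno, so "return err" handed a positive count back from a ->read() handler, where userspace would take it for a byte count. A minimal kernel-style sketch of the corrected read-side idiom (example_read and the constant it prints are illustrative only):

static ssize_t example_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *pos)
{
        char tbuf[22];
        int len;

        if (*pos)
                return 0;               /* single-shot read */

        len = snprintf(tbuf, sizeof(tbuf), "%llu\n", 42ULL);
        if (len > count)
                len = count;            /* never overrun the user buffer */

        /* copy_to_user() returns the bytes NOT copied, so convert a
         * nonzero remainder into -EFAULT instead of returning it. */
        if (copy_to_user(buf, tbuf, len))
                return -EFAULT;

        *pos += len;
        return len;
}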
......
@@ -212,7 +212,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 	memset(&set_out, 0, sizeof(set_out));
-	set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
+	set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
 	set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
 	err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
 			    &set_out, sizeof(set_out));
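handle_hca_cap() writes PAGE_SHIFT - 12 into the renamed field, which presumably encodes the UAR page size as a log2 relative to the 4 KB base: 0 on 4 KB-page systems, 4 with 64 KB pages. A standalone illustration of that arithmetic (the page-shift values are chosen for the example):

#include <stdio.h>

int main(void)
{
        /* PAGE_SHIFT values for 4 KB, 16 KB and 64 KB pages */
        unsigned int page_shift[] = { 12, 14, 16 };

        for (unsigned int i = 0; i < 3; i++)
                printf("PAGE_SHIFT=%u -> log_uar_page_sz=%u\n",
                       page_shift[i], page_shift[i] - 12);
        return 0;
}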
......
@@ -317,8 +317,8 @@ struct mlx5_hca_cap {
 	u8	log_max_pd;
 	u8	rsvd25;
 	u8	log_max_xrcd;
-	u8	rsvd26[40];
-	__be32	uar_page_sz;
+	u8	rsvd26[42];
+	__be16	log_uar_page_sz;
 	u8	rsvd27[28];
 	u8	log_msx_atomic_size_qp;
 	u8	rsvd28[2];
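The layout change is size-neutral: 40 reserved bytes plus a 32-bit field and 42 reserved bytes plus a 16-bit field both span 44 bytes, so the fix only moves the field two bytes later and narrows it to 16 bits, matching what the driver now treats as the firmware layout. A standalone sketch, with hypothetical struct names and GCC packing to mirror a wire format, that checks the offsets and sizes:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct cap_old {                        /* layout before the fix */
        uint8_t  rsvd26[40];
        uint32_t uar_page_sz;
} __attribute__((packed));

struct cap_new {                        /* layout after the fix */
        uint8_t  rsvd26[42];
        uint16_t log_uar_page_sz;
} __attribute__((packed));

int main(void)
{
        printf("old: offset %zu, total %zu bytes\n",
               offsetof(struct cap_old, uar_page_sz), sizeof(struct cap_old));
        printf("new: offset %zu, total %zu bytes\n",
               offsetof(struct cap_new, log_uar_page_sz), sizeof(struct cap_new));
        return 0;
}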
......