Commit c4b4d16e authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Make send buffers available for kernel if not allocated to user

A fixed partitioning of send buffers is determined at driver load time
for user processes and kernel use.  Since send buffers are a scarce
resource, it makes sense to allow the kernel to use the buffers if they
are not in use by a user process.

Also, eliminate code duplication for ipath_force_pio_avail_update().
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Parent 4330e4da
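
Before the diff, a sketch of the mechanism the commit message describes. The driver keeps two shadow bits per send buffer (a generation bit and a busy bit), and this commit adds a per-device ipath_pioavailkernel bitmap recording which buffers the kernel may allocate; handing buffers to a user port simply force-marks them busy in the shadow so the allocator skips them. Below is a minimal, self-contained illustration in plain C, not driver code: the struct, names, and single-64-bit-word layout are simplified assumptions, whereas the real ipath_chg_pioavailkernel() covers an 8-word bitmap under ipath_pioavail_lock using the kernel's __set_bit()/__clear_bit() helpers.

    #include <stdint.h>
    #include <stdio.h>

    /* Two shadow bits per send buffer: bit 2*i is the generation bit and
     * bit 2*i + 1 is the busy bit the allocator tests before taking buffer i.
     * Mirrors INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT in the driver. */
    #define BUSY_SHIFT 1

    /* Hypothetical cut-down device state: one 64-bit word covers 32 buffers. */
    struct dev_shadow {
            uint64_t pioavailshadow;   /* the copy the allocator scans */
            uint64_t pioavailkernel;   /* which buffers the kernel may take */
    };

    /* Sketch of what ipath_chg_pioavailkernel() does: giving buffers to a
     * user port (avail == 0) force-sets their busy bits in the shadow so the
     * allocator skips them; returning them (avail == 1) clears that state. */
    static void chg_pioavailkernel(struct dev_shadow *dev, unsigned start,
                                   unsigned len, int avail)
    {
            for (unsigned bit = 2 * start; bit < 2 * (start + len); bit += 2) {
                    if (avail) {
                            dev->pioavailshadow &= ~(1ULL << (bit + BUSY_SHIFT));
                            dev->pioavailkernel |= 1ULL << bit;
                    } else {
                            dev->pioavailshadow |= 1ULL << (bit + BUSY_SHIFT);
                            dev->pioavailkernel &= ~(1ULL << bit);
                    }
            }
    }

    int main(void)
    {
            struct dev_shadow dev = { 0, 0 };

            chg_pioavailkernel(&dev, 0, 32, 1);  /* driver load: all to kernel */
            chg_pioavailkernel(&dev, 16, 8, 0);  /* port open: user takes 16-23 */
            printf("shadow %016llx kernel %016llx\n",
                   (unsigned long long)dev.pioavailshadow,
                   (unsigned long long)dev.pioavailkernel);
            chg_pioavailkernel(&dev, 16, 8, 1);  /* port close: back to kernel */
            return 0;
    }

The try_alloc_port() and ipath_close() hunks in the diff below call the real ipath_chg_pioavailkernel() with avail == 0 and avail == 1 respectively, which is exactly the pairing this sketch mimics.
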
@@ -439,7 +439,9 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
 		goto bail;
 	}

-	piobuf = ipath_getpiobuf(dd, &pbufn);
+	plen >>= 2;		/* in dwords */
+
+	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
 	if (!piobuf) {
 		ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
 			   dd->ipath_unit);
@@ -449,8 +451,6 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
 	/* disarm it just to be extra sure */
 	ipath_disarm_piobufs(dd, pbufn, 1);

-	plen >>= 2;		/* in dwords */
-
 	if (ipath_debug & __IPATH_PKTDBG)
 		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
 			   dd->ipath_unit, plen - 1, pbufn);
...
@@ -317,7 +317,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
 	u32 *addr;
 	u64 msecs, emsecs;

-	piobuf = ipath_getpiobuf(dd, &pbnum);
+	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
 	if (!piobuf) {
 		dev_info(&dd->pcidev->dev,
 			"No PIObufs for checking perf, skipping\n");
@@ -836,20 +836,8 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 	}

-	/*
-	 * Disable PIOAVAILUPD, then re-enable, reading scratch in
-	 * between. This seems to avoid a chip timing race that causes
-	 * pioavail updates to memory to stop. We xor as we don't
-	 * know the state of the bit when we're called.
-	 */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	/* on some older chips, update may not happen after cancel */
+	ipath_force_pio_avail_update(dd);
 }

 /**
@@ -1314,7 +1302,6 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
 	 * happens when all buffers are in use, so only cpu overhead, not
 	 * latency or bandwidth is affected.
 	 */
-#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
 	if (!dd->ipath_pioavailregs_dma) {
 		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
 		return;
@@ -1359,7 +1346,7 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
 			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
 		else
 			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
-		pchg = _IPATH_ALL_CHECKBITS &
+		pchg = dd->ipath_pioavailkernel[i] &
 			~(dd->ipath_pioavailshadow[i] ^ piov);
 		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
 		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
@@ -1410,27 +1397,63 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
 	return ret;
 }

-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @pbufnum: the buffer number is placed here
+/*
+ * debugging code and stats updates if no pio buffers available.
+ */
+static noinline void no_pio_bufs(struct ipath_devdata *dd)
+{
+	unsigned long *shadow = dd->ipath_pioavailshadow;
+	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
+
+	dd->ipath_upd_pio_shadow = 1;
+
+	/*
+	 * not atomic, but if we lose a stat count in a while, that's OK
+	 */
+	ipath_stats.sps_nopiobufs++;
+	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
+		ipath_dbg("%u pio sends with no bufavail; dmacopy: "
+			"%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n",
+			dd->ipath_consec_nopiobuf,
+			(unsigned long long) le64_to_cpu(dma[0]),
+			(unsigned long long) le64_to_cpu(dma[1]),
+			(unsigned long long) le64_to_cpu(dma[2]),
+			(unsigned long long) le64_to_cpu(dma[3]),
+			shadow[0], shadow[1], shadow[2], shadow[3]);
+		/*
+		 * 4 buffers per byte, 4 registers above, cover rest
+		 * below
+		 */
+		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
+		    (sizeof(shadow[0]) * 4 * 4))
+			ipath_dbg("2nd group: dmacopy: %llx %llx "
+				  "%llx %llx; shadow: %lx %lx %lx %lx\n",
+				  (unsigned long long)le64_to_cpu(dma[4]),
+				  (unsigned long long)le64_to_cpu(dma[5]),
+				  (unsigned long long)le64_to_cpu(dma[6]),
+				  (unsigned long long)le64_to_cpu(dma[7]),
+				  shadow[4], shadow[5], shadow[6],
+				  shadow[7]);
+	}
+}
+
+/*
+ * common code for normal driver pio buffer allocation, and reserved
+ * allocation.
  *
  * do appropriate marking as busy, etc.
  * returns buffer number if one found (>=0), negative number is error.
- * Used by ipath_layer_send
  */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
+static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
+					u32 *pbufnum, u32 first, u32 last, u32 firsti)
 {
-	int i, j, starti, updated = 0;
-	unsigned piobcnt, iter;
+	int i, j, updated = 0;
+	unsigned piobcnt;
 	unsigned long flags;
 	unsigned long *shadow = dd->ipath_pioavailshadow;
 	u32 __iomem *buf;

-	piobcnt = (unsigned)(dd->ipath_piobcnt2k
-			     + dd->ipath_piobcnt4k);
-	starti = dd->ipath_lastport_piobuf;
-	iter = piobcnt - starti;
-
+	piobcnt = last - first;
 	if (dd->ipath_upd_pio_shadow) {
 		/*
 		 * Minor optimization. If we had no buffers on last call,
@@ -1438,12 +1461,10 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
 		 * if no buffers were updated, to be paranoid
 		 */
 		ipath_update_pio_bufs(dd);
-		/* we scanned here, don't do it at end of scan */
-		updated = 1;
-		i = starti;
+		updated++;
+		i = first;
 	} else
-		i = dd->ipath_lastpioindex;
+		i = firsti;

 rescan:
 	/*
	 * while test_and_set_bit() is atomic, we do that and then the
@@ -1451,103 +1472,140 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
 	 * of the remaining armlaunch errors.
 	 */
 	spin_lock_irqsave(&ipath_pioavail_lock, flags);
-	for (j = 0; j < iter; j++, i++) {
-		if (i >= piobcnt)
-			i = starti;
-		/*
-		 * To avoid bus lock overhead, we first find a candidate
-		 * buffer, then do the test and set, and continue if that
-		 * fails.
-		 */
-		if (test_bit((2 * i) + 1, shadow) ||
-		    test_and_set_bit((2 * i) + 1, shadow))
+	for (j = 0; j < piobcnt; j++, i++) {
+		if (i >= last)
+			i = first;
+		if (__test_and_set_bit((2 * i) + 1, shadow))
 			continue;
 		/* flip generation bit */
-		change_bit(2 * i, shadow);
+		__change_bit(2 * i, shadow);
 		break;
 	}
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

-	if (j == iter) {
-		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
-
-		/*
-		 * first time through; shadow exhausted, but may be real
-		 * buffers available, so go see; if any updated, rescan
-		 * (once)
-		 */
+	if (j == piobcnt) {
 		if (!updated) {
+			/*
+			 * first time through; shadow exhausted, but may be
+			 * buffers available, try an update and then rescan.
+			 */
 			ipath_update_pio_bufs(dd);
-			updated = 1;
-			i = starti;
+			updated++;
+			i = first;
 			goto rescan;
-		}
-		dd->ipath_upd_pio_shadow = 1;
-		/*
-		 * not atomic, but if we lose one once in a while, that's OK
-		 */
-		ipath_stats.sps_nopiobufs++;
-		if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-			ipath_dbg(
-				"%u pio sends with no bufavail; dmacopy: "
-				"%llx %llx %llx %llx; shadow: "
-				"%lx %lx %lx %lx\n",
-				dd->ipath_consec_nopiobuf,
-				(unsigned long long) le64_to_cpu(dma[0]),
-				(unsigned long long) le64_to_cpu(dma[1]),
-				(unsigned long long) le64_to_cpu(dma[2]),
-				(unsigned long long) le64_to_cpu(dma[3]),
-				shadow[0], shadow[1], shadow[2],
-				shadow[3]);
+		} else if (updated == 1 && piobcnt <=
+			((dd->ipath_sendctrl
+			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
+			INFINIPATH_S_UPDTHRESH_MASK)) {
 			/*
-			 * 4 buffers per byte, 4 registers above, cover rest
-			 * below
+			 * for chips supporting and using the update
+			 * threshold we need to force an update of the
+			 * in-memory copy if the count is less than the
+			 * threshold, then check one more time.
 			 */
-			if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
-			    (sizeof(shadow[0]) * 4 * 4))
-				ipath_dbg("2nd group: dmacopy: %llx %llx "
-					  "%llx %llx; shadow: %lx %lx "
-					  "%lx %lx\n",
-					  (unsigned long long)
-					  le64_to_cpu(dma[4]),
-					  (unsigned long long)
-					  le64_to_cpu(dma[5]),
-					  (unsigned long long)
-					  le64_to_cpu(dma[6]),
-					  (unsigned long long)
-					  le64_to_cpu(dma[7]),
-					  shadow[4], shadow[5],
-					  shadow[6], shadow[7]);
+			ipath_force_pio_avail_update(dd);
+			ipath_update_pio_bufs(dd);
+			updated++;
+			i = first;
+			goto rescan;
 		}
+		no_pio_bufs(dd);
 		buf = NULL;
-		goto bail;
+	} else {
+		if (i < dd->ipath_piobcnt2k)
+			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
+					       i * dd->ipath_palign);
+		else
+			buf = (u32 __iomem *)
+				(dd->ipath_pio4kbase +
+				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
+		if (pbufnum)
+			*pbufnum = i;
 	}

-	/*
-	 * set next starting place. Since it's just an optimization,
-	 * it doesn't matter who wins on this, so no locking
-	 */
-	dd->ipath_lastpioindex = i + 1;
-	if (dd->ipath_upd_pio_shadow)
-		dd->ipath_upd_pio_shadow = 0;
-	if (dd->ipath_consec_nopiobuf)
-		dd->ipath_consec_nopiobuf = 0;
-	if (i < dd->ipath_piobcnt2k)
-		buf = (u32 __iomem *) (dd->ipath_pio2kbase +
-				       i * dd->ipath_palign);
-	else
-		buf = (u32 __iomem *)
-			(dd->ipath_pio4kbase +
-			 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-	ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
-		   i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
-	if (pbufnum)
-		*pbufnum = i;
-
-bail:
+	return buf;
+}
+
+/**
+ * ipath_getpiobuf - find an available pio buffer
+ * @dd: the infinipath device
+ * @plen: the size of the PIO buffer needed in 32-bit words
+ * @pbufnum: the buffer number is placed here
+ */
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
+{
+	u32 __iomem *buf;
+	u32 pnum, nbufs;
+	u32 first, lasti;
+
+	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
+		first = dd->ipath_piobcnt2k;
+		lasti = dd->ipath_lastpioindexl;
+	} else {
+		first = 0;
+		lasti = dd->ipath_lastpioindex;
+	}
+	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
+	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
+
+	if (buf) {
+		/*
+		 * Set next starting place. It's just an optimization,
+		 * it doesn't matter who wins on this, so no locking
+		 */
+		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
+			dd->ipath_lastpioindexl = pnum + 1;
+		else
+			dd->ipath_lastpioindex = pnum + 1;
+		if (dd->ipath_upd_pio_shadow)
+			dd->ipath_upd_pio_shadow = 0;
+		if (dd->ipath_consec_nopiobuf)
+			dd->ipath_consec_nopiobuf = 0;
+		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
+			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
+		if (pbufnum)
+			*pbufnum = pnum;
+	}
+
 	return buf;
 }

+/**
+ * ipath_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the infinipath device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ */
+void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
+			      unsigned len, int avail)
+{
+	unsigned long flags;
+	unsigned end;
+
+	/* There are two bits per send buffer (busy and generation) */
+	start *= 2;
+	len *= 2;
+	end = start + len;
+
+	/* Set or clear the generation bits. */
+	spin_lock_irqsave(&ipath_pioavail_lock, flags);
+	while (start < end) {
+		if (avail) {
+			__clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
+				dd->ipath_pioavailshadow);
+			__set_bit(start, dd->ipath_pioavailkernel);
+		} else {
+			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
+				dd->ipath_pioavailshadow);
+			__clear_bit(start, dd->ipath_pioavailkernel);
+		}
+		start += 2;
+	}
+	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+}
+
 /**
  * ipath_create_rcvhdrq - create a receive header queue
  * @dd: the infinipath device
@@ -1664,6 +1722,30 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 }

+/*
+ * Force an update of in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons. We read the scratch register
+ * to make it highly likely that the update will have happened by the
+ * time we return. If already off (as in cancel_sends above), this
+ * routine is a nop, on the assumption that the caller will "do the
+ * right thing".
+ */
+void ipath_force_pio_avail_update(struct ipath_devdata *dd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			dd->ipath_sendctrl);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	}
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+}
+
 static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
 				int linitcmd)
 {
...
@@ -1603,6 +1603,9 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
 		port_fp(fp) = pd;
 		pd->port_pid = current->pid;
 		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+		ipath_chg_pioavailkernel(dd,
+			dd->ipath_pbufsport * (pd->port_port - 1),
+			dd->ipath_pbufsport, 0);
 		ipath_stats.sps_ports++;
 		ret = 0;
 	} else
@@ -2081,6 +2084,7 @@ static int ipath_close(struct inode *in, struct file *fp)

 		i = dd->ipath_pbufsport * (port - 1);
 		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+		ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);

 		dd->ipath_f_clear_tids(dd, pd->port_port);

@@ -2145,21 +2149,6 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
 	return ret;
 }

-static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	return 0;
-}
-
 static ssize_t ipath_write(struct file *fp, const char __user *data,
 			   size_t count, loff_t *off)
 {
@@ -2304,7 +2293,7 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
 				cmd.cmd.slave_mask_addr);
 		break;
 	case IPATH_CMD_PIOAVAILUPD:
-		ret = ipath_force_pio_avail_update(pd->port_dd);
+		ipath_force_pio_avail_update(pd->port_dd);
 		break;
 	case IPATH_CMD_POLL_TYPE:
 		pd->poll_type = cmd.cmd.poll_type;
...
@@ -521,7 +521,9 @@ static void enable_chip(struct ipath_devdata *dd,
 			pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
 		else
 			pioavail = dd->ipath_pioavailregs_dma[i];
-		dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
+		dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
+			(~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
 	}

 	/* can get counters, stats, etc. */
 	dd->ipath_flags |= IPATH_PRESENT;
@@ -743,7 +745,9 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 		ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
 			  dd->ipath_pbufsport, val32);
 	}
-	dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
+	dd->ipath_lastpioindex = 0;
+	dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
+	ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
 	ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
 		   "each for %u user ports\n", kpiobufs,
 		   piobufs, dd->ipath_pbufsport, uports);
...
@@ -804,7 +804,6 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 {
 	int i, im;
 	u64 val;
-	unsigned long flags;

 	/* disable error interrupts, to avoid confusion */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -823,14 +822,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 			 dd->ipath_control);

 	/* ensure pio avail updates continue */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	ipath_force_pio_avail_update(dd);

 	/*
 	 * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -842,7 +834,9 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 			i ^ 1 : i;
 		val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
 		dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
-		dd->ipath_pioavailshadow[i] = val;
+		dd->ipath_pioavailshadow[i] = val |
+			(~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
 	}

 	/*
...
@@ -191,6 +191,9 @@ struct ipath_skbinfo {
 	dma_addr_t phys;
 };

+/* max dwords in small buffer packet */
+#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
+
 /*
  * Possible IB config parameters for ipath_f_get/set_ib_cfg()
  */
@@ -366,6 +369,7 @@ struct ipath_devdata {
 	 * get to multiple devices
 	 */
 	u32 ipath_lastpioindex;
+	u32 ipath_lastpioindexl;
 	/* max length of freezemsg */
 	u32 ipath_freezelen;
 	/*
@@ -453,6 +457,8 @@ struct ipath_devdata {
 	 * init time.
 	 */
 	unsigned long ipath_pioavailshadow[8];
+	/* bitmap of send buffers available for the kernel to use with PIO. */
+	unsigned long ipath_pioavailkernel[8];
 	/* shadow of kr_gpio_out, for rmw ops */
 	u64 ipath_gpio_out;
 	/* shadow the gpio mask register */
@@ -869,13 +875,16 @@ void ipath_hol_event(unsigned long);
 /* free up any allocated data at closes */
 void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
+void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
+			      unsigned len, int avail);
 void ipath_init_iba6120_funcs(struct ipath_devdata *);
 void ipath_init_iba6110_funcs(struct ipath_devdata *);
 void ipath_get_eeprom_info(struct ipath_devdata *);
 int ipath_update_eeprom_log(struct ipath_devdata *dd);
 void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
+void ipath_force_pio_avail_update(struct ipath_devdata *);
 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);

 /*
...
@@ -66,6 +66,8 @@

 /* kr_sendctrl bits */
 #define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
+#define INFINIPATH_S_UPDTHRESH_SHIFT 24
+#define INFINIPATH_S_UPDTHRESH_MASK 0x1f

 #define IPATH_S_ABORT		0
 #define IPATH_S_PIOINTBUFAVAIL	1
...
@@ -875,7 +875,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
 	unsigned flush_wc;
 	int ret;

-	piobuf = ipath_getpiobuf(dd, NULL);
+	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
 		ret = -EBUSY;
 		goto bail;
...