commit 4a7a17c1 authored by Jack Steiner, committed by Linus Torvalds

gru: support instruction completion interrupts

Add support for interrupts generated by GRU instruction completion.
Previously, the only interrupts were for TLB misses.  The hardware also
supports interrupts on instruction completion.  This will be supported for
instructions issued by the kernel.
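
For illustration only, not part of this patch: a minimal sketch of the
caller-side pattern this enables. The bs_async_wq completion and the
complete()/wait_for_completion() pairing are the ones visible in the diff
below; gru_issue_async() is a hypothetical stand-in for the real
instruction-issue path.

    #include <linux/completion.h>

    /* Hypothetical caller; gru_issue_async() is a stand-in, not a real API. */
    static void issue_and_wait(struct gru_blade_state *bs, void *cb)
    {
    	DECLARE_COMPLETION_ONSTACK(done);

    	bs->bs_async_wq = &done;	/* gru_intr() will complete() this */
    	gru_issue_async(cb);		/* hypothetical: issue the GRU instruction */
    	wait_for_completion(&done);	/* sleep until the CB interrupt fires */
    	bs->bs_async_wq = NULL;
    }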
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 940229b9
@@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
  * the GRU, atomic operations must be used to clear bits.
  */
 static void get_clear_fault_map(struct gru_state *gru,
-					struct gru_tlb_fault_map *map)
+					struct gru_tlb_fault_map *imap,
+					struct gru_tlb_fault_map *dmap)
 {
 	unsigned long i, k;
 	struct gru_tlb_fault_map *tfm;
@@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
 		k = tfm->fault_bits[i];
 		if (k)
 			k = xchg(&tfm->fault_bits[i], 0UL);
-		map->fault_bits[i] = k;
+		imap->fault_bits[i] = k;
+		k = tfm->done_bits[i];
+		if (k)
+			k = xchg(&tfm->done_bits[i], 0UL);
+		dmap->fault_bits[i] = k;
 	}

 	/*
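
Note (illustrative, not in the patch): the done-bit harvesting added above
reuses the driver's existing read-and-clear idiom, shown standalone below.
Both the GRU hardware and the CPU write these map words, so a word is
snapped and zeroed with an atomic xchg; the plain read first skips the
locked bus cycle when the word is already zero.

    /* Sketch of the snap-and-clear idiom used in get_clear_fault_map(). */
    static unsigned long snap_and_clear(unsigned long *word)
    {
    	unsigned long bits = *word;	/* cheap unlocked test first */

    	if (bits)			/* only pay for xchg when bits are set */
    		bits = xchg(word, 0UL);
    	return bits;
    }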
@@ -449,7 +454,7 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 irqreturn_t gru_intr(int irq, void *dev_id)
 {
 	struct gru_state *gru;
-	struct gru_tlb_fault_map map;
+	struct gru_tlb_fault_map imap, dmap;
 	struct gru_thread_state *gts;
 	struct gru_tlb_fault_handle *tfh = NULL;
 	int cbrnum, ctxnum;
@@ -462,11 +467,19 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 			raw_smp_processor_id(), irq);
 		return IRQ_NONE;
 	}

-	get_clear_fault_map(gru, &map);
-	gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
-		map.fault_bits[0]);
+	get_clear_fault_map(gru, &imap, &dmap);
+	gru_dbg(grudev,
+		"irq %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+		irq, gru->gs_gid, imap.fault_bits[0], imap.fault_bits[1],
+		dmap.fault_bits[0], dmap.fault_bits[1]);
+
+	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
+		complete(gru->gs_blade->bs_async_wq);
+		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
+			gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+	}

-	for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
+	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
 		tfh = get_tfh_by_index(gru, cbrnum);
 		prefetchw(tfh);	/* Helps on hdw, required for emulator */
...
@@ -572,8 +572,12 @@ void gru_load_context(struct gru_thread_state *gts)

 	if (is_kernel_context(gts)) {
 		cch->unmap_enable = 1;
+		cch->tfm_done_bit_enable = 1;
+		cch->cb_int_enable = 1;
 	} else {
 		cch->unmap_enable = 0;
+		cch->tfm_done_bit_enable = 0;
+		cch->cb_int_enable = 0;
 		asid = gru_load_mm_tracker(gru, gts);
 		for (i = 0; i < 8; i++) {
 			cch->asid[i] = asid + i;
...
@@ -462,6 +462,11 @@ struct gru_blade_state {
 	struct rw_semaphore	bs_kgts_sema;	/* lock for kgts */
 	struct gru_thread_state	*bs_kgts;	/* GTS for kernel use */

+	/* ---- the following are used for managing kernel async GRU CBRs --- */
+	int			bs_async_dsr_bytes;	/* DSRs for async */
+	int			bs_async_cbrs;		/* CBRs AU for async */
+	struct completion	*bs_async_wq;
+
 	/* ---- the following are protected by the bs_lock spinlock ---- */
 	spinlock_t		bs_lock;	/* lock used for
 						   stealing contexts */
...