Commit 7b8274e9 authored by Jack Steiner, committed by Linus Torvalds

sgi-gru: support multiple pagesizes in GRU

Add multiple pagesize support to the GRU driver.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 27ca8a7b
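
In brief, the patch tracks which page sizes a GRU context has faulted on in a per-context mask (ts_sizeavail) and propagates that mask into the sizeavail[] fields of the context configuration handle (CCH), either immediately or via a deferred reload (ts_force_cch_reload). Below is a minimal sketch of that bookkeeping, assuming a simplified GRU_SIZEAVAIL() encoding and stand-in struct definitions; only the identifiers that appear in the diff are real, everything else is illustrative.

/* Sketch only: the real GRU_SIZEAVAIL() encoding differs; see the GRU headers. */
#define GRU_SIZEAVAIL(pageshift)	(1UL << ((pageshift) - 12))

struct cch_sketch {				/* stand-in for the real CCH */
	unsigned short sizeavail[8];		/* per-ASID pagesize mask */
};

struct gts_sketch {				/* stand-in for gru_thread_state */
	unsigned short ts_sizeavail;		/* pagesizes seen by this context */
	char ts_force_cch_reload;		/* CCH update deferred */
};

/*
 * On a TLB dropin for a page of size (1 << pageshift): remember the size.
 * If the CCH cannot be updated here (atomic context), defer the update;
 * gru_handle_user_call_os() later pushes ts_sizeavail into the CCH.
 */
static void note_pagesize(struct gts_sketch *gts, struct cch_sketch *cch,
			  int pageshift, int atomic)
{
	int i;

	if (gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))
		return;				/* size already recorded */
	gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
	if (atomic) {
		gts->ts_force_cch_reload = 1;	/* reload on the next call_os */
		return;
	}
	for (i = 0; i < 8; i++)			/* mirror the mask into the CCH */
		cch->sizeavail[i] = gts->ts_sizeavail;
}
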
@@ -360,6 +360,13 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	if (ret == -2)
 		goto failupm;
 
+	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+		if (atomic || !gru_update_cch(gts, 0)) {
+			gts->ts_force_cch_reload = 1;
+			goto failupm;
+		}
+	}
 	gru_cb_set_istatus_active(cb);
 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
 			  GRU_PAGESIZE(pageshift));
@@ -535,6 +542,14 @@ int gru_handle_user_call_os(unsigned long cb)
 		gts->ts_force_unload = 1;
 	}
 
+	/*
+	 * CCH may contain stale data if ts_force_cch_reload is set.
+	 */
+	if (gts->ts_gru && gts->ts_force_cch_reload) {
+		gru_update_cch(gts, 0);
+		gts->ts_force_cch_reload = 0;
+	}
+
 	ret = -EAGAIN;
 	cbrnum = thread_cbr_number(gts, ucbnum);
 	if (gts->ts_force_unload) {
......
@@ -72,42 +72,16 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
 	return status;
 }
 
-#if defined CONFIG_IA64
-static void cch_allocate_set_asids(
-		struct gru_context_configuration_handle *cch, int asidval)
-{
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		cch->asid[i] = (asidval++);
-#if 0
-		/* ZZZ hugepages not supported yet */
-		if (i == RGN_HPAGE)
-			cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
-		else
-#endif
-			cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
-	}
-}
-#elif defined CONFIG_X86_64
-static void cch_allocate_set_asids(
-		struct gru_context_configuration_handle *cch, int asidval)
-{
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		cch->asid[i] = asidval++;
-		cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
-			GRU_SIZEAVAIL(21);
-	}
-}
-#endif
-
 int cch_allocate(struct gru_context_configuration_handle *cch,
-		int asidval, unsigned long cbrmap,
-		unsigned long dsrmap)
+		int asidval, int sizeavail, unsigned long cbrmap,
+		unsigned long dsrmap)
 {
-	cch_allocate_set_asids(cch, asidval);
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		cch->asid[i] = (asidval++);
+		cch->sizeavail[i] = sizeavail;
+	}
 	cch->dsr_allocation_map = dsrmap;
 	cch->cbr_allocation_map = cbrmap;
 	cch->opc = CCHOP_ALLOCATE;
......
@@ -496,7 +496,7 @@ enum gru_cbr_state {
 #define GRUMAXINVAL 1024UL
 
 int cch_allocate(struct gru_context_configuration_handle *cch,
-		int asidval, unsigned long cbrmap, unsigned long dsrmap);
+		int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
 int cch_start(struct gru_context_configuration_handle *cch);
 int cch_interrupt(struct gru_context_configuration_handle *cch);
......
@@ -672,7 +672,7 @@ int gru_kservices_init(struct gru_state *gru)
 	cch->tlb_int_enable = 0;
 	cch->tfm_done_bit_enable = 0;
 	cch->unmap_enable = 1;
-	err = cch_allocate(cch, 0, cbr_map, dsr_map);
+	err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
 	if (err) {
 		gru_dbg(grudev,
 			"Unable to allocate kernel CCH: gid %d, err %d\n",
......
@@ -326,6 +326,7 @@ static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
 	gts->ts_vma = vma;
 	gts->ts_tlb_int_select = -1;
 	gts->ts_gms = gru_register_mmu_notifier();
+	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
 	if (!gts->ts_gms)
 		goto err;
@@ -552,7 +553,8 @@ static void gru_load_context(struct gru_thread_state *gts)
 		cch->tlb_int_select = gts->ts_tlb_int_select;
 	}
 	cch->tfm_done_bit_enable = 0;
-	err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
+	err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
+			gts->ts_dsr_map);
 	if (err) {
 		gru_dbg(grudev,
 			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -573,11 +575,12 @@ static void gru_load_context(struct gru_thread_state *gts)
 /*
  * Update fields in an active CCH:
  *	- retarget interrupts on local blade
+ *	- update sizeavail mask
  *	- force a delayed context unload by clearing the CCH asids. This
  *	  forces TLB misses for new GRU instructions. The context is unloaded
  *	  when the next TLB miss occurs.
  */
-static int gru_update_cch(struct gru_thread_state *gts, int int_select)
+int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 {
 	struct gru_context_configuration_handle *cch;
 	struct gru_state *gru = gts->ts_gru;
@@ -591,9 +594,11 @@ static int gru_update_cch(struct gru_thread_state *gts, int int_select)
 			goto exit;
 		if (cch_interrupt(cch))
 			BUG();
-		if (int_select >= 0) {
-			gts->ts_tlb_int_select = int_select;
-			cch->tlb_int_select = int_select;
+		if (!force_unload) {
+			for (i = 0; i < 8; i++)
+				cch->sizeavail[i] = gts->ts_sizeavail;
+			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+			cch->tlb_int_select = gru_cpu_fault_map_id();
 		} else {
 			for (i = 0; i < 8; i++)
 				cch->asid[i] = 0;
@@ -625,7 +630,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
 		gru_cpu_fault_map_id());
-	return gru_update_cch(gts, gru_cpu_fault_map_id());
+	return gru_update_cch(gts, 0);
 }
......
@@ -361,6 +361,7 @@ struct gru_thread_state {
 	long			ts_user_options;/* misc user option flags */
 	pid_t			ts_tgid_owner;	/* task that is using the
 						   context - for migration */
+	unsigned short		ts_sizeavail;	/* Pagesizes in use */
 	int			ts_tsid;	/* thread that owns the
 						   structure */
 	int			ts_tlb_int_select;/* target cpu if interrupts
@@ -374,6 +375,7 @@ struct gru_thread_state {
 						   required for contest */
 	char			ts_blade;	/* If >= 0, migrate context if
 						   ref from diferent blade */
+	char			ts_force_cch_reload;
 	char			ts_force_unload;/* force context to be unloaded
 						   after migration */
 	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
@@ -597,6 +599,7 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
 extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
 				*vma, int tsid);
 extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
+extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
 extern void gts_drop(struct gru_thread_state *gts);
 extern void gru_tgh_flush_init(struct gru_state *gru);
 extern int gru_kservices_init(struct gru_state *gru);
......