Commit 99f7c229 authored by Jack Steiner, committed by Linus Torvalds

gru: allow users to specify gru chiplet 3

This patch builds on the infrastructure introduced in the patches that
allow user specification of GRU blades & chiplets for context allocation.

This patch simplifies the algorithms for migrating GRU contexts between
blades.

No new functionality is introduced.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 55484c45
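The heart of the change is that gru_update_cch() loses its dual role: the force_unload mode, which cleared the CCH ASIDs to schedule a delayed context unload, is deleted, and the function now only refreshes an active CCH. Below is a minimal, self-contained userspace model of the post-patch behavior. It is a sketch, not the driver code: cch_model, gts_model, fault_map_id(), and the values in main() are illustrative stand-ins for the kernel's CCH handle, gru_thread_state, and gru_cpu_fault_map_id().

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the driver's CCH and per-thread context state.
 * Field names mirror the patch, but this is a model, not kernel code. */
enum { GRU_OPT_MISS_FMM_POLL = 1, GRU_OPT_MISS_FMM_INTR = 2 };

struct cch_model {
	int sizeavail[8];
	int tlb_int_select;
	int tfm_fault_bit_enable;
};

struct gts_model {
	int ts_sizeavail;
	int ts_tlb_int_select;
	int ts_user_options;
};

/* Stand-in for gru_cpu_fault_map_id(): the fault-interrupt id of the
 * CPU the context now runs on. */
static int fault_map_id(void)
{
	return 3;
}

/* Shape of gru_update_cch() after the patch: refresh the interrupt
 * target, the sizeavail mask, and the fault-bit enable of an active
 * CCH. The old force_unload branch is gone. */
static void update_cch(struct gts_model *gts, struct cch_model *cch)
{
	int i;

	for (i = 0; i < 8; i++)
		cch->sizeavail[i] = gts->ts_sizeavail;
	gts->ts_tlb_int_select = fault_map_id();
	cch->tlb_int_select = fault_map_id();
	cch->tfm_fault_bit_enable =
		(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL ||
		 gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
}

int main(void)
{
	struct gts_model gts = { .ts_sizeavail = 0x30,
				 .ts_user_options = GRU_OPT_MISS_FMM_INTR };
	struct cch_model cch;

	memset(&cch, 0, sizeof(cch));
	update_cch(&gts, &cch);
	printf("tlb_int_select=%d fault_bit_enable=%d\n",
	       cch.tlb_int_select, cch.tfm_fault_bit_enable);
	return 0;
}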
@@ -362,7 +362,7 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
 		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
-		if (atomic || !gru_update_cch(gts, 0)) {
+		if (atomic || !gru_update_cch(gts)) {
 			gts->ts_force_cch_reload = 1;
 			goto failupm;
 		}
@@ -553,14 +553,12 @@ int gru_handle_user_call_os(unsigned long cb)
 	 */
 	if (gts->ts_gru && gts->ts_force_cch_reload) {
 		gts->ts_force_cch_reload = 0;
-		gru_update_cch(gts, 0);
+		gru_update_cch(gts);
 	}
 
 	ret = -EAGAIN;
 	cbrnum = thread_cbr_number(gts, ucbnum);
-	if (gts->ts_force_unload) {
-		gru_unload_context(gts, 1);
-	} else if (gts->ts_gru) {
+	if (gts->ts_gru) {
 		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
 		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
 				gts->ts_ctxnum, ucbnum);
...
@@ -551,7 +551,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	if (cch_deallocate(cch))
 		BUG();
-	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
 	unlock_cch_handle(cch);
 
 	gru_free_gru_context(gts);
@@ -624,11 +623,8 @@ void gru_load_context(struct gru_thread_state *gts)
  * Update fields in an active CCH:
  *	- retarget interrupts on local blade
  *	- update sizeavail mask
- *	- force a delayed context unload by clearing the CCH asids. This
- *	  forces TLB misses for new GRU instructions. The context is unloaded
- *	  when the next TLB miss occurs.
  */
-int gru_update_cch(struct gru_thread_state *gts, int force_unload)
+int gru_update_cch(struct gru_thread_state *gts)
 {
 	struct gru_context_configuration_handle *cch;
 	struct gru_state *gru = gts->ts_gru;
@@ -642,21 +638,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 			goto exit;
 		if (cch_interrupt(cch))
 			BUG();
-		if (!force_unload) {
-			for (i = 0; i < 8; i++)
-				cch->sizeavail[i] = gts->ts_sizeavail;
-			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
-			cch->tlb_int_select = gru_cpu_fault_map_id();
-			cch->tfm_fault_bit_enable =
-			  (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
-			    || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
-		} else {
-			for (i = 0; i < 8; i++)
-				cch->asid[i] = 0;
-			cch->tfm_fault_bit_enable = 0;
-			cch->tlb_int_enable = 0;
-			gts->ts_force_unload = 1;
-		}
+		for (i = 0; i < 8; i++)
+			cch->sizeavail[i] = gts->ts_sizeavail;
+		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+		cch->tlb_int_select = gru_cpu_fault_map_id();
+		cch->tfm_fault_bit_enable =
+		  (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+		    || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
 		if (cch_start(cch))
 			BUG();
 		ret = 1;
@@ -681,7 +669,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
 		gru_cpu_fault_map_id());
 
-	return gru_update_cch(gts, 0);
+	return gru_update_cch(gts);
 }
 
 /*
...
@@ -382,8 +382,6 @@ struct gru_thread_state {
 	char			ts_blade;	/* If >= 0, migrate context if
 						   ref from diferent blade */
 	char			ts_force_cch_reload;
-	char			ts_force_unload;/* force context to be unloaded
-						   after migration */
 	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
 							 allocated CB */
 	int			ts_data_valid;	/* Indicates if ts_gdata has
@@ -636,7 +634,7 @@ extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
 extern void gru_load_context(struct gru_thread_state *gts);
 extern void gru_steal_context(struct gru_thread_state *gts);
 extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
-extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
+extern int gru_update_cch(struct gru_thread_state *gts);
 extern void gts_drop(struct gru_thread_state *gts);
 extern void gru_tgh_flush_init(struct gru_state *gru);
 extern int gru_kservices_init(void);
...
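With the delayed-unload mechanism removed, the call-OS path no longer needs to test two flags before servicing a request. A minimal sketch of the control-flow collapse in gru_handle_user_call_os(), using a hypothetical struct ctx in place of gru_thread_state and puts() in place of the real unload and service logic:

#include <stdio.h>

/* Hypothetical context: ts_gru != 0 means "loaded on a GRU";
 * ts_force_unload is the field this patch deletes. */
struct ctx {
	int ts_gru;
	int ts_force_unload;
};

/* Pre-patch shape: a context flagged for delayed unload was unloaded
 * here instead of being serviced. */
static void call_os_old(struct ctx *c)
{
	if (c->ts_force_unload)
		puts("unload context now (delayed unload fires)");
	else if (c->ts_gru)
		puts("service request against loaded context");
}

/* Post-patch shape: only the "context is loaded" case remains. */
static void call_os_new(struct ctx *c)
{
	if (c->ts_gru)
		puts("service request against loaded context");
}

int main(void)
{
	struct ctx c = { .ts_gru = 1, .ts_force_unload = 0 };

	call_os_old(&c);
	call_os_new(&c);
	return 0;
}

Dropping the flag also removes the "ts_force_unload locked by CCH lock" comment in gru_unload_context(), since there is no longer asynchronous unload state to protect under that lock.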