Unverified commit a469aa4b, authored by William Wang, committed by GitHub

mem: opt dcache tag error check timing (#1461)

dcache.resp.bits.miss used to depend on tag_error, which caused a severe
timing problem. That dependence is now removed.

Now, when a tag_error is detected, we:

* Set the access fault bit in the exception vec
* Do not update the miss queue. That is to say, if the access misses,
that inst may not be refilled
* Mark that inst as dataForwarded so it will not wait for a refill
* Report the error to CSR and BEU

If tag_error comes with a miss, that inst is written back from the load
queue. Otherwise, it is written back from the load pipeline. A simplified
sketch of the new scheme is given below.
Parent 91df15e5
......@@ -263,7 +263,8 @@ class DCacheWordResp(implicit p: Parameters) extends DCacheBundle
// cache miss, and failed to enter the missqueue, replay from RS is needed
val replay = Bool()
// data has been corrupted
val error = Bool()
val tag_error = Bool() // tag error
val error = Bool() // all kinds of errors, including tag error
def dump() = {
XSDebug("DCacheWordResp: data: %x id: %d miss: %b replay: %b\n",
data, id, miss, replay)
......
......@@ -127,6 +127,8 @@ class MMIOEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule
io.resp.bits.id := req.id
io.resp.bits.miss := false.B
io.resp.bits.replay := false.B
io.resp.bits.tag_error := false.B
io.resp.bits.error := false.B
when (io.resp.fire()) {
state := s_invalid
......
......@@ -215,7 +215,7 @@ class LoadPipe(id: Int)(implicit p: Parameters) extends DCacheModule with HasPer
val s2_data_error = io.read_error // banked_data_resp_word.error && !bank_conflict_slow
val s2_error = RegEnable(s1_error, s1_fire) || s2_data_error
val s2_hit = s2_tag_match && s2_has_permission && s2_hit_coh === s2_new_hit_coh || s2_tag_error
val s2_hit = s2_tag_match && s2_has_permission && s2_hit_coh === s2_new_hit_coh
// only dump these signals when they are actually valid
dump_pipeline_valids("LoadPipe s2", "s2_hit", s2_valid && s2_hit)
......@@ -236,7 +236,7 @@ class LoadPipe(id: Int)(implicit p: Parameters) extends DCacheModule with HasPer
io.miss_req.bits.req_coh := s2_hit_coh
io.miss_req.bits.replace_coh := s2_repl_coh
io.miss_req.bits.replace_tag := s2_repl_tag
io.miss_req.bits.cancel := io.lsu.s2_kill
io.miss_req.bits.cancel := io.lsu.s2_kill || s2_tag_error
// send back response
val resp = Wire(ValidIO(new DCacheWordResp))
......@@ -252,15 +252,12 @@ class LoadPipe(id: Int)(implicit p: Parameters) extends DCacheModule with HasPer
// * report a miss if bank conflict is detected
val real_miss = !s2_hit
resp.bits.miss := real_miss || io.bank_conflict_slow
if (id == 0) {
// load pipe 0 will not be influenced by bank conflict
resp.bits.replay := resp.bits.miss && (!io.miss_req.fire() || s2_nack)
} else {
// load pipe 1 need replay when there is a bank conflict
resp.bits.replay := resp.bits.miss && (!io.miss_req.fire() || s2_nack) || io.bank_conflict_slow
XSPerfAccumulate("dcache_read_bank_conflict", io.bank_conflict_slow && s2_valid)
}
resp.bits.error := s2_error && s2_hit
// load pipe need replay when there is a bank conflict
resp.bits.replay := resp.bits.miss && (!io.miss_req.fire() || s2_nack) || io.bank_conflict_slow
resp.bits.tag_error := s2_tag_error
resp.bits.error := s2_error && (s2_hit || s2_tag_error)
XSPerfAccumulate("dcache_read_bank_conflict", io.bank_conflict_slow && s2_valid)
io.lsu.resp.valid := resp.valid
io.lsu.resp.bits := resp.bits
......
......@@ -332,6 +332,7 @@ class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper {
val s2_mmio = !s2_is_prefetch && actually_mmio && !s2_exception
val s2_cache_miss = io.dcacheResp.bits.miss
val s2_cache_replay = io.dcacheResp.bits.replay
val s2_cache_tag_error = io.dcacheResp.bits.tag_error
val s2_cache_error = io.dcacheResp.bits.error
val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid
val s2_ldld_violation = io.loadViolationQueryResp.valid &&
......@@ -415,10 +416,15 @@ class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper {
// For timing reasons, sometimes we can not let
// io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward
// We use io.dataForwarded instead. It means forward logic have prepared all data needed,
// and dcache query is no longer needed.
// We use io.dataForwarded instead. It means:
// 1. Forward logic has prepared all data needed,
//    and a dcache query is no longer needed.
// 2. ... or a data cache tag error is detected; this kind of inst
//    will not update the miss queue. That is to say, if it misses, that inst
//    may not be refilled.
//    Such an inst will be written back from the load queue.
io.dataForwarded := s2_cache_miss && fullForward && !s2_exception && !s2_forward_fail
io.dataForwarded := s2_cache_miss && fullForward && !s2_exception && !s2_forward_fail || // case 1
io.csrCtrl.cache_error_enable && s2_cache_tag_error // case 2
// io.out.bits.forwardX will be send to lq
io.out.bits.forwardMask := forwardMask
// data retrieved from dcache is also included in io.out.bits.forwardData
......
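
To show how case 2 above is meant to be consumed downstream, here is a heavily simplified, hypothetical sketch of a load queue writeback condition. The signal names and the condition itself are assumptions for illustration; the real LoadQueue logic is more involved.

```scala
// Hypothetical sketch (assumed names, not the actual LoadQueue code) of how
// dataForwarded lets a tag-error load write back without waiting for a refill.
import chisel3._

class LqWritebackSketch extends Module {
  val io = IO(new Bundle {
    val miss          = Input(Bool())  // dcache reported a miss for this entry
    val dataForwarded = Input(Bool())  // case 1 (full forward) or case 2 (tag error)
    val refilled      = Input(Bool())  // refill data has arrived from the miss queue
    val can_writeback = Output(Bool())
  })

  // A missing entry normally waits in the load queue for its refill. With
  // dataForwarded set, it may write back immediately; a tag-error load
  // (case 2) then raises an access fault through its exception vector
  // instead of returning refilled data.
  io.can_writeback := io.miss && (io.refilled || io.dataForwarded)
}
```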