未验证 提交 a1351e5d 编写于 作者: J Jay 提交者: GitHub

Fix false hit bug after IFU timing optimization (#1367)

* fix invalidTakenFault using the wrong seqTarget

* IFU: fix oversize bug

* ctrl: mark all flushes as level.flush for frontend

This commit changes how flushes behave for frontend.

When ROB commits an instruction with a flush, we notify the frontend
of the flush without the commit.

Flushes to the frontend may be delayed by several cycles, so a commit arriving
before its flush causes errors. Thus, we make all flush reasons behave the
same as exceptions for the frontend, that is, RedirectLevel.flush.

* IFU: exclude lastTaken situation when judging beyond fetch
Co-authored-by: Yinan Xu <xuyinan@ict.ac.cn>
上级 f1c56d6c
......@@ -304,12 +304,18 @@ class CtrlBlockImp(outer: CtrlBlock)(implicit p: Parameters) extends LazyModuleI
redirectGen.io.flush := flushRedirect.valid
val frontendFlush = DelayN(flushRedirect, 5)
val frontendStage2Redirect = Mux(frontendFlush.valid, frontendFlush, redirectGen.io.stage2Redirect)
// When ROB commits an instruction with a flush, we notify the frontend of the flush without the commit.
// Flushes to frontend may be delayed by some cycles and commit before flush causes errors.
// Thus, we make all flush reasons to behave the same as exceptions for frontend.
for (i <- 0 until CommitWidth) {
io.frontend.toFtq.rob_commits(i).valid := RegNext(rob.io.commits.valid(i) && !rob.io.commits.isWalk)
io.frontend.toFtq.rob_commits(i).bits := RegNext(rob.io.commits.info(i))
val is_commit = rob.io.commits.valid(i) && !rob.io.commits.isWalk && !rob.io.flushOut.valid
io.frontend.toFtq.rob_commits(i).valid := RegNext(is_commit)
io.frontend.toFtq.rob_commits(i).bits := RegEnable(rob.io.commits.info(i), is_commit)
}
io.frontend.toFtq.stage2Redirect := Mux(frontendFlush.valid, frontendFlush, redirectGen.io.stage2Redirect)
when (frontendFlush.valid) {
io.frontend.toFtq.stage2Redirect.bits.level := RedirectLevel.flush
}
io.frontend.toFtq.stage2Redirect := frontendStage2Redirect
val pendingRedirect = RegInit(false.B)
when (stage2Redirect.valid) {
pendingRedirect := true.B
......
......@@ -310,6 +310,8 @@ class NewIFU(implicit p: Parameters) extends XSModule
val f3_has_except = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_))
val f3_pAddrs = RegEnable(next = f2_paddrs, enable = f2_fire)
val f3_oversize_target = f3_pc.last + 2.U
/*** MMIO State Machine***/
val f3_mmio_data = Reg(UInt(maxInstrLen.W))
......@@ -535,6 +537,17 @@ class NewIFU(implicit p: Parameters) extends XSModule
val wb_half_flush = wb_false_lastHalf
val wb_half_target = wb_false_target
/* false oversize */
val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth,Bool())).last && wb_pd.last.isRVC
val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth,Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
val lastTaken = wb_check_result.fixedTaken.last
val wb_false_oversize = wb_valid && wb_ftq_req.oversize && (lastIsRVC || lastIsRVI) && !lastTaken
val wb_oversize_target = RegNext(f3_oversize_target)
when(wb_valid){
assert(!wb_false_oversize || !wb_half_flush, "False oversize and false half should be exclusive. ")
}
f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid
val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
......@@ -544,11 +557,12 @@ class NewIFU(implicit p: Parameters) extends XSModule
checkFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := wb_instr_valid(i)}
checkFlushWb.bits.ftqIdx := wb_ftq_req.ftqIdx
checkFlushWb.bits.ftqOffset := wb_ftq_req.ftqOffset.bits
checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result.fixedMissPred) || wb_half_flush
checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result.fixedMissPred) || wb_half_flush || wb_false_oversize
checkFlushWb.bits.misOffset.bits := Mux(wb_half_flush, (PredictWidth - 1).U, ParallelPriorityEncoder(wb_check_result.fixedMissPred))
checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result.fixedTaken)
checkFlushWb.bits.cfiOffset.bits := ParallelPriorityEncoder(wb_check_result.fixedTaken)
checkFlushWb.bits.target := Mux(wb_half_flush, wb_half_target, wb_check_result.fixedTarget(ParallelPriorityEncoder(wb_check_result.fixedMissPred)))
checkFlushWb.bits.target := Mux(wb_false_oversize, wb_oversize_target,
Mux(wb_half_flush, wb_half_target, wb_check_result.fixedTarget(ParallelPriorityEncoder(wb_check_result.fixedMissPred))))
checkFlushWb.bits.jalTarget := wb_check_result.fixedTarget(ParallelPriorityEncoder(VecInit(wb_pd.map{pd => pd.isJal })))
checkFlushWb.bits.instrRange := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
......
......@@ -257,7 +257,7 @@ class PredChecker(implicit p: Parameters) extends XSModule with HasPdConst {
val jumpTargets = VecInit(pds.zipWithIndex.map{case(pd,i) => pc(i) + jumpOffset(i)})
targetFault := VecInit(pds.zipWithIndex.map{case(pd,i) => fixedRange(i) && instrValid(i) && (pd.isJal || pd.isBr) && takenIdx === i.U && predTaken && (predTarget =/= jumpTargets(i))})
val seqTargets = VecInit((0 until PredictWidth).map(i => pc(i) + Mux(pds(i).isRVC || !pds(i).valid, 2.U, 4.U ) ))
val seqTargets = VecInit((0 until PredictWidth).map(i => pc(i) + Mux(pds(i).isRVC || !instrValid(i), 2.U, 4.U ) ))
io.out.faultType.zipWithIndex.map{case(faultType, i) => faultType.value := Mux(jalFaultVec(i) , FaultType.jalFault ,
Mux(retFaultVec(i), FaultType.retFault ,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册