Unverified commit fa9ac9b6, authored by William Wang, committed by GitHub

dcache: fix plru update logic (#1921)

This commit fixes the dcache PLRU update logic.

In the previous version, when a cacheline that is not in L1 is accessed, a replace way
is picked and that way is touched in the L1 PLRU. However, if the same missed cacheline
is accessed again before the L1 refill arrives, L1 picks a new replace way and touches
the PLRU with it on every such access. This leaves the PLRU state in a mess.

To fix this, an extra condition check is added to the PLRU update for missed loads.
The PLRU is now updated on:

* load/store hit (touch hit way)
* load/store primary miss (touch replacement way)
* load/store secondary miss (touch replacement way)

`updateReplaceOn2ndmiss` is enabled by default. Disable it if timing becomes a problem.
Parent 33d13d4b
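As a rough illustration of the rule above, here is a minimal sketch of the touch selection (hypothetical module and signal names, not the staged s1-s3 pipeline code in the diff below): the hit way is touched on a hit, the freshly picked victim on a primary miss, and the way already recorded in the matching MSHR on a secondary (merged) miss.

```scala
import chisel3._
import chisel3.util._

// Minimal sketch of the touch-selection rule (hypothetical names).
class PlruTouchSel(nWays: Int, updateOn2ndMiss: Boolean = true) extends Module {
  val io = IO(new Bundle {
    val reqValid   = Input(Bool())                  // a load/store reaches the update stage
    val hit        = Input(Bool())                  // tag hit in L1
    val missMerged = Input(Bool())                  // miss merged into an existing MSHR (secondary miss)
    val hitWay     = Input(UInt(log2Ceil(nWays).W)) // way that hit
    val newVictim  = Input(UInt(log2Ceil(nWays).W)) // victim picked by the replacer for this access
    val mshrWayEn  = Input(UInt(nWays.W))           // one-hot way recorded at MSHR allocate time
    val touch      = ValidIO(new Bundle {
      val way = UInt(log2Ceil(nWays).W)
    })
  })

  // Touch on hit, on a primary miss, and (optionally) on a secondary miss.
  io.touch.valid := io.reqValid &&
    (io.hit || !io.missMerged || updateOn2ndMiss.B)

  // Hit: touch the hit way.
  // Primary miss: touch the newly selected replace way.
  // Secondary miss: touch the way chosen when the MSHR was allocated.
  io.touch.bits.way := Mux(io.hit, io.hitWay,
    Mux(!io.missMerged, io.newVictim, OHToUInt(io.mshrWayEn)))
}
```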
@@ -43,6 +43,7 @@ case class DCacheParameters
tagECC: Option[String] = None,
dataECC: Option[String] = None,
replacer: Option[String] = Some("setplru"),
updateReplaceOn2ndmiss: Boolean = true,
nMissEntries: Int = 1,
nProbeEntries: Int = 1,
nReleaseEntries: Int = 1,
@@ -865,7 +866,8 @@ class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParame
missReqArb.io.in(MainPipeMissReqPort) <> mainPipe.io.miss_req
for (w <- 0 until LoadPipelineWidth) { missReqArb.io.in(w + 1) <> ldu(w).io.miss_req }
for (w <- 0 until LoadPipelineWidth) { ldu(w).io.miss_resp.id := missQueue.io.resp.id }
for (w <- 0 until LoadPipelineWidth) { ldu(w).io.miss_resp := missQueue.io.resp }
mainPipe.io.miss_resp := missQueue.io.resp
wb.io.miss_req.valid := missReqArb.io.out.valid
wb.io.miss_req.bits := missReqArb.io.out.bits.addr
......
@@ -271,6 +271,8 @@ class LoadPipe(id: Int)(implicit p: Parameters) extends DCacheModule with HasPer
// Bank conflict on data arrays
val s2_nack_data = RegEnable(!io.banked_data_read.ready, s1_fire)
val s2_nack = s2_nack_hit || s2_nack_no_mshr || s2_nack_data
// s2 miss merged
val s2_miss_merged = io.miss_req.valid && io.miss_resp.merged
val s2_bank_addr = addr_to_dcache_bank(s2_paddr)
dontTouch(s2_bank_addr)
@@ -387,9 +389,35 @@ class LoadPipe(id: Int)(implicit p: Parameters) extends DCacheModule with HasPer
io.error.valid := s3_error && s3_valid
// update plru in s3
io.replace_access.valid := RegNext(RegNext(RegNext(io.meta_read.fire()) && s1_valid && !io.lsu.s1_kill) && !s2_nack_no_mshr)
io.replace_access.bits.set := RegNext(RegNext(get_idx(s1_req.addr)))
io.replace_access.bits.way := RegNext(RegNext(Mux(s1_tag_match_dup_dc, OHToUInt(s1_tag_match_way_dup_dc), io.replace_way.way)))
if (!cfg.updateReplaceOn2ndmiss) {
// replacement is only updated on 1st miss
io.replace_access.valid := RegNext(RegNext(
RegNext(io.meta_read.fire()) && s1_valid && !io.lsu.s1_kill) &&
!s2_nack_no_mshr &&
!s2_miss_merged
)
io.replace_access.bits.set := RegNext(RegNext(get_idx(s1_req.addr)))
io.replace_access.bits.way := RegNext(RegNext(Mux(s1_tag_match_dup_dc, OHToUInt(s1_tag_match_way_dup_dc), io.replace_way.way)))
} else {
// replacement is updated on both 1st and 2nd miss
// timing is worse than !cfg.updateReplaceOn2ndmiss
io.replace_access.valid := RegNext(RegNext(
RegNext(io.meta_read.fire()) && s1_valid && !io.lsu.s1_kill) &&
!s2_nack_no_mshr
)
io.replace_access.bits.set := RegNext(RegNext(get_idx(s1_req.addr)))
io.replace_access.bits.way := RegNext(
Mux(
RegNext(s1_tag_match_dup_dc),
RegNext(OHToUInt(s1_tag_match_way_dup_dc)), // if hit, access hit way in plru
Mux( // if miss
!s2_miss_merged,
RegNext(io.replace_way.way), // 1st fire: access new selected replace way
OHToUInt(io.miss_resp.repl_way_en) // 2nd fire: access replace way selected at miss queue allocate time
)
)
)
}
// update access bit
io.access_flag_write.valid := s3_valid && s3_hit
......
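For context on what `io.replace_access` and `io.replace_way` talk to: the configured `setplru` replacer keeps per-set pseudo-LRU state that is touched by accesses and queried for a victim way. Below is a one-bit-per-way pseudo-LRU (bit-PLRU) sketch with hypothetical names; it is simpler than XiangShan's actual replacer and only illustrates the touch/victim interface.

```scala
import chisel3._
import chisel3.util._

// Bit-PLRU sketch: one "recently used" bit per way per set (hypothetical, simplified).
class BitPlruSketch(nSets: Int, nWays: Int) extends Module {
  val io = IO(new Bundle {
    // touch: mark (set, way) as recently used, as io.replace_access does above
    val touch = Flipped(ValidIO(new Bundle {
      val set = UInt(log2Ceil(nSets).W)
      val way = UInt(log2Ceil(nWays).W)
    }))
    // victim query for a given set, as io.replace_way does
    val victimSet = Input(UInt(log2Ceil(nSets).W))
    val victimWay = Output(UInt(log2Ceil(nWays).W))
  })

  val used = RegInit(VecInit(Seq.fill(nSets)(0.U(nWays.W))))

  when (io.touch.valid) {
    val marked = used(io.touch.bits.set) | UIntToOH(io.touch.bits.way, nWays)
    // if every way would be marked, keep only the touched one so a victim always exists
    used(io.touch.bits.set) := Mux(marked.andR, UIntToOH(io.touch.bits.way, nWays), marked)
  }

  // victim: lowest-numbered way whose "recently used" bit is clear
  io.victimWay := PriorityEncoder(~used(io.victimSet))
}
```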
@@ -102,6 +102,7 @@ class MainPipe(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
val probe_req = Flipped(DecoupledIO(new MainPipeReq))
// store miss go to miss queue
val miss_req = DecoupledIO(new MissReq)
val miss_resp = Input(new MissResp) // miss resp is used to support plru update
// store buffer
val store_req = Flipped(DecoupledIO(new DCacheLineReq))
val store_replay_resp = ValidIO(new DCacheLineResp)
@@ -1544,9 +1545,45 @@ class MainPipe(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
io.wb.bits.delay_release := s3_req_replace_dup_for_wb_valid
io.wb.bits.miss_id := s3_req.miss_id
io.replace_access.valid := RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe)
io.replace_access.bits.set := s2_idx_dup_for_replace_access
io.replace_access.bits.way := RegNext(OHToUInt(s1_way_en))
// update plru in main pipe s3
if (!cfg.updateReplaceOn2ndmiss) {
// replacement is only updated on 1st miss
io.replace_access.valid := RegNext(
// generated in mainpipe s1
RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe) &&
// generated in mainpipe s2
Mux(
io.miss_req.valid,
!io.miss_resp.merged && io.miss_req.ready, // if store miss, only update plru for the first miss
true.B // normal store access
)
)
io.replace_access.bits.set := RegNext(s2_idx_dup_for_replace_access)
io.replace_access.bits.way := RegNext(RegNext(OHToUInt(s1_way_en)))
} else {
// replacement is updated on both 1st and 2nd miss
// timing is worse than !cfg.updateReplaceOn2ndmiss
io.replace_access.valid := RegNext(
// generated in mainpipe s1
RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe) &&
// generated in mainpipe s2
Mux(
io.miss_req.valid,
io.miss_req.ready, // if store miss, do not update plru if that req needs to be replayed
true.B // normal store access
)
)
io.replace_access.bits.set := RegNext(s2_idx_dup_for_replace_access)
io.replace_access.bits.way := RegNext(
Mux(
io.miss_req.valid && io.miss_resp.merged,
// miss queue 2nd fire: access replace way selected at miss queue allocate time
OHToUInt(io.miss_resp.repl_way_en),
// new selected replace way or hit way
RegNext(OHToUInt(s1_way_en))
)
)
}
io.replace_way.set.valid := RegNext(s0_fire)
io.replace_way.set.bits := s1_idx_dup_for_replace_way
......
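The s2 qualification on the store/AMO path above can be read as the following standalone helper (a hypothetical sketch, equivalent in intent to the Mux in the diff): a store that misses only touches the PLRU if the miss queue actually accepts the request (otherwise it will be replayed), and with `updateReplaceOn2ndmiss` disabled, only if it is the first miss for that cacheline.

```scala
import chisel3._

// Sketch of the s2 touch qualification for the store/AMO path (hypothetical helper).
object MainPipeTouchGate {
  def apply(missReqValid: Bool, missReqReady: Bool, missMerged: Bool,
            updateOn2ndMiss: Boolean): Bool = {
    // a merged (secondary) miss only counts when updateOn2ndMiss is enabled
    val allowThisMiss = if (updateOn2ndMiss) true.B else !missMerged
    Mux(missReqValid,
      missReqReady && allowThisMiss, // store miss: must be accepted by the miss queue
      true.B)                        // normal store access: always touch
  }
}
```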
@@ -116,6 +116,10 @@ class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
class MissResp(implicit p: Parameters) extends DCacheBundle {
val id = UInt(log2Up(cfg.nMissEntries).W)
// cache req missed, merged into one of miss queue entries
// i.e. !miss_merged means this access is the first miss for this cacheline
val merged = Bool()
val repl_way_en = UInt(DCacheWays.W)
}
class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
@@ -136,14 +140,17 @@ class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
val secondary_ready = Output(Bool())
// this entry is busy and it can not merge the new req
val secondary_reject = Output(Bool())
val refill_to_ldq = ValidIO(new Refill)
// way selected for replacing, used to support plru update
val repl_way_en = Output(UInt(DCacheWays.W))
// bus
val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))
// send refill info to load queue
val refill_to_ldq = ValidIO(new Refill)
// refill pipe
val refill_pipe_req = DecoupledIO(new RefillPipeReq)
val refill_pipe_resp = Input(Bool())
@@ -420,6 +427,7 @@ class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
io.primary_ready := !req_valid
io.secondary_ready := should_merge(io.req.bits)
io.secondary_reject := should_reject(io.req.bits)
io.repl_way_en := req.way_en
// should not allocate, merge or reject at the same time
assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))
@@ -635,6 +643,8 @@ class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule wi
val req_handled_vec = entries.map(_.io.req_handled_by_this_entry)
assert(PopCount(req_handled_vec) <= 1.U, "Only one mshr can handle a req")
io.resp.id := OHToUInt(req_handled_vec)
io.resp.merged := merge
io.resp.repl_way_en := Mux1H(secondary_ready_vec, entries.map(_.io.repl_way_en))
val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo))
(0 until LoadPipelineWidth).map(i => {
......
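The key point of `repl_way_en` is that the way chosen on the primary miss is remembered by the MSHR, so secondary misses touch the very same way. In the diff above the entry simply exposes the stored `req.way_en`; a simplified sketch of that bookkeeping (hypothetical names) could look like this.

```scala
import chisel3._
import chisel3.util._

// Sketch (hypothetical, simplified) of the allocate-time way bookkeeping:
// the way chosen by the replacer when the entry is allocated for a primary
// miss is latched, and keeps being reported so later merged (secondary)
// misses can touch the same way instead of a freshly picked victim.
class MshrWaySketch(nWays: Int) extends Module {
  val io = IO(new Bundle {
    val allocate   = Input(Bool())         // primary miss allocates this entry
    val allocWayEn = Input(UInt(nWays.W))  // one-hot way from the replacer at allocate time
    val replWayEn  = Output(UInt(nWays.W)) // way to touch on secondary (merged) misses
  })
  val wayEnReg = RegEnable(io.allocWayEn, io.allocate)
  io.replWayEn := Mux(io.allocate, io.allocWayEn, wayEnReg)
}
```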