Commit 86878c0c authored by Yinan Xu

Merge remote-tracking branch 'origin/opt-mem-timing-merge' into debian-gogogo

Subproject commit 3d6bdf10d7b740588130e3056c8fd29f4175cadb
Subproject commit 0d800d4b9616bfffc786c6bd676f96ada631b0a3
......@@ -20,7 +20,6 @@ class ExceptionAddrIO extends XSBundle {
class LsqEntry extends XSBundle {
val vaddr = UInt(VAddrBits.W) // TODO: need opt
val paddr = UInt(PAddrBits.W)
val op = UInt(6.W)
val mask = UInt(8.W)
val data = UInt(XLEN.W)
val exception = UInt(16.W) // TODO: opt size
......@@ -29,6 +28,141 @@ class LsqEntry extends XSBundle {
val fwdData = Vec(8, UInt(8.W))
}
class LSQueueData(size: Int, nchannel: Int) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
val io = IO(new Bundle() {
val wb = Vec(nchannel, new Bundle() {
val wen = Input(Bool())
val index = Input(UInt(log2Up(size).W))
val wdata = Input(new LsqEntry)
})
val uncache = new Bundle() {
val wen = Input(Bool())
val index = Input(UInt(log2Up(size).W))
val wdata = Input(UInt(XLEN.W))
}
val refill = new Bundle() {
val wen = Input(Vec(size, Bool()))
val dcache = Input(new DCacheLineResp)
}
val needForward = Input(Vec(nchannel, Vec(2, UInt(size.W))))
val forward = Vec(nchannel, Flipped(new LoadForwardQueryIO))
val rdata = Output(Vec(size, new LsqEntry))
// val debug = new Bundle() {
// val debug_data = Vec(LoadQueueSize, new LsqEntry)
// }
def wbWrite(channel: Int, index: UInt, wdata: LsqEntry): Unit = {
require(channel < nchannel && channel >= 0)
// the caller must additionally drive "this.wb(channel).wen := true.B"
this.wb(channel).index := index
this.wb(channel).wdata := wdata
}
def uncacheWrite(index: UInt, wdata: UInt): Unit = {
// the caller must additionally drive "this.uncache.wen := true.B"
this.uncache.index := index
this.uncache.wdata := wdata
}
def forwardQuery(channel: Int, paddr: UInt, needForward1: Data, needForward2: Data): Unit = {
this.needForward(channel)(0) := needForward1
this.needForward(channel)(1) := needForward2
this.forward(channel).paddr := paddr
}
// def refillWrite(ldIdx: Int): Unit = {
// }
// use "this.refill.wen(ldIdx) := true.B" instead
})
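// A minimal usage sketch of the helpers above (caller-side, names assumed):
// wen is deliberately left to the caller so that a per-cycle default can be
// set once and overridden only when a write actually fires, e.g.
//
//   dataModule.io.wb(chan).wen := false.B           // default every cycle
//   when (wbFire) {
//     dataModule.io.wbWrite(chan, wbIndex, wbEntry) // drives index / wdata
//     dataModule.io.wb(chan).wen := true.B          // caller asserts wen
//   }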
io := DontCare
val data = Reg(Vec(size, new LsqEntry))
// writeback to lq/sq
(0 until nchannel).map(i => {
when(io.wb(i).wen){
data(io.wb(i).index) := io.wb(i).wdata
}
})
when(io.uncache.wen){
data(io.uncache.index).data := io.uncache.wdata
}
// refill missed load
def mergeRefillData(refill: UInt, fwd: UInt, fwdMask: UInt): UInt = {
val res = Wire(Vec(8, UInt(8.W)))
(0 until 8).foreach(i => {
res(i) := Mux(fwdMask(i), fwd(8 * (i + 1) - 1, 8 * i), refill(8 * (i + 1) - 1, 8 * i))
})
res.asUInt
}
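// A worked example via a software model of the merge above (illustrative;
// mergeRefillDataModel is not part of this module): with fwdMask = 0x0f,
// bytes 0-3 come from the forwarded store data and bytes 4-7 from the refill.
//
//   def mergeRefillDataModel(refill: BigInt, fwd: BigInt, fwdMask: Int): BigInt =
//     (0 until 8).map { i =>
//       val byte = if (((fwdMask >> i) & 1) == 1) (fwd >> (8 * i)) & 0xff
//                  else (refill >> (8 * i)) & 0xff
//       byte << (8 * i)
//     }.reduce(_ | _)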
// split dcache result into words
val words = VecInit((0 until blockWords) map { i =>
io.refill.dcache.data(DataBits * (i + 1) - 1, DataBits * i)
})
(0 until size).map(i => {
when(io.refill.wen(i) ){
val refillData = words(get_word(data(i).paddr))
data(i).data := mergeRefillData(refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
XSDebug("miss resp: pos %d addr %x data %x + %x(%b)\n", i.U, data(i).paddr, refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
}
})
// forwarding
// Compare ringBufferTail (deqPtr) and forward.sqIdx; there are two cases:
// (1) if they have the same flag, we need to check range(tail, sqIdx)
// (2) if they have different flags, we need to check range(tail, size) and range(0, sqIdx)
// Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, size))
// Forward2: Mux(same_flag, 0.U, range(0, sqIdx))
// i.e. forward1 covers the entries with the same flag bits, forward2 the rest
// entries with larger indices have higher priority since their data is younger
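// A software model of the two ranges above (illustrative; names assumed).
// below(i) is the mask of entries [0, i), matching the "(1.U << idx) - 1.U"
// idiom used by this module's callers:
//
//   def forwardRanges(size: Int, tail: Int, sqIdx: Int, sameFlag: Boolean) = {
//     def below(i: Int) = (BigInt(1) << i) - 1       // entries [0, i)
//     val needForward1 =
//       if (sameFlag) below(sqIdx) ^ below(tail)     // range(tail, sqIdx)
//       else          below(size)  ^ below(tail)     // range(tail, size)
//     val needForward2 =
//       if (sameFlag) BigInt(0) else below(sqIdx)    // range(0, sqIdx)
//     (needForward1, needForward2)
//   }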
(0 until nchannel).map(i => {
val forwardMask1 = WireInit(VecInit(Seq.fill(8)(false.B)))
val forwardData1 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
val forwardMask2 = WireInit(VecInit(Seq.fill(8)(false.B)))
val forwardData2 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
for (j <- 0 until size) {
val needCheck = io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
(0 until XLEN / 8).foreach(k => {
when (needCheck && data(j).mask(k)) {
when (io.needForward(i)(0)(j)) {
forwardMask1(k) := true.B
forwardData1(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
}
when (io.needForward(i)(1)(j)) {
forwardMask2(k) := true.B
forwardData2(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
}
XSDebug(io.needForward(i)(0)(j) || io.needForward(i)(1)(j),
p"forwarding $k-th byte ${Hexadecimal(data(j).data(8 * (k + 1) - 1, 8 * k))} " +
p"from ptr $j\n")
}
})
}
// merge forward lookup results
// forward2 is younger than forward1 and should have higher priority
(0 until XLEN / 8).map(k => {
io.forward(i).forwardMask(k) := forwardMask1(k) || forwardMask2(k)
io.forward(i).forwardData(k) := Mux(forwardMask2(k), forwardData2(k), forwardData1(k))
})
})
// data read
io.rdata := data
// io.debug.debug_data := data
}
// inflight miss block reqs
class InflightBlockInfo extends XSBundle {
val block_addr = UInt(PAddrBits.W)
......@@ -134,11 +268,12 @@ class LsqWrappper extends XSModule with HasDCacheParameters {
// fix valid, allocate lq / sq index
(0 until RenameWidth).map(i => {
val isStore = CommitType.lsInstIsStore(io.dp1Req(i).bits.ctrl.commitType)
val prevCanIn = if (i == 0) true.B else Cat((0 until i).map(i => io.dp1Req(i).ready)).andR
loadQueue.io.dp1Req(i).valid := !isStore && io.dp1Req(i).valid && prevCanIn
storeQueue.io.dp1Req(i).valid := isStore && io.dp1Req(i).valid && prevCanIn
loadQueue.io.dp1Req(i).valid := !isStore && io.dp1Req(i).valid
storeQueue.io.dp1Req(i).valid := isStore && io.dp1Req(i).valid
loadQueue.io.lqIdxs(i) <> io.lsIdxs(i).lqIdx
storeQueue.io.sqIdxs(i) <> io.lsIdxs(i).sqIdx
loadQueue.io.lqReady <> storeQueue.io.lqReady
loadQueue.io.sqReady <> storeQueue.io.sqReady
io.dp1Req(i).ready := storeQueue.io.dp1Req(i).ready && loadQueue.io.dp1Req(i).ready
})
}
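// Dispatch handshake (sketch): each queue computes a per-slot ready from its
// own free-entry count, the lqReady / sqReady vectors are cross-wired between
// the two queues, and a slot is accepted only when both queues can take it:
//
//   io.dp1Req(i).ready := io.lqReady(i) && io.sqReady(i) // in LoadQueue / StoreQueue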
......@@ -28,6 +28,8 @@ object LqPtr extends HasXSParameter {
class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
val io = IO(new Bundle() {
val dp1Req = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
val lqReady = Output(Vec(RenameWidth, Bool()))
val sqReady = Input(Vec(RenameWidth, Bool()))
val lqIdxs = Output(Vec(RenameWidth, new LqPtr)) // LSIdx will be assembled in LSQWrapper
val brqRedirect = Input(Valid(new Redirect))
val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
......@@ -44,7 +46,9 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
})
val uop = Reg(Vec(LoadQueueSize, new MicroOp))
val data = Reg(Vec(LoadQueueSize, new LsqEntry)) // FIXME: use LoadQueueEntry instead
// val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
dataModule.io := DontCare
val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
val valid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
......@@ -70,8 +74,6 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
val enqDeqMask1 = tailMask ^ headMask
val enqDeqMask = Mux(ringBufferSameFlag, enqDeqMask1, ~enqDeqMask1)
// TODO: misc arbiter
// Enqueue at dispatch
val emptyEntries = LoadQueueSize.U - distanceBetween(ringBufferHeadExtended, ringBufferTailExtended)
XSDebug("(ready, valid): ")
......@@ -88,10 +90,10 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
miss(index) := false.B
listening(index) := false.B
pending(index) := false.B
// data(index).bwdMask := 0.U(8.W).asBools
}
val numTryEnqueue = offset +& io.dp1Req(i).valid
io.dp1Req(i).ready := numTryEnqueue <= emptyEntries
io.lqReady(i) := numTryEnqueue <= emptyEntries
io.dp1Req(i).ready := io.lqReady(i) && io.sqReady(i)
io.lqIdxs(i) := lqIdx
XSDebug(false, true.B, "(%d, %d) ", io.dp1Req(i).ready, io.dp1Req(i).valid)
}
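// A software model of the enqueue bookkeeping above (illustrative, assuming
// offset accumulates the valid bits of the earlier slots): request i is
// accepted only while the cumulative number of valid requests 0..i still
// fits into the free entries.
//
//   def enqueueReady(valids: Seq[Boolean], emptyEntries: Int): Seq[Boolean] =
//     valids.scanLeft(0)((n, v) => n + (if (v) 1 else 0)).tail
//           .map(_ <= emptyEntries)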
......@@ -105,6 +107,7 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
// writeback load
(0 until LoadPipelineWidth).map(i => {
dataModule.io.wb(i).wen := false.B
when(io.loadIn(i).fire()) {
when(io.loadIn(i).bits.miss) {
XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
......@@ -139,14 +142,19 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
valid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
// allocated(loadWbIndex) := io.loadIn(i).bits.miss // if hit, lq entry can be recycled
data(loadWbIndex).paddr := io.loadIn(i).bits.paddr
data(loadWbIndex).vaddr := io.loadIn(i).bits.vaddr
data(loadWbIndex).mask := io.loadIn(i).bits.mask
data(loadWbIndex).data := io.loadIn(i).bits.data // for mmio / misc / debug
data(loadWbIndex).mmio := io.loadIn(i).bits.mmio
data(loadWbIndex).fwdMask := io.loadIn(i).bits.forwardMask
data(loadWbIndex).fwdData := io.loadIn(i).bits.forwardData
data(loadWbIndex).exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
val loadWbData = Wire(new LsqEntry)
loadWbData.paddr := io.loadIn(i).bits.paddr
loadWbData.vaddr := io.loadIn(i).bits.vaddr
loadWbData.mask := io.loadIn(i).bits.mask
loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
loadWbData.mmio := io.loadIn(i).bits.mmio
loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
loadWbData.fwdData := io.loadIn(i).bits.forwardData
loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
dataModule.io.wb(i).wen := true.B
val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
miss(loadWbIndex) := dcacheMissed
listening(loadWbIndex) := dcacheMissed
......@@ -161,30 +169,30 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
val missRefillSelVec = VecInit(
(0 until LoadQueueSize).map{ i =>
val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(data(i).paddr)).reduce(_||_)
val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
allocated(i) && miss(i) && !inflight
})
val missRefillSel = getFirstOne(missRefillSelVec, tailMask)
val missRefillBlockAddr = get_block_addr(data(missRefillSel).paddr)
val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
io.dcache.req.valid := missRefillSelVec.asUInt.orR
io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
io.dcache.req.bits.addr := missRefillBlockAddr
io.dcache.req.bits.data := DontCare
io.dcache.req.bits.mask := DontCare
io.dcache.req.bits.meta.id := DontCare // TODO: // FIXME
io.dcache.req.bits.meta.vaddr := DontCare // data(missRefillSel).vaddr
io.dcache.req.bits.meta.id := DontCare
io.dcache.req.bits.meta.vaddr := DontCare // dataModule.io.rdata(missRefillSel).vaddr
io.dcache.req.bits.meta.paddr := missRefillBlockAddr
io.dcache.req.bits.meta.uop := uop(missRefillSel)
io.dcache.req.bits.meta.mmio := false.B // data(missRefillSel).mmio
io.dcache.req.bits.meta.mmio := false.B // dataModule.io.rdata(missRefillSel).mmio
io.dcache.req.bits.meta.tlb_miss := false.B
io.dcache.req.bits.meta.mask := DontCare
io.dcache.req.bits.meta.replay := false.B
io.dcache.resp.ready := true.B
assert(!(data(missRefillSel).mmio && io.dcache.req.valid))
assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))
when(io.dcache.req.fire()) {
miss(missRefillSel) := false.B
......@@ -223,27 +231,15 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
// Refill 64 bit in a cycle
// Refill data comes back from io.dcache.resp
def mergeRefillData(refill: UInt, fwd: UInt, fwdMask: UInt): UInt = {
val res = Wire(Vec(8, UInt(8.W)))
(0 until 8).foreach(i => {
res(i) := Mux(fwdMask(i), fwd(8 * (i + 1) - 1, 8 * i), refill(8 * (i + 1) - 1, 8 * i))
})
res.asUInt
}
dataModule.io.refill.dcache := io.dcache.resp.bits
(0 until LoadQueueSize).map(i => {
val blockMatch = get_block_addr(data(i).paddr) === io.dcache.resp.bits.meta.paddr
val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
dataModule.io.refill.wen(i) := false.B
when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
// split them into words
val words = VecInit((0 until blockWords) map { i =>
io.dcache.resp.bits.data(DataBits * (i + 1) - 1, DataBits * i)
})
val refillData = words(get_word(data(i).paddr))
data(i).data := mergeRefillData(refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
dataModule.io.refill.wen(i) := true.B
valid(i) := true.B
listening(i) := false.B
XSDebug("miss resp: pos %d addr %x data %x + %x(%b)\n", i.U, data(i).paddr, refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
}
})
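// A software model of the refill word select (illustrative; the exact
// semantics of get_word are assumed): the refilled line is split into
// DataBits-wide words and the load picks the word addressed by its paddr.
//
//   def selectWord(line: BigInt, paddr: Long,
//                  dataBytes: Int = 8, blockBytes: Int = 64): BigInt = {
//     val wordIdx = ((paddr & (blockBytes - 1)) / dataBytes).toInt
//     (line >> (wordIdx * dataBytes * 8)) & ((BigInt(1) << (dataBytes * 8)) - 1)
//   }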
......@@ -262,9 +258,9 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
loadWbSelV(1) := lselvec1.orR
(0 until StorePipelineWidth).map(i => {
// data select
val rdata = data(loadWbSel(i)).data
val rdata = dataModule.io.rdata(loadWbSel(i)).data
val func = uop(loadWbSel(i)).ctrl.fuOpType
val raddr = data(loadWbSel(i)).paddr
val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
val rdataSel = LookupTree(raddr(2, 0), List(
"b000".U -> rdata(63, 0),
"b001".U -> rdata(63, 8),
......@@ -286,13 +282,13 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
LSUOpType.flw -> boxF32ToF64(rdataSel(31, 0))
))
io.ldout(i).bits.uop := uop(loadWbSel(i))
io.ldout(i).bits.uop.cf.exceptionVec := data(loadWbSel(i)).exception.asBools
io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
io.ldout(i).bits.data := rdataPartialLoad
io.ldout(i).bits.redirectValid := false.B
io.ldout(i).bits.redirect := DontCare
io.ldout(i).bits.brUpdate := DontCare
io.ldout(i).bits.debug.isMMIO := data(loadWbSel(i)).mmio
io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
io.ldout(i).bits.fflags := DontCare
io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
when(io.ldout(i).fire()) {
......@@ -301,9 +297,9 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
io.ldout(i).bits.uop.roqIdx.asUInt,
io.ldout(i).bits.uop.lqIdx.asUInt,
io.ldout(i).bits.uop.cf.pc,
data(loadWbSel(i)).paddr,
data(loadWbSel(i)).data,
data(loadWbSel(i)).mmio
dataModule.io.rdata(loadWbSel(i)).paddr,
dataModule.io.rdata(loadWbSel(i)).data,
dataModule.io.rdata(loadWbSel(i)).mmio
)
}
})
......@@ -391,10 +387,10 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
// check if load already in lq needs to be rolledback
val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
val addrMatch = allocated(j) &&
io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (valid(j) || listening(j) || miss(j))
// TODO: update refilled data
val violationVec = (0 until 8).map(k => data(j).mask(k) && io.storeIn(i).bits.mask(k))
val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
Cat(violationVec).orR() && entryNeedCheck
}))
val lqViolation = lqViolationVec.asUInt().orR()
......@@ -484,17 +480,17 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
!io.commits(0).bits.isWalk
io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
io.uncache.req.bits.addr := data(ringBufferTail).paddr
io.uncache.req.bits.data := data(ringBufferTail).data
io.uncache.req.bits.mask := data(ringBufferTail).mask
io.uncache.req.bits.addr := dataModule.io.rdata(ringBufferTail).paddr
io.uncache.req.bits.data := dataModule.io.rdata(ringBufferTail).data
io.uncache.req.bits.mask := dataModule.io.rdata(ringBufferTail).mask
io.uncache.req.bits.meta.id := DontCare // TODO: // FIXME
io.uncache.req.bits.meta.vaddr := DontCare
io.uncache.req.bits.meta.paddr := data(ringBufferTail).paddr
io.uncache.req.bits.meta.paddr := dataModule.io.rdata(ringBufferTail).paddr
io.uncache.req.bits.meta.uop := uop(ringBufferTail)
io.uncache.req.bits.meta.mmio := true.B // data(ringBufferTail).mmio
io.uncache.req.bits.meta.mmio := true.B // dataModule.io.rdata(ringBufferTail).mmio
io.uncache.req.bits.meta.tlb_miss := false.B
io.uncache.req.bits.meta.mask := data(ringBufferTail).mask
io.uncache.req.bits.meta.mask := dataModule.io.rdata(ringBufferTail).mask
io.uncache.req.bits.meta.replay := false.B
io.uncache.resp.ready := true.B
......@@ -503,9 +499,11 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
pending(ringBufferTail) := false.B
}
dataModule.io.uncache.wen := false.B
when(io.uncache.resp.fire()){
valid(ringBufferTail) := true.B
data(ringBufferTail).data := io.uncache.resp.bits.data(XLEN-1, 0)
dataModule.io.uncacheWrite(ringBufferTail, io.uncache.resp.bits.data(XLEN-1, 0))
dataModule.io.uncache.wen := true.B
// TODO: write back exception info
}
......@@ -524,7 +522,7 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
}
// Read vaddr for mem exception
io.exceptionAddr.vaddr := data(io.exceptionAddr.lsIdx.lqIdx.value).vaddr
io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr
// misprediction recovery / exception redirect
// invalidate lq term using robIdx
......@@ -565,7 +563,7 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
for (i <- 0 until LoadQueueSize) {
if (i % 4 == 0) XSDebug("")
XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, data(i).paddr)
XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
PrintFlag(allocated(i), "a")
PrintFlag(allocated(i) && valid(i), "v")
PrintFlag(allocated(i) && writebacked(i), "w")
......
......@@ -25,6 +25,8 @@ object SqPtr extends HasXSParameter {
class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
val io = IO(new Bundle() {
val dp1Req = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
val lqReady = Input(Vec(RenameWidth, Bool()))
val sqReady = Output(Vec(RenameWidth, Bool()))
val sqIdxs = Output(Vec(RenameWidth, new SqPtr))
val brqRedirect = Input(Valid(new Redirect))
val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
......@@ -40,7 +42,9 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
})
val uop = Reg(Vec(StoreQueueSize, new MicroOp))
val data = Reg(Vec(StoreQueueSize, new LsqEntry)) // FIXME: use StoreQueueEntry instead
// val data = Reg(Vec(StoreQueueSize, new LsqEntry))
val dataModule = Module(new LSQueueData(StoreQueueSize, StorePipelineWidth))
dataModule.io := DontCare
val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
val valid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // data is valid
val writebacked = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been writebacked to CDB
......@@ -84,10 +88,10 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
miss(index) := false.B
listening(index) := false.B
pending(index) := false.B
// data(index).bwdMask := 0.U(8.W).asBools
}
val numTryEnqueue = offset +& io.dp1Req(i).valid
io.dp1Req(i).ready := numTryEnqueue <= emptyEntries
io.sqReady(i) := numTryEnqueue <= emptyEntries
io.dp1Req(i).ready := io.lqReady(i) && io.sqReady(i)
io.sqIdxs(i) := sqIdx
XSDebug(false, true.B, "(%d, %d) ", io.dp1Req(i).ready, io.dp1Req(i).valid)
}
......@@ -101,17 +105,25 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
// writeback store
(0 until StorePipelineWidth).map(i => {
dataModule.io.wb(i).wen := false.B
when(io.storeIn(i).fire()) {
val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value
valid(stWbIndex) := !io.storeIn(i).bits.mmio
data(stWbIndex).paddr := io.storeIn(i).bits.paddr
data(stWbIndex).vaddr := io.storeIn(i).bits.vaddr
data(stWbIndex).mask := io.storeIn(i).bits.mask
data(stWbIndex).data := io.storeIn(i).bits.data
data(stWbIndex).mmio := io.storeIn(i).bits.mmio
data(stWbIndex).exception := io.storeIn(i).bits.uop.cf.exceptionVec.asUInt
miss(stWbIndex) := io.storeIn(i).bits.miss
pending(stWbIndex) := io.storeIn(i).bits.mmio
val storeWbData = Wire(new LsqEntry)
storeWbData := DontCare
storeWbData.paddr := io.storeIn(i).bits.paddr
storeWbData.vaddr := io.storeIn(i).bits.vaddr
storeWbData.mask := io.storeIn(i).bits.mask
storeWbData.data := io.storeIn(i).bits.data
storeWbData.mmio := io.storeIn(i).bits.mmio
storeWbData.exception := io.storeIn(i).bits.uop.cf.exceptionVec.asUInt
dataModule.io.wbWrite(i, stWbIndex, storeWbData)
dataModule.io.wb(i).wen := true.B
XSInfo("store write to sq idx %d pc 0x%x vaddr %x paddr %x data %x miss %x mmio %x roll %x exc %x\n",
io.storeIn(i).bits.uop.sqIdx.value,
io.storeIn(i).bits.uop.cf.pc,
......@@ -154,6 +166,24 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
(selValid, selVec)
}
def selectFirstTwoRoughly(valid: Vec[Bool]) = {
// TODO: do not select strictly in index order; pick 2 valid bits pseudo-randomly
val firstSelVec = valid
val notFirstVec = Wire(Vec(valid.length, Bool()))
// notFirstVec(i): some entry before i is valid, so i cannot be the first hit
(0 until valid.length).map(i =>
notFirstVec(i) := (if (i != 0) valid(i - 1) || notFirstVec(i - 1) else false.B)
)
val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && notFirstVec(i)))
val selVec = Wire(Vec(2, UInt(log2Up(valid.length).W)))
val selValid = Wire(Vec(2, Bool()))
selVec(0) := PriorityEncoder(firstSelVec)
selVec(1) := PriorityEncoder(secondSelVec)
selValid(0) := Cat(firstSelVec).orR
selValid(1) := Cat(secondSelVec).orR
(selValid, selVec)
}
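// A software model of the selection above (illustrative): first is the
// lowest set bit, second is the lowest set bit strictly after it.
//
//   def firstTwoModel(valid: Seq[Boolean]) = {
//     val first  = valid.indexWhere(identity)
//     val second = if (first >= 0) valid.indexWhere(identity, first + 1) else -1
//     ((first >= 0, second >= 0), (first max 0, second max 0))
//   }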
// select the last writebacked instruction
val validStoreVec = VecInit((0 until StoreQueueSize).map(i => !(allocated(i) && valid(i))))
val storeNotValid = SqPtr(false.B, getFirstOne(validStoreVec, tailMask))
......@@ -169,12 +199,12 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
(0 until StorePipelineWidth).map(i => {
io.stout(i).bits.uop := uop(storeWbSel(i))
io.stout(i).bits.uop.sqIdx := storeWbSel(i).asTypeOf(new SqPtr)
io.stout(i).bits.uop.cf.exceptionVec := data(storeWbSel(i)).exception.asBools
io.stout(i).bits.data := data(storeWbSel(i)).data
io.stout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(storeWbSel(i)).exception.asBools
io.stout(i).bits.data := dataModule.io.rdata(storeWbSel(i)).data
io.stout(i).bits.redirectValid := false.B
io.stout(i).bits.redirect := DontCare
io.stout(i).bits.brUpdate := DontCare
io.stout(i).bits.debug.isMMIO := data(storeWbSel(i)).mmio
io.stout(i).bits.debug.isMMIO := dataModule.io.rdata(storeWbSel(i)).mmio
io.stout(i).valid := storeWbSelVec(storeWbSel(i)) && storeWbValid(i)
when(io.stout(i).fire()) {
writebacked(storeWbSel(i)) := true.B
......@@ -204,83 +234,107 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
// Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
// Forward2: Mux(same_flag, 0.U, range(0, sqIdx))
// i.e. forward1 covers the entries with the same flag bits, forward2 the rest
val forwardMask1 = WireInit(VecInit(Seq.fill(8)(false.B)))
val forwardData1 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
val forwardMask2 = WireInit(VecInit(Seq.fill(8)(false.B)))
val forwardData2 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
val differentFlag = ringBufferTailExtended.flag =/= io.forward(i).sqIdx.flag
val forwardMask = ((1.U((StoreQueueSize + 1).W)) << io.forward(i).sqIdx.value).asUInt - 1.U
val needForward1 = Mux(differentFlag, ~tailMask, tailMask ^ forwardMask)
val needForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W))
val storeWritebackedVec = WireInit(VecInit(Seq.fill(StoreQueueSize)(false.B)))
for (j <- 0 until StoreQueueSize) {
storeWritebackedVec(j) := valid(j) && allocated(j) // all valid terms need to be checked
}
val needForward1 = Mux(differentFlag, ~tailMask, tailMask ^ forwardMask) & storeWritebackedVec.asUInt
val needForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W)) & storeWritebackedVec.asUInt
XSDebug("" + i + " f1 %b f2 %b sqIdx %d pa %x\n", needForward1, needForward2, io.forward(i).sqIdx.asUInt, io.forward(i).paddr)
// entries with larger indices have higher priority since their data is younger
for (j <- 0 until StoreQueueSize) {
val needCheck = valid(j) && allocated(j) && // all valid terms need to be checked
io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
(0 until XLEN / 8).foreach(k => {
when (needCheck && data(j).mask(k)) {
when (needForward1(j)) {
forwardMask1(k) := true.B
forwardData1(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
}
when (needForward2(j)) {
forwardMask2(k) := true.B
forwardData2(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
}
XSDebug(needForward1(j) || needForward2(j),
p"forwarding $k-th byte ${Hexadecimal(data(j).data(8 * (k + 1) - 1, 8 * k))} " +
p"from ptr $j pc ${Hexadecimal(uop(j).cf.pc)}\n")
}
})
}
// do real fwd query
dataModule.io.forwardQuery(
channel = i,
paddr = io.forward(i).paddr,
needForward1 = needForward1,
needForward2 = needForward2
)
// merge forward lookup results
// forward2 is younger than forward1 and should have higher priority
(0 until XLEN / 8).map(k => {
io.forward(i).forwardMask(k) := forwardMask1(k) || forwardMask2(k)
io.forward(i).forwardData(k) := Mux(forwardMask2(k), forwardData2(k), forwardData1(k))
})
io.forward(i).forwardMask := dataModule.io.forward(i).forwardMask
io.forward(i).forwardData := dataModule.io.forward(i).forwardData
})
// CommitedStoreQueue for timing opt
// send committed store insts to sbuffer
// select up to 2 writebacked store insts
val commitedStoreQueue = Module(new MIMOQueue(
UInt(log2Up(StoreQueueSize).W),
entries = 64, //FIXME
inCnt = 6,
outCnt = 2,
mem = false,
perf = true
))
commitedStoreQueue.io.flush := false.B
// When a store commits, mark it as commited (so it is no longer affected by
// redirect), then push the store's sq ptr into commitedStoreQueue
(0 until CommitWidth).map(i => {
when(storeCommit(i)) {
commited(mcommitIdx(i)) := true.B
XSDebug("store commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
}
commitedStoreQueue.io.enq(i).valid := storeCommit(i)
commitedStoreQueue.io.enq(i).bits := mcommitIdx(i)
// We assume commitedStoreQueue.io.enq(i).ready === true.B,
// since commitedStoreQueue.size = 64 is large enough to absorb all commits
})
val storeCommitSelVec = VecInit((0 until StoreQueueSize).map(i => {
allocated(i) && commited(i)
}))
val (storeCommitValid, storeCommitSel) = selectFirstTwo(storeCommitSelVec, tailMask)
// get no more than 2 committed stores from commitedStoreQueue
// send selected store inst to sbuffer
class SbufferCandidateEntry extends XSBundle{
val sbuffer = new DCacheWordReq
val sqIdx = UInt(log2Up(StoreQueueSize).W)
}
val ensbufferCandidateQueue = Module(new MIMOQueue(
new SbufferCandidateEntry,
entries = 2,
inCnt = 2,
outCnt = 2,
mem = false,
perf = true
))
ensbufferCandidateQueue.io.flush := false.B
val sbufferCandidate = Wire(Vec(2, Decoupled(new SbufferCandidateEntry)))
(0 until 2).map(i => {
val ptr = storeCommitSel(i)
val mmio = data(ptr).mmio
io.sbuffer(i).valid := storeCommitValid(i) && !mmio
io.sbuffer(i).bits.cmd := MemoryOpConstants.M_XWR
io.sbuffer(i).bits.addr := data(ptr).paddr
io.sbuffer(i).bits.data := data(ptr).data
io.sbuffer(i).bits.mask := data(ptr).mask
io.sbuffer(i).bits.meta := DontCare
io.sbuffer(i).bits.meta.tlb_miss := false.B
io.sbuffer(i).bits.meta.uop := uop(ptr)
io.sbuffer(i).bits.meta.mmio := mmio
io.sbuffer(i).bits.meta.mask := data(ptr).mask
val ptr = commitedStoreQueue.io.deq(i).bits
val mmio = dataModule.io.rdata(ptr).mmio
sbufferCandidate(i).valid := commitedStoreQueue.io.deq(i).valid && !mmio
sbufferCandidate(i).bits.sqIdx := ptr
sbufferCandidate(i).bits.sbuffer.cmd := MemoryOpConstants.M_XWR
sbufferCandidate(i).bits.sbuffer.addr := dataModule.io.rdata(ptr).paddr
sbufferCandidate(i).bits.sbuffer.data := dataModule.io.rdata(ptr).data
sbufferCandidate(i).bits.sbuffer.mask := dataModule.io.rdata(ptr).mask
sbufferCandidate(i).bits.sbuffer.meta := DontCare
sbufferCandidate(i).bits.sbuffer.meta.tlb_miss := false.B
sbufferCandidate(i).bits.sbuffer.meta.uop := DontCare
sbufferCandidate(i).bits.sbuffer.meta.mmio := mmio
sbufferCandidate(i).bits.sbuffer.meta.mask := dataModule.io.rdata(ptr).mask
when(mmio && commitedStoreQueue.io.deq(i).valid) {
allocated(ptr) := false.B
}
commitedStoreQueue.io.deq(i).ready := sbufferCandidate(i).fire() || mmio
sbufferCandidate(i).ready := ensbufferCandidateQueue.io.enq(i).ready
ensbufferCandidateQueue.io.enq(i).valid := sbufferCandidate(i).valid
ensbufferCandidateQueue.io.enq(i).bits.sqIdx := sbufferCandidate(i).bits.sqIdx
ensbufferCandidateQueue.io.enq(i).bits.sbuffer := sbufferCandidate(i).bits.sbuffer
XSDebug(io.sbuffer(i).fire(), "[SBUFFER STORE REQ] pa %x data %x\n", data(ptr).paddr, data(ptr).data)
ensbufferCandidateQueue.io.deq(i).ready := io.sbuffer(i).fire()
io.sbuffer(i).valid := ensbufferCandidateQueue.io.deq(i).valid
io.sbuffer(i).bits := ensbufferCandidateQueue.io.deq(i).bits.sbuffer
// update sq meta if store inst is sent to sbuffer
when(storeCommitValid(i) && (mmio || io.sbuffer(i).ready)) {
allocated(ptr) := false.B
when(ensbufferCandidateQueue.io.deq(i).valid && io.sbuffer(i).ready) {
allocated(ensbufferCandidateQueue.io.deq(i).bits.sqIdx) := false.B
}
})
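// Commit-to-sbuffer flow (sketch): committing a store marks its entry
// commited and pushes the sq index into commitedStoreQueue; up to two
// indices per cycle are dequeued, their data read from dataModule, staged
// in ensbufferCandidateQueue, and finally issued on io.sbuffer. MMIO stores
// are dequeued without entering the sbuffer and their sq entries freed.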
// Memory mapped IO / other uncached operations
// setup misc mem access req
......@@ -292,17 +346,17 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
!io.commits(0).bits.isWalk
io.uncache.req.bits.cmd := MemoryOpConstants.M_XWR
io.uncache.req.bits.addr := data(ringBufferTail).paddr
io.uncache.req.bits.data := data(ringBufferTail).data
io.uncache.req.bits.mask := data(ringBufferTail).mask
io.uncache.req.bits.addr := dataModule.io.rdata(ringBufferTail).paddr
io.uncache.req.bits.data := dataModule.io.rdata(ringBufferTail).data
io.uncache.req.bits.mask := dataModule.io.rdata(ringBufferTail).mask
io.uncache.req.bits.meta.id := DontCare // TODO: // FIXME
io.uncache.req.bits.meta.vaddr := DontCare
io.uncache.req.bits.meta.paddr := data(ringBufferTail).paddr
io.uncache.req.bits.meta.paddr := dataModule.io.rdata(ringBufferTail).paddr
io.uncache.req.bits.meta.uop := uop(ringBufferTail)
io.uncache.req.bits.meta.mmio := true.B // data(ringBufferTail).mmio
io.uncache.req.bits.meta.mmio := true.B // dataModule.io.rdata(ringBufferTail).mmio
io.uncache.req.bits.meta.tlb_miss := false.B
io.uncache.req.bits.meta.mask := data(ringBufferTail).mask
io.uncache.req.bits.meta.mask := dataModule.io.rdata(ringBufferTail).mask
io.uncache.req.bits.meta.replay := false.B
io.uncache.resp.ready := true.B
......@@ -313,7 +367,6 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
when(io.uncache.resp.fire()){
valid(ringBufferTail) := true.B
data(ringBufferTail).data := io.uncache.resp.bits.data(XLEN-1, 0)
// TODO: write back exception info
}
......@@ -328,7 +381,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
}
// Read vaddr for mem exception
io.exceptionAddr.vaddr := data(io.exceptionAddr.lsIdx.sqIdx.value).vaddr
io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.sqIdx.value).vaddr
// misprediction recovery / exception redirect
// invalidate sq term using robIdx
......@@ -364,7 +417,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
for (i <- 0 until StoreQueueSize) {
if (i % 4 == 0) XSDebug("")
XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, data(i).paddr)
XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
PrintFlag(allocated(i), "a")
PrintFlag(allocated(i) && valid(i), "v")
PrintFlag(allocated(i) && writebacked(i), "w")
......