未验证 提交 68697cf6 编写于 作者: Y Yinan Xu 提交者: GitHub

Merge pull request #420 from RISCVERS/opt-lq

LoadQueueData: use separate data module
......@@ -16,210 +16,11 @@ class ExceptionAddrIO extends XSBundle {
val vaddr = Output(UInt(VAddrBits.W))
}
// One load/store queue data entry: virtual/physical address, byte-enable
// mask, raw data, exception vector, plus store-to-load forwarding state.
class LsqEntry extends XSBundle {
val vaddr = UInt(VAddrBits.W) // TODO: need opt
val paddr = UInt(PAddrBits.W)
val mask = UInt(8.W) // byte-enable mask for this (up to 8-byte) access
val data = UInt(XLEN.W)
val exception = UInt(16.W) // TODO: opt size
val fwdMask = Vec(8, Bool()) // per byte: set if the byte was forwarded from an older store
val fwdData = Vec(8, UInt(8.W)) // the forwarded bytes (valid where fwdMask is set)
}
// Per-byte forwarding lookup result: mask(i) marks byte i as forwarded,
// data(i) carries the forwarded byte value.
class FwdEntry extends XSBundle {
val mask = Vec(8, Bool())
val data = Vec(8, UInt(8.W))
}
// Unified data module for the load/store queue: one register file of whole
// LsqEntry records, with write-back, uncache, dcache-refill and
// store-to-load forwarding ports.
class LSQueueData(size: Int, nchannel: Int) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
val io = IO(new Bundle() {
// per-channel write-back port (whole entry written when a load/store writes back)
val wb = Vec(nchannel, new Bundle() {
val wen = Input(Bool())
val index = Input(UInt(log2Up(size).W))
val wdata = Input(new LsqEntry)
})
// data-only write port for uncache (mmio) results
val uncache = new Bundle() {
val wen = Input(Bool())
val index = Input(UInt(log2Up(size).W))
val wdata = Input(UInt(XLEN.W))
}
// per-entry refill strobes plus one full dcache block of refill data
val refill = new Bundle() {
val wen = Input(Vec(size, Bool()))
val data = Input(UInt((cfg.blockBytes * 8).W))
}
// forwarding query: two entry-selection masks per channel (see the range
// comments above the forwarding logic below)
val needForward = Input(Vec(nchannel, Vec(2, UInt(size.W))))
val forward = Vec(nchannel, Flipped(new LoadForwardQueryIO))
// all entries are exposed combinationally for external reads
val rdata = Output(Vec(size, new LsqEntry))
// val debug = new Bundle() {
// val debug_data = Vec(LoadQueueSize, new LsqEntry)
// }
// Drive write address/data of wb channel `channel`.
def wbWrite(channel: Int, index: UInt, wdata: LsqEntry): Unit = {
require(channel < nchannel && channel >= 0)
// need extra "this.wb(channel).wen := true.B"
this.wb(channel).index := index
this.wb(channel).wdata := wdata
}
// Drive the uncache data-only write port.
def uncacheWrite(index: UInt, wdata: UInt): Unit = {
// need extra "this.uncache.wen := true.B"
this.uncache.index := index
this.uncache.wdata := wdata
}
// Drive a forwarding query on `channel`.
def forwardQuery(channel: Int, paddr: UInt, needForward1: Data, needForward2: Data): Unit = {
this.needForward(channel)(0) := needForward1
this.needForward(channel)(1) := needForward2
this.forward(channel).paddr := paddr
}
// def refillWrite(ldIdx: Int): Unit = {
// }
// use "this.refill.wen(ldIdx) := true.B" instead
})
io := DontCare
// backing register file of whole entries
val data = Reg(Vec(size, new LsqEntry))
// writeback to lq/sq
// NOTE(review): hard-codes 2 write channels instead of nchannel — TODO confirm
(0 until 2).map(i => {
when(io.wb(i).wen){
data(io.wb(i).index) := io.wb(i).wdata
}
})
when(io.uncache.wen){
data(io.uncache.index).data := io.uncache.wdata
}
// refill missed load
// merge refill bytes with already-forwarded bytes: forwarded bytes win
def mergeRefillData(refill: UInt, fwd: UInt, fwdMask: UInt): UInt = {
val res = Wire(Vec(8, UInt(8.W)))
(0 until 8).foreach(i => {
res(i) := Mux(fwdMask(i), fwd(8 * (i + 1) - 1, 8 * i), refill(8 * (i + 1) - 1, 8 * i))
})
res.asUInt
}
// split dcache result into words
val words = VecInit((0 until blockWords) map { i => io.refill.data(DataBits * (i + 1) - 1, DataBits * i)})
(0 until size).map(i => {
when(io.refill.wen(i) ){
// pick the word of the refilled block that this entry's paddr addresses
val refillData = words(get_word(data(i).paddr))
data(i).data := mergeRefillData(refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
XSDebug("miss resp: pos %d addr %x data %x + %x(%b)\n", i.U, data(i).paddr, refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
}
})
// forwarding
// Compare ringBufferTail (deqPtr) and forward.sqIdx, we have two cases:
// (1) if they have the same flag, we need to check range(tail, sqIdx)
// (2) if they have different flags, we need to check range(tail, LoadQueueSize) and range(0, sqIdx)
// Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, LoadQueueSize))
// Forward2: Mux(same_flag, 0.U, range(0, sqIdx) )
// i.e. forward1 is the target entries with the same flag bits and forward2 otherwise
// entry with larger index should have higher priority since it's data is younger
// FIXME: old fwd logic for assertion, remove when rtl freeze
(0 until nchannel).map(i => {
val forwardMask1 = WireInit(VecInit(Seq.fill(8)(false.B)))
val forwardData1 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
val forwardMask2 = WireInit(VecInit(Seq.fill(8)(false.B)))
val forwardData2 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
// serial lookup, kept only to cross-check the parallel version below
for (j <- 0 until size) {
// double-word (bits above 3) address match between query and entry j
val needCheck = io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
(0 until XLEN / 8).foreach(k => {
when (needCheck && data(j).mask(k)) {
when (io.needForward(i)(0)(j)) {
forwardMask1(k) := true.B
forwardData1(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
}
when (io.needForward(i)(1)(j)) {
forwardMask2(k) := true.B
forwardData2(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
}
XSDebug(io.needForward(i)(0)(j) || io.needForward(i)(1)(j),
p"forwarding $k-th byte ${Hexadecimal(data(j).data(8 * (k + 1) - 1, 8 * k))} " +
p"from ptr $j\n")
}
})
}
// merge forward lookup results
// forward2 is younger than forward1 and should have higher priority
val oldFwdResult = Wire(new FwdEntry)
(0 until XLEN / 8).map(k => {
oldFwdResult.mask(k) := RegNext(forwardMask1(k) || forwardMask2(k))
oldFwdResult.data(k) := RegNext(Mux(forwardMask2(k), forwardData2(k), forwardData1(k)))
})
// parallel fwd logic
val paddrMatch = Wire(Vec(size, Bool()))
val matchResultVec = Wire(Vec(size * 2, new FwdEntry))
// reduction tree: the right operand (higher index, younger) wins per byte
def parallelFwd(xs: Seq[Data]): Data = {
ParallelOperation(xs, (a: Data, b: Data) => {
val l = a.asTypeOf(new FwdEntry)
val r = b.asTypeOf(new FwdEntry)
val res = Wire(new FwdEntry)
(0 until 8).map(p => {
res.mask(p) := l.mask(p) || r.mask(p)
res.data(p) := Mux(r.mask(p), r.data(p), l.data(p))
})
res
})
}
for (j <- 0 until size) {
paddrMatch(j) := io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
}
for (j <- 0 until size) {
// stage the per-entry hit decision for one cycle; slots 0..size-1 hold
// forward1 candidates, slots size..2*size-1 hold forward2 candidates
val needCheck0 = RegNext(paddrMatch(j) && io.needForward(i)(0)(j))
val needCheck1 = RegNext(paddrMatch(j) && io.needForward(i)(1)(j))
(0 until XLEN / 8).foreach(k => {
matchResultVec(j).mask(k) := needCheck0 && data(j).mask(k)
matchResultVec(j).data(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
matchResultVec(size + j).mask(k) := needCheck1 && data(j).mask(k)
matchResultVec(size + j).data(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
})
}
val parallelFwdResult = parallelFwd(matchResultVec).asTypeOf(new FwdEntry)
io.forward(i).forwardMask := parallelFwdResult.mask
io.forward(i).forwardData := parallelFwdResult.data
// cross-check the parallel result against the serial (reference) result
when(
oldFwdResult.mask.asUInt =/= parallelFwdResult.mask.asUInt
){
printf("%d: mask error: right: %b false %b\n", GTimer(), oldFwdResult.mask.asUInt, parallelFwdResult.mask.asUInt)
}
for (p <- 0 until 8) {
when(
oldFwdResult.data(p) =/= parallelFwdResult.data(p) && oldFwdResult.mask(p)
){
printf("%d: data "+p+" error: right: %x false %x\n", GTimer(), oldFwdResult.data(p), parallelFwdResult.data(p))
}
}
})
// data read
io.rdata := data
// io.debug.debug_data := data
}
// inflight miss block reqs
class InflightBlockInfo extends XSBundle {
val block_addr = UInt(PAddrBits.W)
......
......@@ -76,8 +76,10 @@ class LoadQueue extends XSModule
val uop = Reg(Vec(LoadQueueSize, new MicroOp))
// val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
dataModule.io := DontCare
val vaddrModule = Module(new AsyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
vaddrModule.io := DontCare
val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
......@@ -144,7 +146,8 @@ class LoadQueue extends XSModule
* After cache refills, it will write back through arbiter with loadUnit.
*/
for (i <- 0 until LoadPipelineWidth) {
dataModule.io.wb(i).wen := false.B
dataModule.io.wb.wen(i) := false.B
vaddrModule.io.wen(i) := false.B
when(io.loadIn(i).fire()) {
when(io.loadIn(i).bits.miss) {
XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
......@@ -179,16 +182,18 @@ class LoadQueue extends XSModule
datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
val loadWbData = Wire(new LsqEntry)
val loadWbData = Wire(new LQDataEntry)
loadWbData.paddr := io.loadIn(i).bits.paddr
loadWbData.vaddr := io.loadIn(i).bits.vaddr
loadWbData.mask := io.loadIn(i).bits.mask
loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
loadWbData.data := io.loadIn(i).bits.data // fwd data
loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
loadWbData.fwdData := io.loadIn(i).bits.forwardData
loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
dataModule.io.wb(i).wen := true.B
dataModule.io.wb.wen(i) := true.B
vaddrModule.io.waddr(i) := loadWbIndex
vaddrModule.io.wdata(i) := io.loadIn(i).bits.vaddr
vaddrModule.io.wen(i) := true.B
debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
......@@ -270,13 +275,13 @@ class LoadQueue extends XSModule
// Refill 64 bit in a cycle
// Refill data comes back from io.dcache.resp
dataModule.io.refill.valid := io.dcache.valid
dataModule.io.refill.paddr := io.dcache.bits.addr
dataModule.io.refill.data := io.dcache.bits.data
(0 until LoadQueueSize).map(i => {
val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === get_block_addr(io.dcache.bits.addr)
dataModule.io.refill.wen(i) := false.B
when(allocated(i) && miss(i) && blockMatch && io.dcache.valid) {
dataModule.io.refill.wen(i) := true.B
dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
datavalid(i) := true.B
miss(i) := false.B
}
......@@ -290,7 +295,7 @@ class LoadQueue extends XSModule
// Stage 0
// Generate writeback indexes
val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
allocated(i) && datavalid(i) && !writebacked(i)
allocated(i) && !writebacked(i) && datavalid(i)
})).asUInt() // use uint instead vec to reduce verilog lines
val loadEvenSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i)}))
val loadOddSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i+1)}))
......@@ -329,10 +334,11 @@ class LoadQueue extends XSModule
// writeback data to cdb
(0 until LoadPipelineWidth).map(i => {
// data select
val rdata = dataModule.io.rdata(loadWbSel(i)).data
dataModule.io.wb.raddr(i) := loadWbSel(i)
val rdata = dataModule.io.wb.rdata(i).data
val seluop = uop(loadWbSel(i))
val func = seluop.ctrl.fuOpType
val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
val raddr = dataModule.io.wb.rdata(i).paddr
val rdataSel = LookupTree(raddr(2, 0), List(
"b000".U -> rdata(63, 0),
"b001".U -> rdata(63, 8),
......@@ -349,7 +355,7 @@ class LoadQueue extends XSModule
//
// Int load writeback will finish (if not blocked) in one cycle
io.ldout(i).bits.uop := seluop
io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.wb.rdata(i).exception.asBools
io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
io.ldout(i).bits.data := rdataPartialLoad
io.ldout(i).bits.redirectValid := false.B
......@@ -364,8 +370,8 @@ class LoadQueue extends XSModule
io.ldout(i).bits.uop.roqIdx.asUInt,
io.ldout(i).bits.uop.lqIdx.asUInt,
io.ldout(i).bits.uop.cf.pc,
dataModule.io.rdata(loadWbSel(i)).paddr,
dataModule.io.rdata(loadWbSel(i)).data,
dataModule.io.debug(loadWbSel(i)).paddr,
dataModule.io.debug(loadWbSel(i)).data,
debug_mmio(loadWbSel(i))
)
}
......@@ -433,18 +439,14 @@ class LoadQueue extends XSModule
val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
// check if load already in lq needs to be rolledback
val addrMatch = RegNext(VecInit((0 until LoadQueueSize).map(j => {
io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
})))
dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
})))
val overlap = RegNext(VecInit((0 until LoadQueueSize).map(j => {
val overlapVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
Cat(overlapVec).orR()
})))
val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
addrMatch(j) && entryNeedCheck(j) && overlap(j)
addrMaskMatch(j) && entryNeedCheck(j)
}))
val lqViolation = lqViolationVec.asUInt().orR()
val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
......@@ -552,18 +554,20 @@ class LoadQueue extends XSModule
io.roqDeqPtr === uop(deqPtr).roqIdx &&
!io.commits.isWalk
dataModule.io.uncache.raddr := deqPtr
io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask
io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask
io.uncache.req.bits.meta.id := DontCare
io.uncache.req.bits.meta.vaddr := DontCare
io.uncache.req.bits.meta.paddr := dataModule.io.rdata(deqPtr).paddr
io.uncache.req.bits.meta.paddr := dataModule.io.uncache.rdata.paddr
io.uncache.req.bits.meta.uop := uop(deqPtr)
io.uncache.req.bits.meta.mmio := true.B
io.uncache.req.bits.meta.tlb_miss := false.B
io.uncache.req.bits.meta.mask := dataModule.io.rdata(deqPtr).mask
io.uncache.req.bits.meta.mask := dataModule.io.uncache.rdata.mask
io.uncache.req.bits.meta.replay := false.B
io.uncache.resp.ready := true.B
......@@ -590,7 +594,8 @@ class LoadQueue extends XSModule
}
// Read vaddr for mem exception
io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr
vaddrModule.io.raddr(0) := io.exceptionAddr.lsIdx.lqIdx.value
io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)
// misprediction recovery / exception redirect
// invalidate lq term using robIdx
......@@ -646,7 +651,7 @@ class LoadQueue extends XSModule
for (i <- 0 until LoadQueueSize) {
if (i % 4 == 0) XSDebug("")
XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
PrintFlag(allocated(i), "a")
PrintFlag(allocated(i) && datavalid(i), "v")
PrintFlag(allocated(i) && writebacked(i), "w")
......
package xiangshan.mem
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
// Load queue data entry as stored (field by field) in LoadQueueData.
// vaddr is held in a separate vaddrModule, hence commented out here.
class LQDataEntry extends XSBundle {
// val vaddr = UInt(VAddrBits.W)
val paddr = UInt(PAddrBits.W)
val mask = UInt(8.W) // byte-enable mask
val data = UInt(XLEN.W)
val exception = UInt(16.W) // TODO: opt size
val fwdMask = Vec(8, Bool()) // per byte: set if the byte was store-forwarded
}
// Data module define
// These data modules are like SyncDataModuleTemplate, but support cam-like ops
// Physical-address array with CAM ports. Besides plain indexed read/write,
// it matches every stored paddr against (a) two store addresses at
// double-word granularity, for store-load violation checks, and (b) one
// refill address at cache-block granularity.
class PaddrModule(numEntries: Int, numRead: Int, numWrite: Int) extends XSModule with HasDCacheParameters {
val io = IO(new Bundle {
  val raddr = Input(Vec(numRead, UInt(log2Up(numEntries).W)))
  val rdata = Output(Vec(numRead, UInt((PAddrBits).W)))
  val wen = Input(Vec(numWrite, Bool()))
  val waddr = Input(Vec(numWrite, UInt(log2Up(numEntries).W)))
  val wdata = Input(Vec(numWrite, UInt((PAddrBits).W)))
  val violationMdata = Input(Vec(2, UInt((PAddrBits).W)))
  val violationMmask = Output(Vec(2, Vec(numEntries, Bool())))
  val refillMdata = Input(UInt((PAddrBits).W))
  val refillMmask = Output(Vec(numEntries, Bool()))
})

// backing storage: one physical address per queue entry
val paddrVec = Reg(Vec(numEntries, UInt((PAddrBits).W)))

// asynchronous read ports
io.rdata.zip(io.raddr).foreach { case (rport, addr) =>
  rport := paddrVec(addr)
}

// indexed write ports (simultaneous writes to one entry are forbidden
// by the assertion at the bottom)
io.wen.zipWithIndex.foreach { case (en, w) =>
  when (en) {
    paddrVec(io.waddr(w)) := io.wdata(w)
  }
}

// violation CAM: double-word address (bits above the low 3) comparison
// of each query against every entry
for (c <- 0 until 2; e <- 0 until numEntries) {
  io.violationMmask(c)(e) := io.violationMdata(c)(PAddrBits - 1, 3) === paddrVec(e)(PAddrBits - 1, 3)
}

// refill CAM: cache-block address comparison against every entry
io.refillMmask.zip(paddrVec).foreach { case (m, p) =>
  m := get_block_addr(io.refillMdata) === get_block_addr(p)
}

// this module does not support write conflicts: two ports must never
// write the same entry in the same cycle
for (i <- 0 until numWrite; j <- i + 1 until numWrite) {
  assert(!(io.wen(i) && io.wen(j) && io.waddr(i) === io.waddr(j)))
}
}
// Byte-mask array with a CAM port. Besides plain indexed read/write, it
// reports, per entry, whether a query mask overlaps the stored mask; the
// result is combined with PaddrModule's address match for store-load
// violation detection.
class MaskModule(numEntries: Int, numRead: Int, numWrite: Int) extends XSModule {
val io = IO(new Bundle {
  val raddr = Input(Vec(numRead, UInt(log2Up(numEntries).W)))
  val rdata = Output(Vec(numRead, UInt(8.W)))
  val wen = Input(Vec(numWrite, Bool()))
  val waddr = Input(Vec(numWrite, UInt(log2Up(numEntries).W)))
  val wdata = Input(Vec(numWrite, UInt(8.W)))
  // NOTE(review): declared PAddrBits wide though only an 8-bit mask is
  // compared below — width kept for interface compatibility
  val violationMdata = Input(Vec(2, UInt((PAddrBits).W)))
  val violationMmask = Output(Vec(2, Vec(numEntries, Bool())))
})

// backing storage: one 8-bit byte-enable mask per queue entry
val maskVec = Reg(Vec(numEntries, UInt(8.W)))

// asynchronous read ports
io.rdata.zip(io.raddr).foreach { case (rport, addr) =>
  rport := maskVec(addr)
}

// indexed write ports (simultaneous writes to one entry are forbidden
// by the assertion at the bottom)
io.wen.zipWithIndex.foreach { case (en, w) =>
  when (en) {
    maskVec(io.waddr(w)) := io.wdata(w)
  }
}

// CAM match: an entry matches when its mask and the query mask share at
// least one byte
for (c <- 0 until 2; e <- 0 until numEntries) {
  io.violationMmask(c)(e) := (io.violationMdata(c) & maskVec(e)).orR
}

// this module does not support write conflicts: two ports must never
// write the same entry in the same cycle
for (i <- 0 until numWrite; j <- i + 1 until numWrite) {
  assert(!(io.wen(i) && io.wen(j) && io.waddr(i) === io.waddr(j)))
}
}
// Data array for the load queue: holds each entry's XLEN-bit data word, its
// per-byte forward mask, and the word index of the entry within its cache
// block (derived from paddr at write time) so dcache refill data can be
// merged in without storing the full physical address here.
class CoredataModule(numEntries: Int, numRead: Int, numWrite: Int) extends XSModule with HasDCacheParameters {
val io = IO(new Bundle {
// data io
// read
val raddr = Input(Vec(numRead, UInt(log2Up(numEntries).W)))
val rdata = Output(Vec(numRead, UInt(XLEN.W)))
// address indexed write
val wen = Input(Vec(numWrite, Bool()))
val waddr = Input(Vec(numWrite, UInt(log2Up(numEntries).W)))
val wdata = Input(Vec(numWrite, UInt(XLEN.W)))
// masked write
val mwmask = Input(Vec(numEntries, Bool()))
val refillData = Input(UInt((cfg.blockBytes * 8).W))
// fwdMask io
val fwdMaskWdata = Input(Vec(numWrite, UInt(8.W)))
val fwdMaskWen = Input(Vec(numWrite, Bool()))
// fwdMaskWaddr = waddr
// paddr io
// 3 bits in paddr need to be stored in CoredataModule for refilling
val paddrWdata = Input(Vec(numWrite, UInt((PAddrBits).W)))
val paddrWen = Input(Vec(numWrite, Bool()))
})
// backing storage: data word, forward mask, and word-in-block index per entry
val data = Reg(Vec(numEntries, UInt(XLEN.W)))
val fwdMask = Reg(Vec(numEntries, UInt(8.W)))
val wordIndex = Reg(Vec(numEntries, UInt((blockOffBits - wordOffBits).W)))
// read ports
for (i <- 0 until numRead) {
io.rdata(i) := data(io.raddr(i))
}
// below is the write ports (with priorities)
// fwdMask and wordIndex share waddr with data but have separate enables
for (i <- 0 until numWrite) {
when (io.wen(i)) {
data(io.waddr(i)) := io.wdata(i)
}
when (io.fwdMaskWen(i)) {
fwdMask(io.waddr(i)) := io.fwdMaskWdata(i)
}
when (io.paddrWen(i)) {
// only the word offset inside the cache block is kept, not the full paddr
wordIndex(io.waddr(i)) := get_word(io.paddrWdata(i))
}
}
// masked write
// refill missed load
// merge refill bytes with already-forwarded bytes: forwarded bytes win
def mergeRefillData(refill: UInt, fwd: UInt, fwdMask: UInt): UInt = {
val res = Wire(Vec(8, UInt(8.W)))
(0 until 8).foreach(i => {
res(i) := Mux(fwdMask(i), fwd(8 * (i + 1) - 1, 8 * i), refill(8 * (i + 1) - 1, 8 * i))
})
res.asUInt
}
// split dcache result into words
val words = VecInit((0 until blockWords) map { i => io.refillData(DataBits * (i + 1) - 1, DataBits * i)})
// refill data according to matchMask, refillMask and refill.vald
for (j <- 0 until numEntries) {
when (io.mwmask(j)) {
// select this entry's word from the refilled block using the stored index
val refillData = words(wordIndex(j)) // TODO
data(j) := mergeRefillData(refillData, data(j), fwdMask(j))
}
}
// DataModuleTemplate should not be used when there're any write conflicts
for (i <- 0 until numWrite) {
for (j <- i+1 until numWrite) {
assert(!(io.wen(i) && io.wen(j) && io.waddr(i) === io.waddr(j)))
}
}
}
// Data storage for the load queue.
//
// Instead of one monolithic register file of whole LQDataEntry records, each
// field lives in its own specialized data module (paddr / mask / exception /
// data+fwdMask), so every field only pays for the read, write and CAM ports
// it actually needs.
//
// Ports:
// * wb        - wbNumRead read + wbNumWrite write ports used at load writeback
// * uncache   - one read port and one data-only write port for mmio loads
// * refill    - block-address CAM match plus masked data merge on dcache refill
// * violation - (paddr, mask) CAM lookup for store-load violation checks
class LoadQueueData(size: Int, wbNumRead: Int, wbNumWrite: Int) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
val io = IO(new Bundle() {
  val wb = new Bundle() {
    val wen = Vec(wbNumWrite, Input(Bool()))
    val waddr = Input(Vec(wbNumWrite, UInt(log2Up(size).W)))
    val wdata = Input(Vec(wbNumWrite, new LQDataEntry))
    val raddr = Input(Vec(wbNumRead, UInt(log2Up(size).W)))
    val rdata = Output(Vec(wbNumRead, new LQDataEntry))
  }
  val uncache = new Bundle() {
    val wen = Input(Bool())
    val waddr = Input(UInt(log2Up(size).W))
    val wdata = Input(UInt(XLEN.W)) // only write back uncache data
    val raddr = Input(UInt(log2Up(size).W))
    val rdata = Output(new LQDataEntry)
  }
  val refill = new Bundle() {
    val valid = Input(Bool())
    val paddr = Input(UInt(PAddrBits.W))
    val data = Input(UInt((cfg.blockBytes * 8).W))
    val refillMask = Input(Vec(size, Bool())) // entries waiting on this miss
    val matchMask = Output(Vec(size, Bool())) // entries whose block addr matches refill.paddr
  }
  val violation = Vec(StorePipelineWidth, new Bundle() {
    val paddr = Input(UInt(PAddrBits.W))
    val mask = Input(UInt(8.W))
    val violationMask = Output(Vec(size, Bool()))
  })
  val debug = Output(Vec(size, new LQDataEntry))

  // Drive write address/data of wb channel `channel`.
  def wbWrite(channel: Int, waddr: UInt, wdata: LQDataEntry): Unit = {
    // bugfix: validate the channel index itself; the old condition
    // "wbNumWrite >= 0" was vacuously true and checked nothing
    require(channel < wbNumWrite && channel >= 0)
    // need extra "this.wb.wen(channel) := true.B"
    this.wb.waddr(channel) := waddr
    this.wb.wdata(channel) := wdata
  }

  // Drive the uncache data-only write port.
  def uncacheWrite(waddr: UInt, wdata: UInt): Unit = {
    // need extra "this.uncache.wen := true.B"
    this.uncache.waddr := waddr
    this.uncache.wdata := wdata
  }

  // def refillWrite(ldIdx: Int): Unit = {
  // }
  // use "this.refill.wen(ldIdx) := true.B" instead
})

// Field-separated data modules. Port counts are derived from the
// parameters instead of the old hard-coded 3/2/3: the extra read port
// (index wbNumRead) serves the uncache unit, and coredata's extra write
// port (index wbNumWrite) serves the uncache data writeback.
val paddrModule = Module(new PaddrModule(size, numRead = wbNumRead + 1, numWrite = wbNumWrite))
val maskModule = Module(new MaskModule(size, numRead = wbNumRead + 1, numWrite = wbNumWrite))
val exceptionModule = Module(new AsyncDataModuleTemplate(UInt(16.W), size, numRead = wbNumRead + 1, numWrite = wbNumWrite))
val coredataModule = Module(new CoredataModule(size, numRead = wbNumRead + 1, numWrite = wbNumWrite + 1))

// read ports 0 .. wbNumRead-1: load writeback
(0 until wbNumRead).map(i => {
  paddrModule.io.raddr(i) := io.wb.raddr(i)
  maskModule.io.raddr(i) := io.wb.raddr(i)
  exceptionModule.io.raddr(i) := io.wb.raddr(i)
  coredataModule.io.raddr(i) := io.wb.raddr(i)

  io.wb.rdata(i).paddr := paddrModule.io.rdata(i)
  io.wb.rdata(i).mask := maskModule.io.rdata(i)
  io.wb.rdata(i).data := coredataModule.io.rdata(i)
  io.wb.rdata(i).exception := exceptionModule.io.rdata(i)
  io.wb.rdata(i).fwdMask := DontCare
})

// read port wbNumRead: uncache
paddrModule.io.raddr(wbNumRead) := io.uncache.raddr
maskModule.io.raddr(wbNumRead) := io.uncache.raddr
exceptionModule.io.raddr(wbNumRead) := io.uncache.raddr
coredataModule.io.raddr(wbNumRead) := io.uncache.raddr

io.uncache.rdata.paddr := paddrModule.io.rdata(wbNumRead)
io.uncache.rdata.mask := maskModule.io.rdata(wbNumRead)
// bugfix: data must come from coredataModule and exception from
// exceptionModule — these two connections were swapped (compare with the
// wb read ports above)
io.uncache.rdata.data := coredataModule.io.rdata(wbNumRead)
io.uncache.rdata.exception := exceptionModule.io.rdata(wbNumRead)
io.uncache.rdata.fwdMask := DontCare

// write ports 0 .. wbNumWrite-1: load writeback writes every field
(0 until wbNumWrite).map(i => {
  paddrModule.io.wen(i) := false.B
  maskModule.io.wen(i) := false.B
  exceptionModule.io.wen(i) := false.B
  coredataModule.io.wen(i) := false.B
  coredataModule.io.fwdMaskWen(i) := false.B
  coredataModule.io.paddrWen(i) := false.B

  paddrModule.io.waddr(i) := io.wb.waddr(i)
  maskModule.io.waddr(i) := io.wb.waddr(i)
  exceptionModule.io.waddr(i) := io.wb.waddr(i)
  coredataModule.io.waddr(i) := io.wb.waddr(i)

  paddrModule.io.wdata(i) := io.wb.wdata(i).paddr
  maskModule.io.wdata(i) := io.wb.wdata(i).mask
  exceptionModule.io.wdata(i) := io.wb.wdata(i).exception
  coredataModule.io.wdata(i) := io.wb.wdata(i).data
  coredataModule.io.fwdMaskWdata(i) := io.wb.wdata(i).fwdMask.asUInt
  coredataModule.io.paddrWdata(i) := io.wb.wdata(i).paddr

  when(io.wb.wen(i)){
    paddrModule.io.wen(i) := true.B
    maskModule.io.wen(i) := true.B
    exceptionModule.io.wen(i) := true.B
    coredataModule.io.wen(i) := true.B
    coredataModule.io.fwdMaskWen(i) := true.B
    coredataModule.io.paddrWen(i) := true.B
  }
})

// write port wbNumWrite: uncache writes only the data field; the other
// fields keep the values written at load writeback
coredataModule.io.wen(wbNumWrite) := io.uncache.wen
coredataModule.io.fwdMaskWen(wbNumWrite) := false.B
coredataModule.io.paddrWen(wbNumWrite) := false.B
coredataModule.io.waddr(wbNumWrite) := io.uncache.waddr
coredataModule.io.fwdMaskWdata(wbNumWrite) := DontCare
coredataModule.io.paddrWdata(wbNumWrite) := DontCare
coredataModule.io.wdata(wbNumWrite) := io.uncache.wdata

// store-load violation check: an entry matches when its double-word paddr
// equals the store's AND the byte masks overlap
(0 until StorePipelineWidth).map(i => {
  paddrModule.io.violationMdata(i) := io.violation(i).paddr
  maskModule.io.violationMdata(i) := io.violation(i).mask
  io.violation(i).violationMask := (paddrModule.io.violationMmask(i).asUInt & maskModule.io.violationMmask(i).asUInt).asBools
})

// refill: block-address CAM match. One assignment drives the whole vec;
// the old code re-assigned it redundantly inside a (0 until size) loop.
paddrModule.io.refillMdata := io.refill.paddr
io.refill.matchMask := paddrModule.io.refillMmask

// refill data is merged with forwarded bytes inside coredataModule for
// every entry selected by matchMask, refillMask and refill.valid
// (the unused local mergeRefillData duplicate was removed; the merge
// lives in CoredataModule)
coredataModule.io.refillData := io.refill.data
(0 until size).map(i => {
  coredataModule.io.mwmask(i) := io.refill.valid && io.refill.matchMask(i) && io.refill.refillMask(i)
})

// debug data read (not wired up for the split data modules)
io.debug := DontCare
}
......@@ -132,7 +132,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
writebacked(stWbIndex) := hasWritebacked
pending(stWbIndex) := !hasWritebacked // valid mmio require
val storeWbData = Wire(new LsqEntry)
val storeWbData = Wire(new SQDataEntry)
storeWbData := DontCare
storeWbData.paddr := io.storeIn(i).bits.paddr
storeWbData.mask := io.storeIn(i).bits.mask
......@@ -322,7 +322,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
}
// Read vaddr for mem exception
io.exceptionAddr.vaddr := exceptionModule.io.rdata(0)
io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)
// misprediction recovery / exception redirect
// invalidate sq term using robIdx
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册