提交 e59f18af 编写于 作者: Y YikeZhou

Merge branch 'master' into decode-alt

......@@ -56,6 +56,7 @@ $(SIM_TOP_V): $(SCALA_FILE) $(TEST_FILE)
date -R
mill XiangShan.test.runMain $(SIMTOP) -X verilog -td $(@D) --full-stacktrace --output-file $(@F) $(SIM_ARGS)
sed -i '/module XSSimTop/,/endmodule/d' $(SIM_TOP_V)
sed -i -e 's/$$fatal/$$finish/g' $(SIM_TOP_V)
date -R
EMU_TOP = XSSimSoC
......
......@@ -44,11 +44,26 @@ class DummyCore()(implicit p: Parameters) extends LazyModule {
class XSSoc()(implicit p: Parameters) extends LazyModule with HasSoCParameter {
private val cores = Seq.fill(NumCores)(LazyModule(new XSCore()))
// CPU Cores
private val xs_core = Seq.fill(NumCores)(LazyModule(new XSCore()))
// only mem and extDev visible externally
val dma = AXI4IdentityNode()
val extDev = AXI4IdentityNode()
// L1 to L2 network
// -------------------------------------------------
private val l2_xbar = Seq.fill(NumCores)(TLXbar())
private val l2cache = Seq.fill(NumCores)(LazyModule(new InclusiveCache(
CacheParameters(
level = 2,
ways = L2NWays,
sets = L2NSets,
blockBytes = L2BlockSize,
beatBytes = L1BusWidth / 8, // beatBytes = l1BusDataWidth / 8
cacheName = s"L2"
),
InclusiveCacheMicroParameters(
writeBytes = 8
)
)))
// L2 to L3 network
// -------------------------------------------------
......@@ -69,7 +84,26 @@ class XSSoc()(implicit p: Parameters) extends LazyModule with HasSoCParameter {
)
)))
cores.foreach(core => l3_xbar := TLBuffer() := DebugIdentityNode() := core.mem)
// L3 to memory network
// -------------------------------------------------
private val memory_xbar = TLXbar()
private val mmioXbar = TLXbar()
// only mem, dma and extDev are visible externally
val mem = Seq.fill(L3NBanks)(AXI4IdentityNode())
val dma = AXI4IdentityNode()
val extDev = AXI4IdentityNode()
// connections
// -------------------------------------------------
for (i <- 0 until NumCores) {
l2_xbar(i) := TLBuffer() := DebugIdentityNode() := xs_core(i).dcache.clientNode
l2_xbar(i) := TLBuffer() := DebugIdentityNode() := xs_core(i).l1pluscache.clientNode
l2_xbar(i) := TLBuffer() := DebugIdentityNode() := xs_core(i).ptw.node
mmioXbar := TLBuffer() := DebugIdentityNode() := xs_core(i).uncache.clientNode
l2cache(i).node := TLBuffer() := DebugIdentityNode() := l2_xbar(i)
l3_xbar := TLBuffer() := DebugIdentityNode() := l2cache(i).node
}
// DMA should not go to MMIO
val mmioRange = AddressSet(base = 0x0000000000L, mask = 0x007fffffffL)
......@@ -103,12 +137,6 @@ class XSSoc()(implicit p: Parameters) extends LazyModule with HasSoCParameter {
l3_banks(i).node := TLBuffer() := DebugIdentityNode() := filter := l3_xbar
}
// L3 to memory network
// -------------------------------------------------
private val memory_xbar = TLXbar()
val mem = Seq.fill(L3NBanks)(AXI4IdentityNode())
for(i <- 0 until L3NBanks) {
mem(i) :=
AXI4UserYanker() :=
......@@ -118,37 +146,24 @@ class XSSoc()(implicit p: Parameters) extends LazyModule with HasSoCParameter {
l3_banks(i).node
}
private val mmioXbar = TLXbar()
private val clint = LazyModule(new TLTimer(
Seq(AddressSet(0x38000000L, 0x0000ffffL)),
sim = !env.FPGAPlatform
))
cores.foreach(core =>
mmioXbar :=
TLBuffer() :=
DebugIdentityNode() :=
core.mmio
)
clint.node :=
mmioXbar
extDev :=
AXI4UserYanker() :=
TLToAXI4() :=
mmioXbar
clint.node := mmioXbar
extDev := AXI4UserYanker() := TLToAXI4() := mmioXbar
lazy val module = new LazyModuleImp(this){
val io = IO(new Bundle{
val meip = Input(Bool())
val ila = if(env.FPGAPlatform && EnableILA) Some(Output(new ILABundle)) else None
})
cores.foreach(core => {
core.module.io.externalInterrupt.mtip := clint.module.io.mtip
core.module.io.externalInterrupt.msip := clint.module.io.msip
core.module.io.externalInterrupt.meip := RegNext(RegNext(io.meip))
})
for (i <- 0 until NumCores) {
xs_core(i).module.io.externalInterrupt.mtip := clint.module.io.mtip
xs_core(i).module.io.externalInterrupt.msip := clint.module.io.msip
xs_core(i).module.io.externalInterrupt.meip := RegNext(RegNext(io.meip))
}
// do not let dma AXI signals optimized out
chisel3.dontTouch(dma.out.head._1)
chisel3.dontTouch(extDev.out.head._1)
......
......@@ -55,11 +55,25 @@ object DisableAllPrintAnnotation extends HasShellOptions {
)
}
// Marker annotation (no target) telling the elaboration pipeline that every
// 'assert' statement should be stripped from the generated design.
case class RemoveAssertAnnotation() extends NoTargetAnnotation
// Companion object wiring the annotation to a command-line switch, following
// the same HasShellOptions pattern as the printf-related annotations above.
object RemoveAssertAnnotation extends HasShellOptions {
  val options = Seq(
    new ShellOption[Unit](
      longOption = "remove-assert",
      shortOption = None,
      helpText = "All the 'assert' will be removed\n",
      toAnnotationSeq = _ => Seq(RemoveAssertAnnotation())
    )
  )
}
// Mixes the XiangShan-specific command-line options into a FIRRTL Shell.
// Registration order is preserved exactly as before (printf options first,
// then the assert-removal switch), since it determines help-text ordering.
trait XiangShanCli { this: Shell =>
  parser.note("XiangShan Options")
  Seq(
    DisablePrintfAnnotation,
    EnablePrintfAnnotation,
    DisableAllPrintAnnotation,
    RemoveAssertAnnotation
  ).foreach(_.addOptions(parser))
}
class XiangShanStage extends chisel3.stage.ChiselStage {
......
......@@ -51,21 +51,21 @@ case class XSCoreParameters
DecodeWidth: Int = 6,
RenameWidth: Int = 6,
CommitWidth: Int = 6,
BrqSize: Int = 12,
BrqSize: Int = 32,
IssQueSize: Int = 8,
NRPhyRegs: Int = 128,
NRPhyRegs: Int = 160,
NRIntReadPorts: Int = 14,
NRIntWritePorts: Int = 8,
NRFpReadPorts: Int = 14,
NRFpWritePorts: Int = 8,
LoadQueueSize: Int = 12,
StoreQueueSize: Int = 10,
RoqSize: Int = 32,
LoadQueueSize: Int = 64,
StoreQueueSize: Int = 48,
RoqSize: Int = 192,
dpParams: DispatchParameters = DispatchParameters(
DqEnqWidth = 4,
IntDqSize = 24,
FpDqSize = 16,
LsDqSize = 16,
IntDqSize = 128,
FpDqSize = 128,
LsDqSize = 96,
IntDqDeqWidth = 4,
FpDqDeqWidth = 4,
LsDqDeqWidth = 4,
......@@ -259,41 +259,12 @@ object AddressSpace extends HasXSParameter {
class XSCore()(implicit p: config.Parameters) extends LazyModule with HasXSParameter {
// inner nodes
// outer facing nodes
val dcache = LazyModule(new DCache())
val uncache = LazyModule(new Uncache())
val l1pluscache = LazyModule(new L1plusCache())
val ptw = LazyModule(new PTW())
// out facing nodes
val mem = TLIdentityNode()
val mmio = uncache.clientNode
// L1 to L2 network
// -------------------------------------------------
private val l2_xbar = TLXbar()
private val l2 = LazyModule(new InclusiveCache(
CacheParameters(
level = 2,
ways = L2NWays,
sets = L2NSets,
blockBytes = L2BlockSize,
beatBytes = L1BusWidth / 8, // beatBytes = l1BusDataWidth / 8
cacheName = s"L2"
),
InclusiveCacheMicroParameters(
writeBytes = 8
)
))
l2_xbar := TLBuffer() := DebugIdentityNode() := dcache.clientNode
l2_xbar := TLBuffer() := DebugIdentityNode() := l1pluscache.clientNode
l2_xbar := TLBuffer() := DebugIdentityNode() := ptw.node
l2.node := TLBuffer() := DebugIdentityNode() := l2_xbar
mem := l2.node
lazy val module = new XSCoreImp(this)
}
......
......@@ -103,6 +103,7 @@ class CtrlBlock extends XSModule {
rename.io.redirect <> redirect
rename.io.roqCommits <> roq.io.commits
rename.io.out <> dispatch.io.fromRename
rename.io.renameBypass <> dispatch.io.renameBypass
dispatch.io.redirect <> redirect
dispatch.io.enqRoq <> roq.io.enq
......
......@@ -193,9 +193,17 @@ class MemBlock
storeUnits(i).io.stin <> reservationStations(exuParameters.LduCnt + i).io.deq
// passdown to lsq
storeUnits(i).io.lsq <> lsq.io.storeIn(i)
io.toCtrlBlock.stOut(i).valid := lsq.io.stout(i).valid
io.toCtrlBlock.stOut(i).bits := lsq.io.stout(i).bits
lsq.io.stout(i).ready := true.B
io.toCtrlBlock.stOut(i).valid := storeUnits(i).io.stout.valid
io.toCtrlBlock.stOut(i).bits := storeUnits(i).io.stout.bits
storeUnits(i).io.stout.ready := true.B
}
// mmio store writeback will use store writeback port 0
lsq.io.mmioStout.ready := false.B
when(lsq.io.mmioStout.valid && !storeUnits(0).io.stout.valid) {
io.toCtrlBlock.stOut(0).valid := true.B
lsq.io.mmioStout.ready := true.B
io.toCtrlBlock.stOut(0).bits := lsq.io.mmioStout.bits
}
// Lsq
......
......@@ -40,18 +40,7 @@ class DecodeBuffer extends XSModule {
val r = RegEnable(io.in(i).bits, io.in(i).fire())
io.in(i).ready := leftCanIn
io.out(i).bits <> r
if(i > 0 ){
io.out(i).valid := validVec(i) &&
!flush &&
// Mux(r.ctrl.noSpecExec,
!ParallelOR(validVec.take(i))//,
// !ParallelOR(io.out.zip(validVec).take(i).map(x => x._2 && x._1.bits.ctrl.noSpecExec))
//) &&
!io.isWalking
} else {
require( i == 0)
io.out(i).valid := validVec(i) && !flush && !io.isWalking
}
io.out(i).valid := validVec(i) && !flush && !io.isWalking
}
for(in <- io.in){
......
......@@ -7,6 +7,7 @@ import utils._
import xiangshan.backend.regfile.RfReadPort
import chisel3.ExcitingUtils._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.rename.RenameBypassInfo
case class DispatchParameters
(
......@@ -28,6 +29,7 @@ class Dispatch extends XSModule {
val redirect = Flipped(ValidIO(new Redirect))
// from rename
val fromRename = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
val renameBypass = Input(new RenameBypassInfo)
// enq Roq
val enqRoq = new Bundle {
val canAccept = Input(Bool())
......@@ -72,6 +74,7 @@ class Dispatch extends XSModule {
// dispatch 1: accept uops from rename and dispatch them to the three dispatch queues
dispatch1.io.redirect <> io.redirect
dispatch1.io.renameBypass := RegEnable(io.renameBypass, io.fromRename(0).valid && dispatch1.io.fromRename(0).ready)
dispatch1.io.enqRoq <> io.enqRoq
dispatch1.io.enqLsq <> io.enqLsq
dispatch1.io.toIntDqReady <> intDq.io.enqReady
......
......@@ -6,6 +6,7 @@ import chisel3.ExcitingUtils._
import xiangshan._
import utils.{XSDebug, XSError, XSInfo}
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.rename.RenameBypassInfo
// read rob and enqueue
class Dispatch1 extends XSModule {
......@@ -13,6 +14,7 @@ class Dispatch1 extends XSModule {
val redirect = Flipped(ValidIO(new Redirect))
// from rename
val fromRename = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
val renameBypass = Input(new RenameBypassInfo)
val recv = Output(Vec(RenameWidth, Bool()))
// enq Roq
val enqRoq = new Bundle {
......@@ -38,6 +40,8 @@ class Dispatch1 extends XSModule {
val toLsDqReady = Input(Bool())
val toLsDq = Vec(dpParams.DqEnqWidth, ValidIO(new MicroOp))
})
/**
* Part 1: choose the target dispatch queue and the corresponding write ports
*/
......@@ -67,8 +71,54 @@ class Dispatch1 extends XSModule {
ExcitingUtils.addSource(!dispatchNotEmpty, "perfCntCondDp1Empty", Perf)
}
/**
* Part 2:
* Update commitType, psrc1, psrc2, psrc3, old_pdest for the uops
*/
val updatedUop = Wire(Vec(RenameWidth, new MicroOp))
val updatedCommitType = Wire(Vec(RenameWidth, CommitType()))
val updatedPsrc1 = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))
val updatedPsrc2 = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))
val updatedPsrc3 = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))
val updatedOldPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))
for (i <- 0 until RenameWidth) {
updatedCommitType(i) := Cat(isLs(i), isStore(i) | isFp(i))
updatedPsrc1(i) := io.fromRename.take(i).map(_.bits.pdest)
.zip(if (i == 0) Seq() else io.renameBypass.lsrc1_bypass(i-1).asBools)
.foldLeft(io.fromRename(i).bits.psrc1) {
(z, next) => Mux(next._2, next._1, z)
}
updatedPsrc2(i) := io.fromRename.take(i).map(_.bits.pdest)
.zip(if (i == 0) Seq() else io.renameBypass.lsrc2_bypass(i-1).asBools)
.foldLeft(io.fromRename(i).bits.psrc2) {
(z, next) => Mux(next._2, next._1, z)
}
updatedPsrc3(i) := io.fromRename.take(i).map(_.bits.pdest)
.zip(if (i == 0) Seq() else io.renameBypass.lsrc3_bypass(i-1).asBools)
.foldLeft(io.fromRename(i).bits.psrc3) {
(z, next) => Mux(next._2, next._1, z)
}
updatedOldPdest(i) := io.fromRename.take(i).map(_.bits.pdest)
.zip(if (i == 0) Seq() else io.renameBypass.ldest_bypass(i-1).asBools)
.foldLeft(io.fromRename(i).bits.old_pdest) {
(z, next) => Mux(next._2, next._1, z)
}
updatedUop(i) := io.fromRename(i).bits
// update bypass psrc1/psrc2/psrc3/old_pdest
updatedUop(i).psrc1 := updatedPsrc1(i)
updatedUop(i).psrc2 := updatedPsrc2(i)
updatedUop(i).psrc3 := updatedPsrc3(i)
updatedUop(i).old_pdest := updatedOldPdest(i)
// update commitType
updatedUop(i).ctrl.commitType := updatedCommitType(i)
}
/**
* Part 3:
* acquire ROQ (all), LSQ (load/store only) and dispatch queue slots
* only set valid when all of them provides enough entries
*/
......@@ -82,20 +132,22 @@ class Dispatch1 extends XSModule {
// Thus, for i >= dpParams.DqEnqWidth, we have to check whether it's previous instructions (and the instruction itself) can enqueue.
// However, since, for instructions with indices less than dpParams.DqEnqWidth,
// they can always enter dispatch queue when ROB and LSQ are ready, we don't need to check whether they can enqueue.
// thisIsBlocked: this instruction is blocked by itself
// thisCanOut: this instruction can enqueue
// nextCanOut: next instructions can out
// thisIsBlocked: this instruction is blocked by itself (based on noSpecExec)
// thisCanOut: this instruction can enqueue (based on resource)
// nextCanOut: next instructions can out (based on blockBackward and previous instructions)
// notBlockedByPrevious: previous instructions can enqueue
val thisIsBlocked = VecInit((0 until RenameWidth).map(i =>
isNoSpecExec(i) && !io.enqRoq.isEmpty
))
val thisIsBlocked = VecInit((0 until RenameWidth).map(i => {
// for i > 0, when Roq is empty but dispatch1 have valid instructions to enqueue, it's blocked
if (i > 0) isNoSpecExec(i) && (!io.enqRoq.isEmpty || Cat(io.fromRename.take(i).map(_.valid)).orR)
else isNoSpecExec(i) && !io.enqRoq.isEmpty
}))
val thisCanOut = VecInit((0 until RenameWidth).map(i => {
// For i in [0, DqEnqWidth), they can always enqueue when ROB and LSQ are ready
if (i < dpParams.DqEnqWidth) !thisIsBlocked(i)
else Cat(Seq(intIndex, fpIndex, lsIndex).map(_.io.reverseMapping(i).valid)).orR && !thisIsBlocked(i)
if (i < dpParams.DqEnqWidth) true.B
else Cat(Seq(intIndex, fpIndex, lsIndex).map(_.io.reverseMapping(i).valid)).orR
}))
val nextCanOut = VecInit((0 until RenameWidth).map(i =>
(thisCanOut(i) && !isBlockBackward(i)) || !io.fromRename(i).valid
(thisCanOut(i) && !isNoSpecExec(i) && !isBlockBackward(i)) || !io.fromRename(i).valid
))
val notBlockedByPrevious = VecInit((0 until RenameWidth).map(i =>
if (i == 0) true.B
......@@ -105,53 +157,55 @@ class Dispatch1 extends XSModule {
// this instruction can actually dequeue: 3 conditions
// (1) resources are ready
// (2) previous instructions are ready
val thisCanActualOut = (0 until RenameWidth).map(i => allResourceReady && thisCanOut(i) && notBlockedByPrevious(i))
val uopWithIndex = Wire(Vec(RenameWidth, new MicroOp))
val thisCanActualOut = (0 until RenameWidth).map(i => allResourceReady && thisCanOut(i) && !thisIsBlocked(i) && notBlockedByPrevious(i))
// input for ROQ and LSQ
// note that LSQ needs roqIdx
for (i <- 0 until RenameWidth) {
// input for ROQ and LSQ
val commitType = Cat(isLs(i), isStore(i) | isFp(i))
io.enqRoq.extraWalk(i) := io.fromRename(i).valid && !thisCanActualOut(i)
io.enqRoq.req(i).valid := io.fromRename(i).valid && thisCanActualOut(i)
io.enqRoq.req(i).bits := io.fromRename(i).bits
io.enqRoq.req(i).bits.ctrl.commitType := commitType
io.enqRoq.req(i).bits := updatedUop(i)
val shouldEnqLsq = isLs(i) && io.fromRename(i).bits.ctrl.fuType =/= FuType.mou
io.enqLsq.req(i).valid := io.fromRename(i).valid && shouldEnqLsq && !redirectValid && thisCanActualOut(i)
io.enqLsq.req(i).bits := io.fromRename(i).bits
io.enqLsq.req(i).bits.ctrl.commitType := commitType
io.enqLsq.req(i).bits := updatedUop(i)
io.enqLsq.req(i).bits.roqIdx := io.enqRoq.resp(i)
// append ROQ and LSQ indexed to uop
uopWithIndex(i) := io.fromRename(i).bits
uopWithIndex(i).roqIdx := io.enqRoq.resp(i)
uopWithIndex(i).lqIdx := io.enqLsq.resp(i).lqIdx
uopWithIndex(i).sqIdx := io.enqLsq.resp(i).sqIdx
XSDebug(io.enqLsq.req(i).valid,
p"pc 0x${Hexadecimal(io.fromRename(i).bits.cf.pc)} receives lq ${io.enqLsq.resp(i).lqIdx} sq ${io.enqLsq.resp(i).sqIdx}\n")
XSDebug(io.enqRoq.req(i).valid, p"pc 0x${Hexadecimal(io.fromRename(i).bits.cf.pc)} receives nroq ${io.enqRoq.resp(i)}\n")
}
/**
* Part 4:
* append ROQ and LSQ indexed to uop, and send them to dispatch queue
*/
val updateUopWithIndex = Wire(Vec(RenameWidth, new MicroOp))
for (i <- 0 until RenameWidth) {
updateUopWithIndex(i) := updatedUop(i)
updateUopWithIndex(i).roqIdx := io.enqRoq.resp(i)
updateUopWithIndex(i).lqIdx := io.enqLsq.resp(i).lqIdx
updateUopWithIndex(i).sqIdx := io.enqLsq.resp(i).sqIdx
}
// send uops with correct indexes to dispatch queues
// Note that if one of their previous instructions cannot enqueue, they should not enter dispatch queue.
// We use notBlockedByPrevious here since mapping(i).valid implies there's a valid instruction that can enqueue,
// thus we don't need to check thisCanOut.
for (i <- 0 until dpParams.DqEnqWidth) {
io.toIntDq(i).bits := uopWithIndex(intIndex.io.mapping(i).bits)
io.toIntDq(i).bits := updateUopWithIndex(intIndex.io.mapping(i).bits)
io.toIntDq(i).valid := intIndex.io.mapping(i).valid && allResourceReady &&
!thisIsBlocked(intIndex.io.mapping(i).bits) && notBlockedByPrevious(intIndex.io.mapping(i).bits)
// NOTE: floating point instructions are not noSpecExec currently
// remove commit /**/ when fp instructions are possible to be noSpecExec
io.toFpDq(i).bits := uopWithIndex(fpIndex.io.mapping(i).bits)
io.toFpDq(i).bits := updateUopWithIndex(fpIndex.io.mapping(i).bits)
io.toFpDq(i).valid := fpIndex.io.mapping(i).valid && allResourceReady &&
/*!thisIsBlocked(fpIndex.io.mapping(i).bits) && */notBlockedByPrevious(fpIndex.io.mapping(i).bits)
io.toLsDq(i).bits := uopWithIndex(lsIndex.io.mapping(i).bits)
io.toLsDq(i).bits := updateUopWithIndex(lsIndex.io.mapping(i).bits)
io.toLsDq(i).valid := lsIndex.io.mapping(i).valid && allResourceReady &&
!thisIsBlocked(lsIndex.io.mapping(i).bits) && notBlockedByPrevious(lsIndex.io.mapping(i).bits)
......@@ -170,7 +224,7 @@ class Dispatch1 extends XSModule {
XSInfo(io.recv(i),
p"pc 0x${Hexadecimal(io.fromRename(i).bits.cf.pc)}, type(${isInt(i)}, ${isFp(i)}, ${isLs(i)}), " +
p"roq ${uopWithIndex(i).roqIdx}, lq ${uopWithIndex(i).lqIdx}, sq ${uopWithIndex(i).sqIdx}, " +
p"roq ${updateUopWithIndex(i).roqIdx}, lq ${updateUopWithIndex(i).lqIdx}, sq ${updateUopWithIndex(i).sqIdx}, " +
p"(${intIndex.io.reverseMapping(i).bits}, ${fpIndex.io.reverseMapping(i).bits}, ${lsIndex.io.reverseMapping(i).bits})\n"
)
......
......@@ -70,7 +70,10 @@ class FreeList extends XSModule with HasFreeListConsts with HasCircularQueuePtrH
tailPtr := tailPtrNext
// allocate new pregs to rename instructions
val freeRegs = distanceBetween(tailPtr, headPtr)
// number of free regs in freelist
val freeRegs = Wire(UInt())
// use RegNext for better timing
val hasEnoughRegs = RegNext(freeRegs >= RenameWidth.U, true.B)
XSDebug(p"free regs: $freeRegs\n")
......@@ -91,6 +94,7 @@ class FreeList extends XSModule with HasFreeListConsts with HasCircularQueuePtrH
XSDebug(p"req:${io.allocReqs(i)} canAlloc:$hasEnoughRegs pdest:${io.pdests(i)}\n")
}
val headPtrNext = Mux(hasEnoughRegs, newHeadPtrs.last, headPtr)
freeRegs := distanceBetween(tailPtr, headPtrNext)
headPtr := Mux(io.redirect.valid, // mispredict or exception happen
Mux(io.redirect.bits.isException || io.redirect.bits.isFlushPipe, // TODO: need check by JiaWei
......@@ -107,14 +111,13 @@ class FreeList extends XSModule with HasFreeListConsts with HasCircularQueuePtrH
XSDebug(io.redirect.valid, p"redirect: brqIdx=${io.redirect.bits.brTag.value}\n")
if(env.EnableDebug){
for( i <- 0 until FL_SIZE){
for(j <- i+1 until FL_SIZE){
assert(freeList(i) != freeList(j), s"Found same entry in freelist! (i=$i j=$j)")
}
}
val enableFreelistCheck = false
if(env.EnableDebug && enableFreelistCheck){
for( i <- 0 until FL_SIZE){
for(j <- i+1 until FL_SIZE){
assert(freeList(i) != freeList(j), s"Found same entry in freelist! (i=$i j=$j)")
}
}
}
}
......@@ -5,6 +5,13 @@ import chisel3.util._
import xiangshan._
import utils.XSInfo
// Intra-group dependence masks passed from Rename to Dispatch1.
// For instruction i (1 <= i < RenameWidth), entry i-1 is an i-bit mask whose
// bit j is set when that source (or dest, for ldest_bypass) of instruction i
// names the same logical register as the destination of earlier instruction j
// in the same rename group — Dispatch1 then takes the physical register from
// instruction j's pdest instead of the (stale) rename-table read.
// MixedVec is required because the mask width grows with position i.
class RenameBypassInfo extends XSBundle {
  val lsrc1_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W))) // src1 vs. earlier dests
  val lsrc2_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W))) // src2 vs. earlier dests
  val lsrc3_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W))) // src3 vs. earlier dests
  val ldest_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W))) // ldest vs. earlier dests (old_pdest bypass)
}
class Rename extends XSModule {
val io = IO(new Bundle() {
val redirect = Flipped(ValidIO(new Redirect))
......@@ -13,6 +20,7 @@ class Rename extends XSModule {
val in = Vec(RenameWidth, Flipped(DecoupledIO(new CfCtrl)))
// to dispatch1
val out = Vec(RenameWidth, DecoupledIO(new MicroOp))
val renameBypass = Output(new RenameBypassInfo)
})
def printRenameInfo(in: DecoupledIO[CfCtrl], out: DecoupledIO[MicroOp]) = {
......@@ -61,6 +69,8 @@ class Rename extends XSModule {
uop.sqIdx := DontCare
})
val needFpDest = Wire(Vec(RenameWidth, Bool()))
val needIntDest = Wire(Vec(RenameWidth, Bool()))
var lastReady = WireInit(io.out(0).ready)
// debug assert
val outRdy = Cat(io.out.map(_.ready))
......@@ -73,17 +83,17 @@ class Rename extends XSModule {
val inValid = io.in(i).valid
// alloc a new phy reg
val needFpDest = inValid && needDestReg(fp = true, io.in(i).bits)
val needIntDest = inValid && needDestReg(fp = false, io.in(i).bits)
fpFreeList.allocReqs(i) := needFpDest && lastReady
intFreeList.allocReqs(i) := needIntDest && lastReady
needFpDest(i) := inValid && needDestReg(fp = true, io.in(i).bits)
needIntDest(i) := inValid && needDestReg(fp = false, io.in(i).bits)
fpFreeList.allocReqs(i) := needFpDest(i) && lastReady
intFreeList.allocReqs(i) := needIntDest(i) && lastReady
val fpCanAlloc = fpFreeList.canAlloc(i)
val intCanAlloc = intFreeList.canAlloc(i)
val this_can_alloc = Mux(
needIntDest,
needIntDest(i),
intCanAlloc,
Mux(
needFpDest,
needFpDest(i),
fpCanAlloc,
true.B
)
......@@ -98,7 +108,7 @@ class Rename extends XSModule {
lastReady = io.in(i).ready
uops(i).pdest := Mux(needIntDest,
uops(i).pdest := Mux(needIntDest(i),
intFreeList.pdests(i),
Mux(
uops(i).ctrl.ldest===0.U && uops(i).ctrl.rfWen,
......@@ -173,6 +183,28 @@ class Rename extends XSModule {
uops(i).old_pdest := Mux(uops(i).ctrl.rfWen, intOldPdest, fpOldPdest)
}
// We don't bypass the old_pdest from valid instructions with the same ldest currently in rename stage.
// Instead, we determine whether there're some dependences between the valid instructions.
for (i <- 1 until RenameWidth) {
io.renameBypass.lsrc1_bypass(i-1) := Cat((0 until i).map(j => {
val fpMatch = needFpDest(j) && io.in(i).bits.ctrl.src1Type === SrcType.fp
val intMatch = needIntDest(j) && io.in(i).bits.ctrl.src1Type === SrcType.reg
(fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc1
}).reverse)
io.renameBypass.lsrc2_bypass(i-1) := Cat((0 until i).map(j => {
val fpMatch = needFpDest(j) && io.in(i).bits.ctrl.src2Type === SrcType.fp
val intMatch = needIntDest(j) && io.in(i).bits.ctrl.src2Type === SrcType.reg
(fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc2
}).reverse)
io.renameBypass.lsrc3_bypass(i-1) := Cat((0 until i).map(j => {
val fpMatch = needFpDest(j) && io.in(i).bits.ctrl.src3Type === SrcType.fp
val intMatch = needIntDest(j) && io.in(i).bits.ctrl.src3Type === SrcType.reg
(fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc3
}).reverse)
io.renameBypass.ldest_bypass(i-1) := Cat((0 until i).map(j => {
val fpMatch = needFpDest(j) && needFpDest(i)
val intMatch = needIntDest(j) && needIntDest(i)
(fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.ldest
}).reverse)
}
}
......@@ -35,9 +35,9 @@ class RenameTable(float: Boolean) extends XSModule {
for((r, i) <- io.readPorts.zipWithIndex){
r.rdata := spec_table(r.addr)
for(w <- io.specWritePorts.take(i/{if(float) 4 else 3})){ // bypass
when(w.wen && (w.addr === r.addr)){ r.rdata := w.wdata }
}
// for(w <- io.specWritePorts.take(i/{if(float) 4 else 3})){ // bypass
// when(w.wen && (w.addr === r.addr)){ r.rdata := w.wdata }
// }
}
for(w <- io.archWritePorts){
......@@ -58,4 +58,4 @@ class RenameTable(float: Boolean) extends XSModule {
ExcitingUtils.Debug
)
}
}
\ No newline at end of file
}
......@@ -85,7 +85,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
// ldest (dispatch) (walk)
// data for debug
val debug_microOp = Mem(RoqSize, new MicroOp)
val microOp = Mem(RoqSize, new MicroOp)
val debug_exuData = Reg(Vec(RoqSize, UInt(XLEN.W)))//for debug
val debug_exuDebug = Reg(Vec(RoqSize, new DebugBundle))//for debug
......@@ -127,20 +127,26 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
// Dispatch
val hasBlockBackward = RegInit(false.B)
val hasNoSpecExec = RegInit(false.B)
val blockBackwardCommit = Cat(io.commits.map(c => c.valid && !c.bits.isWalk && c.bits.uop.ctrl.blockBackward)).orR
val noSpecExecCommit = Cat(io.commits.map(c => c.valid && !c.bits.isWalk && c.bits.uop.ctrl.noSpecExec)).orR
when(blockBackwardCommit){ hasBlockBackward:= false.B }
// When blockBackward instruction leaves Roq (commit or walk), hasBlockBackward should be set to false.B
val blockBackwardLeave = Cat(io.commits.map(c => c.valid && c.bits.uop.ctrl.blockBackward)).orR || io.redirect.valid
when(blockBackwardLeave){ hasBlockBackward:= false.B }
// When noSpecExec instruction commits (it should not be walked except when it has not entered Roq),
// hasNoSpecExec should be set to false.B
val noSpecExecCommit = Cat(io.commits.map(c => c.valid && !c.bits.isWalk && c.bits.uop.ctrl.noSpecExec)).orR || io.redirect.valid
when(noSpecExecCommit){ hasNoSpecExec:= false.B }
// Assertion on that noSpecExec should never be walked since it's the only instruction in Roq.
// Extra walk should be ok since noSpecExec has not enter Roq.
val walkNoSpecExec = Cat(io.commits.map(c => c.valid && c.bits.isWalk && c.bits.uop.ctrl.noSpecExec)).orR
XSError(state =/= s_extrawalk && walkNoSpecExec, "noSpecExec should not walk\n")
val validDispatch = io.enq.req.map(_.valid)
XSDebug("(ready, valid): ")
for (i <- 0 until RenameWidth) {
val offset = PopCount(validDispatch.take(i))
val roqIdxExt = enqPtrExt + offset
val roqIdx = roqIdxExt.value
when(io.enq.req(i).valid) {
debug_microOp(roqIdx) := io.enq.req(i).bits
microOp(roqIdx) := io.enq.req(i).bits
when(io.enq.req(i).bits.ctrl.blockBackward) {
hasBlockBackward := true.B
}
......@@ -170,15 +176,15 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
when(io.exeWbResults(i).fire()){
val wbIdxExt = io.exeWbResults(i).bits.uop.roqIdx
val wbIdx = wbIdxExt.value
debug_microOp(wbIdx).cf.exceptionVec := io.exeWbResults(i).bits.uop.cf.exceptionVec
debug_microOp(wbIdx).lqIdx := io.exeWbResults(i).bits.uop.lqIdx
debug_microOp(wbIdx).sqIdx := io.exeWbResults(i).bits.uop.sqIdx
debug_microOp(wbIdx).ctrl.flushPipe := io.exeWbResults(i).bits.uop.ctrl.flushPipe
debug_microOp(wbIdx).diffTestDebugLrScValid := io.exeWbResults(i).bits.uop.diffTestDebugLrScValid
microOp(wbIdx).cf.exceptionVec := io.exeWbResults(i).bits.uop.cf.exceptionVec
microOp(wbIdx).lqIdx := io.exeWbResults(i).bits.uop.lqIdx
microOp(wbIdx).sqIdx := io.exeWbResults(i).bits.uop.sqIdx
microOp(wbIdx).ctrl.flushPipe := io.exeWbResults(i).bits.uop.ctrl.flushPipe
microOp(wbIdx).diffTestDebugLrScValid := io.exeWbResults(i).bits.uop.diffTestDebugLrScValid
debug_exuData(wbIdx) := io.exeWbResults(i).bits.data
debug_exuDebug(wbIdx) := io.exeWbResults(i).bits.debug
val debug_Uop = debug_microOp(wbIdx)
val debug_Uop = microOp(wbIdx)
XSInfo(true.B,
p"writebacked pc 0x${Hexadecimal(debug_Uop.cf.pc)} wen ${debug_Uop.ctrl.rfWen} " +
p"data 0x${Hexadecimal(io.exeWbResults(i).bits.data)} ldst ${debug_Uop.ctrl.ldest} pdst ${debug_Uop.ctrl.ldest} " +
......@@ -188,7 +194,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
}
// Interrupt
val deqUop = debug_microOp(deqPtr)
val deqUop = microOp(deqPtr)
val deqPtrWritebacked = writebacked(deqPtr) && valid(deqPtr)
val intrEnable = io.csr.intrBitSet && !isEmpty && !hasNoSpecExec &&
deqUop.ctrl.commitType =/= CommitType.STORE && deqUop.ctrl.commitType =/= CommitType.LOAD// TODO: wanna check why has hasCsr(hasNoSpec)
......@@ -204,7 +210,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
XSDebug(io.redirect.valid,
"generate redirect: pc 0x%x intr %d excp %d flushpp %d target:0x%x Traptarget 0x%x exceptionVec %b\n",
io.exception.cf.pc, intrEnable, exceptionEnable, isFlushPipe, io.redirect.bits.target, io.csr.trapTarget,
Cat(debug_microOp(deqPtr).cf.exceptionVec))
Cat(microOp(deqPtr).cf.exceptionVec))
// Commit uop to Rename (walk)
val walkCounter = Reg(UInt(log2Up(RoqSize).W))
......@@ -234,7 +240,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
switch(state){
is(s_idle){
val commitIdx = deqPtr + i.U
val commitUop = debug_microOp(commitIdx)
val commitUop = microOp(commitIdx)
val hasException = Cat(commitUop.cf.exceptionVec).orR() || intrEnable
val canCommit = if(i!=0) (io.commits(i-1).valid && !io.commits(i-1).bits.uop.ctrl.flushPipe) else true.B
val v = valid(commitIdx)
......@@ -279,7 +285,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
is(s_walk){
val idx = walkPtrVec(i).value
val v = valid(idx)
val walkUop = debug_microOp(idx)
val walkUop = microOp(idx)
io.commits(i).valid := v && shouldWalkVec(i)
io.commits(i).bits.uop := walkUop
when(shouldWalkVec(i)){
......@@ -467,7 +473,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
for(i <- 0 until RoqSize){
if(i % 4 == 0) XSDebug("")
XSDebug(false, true.B, "%x ", debug_microOp(i).cf.pc)
XSDebug(false, true.B, "%x ", microOp(i).cf.pc)
XSDebug(false, !valid(i), "- ")
XSDebug(false, valid(i) && writebacked(i), "w ")
XSDebug(false, valid(i) && !writebacked(i), "v ")
......@@ -525,8 +531,8 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
XSDebug(difftestIntrNO =/= 0.U, "difftest intrNO set %x\n", difftestIntrNO)
val retireCounterFix = Mux(io.redirect.valid, 1.U, retireCounter)
val retirePCFix = SignExt(Mux(io.redirect.valid, debug_microOp(deqPtr).cf.pc, debug_microOp(firstValidCommit).cf.pc), XLEN)
val retireInstFix = Mux(io.redirect.valid, debug_microOp(deqPtr).cf.instr, debug_microOp(firstValidCommit).cf.instr)
val retirePCFix = SignExt(Mux(io.redirect.valid, microOp(deqPtr).cf.pc, microOp(firstValidCommit).cf.pc), XLEN)
val retireInstFix = Mux(io.redirect.valid, microOp(deqPtr).cf.instr, microOp(firstValidCommit).cf.instr)
ExcitingUtils.addSource(RegNext(retireCounterFix), "difftestCommit", ExcitingUtils.Debug)
ExcitingUtils.addSource(RegNext(retirePCFix), "difftestThisPC", ExcitingUtils.Debug)//first valid PC
......
......@@ -75,17 +75,16 @@ class Ibuffer extends XSModule {
}
// Deque
when(deqValid) {
var deq_idx = head_ptr
for(i <- 0 until DecodeWidth) {
var outWire = WireInit(ibuf(deq_idx))
val head_wire = head_ptr + i.U
val outWire = WireInit(ibuf(head_wire))
io.out(i).valid := ibuf_valid(head_wire)
when(ibuf_valid(head_wire) && io.out(i).ready) {
ibuf_valid(head_wire) := false.B
}
io.out(i).valid := ibuf_valid(deq_idx)
// Only when the entry is valid can it be set invalid
when (ibuf_valid(deq_idx)) { ibuf_valid(deq_idx) := !io.out(i).fire }
io.out(i).bits.instr := outWire.inst
io.out(i).bits.pc := outWire.pc
// io.out(i).bits.exceptionVec := Mux(outWire.ipf, UIntToOH(instrPageFault.U), 0.U)
......@@ -98,10 +97,8 @@ class Ibuffer extends XSModule {
io.out(i).bits.brUpdate.pd := outWire.pd
io.out(i).bits.brUpdate.brInfo := outWire.brInfo
io.out(i).bits.crossPageIPFFix := outWire.crossPageIPFFix
deq_idx = deq_idx + io.out(i).fire
}
head_ptr := deq_idx
head_ptr := head_ptr + io.out.map(_.fire).fold(0.U(log2Up(DecodeWidth).W))(_+_)
}.otherwise {
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits <> DontCare)
......
......@@ -182,7 +182,7 @@ class LsqWrappper extends XSModule with HasDCacheParameters {
val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReq))
val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback store
val stout = Vec(2, DecoupledIO(new ExuOutput)) // writeback store
val mmioStout = DecoupledIO(new ExuOutput) // writeback uncached store
val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
val rollback = Output(Valid(new Redirect))
......@@ -229,7 +229,7 @@ class LsqWrappper extends XSModule with HasDCacheParameters {
storeQueue.io.brqRedirect <> io.brqRedirect
storeQueue.io.storeIn <> io.storeIn
storeQueue.io.sbuffer <> io.sbuffer
storeQueue.io.stout <> io.stout
storeQueue.io.mmioStout <> io.mmioStout
storeQueue.io.commits <> io.commits
storeQueue.io.roqDeqPtr <> io.roqDeqPtr
storeQueue.io.oldestStore <> io.oldestStore
......
......@@ -32,7 +32,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
val brqRedirect = Input(Valid(new Redirect))
val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReq))
val stout = Vec(2, DecoupledIO(new ExuOutput)) // writeback store
val mmioStout = DecoupledIO(new ExuOutput) // writeback uncached store
val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
val uncache = new DCacheWordIO
......@@ -72,7 +72,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
// Enqueue at dispatch
val validEntries = distanceBetween(enqPtrExt, deqPtrExt)
val firedDispatch = io.enq.req.map(_.valid)
io.enq.canAccept := validEntries <= (LoadQueueSize - RenameWidth).U
io.enq.canAccept := validEntries <= (StoreQueueSize - RenameWidth).U
XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
for (i <- 0 until RenameWidth) {
val offset = if (i == 0) 0.U else PopCount((0 until i).map(firedDispatch(_)))
......@@ -102,8 +102,10 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
when(io.storeIn(i).fire()) {
val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value
val hasException = io.storeIn(i).bits.uop.cf.exceptionVec.asUInt.orR
datavalid(stWbIndex) := !io.storeIn(i).bits.mmio || hasException
pending(stWbIndex) := io.storeIn(i).bits.mmio && !hasException
val hasWritebacked = !io.storeIn(i).bits.mmio || hasException
datavalid(stWbIndex) := hasWritebacked
writebacked(stWbIndex) := hasWritebacked
pending(stWbIndex) := !hasWritebacked // valid mmio require
val storeWbData = Wire(new LsqEntry)
storeWbData := DontCare
......@@ -183,29 +185,21 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
io.oldestStore.valid := allocated(deqPtrExt.value) && datavalid(deqPtrExt.value) && !commited(storeValidIndex)
io.oldestStore.bits := uop(storeValidIndex).roqIdx
// writeback up to 2 store insts to CDB
// choose the first two valid store requests from deqPtr
val storeWbSelVec = VecInit((0 until StoreQueueSize).map(i => allocated(i) && datavalid(i) && !writebacked(i)))
val (storeWbValid, storeWbSel) = selectFirstTwo(storeWbSelVec, tailMask)
(0 until StorePipelineWidth).map(i => {
io.stout(i).bits.uop := uop(storeWbSel(i))
io.stout(i).bits.uop.sqIdx := storeWbSel(i).asTypeOf(new SqPtr)
io.stout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(storeWbSel(i)).exception.asBools
io.stout(i).bits.data := dataModule.io.rdata(storeWbSel(i)).data
io.stout(i).bits.redirectValid := false.B
io.stout(i).bits.redirect := DontCare
io.stout(i).bits.brUpdate := DontCare
io.stout(i).bits.debug.isMMIO := dataModule.io.rdata(storeWbSel(i)).mmio
io.stout(i).valid := storeWbSelVec(storeWbSel(i)) && storeWbValid(i)
when(io.stout(i).fire()) {
writebacked(storeWbSel(i)) := true.B
when(dataModule.io.rdata(storeWbSel(i)).mmio) {
allocated(storeWbSel(i)) := false.B // potential opt: move deqPtr immediately
}
}
io.stout(i).bits.fflags := DontCare
})
// writeback finished mmio store
io.mmioStout.bits.uop := uop(deqPtr)
io.mmioStout.bits.uop.sqIdx := deqPtrExt
io.mmioStout.bits.uop.cf.exceptionVec := dataModule.io.rdata(deqPtr).exception.asBools
io.mmioStout.bits.data := dataModule.io.rdata(deqPtr).data
io.mmioStout.bits.redirectValid := false.B
io.mmioStout.bits.redirect := DontCare
io.mmioStout.bits.brUpdate := DontCare
io.mmioStout.bits.debug.isMMIO := true.B
io.mmioStout.bits.fflags := DontCare
io.mmioStout.valid := allocated(deqPtr) && datavalid(deqPtr) && !writebacked(deqPtr) // finished mmio store
when(io.mmioStout.fire()) {
writebacked(deqPtr) := true.B
allocated(deqPtr) := false.B // potential opt: move deqPtr immediately
}
// remove retired insts from sq, add retired store to sbuffer
......
......@@ -67,6 +67,7 @@ class StoreUnit_S1 extends XSModule {
val in = Flipped(Decoupled(new LsPipelineBundle))
val out = Decoupled(new LsPipelineBundle)
// val fp_out = Decoupled(new LsPipelineBundle)
val stout = DecoupledIO(new ExuOutput) // writeback store
val redirect = Flipped(ValidIO(new Redirect))
})
......@@ -79,6 +80,18 @@ class StoreUnit_S1 extends XSModule {
io.out.bits.mmio := AddressSpace.isMMIO(io.in.bits.paddr)
io.out.valid := io.in.fire() // TODO: && ! FP
io.stout.bits.uop := io.in.bits.uop
// io.stout.bits.uop.cf.exceptionVec := // TODO: update according to TLB result
io.stout.bits.data := DontCare
io.stout.bits.redirectValid := false.B
io.stout.bits.redirect := DontCare
io.stout.bits.brUpdate := DontCare
io.stout.bits.debug.isMMIO := io.out.bits.mmio
io.stout.bits.fflags := DontCare
val hasException = io.out.bits.uop.cf.exceptionVec.asUInt.orR
io.stout.valid := io.in.fire() && (!io.out.bits.mmio || hasException) // mmio inst will be writebacked immediately
// if fp
// io.fp_out.valid := ...
// io.fp_out.bits := ...
......@@ -104,6 +117,7 @@ class StoreUnit extends XSModule {
val tlbFeedback = ValidIO(new TlbFeedback)
val dtlb = new TlbRequestIO()
val lsq = ValidIO(new LsPipelineBundle)
val stout = DecoupledIO(new ExuOutput) // writeback store
})
val store_s0 = Module(new StoreUnit_S0)
......@@ -120,6 +134,7 @@ class StoreUnit extends XSModule {
// PipelineConnect(store_s1.io.fp_out, store_s2.io.in, true.B, false.B)
store_s1.io.redirect <> io.redirect
store_s1.io.stout <> io.stout
// send result to sq
io.lsq.valid := store_s1.io.out.valid
io.lsq.bits := store_s1.io.out.bits
......
......@@ -8,9 +8,7 @@ import scala.collection.mutable
class ShowPrintTransform extends Transform with DependencyAPIMigration {
// The first transform to run
override def prerequisites = firrtl.stage.Forms.ChirrtlForm
// Invalidates everything
override def optionalPrerequisiteOf = firrtl.stage.Forms.MinimalHighForm
override def invalidates(a: Transform) = true
override protected def execute(state: CircuitState): CircuitState = {
......@@ -26,6 +24,10 @@ class ShowPrintTransform extends Transform with DependencyAPIMigration {
case DisableAllPrintAnnotation() => true
}.nonEmpty
val removeAssert = state.annotations.collectFirst{
case RemoveAssertAnnotation() => true
}.nonEmpty
assert(
!(whiteList.nonEmpty && (disableAll || blackList.nonEmpty)),
"'white list' can't be used with 'disable all' or 'black list'!"
......@@ -56,26 +58,37 @@ class ShowPrintTransform extends Transform with DependencyAPIMigration {
}
def processModule(m: DefModule): DefModule = {
  // Strips all `printf`s (firrtl Print statements) from a module's body.
  // NOTE(review): scrape kept both the old zero-arg and the new one-arg
  // signature of this helper; this is the post-merge one-arg form.
  def disableModulePrint(mod: DefModule) = {
    def disableStmtPrint(s: Statement): Statement = s match {
      case _: Print =>
        EmptyStmt
      case other =>
        // Recurse into compound statements (blocks, whens, ...).
        other.mapStmt(disableStmtPrint)
    }
    mod.mapStmt(disableStmtPrint)
  }

  // Strips all assertions (firrtl Stop statements) from a module's body.
  def removeModuleAssert(mod: DefModule) = {
    def removeStmtAssert(s: Statement): Statement = s match {
      case _: Stop =>
        EmptyStmt
      case other =>
        other.mapStmt(removeStmtAssert)
    }
    mod.mapStmt(removeStmtAssert)
  }

  // A module is blacklisted if it, or any of its ancestors in the
  // instance hierarchy, appears in `blackList`.
  val isInBlackList = blackList.nonEmpty && (
    blackList.contains(m.name) || blackList.map(b => ancestors(m.name).contains(b)).reduce(_ || _)
  )
  // An empty white list admits every module; otherwise the module or an
  // ancestor must be listed explicitly.
  val isInWhiteList = whiteList.isEmpty || (
    whiteList.nonEmpty && (whiteList.contains(m.name) || whiteList.map(x => ancestors(m.name).contains(x)).reduce(_ || _))
  )

  // First drop prints where requested, then (independently) drop asserts
  // when the RemoveAssertAnnotation was supplied.
  val tmpMod = if (disableAll || isInBlackList || !isInWhiteList) {
    disableModulePrint(m)
  } else {
    m
  }
  if (removeAssert) removeModuleAssert(tmpMod) else tmpMod
}
state.copy(c.mapModule(processModule))
......
......@@ -243,7 +243,7 @@ uint64_t Emulator::execute(uint64_t max_cycle, uint64_t max_instr) {
diff.wdata = wdata;
diff.wdst = wdst;
while (trapCode == STATE_RUNNING) {
while (!Verilated::gotFinish() && trapCode == STATE_RUNNING) {
if (!(max_cycle > 0 && max_instr > 0 && instr_left_last_cycle >= max_instr /* handle overflow */)) {
trapCode = STATE_LIMIT_EXCEEDED;
break;
......@@ -320,6 +320,11 @@ uint64_t Emulator::execute(uint64_t max_cycle, uint64_t max_instr) {
#endif
}
if (Verilated::gotFinish()) {
eprintf("The simulation stopped. There might be some assertion failed.\n");
trapCode = STATE_ABORT;
}
#if VM_TRACE == 1
if (enable_waveform) tfp->close();
#endif
......
......@@ -223,8 +223,10 @@ extern "C" uint64_t ram_read_helper(uint8_t en, uint64_t rIdx) {
extern "C" void ram_write_helper(uint64_t wIdx, uint64_t wdata, uint64_t wmask, uint8_t wen) {
  // Masked write into the simulation RAM backing store (DPI-C callback).
  // wIdx is a 64-bit word index; wmask selects which bits of the word to
  // update; wen gates the whole operation.
  if (wen) {
    // Diagnose out-of-bounds indices before touching memory. The scraped
    // diff kept the pre-change unconditional printf as well; only the
    // guarded form below is correct (print + assert only on violation).
    if (wIdx >= RAMSIZE / sizeof(uint64_t)) {
      printf("ERROR: ram wIdx = 0x%lx out of bound!\n", wIdx);
      assert(wIdx < RAMSIZE / sizeof(uint64_t));
    }
    // Keep old bits where the mask is 0, take new bits where it is 1.
    ram[wIdx] = (ram[wIdx] & ~wmask) | (wdata & wmask);
  }
}
......
......@@ -4,8 +4,7 @@ import chipsalliance.rocketchip.config.{Field, Parameters}
import chisel3._
import chisel3.util._
import chiseltest.experimental.TestOptionBuilder._
import chiseltest.internal.VerilatorBackendAnnotation
import chiseltest.internal.LineCoverageAnnotation
import chiseltest.internal.{VerilatorBackendAnnotation, LineCoverageAnnotation, ToggleCoverageAnnotation, UserCoverageAnnotation, StructuralCoverageAnnotation}
import chiseltest._
import chisel3.experimental.BundleLiterals._
import firrtl.stage.RunFirrtlTransformAnnotation
......@@ -58,7 +57,7 @@ case object L3CacheTestKey extends Field[L3CacheTestParams]
class L2TestTopIO extends Bundle {
val in = Flipped(DecoupledIO(new Bundle() {
val wdata = Input(UInt(64.W))
val waddr = Input(UInt(20.W))
val waddr = Input(UInt(40.W))
val hartId = Input(UInt(1.W))
}))
val out = DecoupledIO(new Bundle() {
......@@ -260,6 +259,9 @@ class L2CacheTest extends AnyFlatSpec with ChiselScalatestTester with Matchers{
val annos = Seq(
VerilatorBackendAnnotation,
LineCoverageAnnotation,
ToggleCoverageAnnotation,
UserCoverageAnnotation,
StructuralCoverageAnnotation,
RunFirrtlTransformAnnotation(new PrintModuleName)
)
......@@ -281,7 +283,9 @@ class L2CacheTest extends AnyFlatSpec with ChiselScalatestTester with Matchers{
c.clock.step(100)
for(i <- 0 until 100000){
val addr = Random.nextInt(0xfffff) & 0xffe00 // align to block size
// DRAM AddressSet is above 0x80000000L
// also, note that, + has higher priority than & !!!
val addr = (Random.nextInt(0x7fffffff).toLong & 0xfffffe00L) + 0x80000000L // align to block size
val data = Random.nextLong() & 0x7fffffffffffffffL
c.io.in.enqueue(chiselTypeOf(c.io.in.bits).Lit(
_.waddr -> addr.U,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册