Commit fa9cf729 authored by zhanglinjuan

Merge branch 'dev-frontend' into dev-bpu-pipeline

@@ -99,6 +99,9 @@ class Alu extends Exu(alu.litValue(), hasRedirect = true) {
io.in.ready := io.out.ready
val pcLatchSlot = Mux(isRVC, pc + 2.U, pc + 4.U)
//TODO fix me
io.out.bits.redirect := DontCare
io.out.bits.redirectValid := io.out.valid && isBru//isBranch
io.out.bits.redirect.pc := uop.cf.pc
io.out.bits.redirect.target := Mux(!taken && isBranch, pcLatchSlot, target)
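// Redirect target: the fall-through slot (pc+2 for RVC, pc+4 otherwise) when a
// conditional branch resolves as not-taken, otherwise the computed branch/jump target.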
......
@@ -35,6 +35,9 @@ class Bru extends Exu(FuType.bru.litValue(), writeFpRf = true, hasRedirect = tru
val pcDelaySlot = Mux(isRVC, pc + 2.U, pc + 4.U)
val target = src1 + offset // NOTE: src1 is (pc/rf(rs1)), src2 is (offset)
//TODO fix me
io.out.bits.redirect := DontCare
io.out.bits.redirectValid := valid && isJUMP
io.out.bits.redirect.pc := io.in.bits.uop.cf.pc
io.out.bits.redirect.target := target
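// Jumps always redirect to src1 + offset: per the NOTE above, src1 is pc for JAL and rf(rs1) for JALR.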
......
@@ -253,191 +253,4 @@ class BPU extends XSModule {
io.tageOut <> s3.io.out
s3.io.redirectInfo <> io.redirectInfo
// TODO: delete this and put BTB and JBTAC into Stage1
/*
val flush = BoolStopWatch(io.redirect.valid, io.in.pc.valid, startHighPriority = true)
// BTB makes a quick prediction for branch and direct jump, which is
// 4-way set-associative, and each way is divided into 4 banks.
val btbAddr = new TableAddr(log2Up(BtbSets), BtbBanks)
def btbEntry() = new Bundle {
val valid = Bool()
// TODO: don't need full length of tag and target
val tag = UInt(btbAddr.tagBits.W)
val _type = UInt(2.W)
val target = UInt(VAddrBits.W)
val pred = UInt(2.W) // 2-bit saturated counter as a quick predictor
}
val btb = List.fill(BtbBanks)(List.fill(BtbWays)(
Module(new SRAMTemplate(btbEntry(), set = BtbSets / BtbBanks, shouldReset = true, holdRead = true, singlePort = true))))
// val fetchPkgAligned = btbAddr.getBank(io.in.pc.bits) === 0.U
val HeadBank = btbAddr.getBank(io.in.pc.bits)
val TailBank = btbAddr.getBank(io.in.pc.bits + FetchWidth.U << 2.U - 4.U)
for (b <- 0 until BtbBanks) {
for (w <- 0 until BtbWays) {
btb(b)(w).reset := reset.asBool
btb(b)(w).io.r.req.valid := io.in.pc.valid && Mux(TailBank > HeadBank, b.U >= HeadBank && b.U <= TailBank, b.U >= TailBank || b.U <= HeadBank)
btb(b)(w).io.r.req.bits.setIdx := btbAddr.getBankIdx(io.in.pc.bits)
}
}
// latch pc for 1 cycle latency when reading SRAM
val pcLatch = RegEnable(io.in.pc.bits, io.in.pc.valid)
val btbRead = Wire(Vec(BtbBanks, Vec(BtbWays, btbEntry())))
val btbHits = Wire(Vec(FetchWidth, Bool()))
val btbTargets = Wire(Vec(FetchWidth, UInt(VAddrBits.W)))
val btbTypes = Wire(Vec(FetchWidth, UInt(2.W)))
// val btbPreds = Wire(Vec(FetchWidth, UInt(2.W)))
val btbTakens = Wire(Vec(FetchWidth, Bool()))
for (b <- 0 until BtbBanks) {
for (w <- 0 until BtbWays) {
btbRead(b)(w) := btb(b)(w).io.r.resp.data(0)
}
}
for (i <- 0 until FetchWidth) {
btbHits(i) := false.B
for (b <- 0 until BtbBanks) {
for (w <- 0 until BtbWays) {
when (b.U === btbAddr.getBank(pcLatch) && btbRead(b)(w).valid && btbRead(b)(w).tag === btbAddr.getTag(Cat(pcLatch(VAddrBits - 1, 2), 0.U(2.W)) + i.U << 2)) {
btbHits(i) := !flush && RegNext(btb(b)(w).io.r.req.fire(), init = false.B)
btbTargets(i) := btbRead(b)(w).target
btbTypes(i) := btbRead(b)(w)._type
// btbPreds(i) := btbRead(b)(w).pred
btbTakens(i) := (btbRead(b)(w).pred)(1).asBool
}.otherwise {
btbHits(i) := false.B
btbTargets(i) := DontCare
btbTypes(i) := DontCare
btbTakens(i) := DontCare
}
}
}
}
// JBTAC, divided into 8 banks, makes prediction for indirect jump except ret.
val jbtacAddr = new TableAddr(log2Up(JbtacSize), JbtacBanks)
def jbtacEntry() = new Bundle {
val valid = Bool()
// TODO: don't need full length of tag and target
val tag = UInt(jbtacAddr.tagBits.W)
val target = UInt(VAddrBits.W)
}
val jbtac = List.fill(JbtacBanks)(Module(new SRAMTemplate(jbtacEntry(), set = JbtacSize / JbtacBanks, shouldReset = true, holdRead = true, singlePort = true)))
(0 until JbtacBanks).map(i => jbtac(i).reset := reset.asBool)
(0 until JbtacBanks).map(i => jbtac(i).io.r.req.valid := io.in.pc.valid)
(0 until JbtacBanks).map(i => jbtac(i).io.r.req.bits.setIdx := jbtacAddr.getBankIdx(Cat((io.in.pc.bits)(VAddrBits - 1, 2), 0.U(2.W)) + i.U << 2))
val jbtacRead = Wire(Vec(JbtacBanks, jbtacEntry()))
(0 until JbtacBanks).map(i => jbtacRead(i) := jbtac(i).io.r.resp.data(0))
val jbtacHits = Wire(Vec(FetchWidth, Bool()))
val jbtacTargets = Wire(Vec(FetchWidth, UInt(VAddrBits.W)))
val jbtacHeadBank = jbtacAddr.getBank(Cat(pcLatch(VAddrBits - 1, 2), 0.U(2.W)))
for (i <- 0 until FetchWidth) {
jbtacHits(i) := false.B
for (b <- 0 until JbtacBanks) {
when (jbtacHeadBank + i.U === b.U) {
jbtacHits(i) := jbtacRead(b).valid && jbtacRead(b).tag === jbtacAddr.getTag(Cat(pcLatch(VAddrBits - 1, 2), 0.U(2.W)) + i.U << 2) &&
!flush && RegNext(jbtac(b).io.r.req.fire(), init = false.B)
jbtacTargets(i) := jbtacRead(b).target
}.otherwise {
jbtacHits(i) := false.B
jbtacTargets(i) := DontCare
}
}
}
// redirect based on BTB and JBTAC
(0 until FetchWidth).map(i => io.predMask(i) := btbHits(i) && Mux(btbTypes(i) === BTBtype.B, btbTakens(i), true.B) || jbtacHits(i))
(0 until FetchWidth).map(i => io.predTargets(i) := Mux(btbHits(i) && !(btbTypes(i) === BTBtype.B && !btbTakens(i)), btbTargets(i), jbtacTargets(i)))
// update bpu, including BTB, JBTAC...
// 1. update BTB
// 1.1 read the selected bank
for (b <- 0 until BtbBanks) {
for (w <- 0 until BtbWays) {
btb(b)(w).io.r.req.valid := io.redirect.valid && btbAddr.getBank(io.redirect.bits.pc) === b.U
btb(b)(w).io.r.req.bits.setIdx := btbAddr.getBankIdx(io.redirect.bits.pc)
}
}
// 1.2 match redirect pc tag with the 4 tags in a btb line, find a way to write
// val redirectLatch = RegEnable(io.redirect.bits, io.redirect.valid)
val redirectLatch = RegNext(io.redirect.bits, init = 0.U.asTypeOf(new Redirect))
val bankLatch = btbAddr.getBank(redirectLatch.pc)
val btbUpdateRead = Wire(Vec(BtbWays, btbEntry()))
val btbValids = Wire(Vec(BtbWays, Bool()))
val btbUpdateTagHits = Wire(Vec(BtbWays, Bool()))
for (b <- 0 until BtbBanks) {
for (w <- 0 until BtbWays) {
when (b.U === bankLatch) {
btbUpdateRead(w) := btb(b)(w).io.r.resp.data(0)
btbValids(w) := btbUpdateRead(w).valid && RegNext(btb(b)(w).io.r.req.fire(), init = false.B)
}.otherwise {
btbUpdateRead(w) := 0.U.asTypeOf(btbEntry())
btbValids(w) := false.B
}
}
}
(0 until BtbWays).map(w => btbUpdateTagHits(w) := btbValids(w) && btbUpdateRead(w).tag === btbAddr.getTag(redirectLatch.pc))
// val btbWriteWay = Wire(Vec(BtbWays, Bool()))
val btbWriteWay = Wire(UInt(BtbWays.W))
val btbInvalids = ~ btbValids.asUInt
when (btbUpdateTagHits.asUInt.orR) {
// tag hits
btbWriteWay := btbUpdateTagHits.asUInt
}.elsewhen (!btbValids.asUInt.andR) {
// no tag hits but there are free entries
btbWriteWay := Mux(btbInvalids >= 8.U, "b1000".U,
Mux(btbInvalids >= 4.U, "b0100".U,
Mux(btbInvalids >= 2.U, "b0010".U, "b0001".U)))
}.otherwise {
// no tag hits and no free entry, select a victim way
btbWriteWay := UIntToOH(LFSR64()(log2Up(BtbWays) - 1, 0))
}
// 1.3 calculate new 2-bit counter value
val btbWrite = WireInit(0.U.asTypeOf(btbEntry()))
btbWrite.valid := true.B
btbWrite.tag := btbAddr.getTag(redirectLatch.pc)
btbWrite._type := redirectLatch._type
btbWrite.target := redirectLatch.brTarget
val oldPred = WireInit("b01".U)
oldPred := PriorityMux(btbWriteWay.asTypeOf(Vec(BtbWays, Bool())), btbUpdateRead.map{ e => e.pred })
val newPred = Mux(redirectLatch.taken, Mux(oldPred === "b11".U, "b11".U, oldPred + 1.U),
Mux(oldPred === "b00".U, "b00".U, oldPred - 1.U))
btbWrite.pred := Mux(btbUpdateTagHits.asUInt.orR && redirectLatch._type === BTBtype.B, newPred, "b01".U)
// 1.4 write BTB
for (b <- 0 until BtbBanks) {
for (w <- 0 until BtbWays) {
when (b.U === bankLatch) {
btb(b)(w).io.w.req.valid := OHToUInt(btbWriteWay) === w.U &&
RegNext(io.redirect.valid, init = false.B) &&
(redirectLatch._type === BTBtype.B || redirectLatch._type === BTBtype.J)
btb(b)(w).io.w.req.bits.setIdx := btbAddr.getBankIdx(redirectLatch.pc)
btb(b)(w).io.w.req.bits.data := btbWrite
}.otherwise {
btb(b)(w).io.w.req.valid := false.B
btb(b)(w).io.w.req.bits.setIdx := DontCare
btb(b)(w).io.w.req.bits.data := DontCare
}
}
}
// 2. update JBTAC
val jbtacWrite = WireInit(0.U.asTypeOf(jbtacEntry()))
jbtacWrite.valid := true.B
jbtacWrite.tag := jbtacAddr.getTag(io.redirect.bits.pc)
jbtacWrite.target := io.redirect.bits.target
(0 until JbtacBanks).map(b =>
jbtac(b).io.w.req.valid := io.redirect.valid &&
b.U === jbtacAddr.getBank(io.redirect.bits.pc) &&
io.redirect.bits._type === BTBtype.I)
(0 until JbtacBanks).map(b => jbtac(b).io.w.req.bits.setIdx := jbtacAddr.getBankIdx(io.redirect.bits.pc))
(0 until JbtacBanks).map(b => jbtac(b).io.w.req.bits.data := jbtacWrite)
*/
}
package xiangshan.frontend
import chisel3._
import chisel3.util._
import device.RAMHelper
import xiangshan._
import utils.{Debug, GTimer, XSDebug}
import xiangshan.backend.decode.isa
import xiangshan.backend.decode.Decoder
trait HasICacheConst { this: XSModule =>
// 4-byte align * FetchWidth-inst
val groupAlign = log2Up(FetchWidth * 4)
def groupPC(pc: UInt): UInt = Cat(pc(VAddrBits-1, groupAlign), 0.U(groupAlign.W))
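// groupPC aligns pc down to the start of its fetch group (FetchWidth * 4 bytes);
// e.g. assuming FetchWidth = 8, groupAlign = 5 and groupPC(0x80000014) = 0x80000000.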
}
class FakeIcacheReq extends XSBundle {
val addr = UInt(VAddrBits.W)
val flush = Bool()
}
class FakeIcacheResp extends XSBundle {
val icacheOut = Vec(FetchWidth, UInt(32.W))
val predecode = new Predecode
}
class TempPreDecoder extends XSModule {
val io = IO(new Bundle() {
val in = Input(Vec(FetchWidth,UInt(32.W)))
val out = Output(new Predecode)
})
val tempPreDecoders = Seq.fill(FetchWidth)(Module(new Decoder))
for (i <- 0 until FetchWidth) {
tempPreDecoders(i).io.in <> DontCare
tempPreDecoders(i).io.in.instr <> io.in(i)
io.out.fuTypes(i) := tempPreDecoders(i).io.out.ctrl.fuType
io.out.fuOpTypes(i) := tempPreDecoders(i).io.out.ctrl.fuOpType
}
io.out.mask := DontCare
}
class FakeCache extends XSModule with HasICacheConst {
val io = IO(new Bundle {
val in = Flipped(DecoupledIO(new FakeIcacheReq))
val out = DecoupledIO(new FakeIcacheResp)
})
val memByte = 128 * 1024 * 1024
val ramHelpers = Array.fill(FetchWidth/2)(Module(new RAMHelper(memByte)).io)
ramHelpers.foreach(_.clk := clock)
//fake instruction fetch pipeline
//----------------
// ICache Stage1
//----------------
val s1_valid = io.in.valid
val s2_ready = WireInit(false.B)
val s1_fire = s1_valid && s2_ready
val gpc = groupPC(io.in.bits.addr)
io.in.ready := s2_ready
val offsetBits = log2Up(memByte)
val offsetMask = (1 << offsetBits) - 1
def index(addr: UInt): UInt = ((addr & offsetMask.U) >> log2Ceil(DataBytes)).asUInt()
def inRange(idx: UInt): Bool = idx < (memByte / 8).U
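// index() turns a byte address into a word index into the backing RAM (DataBytes assumed to be 8 here);
// inRange() guards against indices beyond the memByte-sized memory.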
val ramOut = Wire(Vec(FetchWidth,UInt(32.W)))
for(i <- ramHelpers.indices) {
val rIdx = index(gpc) + i.U
ramHelpers(i).rIdx := rIdx
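// Each 64-bit RAMHelper read supplies two instructions: the low word (tail(32))
// fills slot 2*i, the high word (head(32)) fills slot 2*i+1.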
ramOut(2*i) := ramHelpers(i).rdata.tail(32)
ramOut(2*i+1) := ramHelpers(i).rdata.head(32)
Seq(
ramHelpers(i).wmask,
ramHelpers(i).wdata,
ramHelpers(i).wen,
ramHelpers(i).wIdx
).foreach(_ := 0.U)
}
//----------------
// ICache Stage2
//----------------
val s2_valid = RegEnable(next=s1_valid,init=false.B,enable=s1_fire)
val s2_ram_out = RegEnable(next=ramOut,enable=s1_fire)
val s3_ready = WireInit(false.B)
val s2_fire = s2_valid && s3_ready
s2_ready := s2_fire || !s2_valid
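// Standard pipeline handshake: stage 2 accepts a new request once its current
// content fires into stage 3, or when it currently holds nothing.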
//----------------
// ICache Stage3
//----------------
val s3_valid = RegEnable(next=s2_valid,init=false.B,enable=s2_fire)
val s3_ram_out = RegEnable(next=s2_ram_out,enable=s2_fire)
s3_ready := io.out.ready
val needflush = io.in.bits.flush
when(needflush){
s2_valid := false.B
s3_valid := false.B
}
val tempPredecode = Module(new TempPreDecoder)
tempPredecode.io.in := s3_ram_out
io.out.valid := s3_valid
io.out.bits.icacheOut := s3_ram_out
io.out.bits.predecode := tempPredecode.io.out
}
\ No newline at end of file
@@ -11,15 +11,36 @@ class Frontend extends XSModule {
val backend = new FrontendToBackendIO
})
val fakeIFU = Module(new FakeIFU)
// val fakeIFU = Module(new FakeIFU)
// val ibuffer= Module(new Ibuffer)
// val needFlush = io.backend.redirectInfo.flush()
// fakeIFU.io.redirect.valid := needFlush
// fakeIFU.io.redirect.bits := io.backend.redirectInfo.redirect
// ibuffer.io.in <> fakeIFU.io.fetchPacket
// ibuffer.io.flush := needFlush
// io.backend.cfVec <> ibuffer.io.out
// for(out <- ibuffer.io.out){
// XSInfo(out.fire(),
// p"inst:${Hexadecimal(out.bits.instr)} pc:${Hexadecimal(out.bits.pc)}\n"
// )
// }
val ifu = Module(new IFU)
val fakeicache = Module(new FakeCache)
val ibuffer= Module(new Ibuffer)
val needFlush = io.backend.redirectInfo.flush()
fakeIFU.io.redirect.valid := needFlush
fakeIFU.io.redirect.bits := io.backend.redirectInfo.redirect
ifu.io.redirectInfo <> io.backend.redirectInfo
fakeicache.io.in <> ifu.io.icacheReq
ifu.io.icacheResp <> fakeicache.io.out
ibuffer.io.in <> fakeIFU.io.fetchPacket
ibuffer.io.in <> ifu.io.fetchPacket
ibuffer.io.flush := needFlush
io.backend.cfVec <> ibuffer.io.out
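// Data path: the IFU issues fetch requests to FakeCache (icacheReq/icacheResp),
// fetched packets flow through the Ibuffer to the backend, and a backend redirect
// (needFlush) flushes the Ibuffer while redirectInfo steers the IFU.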
@@ -30,4 +51,5 @@ class Frontend extends XSModule {
)
}
}
@@ -2,9 +2,9 @@ package xiangshan.frontend
import chisel3._
import chisel3.util._
import chisel3.core.{withReset}
import device.RAMHelper
import xiangshan._
import xiangshan.utils._
trait HasIFUConst { this: XSModule =>
val resetVector = 0x80000000L//TODO: set reset vec
@@ -14,52 +14,57 @@ trait HasIFUConst { this: XSModule =>
}
sealed abstract class IFUBundle extends XSBundle with HasIFUConst
sealed abstract class IFUModule extends XSModule with HasIFUConst with NeedImpl
class IFUIO extends IFUBundle
class IFUIO extends XSBundle
{
val fetchPacket = DecoupledIO(new FetchPacket)
val redirect = Flipped(ValidIO(new Redirect))
val redirectInfo = Input(new RedirectInfo)
val icacheReq = DecoupledIO(new FakeIcacheReq)
val icacheResp = Flipped(DecoupledIO(new FakeIcacheResp))
}
class IF1IO extends IFUBundle
{
val pc = UInt(VAddrBits.W)
class FakeBPU extends XSModule{
val io = IO(new Bundle() {
val in = new Bundle { val pc = Flipped(Valid(UInt(VAddrBits.W))) }
val btbOut = ValidIO(new BranchPrediction)
val tageOut = ValidIO(new BranchPrediction)
val predecode = Flipped(ValidIO(new Predecode))
})
io.btbOut.valid := false.B
io.btbOut.bits <> DontCare
io.tageOut.valid := false.B
io.tageOut.bits <> DontCare
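// With both prediction outputs held invalid, the IFU never sees a taken
// prediction and keeps fetching the sequential next group.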
}
class IF2IO extends IFUBundle
{
val pc = UInt(VAddrBits.W)
val btbOut = new BranchPrediction
val taken = Bool()
}
class IFU(implicit val p: XSConfig) extends IFUModule
class IFU extends XSModule with HasIFUConst
{
val io = IO(new IFUIO)
val bpu = Module(new BPU)
//val bpu = Module(new BPU)
val bpu = Module(new FakeBPU)
//-------------------------
// IF1 PC update
//-------------------------
//local
val if1_npc = WireInit(0.U(VAddrBits.W))
val if1_valid = WireInit(false.B)
val if1_valid = !reset.asBool //TODO:this is ugly
val if1_pc = RegInit(resetVector.U(VAddrBits.W))
//next
val if2_ready = WireInit(false.B)
val if1_ready = bpu.io.in.ready && if2_ready
val if2_snpc = Cat(if1_pc(VAddrBits-1, groupAlign) + 1.U, 0.U(groupAlign.W))
val if1_ready = if2_ready
//pipe fire
val if1_fire = if1_valid && if1_ready
val if1_pcUpdate = io.redirect.valid || if1_fire
val if1_pcUpdate = io.redirectInfo.flush() || if1_fire
when(RegNext(reset.asBool) && !reset.asBool)
{
if1_npc := resetVector
if1_valid := true.B
when(RegNext(reset.asBool) && !reset.asBool){
XSDebug("RESET....\n")
if1_npc := resetVector.U(VAddrBits.W)
} .otherwise{
if1_npc := if2_snpc
}
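// The later when blocks (IF2 BTB taken, IF4 TAGE taken, backend redirect) override
// this default via Chisel last-connect semantics, so a redirect has the highest
// priority for if1_npc.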
when(if1_pcUpdate)
@@ -67,107 +72,111 @@ class IFU(implicit val p: XSConfig) extends IFUModule
if1_pc := if1_npc
}
bpu.io.in.valid := if1_valid
bpu.io.in.pc := if1_npc
bpu.io.in.pc.valid := if1_valid
bpu.io.in.pc.bits := if1_npc
XSDebug("[IF1]if1_valid:%d || if1_npc:0x%x || if1_pcUpdate:%d if1_pc:0x%x || if2_ready:%d",if1_valid,if1_npc,if1_pcUpdate,if1_pc,if2_ready)
XSDebug(false,if1_fire,"------IF1->fire!!!")
XSDebug(false,true.B,"\n")
//-------------------------
// IF2 btb response
// icache visit
//-------------------------
//local
val if2_flush = WireInit(false.B)
val if2_update = if1_fire && !if2_flush
val if2_valid = RegNext(if2_update)
val if2_valid = RegEnable(next=if1_valid,init=false.B,enable=if1_fire)
val if2_pc = if1_pc
val if2_btb_taken = bpu.io.btbOut.valid
val if2_btb_insMask = bpu.io.btbOut.bits.instrValid
val if2_btb_target = bpu.io.btbOut.bits.target
val if2_snpc = Cat(if2_pc(VAddrBits-1, groupAlign) + 1.U, 0.U(groupAlign.W))
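// if2_snpc: start address of the next aligned fetch group (current group base + FetchWidth * 4 bytes).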
val if2_flush = WireInit(false.B)
//next
val if3_ready = WireInit(false.B)
//pipe fire
val if2_fire = if2_valid && if3_ready
val if2_ready = (if2_fire && icache.io.in.fire()) || !if2_valid
val if2_fire = if2_valid && if3_ready && io.icacheReq.fire()
if2_ready := (if2_fire) || !if2_valid
icache.io.in.valid := if2_fire
icahce.io.in.bits := if2_pc
io.icacheReq.valid := if2_valid
io.icacheReq.bits.addr := groupPC(if2_pc)
io.icacheReq.bits.flush := io.redirectInfo.flush()
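// A backend redirect is forwarded as a flush to the fake icache so that its
// in-flight stage 2/3 requests are dropped.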
when(if2_valid && if2_btb_taken)
{
if1_npc := if2_btb_target
} .otherwise
{
if1_npc := if2_snpc
}
XSDebug("[IF2]if2_valid:%d || if2_pc:0x%x || if3_ready:%d ",if2_valid,if2_pc,if3_ready)
//XSDebug("[IF2-BPU-out]if2_btbTaken:%d || if2_btb_insMask:%b || if2_btb_target:0x%x \n",if2_btb_taken,if2_btb_insMask.asUInt,if2_btb_target)
XSDebug(false,if2_fire,"------IF2->fire!!!")
XSDebug(false,true.B,"\n")
XSDebug("[IF2-Icache-Req] icache_in_valid:%d icache_in_ready:%d\n",io.icacheReq.valid,io.icacheReq.ready)
//-------------------------
// IF3 icache hit check
//-------------------------
//local
val if3_flush = WireInit(false.B)
val if3_update = if2_fire && !if3_flush
val if3_valid = RegNext(if3_update)
val if3_pc = RegEnable(if2_pc,if3_update)
val if3_btb_target = RegEnable(if2_btb_target,if3_update)
val if3_btb_taken = RegEnable(if2_btb_taken,if3_update)
val if3_valid = RegEnable(next=if2_valid,init=false.B,enable=if2_fire)
val if3_pc = RegEnable(if2_pc,if2_fire)
val if3_btb_target = RegEnable(if2_btb_target,if2_fire)
val if3_btb_taken = RegEnable(if2_btb_taken,if2_fire)
//next
val if4_ready = WireInit(false.B)
//pipe fire
val if3_fire = if3_valid && if4_ready
val if3_ready = if3_fire || !if3_valid
if3_ready := if3_fire || !if3_valid
XSDebug("[IF3]if3_valid:%d || if3_pc:0x%x || if4_ready:%d ",if3_valid,if3_pc,if4_ready)
XSDebug(false,if3_fire,"------IF3->fire!!!")
XSDebug(false,true.B,"\n")
//-------------------------
// IF4 icache response
// RAS result
// target generate
//-------------------------
val if4_flush = WireInit(false.B)
val if4_update = if3_fire && !if4_flush
val if4_valid = RegNext(if4_update)
val if4_pc = RegEnable(if3_pc,if4_update)
val if4_btb_target = RegEnable(if3_btb_target,if4_update)
val if4_btb_taken = RegEnable(if3_btb_taken,if4_update)
//TAGE
val tage_taken = bpu.io.tageOut.valid
//TODO: icache predecode info
val predecode = icache.io.out.bits.predecode
val icache_isBR = tage_taken
val icache_isDirectJmp = icache_isBR &&
val icache_isCall = icache_isDirectJmp &&
val icache_isReturn = !icache_isDirectJmp &&
val icache_isOtherNDJmp = !icache_isDirectJmp && !icache_isReturn
when(if4_valid && icahe.io.out.fire())
val if4_valid = RegEnable(next=if3_valid,init=false.B,enable=if3_fire)
val if4_pc = RegEnable(if3_pc,if3_fire)
val if4_btb_target = RegEnable(if3_btb_target,if3_fire)
val if4_btb_taken = RegEnable(if3_btb_taken,if3_fire)
val if4_tage_target = bpu.io.tageOut.bits.target
val if4_tage_taken = bpu.io.tageOut.valid
val if4_tage_insMask = bpu.io.tageOut.bits.instrValid
XSDebug("[IF4]if4_valid:%d || if4_pc:0x%x \n",if4_valid,if4_pc)
//XSDebug("[IF4-TAGE-out]if4_tage_taken:%d || if4_btb_insMask:%b || if4_tage_target:0x%x \n",if4_tage_taken,if4_tage_insMask.asUInt,if4_tage_target)
XSDebug("[IF4-ICACHE-RESP]icacheResp.valid:%d icacheResp.ready:%d\n",io.icacheResp.valid,io.icacheResp.ready)
when(if4_valid && io.icacheResp.fire() && if4_tage_taken)
{
if1_npc := if4_btb_target
if1_npc := if4_tage_target
}
//redirect
when(io.redirect.valid){
if1_npc := io.redirect.bits.target
if2_flush := true.B
if3_flush := true.B
if4_flush := true.B
//redirect: mispredict
when(io.redirectInfo.flush()){
if1_npc := io.redirectInfo.redirect.target
if3_valid := false.B
if4_valid := false.B
}
XSDebug(io.redirectInfo.flush(),"[IFU-REDIRECT] target:0x%x \n",io.redirectInfo.redirect.target.asUInt)
//Output -> iBuffer
if4_ready := io.fetchPacket.ready
io.fetchPacket.valid := if4_valid && !if4_flush
io.fetchPacket.instrs := io.icache.out.bits.rdata
io.fetchPacket.mask := Fill(FetchWidth*2, 1.U(1.W)) << pc(2+log2Up(FetchWidth)-1, 1)
io.fetchPacket.pc := if4_pc
io.fetchPacket <> DontCare
if4_ready := io.fetchPacket.ready && (io.icacheResp.valid || !if4_valid)
io.fetchPacket.valid := if4_valid && !io.redirectInfo.flush()
io.fetchPacket.bits.instrs := io.icacheResp.bits.icacheOut
io.fetchPacket.bits.mask := Fill(FetchWidth*2, 1.U(1.W)) << if4_pc(2+log2Up(FetchWidth)-1, 1)
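// The mask marks valid 16-bit slots within the fetch group: FetchWidth*2 half-words,
// left-shifted by the fetch pc's half-word offset inside the aligned group.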
io.fetchPacket.bits.pc := if4_pc
//to BPU
bpu.io.predecode.valid := io.icacheResp.fire() && if4_valid
bpu.io.predecode.bits <> io.icacheResp.bits.predecode
bpu.io.predecode.bits.mask := Fill(FetchWidth, 1.U(1.W)) << if4_pc(2+log2Up(FetchWidth)-1, 2) //TODO: consider RVC
io.icacheResp.ready := io.fetchPacket.ready
}
@@ -15,7 +15,7 @@ trait HasTageParameter {
( 128, 64, 9))
val TageNTables = TableInfo.size
val UBitPeriod = 2048
val BankWidth = FetchWidth // 8
val BankWidth = 8 // FetchWidth
}
abstract class TageBundle extends XSBundle with HasTageParameter
@@ -44,7 +44,7 @@ class TageUpdate extends TageBundle {
val u = Vec(BankWidth, UInt(2.W))
}
class TageTable(val nRows: Int, val histLen: Int, val tagLen: Int, val UBitPeriod: Int) extends TageModule {
class TageTable(val nRows: Int, val histLen: Int, val tagLen: Int, val uBitPeriod: Int) extends TageModule {
val io = IO(new Bundle() {
val req = Input(Valid(new TageReq))
val resp = Output(Vec(BankWidth, Valid(new TageResp)))
@@ -123,7 +123,7 @@ class Tage extends TageModule {
// Create a mask of tables which did not hit our query, which also contain useless entries
// and also use a longer history than the provider
val allocatableSlots = (VecInit(resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(LowerMask(UIntToOH(provider)) & Fill(TageNTables, provided.asUInt))
~(LowerMask(UIntToOH(provider), TageNTables) & Fill(TageNTables, provided.asUInt))
)
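// e.g. assuming TageNTables = 6 and provider = 2: LowerMask(UIntToOH(2), 6) = b000111,
// so (when a provider exists) only tables 3..5, which use longer histories, stay allocatable.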
val allocLFSR = LFSR64()(TageNTables - 1, 0)
val firstEntry = PriorityEncoder(allocatableSlots)
@@ -163,7 +163,7 @@ class Tage extends TageModule {
updateU(allocate.bits)(idx) := 0.U
}.otherwise {
val provider = updateMeta.provider
val decrMask = Mux(provider.valid, ~LowerMask(UIntToOH(provider.bits)), 0.U)
val decrMask = Mux(provider.valid, ~LowerMask(UIntToOH(provider.bits), TageNTables), 0.U)
for (i <- 0 until TageNTables) {
when (decrMask(i)) {
updateUMask(i)(idx) := true.B
@@ -184,8 +184,8 @@ class Tage extends TageModule {
tables(i).io.update.u(w) := updateU(i)(w)
}
// use fetch pc instead of instruction pc
tables(i).io.update.pc := io.RedirectInfo.redirect.pc - io.RedirectInfo.redirect.fetchIdx << 2.U
tables(i).io.update.hist := io.RedirectInfo.redirect.hist
tables(i).io.update.pc := io.redirectInfo.redirect.pc - io.redirectInfo.redirect.fetchIdx << 2.U
tables(i).io.update.hist := io.redirectInfo.redirect.hist
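// NOTE: Scala parses `a - b << 2.U` as `(a - b) << 2.U` because `<<` binds more loosely
// than `-`; if the intent of the update.pc line above is pc - (fetchIdx << 2), the shift
// needs its own parentheses.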
}
io.out.hits := outHits.asUInt
......
package chiseltest.tests
import org.scalatest._
import chisel3._
import chisel3.experimental.BundleLiterals._
import chiseltest._
import xiangshan._
import xiangshan.frontend.IFU
import xiangshan.utils._
import xiangshan.CtrlFlow
class IFUTest extends FlatSpec with ChiselScalatestTester with Matchers {
behavior of "IFU Test"
it should "test IFU pipeline" in {
test(new IFU) { c =>
//-----------------
//Cycle 0
//-----------------
//c.io.icacheReq.ready.poke(true.B)
c.io.icacheReq.ready.poke(false.B)
c.io.fetchPacket.ready.poke(true.B)
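// Keep icacheReq.ready low at first to back-pressure IF2; the fetch-packet
// consumer side is always ready.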
c.clock.step()
//-----------------
//Cycle 1
//-----------------
c.clock.step()
c.clock.step()
c.clock.step()
//-----------------
// Cycle 2
//-----------------
c.io.icacheReq.ready.poke(true.B)
c.clock.step()
//-----------------
// Cycle 3
//-----------------
c.clock.step()
//-----------------
// Cycle 4
//-----------------
c.io.icacheResp.valid.poke(true.B)
c.clock.step()
//-----------------
// Cycle 5
//-----------------
c.io.redirectInfo.valid.poke(true.B)
c.io.redirectInfo.misPred.poke(true.B)
c.io.redirectInfo.redirect.target.poke("h80002800".U)
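// Inject a mispredict redirect towards 0x80002800: the IFU should squash
// IF3/IF4 and restart fetching from the new target.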
c.clock.step()
//-----------------
// Cycle 6
//-----------------
c.io.redirectInfo.valid.poke(false.B)
c.io.redirectInfo.misPred.poke(false.B)
c.clock.step()
//-----------------
// Cycle 7
//-----------------
c.clock.step()
}
}
}
\ No newline at end of file