Commit a8373f31 authored by Lingrui98

Merge remote-tracking branch 'origin/debian-gogogo' into ifu-timing

......@@ -11,6 +11,8 @@ if __name__ == "__main__":
lines = []
line_count = 0
synthesis_nest_level = 0
reg_init_nest_level = 0
mem_init_nest_level = 0
with open(input_file) as f:
for line in f:
line_count += 1
......@@ -18,30 +20,72 @@ if __name__ == "__main__":
ifdef = re.compile('`ifdef')
ifndef = re.compile('`ifndef')
endif = re.compile('`endif')
# remove the line coverage results of non-synthesizable code (mostly assert and fwrite)
synthesis = re.compile('`ifndef SYNTHESIS')
line_coverage = re.compile('^\s*([%]?\d+)\s+if')
# remove the coverage results of randomly initialized variables
reg_init = re.compile('`ifdef RANDOMIZE_REG_INIT')
mem_init = re.compile('`ifdef RANDOMIZE_MEM_INIT')
coverage = re.compile('^\s*(%?\d+)\s+')
ifdef_match = ifdef.search(line)
ifndef_match = ifndef.search(line)
endif_match = endif.search(line)
synthesis_match = synthesis.search(line)
line_coverage_match = line_coverage.search(line)
reg_init_match = reg_init.search(line)
mem_init_match = mem_init.search(line)
coverage_match = coverage.search(line)
# enter synthesis block
if synthesis_match:
assert synthesis_nest_level == 0, "Should not nest SYNTHESIS macro"
synthesis_nest_level = 1
if ifdef_match or (ifndef_match and not synthesis_match):
synthesis_nest_level += 1
if endif_match:
synthesis_nest_level -= 1
assert synthesis_nest_level >= 0, "Macro nest level should be >= 0"
if synthesis_nest_level > 0:
if ifdef_match or (ifndef_match and not synthesis_match):
synthesis_nest_level += 1
if endif_match:
synthesis_nest_level -= 1
assert synthesis_nest_level >= 0, "Macro nest level should be >= 0"
# remove line coverage results in synthesis block
if coverage_match:
coverage_stat = coverage_match.group(1)
line = line.replace(coverage_match.group(1), " " * len(coverage_stat))
# enter reg_init block
if reg_init_match:
assert reg_init_nest_level == 0, "Should not nest reg_init macro"
reg_init_nest_level = 1
if reg_init_nest_level > 0:
if (ifdef_match and not reg_init_match) or ifndef_match:
reg_init_nest_level += 1
if endif_match:
reg_init_nest_level -= 1
assert reg_init_nest_level >= 0, "Macro nest level should be >= 0"
# remove line coverage results in reg_init block
if coverage_match:
coverage_stat = coverage_match.group(1)
line = line.replace(coverage_match.group(1), " " * len(coverage_stat))
# enter mem_init block
if mem_init_match:
assert mem_init_nest_level == 0, "Should not nest mem_init macro"
mem_init_nest_level = 1
if mem_init_nest_level > 0:
if (ifdef_match and not mem_init_match) or ifndef_match:
mem_init_nest_level += 1
if endif_match:
mem_init_nest_level -= 1
assert mem_init_nest_level >= 0, "Macro nest level should be >= 0"
# remove line coverage results in synthesis block
if synthesis_nest_level > 0 and line_coverage_match:
coverage_stat = line_coverage_match.group(1)
line = line.replace(line_coverage_match.group(1), " " * len(coverage_stat))
# remove line coverage results in mem_init block
if coverage_match:
coverage_stat = coverage_match.group(1)
line = line.replace(coverage_match.group(1), " " * len(coverage_stat))
lines += line
......
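A note on the technique in this hunk: rather than deleting lines inside the `ifndef SYNTHESIS / `ifdef RANDOMIZE_*_INIT regions (which would shift line numbers in the report), the script blanks the leading coverage count with an equal-width run of spaces. A minimal, self-contained sketch of that trick (the regex is the one used above; the sample line is hypothetical):

    import re

    # a leading coverage count, e.g. "040192" or "%000000"
    coverage = re.compile(r'^\s*(%?\d+)\s+')

    def blank_coverage(line):
        m = coverage.search(line)
        if m:
            stat = m.group(1)
            # same-width blanks keep the rest of the line aligned
            line = line.replace(stat, " " * len(stat))
        return line

    print(blank_coverage(" 040192   if(array_0_MPORT_en & array_0_MPORT_mask) begin"))
    # prints the same line with the count replaced by spaces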
......@@ -5,19 +5,31 @@ import re
import copy
import pprint
COVERRED = "COVERRED"
NOT_COVERRED = "NOT_COVERRED"
LINE_COVERRED = "LINE_COVERRED"
NOT_LINE_COVERRED = "NOT_LINE_COVERRED"
TOGGLE_COVERRED = "TOGGLE_COVERRED"
NOT_TOGGLE_COVERRED = "NOT_TOGGLE_COVERRED"
DONTCARE = "DONTCARE"
BEGIN = "BEGIN"
END = "END"
CHILDREN = "CHILDREN"
MODULE = "MODULE"
INSTANCE = "INSTANCE"
TYPE="TYPE"
ROOT="ROOT"
NODE="NODE"
SELFCOVERAGE="SELFCOVERAGE"
TREECOVERAGE="TREECOVERAGE"
TYPE = "TYPE"
ROOT = "ROOT"
NODE = "NODE"
SELFCOVERAGE = "SELFCOVERAGE"
TREECOVERAGE = "TREECOVERAGE"
LINECOVERAGE = 0
TOGGLECOVERAGE = 1
def check_one_hot(l):
cnt = 0
for e in l:
if e:
cnt += 1
return cnt <= 1
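check_one_hot takes a list of match results (match objects or None) and passes when at most one is truthy, so a line that matches nothing still falls through to DONTCARE below. A quick illustration:

    assert check_one_hot([])                      # empty: vacuously one-hot
    assert check_one_hot([None, "match", None])   # exactly one match
    assert not check_one_hot(["line", "toggle"])  # ambiguous line, rejected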
def get_lines(input_file):
lines = []
......@@ -31,41 +43,85 @@ def get_line_annotation(lines):
# pattern_1: 040192 if(array_0_MPORT_en & array_0_MPORT_mask) begin
# pattern_2: 2218110 end else if (_T_30) begin // @[Conditional.scala 40:58]
# pattern_2: 000417 end else begin
coverred_pattern_1 = re.compile('^\s*(\d+)\s+if')
coverred_pattern_2 = re.compile('^\s*(\d+)\s+end else')
not_coverred_pattern_1 = re.compile('^\s*(%0+)\s+if')
not_coverred_pattern_2 = re.compile('^\s*(%0+)\s+end else')
line_coverred_pattern_1 = re.compile('^\s*(\d+)\s+if')
line_coverred_pattern_2 = re.compile('^\s*(\d+)\s+end else')
not_line_coverred_pattern_1 = re.compile('^\s*(%0+)\s+if')
not_line_coverred_pattern_2 = re.compile('^\s*(%0+)\s+end else')
toggle_coverred_pattern_1 = re.compile('^\s*(\d+)\s+reg')
toggle_coverred_pattern_2 = re.compile('^\s*(\d+)\s+wire')
toggle_coverred_pattern_3 = re.compile('^\s*(\d+)\s+input')
toggle_coverred_pattern_4 = re.compile('^\s*(\d+)\s+output')
not_toggle_coverred_pattern_1 = re.compile('^\s*(%0+)\s+reg')
not_toggle_coverred_pattern_2 = re.compile('^\s*(%0+)\s+wire')
not_toggle_coverred_pattern_3 = re.compile('^\s*(%0+)\s+input')
not_toggle_coverred_pattern_4 = re.compile('^\s*(%0+)\s+output')
line_cnt = 0
for line in lines:
coverred_match = coverred_pattern_1.search(line) or coverred_pattern_2.search(line)
not_coverred_match = not_coverred_pattern_1.search(line) or not_coverred_pattern_2.search(line)
line_coverred_match = line_coverred_pattern_1.search(line) or line_coverred_pattern_2.search(line)
not_line_coverred_match = not_line_coverred_pattern_1.search(line) or not_line_coverred_pattern_2.search(line)
assert not (line_coverred_match and not_line_coverred_match)
assert not (coverred_match and not_coverred_match)
toggle_coverred_match = toggle_coverred_pattern_1.search(line) or toggle_coverred_pattern_2.search(line) or \
toggle_coverred_pattern_3.search(line) or toggle_coverred_pattern_4.search(line)
not_toggle_coverred_match = not_toggle_coverred_pattern_1.search(line) or not_toggle_coverred_pattern_2.search(line) or \
not_toggle_coverred_pattern_3.search(line) or not_toggle_coverred_pattern_4.search(line)
assert not (toggle_coverred_match and not_toggle_coverred_match)
if coverred_match:
line_annotations.append(COVERRED)
elif not_coverred_match:
line_annotations.append(NOT_COVERRED)
all_match = (line_coverred_match, not_line_coverred_match,
toggle_coverred_match, not_toggle_coverred_match)
if not check_one_hot(all_match):
print("not_one_hot")
print(line_cnt)
print(all_match)
assert False, "This line matches multiple patterns"
if line_coverred_match:
line_annotations.append(LINE_COVERRED)
elif not_line_coverred_match:
line_annotations.append(NOT_LINE_COVERRED)
elif toggle_coverred_match:
line_annotations.append(TOGGLE_COVERRED)
elif not_toggle_coverred_match:
line_annotations.append(NOT_TOGGLE_COVERRED)
else:
line_annotations.append(DONTCARE)
line_cnt += 1
return line_annotations
# get the line coverage statistics in line range [start, end)
def get_coverage_statistics(line_annotations, start, end):
coverred = 0
not_coverred = 0
line_coverred = 0
not_line_coverred = 0
toggle_coverred = 0
not_toggle_coverred = 0
for i in range(start, end):
if line_annotations[i] == COVERRED:
coverred += 1
if line_annotations[i] == LINE_COVERRED:
line_coverred += 1
if line_annotations[i] == NOT_LINE_COVERRED:
not_line_coverred += 1
if line_annotations[i] == TOGGLE_COVERRED:
toggle_coverred += 1
if line_annotations[i] == NOT_COVERRED:
not_coverred += 1
if line_annotations[i] == NOT_TOGGLE_COVERRED:
not_toggle_coverred += 1
# deal with divide by zero
coverage = 1.0
if coverred + not_coverred != 0:
coverage = float(coverred) / (coverred + not_coverred)
return (coverred, not_coverred, coverage)
line_coverage = 1.0
if line_coverred + not_line_coverred != 0:
line_coverage = float(line_coverred) / (line_coverred + not_line_coverred)
toggle_coverage = 1.0
if toggle_coverred + not_toggle_coverred != 0:
toggle_coverage = float(toggle_coverred) / (toggle_coverred + not_toggle_coverred)
return ((line_coverred, not_line_coverred, line_coverage),
(toggle_coverred, not_toggle_coverred, toggle_coverage))
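A small worked example of the new return shape (the annotation list is made up): eight LINE_COVERRED and two NOT_LINE_COVERRED entries, with no toggle annotations at all, give

    annotations = [LINE_COVERRED] * 8 + [NOT_LINE_COVERRED] * 2
    line_stats, toggle_stats = get_coverage_statistics(annotations, 0, len(annotations))
    print(line_stats)    # (8, 2, 0.8)
    print(toggle_stats)  # (0, 0, 1.0): the divide-by-zero guard defaults to 1.0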
# get modules and all its submodules
def get_modules(lines):
......@@ -140,18 +196,26 @@ def get_tree_coverage(modules, coverage):
if CHILDREN not in modules[module]:
modules[module][TREECOVERAGE] = self_coverage
else:
coverred = self_coverage[0]
not_coverred = self_coverage[1]
line_coverred = self_coverage[LINECOVERAGE][0]
not_line_coverred = self_coverage[LINECOVERAGE][1]
toggle_coverred = self_coverage[TOGGLECOVERAGE][0]
not_toggle_coverred = self_coverage[TOGGLECOVERAGE][1]
# the dfs part
for child in modules[module][CHILDREN]:
child_coverage = dfs(child[MODULE])
coverred += child_coverage[0]
not_coverred += child_coverage[1]
line_coverred += child_coverage[LINECOVERAGE][0]
not_line_coverred += child_coverage[LINECOVERAGE][1]
toggle_coverred += child_coverage[TOGGLECOVERAGE][0]
not_toggle_coverred += child_coverage[TOGGLECOVERAGE][1]
# deal with divide by zero
coverage = 1.0
if coverred + not_coverred != 0:
coverage = float(coverred) / (coverred + not_coverred)
modules[module][TREECOVERAGE] = (coverred, not_coverred, coverage)
line_coverage = 1.0
if line_coverred + not_line_coverred != 0:
line_coverage = float(line_coverred) / (line_coverred + not_line_coverred)
toggle_coverage = 1.0
if toggle_coverred + not_toggle_coverred != 0:
toggle_coverage = float(toggle_coverred) / (toggle_coverred + not_toggle_coverred)
modules[module][TREECOVERAGE] = ((line_coverred, not_line_coverred, line_coverage),
(toggle_coverred, not_toggle_coverred, toggle_coverage))
return modules[module][TREECOVERAGE]
for module in modules:
......@@ -163,8 +227,8 @@ def get_tree_coverage(modules, coverage):
# arg1: tree coverage results
# arg2: coverage type
def sort_coverage(coverage, coverage_type):
l = [(module, coverage[module][coverage_type])for module in coverage]
def sort_coverage(coverage, self_or_tree, coverage_type):
l = [(module, coverage[module][self_or_tree][coverage_type])for module in coverage]
l.sort(key=lambda x:x[1][2])
return l
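With the added self_or_tree level, callers now pick both the scope and the metric; a sketch of the intended use (mirroring the __main__ block further down):

    # ascending by coverage ratio (sort key x[1][2]), least-covered modules first
    ranked = sort_coverage(tree_coverage, SELFCOVERAGE, LINECOVERAGE)
    for module, (covered, not_covered, ratio) in ranked[:10]:
        print("%-40s %6.2f%%" % (module, ratio * 100.0))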
......@@ -174,10 +238,15 @@ def print_tree_coverage(tree_coverage):
tree = tree_coverage[module][TREECOVERAGE]
self = tree_coverage[module][SELFCOVERAGE]
print(" " * level + "- " + module)
print(" " * level + " tree", end="")
print("(%d, %d, %.2f)" % (tree[0], tree[1], tree[2] * 100.0))
print(" " * level + " self", end="")
print("(%d, %d, %.2f)" % (self[0], self[1], self[2] * 100.0))
print(" " * level + " tree_line", end="")
print("(%d, %d, %.2f)" % (tree[LINECOVERAGE][0], tree[LINECOVERAGE][1], tree[LINECOVERAGE][2] * 100.0))
print(" " * level + " self_line", end="")
print("(%d, %d, %.2f)" % (self[LINECOVERAGE][0], self[LINECOVERAGE][1], self[LINECOVERAGE][2] * 100.0))
print(" " * level + " tree_toggle", end="")
print("(%d, %d, %.2f)" % (tree[TOGGLECOVERAGE][0], tree[TOGGLECOVERAGE][1], tree[TOGGLECOVERAGE][2] * 100.0))
print(" " * level + " self_toggle", end="")
print("(%d, %d, %.2f)" % (self[TOGGLECOVERAGE][0], self[TOGGLECOVERAGE][1], self[TOGGLECOVERAGE][2] * 100.0))
# print children nodes
if CHILDREN in modules[module]:
......@@ -215,11 +284,15 @@ if __name__ == "__main__":
# print("tree_coverage:")
# pp.pprint(tree_coverage)
print("SelfCoverage:")
pp.pprint(sort_coverage(tree_coverage, SELFCOVERAGE))
print("LineSelfCoverage:")
pp.pprint(sort_coverage(tree_coverage, SELFCOVERAGE, LINECOVERAGE))
print("LineTreeCoverage:")
pp.pprint(sort_coverage(tree_coverage, TREECOVERAGE, LINECOVERAGE))
print("TreeCoverage:")
pp.pprint(sort_coverage(tree_coverage, TREECOVERAGE))
print("ToggleSelfCoverage:")
pp.pprint(sort_coverage(tree_coverage, SELFCOVERAGE, TOGGLECOVERAGE))
print("ToggleTreeCoverage:")
pp.pprint(sort_coverage(tree_coverage, TREECOVERAGE, TOGGLECOVERAGE))
print("AllCoverage:")
print_tree_coverage(tree_coverage)
package utils
import chisel3._
import chisel3.util._
class DataModuleTemplate[T <: Data](gen: T, numEntries: Int, numRead: Int, numWrite: Int, useBitVec: Boolean = false) extends Module {
val io = IO(new Bundle {
val raddr = Vec(numRead, Input(UInt(log2Up(numEntries).W)))
val rdata = Vec(numRead, Output(gen))
val wen = Vec(numWrite, Input(Bool()))
val waddr = Vec(numWrite, Input(UInt(log2Up(numEntries).W)))
val wdata = Vec(numWrite, Input(gen))
})
val data = Mem(numEntries, gen)
// read ports
for (i <- 0 until numRead) {
io.rdata(i) := data(io.raddr(i))
}
if (useBitVec) {
// waddr_dec(i)(j): waddr(i) targets entry(j)
val waddr_dec = VecInit(io.waddr.map(UIntToOH(_)(numEntries - 1, 0)))
// waddr_dec_with_en(i)(j): entry(j) is written by io.wdata(i)
val waddr_dec_with_en = VecInit(io.wen.zip(waddr_dec).map{case (en, addr) => Fill(numEntries, en) & addr})
val wen_dec = VecInit((0 until numEntries).map(j => {
val data_wen = VecInit(waddr_dec_with_en.map(en => en(j)))
data_wen.suggestName(s"data_wen_$j")
data_wen.asUInt.orR
}))
val wdata_dec = VecInit((0 until numEntries).map(j =>
waddr_dec_with_en.zip(io.wdata).map{ case (en, data) => Fill(gen.getWidth, en(j)) & data.asUInt}.reduce(_ | _).asTypeOf(gen)
))
waddr_dec.suggestName("waddr_dec")
waddr_dec_with_en.suggestName("waddr_dec_with_en")
wen_dec.suggestName("wen_dec")
wdata_dec.suggestName("wdata_dec")
// write ports
for (i <- 0 until numEntries) {
when (wen_dec(i)) {
data(i) := wdata_dec(i)
}
}
}
else {
// below are the write ports (with priorities)
for (i <- 0 until numWrite) {
when (io.wen(i)) {
data(io.waddr(i)) := io.wdata(i)
}
}
}
// DataModuleTemplate should not be used when there are any write conflicts
for (i <- 0 until numWrite) {
for (j <- i+1 until numWrite) {
assert(!(io.wen(i) && io.wen(j) && io.waddr(i) === io.waddr(j)))
}
}
}
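The useBitVec path above trades the priority chain of the default write ports for a per-entry OR of one-hot-decoded writes. A rough software model of that decode (plain Python, ours, not part of the patch):

    def bitvec_write(entries, wens, waddrs, wdatas):
        # one pass per entry j, mirroring the per-entry wen_dec/wdata_dec vectors
        for j in range(len(entries)):
            writers = [i for i, en in enumerate(wens) if en and waddrs[i] == j]
            if writers:                 # wen_dec(j): some enabled port targets j
                data = 0
                for i in writers:       # wdata_dec(j): OR of the masked data
                    data |= wdatas[i]
                entries[j] = data
        return entries

The OR-reduce is only safe because the assert at the end of the module forbids two enabled ports from sharing an address.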
......@@ -31,7 +31,7 @@ class DebugIdentityNode()(implicit p: Parameters) extends LazyModule {
}
)
}
debug(in, true)
debug(in, false)
}
}
......
......@@ -6,6 +6,7 @@ import chisel3.util._
object PipelineConnect {
def apply[T <: Data](left: DecoupledIO[T], right: DecoupledIO[T], rightOutFire: Bool, isFlush: Bool) = {
val valid = RegInit(false.B)
valid.suggestName("pipeline_valid")
when (rightOutFire) { valid := false.B }
when (left.valid && right.ready) { valid := true.B }
when (isFlush) { valid := false.B }
......
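Since later Chisel when blocks override earlier ones, the flush above wins over both the clear on rightOutFire and the set on a handshake. A hedged restatement of the register's next-state function:

    def next_valid(valid, right_out_fire, left_valid, right_ready, is_flush):
        if right_out_fire:
            valid = False
        if left_valid and right_ready:
            valid = True    # a new beat enters the stage
        if is_flush:
            valid = False   # last assignment wins, as in the Chisel above
        return valid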
package utils
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink.TLMessages._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.{TLBundle, TLBundleA, TLBundleB, TLBundleC, TLBundleD, TLBundleE, TLChannel}
import xiangshan.HasXSLog
trait HasTLDump { this: HasXSLog =>
trait HasTLDump {
this: HasXSLog =>
implicit class TLDump(channel: TLChannel) {
def dump = channel match {
case a: TLBundleA =>
printChannelA(a)
case b: TLBundleB =>
printChannelB(b)
case c: TLBundleC =>
printChannelC(c)
case d: TLBundleD =>
printChannelD(d)
case e: TLBundleE =>
printChannelE(e)
}
}
def printChannelA(a: TLBundleA): Unit = {
switch(a.opcode) {
is(PutFullData) {
XSDebug(false, true.B,
a.channelName + " opcode: %x param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.opcode, a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
a.channelName + " PutFullData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
case b: TLBundleB =>
}
is(PutPartialData) {
XSDebug(false, true.B,
b.channelName + " opcode: %x param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.opcode, b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
a.channelName + " PutPartialData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
case c: TLBundleC =>
}
is(ArithmeticData) {
XSDebug(false, true.B,
c.channelName + " opcode: %x param: %x size: %x source: %d address: %x data: %x corrupt: %b\n",
c.opcode, c.param, c.size, c.source, c.address, c.data, c.corrupt
a.channelName + " ArithmeticData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
case d: TLBundleD =>
}
is(LogicalData) {
XSDebug(false, true.B,
d.channelName + " opcode: %x param: %x size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.opcode, d.param, d.size, d.source, d.sink, d.denied, d.data, d.corrupt
a.channelName + " LogicalData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
case e: TLBundleE =>
XSDebug(false, true.B, e.channelName + " sink: %d\n", e.sink)
}
is(Get) {
XSDebug(false, true.B,
a.channelName + " Get param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
is(Hint) {
XSDebug(false, true.B,
a.channelName + " Intent param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.param, a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
is(AcquireBlock) {
switch(a.param) {
is(NtoB) {
XSDebug(false, true.B,
a.channelName + " AcquireBlock NtoB size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
is(NtoT) {
XSDebug(false, true.B,
a.channelName + " AcquireBlock NtoT size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
is(BtoT) {
XSDebug(false, true.B,
a.channelName + " AcquireBlock BtoT size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
}
}
is(AcquirePerm) {
switch(a.param) {
is(NtoB) {
XSDebug(false, true.B,
a.channelName + " AcquirePerm NtoB size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
is(NtoT) {
XSDebug(false, true.B,
a.channelName + " AcquirePerm NtoT size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
is(BtoT) {
XSDebug(false, true.B,
a.channelName + " AcquirePerm BtoT size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
a.size, a.source, a.address, a.mask, a.data, a.corrupt
)
}
}
}
}
}
def printChannelB(b: TLBundleB): Unit = {
switch(b.opcode) {
is(PutFullData) {
XSDebug(false, true.B,
b.channelName + " PutFullData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(PutPartialData) {
XSDebug(false, true.B,
b.channelName + " PutPartialData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(ArithmeticData) {
XSDebug(false, true.B,
b.channelName + " ArithmeticData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(LogicalData) {
XSDebug(false, true.B,
b.channelName + " LogicalData param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(Get) {
XSDebug(false, true.B,
b.channelName + " Get param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(Hint) {
XSDebug(false, true.B,
b.channelName + " Intent param: %x size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.param, b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(Probe) {
switch(b.param) {
is(toN) {
XSDebug(false, true.B,
b.channelName + " Probe toN size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(toB) {
XSDebug(false, true.B,
b.channelName + " Probe toB size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
is(toT) {
XSDebug(false, true.B,
b.channelName + " Probe toT size: %x source: %d address: %x mask: %x data: %x corrupt: %b\n",
b.size, b.source, b.address, b.mask, b.data, b.corrupt
)
}
}
}
}
}
def printChannelC(c: TLBundleC): Unit = {
switch(c.opcode) {
is(AccessAck) {
XSDebug(false, true.B,
c.channelName + " AccessAck param: %x size: %x source: %d address: %x data: %x corrupt: %b\n",
c.param, c.size, c.source, c.address, c.data, c.corrupt
)
}
is(AccessAckData) {
XSDebug(false, true.B,
c.channelName + " AccessAckData param: %x size: %x source: %d address: %x data: %x corrupt: %b\n",
c.param, c.size, c.source, c.address, c.data, c.corrupt
)
}
is(HintAck) {
XSDebug(false, true.B,
c.channelName + " HintAck param: %x size: %x source: %d address: %x data: %x corrupt: %b\n",
c.param, c.size, c.source, c.address, c.data, c.corrupt
)
}
is(ProbeAck) {
switch(c.param) {
is(TtoB) {
XSDebug(false, true.B,
c.channelName + " ProbeAck TtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoN) {
XSDebug(false, true.B,
c.channelName + " ProbeAck TtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoN) {
XSDebug(false, true.B,
c.channelName + " ProbeAck BtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoT) {
XSDebug(false, true.B,
c.channelName + " ProbeAck TtoT size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoB) {
XSDebug(false, true.B,
c.channelName + " ProbeAck BtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(NtoN) {
XSDebug(false, true.B,
c.channelName + " ProbeAck NtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
}
}
is(ProbeAckData) {
switch(c.param) {
is(TtoB) {
XSDebug(false, true.B,
c.channelName + " ProbeAckData TtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoN) {
XSDebug(false, true.B,
c.channelName + " ProbeAckData TtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoN) {
XSDebug(false, true.B,
c.channelName + " ProbeAckData BtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoT) {
XSDebug(false, true.B,
c.channelName + " ProbeAckData TtoT size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoB) {
XSDebug(false, true.B,
c.channelName + " ProbeAckData BtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(NtoN) {
XSDebug(false, true.B,
c.channelName + " ProbeAckData NtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
}
}
is(Release) {
switch(c.param) {
is(TtoB) {
XSDebug(false, true.B,
c.channelName + " Release TtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoN) {
XSDebug(false, true.B,
c.channelName + " Release TtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoN) {
XSDebug(false, true.B,
c.channelName + " Release BtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoT) {
XSDebug(false, true.B,
c.channelName + " Release TtoT size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoB) {
XSDebug(false, true.B,
c.channelName + " Release BtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(NtoN) {
XSDebug(false, true.B,
c.channelName + " Release NtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
}
}
is(ReleaseData) {
switch(c.param) {
is(TtoB) {
XSDebug(false, true.B,
c.channelName + " ReleaseData TtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoN) {
XSDebug(false, true.B,
c.channelName + " ReleaseData TtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoN) {
XSDebug(false, true.B,
c.channelName + " ReleaseData BtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(TtoT) {
XSDebug(false, true.B,
c.channelName + " ReleaseData TtoT size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(BtoB) {
XSDebug(false, true.B,
c.channelName + " ReleaseData BtoB size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
is(NtoN) {
XSDebug(false, true.B,
c.channelName + " ReleaseData NtoN size: %x source: %d address: %x data: %x corrupt: %b\n",
c.size, c.source, c.address, c.data, c.corrupt
)
}
}
}
}
}
def printChannelD(d: TLBundleD): Unit = {
switch(d.opcode) {
is(AccessAck) {
XSDebug(false, true.B,
d.channelName + " AccessAck param: %x size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.param, d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(AccessAckData) {
XSDebug(false, true.B,
d.channelName + " AccessAckData param: %x size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.param, d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(HintAck) {
XSDebug(false, true.B,
d.channelName + " HintAck param: %x size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.param, d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(Grant) {
switch(d.param) {
is(toT) {
XSDebug(false, true.B,
d.channelName + " Grant toT size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(toB) {
XSDebug(false, true.B,
d.channelName + " Grant toB size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(toN) {
XSDebug(false, true.B,
d.channelName + " Grant toN size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
}
}
is(GrantData) {
switch(d.param) {
is(toT) {
XSDebug(false, true.B,
d.channelName + " GrantData toT size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(toB) {
XSDebug(false, true.B,
d.channelName + " GrantData toB size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
is(toN) {
XSDebug(false, true.B,
d.channelName + " GrantData toN size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
}
}
is(ReleaseAck) {
XSDebug(false, true.B,
d.channelName + " ReleaseAck param: %x size: %x source: %d sink: %d denied: %b data: %x corrupt: %b\n",
d.param, d.size, d.source, d.sink, d.denied, d.data, d.corrupt
)
}
}
}
def printChannelE(e: TLBundleE): Unit = {
XSDebug(false, true.B, e.channelName + " GrantAck sink: %d\n", e.sink)
}
}
......@@ -20,7 +20,6 @@ class DispatchQueueIO(enqnum: Int, deqnum: Int) extends XSBundle {
// dispatch queue: accepts at most enqnum uops from dispatch1 and dispatches deqnum uops at every clock cycle
class DispatchQueue(size: Int, enqnum: Int, deqnum: Int) extends XSModule with HasCircularQueuePtrHelper {
val io = IO(new DispatchQueueIO(enqnum, deqnum))
val indexWidth = log2Ceil(size)
val s_invalid :: s_valid:: Nil = Enum(2)
......@@ -34,10 +33,12 @@ class DispatchQueue(size: Int, enqnum: Int, deqnum: Int) extends XSModule with H
// tail: first invalid entry (free entry)
val tailPtr = RegInit(VecInit((0 until enqnum).map(_.U.asTypeOf(new CircularQueuePtr(size)))))
val tailPtrMask = UIntToMask(tailPtr(0).value, size)
// valid entries counter
val validCounter = RegInit(0.U(log2Ceil(size + 1).W))
val allowEnqueue = RegInit(true.B)
val validEntries = distanceBetween(tailPtr(0), headPtr(0))
val isTrueEmpty = ~Cat((0 until size).map(i => stateEntries(i) === s_valid)).orR
val canEnqueue = validEntries <= (size - enqnum).U
val canEnqueue = allowEnqueue
val canActualEnqueue = canEnqueue && !io.redirect.valid
/**
......@@ -93,7 +94,8 @@ class DispatchQueue(size: Int, enqnum: Int, deqnum: Int) extends XSModule with H
*/
// dequeue
val numDeqTry = Mux(validEntries > deqnum.U, deqnum.U, validEntries)
val currentValidCounter = distanceBetween(tailPtr(0), headPtr(0))
val numDeqTry = Mux(currentValidCounter > deqnum.U, deqnum.U, currentValidCounter)
val numDeqFire = PriorityEncoder(io.deq.zipWithIndex.map{case (deq, i) =>
// For dequeue, the first entry should never be s_invalid
// Otherwise, there should be a redirect and tail walks back
......@@ -146,6 +148,28 @@ class DispatchQueue(size: Int, enqnum: Int, deqnum: Int) extends XSModule with H
)
}
// update valid counter and allowEnqueue reg
validCounter := Mux(exceptionValid,
0.U,
Mux(io.redirect.valid,
validCounter,
Mux(lastLastCycleMisprediction,
currentValidCounter,
validCounter + numEnq - numDeq)
)
)
allowEnqueue := Mux(io.redirect.valid,
false.B,
Mux(lastLastCycleMisprediction,
currentValidCounter <= (size - enqnum).U,
// To optimize timing, we don't use numDeq here.
// It affects cases when validCount + numEnq - numDeq <= (size - enqnum).U.
// For example, there are 10 empty entries with 6 enqueue and 2 dequeue.
// However, since dispatch queue size > (numEnq + numDeq),
// even if we allow enqueue, they cannot be dispatched immediately.
validCounter + numEnq <= (size - enqnum).U
)
)
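The nested Mux pair above reads more easily as straight-line pseudocode; a Python restatement of the same one-cycle update (register names kept, purely a reading aid):

    def dispatch_queue_update(exception_valid, redirect_valid,
                              last_last_cycle_misprediction,
                              valid_counter, current_valid_counter,
                              num_enq, num_deq, size, enqnum):
        if exception_valid:
            next_valid_counter = 0
        elif redirect_valid:
            next_valid_counter = valid_counter          # hold during redirect
        elif last_last_cycle_misprediction:
            next_valid_counter = current_valid_counter  # resync from the pointers
        else:
            next_valid_counter = valid_counter + num_enq - num_deq
        if redirect_valid:
            allow_enqueue = False
        elif last_last_cycle_misprediction:
            allow_enqueue = current_valid_counter <= size - enqnum
        else:
            # numDeq intentionally ignored for timing, as the comment explains
            allow_enqueue = valid_counter + num_enq <= size - enqnum
        return next_valid_counter, allow_enqueue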
/**
* Part 3: set output and input
......
......@@ -140,6 +140,7 @@ class DivSqrt extends FPUSubModule {
// 53 + 2 + 2 = 57 bits are needed, but 57 % log2(4) != 0, use 58 bits instead
val mantDivSqrt = Module(new MantDivSqrt(D_MANT_WIDTH+2+2+1))
mantDivSqrt.io.kill := kill
mantDivSqrt.io.out.ready := true.B
mantDivSqrt.io.in.valid := state === s_start
mantDivSqrt.io.in.bits.a := Mux(isDivReg || aIsOddExp, Cat(aMantReg, 0.U(5.W)), Cat(0.U(1.W), aMantReg, 0.U(4.W)))
......
......@@ -11,6 +11,7 @@ class MantDivSqrt(len: Int) extends Module{
val a, b = UInt(len.W)
val isDiv = Bool()
}))
val kill = Input(Bool())
val out = DecoupledIO(new Bundle() {
val quotient = UInt(len.W)
val isZeroRem = Bool()
......@@ -45,6 +46,7 @@ class MantDivSqrt(len: Int) extends Module{
when(io.out.fire()){ state := s_idle }
}
}
when(io.kill){ state := s_idle }
val ws, wc = Reg(UInt((len+4).W))
......
......@@ -7,6 +7,8 @@ import xiangshan._
import utils._
import xiangshan.backend.LSUOpType
import xiangshan.backend.fu.fpu.Fflags
import xiangshan.mem.{LqPtr, SqPtr}
object roqDebugId extends Function0[Integer] {
var x = 0
def apply(): Integer = {
......@@ -48,24 +50,115 @@ class RoqEnqIO extends XSBundle {
val resp = Vec(RenameWidth, Output(new RoqPtr))
}
class RoqDataModule(numRead: Int, numWrite: Int) extends XSModule {
class RoqDispatchData extends XSBundle {
// commit info
val ldest = UInt(5.W)
val rfWen = Bool()
val fpWen = Bool()
val commitType = CommitType()
val pdest = UInt(PhyRegIdxWidth.W)
val old_pdest = UInt(PhyRegIdxWidth.W)
val lqIdx = new LqPtr
val sqIdx = new SqPtr
// exception info
val pc = UInt(VAddrBits.W)
val crossPageIPFFix = Bool()
val exceptionVec = Vec(16, Bool())
}
class RoqWbData extends XSBundle {
// mostly for exceptions
val exceptionVec = Vec(16, Bool())
val fflags = new Fflags
val flushPipe = Bool()
}
class RoqDeqPtrWrapper extends XSModule with HasCircularQueuePtrHelper {
val io = IO(new Bundle {
val raddr = Vec(numRead, Input(new RoqPtr))
val rdata = Vec(numRead, Output(new RoqCommitInfo))
val wen = Vec(numWrite, Input(Bool()))
val waddr = Vec(numWrite, Input(new RoqPtr))
val wdata = Vec(numWrite, Input(new RoqCommitInfo))
// for commits/flush
val state = Input(UInt(2.W))
val deq_v = Vec(CommitWidth, Input(Bool()))
val deq_w = Vec(CommitWidth, Input(Bool()))
val deq_exceptionVec = Vec(CommitWidth, Input(UInt(16.W)))
val deq_flushPipe = Vec(CommitWidth, Input(Bool()))
// for flush: when exception occurs, reset deqPtrs to range(0, CommitWidth)
val intrBitSetReg = Input(Bool())
val hasNoSpecExec = Input(Bool())
val commitType = Input(CommitType())
// output: the CommitWidth deqPtr
val out = Vec(CommitWidth, Output(new RoqPtr))
})
val data = Mem(RoqSize, new RoqCommitInfo)
for (i <- 0 until numRead) {
io.rdata(i) := data(io.raddr(i).value)
val deqPtrVec = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new RoqPtr))))
// for exceptions (flushPipe included) and interrupts:
// only consider the first instruction
val intrEnable = io.intrBitSetReg && !io.hasNoSpecExec && !CommitType.isLoadStore(io.commitType)
val exceptionEnable = io.deq_w(0) && (io.deq_exceptionVec(0).orR || io.deq_flushPipe(0))
val redirectOutValid = io.state === 0.U && io.deq_v(0) && (intrEnable || exceptionEnable)
// for normal commits: we only need to consider the case where there are no exceptions
// we don't need to consider whether the first instruction has exceptions, since it will trigger an exception redirect anyway.
val commitBlocked = VecInit((0 until CommitWidth).map(i => if (i == 0) false.B else io.deq_exceptionVec(i).orR || io.deq_flushPipe(i)))
val canCommit = VecInit((0 until CommitWidth).map(i => io.deq_v(i) && io.deq_w(i) && !commitBlocked(i)))
val normalCommitCnt = PriorityEncoder(canCommit.map(c => !c) :+ true.B)
// when io.intrBitSetReg, only one instruction is allowed to commit
val commitCnt = Mux(io.intrBitSetReg, io.deq_v(0) && io.deq_w(0), normalCommitCnt)
when (redirectOutValid) {
deqPtrVec := VecInit((0 until CommitWidth).map(_.U.asTypeOf(new RoqPtr)))
}.elsewhen (io.state === 0.U) {
deqPtrVec := deqPtrVec.map(_ + commitCnt)
XSInfo(io.state === 0.U && commitCnt > 0.U, "retired %d insts\n", commitCnt)
}
for (i <- 0 until numWrite) {
when (io.wen(i)) {
data(io.waddr(i).value) := io.wdata(i)
}
io.out := deqPtrVec
}
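normalCommitCnt above is a count-leading-ones idiom: PriorityEncoder over the inverted canCommit vector, with true.B appended so an all-can-commit group returns CommitWidth. A hedged model:

    def normal_commit_cnt(can_commit):
        # index of the first entry that cannot commit = number that do commit
        for i, c in enumerate(can_commit):
            if not c:
                return i
        return len(can_commit)  # the appended stopper: everything commits

    assert normal_commit_cnt([True, True, False, True]) == 2
    assert normal_commit_cnt([True] * 6) == 6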
class RoqEnqPtrWrapper extends XSModule with HasCircularQueuePtrHelper {
val io = IO(new Bundle {
// for exceptions and interrupts
val state = Input(UInt(2.W))
val deq_v = Input(Bool())
val deq_w = Input(Bool())
val deq_exceptionVec = Input(UInt(16.W))
val deq_flushPipe = Input(Bool())
val intrBitSetReg = Input(Bool())
val hasNoSpecExec = Input(Bool())
val commitType = Input(CommitType())
// for input redirect
val redirect = Input(Valid(new Redirect))
// for enqueue
val allowEnqueue = Input(Bool())
val hasBlockBackward = Input(Bool())
val enq = Vec(RenameWidth, Input(Bool()))
val out = Output(new RoqPtr)
})
val enqPtr = RegInit(0.U.asTypeOf(new RoqPtr))
// for exceptions (flushPipe included) and interrupts:
// only consider the first instruction
val intrEnable = io.intrBitSetReg && !io.hasNoSpecExec && !CommitType.isLoadStore(io.commitType)
val exceptionEnable = io.deq_w && (io.deq_exceptionVec.orR || io.deq_flushPipe)
val redirectOutValid = io.state === 0.U && io.deq_v && (intrEnable || exceptionEnable)
// enqueue
val canAccept = io.allowEnqueue && !io.hasBlockBackward
val dispatchNum = Mux(canAccept, PopCount(io.enq), 0.U)
when (redirectOutValid) {
enqPtr := 0.U.asTypeOf(new RoqPtr)
}.elsewhen (io.redirect.valid) {
enqPtr := io.redirect.bits.roqIdx + Mux(io.redirect.bits.flushItself(), 0.U, 1.U)
}.otherwise {
enqPtr := enqPtr + dispatchNum
}
io.out := enqPtr
}
class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
......@@ -84,37 +177,25 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
// instvalid field
val valid = RegInit(VecInit(List.fill(RoqSize)(false.B)))
// status
// writeback status
val writebacked = Reg(Vec(RoqSize, Bool()))
// data for redirect, exception, etc.
val flagBkup = RegInit(VecInit(List.fill(RoqSize)(false.B)))
val exuFflags = Mem(RoqSize, new Fflags)
// uop field used when commit
// flushPipe (wb) (commit) (used in roq)
// lidx (wb) (commit)
// sidx (wb) (commit)
// uop.ctrl.commitType (wb) (commit) (L/S)
// exceptionVec (wb) (commit)
// roqIdx (dispatch) (commit)
// crossPageIPFFix (dispatch) (commit)
// uop field used when walk
// ctrl.fpWen (dispatch) (walk)
// ctrl.rfWen (dispatch) (walk)
// ldest (dispatch) (walk)
// data for debug
val microOp = Mem(RoqSize, new MicroOp)
// Warn: debug_* prefix should not exist in generated verilog.
val debug_microOp = Mem(RoqSize, new MicroOp)
val debug_exuData = Reg(Vec(RoqSize, UInt(XLEN.W)))//for debug
val debug_exuDebug = Reg(Vec(RoqSize, new DebugBundle))//for debug
// ptr
val enqPtr = RegInit(0.U.asTypeOf(new RoqPtr))
val deqPtrVec = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new RoqPtr))))
// pointers
// For enqueue ptr, we don't duplicate it since only enqueue needs it.
val enqPtr = Wire(new RoqPtr)
val deqPtrVec = Wire(Vec(CommitWidth, new RoqPtr))
val walkPtrVec = Reg(Vec(CommitWidth, new RoqPtr))
val validCounter = RegInit(0.U(log2Ceil(RoqSize + 1).W))
val allowEnqueue = RegInit(true.B)
val enqPtrVec = VecInit((0 until RenameWidth).map(i => enqPtr + PopCount(io.enq.needAlloc.take(i))))
val deqPtr = deqPtrVec(0)
......@@ -122,41 +203,50 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
val isEmpty = enqPtr === deqPtr
/**
* states of Roq
*/
val s_idle :: s_walk :: s_extrawalk :: Nil = Enum(3)
val state = RegInit(s_idle)
io.roqDeqPtr := deqPtr
// For enqueue ptr, we don't duplicate it since only enqueue needs it.
/**
* CommitDataModule: store commit info separately
* (1) read: commits/walk
* Data Modules
*
* CommitDataModule: data from dispatch
* (1) read: commits/walk/exception
* (2) write: enqueue
*
* WritebackData: data from writeback
* (1) read: commits/walk/exception
* (2) write: write back from exe units
*/
val commitData = Module(new RoqDataModule(CommitWidth, RenameWidth))
val deqCommitData = commitData.io.rdata(0)
for (i <- 0 until RenameWidth) {
commitData.io.wen(i) := false.B
commitData.io.waddr(i) := enqPtrVec(i)
commitData.io.wdata(i).ldest := io.enq.req(i).bits.ctrl.ldest
commitData.io.wdata(i).rfWen := io.enq.req(i).bits.ctrl.rfWen
commitData.io.wdata(i).fpWen := io.enq.req(i).bits.ctrl.fpWen
commitData.io.wdata(i).commitType := io.enq.req(i).bits.ctrl.commitType
commitData.io.wdata(i).pdest := io.enq.req(i).bits.pdest
commitData.io.wdata(i).old_pdest := io.enq.req(i).bits.old_pdest
commitData.io.wdata(i).lqIdx := io.enq.req(i).bits.lqIdx
commitData.io.wdata(i).sqIdx := io.enq.req(i).bits.sqIdx
commitData.io.wdata(i).pc := io.enq.req(i).bits.cf.pc
}
for (i <- 0 until CommitWidth) {
commitData.io.raddr(i) := walkPtrVec(i)
when (state === s_idle) {
commitData.io.raddr(i) := deqPtrVec(i)
}
val dispatchData = Module(new DataModuleTemplate(new RoqDispatchData, RoqSize, CommitWidth, RenameWidth))
val writebackData = Module(new DataModuleTemplate(new RoqWbData, RoqSize, CommitWidth, numWbPorts))
def mergeExceptionVec(dpData: RoqDispatchData, wbData: RoqWbData) = {
// these exceptions can be determined before dispatch.
// by default, let all exceptions be determined by dispatch.
// mergeVec(instrAddrMisaligned) := dpData(instrAddrMisaligned)
// mergeVec(instrAccessFault) := dpData(instrAccessFault)
// mergeVec(instrPageFault) := dpData(instrPageFault)
val mergeVec = WireInit(dpData.exceptionVec)
// these exceptions are determined in execution units
mergeVec(illegalInstr) := wbData.exceptionVec(illegalInstr)
mergeVec(breakPoint) := wbData.exceptionVec(breakPoint)
mergeVec(loadAddrMisaligned) := wbData.exceptionVec(loadAddrMisaligned)
mergeVec(loadAccessFault) := wbData.exceptionVec(loadAccessFault)
mergeVec(storeAddrMisaligned) := wbData.exceptionVec(storeAddrMisaligned)
mergeVec(storeAccessFault) := wbData.exceptionVec(storeAccessFault)
mergeVec(ecallU) := wbData.exceptionVec(ecallU)
mergeVec(ecallS) := wbData.exceptionVec(ecallS)
mergeVec(ecallM) := wbData.exceptionVec(ecallM)
mergeVec(loadPageFault) := wbData.exceptionVec(loadPageFault)
mergeVec(storePageFault) := wbData.exceptionVec(storePageFault)
// returns the merged exception vector
mergeVec
}
io.roqDeqPtr := deqPtr
/**
* Enqueue (from dispatch)
*/
......@@ -168,270 +258,313 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
// To reduce register usage, for hasBlockBackward cases, we allow enqueue after the ROB is empty.
when (isEmpty) { hasBlockBackward:= false.B }
// When any instruction commits, hasNoSpecExec should be set to false.B
when (io.commits.valid.asUInt.orR) { hasNoSpecExec:= false.B }
when (io.commits.valid.asUInt.orR && state =/= s_extrawalk) { hasNoSpecExec:= false.B }
io.enq.canAccept := allowEnqueue && !hasBlockBackward
io.enq.isEmpty := isEmpty
io.enq.resp := enqPtrVec
val canEnqueue = VecInit(io.enq.req.map(_.valid && io.enq.canAccept))
for (i <- 0 until RenameWidth) {
when(io.enq.req(i).valid && io.enq.canAccept) {
// store uop in data module and microOp Vec
commitData.io.wen(i) := true.B
microOp(enqPtrVec(i).value) := io.enq.req(i).bits
when(io.enq.req(i).bits.ctrl.blockBackward) {
// we don't check whether io.redirect is valid here since redirect has higher priority
when (canEnqueue(i)) {
// store uop in data module and debug_microOp Vec
debug_microOp(enqPtrVec(i).value) := io.enq.req(i).bits
when (io.enq.req(i).bits.ctrl.blockBackward) {
hasBlockBackward := true.B
}
when(io.enq.req(i).bits.ctrl.noSpecExec) {
when (io.enq.req(i).bits.ctrl.noSpecExec) {
hasNoSpecExec := true.B
}
}
io.enq.resp(i) := enqPtrVec(i)
}
val validEntries = distanceBetween(enqPtr, deqPtr)
val firedDispatch = Mux(io.enq.canAccept, PopCount(Cat(io.enq.req.map(_.valid))), 0.U)
io.enq.canAccept := (validEntries <= (RoqSize - RenameWidth).U) && !hasBlockBackward
io.enq.isEmpty := isEmpty
// debug info for enqueue (dispatch)
val dispatchNum = Mux(io.enq.canAccept, PopCount(Cat(io.enq.req.map(_.valid))), 0.U)
XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
XSInfo(dispatchNum =/= 0.U, p"dispatched $dispatchNum insts\n")
enqPtr := enqPtr + firedDispatch
when (firedDispatch =/= 0.U) {
XSInfo("dispatched %d insts\n", firedDispatch)
}
/**
* Writeback (from execution units)
*/
val firedWriteback = io.exeWbResults.map(_.fire())
XSInfo(PopCount(firedWriteback) > 0.U, "writebacked %d insts\n", PopCount(firedWriteback))
for(i <- 0 until numWbPorts) {
when(io.exeWbResults(i).fire()){
val wbIdxExt = io.exeWbResults(i).bits.uop.roqIdx
val wbIdx = wbIdxExt.value
microOp(wbIdx).cf.exceptionVec := io.exeWbResults(i).bits.uop.cf.exceptionVec
microOp(wbIdx).ctrl.flushPipe := io.exeWbResults(i).bits.uop.ctrl.flushPipe
microOp(wbIdx).diffTestDebugLrScValid := io.exeWbResults(i).bits.uop.diffTestDebugLrScValid
for (i <- 0 until numWbPorts) {
when (io.exeWbResults(i).valid) {
val wbIdx = io.exeWbResults(i).bits.uop.roqIdx.value
debug_microOp(wbIdx).cf.exceptionVec := io.exeWbResults(i).bits.uop.cf.exceptionVec
debug_microOp(wbIdx).ctrl.flushPipe := io.exeWbResults(i).bits.uop.ctrl.flushPipe
debug_microOp(wbIdx).diffTestDebugLrScValid := io.exeWbResults(i).bits.uop.diffTestDebugLrScValid
debug_exuData(wbIdx) := io.exeWbResults(i).bits.data
debug_exuDebug(wbIdx) := io.exeWbResults(i).bits.debug
val debug_Uop = microOp(wbIdx)
val debug_Uop = debug_microOp(wbIdx)
XSInfo(true.B,
p"writebacked pc 0x${Hexadecimal(debug_Uop.cf.pc)} wen ${debug_Uop.ctrl.rfWen} " +
p"data 0x${Hexadecimal(io.exeWbResults(i).bits.data)} ldst ${debug_Uop.ctrl.ldest} pdst ${debug_Uop.pdest} " +
p"skip ${io.exeWbResults(i).bits.debug.isMMIO} roqIdx: ${wbIdxExt}\n"
p"skip ${io.exeWbResults(i).bits.debug.isMMIO} roqIdx: ${io.exeWbResults(i).bits.uop.roqIdx}\n"
)
}
}
val writebackNum = PopCount(io.exeWbResults.map(_.valid))
XSInfo(writebackNum =/= 0.U, "writebacked %d insts\n", writebackNum)
/**
* Interrupt and Exceptions
* RedirectOut: Interrupt and Exceptions
*/
val deqUop = microOp(deqPtr.value)
val deqDispatchData = dispatchData.io.rdata(0)
val deqWritebackData = writebackData.io.rdata(0)
val debug_deqUop = debug_microOp(deqPtr.value)
val deqPtrWritebacked = writebacked(deqPtr.value) && valid(deqPtr.value)
val intrEnable = io.csr.intrBitSet && !isEmpty && !hasNoSpecExec &&
deqCommitData.commitType =/= CommitType.STORE && deqCommitData.commitType =/= CommitType.LOAD
val exceptionEnable = deqPtrWritebacked && Cat(deqUop.cf.exceptionVec).orR()
val isFlushPipe = deqPtrWritebacked && deqUop.ctrl.flushPipe
val deqExceptionVec = mergeExceptionVec(deqDispatchData, deqWritebackData)
// MMIO instructions should not trigger interrupts, since they may have been sent to lower levels before writeback.
// However, we cannot determine whether a load/store instruction is MMIO.
// Thus, we don't allow load/store instructions to trigger an interrupt.
val intrBitSetReg = RegNext(io.csr.intrBitSet)
val intrEnable = intrBitSetReg && valid(deqPtr.value) && !hasNoSpecExec && !CommitType.isLoadStore(deqDispatchData.commitType)
val exceptionEnable = deqPtrWritebacked && Cat(deqExceptionVec).orR()
val isFlushPipe = deqPtrWritebacked && deqWritebackData.flushPipe
io.redirectOut := DontCare
io.redirectOut.valid := (state === s_idle) && (intrEnable || exceptionEnable || isFlushPipe)
io.redirectOut.bits.level := Mux(isFlushPipe, RedirectLevel.flushAll, RedirectLevel.exception)
io.redirectOut.bits.interrupt := intrEnable
io.redirectOut.bits.target := Mux(isFlushPipe, deqCommitData.pc + 4.U, io.csr.trapTarget)
io.exception := deqUop
io.exception.ctrl.commitType := deqCommitData.commitType
io.exception.lqIdx := deqCommitData.lqIdx
io.exception.sqIdx := deqCommitData.sqIdx
io.exception.cf.pc := deqCommitData.pc
io.redirectOut.bits.target := Mux(isFlushPipe, deqDispatchData.pc + 4.U, io.csr.trapTarget)
io.exception := debug_deqUop
io.exception.ctrl.commitType := deqDispatchData.commitType
io.exception.lqIdx := deqDispatchData.lqIdx
io.exception.sqIdx := deqDispatchData.sqIdx
io.exception.cf.pc := deqDispatchData.pc
io.exception.cf.exceptionVec := deqExceptionVec
io.exception.cf.crossPageIPFFix := deqDispatchData.crossPageIPFFix
XSDebug(io.redirectOut.valid,
"generate redirect: pc 0x%x intr %d excp %d flushpp %d target:0x%x Traptarget 0x%x exceptionVec %b\n",
io.exception.cf.pc, intrEnable, exceptionEnable, isFlushPipe, io.redirectOut.bits.target, io.csr.trapTarget,
Cat(microOp(deqPtr.value).cf.exceptionVec))
p"generate redirect: pc 0x${Hexadecimal(io.exception.cf.pc)} intr $intrEnable " +
p"excp $exceptionEnable flushPipe $isFlushPipe target 0x${Hexadecimal(io.redirectOut.bits.target)} " +
p"Trap_target 0x${Hexadecimal(io.csr.trapTarget)} exceptionVec ${Binary(deqExceptionVec.asUInt)}\n")
/**
* Commits (and walk)
* They share the same width.
*/
val walkCounter = Reg(UInt(log2Up(RoqSize).W))
val shouldWalkVec = Wire(Vec(CommitWidth, Bool()))
for(i <- shouldWalkVec.indices){
shouldWalkVec(i) := i.U < walkCounter
}
val shouldWalkVec = VecInit((0 until CommitWidth).map(_.U < walkCounter))
val walkFinished = walkCounter <= CommitWidth.U
// extra space is used when the roq does not have enough space, but mispredict recovery needs such info to walk the regmap
val needExtraSpaceForMPR = WireInit(VecInit(
List.tabulate(RenameWidth)(i => io.redirect.valid && io.enq.needAlloc(i))
))
// extra space is used when the roq does not have enough space, but mispredict recovery needs such info to walk the regmap
val needExtraSpaceForMPR = VecInit((0 until CommitWidth).map(i => io.redirect.valid && io.enq.needAlloc(i)))
val extraSpaceForMPR = Reg(Vec(RenameWidth, new RoqCommitInfo))
val usedSpaceForMPR = Reg(Vec(RenameWidth, Bool()))
val storeCommitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
val cfiCommitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
// wiring to csr
val fflags = WireInit(0.U.asTypeOf(new Fflags))
val dirty_fs = WireInit(false.B)
val dirty_fs = Mux(io.commits.isWalk, false.B, Cat(io.commits.valid.zip(io.commits.info.map(_.fpWen)).map{case (v, w) => v & w}).orR)
io.commits.isWalk := state =/= s_idle
val commit_v = Mux(state === s_idle, VecInit(deqPtrVec.map(ptr => valid(ptr.value))), VecInit(walkPtrVec.map(ptr => valid(ptr.value))))
val commit_w = VecInit(deqPtrVec.map(ptr => writebacked(ptr.value)))
val commit_exception = dispatchData.io.rdata.zip(writebackData.io.rdata).map{ case (d, w) => mergeExceptionVec(d, w).asUInt.orR }
val commit_block = VecInit((0 until CommitWidth).map(i => !commit_w(i) || commit_exception(i) || writebackData.io.rdata(i).flushPipe))
for (i <- 0 until CommitWidth) {
io.commits.valid(i) := false.B
val commitInfo = commitData.io.rdata(i)
io.commits.info(i) := commitInfo
switch (state) {
is (s_idle) {
val commitIdx = deqPtrVec(i).value
val commitUop = microOp(commitIdx)
val hasException = Cat(commitUop.cf.exceptionVec).orR() || intrEnable
val canCommit = if(i!=0) (io.commits.valid(i-1) && !microOp(deqPtrVec(i-1).value).ctrl.flushPipe) else true.B
val v = valid(commitIdx)
val w = writebacked(commitIdx)
io.commits.valid(i) := v && w && canCommit && !hasException
storeCommitVec(i) := io.commits.valid(i) && CommitType.isLoadStore(commitInfo.commitType) && CommitType.lsInstIsStore(commitInfo.commitType)
cfiCommitVec(i) := io.commits.valid(i) && CommitType.isBranch(commitInfo.commitType)
val commitFflags = exuFflags(commitIdx)
when(io.commits.valid(i)){
when(commitFflags.asUInt.orR()){
// update fflags
fflags := exuFflags(commitIdx)
}
when(commitInfo.fpWen){
// set fs to dirty
dirty_fs := true.B
}
}
XSInfo(io.commits.valid(i),
"retired pc %x wen %d ldest %d pdest %x old_pdest %x data %x fflags: %b\n",
commitUop.cf.pc,
commitInfo.rfWen,
commitInfo.ldest,
commitInfo.pdest,
commitInfo.old_pdest,
debug_exuData(commitIdx),
exuFflags(commitIdx).asUInt
)
}
// defaults: state === s_idle and instructions commit
val isBlocked = if (i != 0) Cat(commit_block.take(i)).orR || intrEnable else false.B
io.commits.valid(i) := commit_v(i) && commit_w(i) && !isBlocked && !commit_exception(i)
io.commits.info(i) := dispatchData.io.rdata(i)
is (s_walk) {
val idx = walkPtrVec(i).value
val v = valid(idx)
val walkUop = microOp(idx)
io.commits.valid(i) := v && shouldWalkVec(i)
when (shouldWalkVec(i)) {
v := false.B
}
XSInfo(io.commits.valid(i) && shouldWalkVec(i), "walked pc %x wen %d ldst %d data %x\n",
walkUop.cf.pc,
commitInfo.rfWen,
commitInfo.ldest,
debug_exuData(idx)
)
when (state === s_idle) {
when (io.commits.valid(i) && writebackData.io.rdata(i).fflags.asUInt.orR()) {
fflags := writebackData.io.rdata(i).fflags
}
}
is (s_extrawalk) {
val idx = RenameWidth-i-1
val walkUop = extraSpaceForMPR(idx)
io.commits.valid(i) := usedSpaceForMPR(idx)
io.commits.info(i) := walkUop
state := s_walk
XSInfo(io.commits.valid(i), "use extra space walked wen %d ldst %d\n",
// walkUop.cf.pc,
commitInfo.rfWen,
commitInfo.ldest
)
}
when (state === s_walk) {
io.commits.valid(i) := commit_v(i) && shouldWalkVec(i)
}.elsewhen(state === s_extrawalk) {
io.commits.valid(i) := usedSpaceForMPR(RenameWidth-i-1)
io.commits.info(i) := extraSpaceForMPR(RenameWidth-i-1)
state := s_walk
}
XSInfo(state === s_idle && io.commits.valid(i),
"retired pc %x wen %d ldest %d pdest %x old_pdest %x data %x fflags: %b\n",
debug_microOp(deqPtrVec(i).value).cf.pc,
io.commits.info(i).rfWen,
io.commits.info(i).ldest,
io.commits.info(i).pdest,
io.commits.info(i).old_pdest,
debug_exuData(deqPtrVec(i).value),
writebackData.io.rdata(i).fflags.asUInt
)
XSInfo(state === s_walk && io.commits.valid(i), "walked pc %x wen %d ldst %d data %x\n",
debug_microOp(walkPtrVec(i).value).cf.pc,
io.commits.info(i).rfWen,
io.commits.info(i).ldest,
debug_exuData(walkPtrVec(i).value)
)
XSInfo(state === s_extrawalk && io.commits.valid(i), "use extra space walked wen %d ldst %d\n",
io.commits.info(i).rfWen,
io.commits.info(i).ldest
)
}
io.csr.fflags := fflags
io.csr.dirty_fs := dirty_fs
// commit branch to brq
val cfiCommitVec = VecInit(io.commits.valid.zip(io.commits.info.map(_.commitType)).map{case(v, t) => v && CommitType.isBranch(t)})
io.bcommit := Mux(io.commits.isWalk, 0.U, PopCount(cfiCommitVec))
val validCommit = io.commits.valid
val commitCnt = PopCount(validCommit)
when(state===s_walk) {
// exit walk state when all roq entries are committed
when(walkFinished) {
state := s_idle
}
for (i <- 0 until CommitWidth) {
walkPtrVec(i) := walkPtrVec(i) - CommitWidth.U
}
walkCounter := walkCounter - commitCnt
XSInfo(p"rolling back: $enqPtr $deqPtr walk $walkPtr walkcnt $walkCounter\n")
}
// move tail ptr
when (state === s_idle) {
deqPtrVec := VecInit(deqPtrVec.map(_ + commitCnt))
/**
* read and write of data modules
*/
val commitReadAddr = Mux(state === s_idle, VecInit(deqPtrVec.map(_.value)), VecInit(walkPtrVec.map(_.value)))
dispatchData.io.wen := canEnqueue
dispatchData.io.waddr := enqPtrVec.map(_.value)
dispatchData.io.wdata.zip(io.enq.req.map(_.bits)).map{ case (wdata, req) =>
wdata.ldest := req.ctrl.ldest
wdata.rfWen := req.ctrl.rfWen
wdata.fpWen := req.ctrl.fpWen
wdata.commitType := req.ctrl.commitType
wdata.pdest := req.pdest
wdata.old_pdest := req.old_pdest
wdata.lqIdx := req.lqIdx
wdata.sqIdx := req.sqIdx
wdata.pc := req.cf.pc
wdata.crossPageIPFFix := req.cf.crossPageIPFFix
wdata.exceptionVec := req.cf.exceptionVec
}
dispatchData.io.raddr := commitReadAddr
writebackData.io.wen := io.exeWbResults.map(_.valid)
writebackData.io.waddr := io.exeWbResults.map(_.bits.uop.roqIdx.value)
writebackData.io.wdata.zip(io.exeWbResults.map(_.bits)).map{ case (wdata, wb) =>
wdata.exceptionVec := wb.uop.cf.exceptionVec
wdata.fflags := wb.fflags
wdata.flushPipe := wb.uop.ctrl.flushPipe
}
val retireCounter = Mux(state === s_idle, commitCnt, 0.U)
XSInfo(retireCounter > 0.U, "retired %d insts\n", retireCounter)
writebackData.io.raddr := commitReadAddr
// commit branch to brq
io.bcommit := PopCount(cfiCommitVec)
/**
* state changes
* (1) redirect: from s_idle to s_walk or s_extrawalk (depending on whether there are pending instructions in dispatch1)
* (2) s_extrawalk to s_walk
* (3) s_walk to s_idle: end of walking
*/
// exit walk state when all roq entries are committed
when (state === s_walk && walkFinished) {
state := s_idle
}
// when redirect, walk back roq entries
when (io.redirect.valid) {
state := s_walk
for (i <- 0 until CommitWidth) {
walkPtrVec(i) := Mux(state === s_walk,
walkPtrVec(i) - Mux(walkFinished, walkCounter, CommitWidth.U),
Mux(state === s_extrawalk, walkPtrVec(i), enqPtr - (i+1).U))
}
val currentWalkPtr = Mux(state === s_walk || state === s_extrawalk, walkPtr, enqPtr - 1.U)
walkCounter := distanceBetween(currentWalkPtr, io.redirect.bits.roqIdx) + io.redirect.bits.flushItself() - Mux(state === s_walk, commitCnt, 0.U)
enqPtr := io.redirect.bits.roqIdx + Mux(io.redirect.bits.flushItself(), 0.U, 1.U)
}
// no enough space for walk, allocate extra space
when (needExtraSpaceForMPR.asUInt.orR && io.redirect.valid) {
usedSpaceForMPR := needExtraSpaceForMPR
(0 until RenameWidth).foreach(i => extraSpaceForMPR(i) := commitData.io.wdata(i))
extraSpaceForMPR := dispatchData.io.wdata
state := s_extrawalk
XSDebug("roq full, switched to s_extrawalk. needExtraSpaceForMPR: %b\n", needExtraSpaceForMPR.asUInt)
}
// when an exception occurs, cancel all
// when an exception occurs, cancel all and switch to s_idle
when (io.redirectOut.valid) {
state := s_idle
enqPtr := 0.U.asTypeOf(new RoqPtr)
deqPtrVec := VecInit((0 until CommitWidth).map(_.U.asTypeOf(new RoqPtr)))
}
/**
* pointers and counters
*/
val deqPtrGenModule = Module(new RoqDeqPtrWrapper)
deqPtrGenModule.io.state := state
deqPtrGenModule.io.deq_v := commit_v
deqPtrGenModule.io.deq_w := commit_w
deqPtrGenModule.io.deq_exceptionVec := VecInit(dispatchData.io.rdata.zip(writebackData.io.rdata).map{ case (d, w) => mergeExceptionVec(d, w).asUInt })
deqPtrGenModule.io.deq_flushPipe := writebackData.io.rdata.map(_.flushPipe)
deqPtrGenModule.io.intrBitSetReg := intrBitSetReg
deqPtrGenModule.io.hasNoSpecExec := hasNoSpecExec
deqPtrGenModule.io.commitType := deqDispatchData.commitType
deqPtrVec := deqPtrGenModule.io.out
val enqPtrGenModule = Module(new RoqEnqPtrWrapper)
enqPtrGenModule.io.state := state
enqPtrGenModule.io.deq_v := commit_v(0)
enqPtrGenModule.io.deq_w := commit_w(0)
enqPtrGenModule.io.deq_exceptionVec := deqExceptionVec.asUInt
enqPtrGenModule.io.deq_flushPipe := writebackData.io.rdata(0).flushPipe
enqPtrGenModule.io.intrBitSetReg := intrBitSetReg
enqPtrGenModule.io.hasNoSpecExec := hasNoSpecExec
enqPtrGenModule.io.commitType := deqDispatchData.commitType
enqPtrGenModule.io.redirect := io.redirect
enqPtrGenModule.io.allowEnqueue := allowEnqueue
enqPtrGenModule.io.hasBlockBackward := hasBlockBackward
enqPtrGenModule.io.enq := VecInit(io.enq.req.map(_.valid))
enqPtr := enqPtrGenModule.io.out
val thisCycleWalkCount = Mux(walkFinished, walkCounter, CommitWidth.U)
when (io.redirect.valid && state =/= s_extrawalk) {
walkPtrVec := Mux(state === s_walk,
VecInit(walkPtrVec.map(_ - thisCycleWalkCount)),
VecInit((0 until CommitWidth).map(i => enqPtr - (i+1).U))
)
}.elsewhen (state === s_walk) {
walkPtrVec := VecInit(walkPtrVec.map(_ - CommitWidth.U))
}
val lastCycleRedirect = RegNext(io.redirect.valid)
val trueValidCounter = Mux(lastCycleRedirect, distanceBetween(enqPtr, deqPtr), validCounter)
val commitCnt = PopCount(io.commits.valid)
validCounter := Mux(io.redirectOut.valid,
0.U,
Mux(state === s_idle,
(validCounter - commitCnt) + dispatchNum,
trueValidCounter
)
)
allowEnqueue := Mux(io.redirectOut.valid,
true.B,
Mux(state === s_idle,
validCounter + dispatchNum <= (RoqSize - RenameWidth).U,
trueValidCounter <= (RoqSize - RenameWidth).U
)
)
val currentWalkPtr = Mux(state === s_walk || state === s_extrawalk, walkPtr, enqPtr - 1.U)
val redirectWalkDistance = distanceBetween(currentWalkPtr, io.redirect.bits.roqIdx)
when (io.redirect.valid) {
walkCounter := Mux(state === s_walk,
redirectWalkDistance + io.redirect.bits.flushItself() - commitCnt,
redirectWalkDistance + io.redirect.bits.flushItself()
)
}.elsewhen (state === s_walk) {
walkCounter := walkCounter - commitCnt
XSInfo(p"rolling back: $enqPtr $deqPtr walk $walkPtr walkcnt $walkCounter\n")
}
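// Worked example of the arithmetic above (all numbers assumed): with
// enqPtr = 12, io.redirect.bits.roqIdx = 7, flushItself() = 1 and state = s_idle,
//   currentWalkPtr       = enqPtr - 1 = 11
//   redirectWalkDistance = distanceBetween(11, 7) = 4
//   walkCounter          = 4 + 1 = 5, i.e. entries 11,10,9,8,7 are walked back;
// the +1 from flushItself() includes the redirecting instruction itself.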
/**
* States
* We put all the stage changes here.
* We put all the state bit changes here.
* All events: (1) enqueue (dispatch); (2) writeback; (3) cancel; (4) dequeue (commit);
* All states: (1) valid; (2) writebacked;
* All states: (1) valid; (2) writebacked; (3) flagBkup
*/
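// The per-entry state bits listed above are plain register vectors. A sketch of
// their declarations (RoqSize-deep; names suffixed to mark them as illustrative,
// reset values assumed):
val valid_sketch       = RegInit(VecInit(Seq.fill(RoqSize)(false.B))) // entry holds a dispatched uop
val writebacked_sketch = Reg(Vec(RoqSize, Bool()))                    // uop has produced its result
val flagBkup_sketch    = Reg(Vec(RoqSize, Bool()))                    // backup of the enqueue pointer's flag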
// write
// enqueue logic writes RenameWidth (6) valid bits
for (i <- 0 until RenameWidth) {
when(io.enq.req(i).valid && io.enq.canAccept && !io.redirect.valid){
when (canEnqueue(i) && !io.redirect.valid) {
valid(enqPtrVec(i).value) := true.B
}
}
// dequeue/walk logic writes CommitWidth (6) valid bits; dequeue and walk never happen in the same cycle
for(i <- 0 until CommitWidth){
switch(state){
is(s_idle){
when(io.commits.valid(i)){valid(deqPtrVec(i).value) := false.B}
}
is(s_walk){
val idx = walkPtrVec(i).value
when(shouldWalkVec(i)){
valid(idx) := false.B
}
}
for (i <- 0 until CommitWidth) {
when (io.commits.valid(i) && state =/= s_extrawalk) {
valid(commitReadAddr(i)) := false.B
}
}
// read
// enqueue logic reads RenameWidth (6) valid bits
// dequeue/walk logic reads CommitWidth (6) valid bits; dequeue and walk never happen in the same cycle
// rollback reads all valid? is it necessary?
// reset
// when exception, reset all valid to false
// reset: on exception, reset all valid bits to false
when (io.redirectOut.valid) {
for (i <- 0 until RoqSize) {
valid(i) := false.B
......@@ -439,51 +572,31 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
}
// status field: writebacked
// write
// enqueue logic sets RenameWidth (6) writebacked bits to false
for (i <- 0 until RenameWidth) {
when(io.enq.req(i).valid && io.enq.canAccept && !io.redirect.valid){
when (canEnqueue(i)) {
writebacked(enqPtrVec(i).value) := false.B
}
}
// writeback logic sets numWbPorts writebacked bits to true
for(i <- 0 until numWbPorts) {
when(io.exeWbResults(i).fire()){
val wbIdxExt = io.exeWbResults(i).bits.uop.roqIdx
val wbIdx = wbIdxExt.value
for (i <- 0 until numWbPorts) {
when (io.exeWbResults(i).valid) {
val wbIdx = io.exeWbResults(i).bits.uop.roqIdx.value
writebacked(wbIdx) := true.B
}
}
// read
// deqPtrWritebacked
// gen io.commits(i).valid: reads CommitWidth (6) writebacked bits
// flagBkup
// write: update when enqueue
// enqueue logic sets at most RenameWidth (6) flagBkup bits
for (i <- 0 until RenameWidth) {
when(io.enq.req(i).valid && io.enq.canAccept && !io.redirect.valid){
when (canEnqueue(i)) {
flagBkup(enqPtrVec(i).value) := enqPtrVec(i).flag
}
}
// read: used in rollback logic
// all flagBkup will be used
// exuFflags
// write: writeback logic sets numWbPorts exuFflags
for(i <- 0 until numWbPorts) {
when(io.exeWbResults(i).fire()){
val wbIdxExt = io.exeWbResults(i).bits.uop.roqIdx
val wbIdx = wbIdxExt.value
exuFflags(wbIdx) := io.exeWbResults(i).bits.fflags
}
}
// read: used in commit logic
// reads CommitWidth exuFflags
// debug info
/**
* debug info
*/
XSDebug(p"enqPtr ${enqPtr} deqPtr ${deqPtr}\n")
XSDebug("")
for(i <- 0 until RoqSize){
......@@ -495,7 +608,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
for(i <- 0 until RoqSize) {
if(i % 4 == 0) XSDebug("")
XSDebug(false, true.B, "%x ", microOp(i).cf.pc)
XSDebug(false, true.B, "%x ", debug_microOp(i).cf.pc)
XSDebug(false, !valid(i), "- ")
XSDebug(false, valid(i) && writebacked(i), "w ")
XSDebug(false, valid(i) && !writebacked(i), "v ")
......@@ -511,7 +624,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
if(!env.FPGAPlatform) {
//difftest signals
val firstValidCommit = (deqPtr + PriorityMux(validCommit, VecInit(List.tabulate(CommitWidth)(_.U)))).value
val firstValidCommit = (deqPtr + PriorityMux(io.commits.valid, VecInit(List.tabulate(CommitWidth)(_.U)))).value
val skip = Wire(Vec(CommitWidth, Bool()))
val wen = Wire(Vec(CommitWidth, Bool()))
......@@ -524,7 +637,7 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
for(i <- 0 until CommitWidth){
// io.commits(i).valid
val idx = deqPtrVec(i).value
val uop = microOp(idx)
val uop = debug_microOp(idx)
val DifftestSkipSC = false
if(!DifftestSkipSC){
skip(i) := debug_exuDebug(idx).isMMIO && io.commits.valid(i)
......@@ -545,16 +658,17 @@ class Roq(numWbPorts: Int) extends XSModule with HasCircularQueuePtrHelper {
}
val scFailed = !diffTestDebugLrScValid(0) &&
microOp(deqPtr.value).ctrl.fuType === FuType.mou &&
(microOp(deqPtr.value).ctrl.fuOpType === LSUOpType.sc_d || microOp(deqPtr.value).ctrl.fuOpType === LSUOpType.sc_w)
debug_deqUop.ctrl.fuType === FuType.mou &&
(debug_deqUop.ctrl.fuOpType === LSUOpType.sc_d || debug_deqUop.ctrl.fuOpType === LSUOpType.sc_w)
val instrCnt = RegInit(0.U(64.W))
val retireCounter = Mux(state === s_idle, commitCnt, 0.U)
instrCnt := instrCnt + retireCounter
XSDebug(difftestIntrNO =/= 0.U, "difftest intrNO set %x\n", difftestIntrNO)
val retireCounterFix = Mux(io.redirectOut.valid, 1.U, retireCounter)
val retirePCFix = SignExt(Mux(io.redirectOut.valid, microOp(deqPtr.value).cf.pc, microOp(firstValidCommit).cf.pc), XLEN)
val retireInstFix = Mux(io.redirectOut.valid, microOp(deqPtr.value).cf.instr, microOp(firstValidCommit).cf.instr)
val retirePCFix = SignExt(Mux(io.redirectOut.valid, debug_deqUop.cf.pc, debug_microOp(firstValidCommit).cf.pc), XLEN)
val retireInstFix = Mux(io.redirectOut.valid, debug_deqUop.cf.instr, debug_microOp(firstValidCommit).cf.instr)
ExcitingUtils.addSource(RegNext(retireCounterFix), "difftestCommit", ExcitingUtils.Debug)
ExcitingUtils.addSource(RegNext(retirePCFix), "difftestThisPC", ExcitingUtils.Debug)//first valid PC
......
......@@ -501,11 +501,11 @@ class PTWImp(outer: PTW) extends PtwModule(outer){
} .otherwise {
when (sfence.bits.rs2) {
// specific leaf of addr && all asid
tlbv := tlbv & ~UIntToOH(sfence.bits.addr(log2Up(TlbL2EntrySize)-1+offLen, 0+offLen))
tlbg := tlbg & ~UIntToOH(sfence.bits.addr(log2Up(TlbL2EntrySize)-1+offLen, 0+offLen))
tlbv := tlbv & ~UIntToOH(genTlbL2Idx(sfence.bits.addr(sfence.bits.addr.getWidth-1, offLen)))
tlbg := tlbg & ~UIntToOH(genTlbL2Idx(sfence.bits.addr(sfence.bits.addr.getWidth-1, offLen)))
} .otherwise {
// specific leaf of addr && specific asid
tlbv := tlbv & (~UIntToOH(sfence.bits.addr(log2Up(TlbL2EntrySize)-1+offLen, 0+offLen)) | tlbg)
tlbv := tlbv & (~UIntToOH(genTlbL2Idx(sfence.bits.addr(sfence.bits.addr.getWidth-1, offLen)))| tlbg)
}
}
}
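// genTlbL2Idx is not shown in this diff. A plausible direct-mapped sketch,
// under the assumption that the index is simply the low VPN bits:
def genTlbL2IdxSketch(vpn: UInt): UInt = vpn(log2Up(TlbL2EntrySize) - 1, 0)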
......
......@@ -127,7 +127,7 @@ class IFU extends XSModule with HasIFUConst
val if1_npc = WireInit(0.U(VAddrBits.W))
val if2_ready = WireInit(false.B)
val if2_allReady = WireInit(if2_ready && icache.io.req.ready)
val if1_fire = if1_valid && (if2_allReady || if1_flush)
val if1_fire = if1_valid && (if2_allReady || if2_flush)
// val if2_newPtr, if3_newPtr, if4_newPtr = Wire(UInt(log2Up(ExtHistoryLength).W))
......@@ -401,7 +401,7 @@ class IFU extends XSModule with HasIFUConst
if1_npc := npcGen()
icache.io.req.valid := if1_valid && (if2_ready || if1_flush)
icache.io.req.valid := if1_valid && (if2_ready || if2_flush)
icache.io.resp.ready := if4_ready
icache.io.req.bits.addr := if1_npc
icache.io.req.bits.mask := mask(if1_npc)
......
......@@ -62,6 +62,9 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
val allowEnqueue = RegInit(true.B)
val enqPtr = enqPtrExt(0).value
val deqPtr = deqPtrExt.value
val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
......@@ -80,10 +83,8 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
*
* Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth(EnqWidth)
*/
val validEntries = distanceBetween(enqPtrExt(0), deqPtrExt)
val firedDispatch = io.enq.req.map(_.valid)
io.enq.canAccept := validEntries <= (LoadQueueSize - RenameWidth).U
XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
io.enq.canAccept := allowEnqueue
for (i <- 0 until RenameWidth) {
val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
val lqIdx = enqPtrExt(offset)
......@@ -100,13 +101,7 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
}
io.enq.resp(i) := lqIdx
}
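// The offset above compacts the enqueue ports: port i allocates the pointer
// right after however many earlier ports actually need a slot. A tiny
// illustration with assumed inputs, where ports 0, 2 and 3 receive
// enqPtrExt(0), enqPtrExt(1) and enqPtrExt(2) respectively:
val needAllocExample = VecInit(true.B, false.B, true.B, true.B)
val offsetsExample   = (0 until 4).map(i => if (i == 0) 0.U else PopCount(needAllocExample.take(i)))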
// when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
when (Cat(firedDispatch).orR && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
val enqNumber = PopCount(firedDispatch)
enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
XSInfo("dispatched %d insts to lq\n", enqNumber)
}
XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
/**
* Writeback load from load units
......@@ -335,7 +330,6 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
}
})
deqPtrExt := deqPtrExt + PopCount(loadCommit)
def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
val length = mask.length
......@@ -549,13 +543,38 @@ class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueueP
allocated(i) := false.B
}
}
// we recover the pointers in the next cycle after redirect
val needCancelReg = RegNext(needCancel)
/**
* update pointers
*/
val lastCycleCancelCount = PopCount(RegNext(needCancel))
// when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
when (lastCycleRedirect.valid) {
val cancelCount = PopCount(needCancelReg)
enqPtrExt := VecInit(enqPtrExt.map(_ - cancelCount))
// we recover the pointers in the next cycle after redirect
enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
}.otherwise {
enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
}
val commitCount = PopCount(loadCommit)
deqPtrExt := deqPtrExt + commitCount
val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
validCounter := Mux(lastLastCycleRedirect,
trueValidCounter,
validCounter + enqNumber - commitCount
)
allowEnqueue := Mux(io.brqRedirect.valid,
false.B,
Mux(lastLastCycleRedirect,
trueValidCounter <= (LoadQueueSize - RenameWidth).U,
validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
)
)
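// Timing of the recovery above (cycle numbers are illustrative):
//   cycle 0: io.brqRedirect.valid          -> allowEnqueue forced false
//   cycle 1: RegNext(needCancel) settles   -> enqPtrExt rewound by lastCycleCancelCount
//   cycle 2: lastLastCycleRedirect is set  -> pointers are stable again, so
//            validCounter and allowEnqueue reload from trueValidCounter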
// debug info
XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)
......
......@@ -58,6 +58,9 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
require(StoreQueueSize > RenameWidth)
val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new SqPtr))))
val deqPtrExt = RegInit(VecInit((0 until StorePipelineWidth).map(_.U.asTypeOf(new SqPtr))))
val validCounter = RegInit(0.U(log2Ceil(StoreQueueSize + 1).W))
val allowEnqueue = RegInit(true.B)
val enqPtr = enqPtrExt(0).value
val deqPtr = deqPtrExt(0).value
......@@ -69,10 +72,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
*
* Currently, StoreQueue only allows enqueue when #emptyEntries > RenameWidth(EnqWidth)
*/
val validEntries = distanceBetween(enqPtrExt(0), deqPtrExt(0))
val firedDispatch = io.enq.req.map(_.valid)
io.enq.canAccept := validEntries <= (StoreQueueSize - RenameWidth).U
XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
io.enq.canAccept := allowEnqueue
for (i <- 0 until RenameWidth) {
val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
val sqIdx = enqPtrExt(offset)
......@@ -87,12 +87,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
}
io.enq.resp(i) := sqIdx
}
when (Cat(firedDispatch).orR && io.enq.canAccept && io.enq.lqCanAccept && !io.brqRedirect.valid) {
val enqNumber = PopCount(firedDispatch)
enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
XSInfo("dispatched %d insts to sq\n", enqNumber)
}
XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
/**
* Writeback store from store units
......@@ -104,7 +99,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
* instead of pending to avoid sending them to lower level.
* (2) For an mmio instruction without exceptions, we mark it as pending.
* When the instruction reaches ROB's head, StoreQueue sends it to uncache channel.
* Upon receiving the response, StoreQueue writes back the instruction
* through arbiter with store units. It will later commit as normal.
*/
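// A condensed view of the mmio store lifecycle described above (pseudo-stages
// for intuition only; the queue tracks them with per-entry bits, not an FSM):
//   writeback from store unit --has exception--> marked writebacked, never sent below
//                             --no exception---> marked pending
//   pending && at ROB head    --> request issued on the uncache channel
//   uncache response          --> written back through the arbiter shared with
//                                 the store units, then commits as a normal store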
for (i <- 0 until StorePipelineWidth) {
......@@ -246,7 +241,7 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
when (io.mmioStout.fire()) {
writebacked(deqPtr) := true.B
allocated(deqPtr) := false.B
deqPtrExt := VecInit(deqPtrExt.map(_ + 1.U))
}
/**
......@@ -284,14 +279,10 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr)
}
}
// note that sbuffer will not accept req(1) if req(0) is not accepted.
when (Cat(io.sbuffer.map(_.fire())).orR) {
val stepForward = Mux(io.sbuffer(1).fire(), 2.U, 1.U)
deqPtrExt := VecInit(deqPtrExt.map(_ + stepForward))
when (io.sbuffer(1).fire()) {
assert(io.sbuffer(0).fire())
}
when (io.sbuffer(1).fire()) {
assert(io.sbuffer(0).fire())
}
if (!env.FPGAPlatform) {
val storeCommit = PopCount(io.sbuffer.map(_.fire()))
val waddr = VecInit(io.sbuffer.map(req => SignExt(req.bits.addr, 64)))
......@@ -316,13 +307,45 @@ class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueue
allocated(i) := false.B
}
}
// we recover the pointers in the next cycle after redirect
val lastCycleRedirectValid = RegNext(io.brqRedirect.valid)
val needCancelCount = PopCount(RegNext(needCancel))
when (lastCycleRedirectValid) {
enqPtrExt := VecInit(enqPtrExt.map(_ - needCancelCount))
/**
* update pointers
*/
val lastCycleRedirect = RegNext(io.brqRedirect.valid)
val lastCycleCancelCount = PopCount(RegNext(needCancel))
// when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
val enqNumber = Mux(io.enq.canAccept && io.enq.lqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
when (lastCycleRedirect) {
// we recover the pointers in the next cycle after redirect
enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
}.otherwise {
enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
}
deqPtrExt := Mux(io.sbuffer(1).fire(),
VecInit(deqPtrExt.map(_ + 2.U)),
Mux(io.sbuffer(0).fire() || io.mmioStout.fire(),
VecInit(deqPtrExt.map(_ + 1.U)),
deqPtrExt
)
)
val lastLastCycleRedirect = RegNext(lastCycleRedirect)
val dequeueCount = Mux(io.sbuffer(1).fire(), 2.U, Mux(io.sbuffer(0).fire() || io.mmioStout.fire(), 1.U, 0.U))
val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt(0))
validCounter := Mux(lastLastCycleRedirect,
trueValidCounter - dequeueCount,
validCounter + enqNumber - dequeueCount
)
allowEnqueue := Mux(io.brqRedirect.valid,
false.B,
Mux(lastLastCycleRedirect,
trueValidCounter <= (StoreQueueSize - RenameWidth).U,
validCounter + enqNumber <= (StoreQueueSize - RenameWidth).U
)
)
// debug info
XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr)
......
......@@ -93,6 +93,7 @@ package object xiangshan {
def apply() = UInt(2.W)
def isLoadStore(commitType: UInt) = commitType(1)
def lsInstIsStore(commitType: UInt) = commitType(0)
def isStore(commitType: UInt) = isLoadStore(commitType) && lsInstIsStore(commitType)
def isBranch(commitType: UInt) = commitType(0) && !commitType(1)
}
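// Encoding implied by the predicates above: bit 1 = "is load/store", bit 0 =
// "is store" when bit 1 is set, "is branch" otherwise. So 2'b01 = branch,
// 2'b10 = load, 2'b11 = store, and 2'b00 = none of these.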
......
......@@ -2,6 +2,8 @@
#include "sdcard.h"
#include "difftest.h"
#include <getopt.h>
#include <signal.h>
#include <unistd.h>
#include "ram.h"
#include "zlib.h"
#include "compress.h"
......@@ -244,6 +246,16 @@ inline void Emulator::single_cycle() {
cycles ++;
}
#if VM_COVERAGE == 1
uint64_t *max_cycle_ptr = NULL;
// when interrupted, we set max_cycle to zero
// so that the emulator will stop gracefully
void sig_handler(int signo) {
if (signo == SIGINT)
*max_cycle_ptr = 0;
}
#endif
uint64_t Emulator::execute(uint64_t max_cycle, uint64_t max_instr) {
extern void poll_event(void);
extern uint32_t uptime(void);
......@@ -268,6 +280,9 @@ uint64_t Emulator::execute(uint64_t max_cycle, uint64_t max_instr) {
// since we are not sure when an emu will stop
// we distinguish multiple dat files by emu start time
time_t start_time = time(NULL);
max_cycle_ptr = &max_cycle;
if (signal(SIGINT, sig_handler) == SIG_ERR)
printf("\ncan't catch SIGINT\n");
#endif
while (!Verilated::gotFinish() && trapCode == STATE_RUNNING) {
......@@ -407,6 +422,7 @@ inline char* Emulator::waveform_filename(time_t t) {
static char buf[1024];
char *p = timestamp_filename(t, buf);
strcpy(p, ".vcd");
printf("dump wave to %s...\n", buf);
return buf;
}
......
......@@ -5,6 +5,7 @@ import chisel3._
import chisel3.util._
import chiseltest.experimental.TestOptionBuilder._
import chiseltest.internal.{VerilatorBackendAnnotation, LineCoverageAnnotation, ToggleCoverageAnnotation, UserCoverageAnnotation, StructuralCoverageAnnotation}
import chiseltest.legacy.backends.verilator.VerilatorFlags
import chiseltest._
import chisel3.experimental.BundleLiterals._
import firrtl.stage.RunFirrtlTransformAnnotation
......@@ -262,6 +263,7 @@ class L2CacheTest extends AnyFlatSpec with ChiselScalatestTester with Matchers{
ToggleCoverageAnnotation,
UserCoverageAnnotation,
StructuralCoverageAnnotation,
VerilatorFlags(Seq("--output-split 5000", "--output-split-cfuncs 5000")),
RunFirrtlTransformAnnotation(new PrintModuleName)
)
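// --output-split / --output-split-cfuncs cap how much C++ Verilator emits per
// file and per function, so the large generated model compiles in smaller,
// parallel-friendly units.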
......