@chick
Created September 13, 2019 17:06
riscv-mini's Cache.fir. Running LowFirrtlOptimization on this file produces a combinational loop.
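
For reference, a minimal sketch of one way to drive LowFirrtlOptimization over this file with the firrtl (1.2-era) Scala API. The file name Cache.fir and the by-hand transform wiring are assumptions for illustration, not part of this gist.

    // Sketch only: parse Cache.fir, lower it to LowForm, then apply LowFirrtlOptimization.
    // Assumes a firrtl 1.2.x library on the classpath and Cache.fir in the working directory.
    import firrtl._

    object ReproCombLoop extends App {
      val source = scala.io.Source.fromFile("Cache.fir").mkString
      // Start from CHIRRTL (what Chisel emits) and lower to LowForm.
      val lowered = (new LowFirrtlCompiler).compileAndEmit(
        CircuitState(Parser.parse(source), ChirrtlForm))
      // Run the low-form optimization transforms; per the description above,
      // this is the step after which a combinational loop is reported.
      val optimized = (new LowFirrtlOptimization).execute(lowered)
      println(optimized.circuit.serialize)
    }

The same behavior should be reproducible through the firrtl command-line driver by compiling Cache.fir all the way to Verilog, since that pipeline also runs LowFirrtlOptimization.
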
;buildInfoPackage: chisel3, version: 3.2-SNAPSHOT, scalaVersion: 2.12.9, sbtVersion: 1.2.7
circuit Cache :
  module Cache :
    input clock : Clock
    input reset : UInt<1>
    output io : {cpu : {flip abort : UInt<1>, flip req : {valid : UInt<1>, bits : {addr : UInt<32>, data : UInt<32>, mask : UInt<4>}}, resp : {valid : UInt<1>, bits : {data : UInt<32>}}}, nasti : {aw : {flip ready : UInt<1>, valid : UInt<1>, bits : {addr : UInt<32>, len : UInt<8>, size : UInt<3>, burst : UInt<2>, lock : UInt<1>, cache : UInt<4>, prot : UInt<3>, qos : UInt<4>, region : UInt<4>, id : UInt<5>, user : UInt<1>}}, w : {flip ready : UInt<1>, valid : UInt<1>, bits : {data : UInt<64>, last : UInt<1>, id : UInt<5>, strb : UInt<8>, user : UInt<1>}}, flip b : {flip ready : UInt<1>, valid : UInt<1>, bits : {resp : UInt<2>, id : UInt<5>, user : UInt<1>}}, ar : {flip ready : UInt<1>, valid : UInt<1>, bits : {addr : UInt<32>, len : UInt<8>, size : UInt<3>, burst : UInt<2>, lock : UInt<1>, cache : UInt<4>, prot : UInt<3>, qos : UInt<4>, region : UInt<4>, id : UInt<5>, user : UInt<1>}}, flip r : {flip ready : UInt<1>, valid : UInt<1>, bits : {resp : UInt<2>, data : UInt<64>, last : UInt<1>, id : UInt<5>, user : UInt<1>}}}}
    reg state : UInt<3>, clock with : (reset => (reset, UInt<3>("h00"))) @[Cache.scala 60:22]
    reg v : UInt<256>, clock with : (reset => (reset, UInt<256>("h00"))) @[Cache.scala 62:25]
    reg d : UInt<256>, clock with : (reset => (reset, UInt<256>("h00"))) @[Cache.scala 63:25]
    smem metaMem : {tag : UInt<20>}[256] @[Cache.scala 64:29]
    smem dataMem_0 : UInt<8>[4][256] @[Cache.scala 65:46]
    smem dataMem_1 : UInt<8>[4][256] @[Cache.scala 65:46]
    smem dataMem_2 : UInt<8>[4][256] @[Cache.scala 65:46]
    smem dataMem_3 : UInt<8>[4][256] @[Cache.scala 65:46]
    reg addr_reg : UInt<32>, clock @[Cache.scala 67:21]
    reg cpu_data : UInt<32>, clock @[Cache.scala 68:21]
    reg cpu_mask : UInt<4>, clock @[Cache.scala 69:21]
    node _T = and(io.nasti.r.ready, io.nasti.r.valid) @[Decoupled.scala 40:37]
    reg value : UInt<1>, clock with : (reset => (reset, UInt<1>("h00"))) @[Counter.scala 29:33]
    when _T : @[Counter.scala 71:17]
      node _T_1 = eq(value, UInt<1>("h01")) @[Counter.scala 37:24]
      node _T_2 = add(value, UInt<1>("h01")) @[Counter.scala 38:22]
      node _T_3 = tail(_T_2, 1) @[Counter.scala 38:22]
      value <= _T_3 @[Counter.scala 38:13]
      skip @[Counter.scala 71:17]
    node read_wrap_out = and(_T, _T_1) @[Counter.scala 72:20]
    node _T_4 = and(io.nasti.w.ready, io.nasti.w.valid) @[Decoupled.scala 40:37]
    reg value_1 : UInt<1>, clock with : (reset => (reset, UInt<1>("h00"))) @[Counter.scala 29:33]
    when _T_4 : @[Counter.scala 71:17]
      node _T_5 = eq(value_1, UInt<1>("h01")) @[Counter.scala 37:24]
      node _T_6 = add(value_1, UInt<1>("h01")) @[Counter.scala 38:22]
      node _T_7 = tail(_T_6, 1) @[Counter.scala 38:22]
      value_1 <= _T_7 @[Counter.scala 38:13]
      skip @[Counter.scala 71:17]
    node write_wrap_out = and(_T_4, _T_5) @[Counter.scala 72:20]
    node is_idle = eq(state, UInt<3>("h00")) @[Cache.scala 76:25]
    node is_read = eq(state, UInt<3>("h01")) @[Cache.scala 77:25]
    node is_write = eq(state, UInt<3>("h02")) @[Cache.scala 78:25]
    node _T_8 = eq(state, UInt<3>("h06")) @[Cache.scala 79:25]
    node is_alloc = and(_T_8, read_wrap_out) @[Cache.scala 79:38]
    reg is_alloc_reg : UInt<1>, clock @[Cache.scala 80:29]
    is_alloc_reg <= is_alloc @[Cache.scala 80:29]
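    ; Note: `hit` is forward-declared below. It feeds `wen` and `ren` (and `ren` gates the
    ; metaMem/dataMem read ports), while `hit` itself is only driven later from `rmeta.tag`
    ; at Cache.scala 99; this is possibly the path involved in the reported loop.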
    wire hit : UInt<1> @[Cache.scala 82:17]
    node _T_9 = or(hit, is_alloc_reg) @[Cache.scala 83:30]
    node _T_10 = and(is_write, _T_9) @[Cache.scala 83:22]
    node _T_11 = eq(io.cpu.abort, UInt<1>("h00")) @[Cache.scala 83:50]
    node _T_12 = and(_T_10, _T_11) @[Cache.scala 83:47]
    node wen = or(_T_12, is_alloc) @[Cache.scala 83:64]
    node _T_13 = eq(wen, UInt<1>("h00")) @[Cache.scala 84:13]
    node _T_14 = or(is_idle, is_read) @[Cache.scala 84:30]
    node _T_15 = and(_T_13, _T_14) @[Cache.scala 84:18]
    node ren = and(_T_15, io.cpu.req.valid) @[Cache.scala 84:42]
    reg ren_reg : UInt<1>, clock @[Cache.scala 85:24]
    ren_reg <= ren @[Cache.scala 85:24]
    node idx = bits(io.cpu.req.bits.addr, 11, 4) @[Cache.scala 88:22]
    node tag_reg = bits(addr_reg, 31, 12) @[Cache.scala 89:26]
    node idx_reg = bits(addr_reg, 11, 4) @[Cache.scala 90:26]
    node off_reg = bits(addr_reg, 3, 2) @[Cache.scala 91:26]
    wire _T_16 : UInt @[Cache.scala 93:27]
    _T_16 is invalid @[Cache.scala 93:27]
    when ren : @[Cache.scala 93:27]
      _T_16 <= idx @[Cache.scala 93:27]
      node _T_17 = or(_T_16, UInt<8>("h00")) @[Cache.scala 93:27]
      node _T_18 = bits(_T_17, 7, 0) @[Cache.scala 93:27]
      read mport rmeta = metaMem[_T_18], clock @[Cache.scala 93:27]
      skip @[Cache.scala 93:27]
    wire _T_19 : UInt @[Cache.scala 94:39]
    _T_19 is invalid @[Cache.scala 94:39]
    when ren : @[Cache.scala 94:39]
      _T_19 <= idx @[Cache.scala 94:39]
      node _T_20 = or(_T_19, UInt<8>("h00")) @[Cache.scala 94:39]
      node _T_21 = bits(_T_20, 7, 0) @[Cache.scala 94:39]
      read mport _T_22 = dataMem_0[_T_21], clock @[Cache.scala 94:39]
      skip @[Cache.scala 94:39]
    node _T_23 = cat(_T_22[1], _T_22[0]) @[Cache.scala 94:50]
    node _T_24 = cat(_T_22[3], _T_22[2]) @[Cache.scala 94:50]
    node _T_25 = cat(_T_24, _T_23) @[Cache.scala 94:50]
    wire _T_26 : UInt @[Cache.scala 94:39]
    _T_26 is invalid @[Cache.scala 94:39]
    when ren : @[Cache.scala 94:39]
      _T_26 <= idx @[Cache.scala 94:39]
      node _T_27 = or(_T_26, UInt<8>("h00")) @[Cache.scala 94:39]
      node _T_28 = bits(_T_27, 7, 0) @[Cache.scala 94:39]
      read mport _T_29 = dataMem_1[_T_28], clock @[Cache.scala 94:39]
      skip @[Cache.scala 94:39]
    node _T_30 = cat(_T_29[1], _T_29[0]) @[Cache.scala 94:50]
    node _T_31 = cat(_T_29[3], _T_29[2]) @[Cache.scala 94:50]
    node _T_32 = cat(_T_31, _T_30) @[Cache.scala 94:50]
    wire _T_33 : UInt @[Cache.scala 94:39]
    _T_33 is invalid @[Cache.scala 94:39]
    when ren : @[Cache.scala 94:39]
      _T_33 <= idx @[Cache.scala 94:39]
      node _T_34 = or(_T_33, UInt<8>("h00")) @[Cache.scala 94:39]
      node _T_35 = bits(_T_34, 7, 0) @[Cache.scala 94:39]
      read mport _T_36 = dataMem_2[_T_35], clock @[Cache.scala 94:39]
      skip @[Cache.scala 94:39]
    node _T_37 = cat(_T_36[1], _T_36[0]) @[Cache.scala 94:50]
    node _T_38 = cat(_T_36[3], _T_36[2]) @[Cache.scala 94:50]
    node _T_39 = cat(_T_38, _T_37) @[Cache.scala 94:50]
    wire _T_40 : UInt @[Cache.scala 94:39]
    _T_40 is invalid @[Cache.scala 94:39]
    when ren : @[Cache.scala 94:39]
      _T_40 <= idx @[Cache.scala 94:39]
      node _T_41 = or(_T_40, UInt<8>("h00")) @[Cache.scala 94:39]
      node _T_42 = bits(_T_41, 7, 0) @[Cache.scala 94:39]
      read mport _T_43 = dataMem_3[_T_42], clock @[Cache.scala 94:39]
      skip @[Cache.scala 94:39]
    node _T_44 = cat(_T_43[1], _T_43[0]) @[Cache.scala 94:50]
    node _T_45 = cat(_T_43[3], _T_43[2]) @[Cache.scala 94:50]
    node _T_46 = cat(_T_45, _T_44) @[Cache.scala 94:50]
    node _T_47 = cat(_T_32, _T_25) @[Cat.scala 29:58]
    node _T_48 = cat(_T_46, _T_39) @[Cat.scala 29:58]
    node rdata = cat(_T_48, _T_47) @[Cat.scala 29:58]
    reg rdata_buf : UInt<128>, clock @[Reg.scala 15:16]
    when ren_reg : @[Reg.scala 16:19]
      rdata_buf <= rdata @[Reg.scala 16:23]
      skip @[Reg.scala 16:19]
    reg refill_buf : UInt<64>[2], clock @[Cache.scala 96:23]
    node _T_49 = cat(refill_buf[1], refill_buf[0]) @[Cache.scala 97:43]
    node _T_50 = mux(ren_reg, rdata, rdata_buf) @[Cache.scala 97:54]
    node read = mux(is_alloc_reg, _T_49, _T_50) @[Cache.scala 97:17]
    node _T_51 = dshr(v, idx_reg) @[Cache.scala 99:11]
    node _T_52 = bits(_T_51, 0, 0) @[Cache.scala 99:11]
    node _T_53 = eq(rmeta.tag, tag_reg) @[Cache.scala 99:34]
    node _T_54 = and(_T_52, _T_53) @[Cache.scala 99:21]
    hit <= _T_54 @[Cache.scala 99:7]
    node _T_55 = bits(read, 31, 0) @[Cache.scala 102:62]
    node _T_56 = bits(read, 63, 32) @[Cache.scala 102:62]
    node _T_57 = bits(read, 95, 64) @[Cache.scala 102:62]
    node _T_58 = bits(read, 127, 96) @[Cache.scala 102:62]
    wire _T_59 : UInt<32>[4] @[Cache.scala 102:52]
    _T_59[0] <= _T_55 @[Cache.scala 102:52]
    _T_59[1] <= _T_56 @[Cache.scala 102:52]
    _T_59[2] <= _T_57 @[Cache.scala 102:52]
    _T_59[3] <= _T_58 @[Cache.scala 102:52]
    io.cpu.resp.bits.data <= _T_59[off_reg] @[Cache.scala 102:25]
    node _T_60 = and(is_read, hit) @[Cache.scala 103:47]
    node _T_61 = or(is_idle, _T_60) @[Cache.scala 103:36]
    node _T_62 = neq(cpu_mask, UInt<1>("h00")) @[Cache.scala 103:83]
    node _T_63 = eq(_T_62, UInt<1>("h00")) @[Cache.scala 103:73]
    node _T_64 = and(is_alloc_reg, _T_63) @[Cache.scala 103:70]
    node _T_65 = or(_T_61, _T_64) @[Cache.scala 103:54]
    io.cpu.resp.valid <= _T_65 @[Cache.scala 103:25]
    when io.cpu.resp.valid : @[Cache.scala 105:27]
      addr_reg <= io.cpu.req.bits.addr @[Cache.scala 106:15]
      cpu_data <= io.cpu.req.bits.data @[Cache.scala 107:15]
      cpu_mask <= io.cpu.req.bits.mask @[Cache.scala 108:15]
      skip @[Cache.scala 105:27]
    wire wmeta : {tag : UInt<20>} @[Cache.scala 111:19]
    wmeta.tag <= tag_reg @[Cache.scala 112:13]
    node _T_66 = eq(is_alloc, UInt<1>("h00")) @[Cache.scala 114:19]
    node _T_67 = cat(off_reg, UInt<2>("h00")) @[Cat.scala 29:58]
    node _T_68 = dshl(cpu_mask, _T_67) @[Cache.scala 114:40]
    node _T_69 = cvt(_T_68) @[Cache.scala 114:87]
    node wmask = mux(_T_66, _T_69, asSInt(UInt<1>("h01"))) @[Cache.scala 114:18]
    node _T_70 = eq(is_alloc, UInt<1>("h00")) @[Cache.scala 115:19]
    node _T_71 = cat(cpu_data, cpu_data) @[Cat.scala 29:58]
    node _T_72 = cat(_T_71, _T_71) @[Cat.scala 29:58]
    node _T_73 = cat(io.nasti.r.bits.data, refill_buf[0]) @[Cat.scala 29:58]
    node wdata = mux(_T_70, _T_72, _T_73) @[Cache.scala 115:18]
    when wen : @[Cache.scala 118:13]
      node _T_74 = dshl(UInt<1>("h01"), idx_reg) @[Cache.scala 119:18]
      node _T_75 = or(v, _T_74) @[Cache.scala 119:18]
      node _T_76 = not(v) @[Cache.scala 119:18]
      node _T_77 = or(_T_76, _T_74) @[Cache.scala 119:18]
      node _T_78 = not(_T_77) @[Cache.scala 119:18]
      node _T_79 = mux(UInt<1>("h01"), _T_75, _T_78) @[Cache.scala 119:18]
      v <= _T_79 @[Cache.scala 119:7]
      node _T_80 = eq(is_alloc, UInt<1>("h00")) @[Cache.scala 120:28]
      node _T_81 = dshl(UInt<1>("h01"), idx_reg) @[Cache.scala 120:18]
      node _T_82 = or(d, _T_81) @[Cache.scala 120:18]
      node _T_83 = not(d) @[Cache.scala 120:18]
      node _T_84 = or(_T_83, _T_81) @[Cache.scala 120:18]
      node _T_85 = not(_T_84) @[Cache.scala 120:18]
      node _T_86 = mux(_T_80, _T_82, _T_85) @[Cache.scala 120:18]
      d <= _T_86 @[Cache.scala 120:7]
      when is_alloc : @[Cache.scala 121:20]
        write mport _T_87 = metaMem[idx_reg], clock
        _T_87.tag <= wmeta.tag
        skip @[Cache.scala 121:20]
      node _T_88 = bits(wdata, 7, 0) @[Cache.scala 125:53]
      node _T_89 = bits(wdata, 15, 8) @[Cache.scala 125:53]
      node _T_90 = bits(wdata, 23, 16) @[Cache.scala 125:53]
      node _T_91 = bits(wdata, 31, 24) @[Cache.scala 125:53]
      wire _T_92 : UInt<8>[4] @[Cache.scala 125:42]
      _T_92[0] <= _T_88 @[Cache.scala 125:42]
      _T_92[1] <= _T_89 @[Cache.scala 125:42]
      _T_92[2] <= _T_90 @[Cache.scala 125:42]
      _T_92[3] <= _T_91 @[Cache.scala 125:42]
      node _T_93 = bits(wmask, 3, 0) @[Cache.scala 126:37]
      node _T_94 = bits(_T_93, 0, 0) @[Cache.scala 126:71]
      node _T_95 = bits(_T_93, 1, 1) @[Cache.scala 126:71]
      node _T_96 = bits(_T_93, 2, 2) @[Cache.scala 126:71]
      node _T_97 = bits(_T_93, 3, 3) @[Cache.scala 126:71]
      write mport _T_98 = dataMem_0[idx_reg], clock
      when _T_94 :
        _T_98[0] <= _T_92[0]
        skip
      when _T_95 :
        _T_98[1] <= _T_92[1]
        skip
      when _T_96 :
        _T_98[2] <= _T_92[2]
        skip
      when _T_97 :
        _T_98[3] <= _T_92[3]
        skip
      node _T_99 = bits(wdata, 39, 32) @[Cache.scala 125:53]
      node _T_100 = bits(wdata, 47, 40) @[Cache.scala 125:53]
      node _T_101 = bits(wdata, 55, 48) @[Cache.scala 125:53]
      node _T_102 = bits(wdata, 63, 56) @[Cache.scala 125:53]
      wire _T_103 : UInt<8>[4] @[Cache.scala 125:42]
      _T_103[0] <= _T_99 @[Cache.scala 125:42]
      _T_103[1] <= _T_100 @[Cache.scala 125:42]
      _T_103[2] <= _T_101 @[Cache.scala 125:42]
      _T_103[3] <= _T_102 @[Cache.scala 125:42]
      node _T_104 = bits(wmask, 7, 4) @[Cache.scala 126:37]
      node _T_105 = bits(_T_104, 0, 0) @[Cache.scala 126:71]
      node _T_106 = bits(_T_104, 1, 1) @[Cache.scala 126:71]
      node _T_107 = bits(_T_104, 2, 2) @[Cache.scala 126:71]
      node _T_108 = bits(_T_104, 3, 3) @[Cache.scala 126:71]
      write mport _T_109 = dataMem_1[idx_reg], clock
      when _T_105 :
        _T_109[0] <= _T_103[0]
        skip
      when _T_106 :
        _T_109[1] <= _T_103[1]
        skip
      when _T_107 :
        _T_109[2] <= _T_103[2]
        skip
      when _T_108 :
        _T_109[3] <= _T_103[3]
        skip
      node _T_110 = bits(wdata, 71, 64) @[Cache.scala 125:53]
      node _T_111 = bits(wdata, 79, 72) @[Cache.scala 125:53]
      node _T_112 = bits(wdata, 87, 80) @[Cache.scala 125:53]
      node _T_113 = bits(wdata, 95, 88) @[Cache.scala 125:53]
      wire _T_114 : UInt<8>[4] @[Cache.scala 125:42]
      _T_114[0] <= _T_110 @[Cache.scala 125:42]
      _T_114[1] <= _T_111 @[Cache.scala 125:42]
      _T_114[2] <= _T_112 @[Cache.scala 125:42]
      _T_114[3] <= _T_113 @[Cache.scala 125:42]
      node _T_115 = bits(wmask, 11, 8) @[Cache.scala 126:37]
      node _T_116 = bits(_T_115, 0, 0) @[Cache.scala 126:71]
      node _T_117 = bits(_T_115, 1, 1) @[Cache.scala 126:71]
      node _T_118 = bits(_T_115, 2, 2) @[Cache.scala 126:71]
      node _T_119 = bits(_T_115, 3, 3) @[Cache.scala 126:71]
      write mport _T_120 = dataMem_2[idx_reg], clock
      when _T_116 :
        _T_120[0] <= _T_114[0]
        skip
      when _T_117 :
        _T_120[1] <= _T_114[1]
        skip
      when _T_118 :
        _T_120[2] <= _T_114[2]
        skip
      when _T_119 :
        _T_120[3] <= _T_114[3]
        skip
      node _T_121 = bits(wdata, 103, 96) @[Cache.scala 125:53]
      node _T_122 = bits(wdata, 111, 104) @[Cache.scala 125:53]
      node _T_123 = bits(wdata, 119, 112) @[Cache.scala 125:53]
      node _T_124 = bits(wdata, 127, 120) @[Cache.scala 125:53]
      wire _T_125 : UInt<8>[4] @[Cache.scala 125:42]
      _T_125[0] <= _T_121 @[Cache.scala 125:42]
      _T_125[1] <= _T_122 @[Cache.scala 125:42]
      _T_125[2] <= _T_123 @[Cache.scala 125:42]
      _T_125[3] <= _T_124 @[Cache.scala 125:42]
      node _T_126 = bits(wmask, 15, 12) @[Cache.scala 126:37]
      node _T_127 = bits(_T_126, 0, 0) @[Cache.scala 126:71]
      node _T_128 = bits(_T_126, 1, 1) @[Cache.scala 126:71]
      node _T_129 = bits(_T_126, 2, 2) @[Cache.scala 126:71]
      node _T_130 = bits(_T_126, 3, 3) @[Cache.scala 126:71]
      write mport _T_131 = dataMem_3[idx_reg], clock
      when _T_127 :
        _T_131[0] <= _T_125[0]
        skip
      when _T_128 :
        _T_131[1] <= _T_125[1]
        skip
      when _T_129 :
        _T_131[2] <= _T_125[2]
        skip
      when _T_130 :
        _T_131[3] <= _T_125[3]
        skip
      skip @[Cache.scala 118:13]
    node _T_132 = cat(tag_reg, idx_reg) @[Cat.scala 29:58]
    node _T_133 = dshl(_T_132, UInt<3>("h04")) @[Cache.scala 132:33]
    wire _T_134 : {addr : UInt<32>, len : UInt<8>, size : UInt<3>, burst : UInt<2>, lock : UInt<1>, cache : UInt<4>, prot : UInt<3>, qos : UInt<4>, region : UInt<4>, id : UInt<5>, user : UInt<1>} @[nasti.scala 178:18]
    _T_134 is invalid @[nasti.scala 178:18]
    _T_134.id <= UInt<1>("h00") @[nasti.scala 179:11]
    _T_134.addr <= _T_133 @[nasti.scala 180:13]
    _T_134.len <= UInt<1>("h01") @[nasti.scala 181:12]
    _T_134.size <= UInt<2>("h03") @[nasti.scala 182:13]
    _T_134.burst <= UInt<1>("h01") @[nasti.scala 183:14]
    _T_134.lock <= UInt<1>("h00") @[nasti.scala 184:13]
    _T_134.cache <= UInt<1>("h00") @[nasti.scala 185:14]
    node _T_135 = cat(UInt<1>("h00"), UInt<1>("h00")) @[Cat.scala 29:58]
    node _T_136 = cat(_T_135, UInt<1>("h00")) @[Cat.scala 29:58]
    _T_134.prot <= _T_136 @[nasti.scala 186:13]
    _T_134.qos <= UInt<1>("h00") @[nasti.scala 187:12]
    _T_134.region <= UInt<1>("h00") @[nasti.scala 188:15]
    _T_134.user <= UInt<1>("h00") @[nasti.scala 189:13]
    io.nasti.ar.bits.user <= _T_134.user @[Cache.scala 131:20]
    io.nasti.ar.bits.id <= _T_134.id @[Cache.scala 131:20]
    io.nasti.ar.bits.region <= _T_134.region @[Cache.scala 131:20]
    io.nasti.ar.bits.qos <= _T_134.qos @[Cache.scala 131:20]
    io.nasti.ar.bits.prot <= _T_134.prot @[Cache.scala 131:20]
    io.nasti.ar.bits.cache <= _T_134.cache @[Cache.scala 131:20]
    io.nasti.ar.bits.lock <= _T_134.lock @[Cache.scala 131:20]
    io.nasti.ar.bits.burst <= _T_134.burst @[Cache.scala 131:20]
    io.nasti.ar.bits.size <= _T_134.size @[Cache.scala 131:20]
    io.nasti.ar.bits.len <= _T_134.len @[Cache.scala 131:20]
    io.nasti.ar.bits.addr <= _T_134.addr @[Cache.scala 131:20]
    io.nasti.ar.valid <= UInt<1>("h00") @[Cache.scala 133:21]
    node _T_137 = eq(state, UInt<3>("h06")) @[Cache.scala 135:29]
    io.nasti.r.ready <= _T_137 @[Cache.scala 135:20]
    node _T_138 = and(io.nasti.r.ready, io.nasti.r.valid) @[Decoupled.scala 40:37]
    when _T_138 : @[Cache.scala 136:27]
      refill_buf[value] <= io.nasti.r.bits.data @[Cache.scala 136:52]
      skip @[Cache.scala 136:27]
    node _T_139 = cat(rmeta.tag, idx_reg) @[Cat.scala 29:58]
    node _T_140 = dshl(_T_139, UInt<3>("h04")) @[Cache.scala 140:35]
    wire _T_141 : {addr : UInt<32>, len : UInt<8>, size : UInt<3>, burst : UInt<2>, lock : UInt<1>, cache : UInt<4>, prot : UInt<3>, qos : UInt<4>, region : UInt<4>, id : UInt<5>, user : UInt<1>} @[nasti.scala 158:18]
    _T_141 is invalid @[nasti.scala 158:18]
    _T_141.id <= UInt<1>("h00") @[nasti.scala 159:11]
    _T_141.addr <= _T_140 @[nasti.scala 160:13]
    _T_141.len <= UInt<1>("h01") @[nasti.scala 161:12]
    _T_141.size <= UInt<2>("h03") @[nasti.scala 162:13]
    _T_141.burst <= UInt<1>("h01") @[nasti.scala 163:14]
    _T_141.lock <= UInt<1>("h00") @[nasti.scala 164:13]
    _T_141.cache <= UInt<1>("h00") @[nasti.scala 165:14]
    node _T_142 = cat(UInt<1>("h00"), UInt<1>("h00")) @[Cat.scala 29:58]
    node _T_143 = cat(_T_142, UInt<1>("h00")) @[Cat.scala 29:58]
    _T_141.prot <= _T_143 @[nasti.scala 166:13]
    _T_141.qos <= UInt<1>("h00") @[nasti.scala 167:12]
    _T_141.region <= UInt<1>("h00") @[nasti.scala 168:15]
    _T_141.user <= UInt<1>("h00") @[nasti.scala 169:13]
    io.nasti.aw.bits.user <= _T_141.user @[Cache.scala 139:20]
    io.nasti.aw.bits.id <= _T_141.id @[Cache.scala 139:20]
    io.nasti.aw.bits.region <= _T_141.region @[Cache.scala 139:20]
    io.nasti.aw.bits.qos <= _T_141.qos @[Cache.scala 139:20]
    io.nasti.aw.bits.prot <= _T_141.prot @[Cache.scala 139:20]
    io.nasti.aw.bits.cache <= _T_141.cache @[Cache.scala 139:20]
    io.nasti.aw.bits.lock <= _T_141.lock @[Cache.scala 139:20]
    io.nasti.aw.bits.burst <= _T_141.burst @[Cache.scala 139:20]
    io.nasti.aw.bits.size <= _T_141.size @[Cache.scala 139:20]
    io.nasti.aw.bits.len <= _T_141.len @[Cache.scala 139:20]
    io.nasti.aw.bits.addr <= _T_141.addr @[Cache.scala 139:20]
    io.nasti.aw.valid <= UInt<1>("h00") @[Cache.scala 141:21]
    node _T_144 = bits(read, 63, 0) @[Cache.scala 144:42]
    node _T_145 = bits(read, 127, 64) @[Cache.scala 144:42]
    wire _T_146 : UInt<64>[2] @[Cache.scala 144:32]
    _T_146[0] <= _T_144 @[Cache.scala 144:32]
    _T_146[1] <= _T_145 @[Cache.scala 144:32]
    wire _T_147 : {data : UInt<64>, last : UInt<1>, id : UInt<5>, strb : UInt<8>, user : UInt<1>} @[nasti.scala 198:17]
    _T_147 is invalid @[nasti.scala 198:17]
    node _T_148 = mux(UInt<1>("h01"), UInt<8>("h0ff"), UInt<8>("h00")) @[Bitwise.scala 71:12]
    _T_147.strb <= _T_148 @[nasti.scala 199:12]
    _T_147.data <= _T_146[value_1] @[nasti.scala 200:12]
    _T_147.last <= write_wrap_out @[nasti.scala 201:12]
    _T_147.id <= UInt<1>("h00") @[nasti.scala 202:12]
    _T_147.user <= UInt<1>("h00") @[nasti.scala 203:12]
    io.nasti.w.bits.user <= _T_147.user @[Cache.scala 143:19]
    io.nasti.w.bits.strb <= _T_147.strb @[Cache.scala 143:19]
    io.nasti.w.bits.id <= _T_147.id @[Cache.scala 143:19]
    io.nasti.w.bits.last <= _T_147.last @[Cache.scala 143:19]
    io.nasti.w.bits.data <= _T_147.data @[Cache.scala 143:19]
    io.nasti.w.valid <= UInt<1>("h00") @[Cache.scala 146:20]
    io.nasti.b.ready <= UInt<1>("h00") @[Cache.scala 148:20]
    node _T_149 = dshr(v, idx_reg) @[Cache.scala 151:19]
    node _T_150 = bits(_T_149, 0, 0) @[Cache.scala 151:19]
    node _T_151 = dshr(d, idx_reg) @[Cache.scala 151:33]
    node _T_152 = bits(_T_151, 0, 0) @[Cache.scala 151:33]
    node is_dirty = and(_T_150, _T_152) @[Cache.scala 151:29]
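    ; State encoding, per riscv-mini's Cache.scala: 0 = s_IDLE, 1 = s_READ_CACHE, 2 = s_WRITE_CACHE,
    ; 3 = s_WRITE_BACK, 4 = s_WRITE_ACK, 5 = s_REFILL_READY, 6 = s_REFILL.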
    node _T_153 = eq(UInt<3>("h00"), state) @[Conditional.scala 37:30]
    when _T_153 : @[Conditional.scala 40:58]
      when io.cpu.req.valid : @[Cache.scala 154:30]
        node _T_154 = neq(io.cpu.req.bits.mask, UInt<1>("h00")) @[Cache.scala 155:43]
        node _T_155 = mux(_T_154, UInt<3>("h02"), UInt<3>("h01")) @[Cache.scala 155:21]
        state <= _T_155 @[Cache.scala 155:15]
        skip @[Cache.scala 154:30]
      skip @[Conditional.scala 40:58]
    else : @[Conditional.scala 39:67]
      node _T_156 = eq(UInt<3>("h01"), state) @[Conditional.scala 37:30]
      when _T_156 : @[Conditional.scala 39:67]
        when hit : @[Cache.scala 159:17]
          when io.cpu.req.valid : @[Cache.scala 160:32]
            node _T_157 = neq(io.cpu.req.bits.mask, UInt<1>("h00")) @[Cache.scala 161:45]
            node _T_158 = mux(_T_157, UInt<3>("h02"), UInt<3>("h01")) @[Cache.scala 161:23]
            state <= _T_158 @[Cache.scala 161:17]
            skip @[Cache.scala 160:32]
          else : @[Cache.scala 162:21]
            state <= UInt<3>("h00") @[Cache.scala 163:17]
            skip @[Cache.scala 162:21]
          skip @[Cache.scala 159:17]
        else : @[Cache.scala 165:19]
          io.nasti.aw.valid <= is_dirty @[Cache.scala 166:27]
          node _T_159 = eq(is_dirty, UInt<1>("h00")) @[Cache.scala 167:30]
          io.nasti.ar.valid <= _T_159 @[Cache.scala 167:27]
          node _T_160 = and(io.nasti.aw.ready, io.nasti.aw.valid) @[Decoupled.scala 40:37]
          when _T_160 : @[Cache.scala 168:34]
            state <= UInt<3>("h03") @[Cache.scala 169:17]
            skip @[Cache.scala 168:34]
          else : @[Cache.scala 170:40]
            node _T_161 = and(io.nasti.ar.ready, io.nasti.ar.valid) @[Decoupled.scala 40:37]
            when _T_161 : @[Cache.scala 170:40]
              state <= UInt<3>("h06") @[Cache.scala 171:17]
              skip @[Cache.scala 170:40]
          skip @[Cache.scala 165:19]
        skip @[Conditional.scala 39:67]
      else : @[Conditional.scala 39:67]
        node _T_162 = eq(UInt<3>("h02"), state) @[Conditional.scala 37:30]
        when _T_162 : @[Conditional.scala 39:67]
          node _T_163 = or(hit, is_alloc_reg) @[Cache.scala 176:16]
          node _T_164 = or(_T_163, io.cpu.abort) @[Cache.scala 176:32]
          when _T_164 : @[Cache.scala 176:49]
            state <= UInt<3>("h00") @[Cache.scala 177:15]
            skip @[Cache.scala 176:49]
          else : @[Cache.scala 178:19]
            io.nasti.aw.valid <= is_dirty @[Cache.scala 179:27]
            node _T_165 = eq(is_dirty, UInt<1>("h00")) @[Cache.scala 180:30]
            io.nasti.ar.valid <= _T_165 @[Cache.scala 180:27]
            node _T_166 = and(io.nasti.aw.ready, io.nasti.aw.valid) @[Decoupled.scala 40:37]
            when _T_166 : @[Cache.scala 181:34]
              state <= UInt<3>("h03") @[Cache.scala 182:17]
              skip @[Cache.scala 181:34]
            else : @[Cache.scala 183:40]
              node _T_167 = and(io.nasti.ar.ready, io.nasti.ar.valid) @[Decoupled.scala 40:37]
              when _T_167 : @[Cache.scala 183:40]
                state <= UInt<3>("h06") @[Cache.scala 184:17]
                skip @[Cache.scala 183:40]
            skip @[Cache.scala 178:19]
          skip @[Conditional.scala 39:67]
        else : @[Conditional.scala 39:67]
          node _T_168 = eq(UInt<3>("h03"), state) @[Conditional.scala 37:30]
          when _T_168 : @[Conditional.scala 39:67]
            io.nasti.w.valid <= UInt<1>("h01") @[Cache.scala 189:24]
            when write_wrap_out : @[Cache.scala 190:28]
              state <= UInt<3>("h04") @[Cache.scala 191:15]
              skip @[Cache.scala 190:28]
            skip @[Conditional.scala 39:67]
          else : @[Conditional.scala 39:67]
            node _T_169 = eq(UInt<3>("h04"), state) @[Conditional.scala 37:30]
            when _T_169 : @[Conditional.scala 39:67]
              io.nasti.b.ready <= UInt<1>("h01") @[Cache.scala 195:24]
              node _T_170 = and(io.nasti.b.ready, io.nasti.b.valid) @[Decoupled.scala 40:37]
              when _T_170 : @[Cache.scala 196:31]
                state <= UInt<3>("h05") @[Cache.scala 197:15]
                skip @[Cache.scala 196:31]
              skip @[Conditional.scala 39:67]
            else : @[Conditional.scala 39:67]
              node _T_171 = eq(UInt<3>("h05"), state) @[Conditional.scala 37:30]
              when _T_171 : @[Conditional.scala 39:67]
                io.nasti.ar.valid <= UInt<1>("h01") @[Cache.scala 201:25]
                node _T_172 = and(io.nasti.ar.ready, io.nasti.ar.valid) @[Decoupled.scala 40:37]
                when _T_172 : @[Cache.scala 202:32]
                  state <= UInt<3>("h06") @[Cache.scala 203:15]
                  skip @[Cache.scala 202:32]
                skip @[Conditional.scala 39:67]
              else : @[Conditional.scala 39:67]
                node _T_173 = eq(UInt<3>("h06"), state) @[Conditional.scala 37:30]
                when _T_173 : @[Conditional.scala 39:67]
                  when read_wrap_out : @[Cache.scala 207:27]
                    node _T_174 = neq(cpu_mask, UInt<1>("h00")) @[Cache.scala 208:31]
                    node _T_175 = mux(_T_174, UInt<3>("h02"), UInt<3>("h00")) @[Cache.scala 208:21]
                    state <= _T_175 @[Cache.scala 208:15]
                    skip @[Cache.scala 207:27]
                  skip @[Conditional.scala 39:67]