Overall Coverage: 98%
All Uncovered Lines:
for (genvar k = 0; k < NAlerts; k++) begin : gen_alerts
prim_alert_receiver #(
.AsyncOn(AsyncOn[k])
.clk_i ,
.rst_ni ,
.ping_en_i ( alert_ping_en[k] ),
.ping_ok_o ( alert_ping_ok[k] ),
.integ_fail_o ( alert_integfail[k] ),
.alert_o ( alert_trig[k] ),
.alert_rx_o ( alert_rx_o[k] ),
.alert_tx_i ( alert_tx_i[k] )
for (genvar k = 0; k < N_CLASSES; k++) begin : gen_classes
.clk_i,
.rst_ni,
.class_en_i ( reg2hw_wrap.class_en[k] ),
.clr_i ( reg2hw_wrap.class_clr[k] ),
.class_trig_i ( hw2reg_wrap.class_trig[k] ),
.thresh_i ( reg2hw_wrap.class_accum_thresh[k] ),
.accu_cnt_o ( hw2reg_wrap.class_accum_cnt[k] ),
.accu_trig_o ( class_accum_trig[k] )
.clk_i,
.rst_ni,
.en_i ( reg2hw_wrap.class_en[k] ),
.clr_i ( reg2hw_wrap.class_clr[k] ),
.timeout_en_i ( irq[k] ),
.accum_trig_i ( class_accum_trig[k] ),
.timeout_cyc_i ( reg2hw_wrap.class_timeout_cyc[k] ),
.esc_en_i ( reg2hw_wrap.class_esc_en[k] ),
.esc_map_i ( reg2hw_wrap.class_esc_map[k] ),
.phase_cyc_i ( reg2hw_wrap.class_phase_cyc[k] ),
.esc_trig_o ( hw2reg_wrap.class_esc_trig[k] ),
.esc_cnt_o ( hw2reg_wrap.class_esc_cnt[k] ),
.esc_state_o ( hw2reg_wrap.class_esc_state[k] ),
.esc_sig_en_o ( class_esc_sig_en[k] )
for (genvar k = 0; k < N_ESC_SEV; k++) begin : gen_esc_sev
for (genvar j = 0; j < N_CLASSES; j++) begin : gen_transp
assign esc_sig_en_trsp[k][j] = class_esc_sig_en[j][k];
assign esc_sig_en[k] = |esc_sig_en_trsp[k];
.clk_i,
.rst_ni,
.ping_en_i ( esc_ping_en[k] ),
.ping_ok_o ( esc_ping_ok[k] ),
.integ_fail_o ( esc_integfail[k] ),
.esc_en_i ( esc_sig_en[k] ),
.esc_rx_i ( esc_rx_i[k] ),
.esc_tx_o ( esc_tx_o[k] )
for (genvar k = 0; k < N_CLASSES; k++) begin : gen_classifier
assign class_trig_o[k] = (|{ alert_cause_o & class_masks[k],
loc_alert_cause_o & loc_class_masks[k] });
input [N_ESC_SEV-1:0]
input [N_PHASES-1:0]
for (genvar k = 0; k < N_ESC_SEV; k++) begin : gen_phase_map
assign esc_map_oh[k] = N_ESC_SEV'(esc_en_i[k]) << esc_map_i[k];
assign esc_sig_en_o[k] = |(esc_map_oh[k] & phase_oh);
for (genvar k = 0; k < NAlerts; k++) begin : gen_alert_cause
assign hw2reg.alert_cause[k].d = 1'b1;
assign hw2reg.alert_cause[k].de = reg2hw.alert_cause[k].q |
hw2reg_wrap.alert_cause[k];
for (genvar k = 0; k < N_LOC_ALERT; k++) begin : gen_loc_alert_cause
assign hw2reg.loc_alert_cause[k].d = 1'b1;
assign hw2reg.loc_alert_cause[k].de = reg2hw.loc_alert_cause[k].q |
hw2reg_wrap.loc_alert_cause[k];
for (genvar k = 0; k < NAlerts; k++) begin : gen_alert_en_class
assign reg2hw_wrap.alert_en[k] = reg2hw.alert_en[k].q;
assign reg2hw_wrap.alert_class[k] = reg2hw.alert_class[k].q;
for (genvar k = 0; k < N_LOC_ALERT; k++) begin : gen_loc_alert_en_class
assign reg2hw_wrap.loc_alert_en[k] = reg2hw.loc_alert_en[k].q;
assign reg2hw_wrap.loc_alert_class[k] = reg2hw.loc_alert_class[k].q;
for (genvar k = 0; k < NAlerts; k++) begin : gen_alert_cause_dump
assign crashdump_o.alert_cause[k] = reg2hw.alert_cause[k].q;
for (genvar k = 0; k < N_LOC_ALERT; k++) begin : gen_loc_alert_cause_dump
assign crashdump_o.loc_alert_cause[k] = reg2hw.loc_alert_cause[k].q;
end else begin : gen_multdiv_no_m
assign multdiv_en_sel = 1'b0;
assign multdiv_en = 1'b0;
if (MultiplierImplementation == "slow") begin : gen_multdiv_slow
.clk_i ( clk_i ),
.rst_ni ( rst_ni ),
.mult_en_i ( mult_en_i ),
.div_en_i ( div_en_i ),
.operator_i ( multdiv_operator_i ),
.signed_mode_i ( multdiv_signed_mode_i ),
.op_a_i ( multdiv_operand_a_i ),
.op_b_i ( multdiv_operand_b_i ),
.alu_adder_ext_i ( alu_adder_result_ext ),
.alu_adder_i ( alu_adder_result_ex_o ),
.equal_to_zero ( alu_is_equal_result ),
.valid_o ( multdiv_valid ),
.alu_operand_a_o ( multdiv_alu_operand_a ),
.alu_operand_b_o ( multdiv_alu_operand_b ),
.multdiv_result_o ( multdiv_result )
end else if (MultiplierImplementation == "fast") begin : gen_multdiv_fast
if (RV32E) begin : gen_rv32e_reg_check_active
assign illegal_reg_rv32e = ((regfile_raddr_a_o[4] & (alu_op_a_mux_sel_o == OP_A_REG_A)) |
(regfile_raddr_b_o[4] & (alu_op_b_mux_sel_o == OP_B_REG_B)) |
(regfile_waddr_o[4] & regfile_we));
if (PMPEnable) begin : g_pmp
logic [33:0] pmp_req_addr [PMP_NUM_CHAN];
pmp_req_e pmp_req_type [PMP_NUM_CHAN];
priv_lvl_e pmp_priv_lvl [PMP_NUM_CHAN];
assign pmp_req_addr[PMP_I] = {2'b00,instr_addr_o[31:0]};
assign pmp_req_type[PMP_I] = PMP_ACC_EXEC;
assign pmp_priv_lvl[PMP_I] = priv_mode_if;
assign pmp_req_addr[PMP_D] = {2'b00,data_addr_o[31:0]};
assign pmp_req_type[PMP_D] = data_we_o ? PMP_ACC_WRITE : PMP_ACC_READ;
assign pmp_priv_lvl[PMP_D] = priv_mode_lsu;
ibex_pmp #(
.PMPGranularity ( PMPGranularity ),
.PMPNumChan ( PMP_NUM_CHAN ),
.PMPNumRegions ( PMPNumRegions )
.clk_i ( clk ),
.rst_ni ( rst_ni ),
.csr_pmp_cfg_i ( csr_pmp_cfg ),
.csr_pmp_addr_i ( csr_pmp_addr ),
.priv_mode_i ( pmp_priv_lvl ),
.pmp_req_addr_i ( pmp_req_addr ),
.pmp_req_type_i ( pmp_req_type ),
.pmp_req_err_o ( pmp_req_err )
if (PMPEnable) begin : g_pmp_registers
pmp_cfg_t pmp_cfg [PMPNumRegions];
pmp_cfg_t pmp_cfg_wdata [PMPNumRegions];
logic [31:0] pmp_addr [PMPNumRegions];
logic [PMPNumRegions-1:0] pmp_cfg_we;
logic [PMPNumRegions-1:0] pmp_addr_we;
for (genvar i = 0; i < PMP_MAX_REGIONS; i++) begin : g_exp_rd_data
if (i < PMPNumRegions) begin : g_implemented_regions
assign pmp_cfg_rdata[i] = {pmp_cfg[i].lock, 2'b00, pmp_cfg[i].mode,
pmp_cfg[i].exec, pmp_cfg[i].write, pmp_cfg[i].read};
if (PMPGranularity == 0) begin : g_pmp_g0
assign pmp_addr_rdata[i] = pmp_addr[i];
end else if (PMPGranularity == 1) begin : g_pmp_g1
always_comb begin
pmp_addr_rdata[i] = pmp_addr[i];
if ((pmp_cfg[i].mode == PMP_MODE_OFF) || (pmp_cfg[i].mode == PMP_MODE_TOR)) begin
pmp_addr_rdata[i][PMPGranularity-1:0] = '0;
end else begin : g_pmp_g2
always_comb begin
pmp_addr_rdata[i] = pmp_addr[i];
if ((pmp_cfg[i].mode == PMP_MODE_OFF) || (pmp_cfg[i].mode == PMP_MODE_TOR)) begin
pmp_addr_rdata[i][PMPGranularity-1:0] = '0;
end else if (pmp_cfg[i].mode == PMP_MODE_NAPOT) begin
pmp_addr_rdata[i][PMPGranularity-2:0] = '1;
end else begin : g_other_regions
assign pmp_cfg_rdata[i] = '0;
assign pmp_addr_rdata[i] = '0;
for (genvar i = 0; i < PMPNumRegions; i++) begin : g_pmp_csrs
assign pmp_cfg_we[i] = csr_we_int & ~pmp_cfg[i].lock &
(csr_addr == (CSR_OFF_PMP_CFG + (i[11:0] >> 2)));
assign pmp_cfg_wdata[i].lock = csr_wdata_int[(i%4)*PMP_CFG_W+7];
always_comb begin
unique case (csr_wdata_int[(i%4)*PMP_CFG_W+3+:2])
2'b00 : pmp_cfg_wdata[i].mode = PMP_MODE_OFF;
2'b01 : pmp_cfg_wdata[i].mode = PMP_MODE_TOR;
2'b10 : pmp_cfg_wdata[i].mode = (PMPGranularity == 0) ? PMP_MODE_NA4:
PMP_MODE_OFF;
2'b11 : pmp_cfg_wdata[i].mode = PMP_MODE_NAPOT;
default : pmp_cfg_wdata[i].mode = PMP_MODE_OFF;
assign pmp_cfg_wdata[i].exec = csr_wdata_int[(i%4)*PMP_CFG_W+2];
assign pmp_cfg_wdata[i].write = &csr_wdata_int[(i%4)*PMP_CFG_W+:2];
assign pmp_cfg_wdata[i].read = csr_wdata_int[(i%4)*PMP_CFG_W];
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
pmp_cfg[i] <= pmp_cfg_t'('b0);
end else if (pmp_cfg_we[i]) begin
pmp_cfg[i] <= pmp_cfg_wdata[i];
if (i < PMPNumRegions - 1) begin : g_lower
assign pmp_addr_we[i] = csr_we_int & ~pmp_cfg[i].lock &
(pmp_cfg[i+1].mode != PMP_MODE_TOR) &
(csr_addr == (CSR_OFF_PMP_ADDR + i[11:0]));
end else begin : g_upper
assign pmp_addr_we[i] = csr_we_int & ~pmp_cfg[i].lock &
(csr_addr == (CSR_OFF_PMP_ADDR + i[11:0]));
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
pmp_addr[i] <= 'b0;
end else if (pmp_addr_we[i]) begin
pmp_addr[i] <= csr_wdata_int;
assign csr_pmp_cfg_o[i] = pmp_cfg[i];
assign csr_pmp_addr_o[i] = {pmp_addr[i],2'b00};
end else begin : g_mhpmcounter_full
assign mhpmcounter[i] = mhpmcounter_q;
end else begin : g_mcountinhibit_full
assign mcountinhibit = mcountinhibit_q;
if (DbgTriggerEn) begin : gen_trigger_regs
logic tmatch_control_d, tmatch_control_q;
logic [31:0] tmatch_value_d, tmatch_value_q;
logic tmatch_control_we;
logic tmatch_value_we;
assign tmatch_control_we = csr_we_int & debug_mode_i & (csr_addr_i == CSR_TDATA1);
assign tmatch_value_we = csr_we_int & debug_mode_i & (csr_addr_i == CSR_TDATA2);
assign tmatch_control_d = tmatch_control_we ? csr_wdata_int[2] :
tmatch_control_q;
assign tmatch_value_d = csr_wdata_int[31:0];
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
tmatch_control_q <= 'b0;
end else begin
tmatch_control_q <= tmatch_control_d;
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
tmatch_value_q <= 'b0;
end else if (tmatch_value_we) begin
tmatch_value_q <= tmatch_value_d;
assign tselect_rdata = 'b0;
assign tmatch_control_rdata = {4'h2, // type : address/data match
1'b1, // dmode : access from D mode only
6'h00, // maskmax : exact match only
1'b0, // hit : not supported
1'b0, // select : address match only
1'b0, // timing : match before execution
2'b00, // sizelo : match any access
4'h1, // action : enter debug mode
1'b0, // chain : not supported
4'h0, // match : simple match
1'b1, // m : match in m-mode
1'b0, // 0 : zero
1'b0, // s : not supported
1'b1, // u : match in u-mode
tmatch_control_q, // execute : match instruction address
1'b0, // store : not supported
1'b0}; // load : not supported
assign tmatch_value_rdata = tmatch_value_q;
assign trigger_match_o = tmatch_control_q & (pc_if_i[31:0] == tmatch_value_q[31:0]);
for (genvar r = 0; r < PMPNumRegions; r++) begin : g_addr_exp
if (r == 0) begin : g_entry0
assign region_start_addr[r] = (csr_pmp_cfg_i[r].mode == PMP_MODE_TOR) ? 34'h000000000 :
csr_pmp_addr_i[r];
end else begin : g_oth
assign region_start_addr[r] = (csr_pmp_cfg_i[r].mode == PMP_MODE_TOR) ? csr_pmp_addr_i[r-1] :
csr_pmp_addr_i[r];
for (genvar b = PMPGranularity+2; b < 34; b++) begin : g_bitmask
if (b == PMPGranularity+2) begin : g_bit0
assign region_addr_mask[r][b] = (csr_pmp_cfg_i[r].mode != PMP_MODE_NAPOT);
end else begin : g_others
assign region_addr_mask[r][b] = (csr_pmp_cfg_i[r].mode != PMP_MODE_NAPOT) |
~&csr_pmp_addr_i[r][b-1:PMPGranularity+2];
for (genvar c = 0; c < PMPNumChan; c++) begin : g_access_check
for (genvar r = 0; r < PMPNumRegions; r++) begin : g_regions
assign region_match_low[c][r] = (pmp_req_addr_i[c][33:PMPGranularity+2] >=
(region_start_addr[r][33:PMPGranularity+2] &
region_addr_mask[r]));
assign region_match_high[c][r] = (pmp_req_addr_i[c][33:PMPGranularity+2] <=
csr_pmp_addr_i[r][33:PMPGranularity+2]);
assign region_match_both[c][r] = region_match_low[c][r] & region_match_high[c][r] &
(csr_pmp_cfg_i[r].mode != PMP_MODE_OFF);
assign region_perm_check[c][r] =
((pmp_req_type_i[c] == PMP_ACC_EXEC) & csr_pmp_cfg_i[r].exec) |
((pmp_req_type_i[c] == PMP_ACC_WRITE) & csr_pmp_cfg_i[r].write) |
((pmp_req_type_i[c] == PMP_ACC_READ) & csr_pmp_cfg_i[r].read);
assign machine_access_fault[c][r] = region_match_both[c][r] & csr_pmp_cfg_i[r].lock &
~region_perm_check[c][r];
assign user_access_allowed[c][r] = region_match_both[c][r] & region_perm_check[c][r];
assign access_fault[c] = (priv_mode_i[c] == PRIV_LVL_M) ? |machine_access_fault[c] :
~|user_access_allowed[c];
assign pmp_req_err_o[c] = access_fault[c];
}; // Store the request only. Doesn't have to store data
end : gen_esc_sev
end else if (Impl == ImplXilinx) begin : gen_xilinx
.clk0_i,
.clk1_i,
.sel_i,
.clk_o
end else begin : gen_failure
};
end else if (Impl == ImplXilinx) begin: gen_rom_xilinx
prim_xilinx_rom #(
.Width(Width),
.Depth(Depth)
.clk_i,
.addr_i,
.cs_i,
.dout_o,
.dvalid_o
end else begin : gen_rom_unsupported_impl
if (BusWidth == 64) begin : gen_word_mux64
assign rdata_o = word_mux;
dm_sba_access_size: assert property(@(posedge clk_i) disable iff (dmactive_i !== 1'b0)
(state_d != Idle) |-> (sbaccess_i < 4))
else $warning ("accesses > 8 byte not supported at the moment");
haltsum: assert property (
@(posedge clk_i) disable iff (!rst_ni)
(dmi_req_ready_o && dmi_req_valid_i && dtm_op == dm::DTM_READ) |->
!({1'b0, dmi_req_i.addr} inside
{dm::HaltSum0, dm::HaltSum1, dm::HaltSum2, dm::HaltSum3}))
else $warning("Haltsums have not been properly tested yet.");
if (N == 1) begin : gen_degenerate_case
assign valid_o = req_i[0];
assign data_o = data_i[0];
assign gnt_o[0] = valid_o & ready_i;
assign idx_o = '0;
prim_diff_decode #(
.AsyncOn(1'b0)
.clk_i,
.rst_ni,
.diff_pi ( esc_rx_i.resp_p ),
.diff_ni ( esc_rx_i.resp_n ),
.level_o ( resp ),
.rise_o ( ),
.fall_o ( ),
.event_o ( ),
.sigint_o ( sigint_detected )
end else begin : gen_stored_out
assign stored_data_next = {{(OutW-InW){1'b0}}, concat_data[OutW+:InW]};
assign stored_mask_next = {{(OutW-InW){1'b0}}, concat_mask[OutW+:InW]};
if (CustomCoeffs > 0) begin : gen_custom
assign coeffs = CustomCoeffs[LfsrDw-1:0];
end else if (64'(LfsrType) == "FIB_XNOR") begin : gen_fib_xnor
if (CustomCoeffs > 0) begin : gen_custom
assign coeffs = CustomCoeffs[LfsrDw-1:0];
end else begin : gen_lut
assign coeffs = FIB_XNOR_COEFFS[LfsrDw-FIB_XNOR_LUT_OFF][LfsrDw-1:0];
assign next_lfsr_state = LfsrDw'(entropy_i) ^ {lfsr_q[LfsrDw-2:0], ~(^(lfsr_q & coeffs))};
assign lockup = &lfsr_q;
if (ExtSeedSVA) begin : gen_ext_seed_sva
if (MemT == "REGISTER") begin : gen_regmem
prim_ram_2p #(
.Width (TotalWidth),
.Depth (Depth),
.Impl(prim_pkg::ImplGeneric)
.clk_a_i (clk_a_i),
.clk_b_i (clk_b_i),
.a_req_i (a_req_q),
.a_write_i (a_write_q),
.a_addr_i (a_addr_q),
.a_wdata_i (a_wdata_q),
.a_rdata_o (a_rdata_sram),
.b_req_i (b_req_q),
.b_write_i (b_write_q),
.b_addr_i (b_addr_q),
.b_wdata_i (b_wdata_q),
.b_rdata_o (b_rdata_sram)
end else if (MemT == "SRAM") begin : gen_srammem
if (EnableParity == 0 && EnableECC) begin : gen_secded
if (Width == 32) begin : gen_secded_39_32
.in (a_rdata_sram),
.d_o (a_rdata_d[0+:Width]),
.syndrome_o (a_rdata_d[Width+:ParWidth]),
.err_o (a_rerror_d)
.in (b_rdata_sram),
.d_o (b_rdata_d[0+:Width]),
.syndrome_o (b_rdata_d[Width+:ParWidth]),
.err_o (b_rerror_d)
assign a_rvalid_d = a_rvalid_sram;
assign b_rvalid_d = b_rvalid_sram;
if (EnableInputPipeline) begin : gen_regslice_input
always_ff @(posedge clk_a_i or negedge rst_a_ni) begin
if (!rst_a_ni) begin
a_req_q <= '0;
a_write_q <= '0;
a_addr_q <= '0;
a_wdata_q <= '0;
end else begin
a_req_q <= a_req_d;
a_write_q <= a_write_d;
a_addr_q <= a_addr_d;
a_wdata_q <= a_wdata_d;
always_ff @(posedge clk_b_i or negedge rst_b_ni) begin
if (!rst_b_ni) begin
b_req_q <= '0;
b_write_q <= '0;
b_addr_q <= '0;
b_wdata_q <= '0;
end else begin
b_req_q <= b_req_d;
b_write_q <= b_write_d;
b_addr_q <= b_addr_d;
b_wdata_q <= b_wdata_d;
if (EnableOutputPipeline) begin : gen_regslice_output
always_ff @(posedge clk_a_i or negedge rst_a_ni) begin
if (!rst_a_ni) begin
a_rvalid_q <= '0;
a_rdata_q <= '0;
a_rerror_q <= '0;
end else begin
a_rvalid_q <= a_rvalid_d;
a_rdata_q <= a_rdata_d[0+:Width] ;
a_rerror_q <= a_rerror_d;
always_ff @(posedge clk_b_i or negedge rst_b_ni) begin
if (!rst_b_ni) begin
b_rvalid_q <= '0;
b_rdata_q <= '0;
b_rerror_q <= '0;
end else begin
b_rvalid_q <= b_rvalid_d;
b_rdata_q <= b_rdata_d[0+:Width] ;
b_rerror_q <= b_rerror_d;
if (N == 1) begin : gen_degenerate_case
assign valid_o = req_i[0];
assign data_o = data_i[0];
assign gnt_o[0] = valid_o & ready_i;
assign idx_o = '0;
end else begin : gen_no_lock
assign req = req_i;
end else begin : gen_tie_off
assign req_tree[pa] = '0;
assign idx_tree[pa] = '0;
assign data_tree[pa] = '0;
end : gen_level
end : gen_tree
if (MemT == "REGISTER") begin : gen_regmem
prim_ram_2p #(
.Width (TotalWidth),
.Depth (Depth),
.Impl(prim_pkg::ImplGeneric)
.clk_a_i (clk_i),
.clk_b_i (clk_i),
.a_req_i (a_req_q),
.a_write_i (a_write_q),
.a_addr_i (a_addr_q),
.a_wdata_i (a_wdata_q),
.a_rdata_o (a_rdata_sram),
.b_req_i (b_req_q),
.b_write_i (b_write_q),
.b_addr_i (b_addr_q),
.b_wdata_i (b_wdata_q),
.b_rdata_o (b_rdata_sram)
end else if (MemT == "SRAM") begin : gen_srammem
end else begin : gen_nosecded
assign a_wdata_d[0+:Width] = a_wdata_i;
assign b_wdata_d[0+:Width] = b_wdata_i;
assign a_rdata_d = a_rdata_sram;
assign b_rdata_d = b_rdata_sram;
assign a_rvalid_d = a_rvalid_sram;
assign b_rvalid_d = b_rvalid_sram;
assign a_rerror_d = 2'b00;
assign b_rerror_d = 2'b00;
if (EnableInputPipeline) begin : gen_regslice_input
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
a_req_q <= '0;
a_write_q <= '0;
a_addr_q <= '0;
a_wdata_q <= '0;
b_req_q <= '0;
b_write_q <= '0;
b_addr_q <= '0;
b_wdata_q <= '0;
end else begin
a_req_q <= a_req_d;
a_write_q <= a_write_d;
a_addr_q <= a_addr_d;
a_wdata_q <= a_wdata_d;
b_req_q <= b_req_d;
b_write_q <= b_write_d;
b_addr_q <= b_addr_d;
b_wdata_q <= b_wdata_d;
if (EnableOutputPipeline) begin : gen_regslice_output
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
a_rvalid_q <= '0;
a_rdata_q <= '0;
a_rerror_q <= '0;
b_rvalid_q <= '0;
b_rdata_q <= '0;
b_rerror_q <= '0;
end else begin
a_rvalid_q <= a_rvalid_d;
a_rdata_q <= a_rdata_d ;
a_rerror_q <= a_rerror_d;
b_rvalid_q <= b_rvalid_d;
b_rdata_q <= b_rdata_d ;
b_rerror_q <= b_rerror_d;
end else begin : gen_noscan
logic unused_scanmode;
assign unused_scanmode = scanmode_i;
assign clk_no = ~clk_i;
if (AsyncOn) begin : gen_async_assert
end else if (SWACCESS == "RO") begin : gen_ro
end else if (SWACCESS == "W1S") begin : gen_w1s
assign wr_en = we | de ;
assign wr_data = (de ? d : q) | (we ? wd : '0);
end else if (SWACCESS == "W1C") begin : gen_w1c
end else if (SWACCESS == "W0C") begin : gen_w0c
end else if (SWACCESS == "RC") begin : gen_rc
assign wr_en = we | de ;
assign wr_data = (de ? d : q) & (we ? '0 : '1);
end else begin : gen_hw
assign wr_en = de ;
assign wr_data = d ;
end else if (ArbiterImpl == "BINTREE") begin : gen_tree_arb
prim_arbiter_arb #(
.N (N),
.DW(ARB_DW)
.clk_i,
.rst_ni,
.req_i ( req ),
.data_i ( req_packed ),
.gnt_o ( gnt ),
.idx_o ( ),
.valid_o ( sram_req ),
.data_o ( sram_packed ),
.ready_i ( 1'b1 )
prim_diff_decode #(
.AsyncOn(AsyncOn)
.clk_i,
.rst_ni,
.diff_pi ( alert_tx_i.alert_p ),
.diff_ni ( alert_tx_i.alert_n ),
.level_o ( alert_level ),
.rise_o ( ),
.fall_o ( ),
.event_o ( ),
.sigint_o ( alert_sigint )
if (AsyncOn) begin : gen_async_assert
end else begin : gen_sync_assert
end else if (Impl == ImplXilinx) begin : gen_mem_xilinx
prim_xilinx_ram_2p #(
.Width(Width),
.Depth(Depth)
.clk_a_i,
.clk_b_i,
.a_req_i,
.a_write_i,
.a_addr_i,
.a_wdata_i,
.a_rdata_o,
.b_req_i,
.b_write_i,
.b_addr_i,
.b_wdata_i,
.b_rdata_o
end else begin : gen_failure
.I (clk_i),
.CE (en_i | test_en_i),
.O (clk_o)
end else begin : gen_failure
.S ( sel_i ),
.I0 ( clk0_i ),
.I1 ( clk1_i ),
.O ( clk_o )
.BANDWIDTH ("OPTIMIZED"),
.COMPENSATION ("ZHOLD"),
.STARTUP_WAIT ("FALSE"),
.DIVCLK_DIVIDE (1),
.CLKFBOUT_MULT (12),
.CLKFBOUT_PHASE (0.000),
.CLKOUT0_DIVIDE (24),
.CLKOUT0_PHASE (0.000),
.CLKOUT0_DUTY_CYCLE (0.500),
.CLKOUT1_DIVIDE (25),
.CLKOUT1_PHASE (0.000),
.CLKOUT1_DUTY_CYCLE (0.500),
.CLKIN1_PERIOD (10.000)
end else begin : gen_tie_off
assign is_tree[pa] = '0;
assign id_tree[pa] = '0;
assign max_tree[pa] = '0;
end : gen_level
end : gen_tree
end else begin : gen_failure
end else if (SBoxImpl == "canright") begin : gen_sbox_canright
.mode_i,
.data_i,
.data_o
end : gen_alert_tx
end : fill_w
end : compress_round
if (AsyncOn) begin : gen_async
state_e state_d, state_q;
logic diff_p_edge, diff_n_edge, diff_check_ok, level;
logic diff_pq, diff_nq, diff_pd, diff_nd;
prim_flop_2sync #(
.Width(1),
.ResetValue(0)
.clk_i,
.rst_ni,
.d(diff_pi),
.q(diff_pd)
prim_flop_2sync #(
.Width(1),
.ResetValue(1)
.clk_i,
.rst_ni,
.d(diff_ni),
.q(diff_nd)
assign diff_p_edge = diff_pq ^ diff_pd;
assign diff_n_edge = diff_nq ^ diff_nd;
assign diff_check_ok = diff_pd ^ diff_nd;
assign level = diff_pd;
assign level_o = level_d;
assign event_o = rise_o | fall_o;
always_comb begin : p_diff_fsm
state_d = state_q;
level_d = level_q;
rise_o = 1'b0;
fall_o = 1'b0;
sigint_o = 1'b0;
unique case (state_q)
IsStd: begin
if (diff_check_ok) begin
level_d = level;
if (diff_p_edge && diff_n_edge) begin
if (level) begin
rise_o = 1'b1;
end else begin
fall_o = 1'b1;
end else begin
if (diff_p_edge || diff_n_edge) begin
state_d = IsSkewed;
end else begin
state_d = SigInt;
sigint_o = 1'b1;
IsSkewed: begin
if (diff_check_ok) begin
state_d = IsStd;
level_d = level;
if (level) rise_o = 1'b1;
else fall_o = 1'b1;
end else begin
state_d = SigInt;
sigint_o = 1'b1;
SigInt: begin
sigint_o = 1'b1;
if (diff_check_ok) begin
state_d = IsStd;
sigint_o = 1'b0;
default : ;
always_ff @(posedge clk_i or negedge rst_ni) begin : p_sync_reg
if (!rst_ni) begin
state_q <= IsStd;
diff_pq <= 1'b0;
diff_nq <= 1'b1;
level_q <= 1'b0;
end else begin
state_q <= state_d;
diff_pq <= diff_pd;
diff_nq <= diff_nd;
level_q <= level_d;
if (AsyncOn) begin : gen_async_assert
// | |
};
if (tlul_pkg::ArbiterImpl == "PPC") begin : gen_arb_ppc
prim_arbiter_ppc #(
.N (M),
.DW ($bits(tlul_pkg::tl_h2d_t))
.clk_i,
.rst_ni,
.req_i ( hrequest ),
.data_i ( hreq_fifo_o ),
.gnt_o ( hgrant ),
.idx_o ( ),
.valid_o ( arb_valid ),
.data_o ( arb_data ),
.ready_i ( arb_ready )
end else if (tlul_pkg::ArbiterImpl == "BINTREE") begin : gen_tree_arb
end else if (Impl == ImplXilinx) begin : gen_xilinx
.clk_i,
.en_i,
.test_en_i,
.clk_o
end else begin : gen_failure
end else if (Impl == ImplXilinx) begin : gen_pad_xilinx
prim_xilinx_pad_wrapper #(
.AttrDw(AttrDw)
.inout_io,
.in_o,
.out_i,
.oe_i,
.attr_i
end else begin : gen_failure