From 75a80763095d1767602aa8547684b60600404edc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Domenic=20W=C3=BCthrich?=
Date: Wed, 8 Nov 2023 12:56:22 +0000
Subject: [PATCH 01/90] Update rv32v instructions to RVV v1.0

---
 src/isa/rv32v_instr.sv | 449 +++++++++++++++++++++++------------------
 1 file changed, 250 insertions(+), 199 deletions(-)

diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv
index ee0f14f4..015c51aa 100644
--- a/src/isa/rv32v_instr.sv
+++ b/src/isa/rv32v_instr.sv
@@ -15,138 +15,230 @@
  * limitations under the License.
  */
 
-// Vector CSR access instruction
-`DEFINE_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV)
-`DEFINE_INSTR(VSETVL, VSET_FORMAT, CSR, RVV)
+// 6. Configuration-Setting Instructions
+`DEFINE_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV)
+`DEFINE_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV)
+`DEFINE_INSTR(VSETVL, VSET_FORMAT, CSR, RVV)
 
-// Vector integer arithmetic instruction
-`DEFINE_VA_INSTR(VADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VRSUB, VA_FORMAT, ARITHMETIC, RVV, {VX, VI})
-`DEFINE_VA_INSTR(VWADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
-`DEFINE_VA_INSTR(VWSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
-`DEFINE_VA_INSTR(VWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
-`DEFINE_VA_INSTR(VWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
-`DEFINE_VA_INSTR(VADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM})
-`DEFINE_VA_INSTR(VMADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM, VV, VX, VI})
-`DEFINE_VA_INSTR(VSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM})
-`DEFINE_VA_INSTR(VMSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VV, VX})
-`DEFINE_VA_INSTR(VAND, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VOR, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VXOR, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSLL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VNSRL, VA_FORMAT, SHIFT, RVV, {WV, WX, WI})
-`DEFINE_VA_INSTR(VNSRA, VA_FORMAT, SHIFT, RVV, {WV, WX, WI})
-`DEFINE_VA_INSTR(VMSEQ, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VMSNE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VMSLTU, VA_FORMAT, COMPARE, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMSLT, VA_FORMAT, COMPARE, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMSLEU, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VMSLE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VMSGTU, VA_FORMAT, COMPARE, RVV, {VX, VI})
-`DEFINE_VA_INSTR(VMSGT, VA_FORMAT, COMPARE, RVV, {VX, VI})
-`DEFINE_VA_INSTR(VMINU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMAXU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMULH, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMULHU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMULHSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VDIVU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VREMU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VREM, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMULU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMULSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMACCU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMACCSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VWMACCUS, VA_FORMAT, ARITHMETIC, RVV, {VX})
-/* Quad widening is not yet supported
-`DEFINE_VA_INSTR(VQMACCU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VQMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VQMACCSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VQMACCUS, VA_FORMAT, ARITHMETIC, RVV, {VX})
-*/
-`DEFINE_VA_INSTR(VMERGE, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM})
-`DEFINE_VA_INSTR(VMV, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
 
+// 7. Vector Loads and Stores
+`DEFINE_VA_INSTR(VLE8_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLE16_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLE32_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLE64_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSE8_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSE16_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSE32_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSE64_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLM_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSM_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLSE8_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSE16_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSE32_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSE64_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSSE8_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSE16_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSE32_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSE64_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLUXEI8_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLUXEI16_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLUXEI32_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLUXEI64_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXEI8_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXEI16_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXEI32_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXEI64_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSUXEI8_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSUXEI16_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSUXEI32_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSUXEI64_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXEI8_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXEI16_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXEI32_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXEI64_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLE8FF_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLE16FF_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLE32FF_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLE64FF_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSEGE8_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSEGE16_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSEGE32_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSEGE64_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSSEGE8_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSEGE16_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSEGE32_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSEGE64_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLSSEGE8_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSSEGE16_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSSEGE32_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLSSEGE64_V, VLS_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSSSEGE8_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSSEGE16_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSSEGE32_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSSSEGE64_V, VSS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLUXSEGEI8_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLUXSEGEI16_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLUXSEGEI32_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLUXSEGEI64_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXSEGEI8_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXSEGEI16_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXSEGEI32_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLOXSEGEI64_V, VLX_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSUXSEGEI8_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSUXSEGEI16_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSUXSEGEI32_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSUXSEGEI64_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXSEGEI8_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXSEGEI16_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXSEGEI32_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSOXSEGEI64_V, VSX_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VLRE8_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLRE16_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLRE32_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VLRE64_V, VL_FORMAT, LOAD, RVV)
+`DEFINE_VA_INSTR(VSR8_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSR16_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSR32_V, VS_FORMAT, STORE, RVV)
+`DEFINE_VA_INSTR(VSR64_V, VS_FORMAT, STORE, RVV)
 
-// Vector Fixed-Point Arithmetic Instructions
-`DEFINE_VA_INSTR(VSADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VSSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VAADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VAADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VASUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VASUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
-`DEFINE_VA_INSTR(VSSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VSSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
-`DEFINE_VA_INSTR(VNCLIPU, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI})
-`DEFINE_VA_INSTR(VNCLIP, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI})
 
+// 11. Vector Integer Arithmetic Instructions
+`DEFINE_VA_INSTR(VADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VRSUB, VA_FORMAT, ARITHMETIC, RVV, {VX, VI})
+`DEFINE_VA_INSTR(VWADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
+`DEFINE_VA_INSTR(VWSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
+`DEFINE_VA_INSTR(VWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
+`DEFINE_VA_INSTR(VWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX})
+`DEFINE_VA_INSTR(VZEXT_VF2, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VZEXT_VF4, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VZEXT_VF8, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VSEXT_VF2, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VSEXT_VF4, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VSEXT_VF8, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM})
+`DEFINE_VA_INSTR(VMADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM, VV, VX, VI})
+`DEFINE_VA_INSTR(VSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM})
+`DEFINE_VA_INSTR(VMSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VV, VX})
+`DEFINE_VA_INSTR(VAND, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VXOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSLL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VNSRL, VA_FORMAT, SHIFT, RVV, {WV, WX, WI})
+`DEFINE_VA_INSTR(VNSRA, VA_FORMAT, SHIFT, RVV, {WV, WX, WI})
+`DEFINE_VA_INSTR(VMSEQ, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VMSNE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VMSLTU, VA_FORMAT, COMPARE, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMSLT, VA_FORMAT, COMPARE, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMSLEU, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VMSLE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VMSGTU, VA_FORMAT, COMPARE, RVV, {VX, VI})
+`DEFINE_VA_INSTR(VMSGT, VA_FORMAT, COMPARE, RVV, {VX, VI})
+`DEFINE_VA_INSTR(VMINU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMAXU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMULH, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMULHU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMULHSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VDIVU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VREMU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VREM, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMULU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMULSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMACCU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMACCSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VWMACCUS, VA_FORMAT, ARITHMETIC, RVV, {VX})
+`DEFINE_VA_INSTR(VMERGE, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM})
+`DEFINE_VA_INSTR(VMV_V_V, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VMV_V_X, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VMV_V_I, VA_FORMAT, ARITHMETIC, RVV)
 
-// Vector Floating-Point Instructions
-`DEFINE_VA_INSTR(VFADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFRSUB, VA_FORMAT, ARITHMETIC, RVV, {VF})
-`DEFINE_VA_INSTR(VFMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFRDIV, VA_FORMAT, ARITHMETIC, RVV, {VF})
-`DEFINE_VA_INSTR(VFWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFNMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFWNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFWMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFWNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFSQRT_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFSGNJ, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFSGNJN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VFSGNJX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VMFEQ, VA_FORMAT, COMPARE, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VMFNE, VA_FORMAT, COMPARE, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VMFLT, VA_FORMAT, COMPARE, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VMFLE, VA_FORMAT, COMPARE, RVV, {VV, VF})
-`DEFINE_VA_INSTR(VMFGT, VA_FORMAT, COMPARE, RVV, {VF})
-`DEFINE_VA_INSTR(VMFGE, VA_FORMAT, COMPARE, RVV, {VF})
-`DEFINE_VA_INSTR(VFCLASS_V,VS2_FORMAT, COMPARE, RVV)
-`DEFINE_VA_INSTR(VFMERGE, VA_FORMAT, ARITHMETIC, RVV, {VFM})
-`DEFINE_VA_INSTR(VFMV, VA_FORMAT, ARITHMETIC, RVV, {VF})
 
+// 12. Vector Fixed-Point Arithmetic Instructions
+`DEFINE_VA_INSTR(VSADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VSSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VAADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VAADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VASUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VASUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VSMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX})
+`DEFINE_VA_INSTR(VSSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VSSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VNCLIPU, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI})
+`DEFINE_VA_INSTR(VNCLIP, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI})
 
-// Vector conversion instructions
-`DEFINE_VA_INSTR(VFCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFWCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFWCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFWCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFWCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFWCVT_F_F_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFNCVT_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFNCVT_X_F_W, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFNCVT_F_XU_W, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFNCVT_F_X_W, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFNCVT_F_F_W, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFNCVT_ROD_F_F_W, VS2_FORMAT, ARITHMETIC, RVV)
 
+// 13. Vector Floating-Point Instructions
+`DEFINE_VA_INSTR(VFADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFRSUB, VA_FORMAT, ARITHMETIC, RVV, {VF})
+`DEFINE_VA_INSTR(VFWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF, WV, WF})
+`DEFINE_VA_INSTR(VFWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF, WV, WF})
+`DEFINE_VA_INSTR(VFMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFRDIV, VA_FORMAT, ARITHMETIC, RVV, {VF})
+`DEFINE_VA_INSTR(VFWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFNMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFWNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFWMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFWNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFSQRT_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFRSQRT7_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFREC7_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFSGNJ, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFSGNJN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VFSGNJX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VMFEQ, VA_FORMAT, COMPARE, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VMFNE, VA_FORMAT, COMPARE, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VMFLT, VA_FORMAT, COMPARE, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VMFLE, VA_FORMAT, COMPARE, RVV, {VV, VF})
+`DEFINE_VA_INSTR(VMFGT, VA_FORMAT, COMPARE, RVV, {VF})
+`DEFINE_VA_INSTR(VMFGE, VA_FORMAT, COMPARE, RVV, {VF})
+`DEFINE_VA_INSTR(VFCLASS_V, VS2_FORMAT, COMPARE, RVV)
+`DEFINE_VA_INSTR(VFMERGE, VA_FORMAT, ARITHMETIC, RVV, {VFM})
+`DEFINE_VA_INSTR(VFMV_V_F, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFCVT_RTZ_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFCVT_RTZ_X_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_RTZ_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_RTZ_X_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWCVT_F_F_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_X_F_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_RTZ_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_RTZ_X_F_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_F_XU_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_F_X_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_F_F_W, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFNCVT_ROD_F_F_W, VS2_FORMAT, ARITHMETIC, RVV)
 
-// Vector reduction instruction
+// 14. Vector Reduction Instructions
 `DEFINE_VA_INSTR(VREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VREDMAXU_VS, VA_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VREDMAX_VS, VA_FORMAT, ARITHMETIC, RVV)
@@ -158,22 +250,22 @@
 `DEFINE_VA_INSTR(VWREDSUMU_VS, VA_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VWREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VFREDOSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFREDUSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VFREDMAX_VS, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFREDMIN_VS, VA_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VFWREDOSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFWREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFWREDUSUM_VS, VA_FORMAT, ARITHMETIC, RVV)
 
-// Vector mask instruction
-`DEFINE_VA_INSTR(VMAND_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMNAND_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMANDNOT_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMXOR_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMOR_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMNOR_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMORNOT_MM, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMXNOR_MM, VA_FORMAT, ARITHMETIC, RVV)
-
-`DEFINE_VA_INSTR(VPOPC_M, VS2_FORMAT, ARITHMETIC, RVV)
+// 15. Vector Mask Instructions
+`DEFINE_VA_INSTR(VMAND_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMNAND_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMANDN_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMXOR_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMOR_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMNOR_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMORN_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VMXNOR_MM, VA_FORMAT, LOGICAL, RVV)
+`DEFINE_VA_INSTR(VCPOP_M, VS2_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VFIRST_M, VS2_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VMSBF_M, VS2_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VMSIF_M, VS2_FORMAT, ARITHMETIC, RVV)
@@ -181,62 +273,21 @@
 `DEFINE_VA_INSTR(VIOTA_M, VS2_FORMAT, ARITHMETIC, RVV)
 `DEFINE_VA_INSTR(VID_V, VS2_FORMAT, ARITHMETIC, RVV)
 
-// Vector permutation instruction
-`DEFINE_VA_INSTR(VMV_X_S, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMV_S_X, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFMV_F_S, VA_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VFMV_S_F, VA_FORMAT, ARITHMETIC, RVV)
-
-`DEFINE_VA_INSTR(VSLIDEUP, VA_FORMAT, ARITHMETIC, RVV, {VI,VX})
-`DEFINE_VA_INSTR(VSLIDEDOWN, VA_FORMAT, ARITHMETIC, RVV, {VI,VX})
-`DEFINE_VA_INSTR(VSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VX})
-`DEFINE_VA_INSTR(VSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VX})
-`DEFINE_VA_INSTR(VRGATHER, VA_FORMAT, ARITHMETIC, RVV, {VV,VX,VI})
-`DEFINE_VA_INSTR(VCOMPRESS, VA_FORMAT, ARITHMETIC, RVV, {VM})
-
-`DEFINE_VA_INSTR(VMV1R_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMV2R_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMV4R_V, VS2_FORMAT, ARITHMETIC, RVV)
-`DEFINE_VA_INSTR(VMV8R_V, VS2_FORMAT, ARITHMETIC, RVV)
-
-// -------------------------------------------------------------------------
-// Section 7. Vector Loads and Stores
-// -------------------------------------------------------------------------
-// Section 7.4 - Vector Unit-Stride Instructions
-`DEFINE_VA_INSTR(VLE_V, VL_FORMAT, LOAD, RVV)
-`DEFINE_VA_INSTR(VSE_V, VS_FORMAT, STORE, RVV)
-// Section 7.5 - Vector Strided Instructions
-`DEFINE_VA_INSTR(VLSE_V, VLS_FORMAT, LOAD, RVV)
-`DEFINE_VA_INSTR(VSSE_V, VSS_FORMAT, STORE, RVV)
-// Section 7.6 - Vector Indexed Instructions
-`DEFINE_VA_INSTR(VLXEI_V, VLX_FORMAT, LOAD, RVV)
-`DEFINE_VA_INSTR(VSXEI_V, VSX_FORMAT, STORE, RVV)
-`DEFINE_VA_INSTR(VSUXEI_V, VSX_FORMAT, STORE, RVV)
-// Section 7.7 - Vector Unit-Stride Fault-Only-First Loads
-`DEFINE_VA_INSTR(VLEFF_V, VL_FORMAT, LOAD, RVV)
-// Section 7.8 - Vector Load/Store Segment Instructions (Zvlsseg)
-// 7.8.1. Vector Unit Strided Segment Loads and Stores
-`DEFINE_VA_INSTR(VLSEGE_V, VL_FORMAT, LOAD, RVV, {}, "zvlsseg")
-`DEFINE_VA_INSTR(VSSEGE_V, VS_FORMAT, STORE, RVV, {}, "zvlsseg")
-`DEFINE_VA_INSTR(VLSEGEFF_V, VL_FORMAT, LOAD, RVV, {}, "zvlsseg")
-// 7.8.2. Vector Strided Segment Loads and Stores
-`DEFINE_VA_INSTR(VLSSEGE_V, VLS_FORMAT, LOAD, RVV, {}, "zvlsseg")
-`DEFINE_VA_INSTR(VSSSEGE_V, VSS_FORMAT, STORE, RVV, {}, "zvlsseg")
-// 7.8.3. Vector Indexed Segment Loads and Stores
-`DEFINE_VA_INSTR(VLXSEGEI_V, VLX_FORMAT, LOAD, RVV, {}, "zvlsseg")
-`DEFINE_VA_INSTR(VSXSEGEI_V, VSX_FORMAT, STORE, RVV, {}, "zvlsseg")
-`DEFINE_VA_INSTR(VSUXSEGEI_V, VSX_FORMAT, STORE, RVV, {}, "zvlsseg")
-
-// -------------------------------------------------------------------------
-// Section 8. Vector AMO Operations (Zvamo)
-// -------------------------------------------------------------------------
-// EEW vector AMOs
-`DEFINE_VA_INSTR(VAMOSWAPE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOADDE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOXORE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOANDE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOORE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOMINE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOMAXE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOMINUE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
-`DEFINE_VA_INSTR(VAMOMAXUE_V, VAMO_FORMAT, AMO, RVV, {}, "zvamo")
+// 16. Vector Permutation Instructions
+`DEFINE_VA_INSTR(VMV_X_S, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VMV_S_X, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFMV_F_S, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VFMV_S_F, VA_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VSLIDEUP, VA_FORMAT, ARITHMETIC, RVV, {VX, VI})
+`DEFINE_VA_INSTR(VSLIDEDOWN, VA_FORMAT, ARITHMETIC, RVV, {VX, VI})
+`DEFINE_VA_INSTR(VSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VX})
+`DEFINE_VA_INSTR(VSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VX})
+`DEFINE_VA_INSTR(VFSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VF})
+`DEFINE_VA_INSTR(VFSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VF})
+`DEFINE_VA_INSTR(VRGATHER, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI})
+`DEFINE_VA_INSTR(VRGATHEREI16, VA_FORMAT, ARITHMETIC, RVV, {VV})
+`DEFINE_VA_INSTR(VCOMPRESS, VA_FORMAT, ARITHMETIC, RVV, {VM})
+`DEFINE_VA_INSTR(VMV1R_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VMV2R_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VMV4R_V, VS2_FORMAT, ARITHMETIC, RVV)
+`DEFINE_VA_INSTR(VMV8R_V, VS2_FORMAT, ARITHMETIC, RVV)

From cb69c9dc03ca8a82ce2dbb3e45cf911af506aca6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Domenic=20W=C3=BCthrich?=
Date: Wed, 8 Nov 2023 13:20:41 +0000
Subject: [PATCH 02/90] Update riscv instruction category type with new RVV
 instructions

---
 src/riscv_instr_pkg.sv | 151 ++++++++++++++++++++++++++++++-----------
 1 file changed, 113 insertions(+), 38 deletions(-)

diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv
index 96207667..7670bc8d 100644
--- a/src/riscv_instr_pkg.sv
+++ b/src/riscv_instr_pkg.sv
@@ -443,9 +443,88 @@ package riscv_instr_pkg;
     AMOMAX_D,
     AMOMINU_D,
     AMOMAXU_D,
-    // Vector instructions
-    VSETVL,
+    // RVV
     VSETVLI,
+    VSETIVLI,
+    VSETVL,
+    VLE8_V,
+    VLE16_V,
+    VLE32_V,
+    VLE64_V,
+    VSE8_V,
+    VSE16_V,
+    VSE32_V,
+    VSE64_V,
+    VLM_V,
+    VSM_V,
+    VLSE8_V,
+    VLSE16_V,
+    VLSE32_V,
+    VLSE64_V,
+    VSSE8_V,
+    VSSE16_V,
+    VSSE32_V,
+    VSSE64_V,
+    VLUXEI8_V,
+    VLUXEI16_V,
+    VLUXEI32_V,
+    VLUXEI64_V,
+    VLOXEI8_V,
+    VLOXEI16_V,
+    VLOXEI32_V,
+    VLOXEI64_V,
+    VSUXEI8_V,
+    VSUXEI16_V,
+    VSUXEI32_V,
+    VSUXEI64_V,
+    VSOXEI8_V,
+    VSOXEI16_V,
+    VSOXEI32_V,
+    VSOXEI64_V,
+    VLE8FF_V,
+    VLE16FF_V,
+    VLE32FF_V,
+    VLE64FF_V,
+    VLSEGE8_V,
+    VLSEGE16_V,
+    VLSEGE32_V,
+    VLSEGE64_V,
+    VSSEGE8_V,
+    VSSEGE16_V,
+    VSSEGE32_V,
+    VSSEGE64_V,
+    VLSSEGE8_V,
+    VLSSEGE16_V,
+    VLSSEGE32_V,
+    VLSSEGE64_V,
+    VSSSEGE8_V,
+    VSSSEGE16_V,
+    VSSSEGE32_V,
+    VSSSEGE64_V,
+    VLUXSEGEI8_V,
+    VLUXSEGEI16_V,
+    VLUXSEGEI32_V,
+    VLUXSEGEI64_V,
+    VLOXSEGEI8_V,
+    VLOXSEGEI16_V,
+    VLOXSEGEI32_V,
+    VLOXSEGEI64_V,
+    VSUXSEGEI8_V,
+    VSUXSEGEI16_V,
+    VSUXSEGEI32_V,
+    VSUXSEGEI64_V,
+    VSOXSEGEI8_V,
+    VSOXSEGEI16_V,
+    VSOXSEGEI32_V,
+    VSOXSEGEI64_V,
+    VLRE8_V,
+    VLRE16_V,
+    VLRE32_V,
+    VLRE64_V,
+    VSR8_V,
+    VSR16_V,
+    VSR32_V,
+    VSR64_V,
     VADD,
     VSUB,
     VRSUB,
@@ -453,6 +532,12 @@ package riscv_instr_pkg;
     VWSUBU,
     VWADD,
    VWSUB,
+    VZEXT_VF2,
+    VZEXT_VF4,
+    VZEXT_VF8,
+    VSEXT_VF2,
+    VSEXT_VF4,
+    VSEXT_VF8,
     VADC,
     VMADC,
     VSBC,
@@ -496,12 +581,10 @@ package riscv_instr_pkg;
     VWMACC,
     VWMACCSU,
     VWMACCUS,
-    //VQMACCU,
-    //VQMACC,
-    //VQMACCSU,
-    //VQMACCUS,
     VMERGE,
-    VMV,
+    VMV_V_V,
+    VMV_V_X,
+    VMV_V_I,
     VSADDU,
     VSADD,
     VSSUBU,
@@ -510,14 +593,16 @@ package riscv_instr_pkg;
     VAADD,
     VASUBU,
     VASUB,
+    VSMUL,
     VSSRL,
     VSSRA,
     VNCLIPU,
     VNCLIP,
-    // 14. 
Vector Floating-Point Instructions VFADD, VFSUB, VFRSUB, + VFWADD, + VFWSUB, VFMUL, VFDIV, VFRDIV, @@ -535,6 +620,8 @@ package riscv_instr_pkg; VFWMSAC, VFWNMSAC, VFSQRT_V, + VFRSQRT7_V, + VFREC7_V, VFMIN, VFMAX, VFSGNJ, @@ -548,23 +635,28 @@ package riscv_instr_pkg; VMFGE, VFCLASS_V, VFMERGE, - VFMV, + VFMV_V_F, VFCVT_XU_F_V, VFCVT_X_F_V, + VFCVT_RTZ_XU_F_V, + VFCVT_RTZ_X_F_V, VFCVT_F_XU_V, VFCVT_F_X_V, VFWCVT_XU_F_V, VFWCVT_X_F_V, + VFWCVT_RTZ_XU_F_V, + VFWCVT_RTZ_X_F_V, VFWCVT_F_XU_V, VFWCVT_F_X_V, VFWCVT_F_F_V, VFNCVT_XU_F_W, VFNCVT_X_F_W, + VFNCVT_RTZ_XU_F_W, + VFNCVT_RTZ_X_F_W, VFNCVT_F_XU_W, VFNCVT_F_X_W, VFNCVT_F_F_W, VFNCVT_ROD_F_F_W, - // 15. Vector reduction instruction VREDSUM_VS, VREDMAXU_VS, VREDMAX_VS, @@ -576,27 +668,26 @@ package riscv_instr_pkg; VWREDSUMU_VS, VWREDSUM_VS, VFREDOSUM_VS, - VFREDSUM_VS, + VFREDUSUM_VS, VFREDMAX_VS, + VFREDMIN_VS, VFWREDOSUM_VS, - VFWREDSUM_VS, - // Vector mask instruction + VFWREDUSUM_VS, VMAND_MM, VMNAND_MM, - VMANDNOT_MM, + VMANDN_MM, VMXOR_MM, VMOR_MM, VMNOR_MM, - VMORNOT_MM, + VMORN_MM, VMXNOR_MM, - VPOPC_M, + VCPOP_M, VFIRST_M, VMSBF_M, VMSIF_M, VMSOF_M, VIOTA_M, VID_V, - // Vector permutation instruction VMV_X_S, VMV_S_X, VFMV_F_S, @@ -605,30 +696,15 @@ package riscv_instr_pkg; VSLIDEDOWN, VSLIDE1UP, VSLIDE1DOWN, + VFSLIDE1UP, + VFSLIDE1DOWN, VRGATHER, + VRGATHEREI16, VCOMPRESS, VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V, - // Vector load/store instruction - VLE_V, - VSE_V, - VLSE_V, - VSSE_V, - VLXEI_V, - VSXEI_V, - VSUXEI_V, - VLEFF_V, - // Segmented load/store instruction - VLSEGE_V, - VSSEGE_V, - VLSEGEFF_V, - VLSSEGE_V, - VSSSEGE_V, - VLXSEGEI_V, - VSXSEGEI_V, - VSUXSEGEI_V, // Vector AMO instruction // EEW vector AMOs VAMOSWAPE_V, @@ -716,6 +792,7 @@ package riscv_instr_pkg; WV, WI, WX, + WF, VVM, VIM, VXM, @@ -740,7 +817,6 @@ package riscv_instr_pkg; CHANGELEVEL, TRAP, INTERRUPT, - `VECTOR_INCLUDE("riscv_instr_pkg_inc_riscv_instr_category_t.sv") AMO // (last one) } riscv_instr_category_t; @@ -1283,6 +1359,7 @@ package riscv_instr_pkg; bit [XLEN-2:7] reserved; int vediv; int vsew; + bit fractional_lmul; int vlmul; } vtype_t; @@ -1307,8 +1384,6 @@ package riscv_instr_pkg; ZB_TMP // for uncategorized instructions } b_ext_group_t; - `VECTOR_INCLUDE("riscv_instr_pkg_inc_variables.sv") - typedef bit [15:0] program_id_t; // xSTATUS bit mask From d63e2f70a24a5d3769e0dd053af163837f50cc3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 8 Nov 2023 13:32:31 +0000 Subject: [PATCH 03/90] Update vector CSR addresses and types --- src/riscv_instr_pkg.sv | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index 7670bc8d..87569c45 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -1169,8 +1169,9 @@ package riscv_instr_pkg; DSCRATCH0 = 'h7B2, // Debug scratch register DSCRATCH1 = 'h7B3, // Debug scratch register (last one) VSTART = 'h008, // Vector start position - VXSTAT = 'h009, // Fixed point saturate flag + VXSAT = 'h009, // Fixed point saturate flag VXRM = 'h00A, // Fixed point rounding mode + VCSR = 'h00F, // Vector control and status register VL = 'hC20, // Vector length VTYPE = 'hC21, // Vector data type register VLENB = 'hC22 // VLEN/8 (vector register length in bytes) @@ -1355,9 +1356,8 @@ package riscv_instr_pkg; typedef struct packed { bit ill; - bit fractional_lmul; - bit [XLEN-2:7] reserved; - int vediv; + bit vma; + bit vta; int vsew; bit fractional_lmul; int vlmul; @@ -1370,6 +1370,11 @@ package riscv_instr_pkg; 
RoundToOdd } vxrm_t; + typedef struct packed { + vxrm_t vxrm; + bit vxsat; + } vcsr_t; + typedef enum int { ZBA, ZBB, From 5a146c9a489ff3eafae9cb2537e58517b091a00b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 11:16:34 +0000 Subject: [PATCH 04/90] Remove reference to obsolete vediv vector configuration --- src/riscv_asm_program_gen.sv | 43 +++++++++++++++++------------------- src/riscv_vector_cfg.sv | 9 ++------ 2 files changed, 22 insertions(+), 30 deletions(-) diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index 4c969241..0ae268b6 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -543,19 +543,21 @@ class riscv_asm_program_gen extends uvm_object; // Initialize vector general purpose registers virtual function void init_vec_gpr(); - int SEW; - int LMUL; - int EDIV = 1; - int len = (ELEN <= XLEN) ? ELEN : XLEN; - int num_elements = VLEN / len; + int SEW = (ELEN <= XLEN) ? ELEN : XLEN; + int LMUL = 1; + int num_elements = VLEN / SEW; + + // Do not init vector registers if RVV is not enabled if (!(RVV inside {supported_isa})) return; - LMUL = 1; - SEW = (ELEN <= XLEN) ? ELEN : XLEN; - instr_stream.push_back($sformatf("li x%0d, %0d", cfg.gpr[1], cfg.vector_cfg.vl)); - instr_stream.push_back($sformatf("%svsetvli x%0d, x%0d, e%0d, m%0d, d%0d", - indent, cfg.gpr[0], cfg.gpr[1], SEW, LMUL, EDIV)); + + // Create RVV init label instr_stream.push_back("vec_reg_init:"); + // Set vector configuration + instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], num_elements)); + instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, m%0d, ta, ma", + indent, cfg.gpr[0], cfg.gpr[1], SEW, LMUL)); + // Vector registers will be initialized using one of the following three methods case (cfg.vreg_init_method) SAME_VALUES_ALL_ELEMS: begin @@ -566,16 +568,10 @@ class riscv_asm_program_gen extends uvm_object; RANDOM_VALUES_VMV: begin for (int v = 0; v < NUM_VEC_GPR; v++) begin for (int e = 0; e < num_elements; e++) begin - if (e > 0) instr_stream.push_back($sformatf("%0svmv.v.v v0, v%0d", indent, v)); instr_stream.push_back($sformatf("%0sli x%0d, 0x%0x", indent, cfg.gpr[0], $urandom_range(0, 2 ** SEW - 1))); - if (v > 0) begin - instr_stream.push_back($sformatf("%0svslide1up.vx v%0d, v0, x%0d", - indent, v, cfg.gpr[0])); - end else begin - instr_stream.push_back($sformatf("%0svslide1up.vx v%0d, v1, x%0d", - indent, v, cfg.gpr[0])); - end + instr_stream.push_back($sformatf("%0svslide1down.vx v%0d, v%0d, x%0d", + indent, v, v, cfg.gpr[0])); end end end @@ -590,8 +586,8 @@ class riscv_asm_program_gen extends uvm_object; for (int v = 0; v < NUM_VEC_GPR; v++) begin int region = $urandom_range(0, valid_mem_region.size()-1); - instr_stream.push_back($sformatf("%0sla t0, %0s", indent, valid_mem_region[region].name)); - instr_stream.push_back($sformatf("%0svle.v v%0d, (t0)", indent, v)); + instr_stream.push_back($sformatf("%0sla x%0s, %0s", indent, cfg.gpr[0], valid_mem_region[region].name)); + instr_stream.push_back($sformatf("%0svle%0s.v v%0d, (x%0s)", indent, SEW, v, cfg.gpr[0])); end end endcase @@ -1627,19 +1623,20 @@ class riscv_asm_program_gen extends uvm_object; instr_stream.push_back({indent, $sformatf("csrwi vxsat, %0d", cfg.vector_cfg.vxsat)}); instr_stream.push_back({indent, $sformatf("csrwi vxrm, %0d", cfg.vector_cfg.vxrm)}); init_vec_gpr(); // GPR init uses a temporary SEW/LMUL setting before the final value set below. 
- instr_stream.push_back($sformatf("li x%0d, %0d", cfg.gpr[1], cfg.vector_cfg.vl)); + instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], cfg.vector_cfg.vl)); if ((cfg.vector_cfg.vtype.vlmul > 1) && (cfg.vector_cfg.vtype.fractional_lmul)) begin lmul = $sformatf("mf%0d", cfg.vector_cfg.vtype.vlmul); end else begin lmul = $sformatf("m%0d", cfg.vector_cfg.vtype.vlmul); end - instr_stream.push_back($sformatf("%svsetvli x%0d, x%0d, e%0d, %0s, d%0d", + instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, %0s, %0s, %0s", indent, cfg.gpr[0], cfg.gpr[1], cfg.vector_cfg.vtype.vsew, lmul, - cfg.vector_cfg.vtype.vediv)); + cfg.vector_cfg.vtype.vta ? "ta" : "tu", + cfg.vector_cfg.vtype.vma ? "ma" : "mu")); endfunction endclass diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 7bda5504..4a860209 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -74,7 +74,6 @@ class riscv_vector_cfg extends uvm_object; constraint bringup_c { vstart == 0; vl == VLEN/vtype.vsew; - vtype.vediv == 1; } // For all widening instructions, the destination element width must be a supported element @@ -103,14 +102,10 @@ class riscv_vector_cfg extends uvm_object; enable_zvlsseg -> (vtype.vlmul < 8); } - constraint vdeiv_c { - vtype.vediv inside {1, 2, 4, 8}; - vtype.vediv <= (vtype.vsew / SELEN); - } - `uvm_object_utils_begin(riscv_vector_cfg) `uvm_field_int(vtype.ill, UVM_DEFAULT) - `uvm_field_int(vtype.vediv, UVM_DEFAULT) + `uvm_field_int(vtype.vma, UVM_DEFAULT) + `uvm_field_int(vtype.vta, UVM_DEFAULT) `uvm_field_int(vtype.vsew, UVM_DEFAULT) `uvm_field_int(vtype.vlmul, UVM_DEFAULT) `uvm_field_int(vtype.fractional_lmul, UVM_DEFAULT) From bd392e37c1059f9ef9c46f358432b95e0c650ccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 11:17:28 +0000 Subject: [PATCH 05/90] Fix vslideup instruction constraints --- src/isa/riscv_vector_instr.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 7dd76425..2225b760 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -151,8 +151,8 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Section 17.3 Vector Slide Instructions // The destination vector register group for vslideup cannot overlap the vector register // group of the source vector register group or the mask register - constraint vector_slide_c { - if (instr_name inside {VSLIDEUP, VSLIDE1UP, VSLIDEDOWN, VSLIDE1DOWN}) { + constraint vector_slideup_c { + if (instr_name inside {VSLIDEUP, VSLIDE1UP, VFSLIDE1UP}) { vd != vs2; vd != vs1; (vm == 0) -> (vd != 0); From 4346f5062e15ac3363855028df7585d6cad047ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 11:18:45 +0000 Subject: [PATCH 06/90] Change to new VMV instruction names --- src/isa/riscv_vector_instr.sv | 97 +++++++++++++++++------------------ src/riscv_instr_stream.sv | 2 +- 2 files changed, 47 insertions(+), 52 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 2225b760..5d19fa42 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -236,8 +236,10 @@ class riscv_vector_instr extends riscv_floating_point_instr; constraint vector_mask_disable_c { // (vm=0) is reserved for below ops - if (instr_name inside {VMV, VFMV, VCOMPRESS, VFMV_F_S, VFMV_S_F, VMV_X_S, VMV_S_X, - VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V}) { + if (instr_name inside {VMV_V_V, 
VMV_V_X, VMV_V_I, VFMV_V_F, + VFMV_F_S, VFMV_S_F, VMV_X_S, VMV_S_X, + VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V, + VCOMPRESS}) { vm == 1'b1; } } @@ -361,64 +363,57 @@ class riscv_vector_instr extends riscv_floating_point_instr; VS2_FORMAT: begin if (instr_name == VID_V) begin asm_str = $sformatf("vid.v %s", vd.name()); - end else if (instr_name inside {VPOPC_M, VFIRST_M}) begin + end else if (instr_name inside {VCPOP_M, VFIRST_M}) begin asm_str = $sformatf("%0s %0s,%0s", get_instr_name(), rd.name(), vs2.name()); end else begin asm_str = $sformatf("%0s %0s,%0s", get_instr_name(), vd.name(), vs2.name()); end end VA_FORMAT: begin - if (instr_name == VMV) begin - case (va_variant) - VV: asm_str = $sformatf("vmv.v.v %s,%s", vd.name(), vs1.name()); - VX: asm_str = $sformatf("vmv.v.x %s,%s", vd.name(), rs1.name()); - VI: asm_str = $sformatf("vmv.v.i %s,%s", vd.name(), imm_str); - default: `uvm_info(`gfn, $sformatf("Unsupported va_variant %0s", va_variant), UVM_LOW) - endcase - end else if (instr_name == VFMV) begin - asm_str = $sformatf("vfmv.v.f %s,%s", vd.name(), fs1.name()); - end else if (instr_name == VMV_X_S) begin - asm_str = $sformatf("vmv.x.s %s,%s", rd.name(), vs2.name()); - end else if (instr_name == VMV_S_X) begin - asm_str = $sformatf("vmv.s.x %s,%s", vd.name(), rs1.name()); - end else if (instr_name == VFMV_F_S) begin - asm_str = $sformatf("vfmv.f.s %s,%s", fd.name(), vs2.name()); - end else if (instr_name == VFMV_S_F) begin - asm_str = $sformatf("vfmv.s.f %s,%s", vd.name(), fs1.name()); - end else begin - if (!has_va_variant) begin - asm_str = $sformatf("%0s ", get_instr_name()); - asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), vs1.name())}; - end else begin - asm_str = $sformatf("%0s.%0s ", get_instr_name(), va_variant.name()); - asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); - case (va_variant) inside - WV, VV, VVM, VM: begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), vs1.name())}; - end - WI, VI, VIM: begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), imm_str)}; - end - VF, VFM: begin - if (instr_name inside {VFMADD, VFNMADD, VFMACC, VFNMACC, VFNMSUB, VFWNMSAC, - VFWMACC, VFMSUB, VFMSAC, VFNMSAC, VFWNMACC, VFWMSAC}) begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), fs1.name(), vs2.name())}; - end else begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), fs1.name())}; + case (instr_name) + VMV_V_V: asm_str = $sformatf("vmv.v.v %s,%s", vd.name(), vs1.name()); + VMV_V_X: asm_str = $sformatf("vmv.v.x %s,%s", vd.name(), rs1.name()); + VMV_V_I: asm_str = $sformatf("vmv.v.i %s,%s", vd.name(), imm_str); + VFMV_V_F: asm_str = $sformatf("vfmv.v.f %s,%s", vd.name(), fs1.name()); + VMV_X_S: asm_str = $sformatf("vmv.x.s %s,%s", rd.name(), vs2.name()); + VMV_S_X: asm_str = $sformatf("vmv.s.x %s,%s", vd.name(), rs1.name()); + VFMV_F_S: asm_str = $sformatf("vfmv.f.s %s,%s", fd.name(), vs2.name()); + VFMV_S_F: asm_str = $sformatf("vfmv.s.f %s,%s", vd.name(), fs1.name()); + default: begin + if (!has_va_variant) begin + asm_str = $sformatf("%0s ", get_instr_name()); + asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); + asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), vs1.name())}; + end else begin + asm_str = $sformatf("%0s.%0s ", get_instr_name(), va_variant.name()); + asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); + case (va_variant) inside + WV, VV, VVM, VM: begin + asm_str = {asm_str, 
$sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), vs1.name())}; + end + WI, VI, VIM: begin + asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), imm_str)}; end - end - WX, VX, VXM: begin - if (instr_name inside {VMADD, VNMSUB, VMACC, VNMSAC, VWMACCSU, VWMACCU, - VWMACCUS, VWMACC}) begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), rs1.name(), vs2.name())}; - end else begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), rs1.name())}; + VF, VFM: begin + if (instr_name inside {VFMADD, VFNMADD, VFMACC, VFNMACC, VFNMSUB, VFWNMSAC, + VFWMACC, VFMSUB, VFMSAC, VFNMSAC, VFWNMACC, VFWMSAC}) begin + asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), fs1.name(), vs2.name())}; + end else begin + asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), fs1.name())}; + end end - end - endcase + WX, VX, VXM: begin + if (instr_name inside {VMADD, VNMSUB, VMACC, VNMSAC, VWMACCSU, VWMACCU, + VWMACCUS, VWMACC}) begin + asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), rs1.name(), vs2.name())}; + end else begin + asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), rs1.name())}; + end + end + endcase + end end - end + endcase end VL_FORMAT: begin if (sub_extension == "zvlsseg") begin diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 9e980da8..6b8bc843 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -299,7 +299,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; function void add_init_vector_gpr_instr(riscv_vreg_t gpr, bit [XLEN-1:0] val); riscv_vector_instr instr; - $cast(instr, riscv_instr::get_instr(VMV)); + $cast(instr, riscv_instr::get_instr(VMV_V_V)); instr.m_cfg = cfg; instr.avoid_reserved_vregs_c.constraint_mode(0); `DV_CHECK_RANDOMIZE_WITH_FATAL(instr, From c48ff5017b32fe2a8f08994b967b6bed94d6301e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 11:21:14 +0000 Subject: [PATCH 07/90] Clean up masked vector instruction ASM --- src/isa/riscv_vector_instr.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 5d19fa42..e7329d61 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -542,9 +542,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; return ""; end else begin if (instr_name inside {VMERGE, VFMERGE, VADC, VSBC, VMADC, VMSBC}) begin - return ",v0"; + return ", v0"; end else begin - return ",v0.t"; + return ", v0.t"; end end endfunction From d3525971c3acba1b4c751c416d8a8586aceb74b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 12:29:40 +0000 Subject: [PATCH 08/90] Constrain set first vector instructions --- src/isa/riscv_vector_instr.sv | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index e7329d61..5bc1cfca 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -252,6 +252,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } + // 14.5, 14.6, 14.7. The destination register cannot overlap the + // source register and, if masked, cannot overlap the mask register ('v0'). 
+ constraint vector_set_first_c { + if (instr_name inside {VMSBF_M, VMSIF_M, VMSOF_M}) { + vd != vs2; + (vm == 0) -> vd != 0; + } + } + constraint disable_floating_point_varaint_c { if (!m_cfg.vector_cfg.vec_fp) { va_variant != VF; From 40583369c3f4411a35c04f70bc27945bf6babea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 14:36:12 +0000 Subject: [PATCH 09/90] Constrain v[z|s]ext instructions --- src/isa/riscv_vector_instr.sv | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 5bc1cfca..9d19cd02 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -37,6 +37,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit is_narrowing_instr; bit is_quad_widening_instr; bit is_convert_instr; + int ext_widening_factor = 1; va_variant_t allowed_va_variants[$]; string sub_extension; rand bit [2:0] nfields; // Used by segmented load/store @@ -100,6 +101,24 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } + // 11.3. Vector Integer Extension + constraint integer_extension_c { + if (instr_name inside {VZEXT_VF2, VZEXT_VF4, VZEXT_VF8, + VSEXT_VF2, VSEXT_VF4, VSEXT_VF8}) { + // VD needs to be LMUL aligned + vd % m_cfg.vector_cfg.vtype.vlmul == 0; + if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor >= 1) { + // VS2 needs to be LMUL/ext_widening_factor aligned + vs2 % (m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor) == 0; + // VS2 can only overlap last ext_widening_factor'th of VD + !(vs2 inside {[vd : vd + ((m_cfg.vector_cfg.vtype.vlmul-1) * ext_widening_factor - 1)]}); + } else { + // If source has fractional LMUL, VD and VS2 cannot overlap + vs2 != vd; + } + } + } + // 12.3. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions constraint add_sub_with_carry_c { if (m_cfg.vector_cfg.vtype.vlmul > 1) { @@ -524,9 +543,8 @@ class riscv_vector_instr extends riscv_floating_point_instr; if ((name.substr(0, 1) == "VW") || (name.substr(0, 2) == "VFW")) begin is_widening_instr = 1'b1; end - if (name.substr(0, 2) == "VQW") begin - is_quad_widening_instr = 1'b1; - is_widening_instr = 1'b1; + if (uvm_is_match("V[SZ]EXT_VF[248]", name)) begin + ext_widening_factor = name.substr(name.len()-2, name.len()-1).atoi(); end if ((name.substr(0, 1) == "VN") || (name.substr(0, 2) == "VFN")) begin is_narrowing_instr = 1'b1; From cf7566bf106e17febc79cd59d722ccc4458c4adf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 14:36:46 +0000 Subject: [PATCH 10/90] Constrain vrgatherei16 instruction --- src/isa/riscv_vector_instr.sv | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 9d19cd02..e8fc962e 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -178,15 +178,22 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - // Section 17.4: Vector Register Gather Instruction + // Section 16.4: Vector Register Gather Instruction // For any vrgather instruction, the destination vector register group cannot overlap // with the source vector register group + // The vrgatherei16.vv form uses SEW/LMUL for the data in vs2 but EEW=16 and + // EMUL = (16/SEW)*LMUL for the indices in vs1. 
constraint vector_gather_c { - if (instr_name == VRGATHER) { + if (instr_name inside {VRGATHER, VRGATHEREI16}) { vd != vs2; vd != vs1; (vm == 0) -> (vd != 0); } + if (instr_name == VRGATHEREI16) { + if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vsew == 8) { + vs1 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; + } + } } // Section 17.5: Vector compress instruction From 5b79faeb8f8eb44129fe8d632c20d54abe1f8436 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 14:40:19 +0000 Subject: [PATCH 11/90] Remove unused wd vector instr signal --- src/isa/riscv_vector_instr.sv | 1 - 1 file changed, 1 deletion(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index e8fc962e..b4bcdf29 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -25,7 +25,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; rand riscv_vreg_t vd; rand va_variant_t va_variant; rand bit vm; - rand bit wd; rand bit [10:0] eew; bit has_vd = 1'b1; bit has_vs1 = 1'b1; From 6380a36ba0d773c9478e9e56f9f5aef8dc8afc6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 14:52:46 +0000 Subject: [PATCH 12/90] Remove reference to vector quad widening instructions --- src/isa/riscv_vector_instr.sv | 4 ---- src/riscv_vector_cfg.sv | 13 ------------- 2 files changed, 17 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index b4bcdf29..fa655af4 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -34,7 +34,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit has_va_variant; bit is_widening_instr; bit is_narrowing_instr; - bit is_quad_widening_instr; bit is_convert_instr; int ext_widening_factor = 1; va_variant_t allowed_va_variants[$]; @@ -355,9 +354,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; (is_widening_instr || is_narrowing_instr)) begin return 1'b0; end - if (!cfg.vector_cfg.vec_quad_widening && is_quad_widening_instr) begin - return 1'b0; - end // TODO: Clean up this list, it's causing gcc compile error now if (instr_name inside {VWMACCSU, VMERGE, VFMERGE, VMADC, VMSBC}) begin return 1'b0; diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 4a860209..2041f1a6 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -37,15 +37,6 @@ class riscv_vector_cfg extends uvm_object; // Allow vector narrowing or widening instructions. rand bit vec_narrowing_widening; - // Allow vector quad-widening instructions. - rand bit vec_quad_widening; - - constraint vec_quad_widening_c { - (!vec_narrowing_widening) -> (!vec_quad_widening); - // FP requires at least 16 bits and quad-widening requires no more than ELEN/4 bits. 
- (ELEN < 64) -> (!(vec_fp && vec_quad_widening)); - } - rand bit allow_illegal_vec_instr; constraint allow_illegal_vec_instr_c {soft allow_illegal_vec_instr == 0;} @@ -84,9 +75,6 @@ class riscv_vector_cfg extends uvm_object; if (vec_narrowing_widening) { (vtype.vlmul < 8) || (vtype.fractional_lmul == 1'b1); } - if (vec_quad_widening) { - (vtype.vlmul < 4) || (vtype.fractional_lmul == 1'b1); - } } constraint vsew_c { @@ -95,7 +83,6 @@ class riscv_vector_cfg extends uvm_object; // TODO: Determine the legal range of floating point format if (vec_fp) {vtype.vsew inside {32};} if (vec_narrowing_widening) {vtype.vsew < ELEN;} - if (vec_quad_widening) {vtype.vsew < (ELEN >> 1);} } constraint vseg_c { From 648333afb9e9b8d7aaf7d29aaa0cd6eeab7e3470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 9 Nov 2023 14:55:43 +0000 Subject: [PATCH 13/90] Mark v[z|s]ext instructions unsupported on mismatching LMUL/SEW --- src/isa/riscv_vector_instr.sv | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index fa655af4..5ac4cb9a 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -367,6 +367,23 @@ class riscv_vector_instr extends riscv_floating_point_instr; return 1'b0; end end + if (instr_name inside {VZEXT_VF8, VSEXT_VF8}) begin + if (cfg.vector_cfg.vtype.vsew < 64 || cfg.vector_cfg.vtype.fractional_lmul) begin + return 1'b0; + end + end + if (instr_name inside {VZEXT_VF4, VSEXT_VF4}) begin + if (cfg.vector_cfg.vtype.vsew < 32 || + (cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul > 2)) begin + return 1'b0; + end + end + if (instr_name inside {VZEXT_VF2, VSEXT_VF2}) begin + if (cfg.vector_cfg.vtype.vsew < 16 || + (cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul > 4)) begin + return 1'b0; + end + end return 1'b1; endfunction From c54656499536ba4fc0cd296b06be5c78a2fd04d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 10 Nov 2023 06:55:41 +0000 Subject: [PATCH 14/90] Constrain VMADC and VMSBC instructions --- src/isa/riscv_vector_instr.sv | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 5ac4cb9a..126afa1e 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -252,20 +252,26 @@ class riscv_vector_instr extends riscv_floating_point_instr; } constraint vector_mask_enable_c { - // Below instruction is always masked + // Instructions that require vm=0 if (instr_name inside {VMERGE, VFMERGE, VADC, VSBC}) { vm == 1'b0; } + if (instr_name inside {VMADC, VMSBC} && va_variant inside {VVM, VXM, VIM}) { + vm == 1'b0; + } } constraint vector_mask_disable_c { - // (vm=0) is reserved for below ops + // Instructions that require vm=1 if (instr_name inside {VMV_V_V, VMV_V_X, VMV_V_I, VFMV_V_F, VFMV_F_S, VFMV_S_F, VMV_X_S, VMV_S_X, VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V, VCOMPRESS}) { vm == 1'b1; } + if (instr_name inside {VMADC, VMSBC} && va_variant inside {VV, VX, VI}) { + vm == 1'b1; + } } // 16.1. 
Vector Mask-Register Logical Instructions From 0789b00ff693b7d666b87f2825846445aac5381f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 10 Nov 2023 06:57:08 +0000 Subject: [PATCH 15/90] Print v0 on vector mask enabled only when required --- src/isa/riscv_vector_instr.sv | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 126afa1e..74432299 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -593,7 +593,8 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (vm) begin return ""; end else begin - if (instr_name inside {VMERGE, VFMERGE, VADC, VSBC, VMADC, VMSBC}) begin + if (instr_name inside {VMERGE, VFMERGE, VADC, VSBC, VMADC, VMSBC} && + va_variant inside {VVM, VXM, VIM, VFM}) begin return ", v0"; end else begin return ", v0.t"; From dd4868d9186d96fd83a57e252ac46c8b5af94c1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 10 Nov 2023 09:02:53 +0000 Subject: [PATCH 16/90] Update v0 overlap constraint for masked vector instr --- src/isa/riscv_vector_instr.sv | 44 ++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 74432299..e5f68edf 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -31,10 +31,12 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit has_vs2 = 1'b1; bit has_vs3 = 1'b1; bit has_vm = 1'b0; - bit has_va_variant; - bit is_widening_instr; - bit is_narrowing_instr; - bit is_convert_instr; + bit has_va_variant = 1'b0; + bit is_widening_instr = 1'b0; + bit is_narrowing_instr = 1'b0; + bit is_convert_instr = 1'b0; + bit is_reduction_instr = 1'b0; + bit is_mask_producing_instr = 1'b0; int ext_widening_factor = 1; va_variant_t allowed_va_variants[$]; string sub_extension; @@ -244,11 +246,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; /////////////////// Vector mask constraint /////////////////// - // Section 5.3 - // The destination vector register group for a masked vector instruction can only overlap - // the source mask register (v0) when LMUL=1 - constraint vmask_overlap_c { - (vm == 0) && (m_cfg.vector_cfg.vtype.vlmul > 1) -> (vd != 0); + // 5.3 Vector Masking + // The destination vector register group for a masked vector instruction cannot overlap + // the source mask register (v0), unless the destination vector register is being written + // with a mask value (e.g., compares) or the scalar result of a reduction. These + // instruction encodings are reserved. 
+ constraint mask_v0_overlap_c { + if (!vm) { + !(group == COMPARE || is_mask_producing_instr || is_reduction_instr) -> (vd != 0); + } } constraint vector_mask_enable_c { @@ -338,12 +344,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - // Some temporarily constraint to avoid illegal instruction - // TODO: Review these constraints - constraint temp_c { - (vm == 0) -> (vd != 0); - } - `uvm_object_utils(riscv_vector_instr) `uvm_object_new @@ -568,18 +568,24 @@ class riscv_vector_instr extends riscv_floating_point_instr; if ((name.substr(0, 1) == "VW") || (name.substr(0, 2) == "VFW")) begin is_widening_instr = 1'b1; end - if (uvm_is_match("V[SZ]EXT_VF[248]", name)) begin - ext_widening_factor = name.substr(name.len()-2, name.len()-1).atoi(); + if (!uvm_re_match("V[SZ]EXT_VF[248]", name)) begin + ext_widening_factor = name.substr(name.len()-1, name.len()-1).atoi(); end if ((name.substr(0, 1) == "VN") || (name.substr(0, 2) == "VFN")) begin is_narrowing_instr = 1'b1; end - if (uvm_is_match("*CVT*", name)) begin + if (!uvm_re_match("VF[NW]?CVT_.*", name)) begin is_convert_instr = 1'b1; has_vs1 = 1'b0; end + if (!uvm_re_match("VF?RED.*", name)) begin + is_reduction_instr = 1'b1; + end + if (!uvm_re_match("VM.*_MM?", name)) begin + is_mask_producing_instr = 1'b1; + end if (allowed_va_variants.size() > 0) begin - has_va_variant = 1; + has_va_variant = 1'b1; end // Set the rand mode based on the superset of all VA variants if (format == VA_FORMAT) begin From 66f91a97f57ffc183e0e189f55dd550d1f441356 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 10 Nov 2023 09:07:23 +0000 Subject: [PATCH 17/90] Enable previously disabled instructions again --- src/isa/riscv_vector_instr.sv | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index e5f68edf..c711e42f 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -350,20 +350,11 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Filter unsupported instructions based on configuration virtual function bit is_supported(riscv_instr_gen_config cfg); string name = instr_name.name(); - // 19.2.2. Vector Add with Carry/Subtract with Borrow Reserved under EDIV>1 - if ((cfg.vector_cfg.vtype.vediv > 1) && - (instr_name inside {VADC, VSBC, VMADC, VMSBC})) begin - return 1'b0; - end // Disable widening/narrowing instruction when LMUL == 8 if ((!cfg.vector_cfg.vec_narrowing_widening) && (is_widening_instr || is_narrowing_instr)) begin return 1'b0; end - // TODO: Clean up this list, it's causing gcc compile error now - if (instr_name inside {VWMACCSU, VMERGE, VFMERGE, VMADC, VMSBC}) begin - return 1'b0; - end // The standard vector floating-point instructions treat 16-bit, 32-bit, 64-bit, // and 128-bit elements as IEEE-754/2008-compatible values. 
If the current SEW does // not correspond to a supported IEEE floating-pointtype, an illegal instruction From 9d4f3c1d0b16aada89b24f3dec301ef61ec6931a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 10 Nov 2023 11:55:58 +0000 Subject: [PATCH 18/90] Enable support for Zve* and Zvfh vector extensions --- src/isa/riscv_vector_instr.sv | 37 ++++++++++++++------ src/riscv_vector_cfg.sv | 66 +++++++++++++++++++++++++++-------- 2 files changed, 78 insertions(+), 25 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index c711e42f..0879ea80 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -297,10 +297,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - constraint disable_floating_point_varaint_c { - if (!m_cfg.vector_cfg.vec_fp) { - va_variant != VF; - } + // Do not use float variants if FP is disabled + constraint disable_fp_variant_c { + !m_cfg.vector_cfg.enable_fp_support -> !(va_variant inside {VF, WF, VFM}); } constraint vector_load_store_mask_overlap_c { @@ -355,12 +354,28 @@ class riscv_vector_instr extends riscv_floating_point_instr; (is_widening_instr || is_narrowing_instr)) begin return 1'b0; end - // The standard vector floating-point instructions treat 16-bit, 32-bit, 64-bit, - // and 128-bit elements as IEEE-754/2008-compatible values. If the current SEW does - // not correspond to a supported IEEE floating-pointtype, an illegal instruction - // exception is raised - if (!cfg.vector_cfg.vec_fp) begin - if ((name.substr(0, 1) == "VF") || (name.substr(0, 2) == "VMF")) begin + // Check FP instructions + if ((name.substr(0, 1) == "VF" && name != VFIRST_M) || (name.substr(0, 2) == "VMF")) begin + // FP instructions are not supported + if (!cfg.vector_cfg.enable_fp_support) begin + return 1'b0; + end + // FP instruction is unsupported if outside of valid EEW range + if (!(cfg.vector_cfg.vtype.vsew inside {[cfg.vector_cfg.min_fp_sew : + cfg.vector_cfg.max_fp_sew]})) begin + return 1'b0; + end + // Widening/narrowing is unsupported if only one fp size is valid + if ((is_widening_instr || is_narrowing_instr) && + cfg.vector_cfg.min_fp_sew == cfg.vector_cfg.max_fp_sew) begin + return 1'b0; + end + // Widening requires 2*SEW = SEW op SEW + if (is_widening_instr && cfg.vector_cfg.vtype.vsew == cfg.vector_cfg.min_fp_sew) begin + return 1'b0; + end + // Narrowing requires SEW = 2*SEW op SEW + if (is_narrowing_instr && cfg.vector_cfg.vtype.vsew == cfg.vector_cfg.max_fp_sew) begin return 1'b0; end end @@ -438,7 +453,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; WI, VI, VIM: begin asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), imm_str)}; end - VF, VFM: begin + WF, VF, VFM: begin if (instr_name inside {VFMADD, VFNMADD, VFMACC, VFNMACC, VFNMSUB, VFWNMSAC, VFWMACC, VFMSUB, VFMSAC, VFNMSAC, VFWNMACC, VFWMSAC}) begin asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), fs1.name(), vs2.name())}; diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 2041f1a6..aa13b20b 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -24,6 +24,16 @@ class riscv_vector_cfg extends uvm_object; rand bit vxsat; riscv_vreg_t reserved_vregs[$]; + // Zve* extension + string zve_extension = ""; + bit enable_fp_support = 1'b1; + int unsigned max_int_sew = 64; + int unsigned max_fp_sew = 64; + + // Zvfh extension + bit enable_zvfh_extension = 1'b0; + int unsigned min_fp_sew = 32; + // Allowed effective element width based on the LMUL 
setting int unsigned legal_eew[$]; @@ -31,9 +41,6 @@ class riscv_vector_cfg extends uvm_object; rand bit only_vec_instr; constraint only_vec_instr_c {soft only_vec_instr == 0;} - // Allow vector floating-point instructions (Allows vtype.vsew to be set <16 or >32). - rand bit vec_fp; - // Allow vector narrowing or widening instructions. rand bit vec_narrowing_widening; @@ -67,22 +74,16 @@ class riscv_vector_cfg extends uvm_object; vl == VLEN/vtype.vsew; } - // For all widening instructions, the destination element width must be a supported element - // width and the destination LMUL value must also be a supported LMUL value constraint vlmul_c { vtype.vlmul inside {1, 2, 4, 8}; - vtype.vlmul <= MAX_LMUL; - if (vec_narrowing_widening) { - (vtype.vlmul < 8) || (vtype.fractional_lmul == 1'b1); - } + vtype.fractional_lmul -> vtype.vlmul != 1; + // Fractional LMUL 1/8th only supported when EEW 64 is supported + vtype.fractional_lmul -> vtype.vlmul <= max_int_sew / 8; } constraint vsew_c { - vtype.vsew inside {8, 16, 32, 64, 128}; - vtype.vsew <= ELEN; - // TODO: Determine the legal range of floating point format - if (vec_fp) {vtype.vsew inside {32};} - if (vec_narrowing_widening) {vtype.vsew < ELEN;} + vtype.vsew inside {8, 16, 32, 64}; + vtype.vsew <= max_int_sew; } constraint vseg_c { @@ -101,6 +102,12 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(vstart, UVM_DEFAULT) `uvm_field_enum(vxrm_t,vxrm, UVM_DEFAULT) `uvm_field_int(vxsat, UVM_DEFAULT) + `uvm_field_string(zve_extension, UVM_DEFAULT) + `uvm_field_int(enable_fp_support, UVM_DEFAULT) + `uvm_field_int(max_int_sew, UVM_DEFAULT) + `uvm_field_int(max_fp_sew, UVM_DEFAULT) + `uvm_field_int(enable_zvfh_extension, UVM_DEFAULT) + `uvm_field_int(min_fp_sew, UVM_DEFAULT) `uvm_field_int(enable_zvlsseg, UVM_DEFAULT) `uvm_field_int(enable_fault_only_first_load, UVM_DEFAULT) `uvm_object_utils_end @@ -113,6 +120,37 @@ class riscv_vector_cfg extends uvm_object; if ($value$plusargs("enable_fault_only_first_load=%0d", enable_fault_only_first_load)) begin enable_fault_only_first_load.rand_mode(0); end + // Check for Zve* extension + if ($value$plusargs("zve_extension=%0s", zve_extension)) begin + int minimum_vlen; + string supported_type; + zve_extension = zve_extension.tolower(); + minimum_vlen = zve_extension.substr(3,4).atoi(); + supported_type = zve_extension.substr(5,5); + + // Is the extension valid + if (zve_extension.substr(0,2) != "zve" || !(minimum_vlen inside {32, 64}) || + !(supported_type inside {"x", "f", "d"}) || (minimum_vlen == 32 && supported_type == "d")) begin + `uvm_fatal(`gfn, $sformatf("Unsupported Zve* extension %0s. Supported are Zve32{x,f} and Zve64{x,f,d}.", + zve_extension)) + end + `uvm_info(`gfn, $sformatf("Enabling vector spec %0s extension", zve_extension), UVM_LOW) + // Check VLEN to be of correct minimum size + if (VLEN < minimum_vlen) begin + `uvm_fatal(`gfn, $sformatf("%0s extension requires a VLEN of at least %0d bits", + zve_extension, minimum_vlen)) + end + // Set configuration + enable_fp_support = supported_type inside {"f", "d"}; + max_int_sew = minimum_vlen; + max_fp_sew = supported_type == "f" ? 32 : + supported_type == "d" ? 
64 : 0; + end + if ($value$plusargs("enable_zvfh_extension=%0b", enable_zvfh_extension)) begin + if (enable_zvfh_extension) begin + min_fp_sew = 16; + end + end endfunction : new function void post_randomize(); From 77c69be697706a1a69fd6bbdb21a0bf942e48693 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 13 Nov 2023 08:59:39 +0000 Subject: [PATCH 19/90] Remove reference to vector register hazard --- src/riscv_vector_cfg.sv | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index aa13b20b..ccd8d219 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -47,14 +47,6 @@ class riscv_vector_cfg extends uvm_object; rand bit allow_illegal_vec_instr; constraint allow_illegal_vec_instr_c {soft allow_illegal_vec_instr == 0;} - // Cause frequent hazards for the Vector Registers: - // * Write-After-Read (WAR) - // * Read-After-Write (RAW) - // * Read-After-Read (RAR) - // * Write-After-Write (WAW) - // These hazard conditions are induced by keeping a small (~5) list of registers to select from. - rand bit vec_reg_hazards; - // Enable segmented load/store extension ops rand bit enable_zvlsseg = 1'b1; From 3b7a944d3c95e382b05d832b59203d3a15bfaebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 13 Nov 2023 10:52:07 +0000 Subject: [PATCH 20/90] Add validation of LMUL and SEW for narrowing and widening instr --- src/isa/riscv_vector_instr.sv | 16 +++++++++++----- src/riscv_vector_cfg.sv | 5 +++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 0879ea80..fd1baec6 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -37,6 +37,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit is_convert_instr = 1'b0; bit is_reduction_instr = 1'b0; bit is_mask_producing_instr = 1'b0; + bit is_fp_instr = 1'b0; int ext_widening_factor = 1; va_variant_t allowed_va_variants[$]; string sub_extension; @@ -349,13 +350,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Filter unsupported instructions based on configuration virtual function bit is_supported(riscv_instr_gen_config cfg); string name = instr_name.name(); - // Disable widening/narrowing instruction when LMUL == 8 - if ((!cfg.vector_cfg.vec_narrowing_widening) && - (is_widening_instr || is_narrowing_instr)) begin - return 1'b0; + // Check that current LMUL and SEW are valid for narrowing and widening instruction + if (is_widening_instr || is_narrowing_instr) begin + if (cfg.vector_cfg.vtype.vsew == (is_fp_instr ? 
cfg.vector_cfg.max_fp_sew : cfg.vector_cfg.max_int_sew) || + (!cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul == 8)) begin + return 1'b0; + end end // Check FP instructions - if ((name.substr(0, 1) == "VF" && name != VFIRST_M) || (name.substr(0, 2) == "VMF")) begin + if (is_fp_instr) begin // FP instructions are not supported if (!cfg.vector_cfg.enable_fp_support) begin return 1'b0; @@ -590,6 +593,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (!uvm_re_match("VM.*_MM?", name)) begin is_mask_producing_instr = 1'b1; end + if ((name.substr(0, 1) == "VF" && name != VFIRST_M) || (name.substr(0, 2) == "VMF")) begin + is_fp_instr = 1'b1; + end if (allowed_va_variants.size() > 0) begin has_va_variant = 1'b1; end diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index ccd8d219..91007bc2 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -63,13 +63,14 @@ class riscv_vector_cfg extends uvm_object; // Basic constraint for initial bringup constraint bringup_c { vstart == 0; - vl == VLEN/vtype.vsew; } constraint vlmul_c { vtype.vlmul inside {1, 2, 4, 8}; vtype.fractional_lmul -> vtype.vlmul != 1; - // Fractional LMUL 1/8th only supported when EEW 64 is supported + // Fractional LMUL only allowed iff at least one SEW element fits into vector + (8 >> $clog2(vtype.vsew/8)) < vtype.vlmul -> !vtype.fractional_lmul; + // Fractional LMUL 1/8th only supported iff EEW 64 is supported vtype.fractional_lmul -> vtype.vlmul <= max_int_sew / 8; } From 40d2bb48e00c08c54f4a0da7d36bd9833842e6c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 13 Nov 2023 10:53:33 +0000 Subject: [PATCH 21/90] Check for valid LMUL for vrgatherei16 --- src/isa/riscv_vector_instr.sv | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index fd1baec6..5ecc232c 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -399,6 +399,19 @@ class riscv_vector_instr extends riscv_floating_point_instr; return 1'b0; end end + // Check for valid LMUL for vrgatherei16 + if (instr_name == VRGATHEREI16) begin + if (16/cfg.vector_cfg.vtype.vsew > 1) begin + if (!cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul == 8) begin + return 1'b0; + end + end else begin + if (cfg.vector_cfg.vtype.fractional_lmul && + (cfg.vector_cfg.vtype.vsew/16)*cfg.vector_cfg.vtype.vlmul > cfg.vector_cfg.max_int_sew/8) begin + return 1'b0; + end + end + end return 1'b1; endfunction From e1057f9aa1a988553cb566d8fdf34f2e42148d62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 13 Nov 2023 10:54:21 +0000 Subject: [PATCH 22/90] Cleanup vector asm creation --- src/isa/riscv_vector_instr.sv | 58 +++++++++++++++++------------------ 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 5ecc232c..b4871546 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -439,50 +439,50 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (instr_name == VID_V) begin asm_str = $sformatf("vid.v %s", vd.name()); end else if (instr_name inside {VCPOP_M, VFIRST_M}) begin - asm_str = $sformatf("%0s %0s,%0s", get_instr_name(), rd.name(), vs2.name()); + asm_str = $sformatf("%0s %0s, %0s", get_instr_name(), rd.name(), vs2.name()); end else begin - asm_str = $sformatf("%0s %0s,%0s", get_instr_name(), vd.name(), vs2.name()); + asm_str = $sformatf("%0s 
%0s, %0s", get_instr_name(), vd.name(), vs2.name()); end end VA_FORMAT: begin case (instr_name) - VMV_V_V: asm_str = $sformatf("vmv.v.v %s,%s", vd.name(), vs1.name()); - VMV_V_X: asm_str = $sformatf("vmv.v.x %s,%s", vd.name(), rs1.name()); - VMV_V_I: asm_str = $sformatf("vmv.v.i %s,%s", vd.name(), imm_str); - VFMV_V_F: asm_str = $sformatf("vfmv.v.f %s,%s", vd.name(), fs1.name()); - VMV_X_S: asm_str = $sformatf("vmv.x.s %s,%s", rd.name(), vs2.name()); - VMV_S_X: asm_str = $sformatf("vmv.s.x %s,%s", vd.name(), rs1.name()); - VFMV_F_S: asm_str = $sformatf("vfmv.f.s %s,%s", fd.name(), vs2.name()); - VFMV_S_F: asm_str = $sformatf("vfmv.s.f %s,%s", vd.name(), fs1.name()); + VMV_V_V: asm_str = $sformatf("vmv.v.v %s, %s", vd.name(), vs1.name()); + VMV_V_X: asm_str = $sformatf("vmv.v.x %s, %s", vd.name(), rs1.name()); + VMV_V_I: asm_str = $sformatf("vmv.v.i %s, %s", vd.name(), imm_str); + VFMV_V_F: asm_str = $sformatf("vfmv.v.f %s, %s", vd.name(), fs1.name()); + VMV_X_S: asm_str = $sformatf("vmv.x.s %s, %s", rd.name(), vs2.name()); + VMV_S_X: asm_str = $sformatf("vmv.s.x %s, %s", vd.name(), rs1.name()); + VFMV_F_S: asm_str = $sformatf("vfmv.f.s %s, %s", fd.name(), vs2.name()); + VFMV_S_F: asm_str = $sformatf("vfmv.s.f %s, %s", vd.name(), fs1.name()); default: begin if (!has_va_variant) begin asm_str = $sformatf("%0s ", get_instr_name()); asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), vs1.name())}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), vs1.name())}; end else begin asm_str = $sformatf("%0s.%0s ", get_instr_name(), va_variant.name()); asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); case (va_variant) inside WV, VV, VVM, VM: begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), vs1.name())}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), vs1.name())}; end WI, VI, VIM: begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), imm_str)}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), imm_str)}; end WF, VF, VFM: begin if (instr_name inside {VFMADD, VFNMADD, VFMACC, VFNMACC, VFNMSUB, VFWNMSAC, VFWMACC, VFMSUB, VFMSAC, VFNMSAC, VFWNMACC, VFWMSAC}) begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), fs1.name(), vs2.name())}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), fs1.name(), vs2.name())}; end else begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), fs1.name())}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), fs1.name())}; end end WX, VX, VXM: begin if (instr_name inside {VMADD, VNMSUB, VMACC, VNMSAC, VWMACCSU, VWMACCU, VWMACCUS, VWMACC}) begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), rs1.name(), vs2.name())}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), rs1.name(), vs2.name())}; end else begin - asm_str = {asm_str, $sformatf("%0s,%0s,%0s", vd.name(), vs2.name(), rs1.name())}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), rs1.name())}; end end endcase @@ -492,53 +492,53 @@ class riscv_vector_instr extends riscv_floating_point_instr; end VL_FORMAT: begin if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %s,(%s)", add_nfields(get_instr_name(), "vlseg"), + asm_str = $sformatf("%0s %s, (%s)", add_nfields(get_instr_name(), "vlseg"), vd.name(), rs1.name()); end else begin - asm_str = $sformatf("%0s %s,(%s)", get_instr_name(), vd.name(), rs1.name()); + asm_str = 
$sformatf("%0s %s, (%s)", get_instr_name(), vd.name(), rs1.name()); end end VS_FORMAT: begin if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %s,(%s)", add_nfields(get_instr_name(), "vsseg"), + asm_str = $sformatf("%0s %s, (%s)", add_nfields(get_instr_name(), "vsseg"), vs3.name(), rs1.name()); end else begin - asm_str = $sformatf("%0s %s,(%s)", get_instr_name(), vs3.name(), rs1.name()); + asm_str = $sformatf("%0s %s, (%s)", get_instr_name(), vs3.name(), rs1.name()); end end VLS_FORMAT: begin if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", add_nfields(get_instr_name(), "vlsseg"), + asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vlsseg"), vd.name(), rs1.name(), rs2.name()); end else begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", get_instr_name(), + asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), vd.name(), rs1.name(), rs2.name()); end end VSS_FORMAT: begin if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", add_nfields(get_instr_name(), "vssseg"), + asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vssseg"), vs3.name(), rs1.name(), rs2.name()); end else begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", get_instr_name(), + asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), vs3.name(), rs1.name(), rs2.name()); end end VLX_FORMAT: begin if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", add_nfields(get_instr_name(), "vlxseg"), + asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vlxseg"), vd.name(), rs1.name(), vs2.name()); end else begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", get_instr_name(), + asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), vd.name(), rs1.name(), vs2.name()); end end VSX_FORMAT: begin if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", add_nfields(get_instr_name(), "vsxseg"), + asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vsxseg"), vs3.name(), rs1.name(), vs2.name()); end else begin - asm_str = $sformatf("%0s %0s,(%0s),%0s", get_instr_name(), + asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), vs3.name(), rs1.name(), vs2.name()); end end From 03c23f5c5c16544c4cd46db7d756495b76d5fecf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 13 Nov 2023 15:03:41 +0000 Subject: [PATCH 23/90] Cleanup vector instruction constraints --- src/isa/riscv_vector_instr.sv | 258 ++++++++++++++++------------------ 1 file changed, 121 insertions(+), 137 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index b4871546..55ad4051 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -59,10 +59,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Section 3.3.2: Vector Register Grouping (vlmul) // Instructions specifying a vector operand with an odd-numbered vector register will raisean // illegal instruction exception. 
- // TODO: Exclude the instruction that ignore VLMUL - // TODO: Update this constraint for fractional LMUL - constraint operand_group_c { - if (m_cfg.vector_cfg.vtype.vlmul > 0) { + constraint vector_operand_group_c { + if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vlmul > 0 && + !(instr_name inside {VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F})) { vd % m_cfg.vector_cfg.vtype.vlmul == 0; vs1 % m_cfg.vector_cfg.vtype.vlmul == 0; vs2 % m_cfg.vector_cfg.vtype.vlmul == 0; @@ -70,49 +69,101 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - // Section 11.2: Widening Vector Arithmetic Instructions - constraint widening_instr_c { + // Section 5.2 and 10.2: Widening Vector Arithmetic Instructions + constraint vector_widening_instr_c { if (is_widening_instr) { - // The destination vector register group results are arranged as if both - // SEW and LMUL were at twice their current settings. - vd % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; - // The destination vector register group cannot overlap a source vector - // register group of a different element width (including the mask register if masked) - !(vs1 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul * 2 - 1]}); - !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul * 2 - 1]}); - (vm == 0) -> (vd != 0); - // Double-width result, first source double-width, second source single-width - if (va_variant inside {WV, WX}) { - vs2 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; - } + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + // The destination vector register group results are arranged as if both + // SEW and LMUL were at twice their current settings. + vd % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; + // The destination vector register group cannot overlap a source vector + // register group of a different element width + // For reduction instructions, vs1 is double width + if (!is_reduction_instr) { + !(vs1 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); + } + // Double-width vd, vs2 double-width, vs1 single-width + if (va_variant inside {WV, WX}) { + vs2 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; + } else { + !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); + } + } else { + // Double-width vs2 is allowed to overlap double-width vd + if (!(va_variant inside {WV, WX})) { + vs2 != vd; + } + vs1 != vd; + } } } - // Section 11.3: Narrowing Vector Arithmetic Instructions - constraint narrowing_instr_c { + // Section 5.2 and 10.3: Narrowing Vector Arithmetic Instructions + constraint vector_narrowing_instr_c { if (is_narrowing_instr) { - // The source and destination vector register numbers must be aligned - // appropriately for the vector registergroup size - vs2 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; - // The destination vector register group cannot overlap the rst source - // vector register group (specied by vs2) - !(vd inside {[vs2 : vs2 + m_cfg.vector_cfg.vtype.vlmul * 2 - 1]}); - // The destination vector register group cannot overlap the mask register - // if used, unless LMUL=1 (implemented in vmask_overlap_c) + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + // The source and destination vector register numbers must be aligned + // appropriately for the vector registergroup size + vs2 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; + // The destination vector register group cannot overlap the vs2 source + // vector register group + !(vd inside {[vs2 + m_cfg.vector_cfg.vtype.vlmul : vs2 + m_cfg.vector_cfg.vtype.vlmul*2 - 1]}); + } else { + vs2 != vd; + } + } + } + + // Section 5.3: Vector 
Masking + // The destination vector register group for a masked vector instruction cannot overlap + // the source mask register (v0), unless the destination vector register is being written + // with a mask value (e.g., compares) or the scalar result of a reduction. These + // instruction encodings are reserved. + constraint vector_mask_v0_overlap_c { + if (!vm) { + !(group == COMPARE || is_mask_producing_instr || is_reduction_instr) -> (vd != 0); + } + } + + // VM-bit required to be zero + constraint vector_mask_enable_c { + // Instructions that require vm=0 + if (instr_name inside {VMERGE, VFMERGE, VADC, VSBC}) { + vm == 1'b0; + } + if (instr_name inside {VMADC, VMSBC} && va_variant inside {VVM, VXM, VIM}) { + vm == 1'b0; + } + } + + // VM-bit required to be one + constraint vector_mask_disable_c { + // Instructions that require vm=1 + if (instr_name inside {VMV_V_V, VMV_V_X, VMV_V_I, VFMV_V_F, + VFMV_F_S, VFMV_S_F, VMV_X_S, VMV_S_X, + VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V, + VCOMPRESS}) { + vm == 1'b1; + } + if (instr_name inside {VMADC, VMSBC} && va_variant inside {VV, VX, VI}) { + vm == 1'b1; + } + if (instr_name inside {[VMAND_MM : VMXNOR_MM]}) { + vm == 1'b1; } } - // 11.3. Vector Integer Extension - constraint integer_extension_c { + // Section 11.3: Vector Integer Extension + constraint vector_integer_extension_c { if (instr_name inside {VZEXT_VF2, VZEXT_VF4, VZEXT_VF8, VSEXT_VF2, VSEXT_VF4, VSEXT_VF8}) { - // VD needs to be LMUL aligned - vd % m_cfg.vector_cfg.vtype.vlmul == 0; if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor >= 1) { + // VD needs to be LMUL aligned + vd % m_cfg.vector_cfg.vtype.vlmul == 0; // VS2 needs to be LMUL/ext_widening_factor aligned vs2 % (m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor) == 0; // VS2 can only overlap last ext_widening_factor'th of VD - !(vs2 inside {[vd : vd + ((m_cfg.vector_cfg.vtype.vlmul-1) * ext_widening_factor - 1)]}); + !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - (m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor) - 1]}); } else { // If source has fractional LMUL, VD and VS2 cannot overlap vs2 != vd; @@ -120,62 +171,47 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - // 12.3. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions - constraint add_sub_with_carry_c { - if (m_cfg.vector_cfg.vtype.vlmul > 1) { - // For vadc and vsbc, an illegal instruction exception is raised if the - // destination vector register is v0 and LMUL> 1 - if (instr_name inside {VADC, VSBC}) { - vd != 0; - } - // For vmadc and vmsbc, an illegal instruction exception is raised if the - // destination vector register overlaps asource vector register group and LMUL > 1 - if (instr_name inside {VMADC, VMSBC}) { - vd != vs2; - vd != vs1; - } + // Section 11.16: Vector Integer Move Instructions + constraint vector_int_vmv_c { + // VS2 needs to be zero + if (instr_name inside {VMV_V_V, VMV_V_I, VMV_V_X}) { + vs2 == 0; } } - // 12.7. 
Vector Integer Comparison Instructions - // For all comparison instructions, an illegal instruction exception is raised if the - // destination vector register overlaps a source vector register group and LMUL > 1 - constraint compare_instr_c { - if (category == COMPARE) { + // Section 15.5, 15.6, 15.7: The destination register cannot overlap the + // source register and, if masked, cannot overlap the mask register ('v0') + constraint vector_set_first_c { + if (instr_name inside {VMSBF_M, VMSIF_M, VMSOF_M}) { vd != vs2; - vd != vs1; + (vm == 0) -> vd != 0; } } - // 16.8. Vector Iota Instruction - // An illegal instruction exception is raised if the destination vector register group - // overlaps the source vector mask register. If the instruction is masked, an illegal - // instruction exception is issued if the destination vector register group overlaps v0. - constraint vector_itoa_c { + // Section 15.8: Vector Iota Instruction + // The destination register group cannot overlap the source register + // and, if masked, cannot overlap the mask register (v0) + constraint vector_iota_c { if (instr_name == VIOTA_M) { vd != vs2; - (vm == 0) -> (vd != 0); } } - // 16.9. Vector Element Index Instruction - // The vs2 eld of the instruction must be set to v0, otherwise the encoding is reserved + // Section 15.9: Vector Element Index Instruction + // The vs2 field of the instruction must be set to v0, otherwise the encoding is reserved constraint vector_element_index_c { if (instr_name == VID_V) { vs2 == 0; - // TODO; Check if this constraint is needed - vd != vs2; } } - // Section 17.3 Vector Slide Instructions + // Section 16.3: Vector Slide Instructions // The destination vector register group for vslideup cannot overlap the vector register // group of the source vector register group or the mask register constraint vector_slideup_c { if (instr_name inside {VSLIDEUP, VSLIDE1UP, VFSLIDE1UP}) { vd != vs2; vd != vs1; - (vm == 0) -> (vd != 0); } } @@ -188,7 +224,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (instr_name inside {VRGATHER, VRGATHEREI16}) { vd != vs2; vd != vs1; - (vm == 0) -> (vd != 0); } if (instr_name == VRGATHEREI16) { if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vsew == 8) { @@ -197,14 +232,31 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - // Section 17.5: Vector compress instruction + // Section 16.5: Vector compress instruction // The destination vector register group cannot overlap the source vector register - // group or the source vector mask register + // group or the source mask register, otherwise the instruction encoding is reserved constraint vector_compress_c { if (instr_name == VCOMPRESS) { vd != vs2; vd != vs1; - (vm == 0) -> (vd != 0); + } + } + + // Section 16.6: Whole Vector Register Move + // The source and destination vector register numbers must be aligned appropriately for + // the vector register group size, and encodings with other vector register numbers are reserved + constraint vector_vmvxr_c { + if (instr_name == VMV2R_V) { + vs2 % 2 == 0; + vd % 2 == 0; + } + if (instr_name == VMV4R_V) { + vs2 % 4 == 0; + vd % 4 == 0; + } + if (instr_name == VMV8R_V) { + vs2 % 8 == 0; + vd % 8 == 0; } } @@ -230,74 +282,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - constraint vmv_alignment_c { - if (instr_name == VMV2R_V) { - int'(vs2) % 2 == 0; - int'(vd) % 2 == 0; - } - if (instr_name == VMV4R_V) { - int'(vs2) % 4 == 0; - int'(vd) % 4 == 0; - } - if (instr_name == VMV8R_V) { - 
int'(vs2) % 8 == 0; - int'(vd) % 8 == 0; - } - } - - /////////////////// Vector mask constraint /////////////////// - - // 5.3 Vector Masking - // The destination vector register group for a masked vector instruction cannot overlap - // the source mask register (v0), unless the destination vector register is being written - // with a mask value (e.g., compares) or the scalar result of a reduction. These - // instruction encodings are reserved. - constraint mask_v0_overlap_c { - if (!vm) { - !(group == COMPARE || is_mask_producing_instr || is_reduction_instr) -> (vd != 0); - } - } - - constraint vector_mask_enable_c { - // Instructions that require vm=0 - if (instr_name inside {VMERGE, VFMERGE, VADC, VSBC}) { - vm == 1'b0; - } - if (instr_name inside {VMADC, VMSBC} && va_variant inside {VVM, VXM, VIM}) { - vm == 1'b0; - } - } - - constraint vector_mask_disable_c { - // Instructions that require vm=1 - if (instr_name inside {VMV_V_V, VMV_V_X, VMV_V_I, VFMV_V_F, - VFMV_F_S, VFMV_S_F, VMV_X_S, VMV_S_X, - VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V, - VCOMPRESS}) { - vm == 1'b1; - } - if (instr_name inside {VMADC, VMSBC} && va_variant inside {VV, VX, VI}) { - vm == 1'b1; - } - } - - // 16.1. Vector Mask-Register Logical Instructions - // No vector mask for these instructions - constraint vector_mask_instr_c { - if (instr_name inside {[VMAND_MM : VMXNOR_MM]}) { - vm == 1'b1; - } - } - - // 14.5, 14.6, 14.7. The destination register cannot overlap the - // source register and, if masked, cannot overlap the mask register ('v0'). - constraint vector_set_first_c { - if (instr_name inside {VMSBF_M, VMSIF_M, VMSOF_M}) { - vd != vs2; - (vm == 0) -> vd != 0; - } - } - // Do not use float variants if FP is disabled constraint disable_fp_variant_c { !m_cfg.vector_cfg.enable_fp_support -> !(va_variant inside {VF, WF, VFM}); From 684e0359f26e130635ada1538ceda714ff968ffe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 15 Nov 2023 08:45:17 +0000 Subject: [PATCH 24/90] Cleanup vector gpr and csr initialisation --- src/riscv_asm_program_gen.sv | 53 ++++++++++++++---------------------- 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index 0ae268b6..1deb2160 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -423,13 +423,13 @@ class riscv_asm_program_gen extends uvm_object; if (cfg.enable_floating_point) begin init_floating_point_gpr(); end + if (cfg.enable_vector_extension) begin + init_vector_gpr(); + end init_gpr(); // Init stack pointer to point to the end of the user stack str = {indent, $sformatf("la x%0d, %0suser_stack_end", cfg.sp, hart_prefix(hart))}; instr_stream.push_back(str); - if (cfg.enable_vector_extension) begin - randomize_vec_gpr_and_csr(); - end core_is_initialized(); gen_dummy_csr_write(); // TODO add a way to disable xStatus read if (riscv_instr_pkg::support_pmp) begin @@ -542,7 +542,7 @@ class riscv_asm_program_gen extends uvm_object; endfunction // Initialize vector general purpose registers - virtual function void init_vec_gpr(); + virtual function void init_vector_gpr(); int SEW = (ELEN <= XLEN) ? 
ELEN : XLEN; int LMUL = 1; int num_elements = VLEN / SEW; @@ -550,9 +550,6 @@ class riscv_asm_program_gen extends uvm_object; // Do not init vector registers if RVV is not enabled if (!(RVV inside {supported_isa})) return; - // Create RVV init label - instr_stream.push_back("vec_reg_init:"); - // Set vector configuration instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], num_elements)); instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, m%0d, ta, ma", @@ -591,6 +588,22 @@ class riscv_asm_program_gen extends uvm_object; end end endcase + + // Initialize vector CSRs + instr_stream.push_back({indent, $sformatf("csrwi vxsat, %0d", cfg.vector_cfg.vxsat)}); + instr_stream.push_back({indent, $sformatf("csrwi vxrm, %0d", cfg.vector_cfg.vxrm)}); + + // Initialize vector configuration + instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], cfg.vector_cfg.vl)); + instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, m%0s%0d, %0s, %0s", + indent, + cfg.gpr[0], + cfg.gpr[1], + cfg.vector_cfg.vtype.vsew, + cfg.vector_cfg.vtype.fractional_lmul ? "f" : "", + cfg.vector_cfg.vtype.vlmul, + cfg.vector_cfg.vtype.vta ? "ta" : "tu", + cfg.vector_cfg.vtype.vma ? "ma" : "mu")); endfunction // Initialize floating point general purpose registers @@ -1613,30 +1626,4 @@ class riscv_asm_program_gen extends uvm_object; instr_stream = {instr_stream, debug_rom.instr_stream}; endfunction - //--------------------------------------------------------------------------------------- - // Vector extension generation - //--------------------------------------------------------------------------------------- - - virtual function void randomize_vec_gpr_and_csr(); - string lmul; - if (!(RVV inside {supported_isa})) return; - instr_stream.push_back({indent, $sformatf("csrwi vxsat, %0d", cfg.vector_cfg.vxsat)}); - instr_stream.push_back({indent, $sformatf("csrwi vxrm, %0d", cfg.vector_cfg.vxrm)}); - init_vec_gpr(); // GPR init uses a temporary SEW/LMUL setting before the final value set below. - instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], cfg.vector_cfg.vl)); - if ((cfg.vector_cfg.vtype.vlmul > 1) && (cfg.vector_cfg.vtype.fractional_lmul)) begin - lmul = $sformatf("mf%0d", cfg.vector_cfg.vtype.vlmul); - end else begin - lmul = $sformatf("m%0d", cfg.vector_cfg.vtype.vlmul); - end - instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, %0s, %0s, %0s", - indent, - cfg.gpr[0], - cfg.gpr[1], - cfg.vector_cfg.vtype.vsew, - lmul, - cfg.vector_cfg.vtype.vta ? "ta" : "tu", - cfg.vector_cfg.vtype.vma ? 
"ma" : "mu")); - endfunction - endclass From f2eeb81c4c93bb30ae766556df9167d0e90393e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 16 Nov 2023 08:18:57 +0000 Subject: [PATCH 25/90] Enable support for int/float narrowing widening instructions --- src/isa/riscv_vector_instr.sv | 41 ++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 55ad4051..c95baa39 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -328,15 +328,12 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - `uvm_object_utils(riscv_vector_instr) - `uvm_object_new - // Filter unsupported instructions based on configuration virtual function bit is_supported(riscv_instr_gen_config cfg); string name = instr_name.name(); // Check that current LMUL and SEW are valid for narrowing and widening instruction if (is_widening_instr || is_narrowing_instr) begin - if (cfg.vector_cfg.vtype.vsew == (is_fp_instr ? cfg.vector_cfg.max_fp_sew : cfg.vector_cfg.max_int_sew) || + if (cfg.vector_cfg.vtype.vsew == cfg.vector_cfg.max_int_sew || (!cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul == 8)) begin return 1'b0; end @@ -347,18 +344,32 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (!cfg.vector_cfg.enable_fp_support) begin return 1'b0; end - // FP instruction is unsupported if outside of valid EEW range - if (!(cfg.vector_cfg.vtype.vsew inside {[cfg.vector_cfg.min_fp_sew : - cfg.vector_cfg.max_fp_sew]})) begin - return 1'b0; + if (instr_name inside {VFWCVT_F_XU_V, VFWCVT_F_X_V, VFNCVT_XU_F_W, + VFNCVT_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_RTZ_X_F_W}) begin + // Single-width (unsigned) integer, double-width float + if (!((2*cfg.vector_cfg.vtype.vsew) inside {[cfg.vector_cfg.min_fp_sew : + cfg.vector_cfg.max_fp_sew]})) begin + return 1'b0; + end + end else begin + // FP instruction is unsupported if outside of valid EEW range + if (!(cfg.vector_cfg.vtype.vsew inside {[cfg.vector_cfg.min_fp_sew : + cfg.vector_cfg.max_fp_sew]})) begin + return 1'b0; + end + if (!instr_name inside {VFWCVT_XU_F_V, VFWCVT_X_F_V, VFWCVT_RTZ_XU_F_V, + VFWCVT_RTZ_X_F_V, VFNCVT_F_XU_W, VFNCVT_X_F_W}) begin + // Additional check not required for single-width float, double-width (unsigned) integer + // Widening/narrowing is unsupported if only one fp size is valid and + // requires 2*SEW to be of legal size + if ((is_widening_instr || is_narrowing_instr) && + cfg.vector_cfg.max_fp_sew inside {cfg.vector_cfg.min_fp_sew, cfg.vector_cfg.vtype.vsew}) begin + return 1'b0; + end end - // Widening/narrowing is unsupported if only one fp size is valid - if ((is_widening_instr || is_narrowing_instr) && - cfg.vector_cfg.min_fp_sew == cfg.vector_cfg.max_fp_sew) begin - return 1'b0; end - // Widening requires 2*SEW = SEW op SEW - if (is_widening_instr && cfg.vector_cfg.vtype.vsew == cfg.vector_cfg.min_fp_sew) begin + // Narrowing requires SEW = 2*SEW op SEW + if (is_narrowing_instr && cfg.vector_cfg.vtype.vsew == cfg.vector_cfg.max_fp_sew) begin return 1'b0; end // Narrowing requires SEW = 2*SEW op SEW @@ -577,7 +588,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (!uvm_re_match("V[SZ]EXT_VF[248]", name)) begin ext_widening_factor = name.substr(name.len()-1, name.len()-1).atoi(); end - if ((name.substr(0, 1) == "VN") || (name.substr(0, 2) == "VFN")) begin + if ((name.substr(0, 1) == "VN") || !uvm_re_match("VFN.*_W", name)) begin 
is_narrowing_instr = 1'b1; end if (!uvm_re_match("VF[NW]?CVT_.*", name)) begin From a8457f9423abb158e80ce8f60109c40a61676337 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 16 Nov 2023 08:40:23 +0000 Subject: [PATCH 26/90] Enable support for Zvfhmin vector extension --- src/isa/riscv_vector_instr.sv | 7 +++++-- src/riscv_vector_cfg.sv | 12 ++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index c95baa39..9b75e44e 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -344,8 +344,11 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (!cfg.vector_cfg.enable_fp_support) begin return 1'b0; end - if (instr_name inside {VFWCVT_F_XU_V, VFWCVT_F_X_V, VFNCVT_XU_F_W, - VFNCVT_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_RTZ_X_F_W}) begin + if (instr_name inside {VFWCVT_F_XU_V, VFWCVT_F_X_V, VFNCVT_XU_F_W, + VFNCVT_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_RTZ_X_F_W} || + (instr_name inside {VFWCVT_F_F_V, VFNCVT_F_F_W} && + cfg.vector_cfg.enable_zvfhmin_extension && + !cfg.vector_cfg.enable_zvfh_extension)) begin // Single-width (unsigned) integer, double-width float if (!((2*cfg.vector_cfg.vtype.vsew) inside {[cfg.vector_cfg.min_fp_sew : cfg.vector_cfg.max_fp_sew]})) begin diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 91007bc2..dbf3e70e 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -30,6 +30,9 @@ class riscv_vector_cfg extends uvm_object; int unsigned max_int_sew = 64; int unsigned max_fp_sew = 64; + // Zvfhmin extension + bit enable_zvfhmin_extension = 1'b0; + // Zvfh extension bit enable_zvfh_extension = 1'b0; int unsigned min_fp_sew = 32; @@ -99,6 +102,7 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(enable_fp_support, UVM_DEFAULT) `uvm_field_int(max_int_sew, UVM_DEFAULT) `uvm_field_int(max_fp_sew, UVM_DEFAULT) + `uvm_field_int(enable_zvfhmin_extension, UVM_DEFAULT) `uvm_field_int(enable_zvfh_extension, UVM_DEFAULT) `uvm_field_int(min_fp_sew, UVM_DEFAULT) `uvm_field_int(enable_zvlsseg, UVM_DEFAULT) @@ -139,7 +143,15 @@ class riscv_vector_cfg extends uvm_object; max_fp_sew = supported_type == "f" ? 32 : supported_type == "d" ? 
64 : 0; end + if ($value$plusargs("enable_zvfhmin_extension=%0b", enable_zvfhmin_extension)) begin + if (enable_zvfhmin_extension && !enable_fp_support) begin + `uvm_fatal(`gfn, $sformatf("Zvfhmin extension requires floating point support (Zve32x is invalid)")) + end + end if ($value$plusargs("enable_zvfh_extension=%0b", enable_zvfh_extension)) begin + if (enable_zvfh_extension && !enable_zvfhmin_extension) begin + `uvm_fatal(`gfn, $sformatf("Zvfh extension requires the Zvfhmin extension")) + end if (enable_zvfh_extension) begin min_fp_sew = 16; end From e9c0adc1acd33a59b07cc1414a125c3b21d9d32d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 16 Nov 2023 10:05:45 +0000 Subject: [PATCH 27/90] Fix enable/disable randomisation of vector registers --- src/isa/riscv_vector_instr.sv | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 9b75e44e..aad79731 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -30,7 +30,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit has_vs1 = 1'b1; bit has_vs2 = 1'b1; bit has_vs3 = 1'b1; - bit has_vm = 1'b0; bit has_va_variant = 1'b0; bit is_widening_instr = 1'b0; bit is_narrowing_instr = 1'b0; @@ -567,21 +566,28 @@ class riscv_vector_instr extends riscv_floating_point_instr; vs2.rand_mode(has_vs2); vs3.rand_mode(has_vs3); vd.rand_mode(has_vd); + va_variant.rand_mode(has_va_variant); if (!(category inside {LOAD, STORE, AMO})) begin load_store_solve_order_c.constraint_mode(0); end + // $info("Randomizing for %0s, vd: %0d, vs2: %0d, vs1: %0d", instr_name, vd, vs2, vs2); endfunction : pre_randomize + function void post_randomize(); + super.post_randomize(); + // $info("Randomized for %0s, vd: %0d, vs2: %0d, vs1: %0d", instr_name, vd, vs2, vs2); + endfunction : post_randomize + virtual function void set_rand_mode(); string name = instr_name.name(); - has_rs1 = 1; - has_rs2 = 0; - has_rd = 0; - has_fs1 = 0; - has_fs2 = 0; - has_fs3 = 0; - has_fd = 0; - has_imm = 0; + has_rs1 = 1'b1; + has_rs2 = 1'b0; + has_rd = 1'b0; + has_fs1 = 1'b0; + has_fs2 = 1'b0; + has_fs3 = 1'b0; + has_fd = 1'b0; + has_imm = 1'b0; if (sub_extension != "zvlsseg") begin nfields.rand_mode(0); end @@ -596,7 +602,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; end if (!uvm_re_match("VF[NW]?CVT_.*", name)) begin is_convert_instr = 1'b1; - has_vs1 = 1'b0; end if (!uvm_re_match("VF?RED.*", name)) begin is_reduction_instr = 1'b1; @@ -616,6 +621,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; has_rs1 = 1'b1; has_fs1 = 1'b1; end + if (format == VS2_FORMAT) begin + has_vs1 = 1'b0; + end + if (name inside {"VCPOP_M", "VFIRST_M", "VMV_X_S"}) begin + has_rd = 1'b1; + end + if (name == "VFMV_F_S") begin + has_fd = 1'b1; + end endfunction : set_rand_mode virtual function string vec_vm_str(); From 98c6a34ef0e74d0dd83e0b5a6fdf5878a20d2948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 16 Nov 2023 10:38:54 +0000 Subject: [PATCH 28/90] Add immediate randomization for vector instr --- src/isa/riscv_vector_instr.sv | 8 ++++++-- src/isa/rv32v_instr.sv | 24 ++++++++++++------------ src/riscv_defines.svh | 8 ++++---- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index aad79731..02d347e4 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -445,7 +445,7 @@ 
class riscv_vector_instr extends riscv_floating_point_instr; case (instr_name) VMV_V_V: asm_str = $sformatf("vmv.v.v %s, %s", vd.name(), vs1.name()); VMV_V_X: asm_str = $sformatf("vmv.v.x %s, %s", vd.name(), rs1.name()); - VMV_V_I: asm_str = $sformatf("vmv.v.i %s, %s", vd.name(), imm_str); + VMV_V_I: asm_str = $sformatf("vmv.v.i %s, %s", vd.name(), get_imm()); VFMV_V_F: asm_str = $sformatf("vfmv.v.f %s, %s", vd.name(), fs1.name()); VMV_X_S: asm_str = $sformatf("vmv.x.s %s, %s", rd.name(), vs2.name()); VMV_S_X: asm_str = $sformatf("vmv.s.x %s, %s", vd.name(), rs1.name()); @@ -464,7 +464,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), vs1.name())}; end WI, VI, VIM: begin - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), imm_str)}; + asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), get_imm())}; end WF, VF, VFM: begin if (instr_name inside {VFMADD, VFNMADD, VFMACC, VFNMACC, VFNMSUB, VFWNMSAC, @@ -632,6 +632,10 @@ class riscv_vector_instr extends riscv_floating_point_instr; end endfunction : set_rand_mode + virtual function void set_imm_len(); + imm_len = 5; + endfunction: set_imm_len + virtual function string vec_vm_str(); if (vm) begin return ""; diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv index 015c51aa..3e5cacea 100644 --- a/src/isa/rv32v_instr.sv +++ b/src/isa/rv32v_instr.sv @@ -121,11 +121,11 @@ `DEFINE_VA_INSTR(VAND, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) `DEFINE_VA_INSTR(VOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) `DEFINE_VA_INSTR(VXOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSLL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VNSRL, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}) -`DEFINE_VA_INSTR(VNSRA, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}) +`DEFINE_VA_INSTR(VSLL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_VA_INSTR(VSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_VA_INSTR(VSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_VA_INSTR(VNSRL, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}, UIMM) +`DEFINE_VA_INSTR(VNSRA, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}, UIMM) `DEFINE_VA_INSTR(VMSEQ, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) `DEFINE_VA_INSTR(VMSNE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) `DEFINE_VA_INSTR(VMSLTU, VA_FORMAT, COMPARE, RVV, {VV, VX}) @@ -172,10 +172,10 @@ `DEFINE_VA_INSTR(VASUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) `DEFINE_VA_INSTR(VASUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) `DEFINE_VA_INSTR(VSMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VSSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VNCLIPU, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}) -`DEFINE_VA_INSTR(VNCLIP, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}) +`DEFINE_VA_INSTR(VSSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_VA_INSTR(VSSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_VA_INSTR(VNCLIPU, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}, UIMM) +`DEFINE_VA_INSTR(VNCLIP, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}, UIMM) // 13. 
Vector Floating-Point Instructions `DEFINE_VA_INSTR(VFADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) @@ -278,13 +278,13 @@ `DEFINE_VA_INSTR(VMV_S_X, VA_FORMAT, ARITHMETIC, RVV) `DEFINE_VA_INSTR(VFMV_F_S, VA_FORMAT, ARITHMETIC, RVV) `DEFINE_VA_INSTR(VFMV_S_F, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VSLIDEUP, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}) -`DEFINE_VA_INSTR(VSLIDEDOWN, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}) +`DEFINE_VA_INSTR(VSLIDEUP, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}, UIMM) +`DEFINE_VA_INSTR(VSLIDEDOWN, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}, UIMM) `DEFINE_VA_INSTR(VSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VX}) `DEFINE_VA_INSTR(VSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VX}) `DEFINE_VA_INSTR(VFSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VF}) `DEFINE_VA_INSTR(VFSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VF}) -`DEFINE_VA_INSTR(VRGATHER, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) +`DEFINE_VA_INSTR(VRGATHER, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}, UIMM) `DEFINE_VA_INSTR(VRGATHEREI16, VA_FORMAT, ARITHMETIC, RVV, {VV}) `DEFINE_VA_INSTR(VCOMPRESS, VA_FORMAT, ARITHMETIC, RVV, {VM}) `DEFINE_VA_INSTR(VMV1R_V, VS2_FORMAT, ARITHMETIC, RVV) diff --git a/src/riscv_defines.svh b/src/riscv_defines.svh index 4d078e16..af87c610 100644 --- a/src/riscv_defines.svh +++ b/src/riscv_defines.svh @@ -48,7 +48,7 @@ endfunction \ endclass - `define VA_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, ext = "") \ + `define VA_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, imm_tp, ext = "") \ static bit valid = riscv_instr::register(instr_n); \ `uvm_object_utils(riscv_``instr_n``_instr) \ function new(string name = ""); \ @@ -57,7 +57,7 @@ this.format = ``instr_format; \ this.group = ``instr_group; \ this.category = ``instr_category; \ - this.imm_type = IMM; \ + this.imm_type = ``imm_tp; \ this.allowed_va_variants = ``vav; \ this.sub_extension = ``ext; \ set_imm_len(); \ @@ -96,9 +96,9 @@ `INSTR_BODY(instr_n, instr_format, instr_category, instr_group, imm_tp) // Vector arithmetic instruction -`define DEFINE_VA_INSTR(instr_n, instr_format, instr_category, instr_group, vav = {}, ext = "")\ +`define DEFINE_VA_INSTR(instr_n, instr_format, instr_category, instr_group, vav = {}, imm_tp = IMM, ext = "")\ class riscv_``instr_n``_instr extends riscv_vector_instr; \ - `VA_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, ext) + `VA_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, imm_tp, ext) // Custom extension instruction `define DEFINE_CUSTOM_INSTR(instr_n, instr_format, instr_category, instr_group, imm_tp = IMM) \ From b5a28f84b7db711cf2b08ff69d21ac05400a7aac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 16 Nov 2023 10:39:39 +0000 Subject: [PATCH 29/90] Enable vector instructions only support --- src/riscv_instr_gen_config.sv | 1 + src/riscv_vector_cfg.sv | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/riscv_instr_gen_config.sv b/src/riscv_instr_gen_config.sv index 0712821e..1be40e60 100644 --- a/src/riscv_instr_gen_config.sv +++ b/src/riscv_instr_gen_config.sv @@ -606,6 +606,7 @@ class riscv_instr_gen_config extends uvm_object; get_bool_arg_value("+set_mstatus_mprv=", set_mstatus_mprv); get_bool_arg_value("+enable_floating_point=", enable_floating_point); get_bool_arg_value("+enable_vector_extension=", enable_vector_extension); + get_bool_arg_value("+vector_instr_only=", vector_instr_only); get_bool_arg_value("+enable_b_extension=", enable_b_extension); 
get_bool_arg_value("+enable_zba_extension=", enable_zba_extension); get_bool_arg_value("+enable_zbb_extension=", enable_zbb_extension); diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index dbf3e70e..48319c1d 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -40,10 +40,6 @@ class riscv_vector_cfg extends uvm_object; // Allowed effective element width based on the LMUL setting int unsigned legal_eew[$]; - // Allow only vector instructions from the random sequences - rand bit only_vec_instr; - constraint only_vec_instr_c {soft only_vec_instr == 0;} - // Allow vector narrowing or widening instructions. rand bit vec_narrowing_widening; From 2f6703976b7e858d50156efcf06a58fe702bdbc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 17 Nov 2023 10:32:43 +0000 Subject: [PATCH 30/90] Prepare vector config for load and store instructions --- src/riscv_vector_cfg.sv | 53 +++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 48319c1d..c5df58ca 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -37,8 +37,9 @@ class riscv_vector_cfg extends uvm_object; bit enable_zvfh_extension = 1'b0; int unsigned min_fp_sew = 32; - // Allowed effective element width based on the LMUL setting - int unsigned legal_eew[$]; + // Legal EEW encoded in load/store instructions based + // on current SEW and LMUL setting + int unsigned legal_ls_eew[$]; // Allow vector narrowing or widening instructions. rand bit vec_narrowing_widening; @@ -46,9 +47,6 @@ class riscv_vector_cfg extends uvm_object; rand bit allow_illegal_vec_instr; constraint allow_illegal_vec_instr_c {soft allow_illegal_vec_instr == 0;} - // Enable segmented load/store extension ops - rand bit enable_zvlsseg = 1'b1; - // Enable fault only first load ops rand bit enable_fault_only_first_load; @@ -56,7 +54,7 @@ class riscv_vector_cfg extends uvm_object; solve vtype before vl; solve vl before vstart; vstart inside {[0:vl]}; - vl inside {[1:VLEN/vtype.vsew]}; + vl inside {[0:VLEN/vtype.vsew]}; } // Basic constraint for initial bringup @@ -78,10 +76,6 @@ class riscv_vector_cfg extends uvm_object; vtype.vsew <= max_int_sew; } - constraint vseg_c { - enable_zvlsseg -> (vtype.vlmul < 8); - } - `uvm_object_utils_begin(riscv_vector_cfg) `uvm_field_int(vtype.ill, UVM_DEFAULT) `uvm_field_int(vtype.vma, UVM_DEFAULT) @@ -89,7 +83,7 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(vtype.vsew, UVM_DEFAULT) `uvm_field_int(vtype.vlmul, UVM_DEFAULT) `uvm_field_int(vtype.fractional_lmul, UVM_DEFAULT) - `uvm_field_queue_int(legal_eew, UVM_DEFAULT) + `uvm_field_queue_int(legal_ls_eew, UVM_DEFAULT) `uvm_field_int(vl, UVM_DEFAULT) `uvm_field_int(vstart, UVM_DEFAULT) `uvm_field_enum(vxrm_t,vxrm, UVM_DEFAULT) @@ -101,15 +95,11 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(enable_zvfhmin_extension, UVM_DEFAULT) `uvm_field_int(enable_zvfh_extension, UVM_DEFAULT) `uvm_field_int(min_fp_sew, UVM_DEFAULT) - `uvm_field_int(enable_zvlsseg, UVM_DEFAULT) `uvm_field_int(enable_fault_only_first_load, UVM_DEFAULT) `uvm_object_utils_end function new (string name = ""); super.new(name); - if ($value$plusargs("enable_zvlsseg=%0d", enable_zvlsseg)) begin - enable_zvlsseg.rand_mode(0); - end if ($value$plusargs("enable_fault_only_first_load=%0d", enable_fault_only_first_load)) begin enable_fault_only_first_load.rand_mode(0); end @@ -155,23 +145,28 @@ class riscv_vector_cfg extends uvm_object; 
endfunction : new function void post_randomize(); - real temp_eew; - legal_eew = {}; - // Section 7.3 Vector loads and stores have the EEW encoded directly in the instruction. - // EMUL is calculated as EMUL =(EEW/SEW)*LMUL. If the EMUL would be out of range - // (EMUL>8 or EMUL<1/8), an illegal instruction exceptionis raised. - // EEW = SEW * EMUL / LMUL - for (real emul = 0.125; emul <= 8; emul = emul * 2) begin - if (vtype.fractional_lmul == 0) begin - temp_eew = real'(vtype.vsew) * emul / real'(vtype.vlmul); + set_legal_ls_eew(); + endfunction : post_randomize + + // Section 7.3: Vector Load/Store Width Encoding + // Vector loads and stores have an EEW encoded directly in the instruction. The + // corresponding EMUL is calculated as EMUL = (EEW/SEW)*LMUL. If the EMUL would + // be out of range (EMUL>8 or EMUL<1/8), the instruction encoding is reserved. + function void set_legal_ls_eew(); + real eew; + legal_ls_eew = {}; + for (real emul = 1.0 / real'(max_int_sew/8); emul <= 8.0; emul = emul * 2) begin + // Calculate EEW + if (vtype.fractional_lmul) begin + eew = real'(vtype.vsew) * emul * real'(vtype.vlmul); end else begin - temp_eew = real'(vtype.vsew) * emul * real'(vtype.vlmul); + eew = real'(vtype.vsew) * emul / real'(vtype.vlmul); end - if (temp_eew inside {[8:1024]}) begin - legal_eew.push_back(int'(temp_eew)); + // Check EEW and append iff legal + if (eew inside {[8:max_int_sew]}) begin + legal_ls_eew.push_back(int'(eew)); end - `uvm_info(`gfn, $sformatf("Checking emul: %.2f", emul), UVM_LOW) end - endfunction : post_randomize + endfunction endclass : riscv_vector_cfg From cc6912fc85166d955bdb33119864506812ee2cd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 17 Nov 2023 10:42:24 +0000 Subject: [PATCH 31/90] Add support for all vector load and store instructions --- src/isa/riscv_vector_instr.sv | 379 ++++++++++++++++++++-------------- src/isa/rv32v_instr.sv | 102 +++------ src/riscv_instr_pkg.sv | 98 ++------- 3 files changed, 272 insertions(+), 307 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 02d347e4..5d129718 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -16,7 +16,7 @@ */ -// Base class for RISC-V vector exenstion ISA, implmented based on spec v0.8 +// Base class for RISC-V vector extension ISA, implementation based on spec v1.0 class riscv_vector_instr extends riscv_floating_point_instr; rand riscv_vreg_t vs1; @@ -25,11 +25,12 @@ class riscv_vector_instr extends riscv_floating_point_instr; rand riscv_vreg_t vd; rand va_variant_t va_variant; rand bit vm; - rand bit [10:0] eew; + rand int ls_eew; + rand int nfields; bit has_vd = 1'b1; bit has_vs1 = 1'b1; bit has_vs2 = 1'b1; - bit has_vs3 = 1'b1; + bit has_vs3 = 1'b0; bit has_va_variant = 1'b0; bit is_widening_instr = 1'b0; bit is_narrowing_instr = 1'b0; @@ -37,11 +38,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit is_reduction_instr = 1'b0; bit is_mask_producing_instr = 1'b0; bit is_fp_instr = 1'b0; + bit is_segmented_ls_instr = 1'b0; + bit is_whole_register_ls_instr = 1'b0; int ext_widening_factor = 1; va_variant_t allowed_va_variants[$]; + rand int ls_emul_non_frac; string sub_extension; - rand bit [2:0] nfields; // Used by segmented load/store - rand bit [3:0] emul; + + `uvm_object_utils(riscv_vector_instr) + `uvm_object_new constraint avoid_reserved_vregs_c { if (m_cfg.vector_cfg.reserved_vregs.size() > 0) { @@ -60,7 +65,8 @@ class riscv_vector_instr extends riscv_floating_point_instr; 
  // illegal instruction exception.
  constraint vector_operand_group_c {
    if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vlmul > 0 &&
-       !(instr_name inside {VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F})) {
+       !(instr_name inside {VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F}) &&
+       !(category inside {LOAD, STORE})) {
      vd % m_cfg.vector_cfg.vtype.vlmul == 0;
      vs1 % m_cfg.vector_cfg.vtype.vlmul == 0;
      vs2 % m_cfg.vector_cfg.vtype.vlmul == 0;
@@ -120,7 +126,8 @@
  // instruction encodings are reserved.
  constraint vector_mask_v0_overlap_c {
    if (!vm) {
-      !(group == COMPARE || is_mask_producing_instr || is_reduction_instr) -> (vd != 0);
+      !(category == COMPARE || is_mask_producing_instr || is_reduction_instr) -> (vd != 0);
+      category == STORE -> vs3 != 0;
    }
  }
@@ -150,6 +157,104 @@
    if (instr_name inside {[VMAND_MM : VMXNOR_MM]}) {
      vm == 1'b1;
    }
+    if (is_whole_register_ls_instr) {
+      vm == 1'b1;
+    }
+    if (instr_name inside {VLM_V, VSM_V}) {
+      vm == 1'b1;
+    }
+  }
+
+  // Order in which to solve the load and store constraints
+  constraint load_store_solve_order_c {
+    solve ls_eew before ls_emul_non_frac;
+    solve ls_emul_non_frac before vd;
+    solve ls_emul_non_frac before vs2;
+    solve ls_emul_non_frac before vs3;
+    solve ls_emul_non_frac before nfields;
+  }
+
+  // Section 7.3: Vector Load/Store Width Encoding
+  // Vector loads and stores have an EEW encoded directly in the instruction
+  constraint load_store_eew_c {
+    ls_eew inside {m_cfg.vector_cfg.legal_ls_eew};
+  }
+
+  // Section 7.3: Vector Load/Store Width Encoding
+  // The corresponding EMUL is calculated as EMUL = (EEW/SEW)*LMUL. If the
+  // EMUL would be out of range (EMUL>8 or EMUL<1/8), the instruction encoding
+  // is reserved.
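+  // Worked example (editor's illustration, not part of the original change): with SEW=32 and
+  // LMUL=2, an access with EEW=8 uses EMUL=(8/32)*2=1/2 and one with EEW=64 uses
+  // EMUL=(64/32)*2=4, both inside [1/8, 8]; with SEW=8 and LMUL=4, EEW=64 would need
+  // EMUL=(64/8)*4=32, which is out of range, so that width is excluded from legal_ls_eew
+  // for such a vtype setting.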
+  constraint load_store_emul_c {
+    ls_emul_non_frac == emul_non_frac(ls_eew);
+  }
+
+  // Section 7.3: Vector Load/Store Width Encoding
+  // The vector register groups must have legal register specifiers for the
+  // selected EMUL, otherwise the instruction encoding is reserved
+  constraint load_store_register_alignment_c {
+    if (category inside {LOAD, STORE}) {
+      vs2 % ls_emul_non_frac == 0;
+      if (format inside {VLX_FORMAT, VSX_FORMAT} && !m_cfg.vector_cfg.vtype.fractional_lmul) {
+        vd % m_cfg.vector_cfg.vtype.vlmul == 0;
+        vs3 % m_cfg.vector_cfg.vtype.vlmul == 0;
+      } else {
+        vd % ls_emul_non_frac == 0;
+        vs3 % ls_emul_non_frac == 0;
+      }
+    }
+  }
+
+  // Section 7.8.3: Vector Indexed Segment Loads and Stores
+  // For vector indexed segment loads, the destination vector register groups cannot
+  // overlap the source vector register group (specified by vs2), else the instruction
+  // encoding is reserved
+  constraint load_store_group_overlap_c {
+    if (format == VLX_FORMAT) {
+      if (is_segmented_ls_instr) {
+        // No overlap at all for segmented loads
+        !(vd inside {[vs2 : vs2 + ls_emul_non_frac - 1]});
+        vd < vs2 -> vd + nfields * emul_non_frac(m_cfg.vector_cfg.vtype.vsew) - 1 < vs2;
+      } else {
+        // Partial overlap allowed
+        if (ls_eew < m_cfg.vector_cfg.vtype.vsew && !m_cfg.vector_cfg.vtype.fractional_lmul &&
+            (m_cfg.vector_cfg.vtype.vlmul * ls_eew / m_cfg.vector_cfg.vtype.vsew >= 1)) {
+          // If src_eew < dst_eew and src_emul is not fractional, overlap in highest part of dst
+          !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - ls_emul_non_frac - 1]});
+        } else if (ls_eew < m_cfg.vector_cfg.vtype.vsew) {
+          // If src_eew < dst_eew and src_emul is fractional, no overlap allowed
+          !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]});
+        } else if (ls_eew > m_cfg.vector_cfg.vtype.vsew && !m_cfg.vector_cfg.vtype.fractional_lmul) {
+          // If src_eew > dst_eew, overlap in lowest part of src
+          !(vd inside {[vs2 + ls_emul_non_frac - m_cfg.vector_cfg.vtype.vlmul : vs2 + ls_emul_non_frac - 1]});
+        }
+      }
+    }
+  }
+
+  // Section 7.8: Vector Load/Store Segment Instructions
+  // The EMUL setting must be such that EMUL * NFIELDS <= 8, otherwise the
+  // instruction encoding is reserved.
+  // If the vector register numbers accessed by the segment load or store would
+  // increment past 31, then the instruction encoding is reserved.
+  constraint load_store_nfields_c {
+    if (is_segmented_ls_instr) {
+      nfields inside {[2 : 8]};
+      if (format inside {VLX_FORMAT, VSX_FORMAT}) {
+        nfields * (m_cfg.vector_cfg.vtype.fractional_lmul ? 1 : m_cfg.vector_cfg.vtype.vlmul) <= 8;
+        vd + nfields * (m_cfg.vector_cfg.vtype.fractional_lmul ? 1 : m_cfg.vector_cfg.vtype.vlmul) <= 32;
+        vs3 + nfields * (m_cfg.vector_cfg.vtype.fractional_lmul ? 1 : m_cfg.vector_cfg.vtype.vlmul) <= 32;
+      } else {
+        nfields * ls_emul_non_frac <= 8;
+        nfields * ls_emul_non_frac + vd <= 32;
+        nfields * ls_emul_non_frac + vs3 <= 32;
+      }
+    }
+    // Whole register l/s
+    if (is_whole_register_ls_instr) {
+      nfields inside {1, 2, 4, 8};
+      vd % nfields == 0;
+      vs3 % nfields == 0;
+    }
  }

  // Section 11.3: Vector Integer Extension
@@ -259,74 +364,11 @@
    }
  }

-  // Section 7.8.
Vector Load/Store Segment Instructions - // The LMUL setting must be such that LMUL * NFIELDS <= 8 - // Vector register numbers accessed by the segment load or store would increment - // cannot past 31 - constraint nfields_c { - if (check_sub_extension(sub_extension, "zvlsseg")) { - if (m_cfg.vector_cfg.vtype.vlmul < 8) { - (nfields + 1) * m_cfg.vector_cfg.vtype.vlmul <= 8; - if (category == LOAD) { - vd + nfields <= 31; - } - if (category == STORE) { - vs3 + nfields <= 31; - } - // TODO: Check gcc compile issue with nfields == 0 - nfields > 0; - } else { - nfields == 0; - } - } - } - // Do not use float variants if FP is disabled constraint disable_fp_variant_c { !m_cfg.vector_cfg.enable_fp_support -> !(va_variant inside {VF, WF, VFM}); } - constraint vector_load_store_mask_overlap_c { - // TODO: Check why this is needed? - if (category == STORE) { - (vm == 0) -> (vs3 != 0); - vs2 != vs3; - } - // 7.8.3 For vector indexed segment loads, the destination vector register groups - // cannot overlap the source vectorregister group (specied by vs2), nor can they - // overlap the mask register if masked - // AMO instruction uses indexed address mode - if (format inside {VLX_FORMAT, VAMO_FORMAT}) { - vd != vs2; - } - } - - // load/store EEW/EMUL and corresponding register grouping constraints - constraint load_store_solve_order_c { - solve eew before emul; - solve emul before vd; - solve emul before vs1; - solve emul before vs2; - solve emul before vs3; - } - - constraint load_store_eew_emul_c { - if (category inside {LOAD, STORE, AMO}) { - eew inside {m_cfg.vector_cfg.legal_eew}; - if (eew > m_cfg.vector_cfg.vtype.vsew) { - emul == eew / m_cfg.vector_cfg.vtype.vsew; - } else { - emul == 1; - } - if (emul > 1) { - vd % emul == 0; - vs1 % emul == 0; - vs2 % emul == 0; - vs3 % emul == 0; - } - } - } - // Filter unsupported instructions based on configuration virtual function bit is_supported(riscv_instr_gen_config cfg); string name = instr_name.name(); @@ -409,21 +451,43 @@ class riscv_vector_instr extends riscv_floating_point_instr; end end end + // Check load and stores + if (category inside {LOAD, STORE}) begin + // Requires a legal EEW + if (cfg.vector_cfg.legal_ls_eew.size() == 0 && !is_whole_register_ls_instr && + !(instr_name inside {VLM_V, VSM_V})) begin + return 0; + end + // Segmented l/s need at least two segments + if (is_segmented_ls_instr) begin + if (format inside {VLX_FORMAT, VSX_FORMAT}) begin + if (!cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul == 8) begin + return 0; + end + end else begin + if (int'(real'(cfg.vector_cfg.legal_ls_eew.max().pop_front()) / real'(cfg.vector_cfg.vtype.vsew) * + (cfg.vector_cfg.vtype.fractional_lmul ? 
1.0 / real'(cfg.vector_cfg.vtype.vlmul) : + real'(cfg.vector_cfg.vtype.vlmul))) == 8) begin + return 0; + end + end + end + end return 1'b1; endfunction virtual function string get_instr_name(); string name = super.get_instr_name(); if (category inside {LOAD, STORE}) begin + name = add_nfields(name); // Add eew before ".v" or "ff.v" suffix if (instr_name inside {VLEFF_V, VLSEGEFF_V}) begin name = name.substr(0, name.len() - 5); - name = $sformatf("%0s%0dFF.V", name, eew); - end else begin + name = $sformatf("%0s%0dFF.V", name, ls_eew); + end else if (!(instr_name inside {VLM_V, VSM_V, VSR_V})) begin name = name.substr(0, name.len() - 3); - name = $sformatf("%0s%0d.V", name, eew); + name = $sformatf("%0s%0d.V", name, ls_eew); end - `uvm_info(`gfn, $sformatf("%0s -> %0s", super.get_instr_name(), name), UVM_LOW) end return name; endfunction @@ -487,66 +551,21 @@ class riscv_vector_instr extends riscv_floating_point_instr; end endcase end - VL_FORMAT: begin - if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %s, (%s)", add_nfields(get_instr_name(), "vlseg"), - vd.name(), rs1.name()); - end else begin - asm_str = $sformatf("%0s %s, (%s)", get_instr_name(), vd.name(), rs1.name()); - end - end - VS_FORMAT: begin - if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %s, (%s)", add_nfields(get_instr_name(), "vsseg"), - vs3.name(), rs1.name()); - end else begin - asm_str = $sformatf("%0s %s, (%s)", get_instr_name(), vs3.name(), rs1.name()); - end - end - VLS_FORMAT: begin - if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vlsseg"), - vd.name(), rs1.name(), rs2.name()); - end else begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), - vd.name(), rs1.name(), rs2.name()); - end + VL_FORMAT, + VS_FORMAT, + VLR_FORMAT, + VSR_FORMAT: begin + asm_str = $sformatf("%0s %s, (%s)", get_instr_name(), category == LOAD ? vd.name() : vs3.name(), rs1.name()); end + VLS_FORMAT, VSS_FORMAT: begin - if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vssseg"), - vs3.name(), rs1.name(), rs2.name()); - end else begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), - vs3.name(), rs1.name(), rs2.name()); - end - end - VLX_FORMAT: begin - if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vlxseg"), - vd.name(), rs1.name(), vs2.name()); - end else begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), - vd.name(), rs1.name(), vs2.name()); - end + asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), category == LOAD ? vd.name() : vs3.name(), + rs1.name(), rs2.name()); end + VLX_FORMAT, VSX_FORMAT: begin - if (sub_extension == "zvlsseg") begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", add_nfields(get_instr_name(), "vsxseg"), - vs3.name(), rs1.name(), vs2.name()); - end else begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), - vs3.name(), rs1.name(), vs2.name()); - end - end - VAMO_FORMAT: begin - if (wd) begin - asm_str = $sformatf("%0s %0s,(%0s),%0s,%0s", get_instr_name(), vd.name(), - rs1.name(), vs2.name(), vd.name()); - end else begin - asm_str = $sformatf("%0s x0,(%0s),%0s,%0s", get_instr_name(), - rs1.name(), vs2.name(), vs3.name()); - end + asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), category == LOAD ? 
vd.name() : vs3.name(), + rs1.name(), vs2.name()); end default: begin `uvm_fatal(`gfn, $sformatf("Unsupported format %0s", format.name())) @@ -567,20 +586,11 @@ class riscv_vector_instr extends riscv_floating_point_instr; vs3.rand_mode(has_vs3); vd.rand_mode(has_vd); va_variant.rand_mode(has_va_variant); - if (!(category inside {LOAD, STORE, AMO})) begin - load_store_solve_order_c.constraint_mode(0); - end - // $info("Randomizing for %0s, vd: %0d, vs2: %0d, vs1: %0d", instr_name, vd, vs2, vs2); endfunction : pre_randomize - function void post_randomize(); - super.post_randomize(); - // $info("Randomized for %0s, vd: %0d, vs2: %0d, vs1: %0d", instr_name, vd, vs2, vs2); - endfunction : post_randomize - virtual function void set_rand_mode(); string name = instr_name.name(); - has_rs1 = 1'b1; + has_rs1 = 1'b0; has_rs2 = 1'b0; has_rd = 1'b0; has_fs1 = 1'b0; @@ -588,9 +598,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; has_fs3 = 1'b0; has_fd = 1'b0; has_imm = 1'b0; - if (sub_extension != "zvlsseg") begin - nfields.rand_mode(0); - end if ((name.substr(0, 1) == "VW") || (name.substr(0, 2) == "VFW")) begin is_widening_instr = 1'b1; end @@ -612,6 +619,12 @@ class riscv_vector_instr extends riscv_floating_point_instr; if ((name.substr(0, 1) == "VF" && name != VFIRST_M) || (name.substr(0, 2) == "VMF")) begin is_fp_instr = 1'b1; end + if (!uvm_re_match("V[LS].*SEGE.*_V", name)) begin + is_segmented_ls_instr = 1'b1; + end + if (name inside {"VLRE_V", "VSR_V"}) begin + is_whole_register_ls_instr = 1'b1; + end if (allowed_va_variants.size() > 0) begin has_va_variant = 1'b1; end @@ -624,6 +637,18 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (format == VS2_FORMAT) begin has_vs1 = 1'b0; end + if (category inside {LOAD, STORE}) begin + has_vs1 = 1'b0; + has_vs2 = 1'b0; + has_vs3 = category == STORE; + has_rs1 = 1'b1; + end + if (format inside {VLS_FORMAT, VSS_FORMAT}) begin + has_rs2 = 1'b1; + end + if (format inside {VLX_FORMAT, VSX_FORMAT}) begin + has_vs2 = 1'b1; + end if (name inside {"VCPOP_M", "VFIRST_M", "VMV_X_S"}) begin has_rd = 1'b1; end @@ -649,18 +674,68 @@ class riscv_vector_instr extends riscv_floating_point_instr; end endfunction - function string add_nfields(string instr_name, string prefix); - string suffix = instr_name.substr(prefix.len(), instr_name.len() - 1); - return $sformatf("%0s%0d%0s", prefix, nfields + 1, suffix); + // Add nfields to the name of segmented l/s instructions + function string add_nfields(string instr_name); + string name; + string prefix; + string suffix; + if (is_segmented_ls_instr || is_whole_register_ls_instr) begin + case (format) + VL_FORMAT, + VS_FORMAT: prefix = instr_name.substr(0, 4); + VLS_FORMAT, + VSS_FORMAT: prefix = instr_name.substr(0, 5); + VLX_FORMAT, + VSX_FORMAT: prefix = instr_name.substr(0, 6); + VLR_FORMAT, + VSR_FORMAT: prefix = instr_name.substr(0, 1); + default: ; + endcase + suffix = instr_name.substr(prefix.len(), instr_name.len() - 1); + name = $sformatf("%0s%0d%0s", prefix, nfields, suffix); + end else begin + name = instr_name; + end + return name; endfunction - function string add_eew(string instr_name, string prefix); - string suffix = instr_name.substr(prefix.len(), instr_name.len() - 1); - return $sformatf("%0s%0d%0s", prefix, eew, suffix); + // Effective multiplier used by load and store instructions + // If emul is fractional -> EMUL=1 + // For mask l/s -> EMUL=1 + function int emul_non_frac(int eew); + real emul = real'(eew) / real'(m_cfg.vector_cfg.vtype.vsew) * + 
(m_cfg.vector_cfg.vtype.fractional_lmul ? 1.0 / real'(m_cfg.vector_cfg.vtype.vlmul) : + real'(m_cfg.vector_cfg.vtype.vlmul)); + return emul <= 1.0 || instr_name inside {VLM_V, VSM_V} ? 1 : int'(emul); endfunction - function bit check_sub_extension(string s, string literal); - return s == literal; - endfunction + virtual function void do_copy(uvm_object rhs); + riscv_vector_instr rhs_; + super.copy(rhs); + assert($cast(rhs_, rhs)); + this.vs1 = rhs_.vs1; + this.vs2 = rhs_.vs2; + this.vs3 = rhs_.vs3; + this.vd = rhs_.vd; + this.va_variant = rhs_.va_variant; + this.vm = rhs_.vm; + this.ls_eew = rhs_.ls_eew; + this.nfields = rhs_.nfields; + this.has_vs1 = rhs_.has_vs1; + this.has_vs2 = rhs_.has_vs2; + this.has_vs3 = rhs_.has_vs3; + this.has_vd = rhs_.has_vd; + this.has_va_variant = rhs_.has_va_variant; + this.is_widening_instr = rhs_.is_widening_instr; + this.is_narrowing_instr = rhs_.is_narrowing_instr; + this.is_convert_instr = rhs_.is_convert_instr; + this.is_reduction_instr = rhs_.is_reduction_instr; + this.is_mask_producing_instr = rhs_.is_mask_producing_instr; + this.is_fp_instr = rhs_.is_fp_instr; + this.is_segmented_ls_instr = rhs_.is_segmented_ls_instr; + this.is_whole_register_ls_instr = rhs_.is_whole_register_ls_instr; + this.ext_widening_factor = rhs_.ext_widening_factor; + this.allowed_va_variants = rhs_.allowed_va_variants; + endfunction : do_copy endclass : riscv_vector_instr diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv index 3e5cacea..55217c95 100644 --- a/src/isa/rv32v_instr.sv +++ b/src/isa/rv32v_instr.sv @@ -17,88 +17,32 @@ // 6. Configuration-Setting Instructions `DEFINE_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV) -`DEFINE_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV) +`DEFINE_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV, UIMM) `DEFINE_INSTR(VSETVL, VSET_FORMAT, CSR, RVV) // 7. 
Vector Loads and Stores -`DEFINE_VA_INSTR(VLE8_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLE16_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLE32_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLE64_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSE8_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSE16_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSE32_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSE64_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLM_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSM_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLSE8_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSE16_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSE32_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSE64_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSSE8_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSE16_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSE32_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSE64_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLUXEI8_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLUXEI16_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLUXEI32_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLUXEI64_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXEI8_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXEI16_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXEI32_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXEI64_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSUXEI8_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSUXEI16_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSUXEI32_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSUXEI64_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXEI8_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXEI16_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXEI32_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXEI64_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLE8FF_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLE16FF_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLE32FF_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLE64FF_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSEGE8_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSEGE16_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSEGE32_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSEGE64_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSSEGE8_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSEGE16_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSEGE32_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSEGE64_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLSSEGE8_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSSEGE16_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSSEGE32_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSSEGE64_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSSSEGE8_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSSEGE16_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSSEGE32_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSSSEGE64_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLUXSEGEI8_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLUXSEGEI16_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLUXSEGEI32_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLUXSEGEI64_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXSEGEI8_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXSEGEI16_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXSEGEI32_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXSEGEI64_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSUXSEGEI8_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSUXSEGEI16_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSUXSEGEI32_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSUXSEGEI64_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXSEGEI8_V, VSX_FORMAT, STORE, 
RVV) -`DEFINE_VA_INSTR(VSOXSEGEI16_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXSEGEI32_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXSEGEI64_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLRE8_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLRE16_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLRE32_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLRE64_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSR8_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSR16_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSR32_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSR64_V, VS_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLE_V, VL_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSE_V, VS_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLM_V, VL_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSM_V, VS_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLSE_V, VLS_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSSE_V, VSS_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLUXEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VLOXEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSUXEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VSOXEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLEFF_V, VL_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VLSEGE_V, VL_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSSEGE_V, VS_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLSEGEFF_V, VL_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VLSSEGE_V, VLS_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSSSEGE_V, VSS_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLUXSEGEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VLOXSEGEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSUXSEGEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VSOXSEGEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_VA_INSTR(VLRE_V, VLR_FORMAT, LOAD, RVV) +`DEFINE_VA_INSTR(VSR_V, VSR_FORMAT, STORE, RVV) // 11. Vector Integer Arithmetic Instructions `DEFINE_VA_INSTR(VADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index 87569c45..2da870b9 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -447,84 +447,28 @@ package riscv_instr_pkg; VSETVLI, VSETIVLI, VSETVL, - VLE8_V, - VLE16_V, - VLE32_V, - VLE64_V, - VSE8_V, - VSE16_V, - VSE32_V, - VSE64_V, + VLE_V, + VSE_V, VLM_V, VSM_V, - VLSE8_V, - VLSE16_V, - VLSE32_V, - VLSE64_V, - VSSE8_V, - VSSE16_V, - VSSE32_V, - VSSE64_V, - VLUXEI8_V, - VLUXEI16_V, - VLUXEI32_V, - VLUXEI64_V, - VLOXEI8_V, - VLOXEI16_V, - VLOXEI32_V, - VLOXEI64_V, - VSUXEI8_V, - VSUXEI16_V, - VSUXEI32_V, - VSUXEI64_V, - VSOXEI8_V, - VSOXEI16_V, - VSOXEI32_V, - VSOXEI64_V, - VLE8FF_V, - VLE16FF_V, - VLE32FF_V, - VLE64FF_V, - VLSEGE8_V, - VLSEGE16_V, - VLSEGE32_V, - VLSEGE64_V, - VSSEGE8_V, - VSSEGE16_V, - VSSEGE32_V, - VSSEGE64_V, - VLSSEGE8_V, - VLSSEGE16_V, - VLSSEGE32_V, - VLSSEGE64_V, - VSSSEGE8_V, - VSSSEGE16_V, - VSSSEGE32_V, - VSSSEGE64_V, - VLUXSEGEI8_V, - VLUXSEGEI16_V, - VLUXSEGEI32_V, - VLUXSEGEI64_V, - VLOXSEGEI8_V, - VLOXSEGEI16_V, - VLOXSEGEI32_V, - VLOXSEGEI64_V, - VSUXSEGEI8_V, - VSUXSEGEI16_V, - VSUXSEGEI32_V, - VSUXSEGEI64_V, - VSOXSEGEI8_V, - VSOXSEGEI16_V, - VSOXSEGEI32_V, - VSOXSEGEI64_V, - VLRE8_V, - VLRE16_V, - VLRE32_V, - VLRE64_V, - VSR8_V, - VSR16_V, - VSR32_V, - VSR64_V, + VLSE_V, + VSSE_V, + VLUXEI_V, + VLOXEI_V, + VSUXEI_V, + VSOXEI_V, + VLEFF_V, + VLSEGE_V, + VSSEGE_V, + VLSSEGE_V, + VSSSEGE_V, + VLSEGEFF_V, + VLUXSEGEI_V, + VLOXSEGEI_V, + VSUXSEGEI_V, + VSOXSEGEI_V, + VLRE_V, + VSR_V, VADD, VSUB, VRSUB, @@ -779,6 +723,8 @@ package riscv_instr_pkg; VSX_FORMAT, VLS_FORMAT, VSS_FORMAT, + VLR_FORMAT, + VSR_FORMAT, VAMO_FORMAT } riscv_instr_format_t; From 43a2a668b1af9ce6506a11566000bc4d50caf581 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 21 Nov 2023 14:36:44 +0000 Subject: [PATCH 32/90] Add vset{i}vl{i} instr to list of working vector instr --- src/isa/riscv_vector_instr.sv | 35 ++++++++++++++++++++++++++++++++++- src/isa/rv32v_instr.sv | 6 +++--- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 5d129718..23b9d0bd 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -163,6 +163,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (instr_name inside {VLM_V, VSM_V}) { vm == 1'b1; } + if (format == VSET_FORMAT) { + vm == 1'b1; + } } // Oder to solve load and store constraints in @@ -473,6 +476,13 @@ class riscv_vector_instr extends riscv_floating_point_instr; end end end + // Check vector configuration-setting + if (instr_name == VSETIVLI) begin + // Immediate vsetivli can only be used if VL fits into space of immediate value + if (cfg.vector_cfg.vl >= 2**5) begin + return 0; + end + end return 1'b1; endfunction @@ -496,6 +506,21 @@ class riscv_vector_instr extends riscv_floating_point_instr; virtual function string convert2asm(string prefix = ""); string asm_str; case (format) + VSET_FORMAT: begin + if (instr_name == VSETVL) begin + asm_str = $sformatf("%0s %0s, %0s, %0s", get_instr_name(), rd.name(), rs1.name(), rs2.name()); + end else begin + asm_str = $sformatf("%0s %0s, %0s, e%0d, m%0s%0d, t%0s, m%0s", + get_instr_name(), + rd.name(), + instr_name == VSETIVLI ? get_imm() : rs1.name(), + m_cfg.vector_cfg.vtype.vsew, + m_cfg.vector_cfg.vtype.fractional_lmul ? "f" : "", + m_cfg.vector_cfg.vtype.vlmul, + m_cfg.vector_cfg.vtype.vta ? "a" : "u", + m_cfg.vector_cfg.vtype.vma ? "a" : "u"); + end + end VS2_FORMAT: begin if (instr_name == VID_V) begin asm_str = $sformatf("vid.v %s", vd.name()); @@ -628,7 +653,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (allowed_va_variants.size() > 0) begin has_va_variant = 1'b1; end - // Set the rand mode based on the superset of all VA variants + if (format == VSET_FORMAT) begin + has_vs1 = 1'b0; + has_vs2 = 1'b0; + has_vd = 1'b0; + has_rs1 = name != "VSETIVLI"; + has_rs2 = name == "VSETVL"; + has_rd = 1'b1; + has_imm = name == "VSETIVLI"; + end if (format == VA_FORMAT) begin has_imm = 1'b1; has_rs1 = 1'b1; diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv index 55217c95..53feea3b 100644 --- a/src/isa/rv32v_instr.sv +++ b/src/isa/rv32v_instr.sv @@ -16,9 +16,9 @@ */ // 6. Configuration-Setting Instructions -`DEFINE_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV) -`DEFINE_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV, UIMM) -`DEFINE_INSTR(VSETVL, VSET_FORMAT, CSR, RVV) +`DEFINE_VA_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV) +`DEFINE_VA_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV, {}, UIMM) +`DEFINE_VA_INSTR(VSETVL, VSET_FORMAT, CSR, RVV) // 7. 
Vector Loads and Stores `DEFINE_VA_INSTR(VLE_V, VL_FORMAT, LOAD, RVV) From ac66f93f0211acfb062503b506c1952c17e0789f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 21 Nov 2023 14:39:03 +0000 Subject: [PATCH 33/90] Adapt directed vector load/store test for RVV1.0 --- src/riscv_instr_gen_config.sv | 1 + src/riscv_instr_stream.sv | 103 +++++++++++++-- src/riscv_load_store_instr_lib.sv | 212 +++++++++++++++--------------- src/riscv_vector_cfg.sv | 26 ++++ 4 files changed, 227 insertions(+), 115 deletions(-) diff --git a/src/riscv_instr_gen_config.sv b/src/riscv_instr_gen_config.sv index 1be40e60..5c6a0d49 100644 --- a/src/riscv_instr_gen_config.sv +++ b/src/riscv_instr_gen_config.sv @@ -542,6 +542,7 @@ class riscv_instr_gen_config extends uvm_object; `uvm_field_int(enable_zbc_extension, UVM_DEFAULT) `uvm_field_int(enable_zbs_extension, UVM_DEFAULT) `uvm_field_int(use_push_data_section, UVM_DEFAULT) + `uvm_field_object(vector_cfg, UVM_DEFAULT) `uvm_object_utils_end function new (string name = ""); diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 6b8bc843..20cc0e89 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -297,18 +297,101 @@ class riscv_rand_instr_stream extends riscv_instr_stream; return li_instr; endfunction - function void add_init_vector_gpr_instr(riscv_vreg_t gpr, bit [XLEN-1:0] val); - riscv_vector_instr instr; - $cast(instr, riscv_instr::get_instr(VMV_V_V)); - instr.m_cfg = cfg; - instr.avoid_reserved_vregs_c.constraint_mode(0); - `DV_CHECK_RANDOMIZE_WITH_FATAL(instr, - va_variant == VX; - vd == gpr; + // Initialize a v-register with pre-defined values + // Instructions will be inserted at defined index (-1: random, 0: front, instr_list.size(): back) + function void add_init_vector_gpr(riscv_vreg_t vreg, int unsigned values [], int sew, int idx = instr_list.size()); + riscv_instr init_instr_list [$]; + riscv_vector_instr vinstr; + riscv_instr_gen_config init_cfg; + + // Clone current configuration + init_cfg = new(); + init_cfg.copy(cfg); + + // Set vtype to new vsew and vl to VLMAX + init_cfg.vector_cfg.update_vsew_keep_vl(sew); + $cast(vinstr, riscv_instr::get_instr(VSETVLI)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + rs1 == 0; + rd == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + + // Initialize v-register + if (values.size() == 1) begin + // Load initialize value to x-register + init_instr_list.push_back(get_init_gpr_instr(init_cfg.gpr[0], values[0])); + + // Splatter value to v-register + $cast(vinstr, riscv_instr::get_instr(VMV_V_X)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + vd == vreg; + rs1 == cfg.gpr[0]; + vm == 1'b1; + ) + init_instr_list.push_back(vinstr); + end else begin + int i; + // Load all defined values into v-register + for (i = 0; i < values.size() && i < init_cfg.vector_cfg.vlmax(); i++) begin + // Load initialize value to x-register + init_instr_list.push_back(get_init_gpr_instr(init_cfg.gpr[0], values[i])); + + // Slide down value in v-register + $cast(vinstr, riscv_instr::get_instr(VSLIDE1DOWN)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + vm == 1'b1; + ) + init_instr_list.push_back(vinstr); + end + // Rotate v-register to get init values to correct place + for (; i < 
init_cfg.vector_cfg.vlmax(); i++) begin + // Get first value of v-register + $cast(vinstr, riscv_instr::get_instr(VMV_X_S)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + vs2 == vreg; + rd == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + + // Slide down value in v-register + $cast(vinstr, riscv_instr::get_instr(VSLIDE1DOWN)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + vm == 1'b1; + ) + init_instr_list.push_back(vinstr); + end + end + + // Reset vtype + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], cfg.vector_cfg.vl)); + $cast(vinstr, riscv_instr::get_instr(VSETVLI)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, rs1 == cfg.gpr[0]; + vd == 0; ) - instr_list.push_front(instr); - instr_list.push_front(get_init_gpr_instr(cfg.gpr[0], val)); + init_instr_list.push_back(vinstr); + + // Add instructions to instruction stream + insert_instr_stream(init_instr_list, idx); endfunction endclass diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index cae08731..d96b8ec5 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -521,162 +521,164 @@ endclass class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; + // Types of vector load/store address modes typedef enum {UNIT_STRIDED, STRIDED, INDEXED} address_mode_e; - rand bit [10:0] eew; - rand int unsigned data_page_id; - rand int unsigned num_mixed_instr; - rand int unsigned stride_byte_offset; - rand int unsigned index_addr; + rand int unsigned data_eew; + rand int unsigned index_eew; + rand int unsigned data_page_id; + rand int unsigned data_page_base_offset; + rand int unsigned num_mixed_instr; + rand int byte_stride; + rand int unsigned indexed_byte_offset []; rand address_mode_e address_mode; - rand riscv_reg_t rs1_reg; // Base address - rand riscv_reg_t rs2_reg; // Stride offset - riscv_vreg_t vs2_reg; // Index address + // Base address + rand riscv_reg_t rs1_reg; + // Stride + rand riscv_reg_t rs2_reg; + // Indexes - randomized by instructions + riscv_vreg_t vs2_reg; + // Generated load/store instruction + riscv_vector_instr load_store_instr; + + constraint solve_order_c { + solve data_page_id before data_page_base_offset; + solve address_mode before data_eew; + solve address_mode before index_eew; + solve data_eew before data_page_base_offset; + solve index_eew before data_page_base_offset; + solve data_page_base_offset before byte_stride; + solve data_page_base_offset before indexed_byte_offset; + solve index_eew before indexed_byte_offset; + } - constraint vec_mixed_instr_c { - num_mixed_instr inside {[0:10]}; + // Choose from available data pages + constraint data_page_id_c { + data_page_id < max_data_page_id; + } + + // Find base address inside data page + constraint data_page_base_offset_c { + // Base address needs to be inside page + data_page_base_offset <= data_page[data_page_id].size_in_bytes - 1; + // Base address has to be aligned to data width + data_page_base_offset % (data_eew / 8) == 0; } + // Choose legal EEW for current config constraint eew_c { - eew inside {cfg.vector_cfg.legal_eew}; + if (address_mode != INDEXED) { + data_eew inside {cfg.vector_cfg.legal_ls_eew}; + } else { + data_eew == cfg.vector_cfg.vtype.vsew; + index_eew inside 
{cfg.vector_cfg.legal_ls_eew}; + } } - constraint stride_byte_offset_c { - solve eew before stride_byte_offset; - // Keep a reasonable byte offset range to avoid vector memory address overflow - stride_byte_offset inside {[1 : 128]}; - stride_byte_offset % (eew / 8) == 1; + // How many non-l/s instructions should be interleaved + constraint vec_mixed_instr_c { + num_mixed_instr inside {[0:10]}; } - constraint index_addr_c { - solve eew before index_addr; - // Keep a reasonable index address range to avoid vector memory address overflow - index_addr inside {[0 : 128]}; - index_addr % (eew / 8) == 1; + // Choose a legal byte stride for strided l/s + constraint byte_stride_c { + // Negative strides are allowed + byte_stride * (data_eew / 8) * cfg.vector_cfg.vl inside {[-data_page_base_offset : + data_page[data_page_id].size_in_bytes - data_page_base_offset]}; + // Addresses have to be data width aligned + byte_stride % (data_eew / 8) == 0; } - constraint vec_rs_c { + // Choose legal index byte offsets for every element in vector + constraint index_byte_offset_c { + // We need a byte offset for every element in the vector + indexed_byte_offset.size() == cfg.vector_cfg.vl; + foreach (indexed_byte_offset[i]) { + // Only positive index byte offsets are allowed + // +8 since nfields <= 8 + (indexed_byte_offset[i] + 8) * (data_eew / 8) <= data_page[data_page_id].size_in_bytes - data_page_base_offset; + // Index has to be data width aligned + indexed_byte_offset[i] % (data_eew / 8) == 0; + // Index has to fit into index EEW size + indexed_byte_offset[i] <= 2**index_eew - 1; + } + } + + // Do not use reserved xregs for base address and stride + constraint xreg_source_c { !(rs1_reg inside {cfg.reserved_regs, reserved_rd, ZERO}); !(rs2_reg inside {cfg.reserved_regs, reserved_rd, ZERO}); rs1_reg != rs2_reg; } - constraint vec_data_page_id_c { - data_page_id < max_data_page_id; - } - - int base; - int max_load_store_addr; - riscv_vector_instr load_store_instr; - `uvm_object_utils(riscv_vector_load_store_instr_stream) `uvm_object_new function void post_randomize(); + // Randomize the available registers reserved_rd = {reserved_rd, rs1_reg, rs2_reg}; randomize_avail_regs(); + // Generate a random load/store instruction gen_load_store_instr(); - randomize_addr(); + // Make sure not to overwrite the indexes + cfg.vector_cfg.reserved_vregs = {cfg.vector_cfg.reserved_vregs, vs2_reg}; + // Insert a random-mixed instruction stream add_mixed_instr(num_mixed_instr); - add_rs1_init_la_instr(rs1_reg, data_page_id, base); + // Insert the load/store instruction at a random place in the instruction stream + insert_instr(load_store_instr); + // Insert the load base address instruction + add_rs1_init_la_instr(rs1_reg, data_page_id, data_page_base_offset); if (address_mode == STRIDED) begin - instr_list.push_front(get_init_gpr_instr(rs2_reg, stride_byte_offset)); + // Initialize rs2 with the stride + insert_instr(get_init_gpr_instr(rs2_reg, byte_stride), 0); end else if (address_mode == INDEXED) begin - // TODO: Support different index address for each element - add_init_vector_gpr_instr(vs2_reg, index_addr); + // Initialize vs2 with random/pre-defined indexes + add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); end super.post_randomize(); endfunction - virtual function void randomize_addr(); - int ss = address_span(); - bit success; - - repeat (10) begin - max_load_store_addr = data_page[data_page_id].size_in_bytes - ss; - if (max_load_store_addr >= 0) begin - success = 1'b1; - break; - end - 
`DV_CHECK_STD_RANDOMIZE_WITH_FATAL(data_page_id, data_page_id < max_data_page_id;) - end - - assert (success) else begin - `uvm_fatal(`gfn, $sformatf({"Expected positive value for max_load_store_addr, got %0d.", - " Perhaps more memory needs to be allocated in the data pages for vector loads and stores.", - "\ndata_page_id:%0d\ndata_page[data_page_id].size_in_bytes:%0d\naddress_span:%0d", - "\nstride_bytes:%0d\nVLEN:%0d\nLMUL:%0d\ncfg.vector_cfg.vtype.vsew:%0d\n\n"}, - max_load_store_addr, data_page_id, data_page[data_page_id].size_in_bytes, ss, - stride_bytes(), VLEN, cfg.vector_cfg.vtype.vlmul, cfg.vector_cfg.vtype.vsew)) - end - - `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(base, base inside {[0 : max_load_store_addr]}; - base % eew == 0;) - endfunction - - virtual function int address_span(); - int num_elements = VLEN * cfg.vector_cfg.vtype.vlmul / cfg.vector_cfg.vtype.vsew; - case (address_mode) - UNIT_STRIDED : address_span = num_elements * stride_bytes(); - STRIDED : address_span = num_elements * stride_byte_offset; - INDEXED : address_span = index_addr + num_elements * stride_bytes(); - endcase - endfunction - - virtual function int stride_bytes(); - stride_bytes = eew / 8; - endfunction - - // Generate each load/store instruction + // Generate a load/store instruction virtual function void gen_load_store_instr(); build_allowed_instr(); - randomize_vec_load_store_instr(); - instr_list.push_back(load_store_instr); + randomize_vector_load_store_instr(); endfunction + // Choose allowed load/store instructions for current address mode virtual function void build_allowed_instr(); case (address_mode) UNIT_STRIDED : begin - allowed_instr = {VLE_V, VSE_V, allowed_instr}; - if (cfg.vector_cfg.enable_fault_only_first_load) begin - allowed_instr = {VLEFF_V, allowed_instr}; - end - if (cfg.vector_cfg.enable_zvlsseg) begin - allowed_instr = {VLSEGE_V, VSSEGE_V, allowed_instr}; - if (cfg.vector_cfg.enable_fault_only_first_load) begin - allowed_instr = {VLSEGEFF_V, allowed_instr}; - end - end + allowed_instr = {VLE_V, VSE_V, VLEFF_V, + VLM_V, VSM_V, VLRE_V, VSR_V, + VLSEGE_V, VSSEGE_V, VLSEGEFF_V, + allowed_instr}; end STRIDED : begin - allowed_instr = {VLSE_V, VSSE_V, allowed_instr}; - if (cfg.vector_cfg.enable_zvlsseg) begin - allowed_instr = {VLSSEGE_V, VSSSEGE_V, allowed_instr}; - end + allowed_instr = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V, allowed_instr}; end INDEXED : begin - allowed_instr = {VLXEI_V, VSXEI_V, VSUXEI_V, allowed_instr}; - if (cfg.vector_cfg.enable_zvlsseg) begin - allowed_instr = {VLXSEGEI_V, VSXSEGEI_V, VSUXSEGEI_V, allowed_instr}; - end + allowed_instr = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, + VLUXSEGEI_V, VLOXSEGEI_V, VSUXSEGEI_V, VSOXSEGEI_V, + allowed_instr}; end endcase endfunction - virtual function void randomize_vec_load_store_instr(); + // Randomize the vector load and store instruction + // Constrain to pre-randomized eew, rs1, rs2 + virtual function void randomize_vector_load_store_instr(); $cast(load_store_instr, riscv_instr::get_load_store_instr(allowed_instr)); - load_store_instr.m_cfg = cfg; - load_store_instr.has_rs1 = 0; - load_store_instr.has_vs2 = 1; - load_store_instr.has_imm = 0; + load_store_instr.m_cfg = cfg; + load_store_instr.has_rs1 = 1'b0; + load_store_instr.rs1 = rs1_reg; + load_store_instr.has_rs2 = 1'b0; + load_store_instr.rs2 = rs2_reg; + load_store_instr.ls_eew.rand_mode(0); + load_store_instr.ls_eew = address_mode == INDEXED ? 
index_eew : data_eew; randomize_gpr(load_store_instr); - load_store_instr.rs1 = rs1_reg; - load_store_instr.rs2 = rs2_reg; - load_store_instr.vs2 = vs2_reg; if (address_mode == INDEXED) begin cfg.vector_cfg.reserved_vregs = {load_store_instr.vs2}; - vs2_reg = load_store_instr.vs2; - `uvm_info(`gfn, $sformatf("vs2_reg = v%0d", vs2_reg), UVM_LOW) + vs2_reg = load_store_instr.vs2; end load_store_instr.process_load_store = 0; endfunction diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index c5df58ca..565fe057 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -169,4 +169,30 @@ class riscv_vector_cfg extends uvm_object; end endfunction + // Updates the current vtype with a new desired SEW. VL will stay the same, so + // LMUL will be scaled accordingly. + function void update_vsew_keep_vl(int vsew); + real lmul_o, lmul_n; + lmul_o = vtype.fractional_lmul ? 1.0 / real'(vtype.vlmul) : real'(vtype.vlmul); + lmul_n = lmul_o * real'(vsew) / real'(vtype.vsew); + if (lmul_n < 1.0) begin + vtype.fractional_lmul = 1'b1; + vtype.vlmul = int'(1.0 / lmul_n); + end else begin + vtype.fractional_lmul = 1'b0; + vtype.vlmul = int'(lmul_n); + end + vtype.vsew = vsew; + endfunction + + // Get the vlmax for the current vtype + function int vlmax(); + if (vtype.fractional_lmul) begin + vlmax = VLEN / vtype.vsew / vtype.vlmul; + end else begin + vlmax = VLEN / vtype.vsew * vtype.vlmul; + end + return vlmax; + endfunction + endclass : riscv_vector_cfg From fbdca17e86724dd908347ca1a62fd2a8589d1e5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 22 Nov 2023 08:06:22 +0000 Subject: [PATCH 34/90] Move vfclass instruction to arithmetic group --- src/isa/rv32v_instr.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv index 53feea3b..af33550a 100644 --- a/src/isa/rv32v_instr.sv +++ b/src/isa/rv32v_instr.sv @@ -157,7 +157,7 @@ `DEFINE_VA_INSTR(VMFLE, VA_FORMAT, COMPARE, RVV, {VV, VF}) `DEFINE_VA_INSTR(VMFGT, VA_FORMAT, COMPARE, RVV, {VF}) `DEFINE_VA_INSTR(VMFGE, VA_FORMAT, COMPARE, RVV, {VF}) -`DEFINE_VA_INSTR(VFCLASS_V, VS2_FORMAT, COMPARE, RVV) +`DEFINE_VA_INSTR(VFCLASS_V, VS2_FORMAT, ARITHMETIC, RVV) `DEFINE_VA_INSTR(VFMERGE, VA_FORMAT, ARITHMETIC, RVV, {VFM}) `DEFINE_VA_INSTR(VFMV_V_F, VA_FORMAT, ARITHMETIC, RVV) `DEFINE_VA_INSTR(VFCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) From 943eea2bd916800eef77739246186c73fbc48f45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 22 Nov 2023 08:07:18 +0000 Subject: [PATCH 35/90] Filter out illegal load/stores for current config --- src/riscv_load_store_instr_lib.sv | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index d96b8ec5..42cb076a 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -646,22 +646,32 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Choose allowed load/store instructions for current address mode virtual function void build_allowed_instr(); + riscv_instr_name_t possible_instr[]; + + // Get instructions for selected address mode case (address_mode) UNIT_STRIDED : begin - allowed_instr = {VLE_V, VSE_V, VLEFF_V, - VLM_V, VSM_V, VLRE_V, VSR_V, - VLSEGE_V, VSSEGE_V, VLSEGEFF_V, - allowed_instr}; + possible_instr = {VLE_V, VSE_V, VLEFF_V, + VLM_V, VSM_V, VLRE_V, VSR_V, + VLSEGE_V, VSSEGE_V, VLSEGEFF_V}; end STRIDED : begin - 
allowed_instr = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V, allowed_instr}; + possible_instr = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V}; end INDEXED : begin - allowed_instr = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, - VLUXSEGEI_V, VLOXSEGEI_V, VSUXSEGEI_V, VSOXSEGEI_V, - allowed_instr}; + possible_instr = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, + VLUXSEGEI_V, VLOXSEGEI_V, VSUXSEGEI_V, VSOXSEGEI_V}; end endcase + + // Filter out illegal instructions for current config + foreach (possible_instr[i]) begin + riscv_instr instr_inst; + instr_inst = instr_inst.create_instr(possible_instr[i]); + if (instr_inst.is_supported(cfg)) begin + allowed_instr = {allowed_instr, possible_instr[i]}; + end + end endfunction // Randomize the vector load and store instruction From 16a1ae0f20937e78542454388f11fce16bd8e538 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 22 Nov 2023 08:49:12 +0000 Subject: [PATCH 36/90] Remove support for stale vector amo extension Support for vector amo instructions has been removed from the standard vector extension. The encoding of the instructions also collides with the scalar subword atomic endcoding and no new encoding is currently available. Once there is further progress on Zvamo, the instructions can be added back and updated to the newest version --- src/riscv_amo_instr_lib.sv | 19 ------------------- src/riscv_instr_pkg.sv | 14 +------------- target/rv64gcv/testlist.yaml | 16 ---------------- 3 files changed, 1 insertion(+), 48 deletions(-) diff --git a/src/riscv_amo_instr_lib.sv b/src/riscv_amo_instr_lib.sv index d42d87c1..7eb9d97c 100644 --- a/src/riscv_amo_instr_lib.sv +++ b/src/riscv_amo_instr_lib.sv @@ -209,22 +209,3 @@ class riscv_amo_instr_stream extends riscv_amo_base_instr_stream; endfunction endclass : riscv_amo_instr_stream - - -class riscv_vector_amo_instr_stream extends riscv_vector_load_store_instr_stream; - - constraint amo_address_mode_c { - // AMO operation uses indexed address mode - address_mode == INDEXED; - } - - `uvm_object_utils(riscv_vector_amo_instr_stream) - `uvm_object_new - - virtual function void add_element_vec_load_stores(); - allowed_instr = {VAMOSWAPE_V, VAMOADDE_V, VAMOXORE_V, - VAMOANDE_V, VAMOORE_V, VAMOMINE_V, - VAMOMAXE_V, VAMOMINUE_V, VAMOMAXUE_V, allowed_instr}; - endfunction - -endclass : riscv_vector_amo_instr_stream diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index 2da870b9..be26ffba 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -649,17 +649,6 @@ package riscv_instr_pkg; VMV2R_V, VMV4R_V, VMV8R_V, - // Vector AMO instruction - // EEW vector AMOs - VAMOSWAPE_V, - VAMOADDE_V, - VAMOXORE_V, - VAMOANDE_V, - VAMOORE_V, - VAMOMINE_V, - VAMOMAXE_V, - VAMOMINUE_V, - VAMOMAXUE_V, // Supervisor instruction DRET, MRET, @@ -724,8 +713,7 @@ package riscv_instr_pkg; VLS_FORMAT, VSS_FORMAT, VLR_FORMAT, - VSR_FORMAT, - VAMO_FORMAT + VSR_FORMAT } riscv_instr_format_t; diff --git a/target/rv64gcv/testlist.yaml b/target/rv64gcv/testlist.yaml index 99ab3bfa..af04f06d 100644 --- a/target/rv64gcv/testlist.yaml +++ b/target/rv64gcv/testlist.yaml @@ -154,19 +154,3 @@ iterations: 5 gen_test: riscv_instr_base_test rtl_test: core_base_test - -- test: riscv_vector_amo_test - description: > - Vector AMO random test - gen_opts: > - +instr_cnt=10000 - +num_of_sub_program=0 - +enable_floating_point=1 - +enable_vector_extension=1 - +directed_instr_0=riscv_vector_amo_instr_stream,10 - +no_branch_jump=1 - +boot_mode=m - +no_csr_instr=1 - iterations: 5 - gen_test: riscv_instr_base_test - rtl_test: 
core_base_test From 8bcdcdda9c43f7e8d007e9c932567d3ae5e10789 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 22 Nov 2023 09:09:05 +0000 Subject: [PATCH 37/90] Cleanup vector instruction definition macros --- src/isa/riscv_vector_instr.sv | 1 - src/isa/rv32v_instr.sv | 410 +++++++++++++++++----------------- src/riscv_defines.svh | 9 +- 3 files changed, 209 insertions(+), 211 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 23b9d0bd..1d2f66cc 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -43,7 +43,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; int ext_widening_factor = 1; va_variant_t allowed_va_variants[$]; rand int ls_emul_non_frac; - string sub_extension; `uvm_object_utils(riscv_vector_instr) `uvm_object_new diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv index af33550a..333e8bd7 100644 --- a/src/isa/rv32v_instr.sv +++ b/src/isa/rv32v_instr.sv @@ -16,222 +16,222 @@ */ // 6. Configuration-Setting Instructions -`DEFINE_VA_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV) -`DEFINE_VA_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV, {}, UIMM) -`DEFINE_VA_INSTR(VSETVL, VSET_FORMAT, CSR, RVV) +`DEFINE_V_INSTR(VSETVLI, VSET_FORMAT, CSR, RVV) +`DEFINE_V_INSTR(VSETIVLI, VSET_FORMAT, CSR, RVV, {}, UIMM) +`DEFINE_V_INSTR(VSETVL, VSET_FORMAT, CSR, RVV) // 7. Vector Loads and Stores -`DEFINE_VA_INSTR(VLE_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSE_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLM_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSM_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLSE_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSSE_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLUXEI_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXEI_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSUXEI_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXEI_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLEFF_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSEGE_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSSEGE_V, VS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLSEGEFF_V, VL_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLSSEGE_V, VLS_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSSSEGE_V, VSS_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLUXSEGEI_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VLOXSEGEI_V, VLX_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSUXSEGEI_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VSOXSEGEI_V, VSX_FORMAT, STORE, RVV) -`DEFINE_VA_INSTR(VLRE_V, VLR_FORMAT, LOAD, RVV) -`DEFINE_VA_INSTR(VSR_V, VSR_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLE_V, VL_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSE_V, VS_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLM_V, VL_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSM_V, VS_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLSE_V, VLS_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSSE_V, VSS_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLUXEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VLOXEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSUXEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VSOXEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLEFF_V, VL_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VLSEGE_V, VL_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSSEGE_V, VS_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLSEGEFF_V, VL_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VLSSEGE_V, VLS_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSSSEGE_V, VSS_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VLUXSEGEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VLOXSEGEI_V, VLX_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSUXSEGEI_V, VSX_FORMAT, STORE, RVV) +`DEFINE_V_INSTR(VSOXSEGEI_V, VSX_FORMAT, STORE, RVV) 
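+// (Editor's note, added for clarity and not part of the original patch: the defines above are
+// generic, EEW-less mnemonics; riscv_vector_instr::get_instr_name()/add_nfields() are expected
+// to specialize them when the program is generated, e.g. VLE_V with ls_eew=32 is emitted as
+// "vle32.v", VLSEGE_V with nfields=3 and ls_eew=16 as "vlseg3e16.v", and VLRE_V with nfields=2
+// and ls_eew=8 as "vl2re8.v".)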
+`DEFINE_V_INSTR(VLRE_V, VLR_FORMAT, LOAD, RVV) +`DEFINE_V_INSTR(VSR_V, VSR_FORMAT, STORE, RVV) // 11. Vector Integer Arithmetic Instructions -`DEFINE_VA_INSTR(VADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VRSUB, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}) -`DEFINE_VA_INSTR(VWADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) -`DEFINE_VA_INSTR(VWSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) -`DEFINE_VA_INSTR(VWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) -`DEFINE_VA_INSTR(VWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) -`DEFINE_VA_INSTR(VZEXT_VF2, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VZEXT_VF4, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VZEXT_VF8, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VSEXT_VF2, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VSEXT_VF4, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VSEXT_VF8, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM}) -`DEFINE_VA_INSTR(VMADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM, VV, VX, VI}) -`DEFINE_VA_INSTR(VSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM}) -`DEFINE_VA_INSTR(VMSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VV, VX}) -`DEFINE_VA_INSTR(VAND, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VXOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSLL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) -`DEFINE_VA_INSTR(VSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) -`DEFINE_VA_INSTR(VSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) -`DEFINE_VA_INSTR(VNSRL, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}, UIMM) -`DEFINE_VA_INSTR(VNSRA, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}, UIMM) -`DEFINE_VA_INSTR(VMSEQ, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VMSNE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VMSLTU, VA_FORMAT, COMPARE, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMSLT, VA_FORMAT, COMPARE, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMSLEU, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VMSLE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VMSGTU, VA_FORMAT, COMPARE, RVV, {VX, VI}) -`DEFINE_VA_INSTR(VMSGT, VA_FORMAT, COMPARE, RVV, {VX, VI}) -`DEFINE_VA_INSTR(VMINU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMAXU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMULH, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMULHU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMULHSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VDIVU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VREMU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VREM, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VWMULU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VWMULSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VWMACCU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) 
-`DEFINE_VA_INSTR(VWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VWMACCSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VWMACCUS, VA_FORMAT, ARITHMETIC, RVV, {VX}) -`DEFINE_VA_INSTR(VMERGE, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM}) -`DEFINE_VA_INSTR(VMV_V_V, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMV_V_X, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMV_V_I, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VRSUB, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}) +`DEFINE_V_INSTR(VWADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) +`DEFINE_V_INSTR(VWSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) +`DEFINE_V_INSTR(VWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) +`DEFINE_V_INSTR(VWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, WV, WX}) +`DEFINE_V_INSTR(VZEXT_VF2, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VZEXT_VF4, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VZEXT_VF8, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VSEXT_VF2, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VSEXT_VF4, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VSEXT_VF8, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM}) +`DEFINE_V_INSTR(VMADC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM, VV, VX, VI}) +`DEFINE_V_INSTR(VSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM}) +`DEFINE_V_INSTR(VMSBC, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VV, VX}) +`DEFINE_V_INSTR(VAND, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VXOR, VA_FORMAT, LOGICAL, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VSLL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_V_INSTR(VSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_V_INSTR(VSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_V_INSTR(VNSRL, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}, UIMM) +`DEFINE_V_INSTR(VNSRA, VA_FORMAT, SHIFT, RVV, {WV, WX, WI}, UIMM) +`DEFINE_V_INSTR(VMSEQ, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VMSNE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VMSLTU, VA_FORMAT, COMPARE, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMSLT, VA_FORMAT, COMPARE, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMSLEU, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VMSLE, VA_FORMAT, COMPARE, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VMSGTU, VA_FORMAT, COMPARE, RVV, {VX, VI}) +`DEFINE_V_INSTR(VMSGT, VA_FORMAT, COMPARE, RVV, {VX, VI}) +`DEFINE_V_INSTR(VMINU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMAXU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMULH, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMULHU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMULHSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VDIVU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VREMU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VREM, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMULU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMULSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VNMSAC, 
VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMACCU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMACCSU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VWMACCUS, VA_FORMAT, ARITHMETIC, RVV, {VX}) +`DEFINE_V_INSTR(VMERGE, VA_FORMAT, ARITHMETIC, RVV, {VVM, VXM, VIM}) +`DEFINE_V_INSTR(VMV_V_V, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV_V_X, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV_V_I, VA_FORMAT, ARITHMETIC, RVV) // 12. Vector Fixed-Point Arithmetic Instructions -`DEFINE_VA_INSTR(VSADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) -`DEFINE_VA_INSTR(VSSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VSSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VAADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VAADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VASUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VASUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VSMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) -`DEFINE_VA_INSTR(VSSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) -`DEFINE_VA_INSTR(VSSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) -`DEFINE_VA_INSTR(VNCLIPU, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}, UIMM) -`DEFINE_VA_INSTR(VNCLIP, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}, UIMM) +`DEFINE_V_INSTR(VSADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VSADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}) +`DEFINE_V_INSTR(VSSUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VSSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VAADDU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VAADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VASUBU, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VASUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VSMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VX}) +`DEFINE_V_INSTR(VSSRL, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_V_INSTR(VSSRA, VA_FORMAT, SHIFT, RVV, {VV, VX, VI}, UIMM) +`DEFINE_V_INSTR(VNCLIPU, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}, UIMM) +`DEFINE_V_INSTR(VNCLIP, VA_FORMAT, ARITHMETIC, RVV, {WV, WX, WI}, UIMM) // 13. 
Vector Floating-Point Instructions -`DEFINE_VA_INSTR(VFADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFRSUB, VA_FORMAT, ARITHMETIC, RVV, {VF}) -`DEFINE_VA_INSTR(VFWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF, WV, WF}) -`DEFINE_VA_INSTR(VFWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF, WV, WF}) -`DEFINE_VA_INSTR(VFMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFRDIV, VA_FORMAT, ARITHMETIC, RVV, {VF}) -`DEFINE_VA_INSTR(VFWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFNMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFWNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFWMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFWNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFSQRT_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFRSQRT7_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFREC7_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFSGNJ, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFSGNJN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VFSGNJX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VMFEQ, VA_FORMAT, COMPARE, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VMFNE, VA_FORMAT, COMPARE, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VMFLT, VA_FORMAT, COMPARE, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VMFLE, VA_FORMAT, COMPARE, RVV, {VV, VF}) -`DEFINE_VA_INSTR(VMFGT, VA_FORMAT, COMPARE, RVV, {VF}) -`DEFINE_VA_INSTR(VMFGE, VA_FORMAT, COMPARE, RVV, {VF}) -`DEFINE_VA_INSTR(VFCLASS_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFMERGE, VA_FORMAT, ARITHMETIC, RVV, {VFM}) -`DEFINE_VA_INSTR(VFMV_V_F, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFCVT_RTZ_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFCVT_RTZ_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_RTZ_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_RTZ_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWCVT_F_F_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_X_F_W, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_RTZ_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_RTZ_X_F_W, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_F_XU_W, VS2_FORMAT, ARITHMETIC, RVV) 
-`DEFINE_VA_INSTR(VFNCVT_F_X_W, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_F_F_W, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFNCVT_ROD_F_F_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFRSUB, VA_FORMAT, ARITHMETIC, RVV, {VF}) +`DEFINE_V_INSTR(VFWADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF, WV, WF}) +`DEFINE_V_INSTR(VFWSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF, WV, WF}) +`DEFINE_V_INSTR(VFMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFDIV, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFRDIV, VA_FORMAT, ARITHMETIC, RVV, {VF}) +`DEFINE_V_INSTR(VFWMUL, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFNMADD, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFNMSUB, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFWMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFWNMACC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFWMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFWNMSAC, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFSQRT_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFRSQRT7_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFREC7_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFMIN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFMAX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFSGNJ, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFSGNJN, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VFSGNJX, VA_FORMAT, ARITHMETIC, RVV, {VV, VF}) +`DEFINE_V_INSTR(VMFEQ, VA_FORMAT, COMPARE, RVV, {VV, VF}) +`DEFINE_V_INSTR(VMFNE, VA_FORMAT, COMPARE, RVV, {VV, VF}) +`DEFINE_V_INSTR(VMFLT, VA_FORMAT, COMPARE, RVV, {VV, VF}) +`DEFINE_V_INSTR(VMFLE, VA_FORMAT, COMPARE, RVV, {VV, VF}) +`DEFINE_V_INSTR(VMFGT, VA_FORMAT, COMPARE, RVV, {VF}) +`DEFINE_V_INSTR(VMFGE, VA_FORMAT, COMPARE, RVV, {VF}) +`DEFINE_V_INSTR(VFCLASS_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFMERGE, VA_FORMAT, ARITHMETIC, RVV, {VFM}) +`DEFINE_V_INSTR(VFMV_V_F, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFCVT_RTZ_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFCVT_RTZ_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_RTZ_XU_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_RTZ_X_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_F_XU_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_F_X_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWCVT_F_F_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_X_F_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_RTZ_XU_F_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_RTZ_X_F_W, 
VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_F_XU_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_F_X_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_F_F_W, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFNCVT_ROD_F_F_W, VS2_FORMAT, ARITHMETIC, RVV) // 14. Vector Reduction Instructions -`DEFINE_VA_INSTR(VREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDMAXU_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDMAX_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDMINU_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDMIN_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDAND_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDOR_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VREDXOR_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VWREDSUMU_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VWREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFREDOSUM_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFREDUSUM_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFREDMAX_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFREDMIN_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWREDOSUM_VS, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFWREDUSUM_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDMAXU_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDMAX_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDMINU_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDMIN_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDAND_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDOR_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VREDXOR_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VWREDSUMU_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VWREDSUM_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFREDOSUM_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFREDUSUM_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFREDMAX_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFREDMIN_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWREDOSUM_VS, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFWREDUSUM_VS, VA_FORMAT, ARITHMETIC, RVV) // 15. 
Vector Mask Instructions -`DEFINE_VA_INSTR(VMAND_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMNAND_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMANDN_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMXOR_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMOR_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMNOR_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMORN_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VMXNOR_MM, VA_FORMAT, LOGICAL, RVV) -`DEFINE_VA_INSTR(VCPOP_M, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFIRST_M, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMSBF_M, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMSIF_M, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMSOF_M, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VIOTA_M, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VID_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMAND_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMNAND_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMANDN_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMXOR_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMOR_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMNOR_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMORN_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VMXNOR_MM, VA_FORMAT, LOGICAL, RVV) +`DEFINE_V_INSTR(VCPOP_M, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFIRST_M, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMSBF_M, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMSIF_M, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMSOF_M, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VIOTA_M, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VID_V, VS2_FORMAT, ARITHMETIC, RVV) // 16. Vector Permutation Instructions -`DEFINE_VA_INSTR(VMV_X_S, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMV_S_X, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFMV_F_S, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VFMV_S_F, VA_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VSLIDEUP, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}, UIMM) -`DEFINE_VA_INSTR(VSLIDEDOWN, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}, UIMM) -`DEFINE_VA_INSTR(VSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VX}) -`DEFINE_VA_INSTR(VSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VX}) -`DEFINE_VA_INSTR(VFSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VF}) -`DEFINE_VA_INSTR(VFSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VF}) -`DEFINE_VA_INSTR(VRGATHER, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}, UIMM) -`DEFINE_VA_INSTR(VRGATHEREI16, VA_FORMAT, ARITHMETIC, RVV, {VV}) -`DEFINE_VA_INSTR(VCOMPRESS, VA_FORMAT, ARITHMETIC, RVV, {VM}) -`DEFINE_VA_INSTR(VMV1R_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMV2R_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMV4R_V, VS2_FORMAT, ARITHMETIC, RVV) -`DEFINE_VA_INSTR(VMV8R_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV_X_S, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV_S_X, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFMV_F_S, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VFMV_S_F, VA_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VSLIDEUP, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}, UIMM) +`DEFINE_V_INSTR(VSLIDEDOWN, VA_FORMAT, ARITHMETIC, RVV, {VX, VI}, UIMM) +`DEFINE_V_INSTR(VSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VX}) +`DEFINE_V_INSTR(VSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VX}) +`DEFINE_V_INSTR(VFSLIDE1UP, VA_FORMAT, ARITHMETIC, RVV, {VF}) +`DEFINE_V_INSTR(VFSLIDE1DOWN, VA_FORMAT, ARITHMETIC, RVV, {VF}) +`DEFINE_V_INSTR(VRGATHER, VA_FORMAT, ARITHMETIC, RVV, {VV, VX, VI}, UIMM) +`DEFINE_V_INSTR(VRGATHEREI16, VA_FORMAT, ARITHMETIC, RVV, {VV}) +`DEFINE_V_INSTR(VCOMPRESS, VA_FORMAT, ARITHMETIC, RVV, 
{VM}) +`DEFINE_V_INSTR(VMV1R_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV2R_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV4R_V, VS2_FORMAT, ARITHMETIC, RVV) +`DEFINE_V_INSTR(VMV8R_V, VS2_FORMAT, ARITHMETIC, RVV) diff --git a/src/riscv_defines.svh b/src/riscv_defines.svh index af87c610..d08050e9 100644 --- a/src/riscv_defines.svh +++ b/src/riscv_defines.svh @@ -48,7 +48,7 @@ endfunction \ endclass - `define VA_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, imm_tp, ext = "") \ + `define V_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, imm_tp) \ static bit valid = riscv_instr::register(instr_n); \ `uvm_object_utils(riscv_``instr_n``_instr) \ function new(string name = ""); \ @@ -59,7 +59,6 @@ this.category = ``instr_category; \ this.imm_type = ``imm_tp; \ this.allowed_va_variants = ``vav; \ - this.sub_extension = ``ext; \ set_imm_len(); \ set_rand_mode(); \ endfunction \ @@ -95,10 +94,10 @@ class riscv_``instr_n``_instr extends riscv_floating_point_instr; \ `INSTR_BODY(instr_n, instr_format, instr_category, instr_group, imm_tp) -// Vector arithmetic instruction -`define DEFINE_VA_INSTR(instr_n, instr_format, instr_category, instr_group, vav = {}, imm_tp = IMM, ext = "")\ +// Vector instruction +`define DEFINE_V_INSTR(instr_n, instr_format, instr_category, instr_group, vav = {}, imm_tp = IMM)\ class riscv_``instr_n``_instr extends riscv_vector_instr; \ - `VA_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, imm_tp, ext) + `V_INSTR_BODY(instr_n, instr_format, instr_category, instr_group, vav, imm_tp) // Custom extension instruction `define DEFINE_CUSTOM_INSTR(instr_n, instr_format, instr_category, instr_group, imm_tp = IMM) \ From 7f0f570e4155b4980a53d25fa9d713beb40a9070 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 22 Nov 2023 09:22:59 +0000 Subject: [PATCH 38/90] Cleanup vector instr and config files --- src/isa/riscv_vector_instr.sv | 3 +++ src/riscv_vector_cfg.sv | 33 +++++++++++++-------------------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 1d2f66cc..c3b40a3e 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -47,12 +47,14 @@ class riscv_vector_instr extends riscv_floating_point_instr; `uvm_object_utils(riscv_vector_instr) `uvm_object_new + // Make sure that reserved vregs are not overwritten constraint avoid_reserved_vregs_c { if (m_cfg.vector_cfg.reserved_vregs.size() > 0) { !(vd inside {m_cfg.vector_cfg.reserved_vregs}); } } + // Select a vector arithmetic variant constraint va_variant_c { if (has_va_variant) { va_variant inside {allowed_va_variants}; @@ -768,6 +770,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; this.is_whole_register_ls_instr = rhs_.is_whole_register_ls_instr; this.ext_widening_factor = rhs_.ext_widening_factor; this.allowed_va_variants = rhs_.allowed_va_variants; + this.ls_emul_non_frac = rhs_.ls_emul_non_frac; endfunction : do_copy endclass : riscv_vector_instr diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 565fe057..c4106e4b 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -41,27 +41,22 @@ class riscv_vector_cfg extends uvm_object; // on current SEW and LMUL setting int unsigned legal_ls_eew[$]; - // Allow vector narrowing or widening instructions. 
- rand bit vec_narrowing_widening; - - rand bit allow_illegal_vec_instr; - constraint allow_illegal_vec_instr_c {soft allow_illegal_vec_instr == 0;} - - // Enable fault only first load ops - rand bit enable_fault_only_first_load; - - constraint legal_c { + constraint solve_order_c { solve vtype before vl; solve vl before vstart; - vstart inside {[0:vl]}; - vl inside {[0:VLEN/vtype.vsew]}; } - // Basic constraint for initial bringup - constraint bringup_c { - vstart == 0; + // vl has to be within VLMAX + constraint vl_c { + vl inside {[0 : vlmax()]}; + } + + // vstart has to be within vl + constraint vstart_c { + vstart inside {[0 : vl]}; } + // Select valid vlmul constraint vlmul_c { vtype.vlmul inside {1, 2, 4, 8}; vtype.fractional_lmul -> vtype.vlmul != 1; @@ -71,6 +66,7 @@ class riscv_vector_cfg extends uvm_object; vtype.fractional_lmul -> vtype.vlmul <= max_int_sew / 8; } + // Set current element width constraint vsew_c { vtype.vsew inside {8, 16, 32, 64}; vtype.vsew <= max_int_sew; @@ -83,11 +79,11 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(vtype.vsew, UVM_DEFAULT) `uvm_field_int(vtype.vlmul, UVM_DEFAULT) `uvm_field_int(vtype.fractional_lmul, UVM_DEFAULT) - `uvm_field_queue_int(legal_ls_eew, UVM_DEFAULT) `uvm_field_int(vl, UVM_DEFAULT) `uvm_field_int(vstart, UVM_DEFAULT) `uvm_field_enum(vxrm_t,vxrm, UVM_DEFAULT) `uvm_field_int(vxsat, UVM_DEFAULT) + `uvm_field_queue_enum(riscv_vreg_t, reserved_vregs, UVM_DEFAULT) `uvm_field_string(zve_extension, UVM_DEFAULT) `uvm_field_int(enable_fp_support, UVM_DEFAULT) `uvm_field_int(max_int_sew, UVM_DEFAULT) @@ -95,14 +91,11 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(enable_zvfhmin_extension, UVM_DEFAULT) `uvm_field_int(enable_zvfh_extension, UVM_DEFAULT) `uvm_field_int(min_fp_sew, UVM_DEFAULT) - `uvm_field_int(enable_fault_only_first_load, UVM_DEFAULT) + `uvm_field_queue_int(legal_ls_eew, UVM_DEFAULT) `uvm_object_utils_end function new (string name = ""); super.new(name); - if ($value$plusargs("enable_fault_only_first_load=%0d", enable_fault_only_first_load)) begin - enable_fault_only_first_load.rand_mode(0); - end // Check for Zve* extension if ($value$plusargs("zve_extension=%0s", zve_extension)) begin int minimum_vlen; From 77a9f5b9e3bc0ad36bb44b0c2bd03107e0bba953 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 22 Nov 2023 10:09:01 +0000 Subject: [PATCH 39/90] Align vector instruction assembly formating to other instrs --- src/isa/riscv_vector_instr.sv | 64 +++++++++++++++++------------------ src/riscv_instr_pkg.sv | 2 +- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index c3b40a3e..d02d25c8 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -500,19 +500,23 @@ class riscv_vector_instr extends riscv_floating_point_instr; name = $sformatf("%0s%0d.V", name, ls_eew); end end + if (has_va_variant) begin + name = $sformatf("%0s.%0s", name, va_variant.name()); + end return name; endfunction // Convert the instruction to assembly code virtual function string convert2asm(string prefix = ""); string asm_str; + asm_str = format_string(get_instr_name(), MAX_INSTR_STR_LEN); case (format) VSET_FORMAT: begin if (instr_name == VSETVL) begin - asm_str = $sformatf("%0s %0s, %0s, %0s", get_instr_name(), rd.name(), rs1.name(), rs2.name()); + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, rd.name(), rs1.name(), rs2.name()); end else begin - asm_str = $sformatf("%0s %0s, %0s, e%0d, m%0s%0d, 
t%0s, m%0s", - get_instr_name(), + asm_str = $sformatf("%0s%0s, %0s, e%0d, m%0s%0d, t%0s, m%0s", + asm_str, rd.name(), instr_name == VSETIVLI ? get_imm() : rs1.name(), m_cfg.vector_cfg.vtype.vsew, @@ -524,52 +528,48 @@ class riscv_vector_instr extends riscv_floating_point_instr; end VS2_FORMAT: begin if (instr_name == VID_V) begin - asm_str = $sformatf("vid.v %s", vd.name()); + asm_str = $sformatf("%0s%0s", asm_str, vd.name()); end else if (instr_name inside {VCPOP_M, VFIRST_M}) begin - asm_str = $sformatf("%0s %0s, %0s", get_instr_name(), rd.name(), vs2.name()); + asm_str = $sformatf("%0s%0s, %0s", asm_str, rd.name(), vs2.name()); end else begin - asm_str = $sformatf("%0s %0s, %0s", get_instr_name(), vd.name(), vs2.name()); + asm_str = $sformatf("%0s%0s, %0s", asm_str, vd.name(), vs2.name()); end end VA_FORMAT: begin case (instr_name) - VMV_V_V: asm_str = $sformatf("vmv.v.v %s, %s", vd.name(), vs1.name()); - VMV_V_X: asm_str = $sformatf("vmv.v.x %s, %s", vd.name(), rs1.name()); - VMV_V_I: asm_str = $sformatf("vmv.v.i %s, %s", vd.name(), get_imm()); - VFMV_V_F: asm_str = $sformatf("vfmv.v.f %s, %s", vd.name(), fs1.name()); - VMV_X_S: asm_str = $sformatf("vmv.x.s %s, %s", rd.name(), vs2.name()); - VMV_S_X: asm_str = $sformatf("vmv.s.x %s, %s", vd.name(), rs1.name()); - VFMV_F_S: asm_str = $sformatf("vfmv.f.s %s, %s", fd.name(), vs2.name()); - VFMV_S_F: asm_str = $sformatf("vfmv.s.f %s, %s", vd.name(), fs1.name()); + VMV_V_V: asm_str = $sformatf("%0s%0s, %0s", asm_str, vd.name(), vs1.name()); + VMV_V_X, + VMV_S_X: asm_str = $sformatf("%0s%0s, %0s", asm_str, vd.name(), rs1.name()); + VMV_V_I: asm_str = $sformatf("%0s%0s, %0s", asm_str, vd.name(), get_imm()); + VFMV_V_F, + VFMV_S_F: asm_str = $sformatf("%0s%0s, %0s", asm_str, vd.name(), fs1.name()); + VMV_X_S: asm_str = $sformatf("%0s%0s, %0s", asm_str, rd.name(), vs2.name()); + VFMV_F_S: asm_str = $sformatf("%0s%0s, %0s", asm_str, fd.name(), vs2.name()); default: begin if (!has_va_variant) begin - asm_str = $sformatf("%0s ", get_instr_name()); - asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), vs1.name())}; + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), vs2.name(), vs1.name()); end else begin - asm_str = $sformatf("%0s.%0s ", get_instr_name(), va_variant.name()); - asm_str = format_string(asm_str, MAX_INSTR_STR_LEN); case (va_variant) inside WV, VV, VVM, VM: begin - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), vs1.name())}; + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), vs2.name(), vs1.name()); end WI, VI, VIM: begin - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), get_imm())}; + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), vs2.name(), get_imm()); end WF, VF, VFM: begin if (instr_name inside {VFMADD, VFNMADD, VFMACC, VFNMACC, VFNMSUB, VFWNMSAC, - VFWMACC, VFMSUB, VFMSAC, VFNMSAC, VFWNMACC, VFWMSAC}) begin - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), fs1.name(), vs2.name())}; + VFWMACC, VFMSUB, VFMSAC, VFNMSAC, VFWNMACC, VFWMSAC}) begin + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), fs1.name(), vs2.name()); end else begin - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), fs1.name())}; + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), vs2.name(), fs1.name()); end end WX, VX, VXM: begin if (instr_name inside {VMADD, VNMSUB, VMACC, VNMSAC, VWMACCSU, VWMACCU, - VWMACCUS, VWMACC}) begin - asm_str = {asm_str, 
$sformatf("%0s, %0s, %0s", vd.name(), rs1.name(), vs2.name())}; + VWMACCUS, VWMACC}) begin + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), rs1.name(), vs2.name()); end else begin - asm_str = {asm_str, $sformatf("%0s, %0s, %0s", vd.name(), vs2.name(), rs1.name())}; + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, vd.name(), vs2.name(), rs1.name()); end end endcase @@ -581,17 +581,17 @@ class riscv_vector_instr extends riscv_floating_point_instr; VS_FORMAT, VLR_FORMAT, VSR_FORMAT: begin - asm_str = $sformatf("%0s %s, (%s)", get_instr_name(), category == LOAD ? vd.name() : vs3.name(), rs1.name()); + asm_str = $sformatf("%0s%0s, (%0s)", asm_str, category == LOAD ? vd.name() : vs3.name(), rs1.name()); end VLS_FORMAT, VSS_FORMAT: begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), category == LOAD ? vd.name() : vs3.name(), - rs1.name(), rs2.name()); + asm_str = $sformatf("%0s%0s, (%0s), %0s", asm_str, category == LOAD ? vd.name() : vs3.name(), + rs1.name(), rs2.name()); end VLX_FORMAT, VSX_FORMAT: begin - asm_str = $sformatf("%0s %0s, (%0s), %0s", get_instr_name(), category == LOAD ? vd.name() : vs3.name(), - rs1.name(), vs2.name()); + asm_str = $sformatf("%0s%0s, (%0s), %0s", asm_str, category == LOAD ? vd.name() : vs3.name(), + rs1.name(), vs2.name()); end default: begin `uvm_fatal(`gfn, $sformatf("Unsupported format %0s", format.name())) diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index be26ffba..f244f7e1 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -1336,7 +1336,7 @@ package riscv_instr_pkg; parameter int DATA_WIDTH = 32; // Parameters for output assembly program formatting - parameter int MAX_INSTR_STR_LEN = 13; + parameter int MAX_INSTR_STR_LEN = 18; parameter int LABEL_STR_LEN = 18; // Parameter for program generation From 4122f228efeea6f76b58ab2b408ef07c862123fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 24 Nov 2023 10:25:50 +0000 Subject: [PATCH 40/90] Take lmul into account for reserved vregs --- src/isa/riscv_vector_instr.sv | 41 ++++++++++++++++++++----------- src/riscv_instr_stream.sv | 1 + src/riscv_load_store_instr_lib.sv | 13 +++++++--- 3 files changed, 36 insertions(+), 19 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index d02d25c8..86582dc5 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -41,6 +41,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit is_segmented_ls_instr = 1'b0; bit is_whole_register_ls_instr = 1'b0; int ext_widening_factor = 1; + int whole_register_move_cnt = 1; va_variant_t allowed_va_variants[$]; rand int ls_emul_non_frac; @@ -50,7 +51,21 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Make sure that reserved vregs are not overwritten constraint avoid_reserved_vregs_c { if (m_cfg.vector_cfg.reserved_vregs.size() > 0) { - !(vd inside {m_cfg.vector_cfg.reserved_vregs}); + foreach (m_cfg.vector_cfg.reserved_vregs[i]) { + if (is_widening_instr && !m_cfg.vector_cfg.vtype.fractional_lmul) { + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul * 2 - 1]}); + } else if (instr_name inside {VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V}) { + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + whole_register_move_cnt - 1]}); + } else if (group inside {LOAD, STORE}) { + if (format inside {VLX_FORMAT, VSX_FORMAT}) { + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vlmul) * nfields - 
1]}); + } else { + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(ls_eew) * nfields - 1]}); + } + } else { + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vlmul) - 1]}); + } + } } } @@ -252,12 +267,12 @@ class riscv_vector_instr extends riscv_floating_point_instr; nfields * ls_emul_non_frac + vd <= 32; nfields * ls_emul_non_frac + vs3 <= 32; } - } - // Whole register l/s - if (is_whole_register_ls_instr) { + } else if (is_whole_register_ls_instr) { nfields inside {1, 2, 4, 8}; vd % nfields == 0; vs3 % nfields == 0; + } else { + nfields == 1; } } @@ -354,17 +369,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; // The source and destination vector register numbers must be aligned appropriately for // the vector register group size, and encodings with other vector register numbers are reserved constraint vector_vmvxr_c { - if (instr_name == VMV2R_V) { - vs2 % 2 == 0; - vd % 2 == 0; - } - if (instr_name == VMV4R_V) { - vs2 % 4 == 0; - vd % 4 == 0; - } - if (instr_name == VMV8R_V) { - vs2 % 8 == 0; - vd % 8 == 0; + if (instr_name inside {VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V}) { + vs2 % whole_register_move_cnt == 0; + vd % whole_register_move_cnt == 0; } } @@ -645,6 +652,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; if ((name.substr(0, 1) == "VF" && name != VFIRST_M) || (name.substr(0, 2) == "VMF")) begin is_fp_instr = 1'b1; end + if (instr_name inside {VMV2R_V, VMV4R_V, VMV8R_V}) begin + whole_register_move_cnt = instr_name.name().substr(3, 3).atoi(); + end if (!uvm_re_match("V[LS].*SEGE.*_V", name)) begin is_segmented_ls_instr = 1'b1; end @@ -769,6 +779,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; this.is_segmented_ls_instr = rhs_.is_segmented_ls_instr; this.is_whole_register_ls_instr = rhs_.is_whole_register_ls_instr; this.ext_widening_factor = rhs_.ext_widening_factor; + this.whole_register_move_cnt = rhs_.whole_register_move_cnt; this.allowed_va_variants = rhs_.allowed_va_variants; this.ls_emul_non_frac = rhs_.ls_emul_non_frac; endfunction : do_copy diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 20cc0e89..c449e5c3 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -386,6 +386,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; vinstr.m_cfg = cfg; `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, rs1 == cfg.gpr[0]; + !(rd inside {cfg.reserved_regs, reserved_rd}); vd == 0; ) init_instr_list.push_back(vinstr); diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 42cb076a..84e5e713 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -620,8 +620,6 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; randomize_avail_regs(); // Generate a random load/store instruction gen_load_store_instr(); - // Make sure not to overwrite the indexes - cfg.vector_cfg.reserved_vregs = {cfg.vector_cfg.reserved_vregs, vs2_reg}; // Insert a random-mixed instruction stream add_mixed_instr(num_mixed_instr); // Insert the load/store instruction at a random place in the instruction stream @@ -632,6 +630,10 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Initialize rs2 with the stride insert_instr(get_init_gpr_instr(rs2_reg, byte_stride), 0); end else if (address_mode == INDEXED) begin + // Unreserve index vector registers + for (int i = 0; i < load_store_instr.emul_non_frac(index_eew); i++) begin + 
cfg.vector_cfg.reserved_vregs.pop_back(); + end // Initialize vs2 with random/pre-defined indexes add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); end @@ -687,8 +689,11 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; load_store_instr.ls_eew = address_mode == INDEXED ? index_eew : data_eew; randomize_gpr(load_store_instr); if (address_mode == INDEXED) begin - cfg.vector_cfg.reserved_vregs = {load_store_instr.vs2}; - vs2_reg = load_store_instr.vs2; + vs2_reg = load_store_instr.vs2; + // Make sure that indexes are not overwritten + for (int i = 0; i < load_store_instr.emul_non_frac(index_eew); i++) begin + cfg.vector_cfg.reserved_vregs.push_back(riscv_vreg_t'(vs2_reg + i)); + end end load_store_instr.process_load_store = 0; endfunction From 9ca2a5f277cea34dbd5657c1cfc6665fa39b0960 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 24 Nov 2023 10:27:34 +0000 Subject: [PATCH 41/90] Fix unsupported statements by VCS --- src/isa/riscv_vector_instr.sv | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 86582dc5..27d61757 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -476,7 +476,8 @@ class riscv_vector_instr extends riscv_floating_point_instr; return 0; end end else begin - if (int'(real'(cfg.vector_cfg.legal_ls_eew.max().pop_front()) / real'(cfg.vector_cfg.vtype.vsew) * + int unsigned max_eew [$] = cfg.vector_cfg.legal_ls_eew.max(); + if (int'(real'(max_eew.pop_front()) / real'(cfg.vector_cfg.vtype.vsew) * (cfg.vector_cfg.vtype.fractional_lmul ? 1.0 / real'(cfg.vector_cfg.vtype.vlmul) : real'(cfg.vector_cfg.vtype.vlmul))) == 8) begin return 0; @@ -634,28 +635,28 @@ class riscv_vector_instr extends riscv_floating_point_instr; if ((name.substr(0, 1) == "VW") || (name.substr(0, 2) == "VFW")) begin is_widening_instr = 1'b1; end - if (!uvm_re_match("V[SZ]EXT_VF[248]", name)) begin + if (uvm_is_match("V?EXT_VF?", name)) begin ext_widening_factor = name.substr(name.len()-1, name.len()-1).atoi(); end - if ((name.substr(0, 1) == "VN") || !uvm_re_match("VFN.*_W", name)) begin + if ((name.substr(0, 1) == "VN") || uvm_is_match("VFN*_W", name)) begin is_narrowing_instr = 1'b1; end - if (!uvm_re_match("VF[NW]?CVT_.*", name)) begin + if (uvm_is_match("VF*CVT_*", name)) begin is_convert_instr = 1'b1; end - if (!uvm_re_match("VF?RED.*", name)) begin + if (uvm_is_match("*RED*", name)) begin is_reduction_instr = 1'b1; end - if (!uvm_re_match("VM.*_MM?", name)) begin + if (uvm_is_match("VM*_M*", name)) begin is_mask_producing_instr = 1'b1; end - if ((name.substr(0, 1) == "VF" && name != VFIRST_M) || (name.substr(0, 2) == "VMF")) begin + if ((name.substr(0, 1) == "VF" && name != "VFIRST_M") || (name.substr(0, 2) == "VMF")) begin is_fp_instr = 1'b1; end if (instr_name inside {VMV2R_V, VMV4R_V, VMV8R_V}) begin whole_register_move_cnt = instr_name.name().substr(3, 3).atoi(); end - if (!uvm_re_match("V[LS].*SEGE.*_V", name)) begin + if (uvm_is_match("V*SEGE*_V", name)) begin is_segmented_ls_instr = 1'b1; end if (name inside {"VLRE_V", "VSR_V"}) begin From 3f4c276b24e721aa96c71f9f28c31283acddf5c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 24 Nov 2023 10:29:04 +0000 Subject: [PATCH 42/90] Check that instruction is supported when inserting mixed instr --- src/riscv_directed_instr_lib.sv | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git 
a/src/riscv_directed_instr_lib.sv b/src/riscv_directed_instr_lib.sv index f3bc7080..523f3275 100644 --- a/src/riscv_directed_instr_lib.sv +++ b/src/riscv_directed_instr_lib.sv @@ -82,12 +82,16 @@ class riscv_mem_access_stream extends riscv_directed_instr_stream; // Insert some other instructions to mix with mem_access instruction virtual function void add_mixed_instr(int instr_cnt); - riscv_instr instr; + riscv_instr instr; + int i = 0; setup_allowed_instr(1, 1); - for(int i = 0; i < instr_cnt; i ++) begin + while (i < instr_cnt) begin instr = riscv_instr::type_id::create("instr"); randomize_instr(instr); - insert_instr(instr); + if (instr.is_supported(cfg)) begin + insert_instr(instr); + i++; + end end endfunction From 15699535ba77e92cc4693f127c78545aabb5e804 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 24 Nov 2023 10:30:02 +0000 Subject: [PATCH 43/90] Fix thread pointer increment/decrement in exception routine --- src/riscv_instr_pkg.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index f244f7e1..39edcf3d 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -1394,7 +1394,7 @@ package riscv_instr_pkg; string store_instr = (XLEN == 32) ? "sw" : "sd"; if (scratch inside {implemented_csr}) begin // Push USP from gpr.SP onto the kernel stack - instr.push_back($sformatf("addi x%0d, x%0d, -4", tp, tp)); + instr.push_back($sformatf("addi x%0d, x%0d, -%0d", tp, tp, XLEN/8)); instr.push_back($sformatf("%0s x%0d, (x%0d)", store_instr, sp, tp)); // Move KSP to gpr.SP instr.push_back($sformatf("add x%0d, x%0d, zero", sp, tp)); @@ -1449,7 +1449,7 @@ package riscv_instr_pkg; instr.push_back($sformatf("add x%0d, x%0d, zero", tp, sp)); // Pop USP from the kernel stack, move back to gpr.SP instr.push_back($sformatf("%0s x%0d, (x%0d)", load_instr, sp, tp)); - instr.push_back($sformatf("addi x%0d, x%0d, 4", tp, tp)); + instr.push_back($sformatf("addi x%0d, x%0d, %0d", tp, tp, XLEN/8)); end endfunction From bcd59b261d69f2cda34164df382d67c6dc89e817 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 24 Nov 2023 10:31:16 +0000 Subject: [PATCH 44/90] Fix vector instr corner cases generating exceptions --- src/isa/riscv_vector_instr.sv | 40 +++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 27d61757..6f34ba0c 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -56,7 +56,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul * 2 - 1]}); } else if (instr_name inside {VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V}) { !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + whole_register_move_cnt - 1]}); - } else if (group inside {LOAD, STORE}) { + } else if (category inside {LOAD, STORE}) { if (format inside {VLX_FORMAT, VSX_FORMAT}) { !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vlmul) * nfields - 1]}); } else { @@ -80,8 +80,9 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Instructions specifying a vector operand with an odd-numbered vector register will raisean // illegal instruction exception. 
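// Illustrative sketch, not part of the patch: the constraint below encodes the
// register-group alignment rule described in the comment above. For a
// non-fractional effective LMUL, a vector operand must use a base register
// index that is a multiple of the group size, otherwise the encoding is
// reserved and may raise an illegal-instruction exception.
function automatic bit is_aligned_vreg(int unsigned idx, int unsigned emul);
  // emul is the effective LMUL of the operand's register group (1, 2, 4 or 8)
  return (idx % emul) == 0;
endfunction
// Example: with LMUL = 4 the legal bases are v0, v4, ..., v28, so
// is_aligned_vreg(6, 4) == 0 while is_aligned_vreg(8, 4) == 1.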
constraint vector_operand_group_c { - if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vlmul > 0 && + if (!m_cfg.vector_cfg.vtype.fractional_lmul && !(instr_name inside {VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F}) && + !(instr_name inside {VRGATHEREI16}) && !(category inside {LOAD, STORE})) { vd % m_cfg.vector_cfg.vtype.vlmul == 0; vs1 % m_cfg.vector_cfg.vtype.vlmul == 0; @@ -104,14 +105,14 @@ class riscv_vector_instr extends riscv_floating_point_instr; !(vs1 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); } // Double-width vd, vs2 double-width, vs1 single-width - if (va_variant inside {WV, WX}) { + if (va_variant inside {WV, WX, WF}) { vs2 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; } else { !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); } } else { // Double-width vs2 is allowed to overlap double-width vd - if (!(va_variant inside {WV, WX})) { + if (!(va_variant inside {WV, WX, WF})) { vs2 != vd; } vs1 != vd; @@ -244,7 +245,10 @@ class riscv_vector_instr extends riscv_floating_point_instr; !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); } else if (ls_eew > m_cfg.vector_cfg.vtype.vsew && !m_cfg.vector_cfg.vtype.fractional_lmul) { // If src_eew > dst_eew, overlap in lowest part of src - !(vd inside {[vs2 + ls_emul_non_frac - m_cfg.vector_cfg.vtype.vlmul : vs2 + ls_emul_non_frac - 1]}); + !(vd inside {[vs2 + 1 : vs2 + ls_emul_non_frac - 1]}); + } else if (ls_eew != m_cfg.vector_cfg.vtype.vsew) { + // No overlap if fractional + vs2 != vd; } } } @@ -280,16 +284,20 @@ class riscv_vector_instr extends riscv_floating_point_instr; constraint vector_integer_extension_c { if (instr_name inside {VZEXT_VF2, VZEXT_VF4, VZEXT_VF8, VSEXT_VF2, VSEXT_VF4, VSEXT_VF8}) { - if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor >= 1) { - // VD needs to be LMUL aligned - vd % m_cfg.vector_cfg.vtype.vlmul == 0; + // VD needs to be LMUL aligned + vd % m_cfg.vector_cfg.vtype.vlmul == 0; + if (!m_cfg.vector_cfg.vtype.fractional_lmul && (m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor) >= 1) { // VS2 needs to be LMUL/ext_widening_factor aligned vs2 % (m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor) == 0; // VS2 can only overlap last ext_widening_factor'th of VD !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - (m_cfg.vector_cfg.vtype.vlmul / ext_widening_factor) - 1]}); } else { // If source has fractional LMUL, VD and VS2 cannot overlap - vs2 != vd; + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); + } else { + vs2 != vd; + } } } } @@ -340,17 +348,28 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Section 16.4: Vector Register Gather Instruction // For any vrgather instruction, the destination vector register group cannot overlap - // with the source vector register group + // with the source vector register groups, otherwise the instruction encoding is reserved // The vrgatherei16.vv form uses SEW/LMUL for the data in vs2 but EEW=16 and // EMUL = (16/SEW)*LMUL for the indices in vs1. 
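// Illustrative sketch, not part of the patch: the index operand of
// vrgatherei16.vv has a fixed EEW of 16, so its effective LMUL differs from
// the data operands' LMUL as described in the comment above. A minimal helper
// mirroring the non-fractional cases handled by the constraint below (a result
// of 0 indicates a fractional index EMUL, i.e. less than one full register):
function automatic int unsigned vrgatherei16_index_emul(int unsigned sew,
                                                        int unsigned lmul);
  // EMUL = (16 / SEW) * LMUL, e.g. SEW=8,LMUL=2 -> 4; SEW=32,LMUL=4 -> 2;
  // SEW=64,LMUL=8 -> 2; SEW=64,LMUL=2 -> 0 (fractional index EMUL)
  if (sew <= 16) return (16 / sew) * lmul;
  else           return lmul / (sew / 16);
endfunction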
constraint vector_gather_c { if (instr_name inside {VRGATHER, VRGATHEREI16}) { vd != vs2; vd != vs1; + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + vd % m_cfg.vector_cfg.vtype.vlmul == 0; + vs2 % m_cfg.vector_cfg.vtype.vlmul == 0; + } } if (instr_name == VRGATHEREI16) { if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vsew == 8) { vs1 % (m_cfg.vector_cfg.vtype.vlmul * 2) == 0; + !(vd inside {[vs1 : vs1 + m_cfg.vector_cfg.vtype.vlmul * 2 - 1]}); + } + if (!m_cfg.vector_cfg.vtype.fractional_lmul && m_cfg.vector_cfg.vtype.vsew >= 16) { + if (m_cfg.vector_cfg.vtype.vlmul >= m_cfg.vector_cfg.vtype.vsew / 16) { + vs1 % (m_cfg.vector_cfg.vtype.vlmul / (m_cfg.vector_cfg.vtype.vsew / 16)) == 0; + } + !(vs1 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); } } } @@ -382,7 +401,6 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Filter unsupported instructions based on configuration virtual function bit is_supported(riscv_instr_gen_config cfg); - string name = instr_name.name(); // Check that current LMUL and SEW are valid for narrowing and widening instruction if (is_widening_instr || is_narrowing_instr) begin if (cfg.vector_cfg.vtype.vsew == cfg.vector_cfg.max_int_sew || From 619cecf7755b6ca6aab129b0f70b7a6336ab4b9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 30 Nov 2023 14:12:52 +0000 Subject: [PATCH 45/90] Add Zvl* extension --- src/riscv_asm_program_gen.sv | 6 +++--- src/riscv_vector_cfg.sv | 18 +++++++++++++++--- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index 1deb2160..2fb66e9f 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -545,7 +545,7 @@ class riscv_asm_program_gen extends uvm_object; virtual function void init_vector_gpr(); int SEW = (ELEN <= XLEN) ? ELEN : XLEN; int LMUL = 1; - int num_elements = VLEN / SEW; + int num_elements = cfg.vector_cfg.vlen / SEW; // Do not init vector registers if RVV is not enabled if (!(RVV inside {supported_isa})) return; @@ -573,10 +573,10 @@ class riscv_asm_program_gen extends uvm_object; end end RANDOM_VALUES_LOAD: begin - // Select those memory regions that are big enough for load a vreg + // Select those memory regions that are big enough to load a vreg mem_region_t valid_mem_region [$]; foreach (cfg.mem_region[i]) - if (cfg.mem_region[i].size_in_bytes * 8 >= VLEN) valid_mem_region.push_back(cfg.mem_region[i]); + if (cfg.mem_region[i].size_in_bytes * 8 >= cfg.vector_cfg.vlen) valid_mem_region.push_back(cfg.mem_region[i]); if (valid_mem_region.size() == 0) `uvm_fatal(`gfn, "Couldn't find a memory region big enough to initialize the vector registers") diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index c4106e4b..168dc01b 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -24,6 +24,9 @@ class riscv_vector_cfg extends uvm_object; rand bit vxsat; riscv_vreg_t reserved_vregs[$]; + // Zvl* extension + int unsigned vlen = VLEN; + // Zve* extension string zve_extension = ""; bit enable_fp_support = 1'b1; @@ -96,6 +99,15 @@ class riscv_vector_cfg extends uvm_object; function new (string name = ""); super.new(name); + // Check for Zvl* extension + if ($value$plusargs("zvl_extension=%0d", vlen)) begin + if (vlen < 32 || vlen > 2**16 || 2**$clog2(vlen) != vlen) begin + `uvm_fatal(`gfn, $sformatf({"Unsupported Zvl* extension Zvl%0db. 
VLEN needs to be within 32 and 2**16", + " and be of power of two"}, vlen)) + end + `uvm_info(`gfn, $sformatf("Enabling Zvl%0db extension. Setting VLEN to %0d (overwriting old VLEN of %0d)", + vlen, vlen, VLEN), UVM_LOW) + end // Check for Zve* extension if ($value$plusargs("zve_extension=%0s", zve_extension)) begin int minimum_vlen; @@ -112,7 +124,7 @@ class riscv_vector_cfg extends uvm_object; end `uvm_info(`gfn, $sformatf("Enabling vector spec %0s extension", zve_extension), UVM_LOW) // Check VLEN to be of correct minimum size - if (VLEN < minimum_vlen) begin + if (vlen < minimum_vlen) begin `uvm_fatal(`gfn, $sformatf("%0s extension requires a VLEN of at least %0d bits", zve_extension, minimum_vlen)) end @@ -181,9 +193,9 @@ class riscv_vector_cfg extends uvm_object; // Get the vlmax for the current vtype function int vlmax(); if (vtype.fractional_lmul) begin - vlmax = VLEN / vtype.vsew / vtype.vlmul; + vlmax = vlen / vtype.vsew / vtype.vlmul; end else begin - vlmax = VLEN / vtype.vsew * vtype.vlmul; + vlmax = vlen / vtype.vsew * vtype.vlmul; end return vlmax; endfunction From 4ea83ce26f33ba3de17940c43ca0a9459764d088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 1 Dec 2023 08:35:22 +0000 Subject: [PATCH 46/90] Fix index generation and data page offset for vector l/s --- src/riscv_instr_stream.sv | 6 ++-- src/riscv_load_store_instr_lib.sv | 54 ++++++++++++++++++------------- 2 files changed, 34 insertions(+), 26 deletions(-) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index c449e5c3..9c9b3bd2 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -299,7 +299,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // Initialize a v-register with pre-defined values // Instructions will be inserted at defined index (-1: random, 0: front, instr_list.size(): back) - function void add_init_vector_gpr(riscv_vreg_t vreg, int unsigned values [], int sew, int idx = instr_list.size()); + function void add_init_vector_gpr(riscv_vreg_t vreg, logic [XLEN-1:0] values [], int sew, int idx = instr_list.size()); riscv_instr init_instr_list [$]; riscv_vector_instr vinstr; riscv_instr_gen_config init_cfg; @@ -322,7 +322,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // Initialize v-register if (values.size() == 1) begin // Load initialize value to x-register - init_instr_list.push_back(get_init_gpr_instr(init_cfg.gpr[0], values[0])); + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], values[0])); // Splatter value to v-register $cast(vinstr, riscv_instr::get_instr(VMV_V_X)); @@ -339,7 +339,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // Load all defined values into v-register for (i = 0; i < values.size() && i < init_cfg.vector_cfg.vlmax(); i++) begin // Load initialize value to x-register - init_instr_list.push_back(get_init_gpr_instr(init_cfg.gpr[0], values[i])); + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], values[i])); // Slide down value in v-register $cast(vinstr, riscv_instr::get_instr(VSLIDE1DOWN)); diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 84e5e713..f089cb69 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -530,7 +530,6 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; rand int unsigned data_page_base_offset; rand int unsigned num_mixed_instr; rand int byte_stride; - rand int unsigned indexed_byte_offset []; rand address_mode_e address_mode; // Base 
address rand riscv_reg_t rs1_reg; @@ -540,21 +539,26 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; riscv_vreg_t vs2_reg; // Generated load/store instruction riscv_vector_instr load_store_instr; + // Generated index byte offsets + logic [XLEN-1:0] indexed_byte_offset []; constraint solve_order_c { - solve data_page_id before data_page_base_offset; + solve address_mode before data_page_id; solve address_mode before data_eew; solve address_mode before index_eew; + solve data_eew before data_page_id; + solve index_eew before data_page_id; solve data_eew before data_page_base_offset; solve index_eew before data_page_base_offset; + solve data_page_id before data_page_base_offset; solve data_page_base_offset before byte_stride; - solve data_page_base_offset before indexed_byte_offset; - solve index_eew before indexed_byte_offset; } // Choose from available data pages constraint data_page_id_c { data_page_id < max_data_page_id; + // Unit strided address mode requires a big enough data page + address_mode == UNIT_STRIDED -> data_page[data_page_id].size_in_bytes >= cfg.vector_cfg.vl * (data_eew / 8); } // Find base address inside data page @@ -563,6 +567,9 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; data_page_base_offset <= data_page[data_page_id].size_in_bytes - 1; // Base address has to be aligned to data width data_page_base_offset % (data_eew / 8) == 0; + // For unit-strided accesses, base address has to be VL element bytes below page end + address_mode == UNIT_STRIDED -> data_page_base_offset <= data_page[data_page_id].size_in_bytes - + (cfg.vector_cfg.vl * (data_eew / 8)); } // Choose legal EEW for current config @@ -582,25 +589,12 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Choose a legal byte stride for strided l/s constraint byte_stride_c { - // Negative strides are allowed - byte_stride * (data_eew / 8) * cfg.vector_cfg.vl inside {[-data_page_base_offset : - data_page[data_page_id].size_in_bytes - data_page_base_offset]}; - // Addresses have to be data width aligned - byte_stride % (data_eew / 8) == 0; - } - - // Choose legal index byte offsets for every element in vector - constraint index_byte_offset_c { - // We need a byte offset for every element in the vector - indexed_byte_offset.size() == cfg.vector_cfg.vl; - foreach (indexed_byte_offset[i]) { - // Only positive index byte offsets are allowed - // +8 since nfields <= 8 - (indexed_byte_offset[i] + 8) * (data_eew / 8) <= data_page[data_page_id].size_in_bytes - data_page_base_offset; - // Index has to be data width aligned - indexed_byte_offset[i] % (data_eew / 8) == 0; - // Index has to fit into index EEW size - indexed_byte_offset[i] <= 2**index_eew - 1; + if (address_mode == STRIDED) { + // Negative strides are allowed + byte_stride * (data_eew / 8) * cfg.vector_cfg.vl inside {[-data_page_base_offset : + data_page[data_page_id].size_in_bytes - data_page_base_offset]}; + // Addresses have to be data width aligned + byte_stride % (data_eew / 8) == 0; } } @@ -635,6 +629,7 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; cfg.vector_cfg.reserved_vregs.pop_back(); end // Initialize vs2 with random/pre-defined indexes + randomize_indexed_byte_offset(); add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); end super.post_randomize(); @@ -698,4 +693,17 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; load_store_instr.process_load_store = 0; endfunction + // Randomize 
the index byte offsets for index load and stores + function void randomize_indexed_byte_offset(); + logic [XLEN-1:0] element; + indexed_byte_offset = new [cfg.vector_cfg.vl]; + for (int i = 0; i < cfg.vector_cfg.vl; i++) begin + // Get a random offset which fits into page + element = $urandom_range(0, ((2**index_eew - 1) < data_page[data_page_id].size_in_bytes - data_page_base_offset - 1 ? + (2**index_eew - 1) : data_page[data_page_id].size_in_bytes - data_page_base_offset - 1)); + // Align offset to data width + indexed_byte_offset[i] = (element & ('1 << $clog2(data_eew / 8))); + end + endfunction + endclass From b677d0dba7eefbec2862d671d1152d320d784369 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 1 Dec 2023 08:37:06 +0000 Subject: [PATCH 47/90] Fix VL randomization --- src/riscv_vector_cfg.sv | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 168dc01b..7f75d410 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -51,7 +51,7 @@ class riscv_vector_cfg extends uvm_object; // vl has to be within VLMAX constraint vl_c { - vl inside {[0 : vlmax()]}; + vl inside {[0 : vlmax(vtype)]}; } // vstart has to be within vl @@ -191,11 +191,11 @@ class riscv_vector_cfg extends uvm_object; endfunction // Get the vlmax for the current vtype - function int vlmax(); - if (vtype.fractional_lmul) begin - vlmax = vlen / vtype.vsew / vtype.vlmul; + function int vlmax(vtype_t _vtype = vtype); + if (_vtype.fractional_lmul) begin + vlmax = vlen / _vtype.vsew / _vtype.vlmul; end else begin - vlmax = vlen / vtype.vsew * vtype.vlmul; + vlmax = vlen / _vtype.vsew * _vtype.vlmul; end return vlmax; endfunction From 9f3cb2161678a4845d08e3a33f2ea5a3bbaf3488 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 1 Dec 2023 11:12:23 +0000 Subject: [PATCH 48/90] Remove ELEN dependency for vector reg initialization --- src/riscv_asm_program_gen.sv | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index 2fb66e9f..1729745c 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -543,17 +543,16 @@ class riscv_asm_program_gen extends uvm_object; // Initialize vector general purpose registers virtual function void init_vector_gpr(); - int SEW = (ELEN <= XLEN) ? 
ELEN : XLEN; - int LMUL = 1; - int num_elements = cfg.vector_cfg.vlen / SEW; + int sew = cfg.vector_cfg.max_int_sew; + int num_elements = cfg.vector_cfg.vlen / sew; // Do not init vector registers if RVV is not enabled if (!(RVV inside {supported_isa})) return; // Set vector configuration instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], num_elements)); - instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, m%0d, ta, ma", - indent, cfg.gpr[0], cfg.gpr[1], SEW, LMUL)); + instr_stream.push_back($sformatf("%0svsetvli x%0d, x%0d, e%0d, m1, ta, ma", + indent, cfg.gpr[0], cfg.gpr[1], sew)); // Vector registers will be initialized using one of the following three methods case (cfg.vreg_init_method) @@ -566,7 +565,7 @@ class riscv_asm_program_gen extends uvm_object; for (int v = 0; v < NUM_VEC_GPR; v++) begin for (int e = 0; e < num_elements; e++) begin instr_stream.push_back($sformatf("%0sli x%0d, 0x%0x", - indent, cfg.gpr[0], $urandom_range(0, 2 ** SEW - 1))); + indent, cfg.gpr[0], $urandom_range(0, 2 ** sew - 1))); instr_stream.push_back($sformatf("%0svslide1down.vx v%0d, v%0d, x%0d", indent, v, v, cfg.gpr[0])); end @@ -584,7 +583,7 @@ class riscv_asm_program_gen extends uvm_object; for (int v = 0; v < NUM_VEC_GPR; v++) begin int region = $urandom_range(0, valid_mem_region.size()-1); instr_stream.push_back($sformatf("%0sla x%0s, %0s", indent, cfg.gpr[0], valid_mem_region[region].name)); - instr_stream.push_back($sformatf("%0svle%0s.v v%0d, (x%0s)", indent, SEW, v, cfg.gpr[0])); + instr_stream.push_back($sformatf("%0svle%0s.v v%0d, (x%0s)", indent, sew, v, cfg.gpr[0])); end end endcase From 5ad96a098b307a55a94136d878813f102f91d6af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 5 Dec 2023 08:36:44 +0000 Subject: [PATCH 49/90] Fix fractional lmul and sew legal combination --- src/riscv_vector_cfg.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 7f75d410..003aa9e9 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -64,7 +64,7 @@ class riscv_vector_cfg extends uvm_object; vtype.vlmul inside {1, 2, 4, 8}; vtype.fractional_lmul -> vtype.vlmul != 1; // Fractional LMUL only allowed iff at least one SEW element fits into vector - (8 >> $clog2(vtype.vsew/8)) < vtype.vlmul -> !vtype.fractional_lmul; + ((max_int_sew / 8) >> $clog2(vtype.vsew/8)) < vtype.vlmul -> !vtype.fractional_lmul; // Fractional LMUL 1/8th only supported iff EEW 64 is supported vtype.fractional_lmul -> vtype.vlmul <= max_int_sew / 8; } From ab69613177de3c6bf08cc597b4df39f613d4a414 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 30 Jan 2024 07:53:30 +0000 Subject: [PATCH 50/90] Remove zvfhmin requirement for zvfh extension --- src/riscv_vector_cfg.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 003aa9e9..33149faf 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -140,8 +140,8 @@ class riscv_vector_cfg extends uvm_object; end end if ($value$plusargs("enable_zvfh_extension=%0b", enable_zvfh_extension)) begin - if (enable_zvfh_extension && !enable_zvfhmin_extension) begin - `uvm_fatal(`gfn, $sformatf("Zvfh extension requires the Zvfhmin extension")) + if (enable_zvfh_extension && !enable_fp_support) begin + `uvm_fatal(`gfn, $sformatf("Zvfh extension requires floating point support (Zve32x is invalid)")) end if (enable_zvfh_extension) begin min_fp_sew = 16; 
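For reference (an illustrative sketch, not part of any patch in this series): the constraint updated in [PATCH 49/90] "Fix fractional lmul and sew legal combination" above encodes the rule that a fractional LMUL setting (LMUL = 1/vlmul, with vlmul holding the denominator) is only legal when SEW / LMUL does not exceed the widest supported integer element width (max_int_sew). A standalone restatement of that check, assuming power-of-two SEW values; the helper name fractional_lmul_is_legal is invented for illustration:

// Sketch only: arithmetic form of
//   ((max_int_sew / 8) >> $clog2(vsew / 8)) >= vlmul
// for power-of-two vsew, i.e. the case where the implication in vlmul_c does not fire.
function automatic bit fractional_lmul_is_legal(int unsigned max_int_sew,
                                                int unsigned vsew,
                                                int unsigned vlmul);
  // With LMUL = 1/vlmul, SEW / LMUL equals vsew * vlmul and must stay within
  // the widest supported integer element width.
  return (vsew * vlmul) <= max_int_sew;
endfunction

// Example with max_int_sew = 64: vsew = 8 allows LMUL down to 1/8 (8 * 8 = 64),
// while vsew = 16 with LMUL = 1/8 is illegal (16 * 8 = 128 > 64) and needs 1/4 or larger.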
From 3bb1a85d72c0397a6b3d0195cc8d2a940a0f879e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 30 Jan 2024 09:11:25 +0000 Subject: [PATCH 51/90] Fix mask register overlap and alignment constraint --- src/isa/riscv_vector_instr.sv | 49 ++++++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 6f34ba0c..dd9ff415 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -37,6 +37,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; bit is_convert_instr = 1'b0; bit is_reduction_instr = 1'b0; bit is_mask_producing_instr = 1'b0; + bit is_mask_operands = 1'b0; bit is_fp_instr = 1'b0; bit is_segmented_ls_instr = 1'b0; bit is_whole_register_ls_instr = 1'b0; @@ -77,16 +78,20 @@ class riscv_vector_instr extends riscv_floating_point_instr; } // Section 3.3.2: Vector Register Grouping (vlmul) - // Instructions specifying a vector operand with an odd-numbered vector register will raisean + // Instructions specifying a vector operand with an odd-numbered vector register will raise an // illegal instruction exception. constraint vector_operand_group_c { if (!m_cfg.vector_cfg.vtype.fractional_lmul && !(instr_name inside {VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F}) && !(instr_name inside {VRGATHEREI16}) && !(category inside {LOAD, STORE})) { - vd % m_cfg.vector_cfg.vtype.vlmul == 0; - vs1 % m_cfg.vector_cfg.vtype.vlmul == 0; - vs2 % m_cfg.vector_cfg.vtype.vlmul == 0; + if (!is_mask_producing_instr) { + vd % m_cfg.vector_cfg.vtype.vlmul == 0; + } + if (!is_mask_operands) { + (instr_name != VCOMPRESS) -> vs1 % m_cfg.vector_cfg.vtype.vlmul == 0; + vs2 % m_cfg.vector_cfg.vtype.vlmul == 0; + } vs3 % m_cfg.vector_cfg.vtype.vlmul == 0; } } @@ -136,6 +141,29 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } + // If operand and result registers are not masks, then (mask) operand and + // (mask) result registers cannot overlap + constraint vector_mask_reg_overlap_c { + if (is_mask_producing_instr && !is_mask_operands) { + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + !(vd inside {[vs1 : vs1 + m_cfg.vector_cfg.vtype.vlmul - 1]}); + !(vd inside {[vs2 : vs2 + m_cfg.vector_cfg.vtype.vlmul - 1]}); + } else { + vd != vs1; + vd != vs2; + } + } + if (!is_mask_producing_instr && is_mask_operands) { + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + !(vs1 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); + !(vs2 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); + } else { + vs1 != vd; + vs2 != vd; + } + } + } + // Section 5.3: Vector Masking // The destination vector register group for a masked vector instruction cannot overlap // the source mask register (v0), unless the destination vector register is being written @@ -380,7 +408,11 @@ class riscv_vector_instr extends riscv_floating_point_instr; constraint vector_compress_c { if (instr_name == VCOMPRESS) { vd != vs2; - vd != vs1; + if (!m_cfg.vector_cfg.vtype.fractional_lmul) { + !(vs1 inside {[vd : vd + m_cfg.vector_cfg.vtype.vlmul - 1]}); + } else { + vd != vs1; + } } } @@ -665,9 +697,13 @@ class riscv_vector_instr extends riscv_floating_point_instr; if (uvm_is_match("*RED*", name)) begin is_reduction_instr = 1'b1; end - if (uvm_is_match("VM*_M*", name)) begin + if (uvm_is_match("VM*_M*", name) || (name.substr(0, 2) == "VMF") || + (name.substr(0, 2) == "VMS") || instr_name inside {VMADC, VMSBC}) begin is_mask_producing_instr = 1'b1; end + if (uvm_is_match("*_M*", name)) begin 
+ is_mask_operands = 1'b1; + end if ((name.substr(0, 1) == "VF" && name != "VFIRST_M") || (name.substr(0, 2) == "VMF")) begin is_fp_instr = 1'b1; end @@ -794,6 +830,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; this.is_convert_instr = rhs_.is_convert_instr; this.is_reduction_instr = rhs_.is_reduction_instr; this.is_mask_producing_instr = rhs_.is_mask_producing_instr; + this.is_mask_operands = rhs_.is_mask_operands; this.is_fp_instr = rhs_.is_fp_instr; this.is_segmented_ls_instr = rhs_.is_segmented_ls_instr; this.is_whole_register_ls_instr = rhs_.is_whole_register_ls_instr; From 1617af7e1fc9f805d7de2426437004de4bd8c18f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 30 Jan 2024 09:55:55 +0000 Subject: [PATCH 52/90] Solve VSEW before VLMUL --- src/riscv_vector_cfg.sv | 1 + 1 file changed, 1 insertion(+) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 33149faf..83039aab 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -45,6 +45,7 @@ class riscv_vector_cfg extends uvm_object; int unsigned legal_ls_eew[$]; constraint solve_order_c { + solve vtype.vsew before vtype.vlmul; solve vtype before vl; solve vl before vstart; } From 30026df591105f2949d0d824fe39144f584d50ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 30 Jan 2024 10:53:12 +0000 Subject: [PATCH 53/90] Pass vreg initialisation method as plusargs --- src/riscv_asm_program_gen.sv | 10 ++++++++-- src/riscv_instr_gen_config.sv | 14 ++++++++------ src/riscv_instr_pkg.sv | 20 ++++++++++++++++++++ target/rv64gcv/testlist.yaml | 1 + 4 files changed, 37 insertions(+), 8 deletions(-) diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index 1729745c..3c41351a 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -581,9 +581,15 @@ class riscv_asm_program_gen extends uvm_object; `uvm_fatal(`gfn, "Couldn't find a memory region big enough to initialize the vector registers") for (int v = 0; v < NUM_VEC_GPR; v++) begin + // Select random region int region = $urandom_range(0, valid_mem_region.size()-1); - instr_stream.push_back($sformatf("%0sla x%0s, %0s", indent, cfg.gpr[0], valid_mem_region[region].name)); - instr_stream.push_back($sformatf("%0svle%0s.v v%0d, (x%0s)", indent, sew, v, cfg.gpr[0])); + // Get valid start offset in region + int offset = $urandom_range(0, (valid_mem_region[region].size_in_bytes - (cfg.vector_cfg.vlen / 8)) / + (sew / 8)) * (sew / 8); + // Generate load + instr_stream.push_back($sformatf("%0sla x%0d, %0s+%0d", indent, cfg.gpr[0], + valid_mem_region[region].name, offset)); + instr_stream.push_back($sformatf("%0svle%0d.v v%0d, (x%0d)", indent, sew, v, cfg.gpr[0])); end end endcase diff --git a/src/riscv_instr_gen_config.sv b/src/riscv_instr_gen_config.sv index 5c6a0d49..fbba56ac 100644 --- a/src/riscv_instr_gen_config.sv +++ b/src/riscv_instr_gen_config.sv @@ -39,12 +39,6 @@ class riscv_instr_gen_config extends uvm_object; // Pattern of data section: RAND_DATA, ALL_ZERO, INCR_VAL rand data_pattern_t data_page_pattern; - // Initialization of the vregs - // SAME_VALUES_ALL_ELEMS - Using vmv.v.x to fill all the elements of the vreg with the same value as the one in the GPR selected - // RANDOM_VALUES_VMV - Using vmv.v.x + vslide1up.vx to randomize the contents of each vector element - // RANDOM_VALUES_LOAD - Using vle.v, same approach as RANDOM_VALUES_VMV but more efficient for big VLEN - vreg_init_method_t vreg_init_method = RANDOM_VALUES_VMV; - // Associate 
array for delegation configuration for each exception and interrupt // When the bit is 1, the corresponding delegation is enabled. rand bit m_mode_exception_delegation[exception_cause_t]; @@ -257,6 +251,11 @@ class riscv_instr_gen_config extends uvm_object; bit enable_vector_extension; // Only generate vector instructions bit vector_instr_only; + // Initialization of the vregs + // SAME_VALUES_ALL_ELEMS - Using vmv.v.x to fill all the elements of the vreg with the same value as the one in the GPR selected + // RANDOM_VALUES_VMV - Using vmv.v.x + vslide1down.vx to randomize the contents of each vector element + // RANDOM_VALUES_LOAD - Using vle.v, same approach as RANDOM_VALUES_VMV but more efficient for big VLEN + vreg_init_method_t vreg_init_method = RANDOM_VALUES_VMV; // Bit manipulation extension support bit enable_b_extension; @@ -535,6 +534,7 @@ class riscv_instr_gen_config extends uvm_object; `uvm_field_int(enable_floating_point, UVM_DEFAULT) `uvm_field_int(enable_vector_extension, UVM_DEFAULT) `uvm_field_int(vector_instr_only, UVM_DEFAULT) + `uvm_field_enum(vreg_init_method_t, vreg_init_method, UVM_DEFAULT) `uvm_field_int(enable_b_extension, UVM_DEFAULT) `uvm_field_array_enum(b_ext_group_t, enable_bitmanip_groups, UVM_DEFAULT) `uvm_field_int(enable_zba_extension, UVM_DEFAULT) @@ -608,6 +608,8 @@ class riscv_instr_gen_config extends uvm_object; get_bool_arg_value("+enable_floating_point=", enable_floating_point); get_bool_arg_value("+enable_vector_extension=", enable_vector_extension); get_bool_arg_value("+vector_instr_only=", vector_instr_only); + cmdline_enum_processor #(vreg_init_method_t)::get_value("+vreg_init_method=", + 1'b0, vreg_init_method); get_bool_arg_value("+enable_b_extension=", enable_b_extension); get_bool_arg_value("+enable_zba_extension=", enable_zba_extension); get_bool_arg_value("+enable_zbb_extension=", enable_zbb_extension); diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index 39edcf3d..39b51d11 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -1504,6 +1504,26 @@ package riscv_instr_pkg; end end endfunction + + static function void get_value(string cmdline_str, bit allow_raw_vals, ref T val); + string s; + void'(inst.get_arg_value(cmdline_str, s)); + if(s != "") begin + T value; + if (allow_raw_vals && s.substr(0, 1) == "0x") begin + logic[$bits(T)-1:0] raw_val; + + string raw_val_hex_digits = s.substr(2, s.len()-1); + raw_val = raw_val_hex_digits.atohex(); + val = T'(raw_val); + end else if (uvm_enum_wrapper#(T)::from_name(s.toupper(), value)) begin + val = value; + end else begin + `uvm_fatal("riscv_instr_pkg", $sformatf( + "Invalid value (%0s) specified in command line: %0s", s, cmdline_str)) + end + end + endfunction endclass riscv_reg_t all_gpr[] = {ZERO, RA, SP, GP, TP, T0, T1, T2, S0, S1, A0, diff --git a/target/rv64gcv/testlist.yaml b/target/rv64gcv/testlist.yaml index af04f06d..e52c74f2 100644 --- a/target/rv64gcv/testlist.yaml +++ b/target/rv64gcv/testlist.yaml @@ -147,6 +147,7 @@ +num_of_sub_program=0 +enable_floating_point=1 +enable_vector_extension=1 + +vreg_init_method=RANDOM_VALUES_LOAD +directed_instr_0=riscv_vector_load_store_instr_stream,10 +no_branch_jump=1 +boot_mode=m From 592caeb81e1aa49a9e72ca55b3fc7a4d5d4a2142 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 19 Apr 2024 12:46:37 +0000 Subject: [PATCH 54/90] Re-seed RNG on instruction copy This will allow riscv-dv to work properly on Questa --- src/isa/riscv_instr.sv | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/src/isa/riscv_instr.sv b/src/isa/riscv_instr.sv index f44e972a..924d2624 100644 --- a/src/isa/riscv_instr.sv +++ b/src/isa/riscv_instr.sv @@ -236,6 +236,8 @@ class riscv_instr extends uvm_object; end // Shallow copy for all relevant fields, avoid using create() to improve performance instr_h = new instr_template[name]; + // Put instruction RNG in unique state + instr_h.srandom($urandom()); return instr_h; endfunction : get_rand_instr @@ -265,6 +267,8 @@ class riscv_instr extends uvm_object; name = load_store_instr[idx]; // Shallow copy for all relevant fields, avoid using create() to improve performance instr_h = new instr_template[name]; + // Put instruction RNG in unique state + instr_h.srandom($urandom()); return instr_h; endfunction : get_load_store_instr From 06baeecd05e03b6539164c5204b2cd21644427f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 5 Jun 2024 06:40:32 +0000 Subject: [PATCH 55/90] Add ability to exclude vector load/store instr using the unsupported_instr list --- src/riscv_load_store_instr_lib.sv | 57 ++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 8 deletions(-) diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index f089cb69..8b159c31 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -521,8 +521,18 @@ endclass class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; + // List of vector load/store instructions (grouped into different address modes) + localparam riscv_instr_name_t unit_strided[] = {VLE_V, VSE_V, VLEFF_V, + VLM_V, VSM_V, VLRE_V, VSR_V, + VLSEGE_V, VSSEGE_V, VLSEGEFF_V}; + localparam riscv_instr_name_t strided[] = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V}; + localparam riscv_instr_name_t indexed[] = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, + VLUXSEGEI_V, VLOXSEGEI_V, VSUXSEGEI_V, VSOXSEGEI_V}; + // Types of vector load/store address modes typedef enum {UNIT_STRIDED, STRIDED, INDEXED} address_mode_e; + // List of allowed address modes + address_mode_e allowed_address_modes[]; rand int unsigned data_eew; rand int unsigned index_eew; @@ -554,6 +564,11 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; solve data_page_base_offset before byte_stride; } + // Find legal address modes + constraint address_mode_c { + address_mode inside {allowed_address_modes}; + } + // Choose from available data pages constraint data_page_id_c { data_page_id < max_data_page_id; @@ -608,12 +623,37 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; `uvm_object_utils(riscv_vector_load_store_instr_stream) `uvm_object_new + function void pre_randomize(); + super.pre_randomize(); + + // Build list of allowed address modes (according to unsupported_instr list) + foreach(unit_strided[i]) begin + if (!(unit_strided[i] inside {unsupported_instr})) begin + allowed_address_modes = {allowed_address_modes, UNIT_STRIDED}; + break; + end + end + foreach(strided[i]) begin + if (!(strided[i] inside {unsupported_instr})) begin + allowed_address_modes = {allowed_address_modes, STRIDED}; + break; + end + end + foreach(indexed[i]) begin + if (!(indexed[i] inside {unsupported_instr})) begin + allowed_address_modes = {allowed_address_modes, INDEXED}; + break; + end + end + endfunction + function void post_randomize(); // Randomize the available registers reserved_rd = {reserved_rd, rs1_reg, rs2_reg}; randomize_avail_regs(); // Generate a random load/store instruction - gen_load_store_instr(); + // Exit and skip 
directed test if there is no load/store instruction for current config + if (gen_load_store_instr()) return; // Insert a random-mixed instruction stream add_mixed_instr(num_mixed_instr); // Insert the load/store instruction at a random place in the instruction stream @@ -636,9 +676,12 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; endfunction // Generate a load/store instruction - virtual function void gen_load_store_instr(); + virtual function int gen_load_store_instr(); build_allowed_instr(); + // If there are no allowed instructions, do not try to randomize and return early + if (allowed_instr.size() == 0) return 1; randomize_vector_load_store_instr(); + return 0; endfunction // Choose allowed load/store instructions for current address mode @@ -648,22 +691,20 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Get instructions for selected address mode case (address_mode) UNIT_STRIDED : begin - possible_instr = {VLE_V, VSE_V, VLEFF_V, - VLM_V, VSM_V, VLRE_V, VSR_V, - VLSEGE_V, VSSEGE_V, VLSEGEFF_V}; + possible_instr = {unit_strided}; end STRIDED : begin - possible_instr = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V}; + possible_instr = {strided}; end INDEXED : begin - possible_instr = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, - VLUXSEGEI_V, VLOXSEGEI_V, VSUXSEGEI_V, VSOXSEGEI_V}; + possible_instr = {indexed}; end endcase // Filter out illegal instructions for current config foreach (possible_instr[i]) begin riscv_instr instr_inst; + if (possible_instr[i] inside {unsupported_instr}) continue; instr_inst = instr_inst.create_instr(possible_instr[i]); if (instr_inst.is_supported(cfg)) begin allowed_instr = {allowed_instr, possible_instr[i]}; From eddd62aec8e05cd06b4c99e01b3b4236d814f62f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 5 Jun 2024 13:43:39 +0000 Subject: [PATCH 56/90] Add function to initialise vector gpr with random values based on lfsr --- src/riscv_instr_stream.sv | 221 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 9c9b3bd2..ee15c46e 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -297,6 +297,227 @@ class riscv_rand_instr_stream extends riscv_instr_stream; return li_instr; endfunction + // Initialize a v-register with random values (determined through a linear feedback shift register) + // vreg: register with randomised elements + // seed: register that contains initial seed (cannot be equal to vreg) + // vtemp: temporary vector register used during calculation (cannot be equal to vreg or seed) + // reseed: reseed the original seed with the vector element index (seed += vid) + // min_value: lower bound of random value (inclusive) + // max_value: upper bound of random value (inclusive) + // align_by: align random value by number of bytes (e.g align_by == 2 would clear the lowest bit) + // sew: element width + // insert_idx: position in instruction stream to insert instruction at + // (-1: random, 0: front, instr_list.size(): back (default)) + function void add_init_vector_gpr_random(riscv_vreg_t vreg, riscv_vreg_t seed, riscv_vreg_t vtemp, + int reseed, int min_value, int max_value, + int align_by, int sew, int insert_idx = instr_list.size()); + // The LSFR is based on the fibonacci lsfr (https://en.wikipedia.org/wiki/Linear-feedback_shift_register) + // The polinomial parameters are based on a paper by Xilinx 
(http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf) + // + // LFSR + // Feedback polynomial + // i8: x^8 + x^6 + x^5 + x^4 + 1 + // i16: x^16 + x^15 + x^13 + x^4 + 1 + // i32: x^32 + x^22 + x^2 + x^1 + 1 + // + // Calculation (example for i16): + // # taps: 16 15 13 4; feedback polynomial: x^16 + x^15 + x^13 + x^4 + 1 + // bit = (lfsr ^ (lfsr >> 1) ^ (lfsr >> 3) ^ (lfsr >> 12)) & 1 + // lfsr = (lfsr >> 1) | (bit << 15) + + riscv_instr init_instr_list [$]; + riscv_vector_instr vinstr; + riscv_instr_gen_config init_cfg; + int polinomial[]; + + unique case (sew) + 8: polinomial = {6, 5, 4}; + 16: polinomial = {15, 13, 4}; + 32: polinomial = {22, 2, 1}; + default: `uvm_fatal("add_init_vector_gpr_random", + $sformatf("Error: Unable to initialize vector with randomised values of SEW == %0d", sew)) + endcase + + // Clone current configuration + init_cfg = new(); + init_cfg.copy(cfg); + + // Set vtype to new vsew and vl to VLMAX + init_cfg.vector_cfg.update_vsew_keep_vl(sew); + $cast(vinstr, riscv_instr::get_instr(VSETVLI)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + rs1 == 0; + rd == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + + // Add vid to seed values + if (reseed) begin + // vtemp = vid + $cast(vinstr, riscv_instr::get_instr(VID_V)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + vm == 1'b1; + vd == vtemp; + ) + init_instr_list.push_back(vinstr); + + // seed = seed + vtemp + $cast(vinstr, riscv_instr::get_instr(VADD)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VV; + vm == 1'b1; + vd == seed; + vs1 == vtemp; + vs2 == seed; + ) + init_instr_list.push_back(vinstr); + end + + // vreg = seed + $cast(vinstr, riscv_instr::get_instr(VMV_V_V)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VV; + vm == 1'b1; + vd == vreg; + vs1 == seed; + ) + init_instr_list.push_back(vinstr); + + foreach (polinomial[i]) begin + // vtemp = seed >> (sew - polinomial[i]) + $cast(vinstr, riscv_instr::get_instr(VSRL)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VI; + vm == 1'b1; + vd == vtemp; + vs2 == seed; + imm == sew - polinomial[i]; + ) + init_instr_list.push_back(vinstr); + + // vreg = vtemp ^ vreg + $cast(vinstr, riscv_instr::get_instr(VXOR)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VV; + vm == 1'b1; + vd == vreg; + vs2 == vtemp; + vs1 == vreg; + ) + init_instr_list.push_back(vinstr); + end + + // vreg = vreg << sew - 1 + $cast(vinstr, riscv_instr::get_instr(VSLL)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VI; + vm == 1'b1; + vd == vreg; + vs2 == vreg; + imm == sew - 1; + ) + init_instr_list.push_back(vinstr); + + // vtemp = seed >> 1 + $cast(vinstr, riscv_instr::get_instr(VSRL)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VI; + vm == 1'b1; + vd == vtemp; + vs2 == seed; + imm == 1; + ) + init_instr_list.push_back(vinstr); + + // vreg = vreg | vtemp + $cast(vinstr, 
riscv_instr::get_instr(VOR)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VV; + vm == 1'b1; + vd == vreg; + vs2 == vtemp; + vs1 == vreg; + ) + init_instr_list.push_back(vinstr); + + // Cast to range + if (min_value > 0) begin + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], min_value)); + $cast(vinstr, riscv_instr::get_instr(VMAXU)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VX; + vm == 1'b1; + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + end + if (max_value > 0) begin + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], max_value)); + $cast(vinstr, riscv_instr::get_instr(VMINU)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VX; + vm == 1'b1; + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + end + if (align_by > 1) begin + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], '1 << $clog2(align_by))); + $cast(vinstr, riscv_instr::get_instr(VAND)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VX; + vm == 1'b1; + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + end + + // Reset vtype + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], cfg.vector_cfg.vl)); + $cast(vinstr, riscv_instr::get_instr(VSETVLI)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + rs1 == cfg.gpr[0]; + !(rd inside {cfg.reserved_regs, reserved_rd}); + vd == 0; + ) + init_instr_list.push_back(vinstr); + + // Add instructions to instruction stream + insert_instr_stream(init_instr_list, insert_idx); + endfunction + // Initialize a v-register with pre-defined values // Instructions will be inserted at defined index (-1: random, 0: front, instr_list.size(): back) function void add_init_vector_gpr(riscv_vreg_t vreg, logic [XLEN-1:0] values [], int sew, int idx = instr_list.size()); From ed35eaedfba582333195d59ae937a15d22a97194 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 5 Jun 2024 13:45:50 +0000 Subject: [PATCH 57/90] Add function to get random vector gpr that is not reserved --- src/riscv_instr_stream.sv | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index ee15c46e..432a9c3a 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -286,6 +286,27 @@ class riscv_rand_instr_stream extends riscv_instr_stream; ) endfunction + // Get a random vreg that is aligned to non fractional emul and is not already reserved + // Optionally add random vreg to list of reserved vregs + function riscv_vreg_t get_random_vreg (int emul, int reserve_vreg); + // Get random start register and align to emul + int base = $urandom_range(31) / emul * emul; + for (int i = 0; i < 32; i += emul) begin + for (int idx = 0; idx < emul; idx++) begin + if (riscv_vreg_t'(base + idx) inside {cfg.vector_cfg.reserved_vregs}) break; + if (reserve_vreg) begin + for (int i = 0; i < emul; i++) begin + cfg.vector_cfg.reserved_vregs.push_back(riscv_vreg_t'(base + i)); + end + end + return riscv_vreg_t'(base); + end + base += emul; + base %= 32; + end + 
`uvm_fatal(`gfn, $sformatf("Cannot find random vector register with emul = %0d that is not already reserved", emul)) + endfunction + function riscv_instr get_init_gpr_instr(riscv_reg_t gpr, bit [XLEN-1:0] val); riscv_pseudo_instr li_instr; li_instr = riscv_pseudo_instr::type_id::create("li_instr"); From 107e63411865bc3abaca959cb173bbeff771d6fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 5 Jun 2024 13:49:40 +0000 Subject: [PATCH 58/90] Use lfsr random index values for vector indexed loads/stores --- src/riscv_load_store_instr_lib.sv | 44 ++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 8b159c31..dbb98c0f 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -547,10 +547,15 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; rand riscv_reg_t rs2_reg; // Indexes - randomized by instructions riscv_vreg_t vs2_reg; + // Temporary random index calculation registers + riscv_vreg_t vseed; + riscv_vreg_t vtemp; // Generated load/store instruction riscv_vector_instr load_store_instr; // Generated index byte offsets logic [XLEN-1:0] indexed_byte_offset []; + // Emul of index register + int index_emul; constraint solve_order_c { solve address_mode before data_page_id; @@ -653,7 +658,15 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; randomize_avail_regs(); // Generate a random load/store instruction // Exit and skip directed test if there is no load/store instruction for current config - if (gen_load_store_instr()) return; + if (gen_load_store_instr()) begin + if (address_mode == INDEXED) begin + // Unreserve index vector register + for (int i = 0; i < index_emul; i++) begin + cfg.vector_cfg.reserved_vregs.pop_back(); + end + end + return; + end // Insert a random-mixed instruction stream add_mixed_instr(num_mixed_instr); // Insert the load/store instruction at a random place in the instruction stream @@ -664,13 +677,24 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Initialize rs2 with the stride insert_instr(get_init_gpr_instr(rs2_reg, byte_stride), 0); end else if (address_mode == INDEXED) begin - // Unreserve index vector registers - for (int i = 0; i < load_store_instr.emul_non_frac(index_eew); i++) begin + // Unreserve index vector register + for (int i = 0; i < index_emul; i++) begin cfg.vector_cfg.reserved_vregs.pop_back(); end // Initialize vs2 with random/pre-defined indexes - randomize_indexed_byte_offset(); - add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); + // randomize_indexed_byte_offset(); + // add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); + add_init_vector_gpr_random( + .vreg ( vs2_reg ), + .seed ( vseed ), + .vtemp ( vtemp ), + .reseed ( 1'b1 ), + .min_value ( 0 ), + .max_value ( data_page[data_page_id].size_in_bytes - data_page_base_offset - 1 ), + .align_by ( data_eew / 8 ), + .sew ( index_eew ), + .insert_idx ( 0 ) + ); end super.post_randomize(); endfunction @@ -727,9 +751,17 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; if (address_mode == INDEXED) begin vs2_reg = load_store_instr.vs2; // Make sure that indexes are not overwritten - for (int i = 0; i < load_store_instr.emul_non_frac(index_eew); i++) begin + index_emul = load_store_instr.emul_non_frac(index_eew); + for (int i = 0; i < index_emul; i++) begin 
cfg.vector_cfg.reserved_vregs.push_back(riscv_vreg_t'(vs2_reg + i)); end + // Find seed and temporary vector registers + vseed = get_random_vreg(index_emul, 1); + vtemp = get_random_vreg(index_emul, 0); + // Unreserve vseed register + for (int i = 0; i < index_emul; i++) begin + cfg.vector_cfg.reserved_vregs.pop_back(); + end end load_store_instr.process_load_store = 0; endfunction From 8143604da359c14f4c68f0226f70f52037059ed1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 5 Jun 2024 14:30:00 +0000 Subject: [PATCH 59/90] Add plusarg to switch between old and new index vreg initialisation methods --- src/riscv_instr_gen_config.sv | 7 +++++++ src/riscv_instr_pkg.sv | 6 ++++++ src/riscv_load_store_instr_lib.sv | 32 +++++++++++++++++-------------- 3 files changed, 31 insertions(+), 14 deletions(-) diff --git a/src/riscv_instr_gen_config.sv b/src/riscv_instr_gen_config.sv index fbba56ac..3edf0c68 100644 --- a/src/riscv_instr_gen_config.sv +++ b/src/riscv_instr_gen_config.sv @@ -256,6 +256,10 @@ class riscv_instr_gen_config extends uvm_object; // RANDOM_VALUES_VMV - Using vmv.v.x + vslide1down.vx to randomize the contents of each vector element // RANDOM_VALUES_LOAD - Using vle.v, same approach as RANDOM_VALUES_VMV but more efficient for big VLEN vreg_init_method_t vreg_init_method = RANDOM_VALUES_VMV; + // Initialization of the index register for vector indexed loads/stores + // LS_INDEX_INIT_LFSR - Calculation of random indexes through the use of an lfsr + // LS_INDEX_INIT_SLIDE - Using multiple vslide1down instructions to prefill the vector one by one + vreg_ls_index_init_t vreg_ls_index_init = LS_INDEX_INIT_LFSR; // Bit manipulation extension support bit enable_b_extension; @@ -535,6 +539,7 @@ class riscv_instr_gen_config extends uvm_object; `uvm_field_int(enable_vector_extension, UVM_DEFAULT) `uvm_field_int(vector_instr_only, UVM_DEFAULT) `uvm_field_enum(vreg_init_method_t, vreg_init_method, UVM_DEFAULT) + `uvm_field_enum(vreg_ls_index_init_t, vreg_ls_index_init, UVM_DEFAULT) `uvm_field_int(enable_b_extension, UVM_DEFAULT) `uvm_field_array_enum(b_ext_group_t, enable_bitmanip_groups, UVM_DEFAULT) `uvm_field_int(enable_zba_extension, UVM_DEFAULT) @@ -610,6 +615,8 @@ class riscv_instr_gen_config extends uvm_object; get_bool_arg_value("+vector_instr_only=", vector_instr_only); cmdline_enum_processor #(vreg_init_method_t)::get_value("+vreg_init_method=", 1'b0, vreg_init_method); + cmdline_enum_processor #(vreg_ls_index_init_t)::get_value("+vreg_ls_index_init=", + 1'b0, vreg_ls_index_init); get_bool_arg_value("+enable_b_extension=", enable_b_extension); get_bool_arg_value("+enable_zba_extension=", enable_zba_extension); get_bool_arg_value("+enable_zbb_extension=", enable_zbb_extension); diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index 39b51d11..297aacd8 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -43,6 +43,12 @@ package riscv_instr_pkg; RANDOM_VALUES_LOAD } vreg_init_method_t; + // Initialisation of the index vreg for indexed vector load/stores + typedef enum { + LS_INDEX_INIT_LFSR, + LS_INDEX_INIT_SLIDE + } vreg_ls_index_init_t; + typedef enum bit [3:0] { BARE = 4'b0000, SV32 = 4'b0001, diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index dbb98c0f..fc5b7735 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -681,20 +681,24 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; for (int i = 0; i < index_emul; i++) begin 
cfg.vector_cfg.reserved_vregs.pop_back(); end - // Initialize vs2 with random/pre-defined indexes - // randomize_indexed_byte_offset(); - // add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); - add_init_vector_gpr_random( - .vreg ( vs2_reg ), - .seed ( vseed ), - .vtemp ( vtemp ), - .reseed ( 1'b1 ), - .min_value ( 0 ), - .max_value ( data_page[data_page_id].size_in_bytes - data_page_base_offset - 1 ), - .align_by ( data_eew / 8 ), - .sew ( index_eew ), - .insert_idx ( 0 ) - ); + if (cfg.vreg_ls_index_init == LS_INDEX_INIT_LFSR) begin + // Initialize vs2 with randomly calculated indexes + add_init_vector_gpr_random( + .vreg ( vs2_reg ), + .seed ( vseed ), + .vtemp ( vtemp ), + .reseed ( 1'b1 ), + .min_value ( 0 ), + .max_value ( data_page[data_page_id].size_in_bytes - data_page_base_offset - 1 ), + .align_by ( data_eew / 8 ), + .sew ( index_eew ), + .insert_idx ( 0 ) + ); + end else if (cfg.vreg_ls_index_init == LS_INDEX_INIT_SLIDE) begin + // Initialize vs2 with random/pre-defined indexes + randomize_indexed_byte_offset(); + add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); + end end super.post_randomize(); endfunction From 4352468ab33571a71e0f7c93e28b97267746b56e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 5 Jun 2024 15:11:01 +0000 Subject: [PATCH 60/90] Fix typos in instruction stream class --- src/riscv_instr_stream.sv | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 432a9c3a..4b3b8c06 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -332,8 +332,8 @@ class riscv_rand_instr_stream extends riscv_instr_stream; function void add_init_vector_gpr_random(riscv_vreg_t vreg, riscv_vreg_t seed, riscv_vreg_t vtemp, int reseed, int min_value, int max_value, int align_by, int sew, int insert_idx = instr_list.size()); - // The LSFR is based on the fibonacci lsfr (https://en.wikipedia.org/wiki/Linear-feedback_shift_register) - // The polinomial parameters are based on a paper by Xilinx (http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf) + // The LFSR is based on the fibonacci lfsr (https://en.wikipedia.org/wiki/Linear-feedback_shift_register) + // The polynomial parameters are based on a paper by Xilinx (http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf) // // LFSR // Feedback polynomial @@ -349,12 +349,12 @@ class riscv_rand_instr_stream extends riscv_instr_stream; riscv_instr init_instr_list [$]; riscv_vector_instr vinstr; riscv_instr_gen_config init_cfg; - int polinomial[]; + int polynomial[]; unique case (sew) - 8: polinomial = {6, 5, 4}; - 16: polinomial = {15, 13, 4}; - 32: polinomial = {22, 2, 1}; + 8: polynomial = {6, 5, 4}; + 16: polynomial = {15, 13, 4}; + 32: polynomial = {22, 2, 1}; default: `uvm_fatal("add_init_vector_gpr_random", $sformatf("Error: Unable to initialize vector with randomised values of SEW == %0d", sew)) endcase @@ -412,8 +412,8 @@ class riscv_rand_instr_stream extends riscv_instr_stream; ) init_instr_list.push_back(vinstr); - foreach (polinomial[i]) begin - // vtemp = seed >> (sew - polinomial[i]) + foreach (polynomial[i]) begin + // vtemp = seed >> (sew - polynomial[i]) $cast(vinstr, riscv_instr::get_instr(VSRL)); vinstr.avoid_reserved_vregs_c.constraint_mode(0); vinstr.m_cfg = init_cfg; @@ -422,7 +422,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; vm == 1'b1; vd == vtemp; vs2 == seed; - imm == sew - polinomial[i]; + imm == 
sew - polynomial[i]; ) init_instr_list.push_back(vinstr); From 17bc1d992c6db082e7da521e0b45718b9434e3d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 08:13:55 +0000 Subject: [PATCH 61/90] [vector_cfg] Relax solve order between vtype and vl --- src/riscv_vector_cfg.sv | 1 - 1 file changed, 1 deletion(-) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 83039aab..63c535e2 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -46,7 +46,6 @@ class riscv_vector_cfg extends uvm_object; constraint solve_order_c { solve vtype.vsew before vtype.vlmul; - solve vtype before vl; solve vl before vstart; } From 31f1b2ac4b795853ab652416499f068617b8547f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 08:14:33 +0000 Subject: [PATCH 62/90] [vector_cfg] Add function to get flat vtype csr register state --- src/riscv_vector_cfg.sv | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 63c535e2..cc85966a 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -200,4 +200,13 @@ class riscv_vector_cfg extends uvm_object; return vlmax; endfunction + // Get flat register state of vtype csr + function logic [XLEN-1:0] get_vtype_content(vtype_t _vtype = vtype); + get_vtype_content = '0; + get_vtype_content[2:0] = _vtype.fractional_lmul ? 3'(-$clog2(_vtype.vlmul)) : 3'($clog2(_vtype.vlmul)); + get_vtype_content[5:3] = $clog2(_vtype.vsew / 8); + get_vtype_content[ 6] = _vtype.vta; + get_vtype_content[ 7] = _vtype.vma; + endfunction + endclass : riscv_vector_cfg From ae3c89f53dc8b329df37c9f0a88b11c41d056428 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 08:15:36 +0000 Subject: [PATCH 63/90] [vector_instr] Do not use x0 as rs2 for vsetvl --- src/isa/riscv_vector_instr.sv | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index dd9ff415..c27a8cbd 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -213,7 +213,15 @@ class riscv_vector_instr extends riscv_floating_point_instr; } } - // Oder to solve load and store constraints in + // Section 6: Configuration-Setting Instructions + // RS2 of vsetvl can only be x0 if vtype is zero + constraint vsetvl_rs2_c { + if (instr_name == VSETVL) { + m_cfg.vector_cfg.vtype != '0 -> rs2 != ZERO; + } + } + + // Order to solve load and store constraints in constraint load_store_solve_order_c { solve ls_eew before ls_emul_non_frac; solve ls_emul_non_frac before vd; From 6883b473b0698a664c7d6dacf6c9378cae921d96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 08:16:16 +0000 Subject: [PATCH 64/90] [vector_instr] Cleanup code --- src/isa/riscv_vector_instr.sv | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index c27a8cbd..0b7243ab 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -731,10 +731,10 @@ class riscv_vector_instr extends riscv_floating_point_instr; has_vs1 = 1'b0; has_vs2 = 1'b0; has_vd = 1'b0; - has_rs1 = name != "VSETIVLI"; - has_rs2 = name == "VSETVL"; + has_rs1 = instr_name != VSETIVLI; + has_rs2 = instr_name == VSETVL; has_rd = 1'b1; - has_imm = name == "VSETIVLI"; + has_imm = instr_name == VSETIVLI; end if (format == VA_FORMAT) begin has_imm = 1'b1; From 
7f960b65913d265798da73d9d0d7bbc7aaae5db3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Domenic=20W=C3=BCthrich?=
Date: Fri, 7 Jun 2024 08:17:29 +0000
Subject: [PATCH 65/90] [vector_instr] Remove vsetivli VL constraint

---
 src/isa/riscv_vector_instr.sv | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv
index 0b7243ab..9e4039af 100644
--- a/src/isa/riscv_vector_instr.sv
+++ b/src/isa/riscv_vector_instr.sv
@@ -543,13 +543,6 @@ class riscv_vector_instr extends riscv_floating_point_instr;
         end
       end
     end
-    // Check vector configuration-setting
-    if (instr_name == VSETIVLI) begin
-      // Immediate vsetivli can only be used if VL fits into space of immediate value
-      if (cfg.vector_cfg.vl >= 2**5) begin
-        return 0;
-      end
-    end
     return 1'b1;
   endfunction

From ac25fca40af51acbff022bc12f3893c10781d74f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Domenic=20W=C3=BCthrich?=
Date: Fri, 7 Jun 2024 08:19:18 +0000
Subject: [PATCH 66/90] [mixed_instr_stream] Add insertion position argument

---
 src/riscv_amo_instr_lib.sv      |  4 ++--
 src/riscv_directed_instr_lib.sv | 30 +++++++++++++++---------------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/src/riscv_amo_instr_lib.sv b/src/riscv_amo_instr_lib.sv
index 7eb9d97c..b7eecea8 100644
--- a/src/riscv_amo_instr_lib.sv
+++ b/src/riscv_amo_instr_lib.sv
@@ -155,7 +155,7 @@ class riscv_lr_sc_instr_stream extends riscv_amo_base_instr_stream;
   // jumps, taken backward branches, JALR, FENCE, and SYSTEM instructions. If the “C”
   // extension is supported, then compressed forms of the aforementioned “I” instructions
   // are also permitted.
-  virtual function void add_mixed_instr(int instr_cnt);
+  virtual function void add_mixed_instr(int instr_cnt, int insert_idx = -1);
     riscv_instr instr;
     int i;
     setup_allowed_instr(.no_branch(1), .no_load_store(1));
@@ -163,7 +163,7 @@ class riscv_lr_sc_instr_stream extends riscv_amo_base_instr_stream;
       instr = riscv_instr::type_id::create("instr");
       randomize_instr(instr, .include_group({RV32I, RV32C}));
       if (!(instr.category inside {SYNCH, SYSTEM})) begin
-        insert_instr(instr);
+        insert_instr(instr, insert_idx);
         i++;
       end
     end
diff --git a/src/riscv_directed_instr_lib.sv b/src/riscv_directed_instr_lib.sv
index 523f3275..dab22d4e 100644
--- a/src/riscv_directed_instr_lib.sv
+++ b/src/riscv_directed_instr_lib.sv
@@ -38,6 +38,21 @@ class riscv_directed_instr_stream extends riscv_rand_instr_stream;
     end
   endfunction

+  // Insert some other instructions to mix with mem_access instruction
+  virtual function void add_mixed_instr(int instr_cnt, int insert_idx = -1);
+    riscv_instr instr;
+    int i = 0;
+    setup_allowed_instr(1, 1);
+    while (i < instr_cnt) begin
+      instr = riscv_instr::type_id::create("instr");
+      randomize_instr(instr);
+      if (instr.is_supported(cfg)) begin
+        insert_instr(instr, insert_idx);
+        i++;
+      end
+    end
+  endfunction
+
 endclass

 // Base class for memory access stream
@@ -80,21 +95,6 @@ class riscv_mem_access_stream extends riscv_directed_instr_stream;
     instr_list.push_front(la_instr);
   endfunction

-  // Insert some other instructions to mix with mem_access instruction
-  virtual function void add_mixed_instr(int instr_cnt);
-    riscv_instr instr;
-    int i = 0;
-    setup_allowed_instr(1, 1);
-    while (i < instr_cnt) begin
-      instr = riscv_instr::type_id::create("instr");
-      randomize_instr(instr);
-      if (instr.is_supported(cfg)) begin
-        insert_instr(instr);
-        i++;
-      end
-    end
-  endfunction
-
 endclass

 // Jump instruction (JAL, JALR)

From 541ad8aefaf3e5b62d06a865645d308c69f0eeb1 Mon 
Sep 17 00:00:00 2001
From: =?UTF-8?q?Domenic=20W=C3=BCthrich?=
Date: Fri, 7 Jun 2024 08:20:51 +0000
Subject: [PATCH 67/90] [random_instr_stream] Add support for vsetvl instructions interleaving

---
 src/riscv_instr_stream.sv | 64 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv
index 4b3b8c06..f7e78782 100644
--- a/src/riscv_instr_stream.sv
+++ b/src/riscv_instr_stream.sv
@@ -218,7 +218,15 @@ class riscv_rand_instr_stream extends riscv_instr_stream;
                                  bit is_debug_program = 1'b0);
     setup_allowed_instr(no_branch, no_load_store);
     foreach(instr_list[i]) begin
+      if (instr_list[i] != null) continue;
       randomize_instr(instr_list[i], is_debug_program);
+      // Handle special instructions
+      if (instr_list[i].group == RVV && instr_list[i].category == CSR) begin
+        handle_vector_configuration_instr(i);
+        // Regenerate list of supported instructions
+        riscv_instr::create_instr_list(cfg);
+        setup_allowed_instr(no_branch, no_load_store);
+      end
     end
     // Do not allow branch instruction as the last instruction because there's no
     // forward branch target
@@ -273,6 +281,10 @@ class riscv_rand_instr_stream extends riscv_instr_stream;
       if (format == CB_FORMAT) {
         rs1 != reserved_rd[i];
       }
+      if (format == VSET_FORMAT) {
+        has_rs1 -> rs1 != reserved_rd[i];
+        has_rs2 -> rs2 != reserved_rd[i];
+      }
     }
     foreach (cfg.reserved_regs[i]) {
       if (has_rd) {
@@ -281,11 +293,63 @@ class riscv_rand_instr_stream extends riscv_instr_stream;
       if (format == CB_FORMAT) {
         rs1 != cfg.reserved_regs[i];
       }
+      if (format == VSET_FORMAT) {
+        has_rs1 -> rs1 != cfg.reserved_regs[i];
+        has_rs2 -> rs2 != cfg.reserved_regs[i];
+      }
     }
     // TODO: Add constraint for CSR, floating point register
   )
 endfunction

+  // Handle vset{i}vl{i} instructions
+  // Regenerate vector configuration and initialize rs1/rs2
+  function handle_vector_configuration_instr(int idx);
+    riscv_instr instr;
+    instr = instr_list[idx];
+
+    // Create new config instance and deep copy old config
+    cfg = new ();
+    cfg.copy(instr.m_cfg);
+    // Set instruction config to new instance
+    instr.m_cfg = cfg;
+
+    // Randomize cfg
+    if (instr.rs1 == ZERO && instr.instr_name != VSETIVLI) begin
+      if (instr.rd == ZERO) begin
+        // Keep existing vl
+        cfg.vector_cfg.vl.rand_mode(0);
+      end
+    end
+    `DV_CHECK_RANDOMIZE_WITH_FATAL(cfg.vector_cfg,
+      if (instr.instr_name == VSETIVLI) {
+        cfg.vector_cfg.vl < 2**5;
+      }
+    )
+    // Special vsetvl{i} conditions
+    if (instr.instr_name != VSETIVLI && instr.rs1 == ZERO && instr.rd != ZERO) begin
+      // Set vl to vlmax
+      cfg.vector_cfg.vl = cfg.vector_cfg.vlmax();
+      cfg.vector_cfg.vstart = 0;
+    end
+    cfg.vector_cfg.vl.rand_mode(1);
+
+    // Handle not fully immediate instructions
+    if (instr.instr_name != VSETIVLI) begin
+      // Copy vsetvl{i} instruction to position further back
+      instr_list[idx+(instr.instr_name == VSETVL ? 
2 : 1) - (instr.rs1 == ZERO)] = instr; + + // Setup rs1 (avl) + if (instr.rs1 != ZERO) begin + instr_list[idx] = get_init_gpr_instr(instr.rs1, cfg.vector_cfg.vl); + end + // Setup rs2 (vtype) + if (instr.instr_name == VSETVL) begin + instr_list[idx+(instr.rs1 != ZERO)] = get_init_gpr_instr(instr.rs2, cfg.vector_cfg.get_vtype_content()); + end + end + endfunction + // Get a random vreg that is aligned to non fractional emul and is not already reserved // Optionally add random vreg to list of reserved vregs function riscv_vreg_t get_random_vreg (int emul, int reserve_vreg); From e514a8e22eede146513fd4dc23b6985ebbac0bc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 11:37:09 +0000 Subject: [PATCH 68/90] [csr_instr] Generate read/writes to vector CSRs --- src/isa/riscv_csr_instr.sv | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/isa/riscv_csr_instr.sv b/src/isa/riscv_csr_instr.sv index 35d98fe4..31679a90 100644 --- a/src/isa/riscv_csr_instr.sv +++ b/src/isa/riscv_csr_instr.sv @@ -98,6 +98,7 @@ class riscv_csr_instr extends riscv_instr; create_include_write_reg(cfg.add_csr_write, cfg.remove_csr_write, default_include_csr_write); end else begin + allow_ro_write = 0; // Use scratch register to avoid the side effect of modifying other privileged mode CSR. if (cfg.init_privileged_mode == MACHINE_MODE) begin include_reg = {MSCRATCH}; @@ -106,6 +107,10 @@ class riscv_csr_instr extends riscv_instr; end else begin include_reg = {USCRATCH}; end + // Add vector CSRs + if (cfg.enable_vector_extension) begin + include_reg = {include_reg, VXSAT, VXRM, VCSR, VL, VTYPE, VLENB}; + end end endfunction : create_csr_filter From fa77aae3528eb891312f642fc4ecb99a2b173b6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 13:01:48 +0000 Subject: [PATCH 69/90] [csr_instr] Use csr names instead of hex vaddress for csr instructions --- src/isa/riscv_csr_instr.sv | 4 ++-- src/isa/riscv_instr.sv | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/isa/riscv_csr_instr.sv b/src/isa/riscv_csr_instr.sv index 31679a90..d6f271db 100644 --- a/src/isa/riscv_csr_instr.sv +++ b/src/isa/riscv_csr_instr.sv @@ -144,9 +144,9 @@ class riscv_csr_instr extends riscv_instr; case(format) I_FORMAT: // instr rd,rs1,imm - asm_str = $sformatf("%0s%0s, 0x%0x, %0s", asm_str, rd.name(), csr, get_imm()); + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, rd.name(), csr.name(), get_imm()); R_FORMAT: // instr rd,rs1,rs2 - asm_str = $sformatf("%0s%0s, 0x%0x, %0s", asm_str, rd.name(), csr, rs1.name()); + asm_str = $sformatf("%0s%0s, %0s, %0s", asm_str, rd.name(), csr.name(), rs1.name()); default: `uvm_fatal(`gfn, $sformatf("Unsupported format %0s [%0s]", format.name(), instr_name.name())) diff --git a/src/isa/riscv_instr.sv b/src/isa/riscv_instr.sv index 924d2624..dcd3ea55 100644 --- a/src/isa/riscv_instr.sv +++ b/src/isa/riscv_instr.sv @@ -40,7 +40,7 @@ class riscv_instr extends uvm_object; bit [4:0] imm_len; // Operands - rand bit [11:0] csr; + rand privileged_reg_t csr; rand riscv_reg_t rs2; rand riscv_reg_t rs1; rand riscv_reg_t rd; From 0ce97521eaeaae66870b17b2e1eed4b51aa84214 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 13:03:03 +0000 Subject: [PATCH 70/90] [vector_instr] Remove config irrelevant csr variables --- src/riscv_asm_program_gen.sv | 4 ++-- src/riscv_instr_stream.sv | 1 - src/riscv_vector_cfg.sv | 13 +------------ 3 files changed, 3 insertions(+), 15 deletions(-) diff --git 
a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index 3c41351a..dfbf30f0 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -595,8 +595,8 @@ class riscv_asm_program_gen extends uvm_object; endcase // Initialize vector CSRs - instr_stream.push_back({indent, $sformatf("csrwi vxsat, %0d", cfg.vector_cfg.vxsat)}); - instr_stream.push_back({indent, $sformatf("csrwi vxrm, %0d", cfg.vector_cfg.vxrm)}); + instr_stream.push_back({indent, $sformatf("csrwi vxsat, %0d", $urandom() & 'b1)}); + instr_stream.push_back({indent, $sformatf("csrwi vxrm, %0d", $urandom() & 'b11)}); // Initialize vector configuration instr_stream.push_back($sformatf("%0sli x%0d, %0d", indent, cfg.gpr[1], cfg.vector_cfg.vl)); diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index f7e78782..2394f96b 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -330,7 +330,6 @@ class riscv_rand_instr_stream extends riscv_instr_stream; if (instr.instr_name != VSETIVLI && instr.rs1 == ZERO && instr.rd != ZERO) begin // Set vl to vlmax cfg.vector_cfg.vl = cfg.vector_cfg.vlmax(); - cfg.vector_cfg.vstart = 0; end cfg.vector_cfg.vl.rand_mode(1); diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index cc85966a..8c794a0e 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -19,9 +19,6 @@ class riscv_vector_cfg extends uvm_object; rand vtype_t vtype; rand bit [XLEN-1:0] vl; - rand bit [XLEN-1:0] vstart; - rand vxrm_t vxrm; - rand bit vxsat; riscv_vreg_t reserved_vregs[$]; // Zvl* extension @@ -44,9 +41,9 @@ class riscv_vector_cfg extends uvm_object; // on current SEW and LMUL setting int unsigned legal_ls_eew[$]; + // Random value solve order constraint solve_order_c { solve vtype.vsew before vtype.vlmul; - solve vl before vstart; } // vl has to be within VLMAX @@ -54,11 +51,6 @@ class riscv_vector_cfg extends uvm_object; vl inside {[0 : vlmax(vtype)]}; } - // vstart has to be within vl - constraint vstart_c { - vstart inside {[0 : vl]}; - } - // Select valid vlmul constraint vlmul_c { vtype.vlmul inside {1, 2, 4, 8}; @@ -83,9 +75,6 @@ class riscv_vector_cfg extends uvm_object; `uvm_field_int(vtype.vlmul, UVM_DEFAULT) `uvm_field_int(vtype.fractional_lmul, UVM_DEFAULT) `uvm_field_int(vl, UVM_DEFAULT) - `uvm_field_int(vstart, UVM_DEFAULT) - `uvm_field_enum(vxrm_t,vxrm, UVM_DEFAULT) - `uvm_field_int(vxsat, UVM_DEFAULT) `uvm_field_queue_enum(riscv_vreg_t, reserved_vregs, UVM_DEFAULT) `uvm_field_string(zve_extension, UVM_DEFAULT) `uvm_field_int(enable_fp_support, UVM_DEFAULT) From 4591412719f32e7ed01f8398fa0ada047634c18e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 13:03:43 +0000 Subject: [PATCH 71/90] [vector_load_store] Set vstart in directed tests --- src/riscv_load_store_instr_lib.sv | 36 +++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index fc5b7735..30cb5152 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -541,6 +541,7 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; rand int unsigned num_mixed_instr; rand int byte_stride; rand address_mode_e address_mode; + rand bit [XLEN-1:0] vstart; // Base address rand riscv_reg_t rs1_reg; // Stride @@ -618,6 +619,14 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; } } + // Find a suitable vstart + constraint vstart_c { + // vstart has to be within vl + 
vstart inside {[0 : cfg.vector_cfg.vl]}; + // Generate as many zero vstart as non zero + vstart dist { 0 := 1, [1:cfg.vector_cfg.vl] :/ 1 }; + } + // Do not use reserved xregs for base address and stride constraint xreg_source_c { !(rs1_reg inside {cfg.reserved_regs, reserved_rd, ZERO}); @@ -700,9 +709,36 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); end end + add_init_vstart(); super.post_randomize(); endfunction + // Initialize the vstart CSR + function void add_init_vstart(); + riscv_instr csr_instr; + int last_rvv_idx = -1; + // Find position of last vector instruction before load/store + // After last rvv instruction it is save to insert vstart CSR write + foreach (instr_list[i]) begin + if (instr_list[i].group == RVV) begin + // We have reached the vector load/store instruction, end here + if (instr_list[i].category inside {LOAD, STORE}) begin + break; + end + // Set index of last rvv instruction + last_rvv_idx = i; + end + end + // Preload vstart value to temporary register and write to CSR + $cast(csr_instr, riscv_instr::get_instr(CSRRW)); + csr_instr.m_cfg = cfg; + randomize_gpr(csr_instr); + csr_instr.csr = VSTART; + csr_instr.rs1 = csr_instr.rd; + insert_instr(csr_instr, last_rvv_idx+1); + insert_instr(get_init_gpr_instr(csr_instr.rd, vstart), last_rvv_idx+1); + endfunction + // Generate a load/store instruction virtual function int gen_load_store_instr(); build_allowed_instr(); From 148b953e6723a3dc6fcc1c37ee5f48c186a3d0b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 7 Jun 2024 13:03:59 +0000 Subject: [PATCH 72/90] [instr_stream] Fix function return type warning --- src/riscv_instr_stream.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 2394f96b..6bcd1745 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -304,7 +304,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // Handle vset{i}vl{i} instructions // Regenerate vector configuration and initialize rs1/rs2 - function handle_vector_configuration_instr(int idx); + function void handle_vector_configuration_instr(int idx); riscv_instr instr; instr = instr_list[idx]; From 131a4285ca460fffbc35f85b9480bdac44e3b852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 12 Jun 2024 06:40:36 +0000 Subject: [PATCH 73/90] [load_store_instr_lib] Fix comment typo --- src/riscv_load_store_instr_lib.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 30cb5152..31b963e8 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -718,7 +718,7 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; riscv_instr csr_instr; int last_rvv_idx = -1; // Find position of last vector instruction before load/store - // After last rvv instruction it is save to insert vstart CSR write + // After last rvv instruction it is safe to insert vstart CSR write foreach (instr_list[i]) begin if (instr_list[i].group == RVV) begin // We have reached the vector load/store instruction, end here From be8feecabcc7bb40fa76a245120f996cd9173635 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 9 Jul 2024 07:49:54 +0000 Subject: [PATCH 74/90] [vector_load_store] Fix range of vstart --- src/riscv_load_store_instr_lib.sv | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 31b963e8..02372abb 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -622,7 +622,7 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Find a suitable vstart constraint vstart_c { // vstart has to be within vl - vstart inside {[0 : cfg.vector_cfg.vl]}; + vstart inside {[0 : cfg.vector_cfg.vl-1]}; // Generate as many zero vstart as non zero vstart dist { 0 := 1, [1:cfg.vector_cfg.vl] :/ 1 }; } From 1a0c05bb4f6d8eee857cbb7cc20f29c72ca2b8f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 16 Aug 2024 06:54:34 +0000 Subject: [PATCH 75/90] [vector_load_store] Add plusarg to enable/disable vstart insertion --- src/riscv_instr_gen_config.sv | 3 +++ src/riscv_load_store_instr_lib.sv | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/riscv_instr_gen_config.sv b/src/riscv_instr_gen_config.sv index 3edf0c68..211d3c04 100644 --- a/src/riscv_instr_gen_config.sv +++ b/src/riscv_instr_gen_config.sv @@ -251,6 +251,8 @@ class riscv_instr_gen_config extends uvm_object; bit enable_vector_extension; // Only generate vector instructions bit vector_instr_only; + // Randomise vstart during vector load and store tests + bit enable_vstart_randomisation = 1'b0; // Initialization of the vregs // SAME_VALUES_ALL_ELEMS - Using vmv.v.x to fill all the elements of the vreg with the same value as the one in the GPR selected // RANDOM_VALUES_VMV - Using vmv.v.x + vslide1down.vx to randomize the contents of each vector element @@ -613,6 +615,7 @@ class riscv_instr_gen_config extends uvm_object; get_bool_arg_value("+enable_floating_point=", enable_floating_point); get_bool_arg_value("+enable_vector_extension=", enable_vector_extension); get_bool_arg_value("+vector_instr_only=", vector_instr_only); + get_bool_arg_value("+enable_vstart_randomisation=", enable_vstart_randomisation); cmdline_enum_processor #(vreg_init_method_t)::get_value("+vreg_init_method=", 1'b0, vreg_init_method); cmdline_enum_processor #(vreg_ls_index_init_t)::get_value("+vreg_ls_index_init=", diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 02372abb..8d4177d1 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -709,7 +709,9 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; add_init_vector_gpr(vs2_reg, indexed_byte_offset, index_eew, 0); end end - add_init_vstart(); + if (cfg.enable_vstart_randomisation) begin + add_init_vstart(); + end super.post_randomize(); endfunction From 855eff66692cc7c714dd1c56b4b3bbe576c1b323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Tue, 20 Aug 2024 15:02:52 +0000 Subject: [PATCH 76/90] [vector_instr] Fix reserved vregs constraint --- src/isa/riscv_vector_instr.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 9e4039af..529e7bb5 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -59,12 +59,12 @@ class riscv_vector_instr extends riscv_floating_point_instr; !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + whole_register_move_cnt - 1]}); } else if (category inside {LOAD, STORE}) { if (format inside {VLX_FORMAT, VSX_FORMAT}) { - !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vlmul) 
* nfields - 1]}); + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vsew) * nfields - 1]}); } else { !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(ls_eew) * nfields - 1]}); } } else { - !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vlmul) - 1]}); + !(m_cfg.vector_cfg.reserved_vregs[i] inside {[vd : vd + emul_non_frac(m_cfg.vector_cfg.vtype.vsew) - 1]}); } } } From ffbaebd8c8798319cd2a8afd3a64c69d70a311d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 20 Sep 2024 07:34:59 +0000 Subject: [PATCH 77/90] [vector_instr] Relax reduction register group alignment constraint --- src/isa/riscv_vector_instr.sv | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 529e7bb5..8774348a 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -85,11 +85,13 @@ class riscv_vector_instr extends riscv_floating_point_instr; !(instr_name inside {VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F}) && !(instr_name inside {VRGATHEREI16}) && !(category inside {LOAD, STORE})) { - if (!is_mask_producing_instr) { + if (!(is_mask_producing_instr || is_reduction_instr)) { vd % m_cfg.vector_cfg.vtype.vlmul == 0; } if (!is_mask_operands) { - (instr_name != VCOMPRESS) -> vs1 % m_cfg.vector_cfg.vtype.vlmul == 0; + if (instr_name != VCOMPRESS && !is_reduction_instr) { + vs1 % m_cfg.vector_cfg.vtype.vlmul == 0; + } vs2 % m_cfg.vector_cfg.vtype.vlmul == 0; } vs3 % m_cfg.vector_cfg.vtype.vlmul == 0; From 06e493be62666e017cbeac2dd6318fcaf611bfe9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 2 Oct 2024 13:27:12 +0000 Subject: [PATCH 78/90] [asm_program_gen] Flush D$ after write to tohost --- src/riscv_asm_program_gen.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index dfbf30f0..bd7a1e19 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -343,7 +343,7 @@ class riscv_asm_program_gen extends uvm_object; virtual function void gen_program_end(int hart); if (hart == 0) begin // Use write_tohost to terminate spike simulation - gen_section("write_tohost", {"sw gp, tohost, t5"}); + gen_section("write_tohost", {"sw gp, tohost, t5", "fence"}); gen_section("_exit", {"j write_tohost"}); end endfunction From 26ab0b929ef26be19735f05dda0bb4c70e7e0613 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 24 Oct 2024 12:30:37 +0000 Subject: [PATCH 79/90] [instr_stream] Fix constraint solve for vsetvl with persistent vl case --- src/riscv_instr_stream.sv | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 6bcd1745..2b2cac50 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -306,6 +306,9 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // Regenerate vector configuration and initialize rs1/rs2 function void handle_vector_configuration_instr(int idx); riscv_instr instr; + int unsigned vsew; + + // Get relevant instruction instr = instr_list[idx]; // Create new config instance and deep copy old config @@ -314,13 +317,26 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // Set instruction config to new instance instr.m_cfg = cfg; - // Randomize cfg - if (instr.rs1 == ZERO && instr.instr_name != 
VSETIVLI) begin - if (instr.rd == ZERO) begin - // Keep existing vl - cfg.vector_cfg.vl.rand_mode(0); + if (instr.instr_name != VSETIVLI && instr.rs1 == ZERO && instr.rd == ZERO) begin + // Keep vl but vtype might change, provided vtype ratio does not change + cfg.vector_cfg.vl.rand_mode(0); + cfg.vector_cfg.vtype.rand_mode(0); + + // Find a random new vsew + vsew = $urandom_range(3,$clog2(cfg.vector_cfg.max_int_sew)); + vsew = 2**vsew; + // If there is no legal vlmul for new vsew, set vsew to known legal value. + // For every current vtype config, there will always be a legal vlmul when vsew + // is reduced, since there is always space for at least one element in a fractional + // register. So setting to smallest vsew here is always possible. + if (!cfg.vector_cfg.vtype.fractional_lmul && vsew/8 > cfg.vector_cfg.vtype.vsew/cfg.vector_cfg.vtype.vlmul) begin + vsew = 8; end + // Calculate new vlmul and update vtype, while vl remains constant + cfg.vector_cfg.update_vsew_keep_vl(vsew); end + + // Randomize cfg `DV_CHECK_RANDOMIZE_WITH_FATAL(cfg.vector_cfg, if (instr.instr_name == VSETIVLI) { cfg.vector_cfg.vl < 2**5; @@ -332,6 +348,7 @@ class riscv_rand_instr_stream extends riscv_instr_stream; cfg.vector_cfg.vl = cfg.vector_cfg.vlmax(); end cfg.vector_cfg.vl.rand_mode(1); + cfg.vector_cfg.vtype.rand_mode(1); // Handle not fully immediate instructions if (instr.instr_name != VSETIVLI) begin From 1bbab630f8e0e640ab246db63bf2baff6bfdab83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Thu, 24 Oct 2024 17:40:38 +0000 Subject: [PATCH 80/90] [vector_instr] Adapt vsetvl[i] register distribution for special AVL cases --- src/isa/riscv_vector_instr.sv | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 8774348a..2e9274c2 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -441,6 +441,17 @@ class riscv_vector_instr extends riscv_floating_point_instr; !m_cfg.vector_cfg.enable_fp_support -> !(va_variant inside {VF, WF, VFM}); } + // Section 6.2: AVL encoding + // Make special cases appear more often + constraint rs_rd_distribution_vsetvli_c { + if (instr_name inside {VSETVL, VSETVLI}) { + rd dist {0 :/ 50, [1:31] :/ 50}; + rd == 0 -> rs1 dist {0 :/ 50, [1:31] :/ 50}; + // Limit the amount of vl == vlmax to 10% + rd != 0 -> rs1 dist {0 :/ 10, [1:31] :/ 90}; + } + } + // Filter unsupported instructions based on configuration virtual function bit is_supported(riscv_instr_gen_config cfg); // Check that current LMUL and SEW are valid for narrowing and widening instruction From fc1de31c8942d765a63795b1c28ae939b9eca6e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 29 Jan 2025 07:59:48 +0000 Subject: [PATCH 81/90] [vector] Fix legality of segmented l/s on fraction lmul --- src/isa/riscv_vector_instr.sv | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 2e9274c2..31fb4115 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -543,14 +543,16 @@ class riscv_vector_instr extends riscv_floating_point_instr; // Segmented l/s need at least two segments if (is_segmented_ls_instr) begin if (format inside {VLX_FORMAT, VSX_FORMAT}) begin + // For indexed, data EMUL == LMUL if (!cfg.vector_cfg.vtype.fractional_lmul && cfg.vector_cfg.vtype.vlmul == 8) begin return 0; end end else begin + // For (unit)-strided, data EMUL == EEW/SEW*LMUL, but with 
LMUL not fractional + // (since we always need a full register for every field) int unsigned max_eew [$] = cfg.vector_cfg.legal_ls_eew.max(); if (int'(real'(max_eew.pop_front()) / real'(cfg.vector_cfg.vtype.vsew) * - (cfg.vector_cfg.vtype.fractional_lmul ? 1.0 / real'(cfg.vector_cfg.vtype.vlmul) : - real'(cfg.vector_cfg.vtype.vlmul))) == 8) begin + (cfg.vector_cfg.vtype.fractional_lmul ? 1.0 : real'(cfg.vector_cfg.vtype.vlmul))) >= 8) begin return 0; end end From c630f9e58937ab10804882d9c17efde014ecf442 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Fri, 31 Jan 2025 12:38:35 +0000 Subject: [PATCH 82/90] [vector] Fix vsetivli setting incorrect VL --- src/isa/riscv_vector_instr.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 31fb4115..325a490c 100644 --- a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -592,7 +592,7 @@ class riscv_vector_instr extends riscv_floating_point_instr; asm_str = $sformatf("%0s%0s, %0s, e%0d, m%0s%0d, t%0s, m%0s", asm_str, rd.name(), - instr_name == VSETIVLI ? get_imm() : rs1.name(), + instr_name == VSETIVLI ? $sformatf("%0d", m_cfg.vector_cfg.vl) : rs1.name(), m_cfg.vector_cfg.vtype.vsew, m_cfg.vector_cfg.vtype.fractional_lmul ? "f" : "", m_cfg.vector_cfg.vtype.vlmul, From b7c21ff7b4f0e2cf4abbb3c9bbdbc164ec74b84b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 3 Feb 2025 15:30:35 +0000 Subject: [PATCH 83/90] [vector_load_store] Fix index range casting when base address at top of region --- src/riscv_instr_stream.sv | 64 +++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 2b2cac50..78e86fd6 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -403,15 +403,16 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // seed: register that contains initial seed (cannot be equal to vreg) // vtemp: temporary vector register used during calculation (cannot be equal to vreg or seed) // reseed: reseed the original seed with the vector element index (seed += vid) - // min_value: lower bound of random value (inclusive) - // max_value: upper bound of random value (inclusive) // align_by: align random value by number of bytes (e.g align_by == 2 would clear the lowest bit) // sew: element width + // min_value: lower bound of random value (inclusive), default (INT_MIN) + // max_value: upper bound of random value (inclusive), default (INT_MAX) // insert_idx: position in instruction stream to insert instruction at // (-1: random, 0: front, instr_list.size(): back (default)) function void add_init_vector_gpr_random(riscv_vreg_t vreg, riscv_vreg_t seed, riscv_vreg_t vtemp, - int reseed, int min_value, int max_value, - int align_by, int sew, int insert_idx = instr_list.size()); + int reseed, int align_by, int sew, + longint min_value = 64'd1 << 63, longint max_value = '1 >> 1, + int insert_idx = instr_list.size()); // The LFSR is based on the fibonacci lfsr (https://en.wikipedia.org/wiki/Linear-feedback_shift_register) // The polynomial parameters are based on a paper by Xilinx (http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf) // @@ -560,34 +561,33 @@ class riscv_rand_instr_stream extends riscv_instr_stream; init_instr_list.push_back(vinstr); // Cast to range - if (min_value > 0) begin - init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], min_value)); - 
$cast(vinstr, riscv_instr::get_instr(VMAXU)); - vinstr.avoid_reserved_vregs_c.constraint_mode(0); - vinstr.m_cfg = init_cfg; - `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, - va_variant == VX; - vm == 1'b1; - vd == vreg; - vs2 == vreg; - rs1 == cfg.gpr[0]; - ) - init_instr_list.push_back(vinstr); - end - if (max_value > 0) begin - init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], max_value)); - $cast(vinstr, riscv_instr::get_instr(VMINU)); - vinstr.avoid_reserved_vregs_c.constraint_mode(0); - vinstr.m_cfg = init_cfg; - `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, - va_variant == VX; - vm == 1'b1; - vd == vreg; - vs2 == vreg; - rs1 == cfg.gpr[0]; - ) - init_instr_list.push_back(vinstr); - end + // Min bound + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], min_value)); + $cast(vinstr, riscv_instr::get_instr(VMAX)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VX; + vm == 1'b1; + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + // Max bound + init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], max_value)); + $cast(vinstr, riscv_instr::get_instr(VMIN)); + vinstr.avoid_reserved_vregs_c.constraint_mode(0); + vinstr.m_cfg = init_cfg; + `DV_CHECK_RANDOMIZE_WITH_FATAL(vinstr, + va_variant == VX; + vm == 1'b1; + vd == vreg; + vs2 == vreg; + rs1 == cfg.gpr[0]; + ) + init_instr_list.push_back(vinstr); + // Value alignment if (align_by > 1) begin init_instr_list.push_back(get_init_gpr_instr(cfg.gpr[0], '1 << $clog2(align_by))); $cast(vinstr, riscv_instr::get_instr(VAND)); From 637d13c8c7a8f17d17543e61bb09347565a6a338 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Mon, 24 Feb 2025 08:02:26 +0000 Subject: [PATCH 84/90] [privilege] Fix sstatus.vs initialisation when booting in S-mode --- src/riscv_privil_reg.sv | 3 ++- src/riscv_privileged_common_seq.sv | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/riscv_privil_reg.sv b/src/riscv_privil_reg.sv index f7dadcfe..df7f6de0 100644 --- a/src/riscv_privil_reg.sv +++ b/src/riscv_privil_reg.sv @@ -350,7 +350,8 @@ class riscv_privil_reg extends riscv_reg#(privileged_reg_t); add_field("SPIE", 1, WARL); add_field("WPRI1", 2, WPRI); add_field("SPP", 1, WLRL); - add_field("WPRI2", 4, WPRI); + add_field("VS", 2, WARL); + add_field("WPRI2", 2, WPRI); add_field("FS", 2, WARL); add_field("XS", 2, WARL); add_field("WPRI3", 1, WPRI); diff --git a/src/riscv_privileged_common_seq.sv b/src/riscv_privileged_common_seq.sv index 6b90c997..f5eaae45 100644 --- a/src/riscv_privileged_common_seq.sv +++ b/src/riscv_privileged_common_seq.sv @@ -138,6 +138,7 @@ class riscv_privileged_common_seq extends uvm_sequence; sstatus.set_field("UXL", 2'b10); end sstatus.set_field("FS", cfg.mstatus_fs); + sstatus.set_field("VS", cfg.mstatus_vs); sstatus.set_field("XS", 0); sstatus.set_field("SD", 0); sstatus.set_field("UIE", 0); From 531bbdaa6f86a117645ceb2df54f9153817ed852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 7 May 2025 07:17:06 +0000 Subject: [PATCH 85/90] Add Axelera copyright notice to source header --- src/isa/riscv_vector_instr.sv | 1 + src/isa/rv32v_instr.sv | 1 + src/riscv_instr_pkg.sv | 1 + src/riscv_instr_stream.sv | 1 + src/riscv_load_store_instr_lib.sv | 1 + src/riscv_vector_cfg.sv | 1 + 6 files changed, 6 insertions(+) diff --git a/src/isa/riscv_vector_instr.sv b/src/isa/riscv_vector_instr.sv index 325a490c..c77dbd7f 100644 --- 
a/src/isa/riscv_vector_instr.sv +++ b/src/isa/riscv_vector_instr.sv @@ -1,6 +1,7 @@ /* * Copyright 2020 Google LLC * Copyright 2020 Andes Technology Co., Ltd. + * Copyright 2025 Axelera AI BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/isa/rv32v_instr.sv b/src/isa/rv32v_instr.sv index 333e8bd7..5935901e 100644 --- a/src/isa/rv32v_instr.sv +++ b/src/isa/rv32v_instr.sv @@ -1,6 +1,7 @@ /* * Copyright 2020 Google LLC * Copyright 2020 Andes Technology Co., Ltd. + * Copyright 2025 Axelera AI BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/riscv_instr_pkg.sv b/src/riscv_instr_pkg.sv index 297aacd8..c0f615e3 100644 --- a/src/riscv_instr_pkg.sv +++ b/src/riscv_instr_pkg.sv @@ -1,6 +1,7 @@ /* * Copyright 2018 Google LLC * Copyright 2020 Andes Technology Co., Ltd. + * Copyright 2025 Axelera AI BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 78e86fd6..4f25c11d 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -1,5 +1,6 @@ /* * Copyright 2018 Google LLC + * Copyright 2025 Axelera AI BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 8d4177d1..726336b2 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -1,6 +1,7 @@ /* * Copyright 2018 Google LLC * Copyright 2020 Andes Technology Co., Ltd. + * Copyright 2025 Axelera AI BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/riscv_vector_cfg.sv b/src/riscv_vector_cfg.sv index 8c794a0e..401f1901 100644 --- a/src/riscv_vector_cfg.sv +++ b/src/riscv_vector_cfg.sv @@ -1,6 +1,7 @@ /* * Copyright 2020 Google LLC * Copyright 2020 Andes Technology Co., Ltd. + * Copyright 2025 Axelera AI BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
From 1690b6e761bafd897ab5de76dbe4cd44e2cff463 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 7 May 2025 07:57:48 +0000 Subject: [PATCH 86/90] 64 bit EEW LFSR index register initialisation support --- src/riscv_instr_stream.sv | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 4f25c11d..6230dc3a 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -434,9 +434,10 @@ class riscv_rand_instr_stream extends riscv_instr_stream; int polynomial[]; unique case (sew) - 8: polynomial = {6, 5, 4}; - 16: polynomial = {15, 13, 4}; - 32: polynomial = {22, 2, 1}; + 8: polynomial = {6, 5, 4}; + 16: polynomial = {15, 13, 4}; + 32: polynomial = {22, 2, 1}; + 64: polynomial = {63, 61, 60}; default: `uvm_fatal("add_init_vector_gpr_random", $sformatf("Error: Unable to initialize vector with randomised values of SEW == %0d", sew)) endcase From 1596a116578ea06a6d0d75343a86ad83f092bec2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 7 May 2025 08:06:56 +0000 Subject: [PATCH 87/90] Update rv64gcv testlist with new vector scenarios --- target/rv64gcv/testlist.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/target/rv64gcv/testlist.yaml b/target/rv64gcv/testlist.yaml index e52c74f2..4e7425aa 100644 --- a/target/rv64gcv/testlist.yaml +++ b/target/rv64gcv/testlist.yaml @@ -112,8 +112,8 @@ +num_of_sub_program=0 +enable_floating_point=1 +enable_vector_extension=1 + +vreg_init_method=RANDOM_VALUES_LOAD +no_fence=1 - +no_data_page=1 +no_branch_jump=1 +boot_mode=m +no_csr_instr=1 @@ -130,11 +130,11 @@ +enable_floating_point=1 +enable_vector_extension=1 +vector_instr_only=1 + +vreg_init_method=RANDOM_VALUES_LOAD +no_fence=1 - +no_data_page=1 +no_branch_jump=1 +boot_mode=m - +no_csr_instr=1 + +no_csr_instr=0 iterations: 2 gen_test: riscv_instr_base_test rtl_test: core_base_test @@ -148,6 +148,8 @@ +enable_floating_point=1 +enable_vector_extension=1 +vreg_init_method=RANDOM_VALUES_LOAD + +vreg_ls_index_init=LS_INDEX_INIT_LFSR + +enable_vstart_randomisation=1 +directed_instr_0=riscv_vector_load_store_instr_stream,10 +no_branch_jump=1 +boot_mode=m From 2dac8fc3f25842a325182b508da3ae9bf2a60564 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 7 May 2025 08:18:47 +0000 Subject: [PATCH 88/90] Update all target configs with new vector settings --- target/ml/riscv_core_setting.sv | 16 +------------- target/multi_harts/riscv_core_setting.sv | 16 +------------- target/rv32i/riscv_core_setting.sv | 16 +------------- target/rv32imafdc/riscv_core_setting.sv | 16 +------------- target/rv32imc/riscv_core_setting.sv | 16 +------------- target/rv32imc_sv32/riscv_core_setting.sv | 16 +------------- target/rv32imcb/riscv_core_setting.sv | 16 +------------- target/rv64gc/riscv_core_setting.sv | 16 +------------- target/rv64gcv/riscv_core_setting.sv | 26 +++++++++-------------- target/rv64imafdc/riscv_core_setting.sv | 16 +------------- target/rv64imc/riscv_core_setting.sv | 16 +------------- target/rv64imcb/riscv_core_setting.sv | 16 +------------- 12 files changed, 21 insertions(+), 181 deletions(-) diff --git a/target/ml/riscv_core_setting.sv b/target/ml/riscv_core_setting.sv index b5704536..d74cac61 100644 --- a/target/ml/riscv_core_setting.sv +++ b/target/ml/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // 
---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/multi_harts/riscv_core_setting.sv b/target/multi_harts/riscv_core_setting.sv index 95b2433f..3a46031d 100644 --- a/target/multi_harts/riscv_core_setting.sv +++ b/target/multi_harts/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv32i/riscv_core_setting.sv b/target/rv32i/riscv_core_setting.sv index 0cc2309e..8b6a58be 100644 --- a/target/rv32i/riscv_core_setting.sv +++ b/target/rv32i/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv32imafdc/riscv_core_setting.sv b/target/rv32imafdc/riscv_core_setting.sv index f2c54bb3..98cbfa6e 100644 --- a/target/rv32imafdc/riscv_core_setting.sv +++ b/target/rv32imafdc/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub - element, which must be at most 8 - bits. 
-parameter int SELEN = 8; - -// Maximum size of a single vector element(encoded in vsew format) -parameter int VELEN = int'($ln(ELEN) /$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi - harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv32imc/riscv_core_setting.sv b/target/rv32imc/riscv_core_setting.sv index 0bfb92b6..da5da220 100644 --- a/target/rv32imc/riscv_core_setting.sv +++ b/target/rv32imc/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv32imc_sv32/riscv_core_setting.sv b/target/rv32imc_sv32/riscv_core_setting.sv index c9141e3c..21388c97 100644 --- a/target/rv32imc_sv32/riscv_core_setting.sv +++ b/target/rv32imc_sv32/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv32imcb/riscv_core_setting.sv b/target/rv32imcb/riscv_core_setting.sv index 75e95d8a..0cbbca35 100644 --- a/target/rv32imcb/riscv_core_setting.sv +++ b/target/rv32imcb/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. 
-parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv64gc/riscv_core_setting.sv b/target/rv64gc/riscv_core_setting.sv index 376b90f4..9b2511a2 100644 --- a/target/rv64gc/riscv_core_setting.sv +++ b/target/rv64gc/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv64gcv/riscv_core_setting.sv b/target/rv64gcv/riscv_core_setting.sv index 4888baae..16b6da83 100644 --- a/target/rv64gcv/riscv_core_setting.sv +++ b/target/rv64gcv/riscv_core_setting.sv @@ -65,23 +65,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 1; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. 
-parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- @@ -139,7 +125,15 @@ const privileged_reg_t implemented_csr[] = { MTVAL, // Machine bad address or instruction MIP, // Machine interrupt pending // Floating point CSR - FCSR // Floating point control and status + FCSR, // Floating point control and status + // Vector CSRVSTART + VSTART, // Vector start position + VXSAT, // Fixed point saturate flag + VXRM, // Fixed point rounding mode + VCSR, // Vector control and status register + VL, // Vector length + VTYPE, // Vector data type register + VLENB // VLEN/8 (vector register length in bytes) }; // Implementation-specific custom CSRs diff --git a/target/rv64imafdc/riscv_core_setting.sv b/target/rv64imafdc/riscv_core_setting.sv index 376b90f4..9b2511a2 100644 --- a/target/rv64imafdc/riscv_core_setting.sv +++ b/target/rv64imafdc/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv64imc/riscv_core_setting.sv b/target/rv64imc/riscv_core_setting.sv index 8baf25ca..53efb2fb 100644 --- a/target/rv64imc/riscv_core_setting.sv +++ b/target/rv64imc/riscv_core_setting.sv @@ -65,23 +65,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. 
-parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- diff --git a/target/rv64imcb/riscv_core_setting.sv b/target/rv64imcb/riscv_core_setting.sv index 75ae13fa..d5896df6 100644 --- a/target/rv64imcb/riscv_core_setting.sv +++ b/target/rv64imcb/riscv_core_setting.sv @@ -66,23 +66,9 @@ parameter int NUM_VEC_GPR = 32; // Vector extension configuration // ---------------------------------------------------------------------------- -// Parameter for vector extension -parameter int VECTOR_EXTENSION_ENABLE = 0; - +// Vector Register Length parameter int VLEN = 512; -// Maximum size of a single vector element -parameter int ELEN = 32; - -// Minimum size of a sub-element, which must be at most 8-bits. -parameter int SELEN = 8; - -// Maximum size of a single vector element (encoded in vsew format) -parameter int VELEN = int'($ln(ELEN)/$ln(2)) - 3; - -// Maxium LMUL supported by the core -parameter int MAX_LMUL = 8; - // ---------------------------------------------------------------------------- // Multi-harts configuration // ---------------------------------------------------------------------------- From b6ffdba7835c95b372d2ee84eaa479958a8019b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 7 May 2025 08:38:57 +0000 Subject: [PATCH 89/90] Fix lint warnings --- src/isa/riscv_csr_instr.sv | 8 ++++---- src/riscv_asm_program_gen.sv | 9 +++++---- src/riscv_illegal_instr.sv | 2 +- src/riscv_instr_stream.sv | 3 ++- src/riscv_load_store_instr_lib.sv | 29 +++++++++++++++-------------- 5 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/isa/riscv_csr_instr.sv b/src/isa/riscv_csr_instr.sv index d6f271db..b5324fc8 100644 --- a/src/isa/riscv_csr_instr.sv +++ b/src/isa/riscv_csr_instr.sv @@ -46,25 +46,25 @@ class riscv_csr_instr extends riscv_instr; } } - constraint csr_csrrw { + constraint csr_csrrw_c { if (instr_name == CSRRW || instr_name == CSRRWI) { write_csr == 1'b1; } } - constraint csr_csrrsc { + constraint csr_csrrsc_c { if (instr_name == CSRRS || instr_name == CSRRC) { (write_csr == 1'b1) || rs1 == 0; } } - constraint csr_csrrsci { + constraint csr_csrrsci_c { if(instr_name == CSRRSI || instr_name == CSRRCI) { (write_csr == 1'b1) || imm == 0; } } - constraint order { + constraint order_c { // Choose a CSR before deciding whether we want to write to the CSR values. Then choose whether // to read or write before choosing the rs1 and imm values. 
This ensures read-only accesses to // read-only CSRs with similar probability to other CSR accesses and ensures a reasonable write diff --git a/src/riscv_asm_program_gen.sv b/src/riscv_asm_program_gen.sv index bd7a1e19..ce944d72 100644 --- a/src/riscv_asm_program_gen.sv +++ b/src/riscv_asm_program_gen.sv @@ -581,17 +581,18 @@ class riscv_asm_program_gen extends uvm_object; `uvm_fatal(`gfn, "Couldn't find a memory region big enough to initialize the vector registers") for (int v = 0; v < NUM_VEC_GPR; v++) begin - // Select random region + // Select random region int region = $urandom_range(0, valid_mem_region.size()-1); - // Get valid start offset in region + // Get valid start offset in region int offset = $urandom_range(0, (valid_mem_region[region].size_in_bytes - (cfg.vector_cfg.vlen / 8)) / - (sew / 8)) * (sew / 8); - // Generate load + (sew / 8)) * (sew / 8); + // Generate load instr_stream.push_back($sformatf("%0sla x%0d, %0s+%0d", indent, cfg.gpr[0], valid_mem_region[region].name, offset)); instr_stream.push_back($sformatf("%0svle%0d.v v%0d, (x%0d)", indent, sew, v, cfg.gpr[0])); end end + default: ; endcase // Initialize vector CSRs diff --git a/src/riscv_illegal_instr.sv b/src/riscv_illegal_instr.sv index 60ff4b54..5777fa11 100644 --- a/src/riscv_illegal_instr.sv +++ b/src/riscv_illegal_instr.sv @@ -142,7 +142,7 @@ class riscv_illegal_instr extends uvm_object; } } - constraint legal_rv32_c_slli { + constraint legal_rv32_c_slli_c { if ((c_msb == 3'b000) && (c_op == 2'b10) && (XLEN == 32)) { if (exception == kReservedCompressedInstr) { instr_bin[12] == 1; diff --git a/src/riscv_instr_stream.sv b/src/riscv_instr_stream.sv index 6230dc3a..afba783d 100644 --- a/src/riscv_instr_stream.sv +++ b/src/riscv_instr_stream.sv @@ -330,7 +330,8 @@ class riscv_rand_instr_stream extends riscv_instr_stream; // For every current vtype config, there will always be a legal vlmul when vsew // is reduced, since there is always space for at least one element in a fractional // register. So setting to smallest vsew here is always possible. 
- if (!cfg.vector_cfg.vtype.fractional_lmul && vsew/8 > cfg.vector_cfg.vtype.vsew/cfg.vector_cfg.vtype.vlmul) begin + if (!cfg.vector_cfg.vtype.fractional_lmul && + vsew/8 > cfg.vector_cfg.vtype.vsew/cfg.vector_cfg.vtype.vlmul) begin vsew = 8; end // Calculate new vlmul and update vtype, while vl remains constant diff --git a/src/riscv_load_store_instr_lib.sv b/src/riscv_load_store_instr_lib.sv index 726336b2..04570864 100644 --- a/src/riscv_load_store_instr_lib.sv +++ b/src/riscv_load_store_instr_lib.sv @@ -523,11 +523,11 @@ endclass class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // List of vector load/store instructions (grouped into different address modes) - localparam riscv_instr_name_t unit_strided[] = {VLE_V, VSE_V, VLEFF_V, - VLM_V, VSM_V, VLRE_V, VSR_V, - VLSEGE_V, VSSEGE_V, VLSEGEFF_V}; - localparam riscv_instr_name_t strided[] = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V}; - localparam riscv_instr_name_t indexed[] = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, + localparam riscv_instr_name_t UnitStrided[] = {VLE_V, VSE_V, VLEFF_V, + VLM_V, VSM_V, VLRE_V, VSR_V, + VLSEGE_V, VSSEGE_V, VLSEGEFF_V}; + localparam riscv_instr_name_t Strided[] = {VLSE_V, VSSE_V, VLSSEGE_V, VSSSEGE_V}; + localparam riscv_instr_name_t Indexed[] = {VLUXEI_V, VLOXEI_V, VSUXEI_V, VSOXEI_V, VLUXSEGEI_V, VLOXSEGEI_V, VSUXSEGEI_V, VSOXSEGEI_V}; // Types of vector load/store address modes @@ -642,20 +642,20 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; super.pre_randomize(); // Build list of allowed address modes (according to unsupported_instr list) - foreach(unit_strided[i]) begin - if (!(unit_strided[i] inside {unsupported_instr})) begin + foreach(UnitStrided[i]) begin + if (!(UnitStrided[i] inside {unsupported_instr})) begin allowed_address_modes = {allowed_address_modes, UNIT_STRIDED}; break; end end - foreach(strided[i]) begin - if (!(strided[i] inside {unsupported_instr})) begin + foreach(Strided[i]) begin + if (!(Strided[i] inside {unsupported_instr})) begin allowed_address_modes = {allowed_address_modes, STRIDED}; break; end end - foreach(indexed[i]) begin - if (!(indexed[i] inside {unsupported_instr})) begin + foreach(Indexed[i]) begin + if (!(Indexed[i] inside {unsupported_instr})) begin allowed_address_modes = {allowed_address_modes, INDEXED}; break; end @@ -758,14 +758,15 @@ class riscv_vector_load_store_instr_stream extends riscv_mem_access_stream; // Get instructions for selected address mode case (address_mode) UNIT_STRIDED : begin - possible_instr = {unit_strided}; + possible_instr = {UnitStrided}; end STRIDED : begin - possible_instr = {strided}; + possible_instr = {Strided}; end INDEXED : begin - possible_instr = {indexed}; + possible_instr = {Indexed}; end + default: ; endcase // Filter out illegal instructions for current config From a9e723ba921aee0cd2aea7999d92859f2a2f088e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Domenic=20W=C3=BCthrich?= Date: Wed, 7 May 2025 11:10:20 +0000 Subject: [PATCH 90/90] Upgrade cache actions to v4 --- .github/workflows/build-spike.yml | 2 +- .github/workflows/run-tests.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-spike.yml b/.github/workflows/build-spike.yml index 59fbfffb..fcd02d97 100644 --- a/.github/workflows/build-spike.yml +++ b/.github/workflows/build-spike.yml @@ -26,7 +26,7 @@ jobs: echo "cache_name=$cache_name" >> "$GITHUB_ENV" - name: Setup cache - uses: actions/cache@v3 + uses: actions/cache@v4 id: cache timeout-minutes: 60 with: diff --git 
a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 7858a706..95fa385b 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -56,7 +56,7 @@ jobs: echo "cache_code=${cache_code}_${{ env.CACHE_HASH }}" | tee -a "$GITHUB_ENV" - name: Cache Code - uses: actions/cache@v3 + uses: actions/cache@v4 id: cache-code timeout-minutes: 60 with: @@ -152,7 +152,7 @@ jobs: - name: Restore Spike cache id: cache-spike-restore - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | /opt/spike @@ -168,7 +168,7 @@ jobs: echo "PYTHONPATH=pygen" >> $GITHUB_ENV - name: Cache Code Restore - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 id: cache-code-restore timeout-minutes: 60 with: