diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 5f51bd68e138db..b337d12c7b3ddb 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3104,25 +3104,25 @@ class Compiler #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, @@ -3130,25 +3130,25 @@ class Compiler GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdAbsNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); #if defined(TARGET_ARM64) - GenTree* gtNewSimdAllTrueMaskNode(CorInfoType simdBaseJitType); + GenTree* gtNewSimdAllTrueMaskNode(var_types 
simdBaseType); GenTree* gtNewSimdFalseMaskByteNode(); #endif @@ -3156,184 +3156,184 @@ class Compiler var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCeilNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); #if defined(FEATURE_MASKED_HW_INTRINSICS) - GenTree* gtNewSimdCvtMaskToVectorNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + GenTree* gtNewSimdCvtMaskToVectorNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); #endif // FEATURE_MASKED_HW_INTRINSICS GenTree* gtNewSimdCvtNode(var_types type, GenTree* op1, - CorInfoType simdTargetBaseJitType, - CorInfoType simdSourceBaseJitType, + var_types simdTargetBaseType, + var_types simdSourceBaseType, unsigned simdSize); GenTree* gtNewSimdCvtNativeNode(var_types type, GenTree* op1, - CorInfoType simdTargetBaseJitType, - CorInfoType simdSourceBaseJitType, + var_types simdTargetBaseType, + var_types simdSourceBaseType, unsigned simdSize); #if defined(FEATURE_MASKED_HW_INTRINSICS) - GenTree* gtNewSimdCvtVectorToMaskNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + GenTree* 
gtNewSimdCvtVectorToMaskNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); #endif // FEATURE_MASKED_HW_INTRINSICS GenTree* gtNewSimdCreateBroadcastNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCreateScalarNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCreateScalarUnsafeNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdCreateSequenceNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdFloorNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdFmaNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); - GenTree* gtNewSimdGetIndicesNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize); + GenTree* gtNewSimdGetIndicesNode(var_types type, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdGetLowerNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdGetUpperNode(var_types type, GenTree* op1, 
- CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsEvenIntegerNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsFiniteNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsInfinityNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsIntegerNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsNaNNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsNegativeNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsNegativeInfinityNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsNormalNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsOddIntegerNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsPositiveNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsPositiveInfinityNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsSubnormalNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdIsZeroNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdLoadNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, 
unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdLoadAlignedNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdLoadNonTemporalNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdMinMaxNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize, bool isMax, bool isMagnitude, @@ -3342,47 +3342,47 @@ class Compiler GenTree* gtNewSimdMinMaxNativeNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize, bool isMax); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdRoundNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdShuffleVariableNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize, bool isShuffleNative); GenTree* gtNewSimdShuffleNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize, bool isShuffleNative); GenTree* gtNewSimdSqrtNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdStoreNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize); + GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdStoreAlignedNode( - GenTree* op1, GenTree* op2, 
CorInfoType simdBaseJitType, unsigned simdSize); + GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdStoreNonTemporalNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize); + GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdSumNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); #if defined(TARGET_XARCH) GenTree* gtNewSimdTernaryLogicNode(var_types type, @@ -3390,48 +3390,48 @@ class Compiler GenTree* op2, GenTree* op3, GenTree* op4, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); #endif // TARGET_XARCH GenTree* gtNewSimdToScalarNode(var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdTruncNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdWidenLowerNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdWidenUpperNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); + var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdWithLowerNode(var_types type, GenTree* op1, GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTree* gtNewSimdWithUpperNode(var_types type, GenTree* op1, 
GenTree* op2, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); @@ -3442,9 +3442,10 @@ class Compiler NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); - CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, - CORINFO_SIG_INFO* sig, - CorInfoType simdBaseJitType); + + var_types getBaseTypeFromArgIfNeeded(NamedIntrinsic intrinsic, + CORINFO_SIG_INFO* sig, + var_types simdBaseType); #ifdef TARGET_ARM64 GenTreeFieldList* gtConvertTableOpToFieldList(GenTree* op, unsigned fieldCount); @@ -4740,13 +4741,13 @@ class Compiler CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig R2RARG(CORINFO_CONST_LOOKUP* entryPoint), - CorInfoType simdBaseJitType, + var_types simdBaseType, var_types retType, unsigned simdSize, bool mustExpand); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass); - GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); + GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types simdBaseType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); @@ -4757,7 +4758,7 @@ class Compiler GenTree** immOp2Ptr); bool CheckHWIntrinsicImmRange(NamedIntrinsic intrinsic, - CorInfoType simdBaseJitType, + var_types simdBaseType, GenTree* immOp, bool mustExpand, int immLowerBound, @@ -4831,7 +4832,7 @@ class Compiler GenTree* impGetNodeAddr(GenTree* val, unsigned curLevel, GenTreeFlags* pDerefFlags); - var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); + var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, 
var_types* simdBaseType = nullptr); GenTree* impNormStructVal(GenTree* structVal, unsigned curLevel); @@ -9084,8 +9085,17 @@ class Compiler return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } + var_types getBaseTypeForPrimitiveNumericClass(CORINFO_CLASS_HANDLE cls); + // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. + var_types getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); + + var_types getBaseTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) + { + return getBaseTypeAndSizeOfSIMDType(typeHnd, nullptr); + } + CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) @@ -9106,7 +9116,7 @@ class Compiler int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; - (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); + (void)getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp index 6425c393b3620d..52720461c2411a 100644 --- a/src/coreclr/jit/decomposelongs.cpp +++ b/src/coreclr/jit/decomposelongs.cpp @@ -592,8 +592,8 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use) NamedIntrinsic intrinsicId = NI_Illegal; GenTree* srcOp = cast->CastOp(); var_types dstType = cast->CastToType(); - CorInfoType baseFloatingType = (dstType == TYP_FLOAT) ? CORINFO_TYPE_FLOAT : CORINFO_TYPE_DOUBLE; - CorInfoType baseIntegralType = cast->IsUnsigned() ? CORINFO_TYPE_ULONG : CORINFO_TYPE_LONG; + var_types baseFloatingType = (dstType == TYP_FLOAT) ? TYP_FLOAT : TYP_DOUBLE; + var_types baseIntegralType = cast->IsUnsigned() ? 
TYP_ULONG : TYP_LONG; assert(!cast->gtOverflow()); assert(m_compiler->compIsaSupportedDebugOnly(InstructionSet_AVX512)); @@ -1874,9 +1874,8 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicGetElement(LIR::Use& use, GenTreeHW // Create: // loResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, tmp_index_times_two) - GenTreeHWIntrinsic* loResult = - m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, indexTimesTwo, node->GetHWIntrinsicId(), - CORINFO_TYPE_INT, simdSize); + GenTreeHWIntrinsic* loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, indexTimesTwo, + node->GetHWIntrinsicId(), TYP_INT, simdSize); Range().InsertBefore(node, loResult); simdTmpVar = m_compiler->gtNewLclLNode(simdTmpVarNum, simdTmpVar->TypeGet()); @@ -1905,9 +1904,8 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicGetElement(LIR::Use& use, GenTreeHW Range().InsertBefore(node, indexTimesTwo, one, indexTimesTwoPlusOne); } - GenTreeHWIntrinsic* hiResult = - m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, indexTimesTwoPlusOne, node->GetHWIntrinsicId(), - CORINFO_TYPE_INT, simdSize); + GenTreeHWIntrinsic* hiResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, indexTimesTwoPlusOne, + node->GetHWIntrinsicId(), TYP_INT, simdSize); Range().InsertBefore(node, hiResult); // Done with the original tree; remove it. 
@@ -1954,14 +1952,14 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicToScalar(LIR::Use& use, GenTreeHWIn JITDUMP("[DecomposeHWIntrinsicToScalar]: Saving op1 tree to a temp var:\n"); DISPTREERANGE(Range(), simdTmpVar); - GenTree* loResult = m_compiler->gtNewSimdToScalarNode(TYP_INT, simdTmpVar, CORINFO_TYPE_INT, simdSize); + GenTree* loResult = m_compiler->gtNewSimdToScalarNode(TYP_INT, simdTmpVar, TYP_INT, simdSize); Range().InsertAfter(simdTmpVar, loResult); simdTmpVar = m_compiler->gtNewLclLNode(simdTmpVarNum, simdTmpVar->TypeGet()); Range().InsertAfter(loResult, simdTmpVar); GenTree* one = m_compiler->gtNewIconNode(1); - GenTree* hiResult = m_compiler->gtNewSimdGetElementNode(TYP_INT, simdTmpVar, one, CORINFO_TYPE_INT, simdSize); + GenTree* hiResult = m_compiler->gtNewSimdGetElementNode(TYP_INT, simdTmpVar, one, TYP_INT, simdSize); Range().InsertAfter(simdTmpVar, one, hiResult); Range().Remove(node); @@ -2001,10 +1999,9 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicMoveMask(LIR::Use& use, GenTreeHWIn assert(varTypeIsLong(node)); assert(node->GetHWIntrinsicId() == NI_AVX512_MoveMask); - GenTree* op1 = node->Op(1); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + GenTree* op1 = node->Op(1); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); assert(varTypeIsArithmetic(simdBaseType)); assert(op1->TypeIs(TYP_MASK)); @@ -2029,7 +2026,7 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicMoveMask(LIR::Use& use, GenTreeHWIn // Create: // loResult = GT_HWINTRINSIC{MoveMask}(simdTmpVar) - loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, NI_AVX512_MoveMask, simdBaseJitType, 32); + loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, NI_AVX512_MoveMask, simdBaseType, 32); Range().InsertBefore(node, loResult); simdTmpVar = m_compiler->gtNewLclLNode(simdTmpVarNum, simdTmpVar->TypeGet()); @@ 
-2043,10 +2040,10 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicMoveMask(LIR::Use& use, GenTreeHWIn Range().InsertBefore(node, shiftIcon); simdTmpVar = m_compiler->gtNewSimdHWIntrinsicNode(TYP_MASK, simdTmpVar, shiftIcon, NI_AVX512_ShiftRightMask, - simdBaseJitType, 64); + simdBaseType, 64); Range().InsertBefore(node, simdTmpVar); - hiResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, NI_AVX512_MoveMask, simdBaseJitType, 32); + hiResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar, NI_AVX512_MoveMask, simdBaseType, 32); Range().InsertBefore(node, hiResult); } else @@ -2054,7 +2051,7 @@ GenTree* DecomposeLongs::DecomposeHWIntrinsicMoveMask(LIR::Use& use, GenTreeHWIn // Create: // loResult = GT_HWINTRINSIC{MoveMask}(op1) - loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, op1, NI_AVX512_MoveMask, simdBaseJitType, simdSize); + loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, op1, NI_AVX512_MoveMask, simdBaseType, simdSize); Range().InsertBefore(node, loResult); // Create: diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index b9d49d5655f500..fa59f478c91762 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -9850,8 +9850,8 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) case GT_HWINTRINSIC: copy = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()), - tree->AsHWIntrinsic()->GetHWIntrinsicId(), - tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize()); + tree->AsHWIntrinsic()->GetHWIntrinsicId(), tree->AsHWIntrinsic()->GetSimdBaseType(), + tree->AsHWIntrinsic()->GetSimdSize()); copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType()); if (tree->AsHWIntrinsic()->IsUserCall()) @@ -20218,13 +20218,21 @@ var_types GenTreeJitIntrinsic::GetAuxiliaryType() const var_types GenTreeJitIntrinsic::GetSimdBaseType() const { - CorInfoType simdBaseJitType 
= GetSimdBaseJitType(); + return (var_types)gtSimdBaseType; +} - if (simdBaseJitType == CORINFO_TYPE_UNDEF) +var_types GenTreeJitIntrinsic::GetSimdBaseTypeAsVarType() const +{ + var_types simdBaseType = GetSimdBaseType(); + switch (simdBaseType) { - return TYP_UNKNOWN; + case TYP_UINT: + return TYP_INT; + case TYP_ULONG: + return TYP_LONG; + default: + return simdBaseType; } - return JitType2PreciseVarType(simdBaseJitType); } #endif // FEATURE_SIMD @@ -20661,10 +20669,10 @@ bool GenTree::isEmbeddedMaskingCompatible() const // Return Value: // true if the node lowering instruction has a EVEX embedded masking support // -bool GenTree::isEmbeddedMaskingCompatible(Compiler* comp, - unsigned tgtMaskSize, - CorInfoType& tgtSimdBaseJitType, - size_t* broadcastOpIndex /* = nullptr */) const +bool GenTree::isEmbeddedMaskingCompatible(Compiler* comp, + unsigned tgtMaskSize, + var_types& tgtSimdBaseType, + size_t* broadcastOpIndex /* = nullptr */) const { if (!isEmbeddedMaskingCompatible()) { @@ -20687,17 +20695,16 @@ bool GenTree::isEmbeddedMaskingCompatible(Compiler* comp, return false; } - const GenTreeHWIntrinsic* node = AsHWIntrinsic(); - NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - var_types simdType = node->TypeGet(); + const GenTreeHWIntrinsic* node = AsHWIntrinsic(); + NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + var_types simdType = node->TypeGet(); instruction ins = HWIntrinsicInfo::lookupIns(intrinsic, simdBaseType, comp); unsigned maskBaseSize = CodeGenInterface::instKMaskBaseSize(ins); unsigned tgtMaskBaseSize = tgtMaskSize / (genTypeSize(simdType) / 16); - tgtSimdBaseJitType = CORINFO_TYPE_UNDEF; + tgtSimdBaseType = TYP_UNDEF; if (maskBaseSize != tgtMaskBaseSize) { @@ -20813,39 +20820,37 @@ bool GenTree::isEmbeddedMaskingCompatible(Compiler* comp, { if 
(varTypeIsFloating(simdBaseType)) { - tgtSimdBaseJitType = CORINFO_TYPE_DOUBLE; + tgtSimdBaseType = TYP_DOUBLE; } else if (varTypeIsSigned(simdBaseType)) { - tgtSimdBaseJitType = CORINFO_TYPE_LONG; + tgtSimdBaseType = TYP_LONG; } else { - tgtSimdBaseJitType = CORINFO_TYPE_ULONG; + tgtSimdBaseType = TYP_ULONG; } } else if (tgtMaskBaseSize == 4) { if (varTypeIsFloating(simdBaseType)) { - tgtSimdBaseJitType = CORINFO_TYPE_FLOAT; + tgtSimdBaseType = TYP_FLOAT; } else if (varTypeIsSigned(simdBaseType)) { - tgtSimdBaseJitType = CORINFO_TYPE_INT; + tgtSimdBaseType = TYP_INT; } else { - tgtSimdBaseJitType = CORINFO_TYPE_UINT; + tgtSimdBaseType = TYP_UINT; } } } } - if (tgtSimdBaseJitType != CORINFO_TYPE_UNDEF) + if (tgtSimdBaseType != TYP_UNDEF) { - var_types tgtSimdBaseType = JitType2PreciseVarType(tgtSimdBaseJitType); - instruction tgtIns = HWIntrinsicInfo::lookupIns(intrinsic, tgtSimdBaseType, comp); assert(ins != tgtIns); @@ -20862,34 +20867,30 @@ bool GenTree::isEmbeddedMaskingCompatible(Compiler* comp, GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize) { return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseType, simdSize); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode( - var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, var_types simdBaseType, unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseType, simdSize, op1); } -GenTreeHWIntrinsic* 
Compiler::gtNewSimdHWIntrinsicNode(var_types type, - GenTree* op1, - GenTree* op2, - NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode( + var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, var_types simdBaseType, unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); SetOpLclRelatedToSIMDIntrinsic(op2); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1, op2); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseType, simdSize, op1, op2); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -20897,7 +20898,7 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); @@ -20905,7 +20906,7 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, SetOpLclRelatedToSIMDIntrinsic(op3); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1, op2, op3); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseType, simdSize, op1, op2, op3); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -20914,7 +20915,7 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); @@ -20922,15 +20923,15 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, SetOpLclRelatedToSIMDIntrinsic(op3); SetOpLclRelatedToSIMDIntrinsic(op4); - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), 
hwIntrinsicID, - simdBaseJitType, simdSize, op1, op2, op3, op4); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseType, simdSize, op1, op2, op3, op4); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize) { IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount); @@ -20941,13 +20942,13 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, } return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize); + GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseType, simdSize); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize) { for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++) @@ -20956,10 +20957,10 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types ty } return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize); + GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseType, simdSize); } -GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -20967,7 +20968,6 @@ GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType si assert(op1 != nullptr); assert(op1->TypeGet() == type); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if 
(varTypeIsUnsigned(simdBaseType)) @@ -20984,14 +20984,14 @@ GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType si if (simdBaseType == TYP_FLOAT) { bitMask = gtNewIconNode(0x7FFFFFFF); - bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, CORINFO_TYPE_INT, simdSize); + bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, TYP_INT, simdSize); } else { bitMask = gtNewLconNode(0x7FFFFFFFFFFFFFFF); - bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, CORINFO_TYPE_LONG, simdSize); + bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, TYP_LONG, simdSize); } - return gtNewSimdBinOpNode(GT_AND, type, op1, bitMask, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_AND, type, op1, bitMask, simdBaseType, simdSize); } NamedIntrinsic intrinsic = NI_Illegal; @@ -21015,7 +21015,7 @@ GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType si if (intrinsic != NI_Illegal) { - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } else { @@ -21024,13 +21024,13 @@ GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType si GenTree* op1Dup2 = gtCloneExpr(op1Dup1); // op1 = IsNegative(op1) - op1 = gtNewSimdIsNegativeNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdIsNegativeNode(type, op1, simdBaseType, simdSize); // tmp = -op1Dup1 - tmp = gtNewSimdUnOpNode(GT_NEG, type, op1Dup1, simdBaseJitType, simdSize); + tmp = gtNewSimdUnOpNode(GT_NEG, type, op1Dup1, simdBaseType, simdSize); // result = ConditionalSelect(op1, tmp, op1Dup2) - return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize); + return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseType, simdSize); } #elif defined(TARGET_ARM64) NamedIntrinsic intrinsic = NI_AdvSimd_Abs; @@ -21045,19 +21045,18 @@ GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType si } assert(intrinsic != 
NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); #else #error Unsupported platform #endif } GenTree* Compiler::gtNewSimdBinOpNode( - genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); assert(op1 != nullptr); @@ -21085,7 +21084,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( { if (varTypeIsArithmetic(op2)) { - op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseType, simdSize); } break; } @@ -21099,13 +21098,11 @@ GenTree* Compiler::gtNewSimdBinOpNode( if (simdBaseType == TYP_FLOAT) { - simdBaseJitType = CORINFO_TYPE_INT; - simdBaseType = TYP_INT; + simdBaseType = TYP_INT; } else if (simdBaseType == TYP_DOUBLE) { - simdBaseJitType = CORINFO_TYPE_LONG; - simdBaseType = TYP_LONG; + simdBaseType = TYP_LONG; } // "over shifting" is platform specific behavior. 
We will match the C# behavior @@ -21136,14 +21133,14 @@ GenTree* Compiler::gtNewSimdBinOpNode( #if defined(TARGET_XARCH) op2ForLookup = op2; - op2 = gtNewSimdCreateScalarNode(TYP_SIMD16, op2, CORINFO_TYPE_INT, 16); + op2 = gtNewSimdCreateScalarNode(TYP_SIMD16, op2, TYP_INT, 16); #elif defined(TARGET_ARM64) if (op != GT_LSH) { op2 = gtNewOperNode(GT_NEG, TYP_INT, op2); } - op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseType, simdSize); #endif // !TARGET_XARCH && !TARGET_ARM64 } break; @@ -21181,12 +21178,12 @@ GenTree* Compiler::gtNewSimdBinOpNode( else if (!varTypeIsByte(simdBaseType)) { op2ForLookup = *broadcastOp; - *broadcastOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *broadcastOp, simdBaseJitType, 8); + *broadcastOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *broadcastOp, simdBaseType, 8); break; } #endif // TARGET_ARM64 - *broadcastOp = gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize); + *broadcastOp = gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseType, simdSize); } break; } @@ -21203,13 +21200,11 @@ GenTree* Compiler::gtNewSimdBinOpNode( { if (varTypeIsLong(simdBaseType)) { - simdBaseJitType = CORINFO_TYPE_DOUBLE; - simdBaseType = TYP_DOUBLE; + simdBaseType = TYP_DOUBLE; } else { - simdBaseJitType = CORINFO_TYPE_FLOAT; - simdBaseType = TYP_FLOAT; + simdBaseType = TYP_FLOAT; } } } @@ -21255,7 +21250,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( std::swap(op1, op2); #endif // TARGET_XARCH } - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); } switch (op) @@ -21267,8 +21262,8 @@ GenTree* Compiler::gtNewSimdBinOpNode( // and produce overall better codegen. 
assert(fgNodeThreading != NodeThreading::LIR); - op2 = gtNewSimdUnOpNode(GT_NOT, type, op2, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdUnOpNode(GT_NOT, type, op2, simdBaseType, simdSize); + return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseType, simdSize); } #if defined(TARGET_XARCH) @@ -21324,20 +21319,20 @@ GenTree* Compiler::gtNewSimdBinOpNode( { GenTree* op1Dup = fgMakeMultiUse(&op1); GenTree* signOp = - gtNewSimdCmpOpNode(GT_GT, type, gtNewZeroConNode(type), op1Dup, simdBaseJitType, simdSize); + gtNewSimdCmpOpNode(GT_GT, type, gtNewZeroConNode(type), op1Dup, simdBaseType, simdSize); - CorInfoType shiftType = varTypeIsSmall(simdBaseType) ? CORINFO_TYPE_INT : simdBaseJitType; - GenTree* shiftOp = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, shiftType, simdSize); - GenTree* maskOp = gtNewSimdCreateBroadcastNode(type, maskAmountOp, simdBaseJitType, simdSize); + var_types shiftType = varTypeIsSmall(simdBaseType) ? 
TYP_INT : simdBaseType; + GenTree* shiftOp = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, shiftType, simdSize); + GenTree* maskOp = gtNewSimdCreateBroadcastNode(type, maskAmountOp, simdBaseType, simdSize); - return gtNewSimdCndSelNode(type, maskOp, shiftOp, signOp, simdBaseJitType, simdSize); + return gtNewSimdCndSelNode(type, maskOp, shiftOp, signOp, simdBaseType, simdSize); } else { - GenTree* shiftOp = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, CORINFO_TYPE_INT, simdSize); - GenTree* maskOp = gtNewSimdCreateBroadcastNode(type, maskAmountOp, simdBaseJitType, simdSize); + GenTree* shiftOp = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, TYP_INT, simdSize); + GenTree* maskOp = gtNewSimdCreateBroadcastNode(type, maskAmountOp, simdBaseType, simdSize); - return gtNewSimdBinOpNode(GT_AND, type, shiftOp, maskOp, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_AND, type, shiftOp, maskOp, simdBaseType, simdSize); } } #endif // TARGET_XARCH @@ -21355,15 +21350,15 @@ GenTree* Compiler::gtNewSimdBinOpNode( var_types divType = simdSize == 64 ? 
TYP_SIMD32 : TYP_SIMD16; GenTree* op1Dup = fgMakeMultiUse(&op1); GenTree* op2Dup = fgMakeMultiUse(&op2); - GenTree* op1Lower = gtNewSimdGetLowerNode(divType, op1, simdBaseJitType, simdSize); - GenTree* op2Lower = gtNewSimdGetLowerNode(divType, op2, simdBaseJitType, simdSize); + GenTree* op1Lower = gtNewSimdGetLowerNode(divType, op1, simdBaseType, simdSize); + GenTree* op2Lower = gtNewSimdGetLowerNode(divType, op2, simdBaseType, simdSize); GenTree* divLower = - gtNewSimdBinOpNode(GT_DIV, divType, op1Lower, op2Lower, simdBaseJitType, simdSize / 2); - GenTree* op1Upper = gtNewSimdGetUpperNode(divType, op1Dup, simdBaseJitType, simdSize); - GenTree* op2Upper = gtNewSimdGetUpperNode(divType, op2Dup, simdBaseJitType, simdSize); + gtNewSimdBinOpNode(GT_DIV, divType, op1Lower, op2Lower, simdBaseType, simdSize / 2); + GenTree* op1Upper = gtNewSimdGetUpperNode(divType, op1Dup, simdBaseType, simdSize); + GenTree* op2Upper = gtNewSimdGetUpperNode(divType, op2Dup, simdBaseType, simdSize); GenTree* divUpper = - gtNewSimdBinOpNode(GT_DIV, divType, op1Upper, op2Upper, simdBaseJitType, simdSize / 2); - GenTree* divResult = gtNewSimdWithUpperNode(type, divLower, divUpper, simdBaseJitType, simdSize); + gtNewSimdBinOpNode(GT_DIV, divType, op1Upper, op2Upper, simdBaseType, simdSize / 2); + GenTree* divResult = gtNewSimdWithUpperNode(type, divLower, divUpper, simdBaseType, simdSize); return divResult; } @@ -21372,8 +21367,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( assert(simdSize == 16); if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { - CorInfoType cvtBaseType = - varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UINT : CORINFO_TYPE_INT; + var_types cvtBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UINT : TYP_INT; NamedIntrinsic widenCvtIntrinsic = varTypeIsByte(simdBaseType) ? (varTypeIsSigned(simdBaseType) ? NI_AVX512_ConvertToVector512Int32 @@ -21388,41 +21382,39 @@ GenTree* Compiler::gtNewSimdBinOpNode( var_types cvtType = varTypeIsByte(simdBaseType) ? 
TYP_SIMD64 : TYP_SIMD32; int cvtSize = varTypeIsByte(simdBaseType) ? 64 : 32; - op1 = gtNewSimdHWIntrinsicNode(cvtType, op1, widenCvtIntrinsic, simdBaseJitType, cvtSize); - op2 = gtNewSimdHWIntrinsicNode(cvtType, op2, widenCvtIntrinsic, simdBaseJitType, cvtSize); + op1 = gtNewSimdHWIntrinsicNode(cvtType, op1, widenCvtIntrinsic, simdBaseType, cvtSize); + op2 = gtNewSimdHWIntrinsicNode(cvtType, op2, widenCvtIntrinsic, simdBaseType, cvtSize); GenTree* div = gtNewSimdBinOpNode(GT_DIV, cvtType, op1, op2, cvtBaseType, cvtSize); return gtNewSimdHWIntrinsicNode(type, div, narrowCvtIntrinsic, cvtBaseType, cvtSize); } - CorInfoType signedType = varTypeIsShort(simdBaseType) ? CORINFO_TYPE_INT : CORINFO_TYPE_SHORT; - CorInfoType unsignedType = varTypeIsShort(simdBaseType) ? CORINFO_TYPE_UINT : CORINFO_TYPE_USHORT; - CorInfoType cvtType = varTypeIsSigned(simdBaseType) ? signedType : unsignedType; - GenTree* op1Dup = fgMakeMultiUse(&op1); - GenTree* op2Dup = fgMakeMultiUse(&op2); - GenTree* op1LowerWiden = gtNewSimdWidenLowerNode(type, op1, simdBaseJitType, simdSize); - GenTree* op2LowerWiden = gtNewSimdWidenLowerNode(type, op2, simdBaseJitType, simdSize); - GenTree* divLower = + var_types signedType = varTypeIsShort(simdBaseType) ? TYP_INT : TYP_SHORT; + var_types unsignedType = varTypeIsShort(simdBaseType) ? TYP_UINT : TYP_USHORT; + var_types cvtType = varTypeIsSigned(simdBaseType) ? 
signedType : unsignedType; + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); + GenTree* op1LowerWiden = gtNewSimdWidenLowerNode(type, op1, simdBaseType, simdSize); + GenTree* op2LowerWiden = gtNewSimdWidenLowerNode(type, op2, simdBaseType, simdSize); + GenTree* divLower = gtNewSimdBinOpNode(GT_DIV, type, op1LowerWiden, op2LowerWiden, cvtType, simdSize); - GenTree* op1UpperWiden = gtNewSimdWidenUpperNode(type, op1Dup, simdBaseJitType, simdSize); - GenTree* op2UpperWiden = gtNewSimdWidenUpperNode(type, op2Dup, simdBaseJitType, simdSize); + GenTree* op1UpperWiden = gtNewSimdWidenUpperNode(type, op1Dup, simdBaseType, simdSize); + GenTree* op2UpperWiden = gtNewSimdWidenUpperNode(type, op2Dup, simdBaseType, simdSize); GenTree* divUpper = gtNewSimdBinOpNode(GT_DIV, type, op1UpperWiden, op2UpperWiden, cvtType, simdSize); - return gtNewSimdNarrowNode(type, divLower, divUpper, simdBaseJitType, simdSize); + return gtNewSimdNarrowNode(type, divLower, divUpper, simdBaseType, simdSize); } assert(varTypeIsInt(simdBaseType)); if (compOpportunisticallyDependsOn(InstructionSet_AVX512) && simdSize == 32) { - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_Vector256_op_Division, simdBaseJitType, - simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_Vector256_op_Division, simdBaseType, simdSize); } assert(simdSize == 16); if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_Vector128_op_Division, simdBaseJitType, - simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_Vector128_op_Division, simdBaseType, simdSize); } GenTree* op1Dup = fgMakeMultiUse(&op1); @@ -21430,16 +21422,16 @@ GenTree* Compiler::gtNewSimdBinOpNode( GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup); GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup); GenTree* op1Hi = - gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_X86Base_MoveHighToLow, CORINFO_TYPE_FLOAT, simdSize); + gtNewSimdHWIntrinsicNode(type, op1, op1Dup, 
NI_X86Base_MoveHighToLow, TYP_FLOAT, simdSize); GenTree* op2Hi = - gtNewSimdHWIntrinsicNode(type, op2, op2Dup, NI_X86Base_MoveHighToLow, CORINFO_TYPE_FLOAT, simdSize); - GenTree* divLo = gtNewSimdHWIntrinsicNode(type, op1Dup2, op2Dup2, NI_Vector128_op_Division, - simdBaseJitType, simdSize); + gtNewSimdHWIntrinsicNode(type, op2, op2Dup, NI_X86Base_MoveHighToLow, TYP_FLOAT, simdSize); + GenTree* divLo = + gtNewSimdHWIntrinsicNode(type, op1Dup2, op2Dup2, NI_Vector128_op_Division, simdBaseType, simdSize); GenTree* divHi = - gtNewSimdHWIntrinsicNode(type, op1Hi, op2Hi, NI_Vector128_op_Division, simdBaseJitType, simdSize); - GenTree* div = gtNewSimdHWIntrinsicNode(type, divHi, divLo, NI_X86Base_MoveLowToHigh, - CORINFO_TYPE_FLOAT, simdSize); - return gtNewSimdHWIntrinsicNode(type, div, gtNewIconNode(0x4E), NI_X86Base_Shuffle, simdBaseJitType, + gtNewSimdHWIntrinsicNode(type, op1Hi, op2Hi, NI_Vector128_op_Division, simdBaseType, simdSize); + GenTree* div = + gtNewSimdHWIntrinsicNode(type, divHi, divLo, NI_X86Base_MoveLowToHigh, TYP_FLOAT, simdSize); + return gtNewSimdHWIntrinsicNode(type, div, gtNewIconNode(0x4E), NI_X86Base_Shuffle, simdBaseType, simdSize); } unreached(); @@ -21451,7 +21443,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( #if defined(TARGET_XARCH) if (varTypeIsByte(simdBaseType)) { - CorInfoType widenedSimdBaseJitType; + var_types widenedSimdBaseType; NamedIntrinsic widenIntrinsic; NamedIntrinsic narrowIntrinsic; var_types widenedType; @@ -21465,15 +21457,15 @@ GenTree* Compiler::gtNewSimdBinOpNode( // - Narrow widened product (SIMD64 [U]Short) as SIMD32 [U]Byte if (simdBaseType == TYP_BYTE) { - widenedSimdBaseJitType = CORINFO_TYPE_SHORT; - widenIntrinsic = NI_AVX512_ConvertToVector512Int16; - narrowIntrinsic = NI_AVX512_ConvertToVector256SByte; + widenedSimdBaseType = TYP_SHORT; + widenIntrinsic = NI_AVX512_ConvertToVector512Int16; + narrowIntrinsic = NI_AVX512_ConvertToVector256SByte; } else { - widenedSimdBaseJitType = CORINFO_TYPE_USHORT; - 
widenIntrinsic = NI_AVX512_ConvertToVector512UInt16; - narrowIntrinsic = NI_AVX512_ConvertToVector256Byte; + widenedSimdBaseType = TYP_USHORT; + widenIntrinsic = NI_AVX512_ConvertToVector512UInt16; + narrowIntrinsic = NI_AVX512_ConvertToVector256Byte; } widenedType = TYP_SIMD64; @@ -21481,18 +21473,18 @@ GenTree* Compiler::gtNewSimdBinOpNode( // Vector512 widenedOp1 = Avx512BW.ConvertToVector512UInt16(op1) GenTree* widenedOp1 = - gtNewSimdHWIntrinsicNode(widenedType, op1, widenIntrinsic, simdBaseJitType, widenedSimdSize); + gtNewSimdHWIntrinsicNode(widenedType, op1, widenIntrinsic, simdBaseType, widenedSimdSize); // Vector512 widenedOp2 = Avx512BW.ConvertToVector512UInt16(op2) GenTree* widenedOp2 = - gtNewSimdHWIntrinsicNode(widenedType, op2, widenIntrinsic, simdBaseJitType, widenedSimdSize); + gtNewSimdHWIntrinsicNode(widenedType, op2, widenIntrinsic, simdBaseType, widenedSimdSize); // Vector512 widenedProduct = widenedOp1 * widenedOp2; GenTree* widenedProduct = gtNewSimdBinOpNode(GT_MUL, widenedType, widenedOp1, widenedOp2, - widenedSimdBaseJitType, widenedSimdSize); + widenedSimdBaseType, widenedSimdSize); // Vector256 product = Avx512BW.ConvertToVector256Byte(widenedProduct) - return gtNewSimdHWIntrinsicNode(type, widenedProduct, narrowIntrinsic, widenedSimdBaseJitType, + return gtNewSimdHWIntrinsicNode(type, widenedProduct, narrowIntrinsic, widenedSimdBaseType, widenedSimdSize); } else if (simdSize == 16 && compOpportunisticallyDependsOn(InstructionSet_AVX2)) @@ -21507,32 +21499,32 @@ GenTree* Compiler::gtNewSimdBinOpNode( if (simdBaseType == TYP_BYTE) { - widenedSimdBaseJitType = CORINFO_TYPE_SHORT; - narrowIntrinsic = NI_AVX512_ConvertToVector128SByte; + widenedSimdBaseType = TYP_SHORT; + narrowIntrinsic = NI_AVX512_ConvertToVector128SByte; } else { - widenedSimdBaseJitType = CORINFO_TYPE_USHORT; - narrowIntrinsic = NI_AVX512_ConvertToVector128Byte; + widenedSimdBaseType = TYP_USHORT; + narrowIntrinsic = NI_AVX512_ConvertToVector128Byte; } widenedType = 
TYP_SIMD32; widenedSimdSize = 32; // Vector256 widenedOp1 = Avx2.ConvertToVector256Int16(op1).AsUInt16() - GenTree* widenedOp1 = gtNewSimdHWIntrinsicNode(widenedType, op1, widenIntrinsic, - simdBaseJitType, widenedSimdSize); + GenTree* widenedOp1 = + gtNewSimdHWIntrinsicNode(widenedType, op1, widenIntrinsic, simdBaseType, widenedSimdSize); // Vector256 widenedOp2 = Avx2.ConvertToVector256Int16(op2).AsUInt16() - GenTree* widenedOp2 = gtNewSimdHWIntrinsicNode(widenedType, op2, widenIntrinsic, - simdBaseJitType, widenedSimdSize); + GenTree* widenedOp2 = + gtNewSimdHWIntrinsicNode(widenedType, op2, widenIntrinsic, simdBaseType, widenedSimdSize); // Vector256 widenedProduct = widenedOp1 * widenedOp2 GenTree* widenedProduct = gtNewSimdBinOpNode(GT_MUL, widenedType, widenedOp1, widenedOp2, - widenedSimdBaseJitType, widenedSimdSize); + widenedSimdBaseType, widenedSimdSize); // Vector128 product = Avx512BW.VL.ConvertToVector128Byte(widenedProduct) - return gtNewSimdHWIntrinsicNode(type, widenedProduct, narrowIntrinsic, widenedSimdBaseJitType, + return gtNewSimdHWIntrinsicNode(type, widenedProduct, narrowIntrinsic, widenedSimdBaseType, widenedSimdSize); } else @@ -21544,22 +21536,22 @@ GenTree* Compiler::gtNewSimdBinOpNode( // - Pack masked product so that relevant bits are packed together in upper and lower halves // - Shuffle packed product so that relevant bits are placed together in the lower half // - Select lower (SIMD16 [U]Byte) from shuffled product (SIMD32 [U]Short) - widenedSimdBaseJitType = simdBaseType == TYP_BYTE ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT; - widenIntrinsic = NI_AVX2_ConvertToVector256Int16; - widenedType = TYP_SIMD32; - widenedSimdSize = 32; + widenedSimdBaseType = simdBaseType == TYP_BYTE ? 
TYP_SHORT : TYP_USHORT; + widenIntrinsic = NI_AVX2_ConvertToVector256Int16; + widenedType = TYP_SIMD32; + widenedSimdSize = 32; // Vector256 widenedOp1 = Avx2.ConvertToVector256Int16(op1).AsUInt16() GenTree* widenedOp1 = - gtNewSimdHWIntrinsicNode(widenedType, op1, widenIntrinsic, simdBaseJitType, simdSize); + gtNewSimdHWIntrinsicNode(widenedType, op1, widenIntrinsic, simdBaseType, simdSize); // Vector256 widenedOp2 = Avx2.ConvertToVector256Int16(op2).AsUInt16() GenTree* widenedOp2 = - gtNewSimdHWIntrinsicNode(widenedType, op2, widenIntrinsic, simdBaseJitType, simdSize); + gtNewSimdHWIntrinsicNode(widenedType, op2, widenIntrinsic, simdBaseType, simdSize); // Vector256 widenedProduct = widenedOp1 * widenedOp2 GenTree* widenedProduct = gtNewSimdBinOpNode(GT_MUL, widenedType, widenedOp1, widenedOp2, - widenedSimdBaseJitType, widenedSimdSize); + widenedSimdBaseType, widenedSimdSize); // Vector256 vecCon1 = Vector256.Create(0x00FF00FF00FF00FF).AsUInt16() GenTreeVecCon* vecCon1 = gtNewVconNode(widenedType); @@ -21571,25 +21563,24 @@ GenTree* Compiler::gtNewSimdBinOpNode( // Vector256 maskedProduct = Avx2.And(widenedProduct, vecCon1).AsInt16() GenTree* maskedProduct = gtNewSimdBinOpNode(GT_AND, widenedType, widenedProduct, vecCon1, - widenedSimdBaseJitType, widenedSimdSize); + widenedSimdBaseType, widenedSimdSize); GenTree* maskedProductDup = fgMakeMultiUse(&maskedProduct); // Vector256 packedProduct = Avx2.PackUnsignedSaturate(maskedProduct, // maskedProduct).AsUInt64() GenTree* packedProduct = gtNewSimdHWIntrinsicNode(widenedType, maskedProduct, maskedProductDup, - NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE, widenedSimdSize); + NI_AVX2_PackUnsignedSaturate, TYP_UBYTE, widenedSimdSize); - CorInfoType permuteBaseJitType = - (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; + var_types permuteBaseType = (simdBaseType == TYP_BYTE) ? 
TYP_LONG : TYP_ULONG; // Vector256 shuffledProduct = Avx2.Permute4x64(w1, 0xD8).AsByte() GenTree* shuffledProduct = gtNewSimdHWIntrinsicNode(widenedType, packedProduct, gtNewIconNode(SHUFFLE_WYZX), - NI_AVX2_Permute4x64, permuteBaseJitType, widenedSimdSize); + NI_AVX2_Permute4x64, permuteBaseType, widenedSimdSize); // Vector128 product = shuffledProduct.getLower() - return gtNewSimdGetLowerNode(type, shuffledProduct, simdBaseJitType, widenedSimdSize); + return gtNewSimdGetLowerNode(type, shuffledProduct, simdBaseType, widenedSimdSize); } } @@ -21597,7 +21588,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( // - Widen both inputs lower and upper halves as [U]Short (using helper method) // - Multiply corrsponding widened input halves together as widened product halves // - Narrow widened product halves as [U]Byte (using helper method) - widenedSimdBaseJitType = simdBaseType == TYP_BYTE ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT; + widenedSimdBaseType = simdBaseType == TYP_BYTE ? TYP_SHORT : TYP_USHORT; // op1Dup = op1 GenTree* op1Dup = fgMakeMultiUse(&op1); @@ -21606,27 +21597,27 @@ GenTree* Compiler::gtNewSimdBinOpNode( GenTree* op2Dup = fgMakeMultiUse(&op2); // Vector256 lowerOp1 = Avx2.ConvertToVector256Int16(op1.GetLower()).AsUInt16() - GenTree* lowerOp1 = gtNewSimdWidenLowerNode(type, op1, simdBaseJitType, simdSize); + GenTree* lowerOp1 = gtNewSimdWidenLowerNode(type, op1, simdBaseType, simdSize); // Vector256 lowerOp2 = Avx2.ConvertToVector256Int16(op2.GetLower()).AsUInt16() - GenTree* lowerOp2 = gtNewSimdWidenLowerNode(type, op2, simdBaseJitType, simdSize); + GenTree* lowerOp2 = gtNewSimdWidenLowerNode(type, op2, simdBaseType, simdSize); // Vector256 lowerProduct = lowerOp1 * lowerOp2 GenTree* lowerProduct = - gtNewSimdBinOpNode(GT_MUL, type, lowerOp1, lowerOp2, widenedSimdBaseJitType, simdSize); + gtNewSimdBinOpNode(GT_MUL, type, lowerOp1, lowerOp2, widenedSimdBaseType, simdSize); // Vector256 upperOp1 = Avx2.ConvertToVector256Int16(op1.GetUpper()).AsUInt16() - 
GenTree* upperOp1 = gtNewSimdWidenUpperNode(type, op1Dup, simdBaseJitType, simdSize); + GenTree* upperOp1 = gtNewSimdWidenUpperNode(type, op1Dup, simdBaseType, simdSize); // Vector256 upperOp2 = Avx2.ConvertToVector256Int16(op2.GetUpper()).AsUInt16() - GenTree* upperOp2 = gtNewSimdWidenUpperNode(type, op2Dup, simdBaseJitType, simdSize); + GenTree* upperOp2 = gtNewSimdWidenUpperNode(type, op2Dup, simdBaseType, simdSize); // Vector256 upperProduct = upperOp1 * upperOp2 GenTree* upperProduct = - gtNewSimdBinOpNode(GT_MUL, type, upperOp1, upperOp2, widenedSimdBaseJitType, simdSize); + gtNewSimdBinOpNode(GT_MUL, type, upperOp1, upperOp2, widenedSimdBaseType, simdSize); // Narrow and merge halves using helper method - return gtNewSimdNarrowNode(type, lowerProduct, upperProduct, simdBaseJitType, simdSize); + return gtNewSimdNarrowNode(type, lowerProduct, upperProduct, simdBaseType, simdSize); } else if (varTypeIsInt(simdBaseType)) { @@ -21638,32 +21629,31 @@ GenTree* Compiler::gtNewSimdBinOpNode( // op1Dup = Sse2.ShiftRightLogical128BitLane(op1Dup, 4) op1Dup = gtNewSimdHWIntrinsicNode(type, op1Dup, gtNewIconNode(4, TYP_INT), - NI_X86Base_ShiftRightLogical128BitLane, simdBaseJitType, simdSize); + NI_X86Base_ShiftRightLogical128BitLane, simdBaseType, simdSize); // op2Dup = Sse2.ShiftRightLogical128BitLane(op2Dup, 4) op2Dup = gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(4, TYP_INT), - NI_X86Base_ShiftRightLogical128BitLane, simdBaseJitType, simdSize); + NI_X86Base_ShiftRightLogical128BitLane, simdBaseType, simdSize); // op2Dup = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32() - op2Dup = - gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_X86Base_Multiply, CORINFO_TYPE_ULONG, simdSize); + op2Dup = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_X86Base_Multiply, TYP_ULONG, simdSize); // op2Dup = Sse2.Shuffle(op2Dup, (0, 0, 2, 0)) op2Dup = gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), - NI_X86Base_Shuffle, simdBaseJitType, 
simdSize); + NI_X86Base_Shuffle, simdBaseType, simdSize); // op1 = Sse2.Multiply(op1.AsUInt32(), op2.AsUInt32()).AsInt32() - op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Multiply, CORINFO_TYPE_ULONG, simdSize); + op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Multiply, TYP_ULONG, simdSize); // op1 = Sse2.Shuffle(op1, (0, 0, 2, 0)) op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_X86Base_Shuffle, - simdBaseJitType, simdSize); + simdBaseType, simdSize); // op2 = op2Dup; op2 = op2Dup; // result = Sse2.UnpackLow(op1, op2) - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_UnpackLow, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_UnpackLow, simdBaseType, simdSize); } else if (varTypeIsLong(simdBaseType)) { @@ -21684,28 +21674,28 @@ GenTree* Compiler::gtNewSimdBinOpNode( GenTree* op2Dup2 = gtCloneExpr(op2Dup1); // Vector128 low = Sse2.Multiply(a.AsUInt32(), b.AsUInt32()); - GenTree* low = gtNewSimdHWIntrinsicNode(type, op1, op2, muludq, CORINFO_TYPE_ULONG, simdSize); + GenTree* low = gtNewSimdHWIntrinsicNode(type, op1, op2, muludq, TYP_ULONG, simdSize); // Vector128 mid = (b >>> 32).AsUInt64(); - GenTree* mid = gtNewSimdBinOpNode(GT_RSZ, type, op2Dup1, gtNewIconNode(32), simdBaseJitType, simdSize); + GenTree* mid = gtNewSimdBinOpNode(GT_RSZ, type, op2Dup1, gtNewIconNode(32), simdBaseType, simdSize); // mid = Sse2.Multiply(mid.AsUInt32(), a.AsUInt32()); - mid = gtNewSimdHWIntrinsicNode(type, mid, op1Dup1, muludq, CORINFO_TYPE_ULONG, simdSize); + mid = gtNewSimdHWIntrinsicNode(type, mid, op1Dup1, muludq, TYP_ULONG, simdSize); // Vector128 tmp = (a >>> 32).AsUInt64(); - GenTree* tmp = gtNewSimdBinOpNode(GT_RSZ, type, op1Dup2, gtNewIconNode(32), simdBaseJitType, simdSize); + GenTree* tmp = gtNewSimdBinOpNode(GT_RSZ, type, op1Dup2, gtNewIconNode(32), simdBaseType, simdSize); // tmp = Sse2.Multiply(tmp.AsUInt32(), b.AsUInt32()); - tmp = gtNewSimdHWIntrinsicNode(type, 
tmp, op2Dup2, muludq, CORINFO_TYPE_ULONG, simdSize); + tmp = gtNewSimdHWIntrinsicNode(type, tmp, op2Dup2, muludq, TYP_ULONG, simdSize); // mid += tmp; - mid = gtNewSimdBinOpNode(GT_ADD, type, mid, tmp, simdBaseJitType, simdSize); + mid = gtNewSimdBinOpNode(GT_ADD, type, mid, tmp, simdBaseType, simdSize); // mid <<= 32; - mid = gtNewSimdBinOpNode(GT_LSH, type, mid, gtNewIconNode(32), simdBaseJitType, simdSize); + mid = gtNewSimdBinOpNode(GT_LSH, type, mid, gtNewIconNode(32), simdBaseType, simdSize); // return low + mid; - return gtNewSimdBinOpNode(GT_ADD, type, low, mid, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_ADD, type, low, mid, simdBaseType, simdSize); } #elif defined(TARGET_ARM64) if (varTypeIsLong(simdBaseType)) @@ -21713,12 +21703,12 @@ GenTree* Compiler::gtNewSimdBinOpNode( GenTree** op2ToDup = nullptr; assert(varTypeIsSIMD(op1)); - op1 = gtNewSimdToScalarNode(TYP_LONG, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdToScalarNode(TYP_LONG, op1, simdBaseType, simdSize); GenTree** op1ToDup = &op1->AsHWIntrinsic()->Op(1); if (varTypeIsSIMD(op2)) { - op2 = gtNewSimdToScalarNode(TYP_LONG, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdToScalarNode(TYP_LONG, op2, simdBaseType, simdSize); op2ToDup = &op2->AsHWIntrinsic()->Op(1); } @@ -21729,7 +21719,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( { op2ToDup = &lower->AsOp()->gtOp2; } - lower = gtNewSimdCreateScalarUnsafeNode(type, lower, simdBaseJitType, simdSize); + lower = gtNewSimdCreateScalarUnsafeNode(type, lower, simdBaseType, simdSize); if (simdSize == 8) { @@ -21742,18 +21732,18 @@ GenTree* Compiler::gtNewSimdBinOpNode( GenTree* op2Dup = fgMakeMultiUse(op2ToDup); assert(!varTypeIsArithmetic(op1Dup)); - op1Dup = gtNewSimdGetElementNode(TYP_LONG, op1Dup, gtNewIconNode(1), simdBaseJitType, simdSize); + op1Dup = gtNewSimdGetElementNode(TYP_LONG, op1Dup, gtNewIconNode(1), simdBaseType, simdSize); if (!varTypeIsArithmetic(op2Dup)) { - op2Dup = gtNewSimdGetElementNode(TYP_LONG, op2Dup, 
gtNewIconNode(1), simdBaseJitType, simdSize); + op2Dup = gtNewSimdGetElementNode(TYP_LONG, op2Dup, gtNewIconNode(1), simdBaseType, simdSize); } // upper = op1.GetElement(1) * op2.GetElement(1) GenTree* upper = gtNewOperNode(GT_MUL, TYP_LONG, op1Dup, op2Dup); // return Vector128.Create(lower, upper) - return gtNewSimdWithElementNode(type, lower, gtNewIconNode(1), upper, simdBaseJitType, simdSize); + return gtNewSimdWithElementNode(type, lower, gtNewIconNode(1), upper, simdBaseType, simdSize); } #endif // !TARGET_XARCH && !TARGET_ARM64 unreached(); @@ -21766,7 +21756,7 @@ GenTree* Compiler::gtNewSimdBinOpNode( } } -GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -21774,7 +21764,6 @@ GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType s assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsFloating(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -21787,7 +21776,7 @@ GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType s else if (simdSize == 64) { GenTree* op2 = gtNewIconNode(static_cast(FloatRoundingMode::ToPositiveInfinity)); - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseType, simdSize); } else { @@ -21807,7 +21796,7 @@ GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType s #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } #if 
defined(FEATURE_MASKED_HW_INTRINSICS) @@ -21817,44 +21806,36 @@ GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType s // Arguments: // type -- The type of the node to convert to // op1 -- The node to convert -// simdBaseJitType -- the base jit type of the converted node +// simdBaseType -- The base type of the converted node // simdSize -- the simd size of the converted node // // Return Value: // The node converted to the given type // -GenTree* Compiler::gtNewSimdCvtMaskToVectorNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCvtMaskToVectorNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsMask(op1)); assert(varTypeIsSIMD(type)); compMaskConvertUsed = true; #if defined(TARGET_XARCH) - return gtNewSimdHWIntrinsicNode(type, op1, NI_AVX512_ConvertMaskToVector, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, NI_AVX512_ConvertMaskToVector, simdBaseType, simdSize); #elif defined(TARGET_ARM64) - return gtNewSimdHWIntrinsicNode(type, op1, NI_Sve_ConvertMaskToVector, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, NI_Sve_ConvertMaskToVector, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } #endif // FEATURE_MASKED_HW_INTRINSICS -GenTree* Compiler::gtNewSimdCvtNode(var_types type, - GenTree* op1, - CorInfoType simdTargetBaseJitType, - CorInfoType simdSourceBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCvtNode( + var_types type, GenTree* op1, var_types simdTargetBaseType, var_types simdSourceBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdSourceBaseType = JitType2PreciseVarType(simdSourceBaseJitType); - var_types simdTargetBaseType = JitType2PreciseVarType(simdTargetBaseJitType); 
assert(varTypeIsFloating(simdSourceBaseType)); assert(varTypeIsIntegral(simdTargetBaseType)); @@ -21889,7 +21870,7 @@ GenTree* Compiler::gtNewSimdCvtNode(var_types type, unreached(); } } - return gtNewSimdHWIntrinsicNode(type, op1, cvtIntrinsic, simdSourceBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, cvtIntrinsic, simdSourceBaseType, simdSize); } else if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { @@ -21914,14 +21895,14 @@ GenTree* Compiler::gtNewSimdCvtNode(var_types type, // +VAL: 0b0000 */ int32_t iconVal = varTypeIsUnsigned(simdTargetBaseType) ? 0x08080088 : 0x00000088; - GenTree* tblCon = gtNewSimdCreateBroadcastNode(type, gtNewIconNode(iconVal), simdTargetBaseJitType, simdSize); + GenTree* tblCon = gtNewSimdCreateBroadcastNode(type, gtNewIconNode(iconVal), simdTargetBaseType, simdSize); // We need op1Clone to run fixup GenTree* op1Clone = fgMakeMultiUse(&op1); NamedIntrinsic fixupHwIntrinsicID = NI_AVX512_Fixup; // run vfixupimmsd base on table and no flags reporting fixupVal = gtNewSimdHWIntrinsicNode(type, op1, op1Clone, tblCon, gtNewIconNode(0), fixupHwIntrinsicID, - simdSourceBaseJitType, simdSize); + simdSourceBaseType, simdSize); } else { @@ -21929,8 +21910,8 @@ GenTree* Compiler::gtNewSimdCvtNode(var_types type, // mask1 contains the output either 0xFFFFFFFF or 0. // FixupVal zeros out any NaN values in the input by ANDing input with mask1. 
GenTree* op1Clone1 = fgMakeMultiUse(&op1); - GenTree* mask1 = gtNewSimdIsNaNNode(type, op1, simdSourceBaseJitType, simdSize); - fixupVal = gtNewSimdBinOpNode(GT_AND_NOT, type, op1Clone1, mask1, simdSourceBaseJitType, simdSize); + GenTree* mask1 = gtNewSimdIsNaNNode(type, op1, simdSourceBaseType, simdSize); + fixupVal = gtNewSimdBinOpNode(GT_AND_NOT, type, op1Clone1, mask1, simdSourceBaseType, simdSize); } if (varTypeIsSigned(simdTargetBaseType)) @@ -21941,56 +21922,48 @@ GenTree* Compiler::gtNewSimdCvtNode(var_types type, { int64_t actualMaxVal = INT64_MAX; maxVal = gtNewDconNode(static_cast(actualMaxVal), simdSourceBaseType); - maxVal = gtNewSimdCreateBroadcastNode(type, maxVal, simdSourceBaseJitType, simdSize); - maxValDup = - gtNewSimdCreateBroadcastNode(type, gtNewLconNode(actualMaxVal), simdTargetBaseJitType, simdSize); + maxVal = gtNewSimdCreateBroadcastNode(type, maxVal, simdSourceBaseType, simdSize); + maxValDup = gtNewSimdCreateBroadcastNode(type, gtNewLconNode(actualMaxVal), simdTargetBaseType, simdSize); } else { ssize_t actualMaxVal = INT32_MAX; maxVal = gtNewDconNode(static_cast(actualMaxVal), simdSourceBaseType); - maxVal = gtNewSimdCreateBroadcastNode(type, maxVal, simdSourceBaseJitType, simdSize); - maxValDup = - gtNewSimdCreateBroadcastNode(type, gtNewIconNode(actualMaxVal), simdTargetBaseJitType, simdSize); + maxVal = gtNewSimdCreateBroadcastNode(type, maxVal, simdSourceBaseType, simdSize); + maxValDup = gtNewSimdCreateBroadcastNode(type, gtNewIconNode(actualMaxVal), simdTargetBaseType, simdSize); } // we will be using the input value twice GenTree* fixupValDup = fgMakeMultiUse(&fixupVal); // compare with max value of integer/long - fixupVal = gtNewSimdCmpOpNode(GT_GE, type, fixupVal, maxVal, simdSourceBaseJitType, simdSize); + fixupVal = gtNewSimdCmpOpNode(GT_GE, type, fixupVal, maxVal, simdSourceBaseType, simdSize); // cast it - GenTree* castNode = - gtNewSimdCvtNativeNode(type, fixupValDup, simdTargetBaseJitType, simdSourceBaseJitType, 
simdSize); + GenTree* castNode = gtNewSimdCvtNativeNode(type, fixupValDup, simdTargetBaseType, simdSourceBaseType, simdSize); // use the fixupVal mask with input value and max value to blend - return gtNewSimdCndSelNode(type, fixupVal, maxValDup, castNode, simdTargetBaseJitType, simdSize); + return gtNewSimdCndSelNode(type, fixupVal, maxValDup, castNode, simdTargetBaseType, simdSize); } else { - return gtNewSimdCvtNativeNode(type, fixupVal, simdTargetBaseJitType, simdSourceBaseJitType, simdSize); + return gtNewSimdCvtNativeNode(type, fixupVal, simdTargetBaseType, simdSourceBaseType, simdSize); } #elif defined(TARGET_ARM64) - return gtNewSimdCvtNativeNode(type, op1, simdTargetBaseJitType, simdSourceBaseJitType, simdSize); + return gtNewSimdCvtNativeNode(type, op1, simdTargetBaseType, simdSourceBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, - GenTree* op1, - CorInfoType simdTargetBaseJitType, - CorInfoType simdSourceBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCvtNativeNode( + var_types type, GenTree* op1, var_types simdTargetBaseType, var_types simdSourceBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdSourceBaseType = JitType2PreciseVarType(simdSourceBaseJitType); - var_types simdTargetBaseType = JitType2PreciseVarType(simdTargetBaseJitType); assert(varTypeIsFloating(simdSourceBaseType)); assert(varTypeIsIntegral(simdTargetBaseType)); @@ -22000,13 +21973,13 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, #if defined(TARGET_XARCH) assert(compIsaSupportedDebugOnly(InstructionSet_AVX512) || (simdTargetBaseType == TYP_INT)); - switch (simdSourceBaseJitType) + switch (simdSourceBaseType) { - case CORINFO_TYPE_FLOAT: + case TYP_FLOAT: { - switch (simdTargetBaseJitType) + switch (simdTargetBaseType) { - case 
CORINFO_TYPE_INT: + case TYP_INT: { switch (simdSize) { @@ -22034,7 +22007,7 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, break; } - case CORINFO_TYPE_UINT: + case TYP_UINT: { switch (simdSize) { @@ -22068,11 +22041,11 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, break; } - case CORINFO_TYPE_DOUBLE: + case TYP_DOUBLE: { - switch (simdTargetBaseJitType) + switch (simdTargetBaseType) { - case CORINFO_TYPE_LONG: + case TYP_LONG: { switch (simdSize) { @@ -22100,7 +22073,7 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, break; } - case CORINFO_TYPE_ULONG: + case TYP_ULONG: { switch (simdSize) { @@ -22140,19 +22113,19 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, #elif defined(TARGET_ARM64) assert((simdSize == 8) || (simdSize == 16)); - switch (simdSourceBaseJitType) + switch (simdSourceBaseType) { - case CORINFO_TYPE_FLOAT: + case TYP_FLOAT: { - switch (simdTargetBaseJitType) + switch (simdTargetBaseType) { - case CORINFO_TYPE_INT: + case TYP_INT: { hwIntrinsicID = NI_AdvSimd_ConvertToInt32RoundToZero; break; } - case CORINFO_TYPE_UINT: + case TYP_UINT: { hwIntrinsicID = NI_AdvSimd_ConvertToUInt32RoundToZero; break; @@ -22164,18 +22137,18 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, break; } - case CORINFO_TYPE_DOUBLE: + case TYP_DOUBLE: { - switch (simdTargetBaseJitType) + switch (simdTargetBaseType) { - case CORINFO_TYPE_LONG: + case TYP_LONG: { hwIntrinsicID = (simdSize == 8) ? NI_AdvSimd_Arm64_ConvertToInt64RoundToZeroScalar : NI_AdvSimd_Arm64_ConvertToInt64RoundToZero; break; } - case CORINFO_TYPE_ULONG: + case TYP_ULONG: { hwIntrinsicID = (simdSize == 8) ? 
NI_AdvSimd_Arm64_ConvertToUInt64RoundToZeroScalar : NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero; @@ -22196,7 +22169,7 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, #endif // !TARGET_XARCH && !TARGET_ARM64 assert(hwIntrinsicID != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdSourceBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdSourceBaseType, simdSize); } #if defined(FEATURE_MASKED_HW_INTRINSICS) @@ -22206,27 +22179,23 @@ GenTree* Compiler::gtNewSimdCvtNativeNode(var_types type, // Arguments: // type -- The type of the mask to produce. // op1 -- The node to convert -// simdBaseJitType -- the base jit type of the converted node // simdSize -- the simd size of the converted node // // Return Value: // The node converted to the a mask type // -GenTree* Compiler::gtNewSimdCvtVectorToMaskNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCvtVectorToMaskNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsMask(type)); assert(varTypeIsSIMD(op1)); compMaskConvertUsed = true; #if defined(TARGET_XARCH) - return gtNewSimdHWIntrinsicNode(TYP_MASK, op1, NI_AVX512_ConvertVectorToMask, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(TYP_MASK, op1, NI_AVX512_ConvertVectorToMask, simdBaseType, simdSize); #elif defined(TARGET_ARM64) // ConvertVectorToMask uses cmpne which requires an embedded mask. 
- GenTree* trueMask = gtNewSimdHWIntrinsicNode(TYP_MASK, NI_Sve_ConversionTrueMask, simdBaseJitType, simdSize); - return gtNewSimdHWIntrinsicNode(TYP_MASK, trueMask, op1, NI_Sve_ConvertVectorToMask, simdBaseJitType, simdSize); + GenTree* trueMask = gtNewSimdHWIntrinsicNode(TYP_MASK, NI_Sve_ConversionTrueMask, simdBaseType, simdSize); + return gtNewSimdHWIntrinsicNode(TYP_MASK, trueMask, op1, NI_Sve_ConvertVectorToMask, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -22234,7 +22203,7 @@ GenTree* Compiler::gtNewSimdCvtVectorToMaskNode(var_types type, #endif // FEATURE_MASKED_HW_INTRINSICS GenTree* Compiler::gtNewSimdCmpOpNode( - genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -22245,7 +22214,6 @@ GenTree* Compiler::gtNewSimdCmpOpNode( assert(op2 != nullptr); assert(op2->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); var_types lookupType = GenTreeHWIntrinsic::GetLookupTypeForCmpOp(this, op, type, simdBaseType, simdSize); @@ -22259,13 +22227,13 @@ GenTree* Compiler::gtNewSimdCmpOpNode( if (lookupType != type) { assert(varTypeIsMask(lookupType)); - GenTree* retNode = gtNewSimdHWIntrinsicNode(lookupType, op1, op2, intrinsic, simdBaseJitType, simdSize); - return gtNewSimdCvtMaskToVectorNode(type, retNode, simdBaseJitType, simdSize); + GenTree* retNode = gtNewSimdHWIntrinsicNode(lookupType, op1, op2, intrinsic, simdBaseType, simdSize); + return gtNewSimdCvtMaskToVectorNode(type, retNode, simdBaseType, simdSize); } #else assert(lookupType == type); #endif // !FEATURE_MASKED_HW_INTRINSICS - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + return 
gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); } assert(lookupType == type); @@ -22289,13 +22257,13 @@ GenTree* Compiler::gtNewSimdCmpOpNode( // Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of // respective long elements. - GenTree* tmp = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize); + GenTree* tmp = gtNewSimdCmpOpNode(op, type, op1, op2, TYP_INT, simdSize); op1 = fgMakeMultiUse(&tmp); - op2 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_ZWXY), NI_X86Base_Shuffle, CORINFO_TYPE_INT, - simdSize); + op2 = + gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_ZWXY), NI_X86Base_Shuffle, TYP_INT, simdSize); - return gtNewSimdBinOpNode(GT_AND, type, tmp, op2, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_AND, type, tmp, op2, simdBaseType, simdSize); } case GT_GE: @@ -22321,9 +22289,9 @@ GenTree* Compiler::gtNewSimdCmpOpNode( bool isMax = (op == GT_GE); // EQ(MinMax(op1, op2), op1) - op1 = gtNewSimdMinMaxNativeNode(type, op1, op2, simdBaseJitType, simdSize, isMax); + op1 = gtNewSimdMinMaxNativeNode(type, op1, op2, simdBaseType, simdSize, isMax); - return gtNewSimdCmpOpNode(GT_EQ, type, op1, op1Dup, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_EQ, type, op1, op1Dup, simdBaseType, simdSize); } } @@ -22355,10 +22323,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode( op = GT_LT; } - op1 = gtNewSimdCmpOpNode(op, type, op1, op2, simdBaseJitType, simdSize); - op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize); + op1 = gtNewSimdCmpOpNode(op, type, op1, op2, simdBaseType, simdSize); + op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseType, simdSize); - return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseType, simdSize); } case GT_GT: @@ -22380,41 +22348,36 @@ GenTree* Compiler::gtNewSimdCmpOpNode( // We need to treat op1 and op2 as 
signed for comparison purpose after // the transformation. - uint64_t constVal = 0; - CorInfoType opJitType = simdBaseJitType; - var_types opType = simdBaseType; + uint64_t constVal = 0; + var_types opType = simdBaseType; switch (simdBaseType) { case TYP_UBYTE: { - constVal = 0x8080808080808080; - simdBaseJitType = CORINFO_TYPE_BYTE; - simdBaseType = TYP_BYTE; + constVal = 0x8080808080808080; + simdBaseType = TYP_BYTE; break; } case TYP_USHORT: { - constVal = 0x8000800080008000; - simdBaseJitType = CORINFO_TYPE_SHORT; - simdBaseType = TYP_SHORT; + constVal = 0x8000800080008000; + simdBaseType = TYP_SHORT; break; } case TYP_UINT: { - constVal = 0x8000000080000000; - simdBaseJitType = CORINFO_TYPE_INT; - simdBaseType = TYP_INT; + constVal = 0x8000000080000000; + simdBaseType = TYP_INT; break; } case TYP_ULONG: { - constVal = 0x8000000000000000; - simdBaseJitType = CORINFO_TYPE_LONG; - simdBaseType = TYP_LONG; + constVal = 0x8000000000000000; + simdBaseType = TYP_LONG; break; } @@ -22434,12 +22397,12 @@ GenTree* Compiler::gtNewSimdCmpOpNode( GenTree* vecCon2 = gtCloneExpr(vecCon1); // op1 = op1 - constVector - op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opType, simdSize); // op2 = op2 - constVector - op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize); + op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opType, simdSize); - return gtNewSimdCmpOpNode(op, type, op1, op2, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(op, type, op1, op2, simdBaseType, simdSize); } else { @@ -22486,19 +22449,19 @@ GenTree* Compiler::gtNewSimdCmpOpNode( GenTree* op2Dup1 = fgMakeMultiUse(&op2); GenTree* op2Dup2 = gtCloneExpr(op2Dup1); - GenTree* t = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize); - GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize); - GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, 
CORINFO_TYPE_UINT, simdSize); + GenTree* t = gtNewSimdCmpOpNode(op, type, op1, op2, TYP_INT, simdSize); + GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, TYP_INT, simdSize); + GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, TYP_UINT, simdSize); op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_X86Base_Shuffle, - CORINFO_TYPE_INT, simdSize); - u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_X86Base_Shuffle, - CORINFO_TYPE_INT, simdSize); - v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_X86Base_Shuffle, - CORINFO_TYPE_INT, simdSize); + TYP_INT, simdSize); + u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_X86Base_Shuffle, TYP_INT, + simdSize); + v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_X86Base_Shuffle, TYP_INT, + simdSize); - op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseType, simdSize); + return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseType, simdSize); } break; } @@ -22511,8 +22474,8 @@ GenTree* Compiler::gtNewSimdCmpOpNode( assert(!canUseEvexEncodingDebugOnly()); #endif // TARGET_XARCH - GenTree* result = gtNewSimdCmpOpNode(GT_EQ, type, op1, op2, simdBaseJitType, simdSize); - return gtNewSimdUnOpNode(GT_NOT, type, result, simdBaseJitType, simdSize); + GenTree* result = gtNewSimdCmpOpNode(GT_EQ, type, op1, op2, simdBaseType, simdSize); + return gtNewSimdUnOpNode(GT_NOT, type, result, simdBaseType, simdSize); } default: @@ -22523,7 +22486,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode( } GenTree* Compiler::gtNewSimdCmpOpAllNode( - genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, 
unsigned simdSize) { assert(type == TYP_INT); @@ -22536,7 +22499,6 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode( assert(op2 != nullptr); assert(op2->TypeIs(simdType)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -22588,18 +22550,16 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode( intrinsic = NI_Vector128_op_Equality; } - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseType, simdSize); op2 = gtNewAllBitsSetConNode(simdType); if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } else if (simdBaseType == TYP_DOUBLE) { - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } break; } @@ -22627,18 +22587,16 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode( intrinsic = NI_Vector128_op_Equality; } - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseType, simdSize); op2 = gtNewAllBitsSetConNode(simdType); if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } else if (simdBaseType == TYP_DOUBLE) { - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } break; } @@ -22653,11 +22611,11 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdCmpOpAnyNode( - genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { 
assert(type == TYP_INT); @@ -22670,7 +22628,6 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode( assert(op2 != nullptr); assert(op2->TypeIs(simdType)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -22706,18 +22663,16 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode( intrinsic = NI_Vector128_op_Inequality; } - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseType, simdSize); op2 = gtNewZeroConNode(simdType); if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } else if (simdBaseType == TYP_DOUBLE) { - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } break; } @@ -22752,18 +22707,16 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode( intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality; - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseType, simdSize); op2 = gtNewZeroConNode(simdType); if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } else if (simdBaseType == TYP_DOUBLE) { - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } break; } @@ -22784,11 +22737,11 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdCndSelNode( - var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, GenTree* op3, var_types simdBaseType, unsigned 
simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -22802,7 +22755,6 @@ GenTree* Compiler::gtNewSimdCndSelNode( assert(op3 != nullptr); assert(op3->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -22821,9 +22773,9 @@ GenTree* Compiler::gtNewSimdCndSelNode( { intrinsic = NI_Vector128_ConditionalSelect; } - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseType, simdSize); #elif defined(TARGET_ARM64) - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -22835,19 +22787,15 @@ GenTree* Compiler::gtNewSimdCndSelNode( // Arguments: // type - The return type of SIMD node being created // op1 - The value of broadcast to every element of the simd value -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created CreateBroadcast node // -GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_Create; - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); if (op1->IsIntegralConst() || op1->IsCnsFltOrDbl()) { @@ -22952,7 +22900,7 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type, #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 
- return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -22961,19 +22909,15 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The value of element 0 of the simd value -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created CreateScalar node // -GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_CreateScalar; - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); if (op1->IsIntegralConst() || op1->IsCnsFltOrDbl()) { @@ -23055,7 +22999,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type, #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23064,7 +23008,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The value of element 0 of the simd value -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: @@ -23073,13 
+23017,12 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type, // Remarks: // This API is unsafe as it leaves the upper-bits of the vector undefined // -GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type, + GenTree* op1, + var_types simdBaseType, + unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_CreateScalarUnsafe; - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); if (op1->IsIntegralConst() || op1->IsCnsFltOrDbl()) { @@ -23190,7 +23133,7 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type, #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23200,14 +23143,14 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type, // type - The return type of SIMD node being created // op1 - The starting value // op2 - The step value -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created CreateSequence node // GenTree* Compiler::gtNewSimdCreateSequenceNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { // This effectively does: (Indices * op2) + Create(op1) // @@ -23218,7 +23161,6 @@ GenTree* Compiler::gtNewSimdCreateSequenceNode( assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); 
assert(varTypeIsArithmetic(simdBaseType)); GenTree* result = nullptr; @@ -23371,21 +23313,21 @@ GenTree* Compiler::gtNewSimdCreateSequenceNode( } else { - GenTree* indices = gtNewSimdGetIndicesNode(type, simdBaseJitType, simdSize); - result = gtNewSimdBinOpNode(GT_MUL, type, indices, op2, simdBaseJitType, simdSize); + GenTree* indices = gtNewSimdGetIndicesNode(type, simdBaseType, simdSize); + result = gtNewSimdBinOpNode(GT_MUL, type, indices, op2, simdBaseType, simdSize); } if (isPartial) { - GenTree* start = gtNewSimdCreateBroadcastNode(type, op1, simdBaseJitType, simdSize); - result = gtNewSimdBinOpNode(GT_ADD, type, result, start, simdBaseJitType, simdSize); + GenTree* start = gtNewSimdCreateBroadcastNode(type, op1, simdBaseType, simdSize); + result = gtNewSimdBinOpNode(GT_ADD, type, result, start, simdBaseType, simdSize); } return result; } GenTree* Compiler::gtNewSimdDotProdNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { var_types simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); @@ -23396,7 +23338,6 @@ GenTree* Compiler::gtNewSimdDotProdNode( assert(op2 != nullptr); assert(op2->TypeIs(simdType)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsSIMD(type)); NamedIntrinsic intrinsic = NI_Illegal; @@ -23422,10 +23363,10 @@ GenTree* Compiler::gtNewSimdDotProdNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); } -GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { 
assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23433,7 +23374,6 @@ GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, CorInfoType assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsFloating(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -23446,7 +23386,7 @@ GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, CorInfoType else if (simdSize == 64) { GenTree* op2 = gtNewIconNode(static_cast(FloatRoundingMode::ToNegativeInfinity)); - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseType, simdSize); } else { @@ -23466,11 +23406,11 @@ GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, CorInfoType #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdFmaNode( - var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, GenTree* op3, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23484,7 +23424,6 @@ GenTree* Compiler::gtNewSimdFmaNode( assert(op3 != nullptr); assert(op3->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsFloating(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -23518,21 +23457,20 @@ GenTree* Compiler::gtNewSimdFmaNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, 
op1, op2, op3, intrinsic, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdGetElementNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { - NamedIntrinsic intrinsicId = NI_Vector128_GetElement; - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); + NamedIntrinsic intrinsicId = NI_Vector128_GetElement; assert(varTypeIsArithmetic(simdBaseType)); #if defined(TARGET_XARCH) if (op2->IsIntegralConst(0)) { - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); + return gtNewSimdToScalarNode(type, op1, simdBaseType, simdSize); } if (simdSize == 64) @@ -23546,7 +23484,7 @@ GenTree* Compiler::gtNewSimdGetElementNode( #elif defined(TARGET_ARM64) if (op2->IsIntegralConst(0)) { - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); + return gtNewSimdToScalarNode(type, op1, simdBaseType, simdSize); } if (simdSize == 8) @@ -23571,7 +23509,7 @@ GenTree* Compiler::gtNewSimdGetElementNode( op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound); } - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23579,18 +23517,17 @@ GenTree* Compiler::gtNewSimdGetElementNode( // // Arguments: // type - The return type of SIMD node being created -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created get_Indices node // -GenTree* Compiler::gtNewSimdGetIndicesNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdGetIndicesNode(var_types type, var_types simdBaseType, unsigned 
simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); GenTreeVecCon* indices = gtNewVconNode(type); @@ -23665,9 +23602,8 @@ GenTree* Compiler::gtNewSimdGetIndicesNode(var_types type, CorInfoType simdBaseJ return indices; } -GenTree* Compiler::gtNewSimdGetLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdGetLowerNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsicId = NI_Illegal; @@ -23691,12 +23627,11 @@ GenTree* Compiler::gtNewSimdGetLowerNode(var_types type, GenTree* op1, CorInfoTy #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsicId != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseType, simdSize); } -GenTree* Compiler::gtNewSimdGetUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdGetUpperNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsicId = NI_Illegal; @@ -23720,7 +23655,7 @@ GenTree* Compiler::gtNewSimdGetUpperNode(var_types type, GenTree* op1, CorInfoTy #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsicId != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23729,16 +23664,13 @@ GenTree* 
Compiler::gtNewSimdGetUpperNode(var_types type, GenTree* op1, CorInfoTy // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for even integers -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsEvenInteger node // -GenTree* Compiler::gtNewSimdIsEvenIntegerNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdIsEvenIntegerNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23746,11 +23678,10 @@ GenTree* Compiler::gtNewSimdIsEvenIntegerNode(var_types type, assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsIntegral(simdBaseType)); - op1 = gtNewSimdBinOpNode(GT_AND, type, op1, gtNewOneConNode(type, simdBaseType), simdBaseJitType, simdSize); - return gtNewSimdIsZeroNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_AND, type, op1, gtNewOneConNode(type, simdBaseType), simdBaseType, simdSize); + return gtNewSimdIsZeroNode(type, op1, simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23759,13 +23690,13 @@ GenTree* Compiler::gtNewSimdIsEvenIntegerNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for finite values -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsFinite node // -GenTree* Compiler::gtNewSimdIsFiniteNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, 
unsigned simdSize) +GenTree* Compiler::gtNewSimdIsFiniteNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23773,7 +23704,6 @@ GenTree* Compiler::gtNewSimdIsFiniteNode(var_types type, GenTree* op1, CorInfoTy assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) @@ -23782,22 +23712,20 @@ GenTree* Compiler::gtNewSimdIsFiniteNode(var_types type, GenTree* op1, CorInfoTy if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_UINT; - cnsNode = gtNewIconNode(0x7F800000); + simdBaseType = TYP_INT; + cnsNode = gtNewIconNode(0x7F800000); } else { assert(simdBaseType == TYP_DOUBLE); - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_ULONG; - cnsNode = gtNewLconNode(0x7FF0000000000000); + simdBaseType = TYP_LONG; + cnsNode = gtNewLconNode(0x7FF0000000000000); } - cnsNode = gtNewSimdCreateBroadcastNode(type, cnsNode, simdBaseJitType, simdSize); + cnsNode = gtNewSimdCreateBroadcastNode(type, cnsNode, simdBaseType, simdSize); - op1 = gtNewSimdBinOpNode(GT_AND_NOT, type, cnsNode, op1, simdBaseJitType, simdSize); - return gtNewSimdCmpOpNode(GT_NE, type, op1, gtNewZeroConNode(type), simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_AND_NOT, type, cnsNode, op1, simdBaseType, simdSize); + return gtNewSimdCmpOpNode(GT_NE, type, op1, gtNewZeroConNode(type), simdBaseType, simdSize); } assert(varTypeIsIntegral(simdBaseType)); @@ -23810,13 +23738,13 @@ GenTree* Compiler::gtNewSimdIsFiniteNode(var_types type, GenTree* op1, CorInfoTy // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for infinities -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The 
size of the SIMD type of the intrinsic // // Returns: // The created IsInfinity node // -GenTree* Compiler::gtNewSimdIsInfinityNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsInfinityNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23824,13 +23752,12 @@ GenTree* Compiler::gtNewSimdIsInfinityNode(var_types type, GenTree* op1, CorInfo assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) { - op1 = gtNewSimdAbsNode(type, op1, simdBaseJitType, simdSize); - return gtNewSimdIsPositiveInfinityNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdAbsNode(type, op1, simdBaseType, simdSize); + return gtNewSimdIsPositiveInfinityNode(type, op1, simdBaseType, simdSize); } return gtNewZeroConNode(type); } @@ -23841,13 +23768,13 @@ GenTree* Compiler::gtNewSimdIsInfinityNode(var_types type, GenTree* op1, CorInfo // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for integers -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsInteger node // -GenTree* Compiler::gtNewSimdIsIntegerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsIntegerNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23855,7 +23782,6 @@ GenTree* Compiler::gtNewSimdIsIntegerNode(var_types type, GenTree* op1, CorInfoT assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = 
JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) @@ -23863,12 +23789,12 @@ GenTree* Compiler::gtNewSimdIsIntegerNode(var_types type, GenTree* op1, CorInfoT GenTree* op1Dup1 = fgMakeMultiUse(&op1); GenTree* op1Dup2 = gtCloneExpr(op1Dup1); - op1 = gtNewSimdIsFiniteNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdIsFiniteNode(type, op1, simdBaseType, simdSize); - op1Dup1 = gtNewSimdTruncNode(type, op1Dup1, simdBaseJitType, simdSize); - GenTree* op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op1Dup2, simdBaseJitType, simdSize); + op1Dup1 = gtNewSimdTruncNode(type, op1Dup1, simdBaseType, simdSize); + GenTree* op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op1Dup2, simdBaseType, simdSize); - return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseType, simdSize); } assert(varTypeIsIntegral(simdBaseType)); @@ -23881,13 +23807,13 @@ GenTree* Compiler::gtNewSimdIsIntegerNode(var_types type, GenTree* op1, CorInfoT // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for NaNs -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsNaN node // -GenTree* Compiler::gtNewSimdIsNaNNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsNaNNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23895,13 +23821,12 @@ GenTree* Compiler::gtNewSimdIsNaNNode(var_types type, GenTree* op1, CorInfoType assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if 
(varTypeIsFloating(simdBaseType)) { GenTree* op1Dup = fgMakeMultiUse(&op1); - return gtNewSimdCmpOpNode(GT_NE, type, op1, op1Dup, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_NE, type, op1, op1Dup, simdBaseType, simdSize); } return gtNewZeroConNode(type); } @@ -23912,13 +23837,13 @@ GenTree* Compiler::gtNewSimdIsNaNNode(var_types type, GenTree* op1, CorInfoType // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for negatives -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsNegative node // -GenTree* Compiler::gtNewSimdIsNegativeNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsNegativeNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23926,23 +23851,22 @@ GenTree* Compiler::gtNewSimdIsNegativeNode(var_types type, GenTree* op1, CorInfo assert(op1 != nullptr); assert(op1->TypeIs(type)); - if (simdBaseJitType == CORINFO_TYPE_FLOAT) + if (simdBaseType == TYP_FLOAT) { - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } - else if (simdBaseJitType == CORINFO_TYPE_DOUBLE) + else if (simdBaseType == TYP_DOUBLE) { - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsIntegral(simdBaseType)); if (varTypeIsUnsigned(simdBaseType)) { return gtNewZeroConNode(type); } - return gtNewSimdCmpOpNode(GT_LT, type, op1, gtNewZeroConNode(type), simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_LT, type, op1, gtNewZeroConNode(type), simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23951,16 
+23875,16 @@ GenTree* Compiler::gtNewSimdIsNegativeNode(var_types type, GenTree* op1, CorInfo // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for negative infinities -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsNegativeInfinity node // -GenTree* Compiler::gtNewSimdIsNegativeInfinityNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdIsNegativeInfinityNode(var_types type, + GenTree* op1, + var_types simdBaseType, + unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -23968,7 +23892,6 @@ GenTree* Compiler::gtNewSimdIsNegativeInfinityNode(var_types type, assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) @@ -23977,21 +23900,19 @@ GenTree* Compiler::gtNewSimdIsNegativeInfinityNode(var_types type, if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_UINT; - cnsNode = gtNewIconNode(0xFF800000); + simdBaseType = TYP_UINT; + cnsNode = gtNewIconNode(0xFF800000); } else { assert(simdBaseType == TYP_DOUBLE); - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_ULONG; - cnsNode = gtNewLconNode(0xFFF0000000000000); + simdBaseType = TYP_ULONG; + cnsNode = gtNewLconNode(0xFFF0000000000000); } - cnsNode = gtNewSimdCreateBroadcastNode(type, cnsNode, simdBaseJitType, simdSize); + cnsNode = gtNewSimdCreateBroadcastNode(type, cnsNode, simdBaseType, simdSize); - return gtNewSimdCmpOpNode(GT_EQ, type, op1, cnsNode, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_EQ, type, op1, cnsNode, simdBaseType, simdSize); } return gtNewZeroConNode(type); } @@ 
-24002,13 +23923,13 @@ GenTree* Compiler::gtNewSimdIsNegativeInfinityNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for normal values -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsNormal node // -GenTree* Compiler::gtNewSimdIsNormalNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsNormalNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -24016,20 +23937,18 @@ GenTree* Compiler::gtNewSimdIsNormalNode(var_types type, GenTree* op1, CorInfoTy assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) { - op1 = gtNewSimdAbsNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdAbsNode(type, op1, simdBaseType, simdSize); GenTree* cnsNode1; GenTree* cnsNode2; if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_UINT; + simdBaseType = TYP_UINT; cnsNode1 = gtNewIconNode(0x00800000); cnsNode2 = gtNewIconNode(0x7F800000 - 0x00800000); @@ -24038,22 +23957,21 @@ GenTree* Compiler::gtNewSimdIsNormalNode(var_types type, GenTree* op1, CorInfoTy { assert(simdBaseType == TYP_DOUBLE); - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_ULONG; + simdBaseType = TYP_ULONG; cnsNode1 = gtNewLconNode(0x0010000000000000); cnsNode2 = gtNewLconNode(0x7FF0000000000000 - 0x0010000000000000); } - cnsNode1 = gtNewSimdCreateBroadcastNode(type, cnsNode1, simdBaseJitType, simdSize); - cnsNode2 = gtNewSimdCreateBroadcastNode(type, cnsNode2, simdBaseJitType, simdSize); + cnsNode1 = 
gtNewSimdCreateBroadcastNode(type, cnsNode1, simdBaseType, simdSize); + cnsNode2 = gtNewSimdCreateBroadcastNode(type, cnsNode2, simdBaseType, simdSize); - op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, cnsNode1, simdBaseJitType, simdSize); - return gtNewSimdCmpOpNode(GT_LT, type, op1, cnsNode2, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, cnsNode1, simdBaseType, simdSize); + return gtNewSimdCmpOpNode(GT_LT, type, op1, cnsNode2, simdBaseType, simdSize); } assert(varTypeIsIntegral(simdBaseType)); - return gtNewSimdCmpOpNode(GT_NE, type, op1, gtNewZeroConNode(type), simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_NE, type, op1, gtNewZeroConNode(type), simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -24062,16 +23980,13 @@ GenTree* Compiler::gtNewSimdIsNormalNode(var_types type, GenTree* op1, CorInfoTy // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for odd integers -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsOddInteger node // -GenTree* Compiler::gtNewSimdIsOddIntegerNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdIsOddIntegerNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -24079,11 +23994,10 @@ GenTree* Compiler::gtNewSimdIsOddIntegerNode(var_types type, assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsIntegral(simdBaseType)); - op1 = gtNewSimdBinOpNode(GT_AND, type, op1, gtNewOneConNode(type, simdBaseType), simdBaseJitType, simdSize); - return gtNewSimdCmpOpNode(GT_NE, 
type, op1, gtNewZeroConNode(type), simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_AND, type, op1, gtNewOneConNode(type, simdBaseType), simdBaseType, simdSize); + return gtNewSimdCmpOpNode(GT_NE, type, op1, gtNewZeroConNode(type), simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -24092,13 +24006,13 @@ GenTree* Compiler::gtNewSimdIsOddIntegerNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for positives -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsPositive node // -GenTree* Compiler::gtNewSimdIsPositiveNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsPositiveNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -24106,23 +24020,22 @@ GenTree* Compiler::gtNewSimdIsPositiveNode(var_types type, GenTree* op1, CorInfo assert(op1 != nullptr); assert(op1->TypeIs(type)); - if (simdBaseJitType == CORINFO_TYPE_FLOAT) + if (simdBaseType == TYP_FLOAT) { - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } - else if (simdBaseJitType == CORINFO_TYPE_DOUBLE) + else if (simdBaseType == TYP_DOUBLE) { - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsIntegral(simdBaseType)); if (varTypeIsUnsigned(simdBaseType)) { return gtNewAllBitsSetConNode(type); } - return gtNewSimdCmpOpNode(GT_GE, type, op1, gtNewZeroConNode(type), simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_GE, type, op1, gtNewZeroConNode(type), simdBaseType, simdSize); } 
//---------------------------------------------------------------------------------------------- @@ -24131,16 +24044,16 @@ GenTree* Compiler::gtNewSimdIsPositiveNode(var_types type, GenTree* op1, CorInfo // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for positive infinities -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsPositiveInfinity node // -GenTree* Compiler::gtNewSimdIsPositiveInfinityNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdIsPositiveInfinityNode(var_types type, + GenTree* op1, + var_types simdBaseType, + unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -24148,7 +24061,6 @@ GenTree* Compiler::gtNewSimdIsPositiveInfinityNode(var_types type, assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) @@ -24157,21 +24069,19 @@ GenTree* Compiler::gtNewSimdIsPositiveInfinityNode(var_types type, if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_UINT; - cnsNode = gtNewIconNode(0x7F800000); + simdBaseType = TYP_UINT; + cnsNode = gtNewIconNode(0x7F800000); } else { assert(simdBaseType == TYP_DOUBLE); - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_ULONG; - cnsNode = gtNewLconNode(0x7FF0000000000000); + simdBaseType = TYP_ULONG; + cnsNode = gtNewLconNode(0x7FF0000000000000); } - cnsNode = gtNewSimdCreateBroadcastNode(type, cnsNode, simdBaseJitType, simdSize); + cnsNode = gtNewSimdCreateBroadcastNode(type, cnsNode, simdBaseType, simdSize); - return gtNewSimdCmpOpNode(GT_EQ, type, op1, cnsNode, simdBaseJitType, simdSize); + return 
gtNewSimdCmpOpNode(GT_EQ, type, op1, cnsNode, simdBaseType, simdSize); } return gtNewZeroConNode(type); } @@ -24182,16 +24092,13 @@ GenTree* Compiler::gtNewSimdIsPositiveInfinityNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for subnormal values -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsSubnormal node // -GenTree* Compiler::gtNewSimdIsSubnormalNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdIsSubnormalNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -24199,20 +24106,18 @@ GenTree* Compiler::gtNewSimdIsSubnormalNode(var_types type, assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); if (varTypeIsFloating(simdBaseType)) { - op1 = gtNewSimdAbsNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdAbsNode(type, op1, simdBaseType, simdSize); GenTree* cnsNode1; GenTree* cnsNode2; if (simdBaseType == TYP_FLOAT) { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_UINT; + simdBaseType = TYP_UINT; cnsNode2 = gtNewIconNode(0x007FFFFF); } @@ -24220,18 +24125,17 @@ GenTree* Compiler::gtNewSimdIsSubnormalNode(var_types type, { assert(simdBaseType == TYP_DOUBLE); - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_ULONG; + simdBaseType = TYP_ULONG; cnsNode2 = gtNewLconNode(0x000FFFFFFFFFFFFF); } cnsNode1 = gtNewOneConNode(type, simdBaseType); - cnsNode2 = gtNewSimdCreateBroadcastNode(type, cnsNode2, simdBaseJitType, simdSize); + cnsNode2 = gtNewSimdCreateBroadcastNode(type, cnsNode2, simdBaseType, simdSize); - op1 = 
gtNewSimdBinOpNode(GT_SUB, type, op1, cnsNode1, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, cnsNode1, simdBaseType, simdSize); - return gtNewSimdCmpOpNode(GT_LT, type, op1, cnsNode2, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_LT, type, op1, cnsNode2, simdBaseType, simdSize); } return gtNewZeroConNode(type); } @@ -24242,13 +24146,13 @@ GenTree* Compiler::gtNewSimdIsSubnormalNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The vector to check for Zeroes -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created IsZero node // -GenTree* Compiler::gtNewSimdIsZeroNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdIsZeroNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -24256,10 +24160,9 @@ GenTree* Compiler::gtNewSimdIsZeroNode(var_types type, GenTree* op1, CorInfoType assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); - return gtNewSimdCmpOpNode(GT_EQ, type, op1, gtNewZeroConNode(type), simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_EQ, type, op1, gtNewZeroConNode(type), simdBaseType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -24268,20 +24171,19 @@ GenTree* Compiler::gtNewSimdIsZeroNode(var_types type, GenTree* op1, CorInfoType // Arguments: // type - The return type of SIMD node being created // op1 - The address of the value to be loaded -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the 
intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created Load node // -GenTree* Compiler::gtNewSimdLoadNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdLoadNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); assert(op1 != nullptr); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); return gtNewIndir(type, op1); @@ -24293,16 +24195,13 @@ GenTree* Compiler::gtNewSimdLoadNode(var_types type, GenTree* op1, CorInfoType s // Arguments: // type - The return type of SIMD node being created // op1 - The address of the value to be loaded -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created LoadAligned node // -GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { #if defined(TARGET_XARCH) assert(varTypeIsSIMD(type)); @@ -24310,7 +24209,6 @@ GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type, assert(op1 != nullptr); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -24329,14 +24227,14 @@ GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type, } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // 
aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize); + return gtNewSimdLoadNode(type, op1, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -24348,16 +24246,13 @@ GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type, // Arguments: // type - The return type of SIMD node being created // op1 - The address of the value to be loaded -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created LoadNonTemporal node // -GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { #if defined(TARGET_XARCH) assert(varTypeIsSIMD(type)); @@ -24365,7 +24260,6 @@ GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, assert(op1 != nullptr); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -24404,23 +24298,23 @@ GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, if (simdBaseType == TYP_FLOAT) { - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } else if (simdBaseType == TYP_DOUBLE) { - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned when optimizations 
are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize); + return gtNewSimdLoadNode(type, op1, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -24434,7 +24328,7 @@ GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, // type -- The type of the node to generate // op1 -- The first operand // op2 -- The second operand -// simdBaseJitType -- the base jit type of the node +// simdBaseType -- the base type of the node // simdSize -- the simd size of the node // isMax -- true to compute the maximum; otherwise, false for the minimum // isMagnitude -- true to compare the absolute values of op1/op2; otherwise false to compare op1/op2 directly @@ -24443,14 +24337,14 @@ GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, // Return Value: // The node representing the minimum or maximum operation // -GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isMax, - bool isMagnitude, - bool isNumber) +GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, + GenTree* op1, + GenTree* op2, + var_types simdBaseType, + unsigned simdSize, + bool isMax, + bool isMagnitude, + bool isNumber) { assert(op1 != nullptr); assert(op1->TypeIs(type)); @@ -24458,7 +24352,6 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, assert(op2 != nullptr); assert(op2->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); bool isScalar = false; @@ -24537,8 +24430,8 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, { if (isScalar) { - op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseJitType, simdSize); - op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(type, 
op1, simdBaseType, simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseType, simdSize); } ctrlByte |= isNumber ? 0x10 : 0x00; @@ -24546,7 +24439,7 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, GenTree* op3 = gtNewIconNode(ctrlByte); intrinsic = isScalar ? NI_AVX10v2_MinMaxScalar : NI_AVX10v2_MinMax; - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseType, simdSize); } else if ((cnsNode != nullptr) && !otherNode->OperIsConst()) { @@ -24692,10 +24585,10 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, vecCon->EvaluateBroadcastInPlace(simdBaseType, cnsNode->AsDblCon()->DconValue()); op1 = vecCon; - op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseType, simdSize); } - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); if (needsFixup) { @@ -24749,7 +24642,7 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, intrinsic = isScalar ? 
NI_AVX512_FixupScalar : NI_AVX512_Fixup; retNode = gtNewSimdHWIntrinsicNode(type, retNode, op2Clone, tblVecCon, gtNewIconNode(0), - intrinsic, simdBaseJitType, simdSize); + intrinsic, simdBaseType, simdSize); } if (isNumber) @@ -24768,8 +24661,8 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, { if (isScalar) { - op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseJitType, simdSize); - op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseType, simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseType, simdSize); } if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) @@ -24796,11 +24689,11 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, if (isNumber) { retNode = - gtNewSimdHWIntrinsicNode(type, op1Clone, op2Clone, op3, intrinsic, simdBaseJitType, simdSize); + gtNewSimdHWIntrinsicNode(type, op1Clone, op2Clone, op3, intrinsic, simdBaseType, simdSize); } else { - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseType, simdSize); } // FixupScalar(left, right, table, control) computes the input type of right @@ -24859,10 +24752,10 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, // Otherwise, the result was already correct op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, tblVecCon1, gtNewIconNode(0), intrinsic, - simdBaseJitType, simdSize); + simdBaseType, simdSize); retNode = gtNewSimdHWIntrinsicNode(type, op1, retNode, tblVecCon2, gtNewIconNode(0), intrinsic, - simdBaseJitType, simdSize); + simdBaseType, simdSize); } else { @@ -24903,17 +24796,17 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, // Otherwise, the result was already correct op1Clone = gtNewSimdHWIntrinsicNode(type, op1Clone, op2Clone, tblVecCon1, gtNewIconNode(0), - intrinsic, simdBaseJitType, simdSize); + intrinsic, simdBaseType, simdSize); 
retNode = gtNewSimdHWIntrinsicNode(type, retNode, op1Clone, tblVecCon2, gtNewIconNode(0), intrinsic, - simdBaseJitType, simdSize); + simdBaseType, simdSize); } } } #elif defined(TARGET_ARM64) if (!isMagnitude && !isNumber) { - return gtNewSimdMinMaxNativeNode(type, op1, op2, simdBaseJitType, simdSize, isMax); + return gtNewSimdMinMaxNativeNode(type, op1, op2, simdBaseType, simdSize, isMax); } if (isScalar) @@ -24921,8 +24814,8 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, simdSize = 8; type = TYP_SIMD8; - op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseJitType, simdSize); - op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseType, simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseType, simdSize); } #else assert(!isScalar); @@ -24954,72 +24847,72 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, if (isMagnitude) { - GenTree* absOp1 = gtNewSimdAbsNode(type, op1, simdBaseJitType, simdSize); - GenTree* absOp2 = gtNewSimdAbsNode(type, op2, simdBaseJitType, simdSize); + GenTree* absOp1 = gtNewSimdAbsNode(type, op1, simdBaseType, simdSize); + GenTree* absOp2 = gtNewSimdAbsNode(type, op2, simdBaseType, simdSize); absOp1Dup = fgMakeMultiUse(&absOp1); absOp2Dup = fgMakeMultiUse(&absOp2); - equalsMask = gtNewSimdCmpOpNode(GT_EQ, type, absOp1, absOp2, simdBaseJitType, simdSize); + equalsMask = gtNewSimdCmpOpNode(GT_EQ, type, absOp1, absOp2, simdBaseType, simdSize); if (isMax) { - signMask = gtNewSimdIsPositiveNode(type, op1Dup, simdBaseJitType, simdSize); - cmpMask = gtNewSimdCmpOpNode(GT_GT, type, absOp1Dup, absOp2Dup, simdBaseJitType, simdSize); + signMask = gtNewSimdIsPositiveNode(type, op1Dup, simdBaseType, simdSize); + cmpMask = gtNewSimdCmpOpNode(GT_GT, type, absOp1Dup, absOp2Dup, simdBaseType, simdSize); } else { - signMask = gtNewSimdIsNegativeNode(type, op1Dup, simdBaseJitType, simdSize); - cmpMask = gtNewSimdCmpOpNode(GT_LT, type, 
absOp1Dup, absOp2Dup, simdBaseJitType, simdSize); + signMask = gtNewSimdIsNegativeNode(type, op1Dup, simdBaseType, simdSize); + cmpMask = gtNewSimdCmpOpNode(GT_LT, type, absOp1Dup, absOp2Dup, simdBaseType, simdSize); } if (isNumber) { - nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(absOp2Dup), simdBaseJitType, simdSize); + nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(absOp2Dup), simdBaseType, simdSize); } else { - nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(absOp1Dup), simdBaseJitType, simdSize); + nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(absOp1Dup), simdBaseType, simdSize); } } else { - equalsMask = gtNewSimdCmpOpNode(GT_EQ, type, op1, op2, simdBaseJitType, simdSize); + equalsMask = gtNewSimdCmpOpNode(GT_EQ, type, op1, op2, simdBaseType, simdSize); if (isMax) { - signMask = gtNewSimdIsNegativeNode(type, op2Dup, simdBaseJitType, simdSize); - cmpMask = gtNewSimdCmpOpNode(GT_LT, type, gtCloneExpr(op2Dup), op1Dup, simdBaseJitType, simdSize); + signMask = gtNewSimdIsNegativeNode(type, op2Dup, simdBaseType, simdSize); + cmpMask = gtNewSimdCmpOpNode(GT_LT, type, gtCloneExpr(op2Dup), op1Dup, simdBaseType, simdSize); } else { - signMask = gtNewSimdIsNegativeNode(type, op1Dup, simdBaseJitType, simdSize); - cmpMask = gtNewSimdCmpOpNode(GT_LT, type, gtCloneExpr(op1Dup), op2Dup, simdBaseJitType, simdSize); + signMask = gtNewSimdIsNegativeNode(type, op1Dup, simdBaseType, simdSize); + cmpMask = gtNewSimdCmpOpNode(GT_LT, type, gtCloneExpr(op1Dup), op2Dup, simdBaseType, simdSize); } if (isNumber) { - nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(op2Dup), simdBaseJitType, simdSize); + nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(op2Dup), simdBaseType, simdSize); } else { - nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(op1Dup), simdBaseJitType, simdSize); + nanMask = gtNewSimdIsNaNNode(type, gtCloneExpr(op1Dup), simdBaseType, simdSize); } op2Dup = gtCloneExpr(op2Dup); } - GenTree* mask = gtNewSimdBinOpNode(GT_AND, type, equalsMask, signMask, simdBaseJitType, 
simdSize); - mask = gtNewSimdBinOpNode(GT_OR, type, mask, nanMask, simdBaseJitType, simdSize); - mask = gtNewSimdBinOpNode(GT_OR, type, mask, cmpMask, simdBaseJitType, simdSize); + GenTree* mask = gtNewSimdBinOpNode(GT_AND, type, equalsMask, signMask, simdBaseType, simdSize); + mask = gtNewSimdBinOpNode(GT_OR, type, mask, nanMask, simdBaseType, simdSize); + mask = gtNewSimdBinOpNode(GT_OR, type, mask, cmpMask, simdBaseType, simdSize); - retNode = gtNewSimdCndSelNode(type, mask, gtCloneExpr(op1Dup), op2Dup, simdBaseJitType, simdSize); + retNode = gtNewSimdCndSelNode(type, mask, gtCloneExpr(op1Dup), op2Dup, simdBaseType, simdSize); } assert(retNode != nullptr); if (isScalar) { - retNode = gtNewSimdToScalarNode(simdBaseType, retNode, simdBaseJitType, simdSize); + retNode = gtNewSimdToScalarNode(simdBaseType, retNode, simdBaseType, simdSize); } return retNode; } @@ -25031,13 +24924,13 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, GenTree* op1Dup = fgMakeMultiUse(&op1); GenTree* op2Dup = fgMakeMultiUse(&op2); - GenTree* absOp1 = gtNewSimdAbsNode(type, op1, simdBaseJitType, simdSize); - GenTree* absOp2 = gtNewSimdAbsNode(type, op2, simdBaseJitType, simdSize); + GenTree* absOp1 = gtNewSimdAbsNode(type, op1, simdBaseType, simdSize); + GenTree* absOp2 = gtNewSimdAbsNode(type, op2, simdBaseType, simdSize); GenTree* absOp1Dup = fgMakeMultiUse(&absOp1); GenTree* absOp2Dup = fgMakeMultiUse(&absOp2); - GenTree* equalsMask = gtNewSimdCmpOpNode(GT_EQ, type, absOp1, absOp2, simdBaseJitType, simdSize); + GenTree* equalsMask = gtNewSimdCmpOpNode(GT_EQ, type, absOp1, absOp2, simdBaseType, simdSize); ; GenTree* signMask1 = nullptr; GenTree* signMask2 = nullptr; @@ -25046,29 +24939,29 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, if (isMax) { - signMask1 = gtNewSimdIsNegativeNode(type, op2Dup, simdBaseJitType, simdSize); - signMask2 = gtNewSimdIsPositiveNode(type, absOp2Dup, simdBaseJitType, simdSize); - signMask3 = gtNewSimdIsNegativeNode(type, absOp1Dup, 
simdBaseJitType, simdSize); - cmpMask = gtNewSimdCmpOpNode(GT_GT, type, gtCloneExpr(absOp1Dup), gtCloneExpr(absOp2Dup), simdBaseJitType, - simdSize); + signMask1 = gtNewSimdIsNegativeNode(type, op2Dup, simdBaseType, simdSize); + signMask2 = gtNewSimdIsPositiveNode(type, absOp2Dup, simdBaseType, simdSize); + signMask3 = gtNewSimdIsNegativeNode(type, absOp1Dup, simdBaseType, simdSize); + cmpMask = + gtNewSimdCmpOpNode(GT_GT, type, gtCloneExpr(absOp1Dup), gtCloneExpr(absOp2Dup), simdBaseType, simdSize); } else { - signMask1 = gtNewSimdIsNegativeNode(type, op1Dup, simdBaseJitType, simdSize); - signMask2 = gtNewSimdIsPositiveNode(type, absOp1Dup, simdBaseJitType, simdSize); - signMask3 = gtNewSimdIsNegativeNode(type, absOp2Dup, simdBaseJitType, simdSize); - cmpMask = gtNewSimdCmpOpNode(GT_LT, type, gtCloneExpr(absOp1Dup), gtCloneExpr(absOp2Dup), simdBaseJitType, - simdSize); + signMask1 = gtNewSimdIsNegativeNode(type, op1Dup, simdBaseType, simdSize); + signMask2 = gtNewSimdIsPositiveNode(type, absOp1Dup, simdBaseType, simdSize); + signMask3 = gtNewSimdIsNegativeNode(type, absOp2Dup, simdBaseType, simdSize); + cmpMask = + gtNewSimdCmpOpNode(GT_LT, type, gtCloneExpr(absOp1Dup), gtCloneExpr(absOp2Dup), simdBaseType, simdSize); } - GenTree* mask1 = gtNewSimdBinOpNode(GT_AND, type, equalsMask, signMask1, simdBaseJitType, simdSize); - GenTree* mask2 = gtNewSimdBinOpNode(GT_AND, type, cmpMask, signMask2, simdBaseJitType, simdSize); - GenTree* mask3 = gtNewSimdBinOpNode(GT_OR, type, mask1, mask2, simdBaseJitType, simdSize); - mask3 = gtNewSimdBinOpNode(GT_OR, type, mask3, signMask3, simdBaseJitType, simdSize); + GenTree* mask1 = gtNewSimdBinOpNode(GT_AND, type, equalsMask, signMask1, simdBaseType, simdSize); + GenTree* mask2 = gtNewSimdBinOpNode(GT_AND, type, cmpMask, signMask2, simdBaseType, simdSize); + GenTree* mask3 = gtNewSimdBinOpNode(GT_OR, type, mask1, mask2, simdBaseType, simdSize); + mask3 = gtNewSimdBinOpNode(GT_OR, type, mask3, signMask3, simdBaseType, simdSize); - 
return gtNewSimdCndSelNode(type, mask3, gtCloneExpr(op1Dup), gtCloneExpr(op2Dup), simdBaseJitType, simdSize); + return gtNewSimdCndSelNode(type, mask3, gtCloneExpr(op1Dup), gtCloneExpr(op2Dup), simdBaseType, simdSize); } - return gtNewSimdMinMaxNativeNode(type, op1, op2, simdBaseJitType, simdSize, isMax); + return gtNewSimdMinMaxNativeNode(type, op1, op2, simdBaseType, simdSize, isMax); } //------------------------------------------------------------------------ @@ -25079,7 +24972,7 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, // type -- The type of the node to generate // op1 -- The first operand // op2 -- The second operand -// simdBaseJitType -- the base jit type of the node +// simdBaseType -- the base type of the node // simdSize -- the simd size of the node // isMax -- true to compute the maximum; otherwise, false for the minimum // @@ -25092,7 +24985,7 @@ GenTree* Compiler::gtNewSimdMinMaxNode(var_types type, // NaN or -0 can differ based on the underlying hardware. // GenTree* Compiler::gtNewSimdMinMaxNativeNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isMax) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize, bool isMax) { assert(op1 != nullptr); assert(op1->TypeIs(type)); @@ -25100,7 +24993,6 @@ GenTree* Compiler::gtNewSimdMinMaxNativeNode( assert(op2 != nullptr); assert(op2->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); bool isScalar = false; @@ -25151,8 +25043,8 @@ GenTree* Compiler::gtNewSimdMinMaxNativeNode( simdSize = 16; type = TYP_SIMD16; - op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseJitType, simdSize); - op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseType, simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseType, simdSize); intrinsic = isMax ? 
NI_X86Base_MaxScalar : NI_X86Base_MinScalar; } @@ -25173,8 +25065,8 @@ GenTree* Compiler::gtNewSimdMinMaxNativeNode( simdSize = 8; type = TYP_SIMD8; - op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseJitType, simdSize); - op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(type, op1, simdBaseType, simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(type, op2, simdBaseType, simdSize); intrinsic = isMax ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_MinScalar; } @@ -25200,11 +25092,11 @@ GenTree* Compiler::gtNewSimdMinMaxNativeNode( if (intrinsic != NI_Illegal) { - GenTree* retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); + GenTree* retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseType, simdSize); if (isScalar) { - retNode = gtNewSimdToScalarNode(simdBaseType, retNode, simdBaseJitType, simdSize); + retNode = gtNewSimdToScalarNode(simdBaseType, retNode, simdBaseType, simdSize); } return retNode; } @@ -25217,14 +25109,14 @@ GenTree* Compiler::gtNewSimdMinMaxNativeNode( // op1 = op1 < op2 // -or- // op1 = op1 > op2 - op1 = gtNewSimdCmpOpNode(isMax ? GT_GT : GT_LT, type, op1, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCmpOpNode(isMax ? 
GT_GT : GT_LT, type, op1, op2, simdBaseType, simdSize); // result = ConditionalSelect(op1, op1Dup, op2Dup) - return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize); + return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdNarrowNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -25235,7 +25127,6 @@ GenTree* Compiler::gtNewSimdNarrowNode( assert(op2 != nullptr); assert(op2->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType)); GenTree* tmp1; @@ -25252,7 +25143,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( var_types tmpSimdType = (simdSize == 64) ? TYP_SIMD32 : TYP_SIMD16; NamedIntrinsic intrinsicId; - CorInfoType opBaseJitType; + var_types opBaseType; switch (simdBaseType) { @@ -25267,7 +25158,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_AVX512_ConvertToVector128SByte; } - opBaseJitType = CORINFO_TYPE_SHORT; + opBaseType = TYP_SHORT; break; } @@ -25282,7 +25173,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_AVX512_ConvertToVector128Byte; } - opBaseJitType = CORINFO_TYPE_USHORT; + opBaseType = TYP_USHORT; break; } @@ -25297,7 +25188,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_AVX512_ConvertToVector128Int16; } - opBaseJitType = CORINFO_TYPE_INT; + opBaseType = TYP_INT; break; } @@ -25312,7 +25203,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_AVX512_ConvertToVector128UInt16; } - opBaseJitType = CORINFO_TYPE_UINT; + opBaseType = TYP_UINT; break; } @@ -25327,7 +25218,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_AVX512_ConvertToVector128Int32; } - opBaseJitType = CORINFO_TYPE_LONG; + opBaseType = 
TYP_LONG; break; } @@ -25342,7 +25233,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_AVX512_ConvertToVector128UInt32; } - opBaseJitType = CORINFO_TYPE_ULONG; + opBaseType = TYP_ULONG; break; } @@ -25361,7 +25252,7 @@ GenTree* Compiler::gtNewSimdNarrowNode( intrinsicId = NI_X86Base_ConvertToVector128Single; } - opBaseJitType = CORINFO_TYPE_DOUBLE; + opBaseType = TYP_DOUBLE; break; } @@ -25371,18 +25262,18 @@ GenTree* Compiler::gtNewSimdNarrowNode( } } - tmp1 = gtNewSimdHWIntrinsicNode(tmpSimdType, op1, intrinsicId, opBaseJitType, simdSize); - tmp2 = gtNewSimdHWIntrinsicNode(tmpSimdType, op2, intrinsicId, opBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(tmpSimdType, op1, intrinsicId, opBaseType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(tmpSimdType, op2, intrinsicId, opBaseType, simdSize); if (simdSize == 16) { - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_MoveLowToHigh, CORINFO_TYPE_FLOAT, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_MoveLowToHigh, TYP_FLOAT, simdSize); } intrinsicId = (simdSize == 64) ? 
NI_Vector256_ToVector512Unsafe : NI_Vector128_ToVector256Unsafe; - tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsicId, simdBaseJitType, simdSize / 2); - return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsicId, simdBaseType, simdSize / 2); + return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseType, simdSize); } else if (simdSize == 32) { @@ -25404,14 +25295,13 @@ GenTree* Compiler::gtNewSimdNarrowNode( GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); - tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE, - simdSize); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseType, simdSize); + tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, TYP_UBYTE, simdSize); - CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; + var_types permuteBaseType = (simdBaseType == TYP_BYTE) ? 
TYP_LONG : TYP_ULONG; return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64, - permuteBaseJitType, simdSize); + permuteBaseType, simdSize); } case TYP_SHORT: @@ -25442,14 +25332,13 @@ GenTree* Compiler::gtNewSimdNarrowNode( GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); - tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_USHORT, - simdSize); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseType, simdSize); + tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, TYP_USHORT, simdSize); - CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; + var_types permuteBaseType = (simdBaseType == TYP_BYTE) ? TYP_LONG : TYP_ULONG; return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64, - permuteBaseJitType, simdSize); + permuteBaseType, simdSize); } case TYP_INT: @@ -25470,17 +25359,17 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp3 = Avx2.UnpackLow(tmp1, tmp2); // return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32(); - CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; + var_types opBaseType = (simdBaseType == TYP_INT) ? 
TYP_LONG : TYP_ULONG; GenTree* op1Dup = fgMakeMultiUse(&op1); GenTree* op2Dup = fgMakeMultiUse(&op2); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize); - tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize); - tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseType, simdSize); + tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseType, simdSize); return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64, - opBaseJitType, simdSize); + opBaseType, simdSize); } case TYP_FLOAT: @@ -25496,15 +25385,13 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp2 = Avx.ConvertToVector128Single(op2); // return tmp1.WithUpper(tmp2); - CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE; + var_types opBaseType = TYP_DOUBLE; - tmp1 = - gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, opBaseJitType, simdSize); - tmp2 = - gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, opBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, opBaseType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, opBaseType, simdSize); - tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16); - return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseType, 16); + return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseType, simdSize); } default: @@ -25543,11 +25430,10 @@ GenTree* Compiler::gtNewSimdNarrowNode( GenTree* vecCon2 = gtCloneExpr(vecCon1); 
- tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_PackUnsignedSaturate, CORINFO_TYPE_UBYTE, - simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_PackUnsignedSaturate, TYP_UBYTE, simdSize); } case TYP_SHORT: @@ -25574,10 +25460,10 @@ GenTree* Compiler::gtNewSimdNarrowNode( GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_PackUnsignedSaturate, CORINFO_TYPE_USHORT, + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_PackUnsignedSaturate, TYP_USHORT, simdSize); } @@ -25598,10 +25484,10 @@ GenTree* Compiler::gtNewSimdNarrowNode( GenTree* op1Dup = fgMakeMultiUse(&op1); GenTree* op2Dup = fgMakeMultiUse(&op2); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_UnpackLow, simdBaseJitType, simdSize); - tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_X86Base_UnpackHigh, simdBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_UnpackLow, simdBaseType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_X86Base_UnpackHigh, simdBaseType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_UnpackLow, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_UnpackLow, simdBaseType, simdSize); } case TYP_FLOAT: 
@@ -25617,14 +25503,12 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp2 = Sse2.ConvertToVector128Single(op2); // return Sse.MoveLowToHigh(tmp1, tmp2); - CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE; + var_types opBaseType = TYP_DOUBLE; - tmp1 = - gtNewSimdHWIntrinsicNode(type, op1, NI_X86Base_ConvertToVector128Single, opBaseJitType, simdSize); - tmp2 = - gtNewSimdHWIntrinsicNode(type, op2, NI_X86Base_ConvertToVector128Single, opBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_X86Base_ConvertToVector128Single, opBaseType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_X86Base_ConvertToVector128Single, opBaseType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_MoveLowToHigh, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_X86Base_MoveLowToHigh, simdBaseType, simdSize); } default: @@ -25641,8 +25525,8 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1); // return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8); - return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType, + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseType, 8); + return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseType, simdSize); } else @@ -25650,9 +25534,8 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp1 = AdvSimd.ExtractNarrowingLower(op1); // return AdvSimd.ExtractNarrowingUpper(tmp1, op2); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8); - return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType, - simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, 
simdBaseType, 8); + return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseType, simdSize); } } else if (varTypeIsFloating(simdBaseType)) @@ -25661,12 +25544,12 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp2 = AdvSimd.InsertScalar(tmp1, op2); // return AdvSimd.Arm64.ConvertToSingleLower(tmp2); - CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE; + var_types tmp2BaseType = TYP_DOUBLE; - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize); - tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseType, simdSize); + tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseType, 16); - return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseType, simdSize); } else { @@ -25674,10 +25557,10 @@ GenTree* Compiler::gtNewSimdNarrowNode( // var tmp2 = tmp1.WithUpper(op2); // return AdvSimd.ExtractNarrowingLower(tmp2); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize); - tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, simdBaseJitType, 16); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseType, simdSize); + tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, simdBaseType, 16); - return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseType, simdSize); } #else #error Unsupported platform @@ -25690,13 +25573,13 @@ GenTree* Compiler::gtNewSimdNarrowNode( // Arguments: // type -- The type of the node // op1 -- The node to round -// simdBaseJitType -- the base jit type of the node +// 
simdBaseType -- the base type of the node // simdSize -- the simd size of the node // // Return Value: // The round node // -GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -25704,7 +25587,6 @@ GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, CorInfoType assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsFloating(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -25717,7 +25599,7 @@ GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, CorInfoType else if (simdSize == 64) { GenTree* op2 = gtNewIconNode(static_cast(FloatRoundingMode::ToNearestInteger)); - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseType, simdSize); } else { @@ -25737,7 +25619,7 @@ GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, CorInfoType #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } //------------------------------------------------------------------------ @@ -25749,7 +25631,7 @@ GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, CorInfoType // type -- The type of the node // op1 -- The values to shuffle // op2 -- The indices to pick from (variable) -// simdBaseJitType -- The base jit type of the node +// simdBaseType -- The base type of the node // simdSize -- The simd size of the node // isShuffleNative -- Whether we're making a ShuffleNative node vs a 
Shuffle one // @@ -25757,7 +25639,7 @@ GenTree* Compiler::gtNewSimdRoundNode(var_types type, GenTree* op1, CorInfoType // The shuffle node // GenTree* Compiler::gtNewSimdShuffleVariableNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isShuffleNative) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize, bool isShuffleNative) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -25765,7 +25647,6 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(op2 != nullptr); assert(op2->TypeIs(type)); assert((!op2->IsCnsVec()) || isShuffleNative); @@ -25807,19 +25688,19 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( assert(compIsaSupportedDebugOnly(InstructionSet_AVX512v2)); // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512v2_PermuteVar64x8, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512v2_PermuteVar64x8, simdBaseType, simdSize); retNode->SetReverseOp(); } else if (elementSize == 2) { // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar32x16, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar32x16, simdBaseType, simdSize); retNode->SetReverseOp(); } else if (elementSize == 4) { // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar16x32, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar16x32, simdBaseType, simdSize); retNode->SetReverseOp(); } else @@ -25827,13 +25708,13 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( assert(elementSize == 8); // swap the operands to 
match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar8x64, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar8x64, simdBaseType, simdSize); retNode->SetReverseOp(); } } else if ((elementSize == 1) && (simdSize == 16)) { - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Shuffle, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Shuffle, simdBaseType, simdSize); // high bit on index gives 0 already canUseSignedComparisonHint = true; @@ -25843,7 +25724,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( NamedIntrinsic intrinsic = NI_AVX512v2_PermuteVar32x8; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, intrinsic, simdBaseType, simdSize); retNode->SetReverseOp(); } else if ((elementSize == 2) && compOpportunisticallyDependsOn(InstructionSet_AVX512)) @@ -25851,7 +25732,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( NamedIntrinsic intrinsic = (simdSize == 16) ? 
NI_AVX512_PermuteVar8x16 : NI_AVX512_PermuteVar16x16; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, intrinsic, simdBaseType, simdSize); retNode->SetReverseOp(); } else if ((elementSize == 4) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_AVX))) @@ -25863,14 +25744,14 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( assert(compIsaSupportedDebugOnly(InstructionSet_AVX2)); // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseType, simdSize); retNode->SetReverseOp(); } else { assert(compIsaSupportedDebugOnly(InstructionSet_AVX)); - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX_PermuteVar, CORINFO_TYPE_FLOAT, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX_PermuteVar, TYP_FLOAT, simdSize); } } else if ((elementSize == 8) && (simdSize == 32) && compOpportunisticallyDependsOn(InstructionSet_AVX512)) @@ -25878,14 +25759,14 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( NamedIntrinsic intrinsic = NI_AVX512_PermuteVar4x64; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, intrinsic, simdBaseType, simdSize); retNode->SetReverseOp(); } else if ((elementSize == 8) && (simdSize == 16) && compOpportunisticallyDependsOn(InstructionSet_AVX512)) { GenTree* op1Copy = fgMakeMultiUse(&op1); // just use op1 again for the other variable NamedIntrinsic intrinsic = NI_AVX512_PermuteVar2x64x2; - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, op1Copy, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, 
op1, op2, op1Copy, intrinsic, simdBaseType, simdSize); } else { @@ -25905,29 +25786,30 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( } // the below is implemented for integral types - if (varTypeIsFloating(simdBaseType)) + var_types origSimdBaseType = simdBaseType; + + if (varTypeIsFloating(origSimdBaseType)) { assert(elementSize == 8); - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } // shift all indices to the left by 1 (long to int index, first step of converting long->int indices) cnsNode = gtNewIconNode(1, TYP_INT); if (simdSize == 32) { - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_ShiftLeftLogical, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_ShiftLeftLogical, simdBaseType, simdSize); } else { - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_ShiftLeftLogical, simdBaseJitType, - simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_ShiftLeftLogical, simdBaseType, simdSize); } // the below are implemented with float/int/uint - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UINT : CORINFO_TYPE_INT; - if (varTypeIsFloating(simdBaseType)) + simdBaseType = varTypeIsUnsigned(origSimdBaseType) ? 
TYP_UINT : TYP_INT; + if (varTypeIsFloating(origSimdBaseType)) { - simdBaseJitType = CORINFO_TYPE_FLOAT; + simdBaseType = TYP_FLOAT; } // shuffle & manipulate the long indices to int indices (e.g., 3 2 1 0 -> 6 7 4 5 2 3 0 1) @@ -25938,12 +25820,11 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( if (varTypeIsFloating(simdBaseType)) { GenTree* op2Dup = fgMakeMultiUse(&op2); - op2 = - gtNewSimdHWIntrinsicNode(type, op2, op2Dup, cnsNode, NI_AVX_Shuffle, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, op2Dup, cnsNode, NI_AVX_Shuffle, simdBaseType, simdSize); } else { - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_Shuffle, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_Shuffle, simdBaseType, simdSize); } } else @@ -25951,12 +25832,12 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( if (varTypeIsFloating(simdBaseType)) { GenTree* op2Dup = fgMakeMultiUse(&op2); - op2 = gtNewSimdHWIntrinsicNode(type, op2, op2Dup, cnsNode, NI_X86Base_Shuffle, simdBaseJitType, + op2 = gtNewSimdHWIntrinsicNode(type, op2, op2Dup, cnsNode, NI_X86Base_Shuffle, simdBaseType, simdSize); } else { - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_Shuffle, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_Shuffle, simdBaseType, simdSize); } } @@ -25969,18 +25850,18 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = orCns; - op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseJitType, simdSize); + op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseType, simdSize); // perform the shuffle with our int indices if (simdSize == 32) { // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseType, 
simdSize); retNode->SetReverseOp(); } else { - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX_PermuteVar, CORINFO_TYPE_FLOAT, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX_PermuteVar, TYP_FLOAT, simdSize); } } else if (simdSize == 32) @@ -26021,10 +25902,10 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( { // shift all indices to the left by tzcnt(size) = 1 cnsNode = gtNewIconNode(1, TYP_INT); - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_ShiftLeftLogical, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_ShiftLeftLogical, simdBaseType, simdSize); // the below are implemented with byte/sbyte - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; // shuffle with a pattern like 0 0 2 2 4 4 6 6 ... 0 0 2 2 ... (for shorts) // (note: the 0x10 bit is ignored for Avx2.Shuffle) @@ -26037,7 +25918,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = shufCns; - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_Shuffle, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AVX2_Shuffle, simdBaseType, simdSize); // or every second index with 1 (short) simd_t orCns = {}; @@ -26049,7 +25930,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = orCns; - op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseJitType, simdSize); + op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseType, simdSize); // create required clones of op2 op2Dup1 = fgMakeMultiUse(&op2); @@ -26073,7 +25954,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( uint8_t control = 1; cnsNode = gtNewIconNode(control, TYP_INT); - swap = gtNewSimdHWIntrinsicNode(type, op1Dup1, op1Dup2, cnsNode, NI_AVX2_Permute2x128, simdBaseJitType, 
+ swap = gtNewSimdHWIntrinsicNode(type, op1Dup1, op1Dup2, cnsNode, NI_AVX2_Permute2x128, simdBaseType, simdSize); } else @@ -26090,8 +25971,8 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( // shuffle with both the normal and swapped values // Vector256 shuf1 = Avx2.Shuffle(vector, indices); // Vector256 shuf2 = Avx2.Shuffle(swap, indices); - GenTree* shuf1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_Shuffle, simdBaseJitType, simdSize); - GenTree* shuf2 = gtNewSimdHWIntrinsicNode(type, swap, op2Dup1, NI_AVX2_Shuffle, simdBaseJitType, simdSize); + GenTree* shuf1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_Shuffle, simdBaseType, simdSize); + GenTree* shuf2 = gtNewSimdHWIntrinsicNode(type, swap, op2Dup1, NI_AVX2_Shuffle, simdBaseType, simdSize); // get the indices, and xor the cross-lane bit on the high 128-bit lane part of indices. // V256 indicesXord = indices ^ V256.Create(V128.Create((byte)0), V128.Create((byte)0x10))); @@ -26103,7 +25984,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = xorCns; - GenTree* indicesXord = gtNewSimdBinOpNode(GT_XOR, type, op2Dup2, cnsNode, simdBaseJitType, simdSize); + GenTree* indicesXord = gtNewSimdBinOpNode(GT_XOR, type, op2Dup2, cnsNode, simdBaseType, simdSize); // compare our modified indices to 0x0F (highest value not swapping lane), we get 0xFF when we are swapping // lane and 0x00 otherwise. 
we will also get "swapping lane" also when index is more than 32 @@ -26116,12 +25997,12 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( comparandCnd.u64[3] = 0x0F0F0F0F0F0F0F0F; cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = comparandCnd; - GenTree* selection = gtNewSimdCmpOpNode(GT_GT, type, indicesXord, cnsNode, CORINFO_TYPE_BYTE, simdSize); + GenTree* selection = gtNewSimdCmpOpNode(GT_GT, type, indicesXord, cnsNode, TYP_BYTE, simdSize); // blend our two shuffles based on whether each element swaps lanes or not // return Avx2.BlendVariable(shuf1, shuf2, selection); - retNode = gtNewSimdHWIntrinsicNode(type, shuf1, shuf2, selection, NI_AVX2_BlendVariable, simdBaseJitType, - simdSize); + retNode = + gtNewSimdHWIntrinsicNode(type, shuf1, shuf2, selection, NI_AVX2_BlendVariable, simdBaseType, simdSize); } else { @@ -26136,21 +26017,21 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( { if (elementSize == 4) { - simdBaseJitType = CORINFO_TYPE_UINT; + simdBaseType = TYP_UINT; } else { assert(elementSize == 8); - simdBaseJitType = CORINFO_TYPE_ULONG; + simdBaseType = TYP_ULONG; } } // shift all indices to the left by tzcnt(size) cnsNode = gtNewIconNode(BitOperations::TrailingZeroCount(static_cast(elementSize)), TYP_INT); - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_ShiftLeftLogical, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_ShiftLeftLogical, simdBaseType, simdSize); // the below are implemented with byte/sbyte - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; // we need to convert the indices to byte indices // shuffle with a pattern like 0 0 2 2 4 4 6 6 ... 
(for short, and similar for larger) @@ -26167,7 +26048,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = shufCns; - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_Shuffle, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_X86Base_Shuffle, simdBaseType, simdSize); // or the relevant bits @@ -26180,11 +26061,11 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = orCns; - op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseJitType, simdSize); + op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseType, simdSize); // apply normal byte shuffle now that we've converted it - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Shuffle, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Shuffle, simdBaseType, simdSize); } } #elif defined(TARGET_ARM64) @@ -26207,25 +26088,25 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( { if (elementSize == 4) { - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; } else { assert(elementSize == 8); - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; } } - if ((simdSize == 16) && (simdBaseJitType == CORINFO_TYPE_INT)) + if ((simdSize == 16) && (simdBaseType == TYP_INT)) { - simdBaseJitType = CORINFO_TYPE_UINT; + simdBaseType = TYP_UINT; } // shift all indices to the left by tzcnt(size) cnsNode = gtNewIconNode(BitOperations::TrailingZeroCount(static_cast(elementSize)), TYP_INT); - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AdvSimd_ShiftLeftLogical, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, NI_AdvSimd_ShiftLeftLogical, simdBaseType, simdSize); // VectorTableLookup is only valid on byte/sbyte - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? 
CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; simd_t shufCns = {}; for (size_t index = 0; index < elementCount; index++) @@ -26239,7 +26120,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = shufCns; - op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, lookupIntrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(type, op2, cnsNode, lookupIntrinsic, simdBaseType, simdSize); // or the relevant bits simd_t orCns = {}; @@ -26251,10 +26132,10 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( cnsNode = gtNewVconNode(type); cnsNode->AsVecCon()->gtSimdVal = orCns; - op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseJitType, simdSize); + op2 = gtNewSimdBinOpNode(GT_OR, type, op2, cnsNode, simdBaseType, simdSize); } - retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -26272,19 +26153,19 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( assert(op2DupSafe != nullptr); - // get the CorInfoType used for the index comparison - CorInfoType corType = CORINFO_TYPE_UBYTE; + // get the var_types used for the index comparison + var_types baseType = TYP_UBYTE; if (elementSize == 2) { - corType = CORINFO_TYPE_USHORT; + baseType = TYP_USHORT; } else if (elementSize == 4) { - corType = CORINFO_TYPE_UINT; + baseType = TYP_UINT; } else if (elementSize == 8) { - corType = CORINFO_TYPE_ULONG; + baseType = TYP_ULONG; } // track whether we need to xor the high bit from the comparand @@ -26294,18 +26175,18 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( // if the hardware doesn't support direct unsigned comparison, we attempt to use signed comparison if (!compOpportunisticallyDependsOn(InstructionSet_AVX512)) { - 
corType = CORINFO_TYPE_BYTE; + baseType = TYP_BYTE; if (elementSize == 2) { - corType = CORINFO_TYPE_SHORT; + baseType = TYP_SHORT; } else if (elementSize == 4) { - corType = CORINFO_TYPE_INT; + baseType = TYP_INT; } else if (elementSize == 8) { - corType = CORINFO_TYPE_LONG; + baseType = TYP_LONG; } // if we can't use signed comparison for free, update the comparand and op2DupSafe appropriately. @@ -26315,9 +26196,9 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( subComparandNode = true; uint64_t subtractionValue = static_cast(1) << (elementSize * 8 - 1); GenTree* subtraction = - gtNewSimdCreateBroadcastNode(type, gtNewLconNode(subtractionValue), corType, simdSize); + gtNewSimdCreateBroadcastNode(type, gtNewLconNode(subtractionValue), baseType, simdSize); - op2DupSafe = gtNewSimdBinOpNode(GT_SUB, type, op2DupSafe, subtraction, corType, simdSize); + op2DupSafe = gtNewSimdBinOpNode(GT_SUB, type, op2DupSafe, subtraction, baseType, simdSize); } } #endif @@ -26329,13 +26210,13 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( uint64_t subtraction = (uint64_t)1 << (elementSize * 8 - 1); comparandValue -= subtraction; } - GenTree* comparand = gtNewSimdCreateBroadcastNode(type, gtNewLconNode(comparandValue), corType, simdSize); + GenTree* comparand = gtNewSimdCreateBroadcastNode(type, gtNewLconNode(comparandValue), baseType, simdSize); - assert(genTypeSize(JitType2PreciseVarType(corType)) == elementSize); + assert(genTypeSize(baseType) == elementSize); // create the mask node (op2 < comparand), and the result node (mask & nativeResult) - GenTree* mask = gtNewSimdCmpOpNode(GT_LT, type, op2DupSafe, comparand, corType, simdSize); - retNode = gtNewSimdBinOpNode(GT_AND, type, retNode, mask, simdBaseJitType, simdSize); + GenTree* mask = gtNewSimdCmpOpNode(GT_LT, type, op2DupSafe, comparand, baseType, simdSize); + retNode = gtNewSimdBinOpNode(GT_AND, type, retNode, mask, simdBaseType, simdSize); } else { @@ -26352,7 +26233,7 @@ GenTree* 
Compiler::gtNewSimdShuffleVariableNode( // type -- The type of the node // op1 -- The values to shuffle // op2 -- The indices to pick from -// simdBaseJitType -- The base jit type of the node +// simdBaseType -- The base type of the node // simdSize -- The simd size of the node // isShuffleNative -- Whether we're making a ShuffleNative node vs a Shuffle one // @@ -26360,7 +26241,7 @@ GenTree* Compiler::gtNewSimdShuffleVariableNode( // The shuffle node // GenTree* Compiler::gtNewSimdShuffleNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isShuffleNative) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize, bool isShuffleNative) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -26374,10 +26255,9 @@ GenTree* Compiler::gtNewSimdShuffleNode( // If op2 is not constant, call into the gtNewSimdShuffleVariableNode routine if (!op2->IsCnsVec()) { - return gtNewSimdShuffleVariableNode(type, op1, op2, simdBaseJitType, simdSize, isShuffleNative); + return gtNewSimdShuffleVariableNode(type, op1, op2, simdBaseType, simdSize, isShuffleNative); } - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); size_t elementSize = genTypeSize(simdBaseType); @@ -26410,7 +26290,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( if (isShuffleNative && gotInvalidIndex) { // Call variable implementation. - return gtNewSimdShuffleVariableNode(type, op1, op2, simdBaseJitType, simdSize, isShuffleNative); + return gtNewSimdShuffleVariableNode(type, op1, op2, simdBaseType, simdSize, isShuffleNative); } if (hasIdentityShuffle) { @@ -26518,7 +26398,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( ((!crossLane) && (needsZero || (elementSize < 4) || ((elementSize == 4) && differsByLane)))) { // we want to treat our type like byte here - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? 
CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; uint8_t leftWants = 0; // result left lane wants which lanes bitfield (1 - left, 2 - right) uint8_t rightWants = 0; // result right lane wants which lanes bitfield (1 - left, 2 - right) @@ -26585,7 +26465,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( cnsNode = gtNewIconNode(control); retNode = gtNewSimdHWIntrinsicNode(type, retNode, retNodeDup, cnsNode, NI_AVX2_Permute2x128, - simdBaseJitType, simdSize); + simdBaseType, simdSize); } // if we have a non-default shuffle mask, we need to do Avx2.Shuffle @@ -26594,7 +26474,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimdVal = vecCns; - retNode = gtNewSimdHWIntrinsicNode(type, retNode, op2, NI_AVX2_Shuffle, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, retNode, op2, NI_AVX2_Shuffle, simdBaseType, simdSize); } } else @@ -26606,7 +26486,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( uint8_t control = 1; // 0b00000001 cnsNode = gtNewIconNode(control); GenTree* swap = gtNewSimdHWIntrinsicNode(type, op1Dup1, op1Dup2, cnsNode, NI_AVX2_Permute2x128, - simdBaseJitType, simdSize); + simdBaseType, simdSize); // if we have non-default shuffle mask if (nonDefaultShuffleMask) @@ -26618,15 +26498,15 @@ GenTree* Compiler::gtNewSimdShuffleNode( GenTree* op2Dup = fgMakeMultiUse(&op2); // shuffle both op1 and swap(op1) - op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_Shuffle, simdBaseJitType, simdSize); - swap = gtNewSimdHWIntrinsicNode(type, swap, op2Dup, NI_AVX2_Shuffle, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_Shuffle, simdBaseType, simdSize); + swap = gtNewSimdHWIntrinsicNode(type, swap, op2Dup, NI_AVX2_Shuffle, simdBaseType, simdSize); } // select the appropriate values GenTree* selNode = gtNewVconNode(type); selNode->AsVecCon()->gtSimdVal = selCns; - retNode = gtNewSimdHWIntrinsicNode(type, op1, 
swap, selNode, NI_AVX2_BlendVariable, simdBaseJitType, - simdSize); + retNode = + gtNewSimdHWIntrinsicNode(type, op1, swap, selNode, NI_AVX2_BlendVariable, simdBaseType, simdSize); } assert(retNode != nullptr); @@ -26649,12 +26529,12 @@ GenTree* Compiler::gtNewSimdShuffleNode( { op2 = gtNewIconNode(immediate); GenTree* op1Copy = fgMakeMultiUse(&op1); - return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX_Shuffle, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX_Shuffle, simdBaseType, simdSize); } else { op2 = gtNewIconNode(immediate); - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_Shuffle, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_Shuffle, simdBaseType, simdSize); } } @@ -26670,7 +26550,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseType, simdSize); } } else if (elementSize == 2) @@ -26686,7 +26566,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar16x16, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar16x16, simdBaseType, simdSize); } else if (elementSize == 1) { @@ -26696,7 +26576,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512v2_PermuteVar32x8, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512v2_PermuteVar32x8, simdBaseType, simdSize); } else { @@ -26714,14 +26594,14 @@ 
GenTree* Compiler::gtNewSimdShuffleNode( } op2 = gtNewIconNode(immediate); GenTree* op1Copy = fgMakeMultiUse(&op1); - return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX_Shuffle, CORINFO_TYPE_DOUBLE, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX_Shuffle, TYP_DOUBLE, simdSize); } // otherwise, use vpermpd. else { cnsNode = gtNewIconNode(control); - retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX2_Permute4x64, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX2_Permute4x64, simdBaseType, simdSize); } } } @@ -26740,8 +26620,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( } op2 = gtNewIconNode(immediate); GenTree* op1Copy = fgMakeMultiUse(&op1); - return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX512_Shuffle, CORINFO_TYPE_DOUBLE, - simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX512_Shuffle, TYP_DOUBLE, simdSize); } // if the element size is 32-bit, try to use vpshufd/vshufps instead of vpshufb, @@ -26758,21 +26637,20 @@ GenTree* Compiler::gtNewSimdShuffleNode( { op2 = gtNewIconNode(immediate); GenTree* op1Copy = fgMakeMultiUse(&op1); - return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX512_Shuffle, simdBaseJitType, - simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op1Copy, op2, NI_AVX512_Shuffle, simdBaseType, simdSize); } else { op2 = gtNewIconNode(immediate); - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_Shuffle, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_Shuffle, simdBaseType, simdSize); } } op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimdVal = vecCns; - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_Shuffle, simdBaseJitType, simdSize); + simdBaseType = varTypeIsUnsigned(simdBaseType) ? 
TYP_UBYTE : TYP_BYTE; + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_Shuffle, simdBaseType, simdSize); } else if (elementSize == 4) { @@ -26785,7 +26663,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar16x32, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar16x32, simdBaseType, simdSize); } else if (elementSize == 2) { @@ -26798,7 +26676,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar32x16, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar32x16, simdBaseType, simdSize); } else if (elementSize == 1) { @@ -26807,7 +26685,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512v2_PermuteVar64x8, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512v2_PermuteVar64x8, simdBaseType, simdSize); } else { @@ -26822,7 +26700,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar8x64, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX512_PermuteVar8x64, simdBaseType, simdSize); } assert(retNode != nullptr); @@ -26831,19 +26709,19 @@ GenTree* Compiler::gtNewSimdShuffleNode( { op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimdVal = mskCns; - retNode = gtNewSimdBinOpNode(GT_AND, type, op2, retNode, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_AND, type, 
op2, retNode, simdBaseType, simdSize); } return retNode; } else if (needsZero) { - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimd16Val = vecCns.v128[0]; - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Shuffle, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_X86Base_Shuffle, simdBaseType, simdSize); } else { @@ -26854,26 +26732,25 @@ GenTree* Compiler::gtNewSimdShuffleNode( // down into a TYP_INT or TYP_UINT based shuffle, but that's additional complexity for no // real benefit since shuffle gets its own port rather than using the fp specific ports. - simdBaseJitType = CORINFO_TYPE_DOUBLE; - simdBaseType = TYP_DOUBLE; + simdBaseType = TYP_DOUBLE; } cnsNode = gtNewIconNode(control); if (varTypeIsIntegral(simdBaseType)) { - retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_X86Base_Shuffle, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_X86Base_Shuffle, simdBaseType, simdSize); } else if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { - retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX_Permute, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX_Permute, simdBaseType, simdSize); } else { // for double we need SSE2, but we can't use the integral path ^ because we still need op1Dup here NamedIntrinsic ni = NI_X86Base_Shuffle; GenTree* op1Dup = fgMakeMultiUse(&op1); - retNode = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, cnsNode, ni, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, cnsNode, ni, simdBaseType, simdSize); } } @@ -26885,7 +26762,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimdVal = mskCns; - retNode = gtNewSimdBinOpNode(GT_AND, type, op2, retNode, 
simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_AND, type, op2, retNode, simdBaseType, simdSize); } return retNode; @@ -26920,7 +26797,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( { GenTree* op1Clone = fgMakeMultiUse(&op1); return gtNewSimdHWIntrinsicNode(type, op1, op1Clone, gtNewIconNode(1), NI_AdvSimd_ExtractVector128, - CORINFO_TYPE_ULONG, simdSize); + TYP_ULONG, simdSize); } } @@ -26932,18 +26809,18 @@ GenTree* Compiler::gtNewSimdShuffleNode( } // VectorTableLookup is only valid on byte/sbyte - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimdVal = vecCns; - return gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -26951,7 +26828,6 @@ GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, CorInfoType s assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsFloating(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -26983,7 +26859,7 @@ GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, CorInfoType s #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } 
//---------------------------------------------------------------------------------------------- @@ -26992,13 +26868,13 @@ GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, CorInfoType s // Arguments: // op1 - The address to which op2 is stored // op2 - The SIMD value to be stored at op1 -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created Store node // -GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { assert(op1 != nullptr); assert(op2 != nullptr); @@ -27006,7 +26882,6 @@ GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, CorInfoType si assert(varTypeIsSIMD(op2)); assert(getSIMDTypeForSize(simdSize) == op2->TypeGet()); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); return gtNewStoreIndNode(op2->TypeGet(), op1, op2); @@ -27018,13 +26893,13 @@ GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, CorInfoType si // Arguments: // op1 - The address to which op2 is stored // op2 - The SIMD value to be stored at op1 -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created StoreAligned node // -GenTree* Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { #if defined(TARGET_XARCH) assert(op1 != nullptr); @@ -27033,7 +26908,6 @@ GenTree* Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* 
op2, CorInfo assert(varTypeIsSIMD(op2)); assert(getSIMDTypeForSize(simdSize) == op2->TypeGet()); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27051,14 +26925,14 @@ GenTree* Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* op2, CorInfo intrinsic = NI_X86Base_StoreAligned; } - return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); + return gtNewSimdStoreNode(op1, op2, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -27070,16 +26944,13 @@ GenTree* Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* op2, CorInfo // Arguments: // op1 - The address to which op2 is stored // op2 - The SIMD value to be stored at op1 -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created StoreNonTemporal node // -GenTree* Compiler::gtNewSimdStoreNonTemporalNode(GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdStoreNonTemporalNode(GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { #if defined(TARGET_XARCH) assert(op1 != nullptr); @@ -27088,7 +26959,6 @@ GenTree* Compiler::gtNewSimdStoreNonTemporalNode(GenTree* op1, assert(varTypeIsSIMD(op2)); assert(getSIMDTypeForSize(simdSize) == op2->TypeGet()); - var_types simdBaseType = 
JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27106,20 +26976,20 @@ GenTree* Compiler::gtNewSimdStoreNonTemporalNode(GenTree* op1, intrinsic = NI_X86Base_StoreAlignedNonTemporal; } - return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); + return gtNewSimdStoreNode(op1, op2, simdBaseType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { var_types simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); @@ -27127,7 +26997,6 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si assert(op1 != nullptr); assert(op1->TypeIs(simdType)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27139,8 +27008,8 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si { GenTree* op1Dup = fgMakeMultiUse(&op1); - op1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseJitType, simdSize); - op1Dup = gtNewSimdGetUpperNode(TYP_SIMD32, op1Dup, simdBaseJitType, simdSize); + op1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseType, simdSize); + op1Dup = gtNewSimdGetUpperNode(TYP_SIMD32, op1Dup, simdBaseType, simdSize); if 
(varTypeIsFloating(simdBaseType)) { @@ -27148,22 +27017,22 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si // consistently adding values together. Since many operations // end up operating on 128-bit lanes, we break sum the same way. - op1 = gtNewSimdSumNode(type, op1, simdBaseJitType, 32); - op1Dup = gtNewSimdSumNode(type, op1Dup, simdBaseJitType, 32); + op1 = gtNewSimdSumNode(type, op1, simdBaseType, 32); + op1Dup = gtNewSimdSumNode(type, op1Dup, simdBaseType, 32); return gtNewOperNode(GT_ADD, type, op1, op1Dup); } simdSize = 32; - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD32, op1, op1Dup, simdBaseJitType, 32); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD32, op1, op1Dup, simdBaseType, 32); } if (simdSize == 32) { GenTree* op1Dup = fgMakeMultiUse(&op1); - op1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); - op1Dup = gtNewSimdGetUpperNode(TYP_SIMD16, op1Dup, simdBaseJitType, simdSize); + op1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseType, simdSize); + op1Dup = gtNewSimdGetUpperNode(TYP_SIMD16, op1Dup, simdBaseType, simdSize); if (varTypeIsFloating(simdBaseType)) { @@ -27171,14 +27040,14 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si // consistently adding values together. Since many operations // end up operating on 128-bit lanes, we break sum the same way. 
- op1 = gtNewSimdSumNode(type, op1, simdBaseJitType, 16); - op1Dup = gtNewSimdSumNode(type, op1Dup, simdBaseJitType, 16); + op1 = gtNewSimdSumNode(type, op1, simdBaseType, 16); + op1Dup = gtNewSimdSumNode(type, op1Dup, simdBaseType, 16); return gtNewOperNode(GT_ADD, type, op1, op1Dup); } simdSize = 16; - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Dup, simdBaseJitType, 16); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Dup, simdBaseType, 16); } assert(simdSize == 16); @@ -27193,32 +27062,32 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si { // The permute below gives us [0, 1, 2, 3] -> [1, 0, 3, 2] op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode((int)0b10110001, TYP_INT), NI_AVX_Permute, - simdBaseJitType, simdSize); + simdBaseType, simdSize); // The add below now results in [0 + 1, 1 + 0, 2 + 3, 3 + 2] - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseType, simdSize); op1Shuffled = fgMakeMultiUse(&op1); // The permute below gives us [0 + 1, 1 + 0, 2 + 3, 3 + 2] -> [2 + 3, 3 + 2, 0 + 1, 1 + 0] op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode((int)0b01001110, TYP_INT), NI_AVX_Permute, - simdBaseJitType, simdSize); + simdBaseType, simdSize); } else { // The shuffle below gives us [0, 1, 2, 3] -> [1, 0, 3, 2] op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op1Shuffled, gtNewIconNode((int)0b10110001, TYP_INT), - NI_X86Base_Shuffle, simdBaseJitType, simdSize); + NI_X86Base_Shuffle, simdBaseType, simdSize); op1Shuffled = fgMakeMultiUse(&op1Shuffled); // The add below now results in [0 + 1, 1 + 0, 2 + 3, 3 + 2] - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseType, simdSize); op1Shuffled = fgMakeMultiUse(&op1); // The shuffle below gives us [0 + 1, 1 + 0, 2 + 3, 3 
+ 2] -> [2 + 3, 3 + 2, 0 + 1, 1 + 0] op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op1Shuffled, gtNewIconNode((int)0b01001110, TYP_INT), - NI_X86Base_Shuffle, simdBaseJitType, simdSize); + NI_X86Base_Shuffle, simdBaseType, simdSize); op1Shuffled = fgMakeMultiUse(&op1Shuffled); } // Finally adding the results gets us [(0 + 1) + (2 + 3), (1 + 0) + (3 + 2), (2 + 3) + (0 + 1), (3 + 2) + (1 // + 0)] - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseJitType, simdSize); - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseType, simdSize); + return gtNewSimdToScalarNode(type, op1, simdBaseType, simdSize); } else { @@ -27228,18 +27097,18 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si { // The permute below gives us [0, 1] -> [1, 0] op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode((int)0b0001, TYP_INT), NI_AVX_Permute, - simdBaseJitType, simdSize); + simdBaseType, simdSize); } else { // The shuffle below gives us [0, 1] -> [1, 0] op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op1Shuffled, gtNewIconNode((int)0b0001, TYP_INT), - NI_X86Base_Shuffle, simdBaseJitType, simdSize); + NI_X86Base_Shuffle, simdBaseType, simdSize); op1Shuffled = fgMakeMultiUse(&op1Shuffled); } // Finally adding the results gets us [0 + 1, 1 + 0] - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseJitType, simdSize); - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, op1, op1Shuffled, simdBaseType, simdSize); + return gtNewSimdToScalarNode(type, op1, simdBaseType, simdSize); } } @@ -27257,12 +27126,12 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si { tmp = fgMakeMultiUse(&op1); opShifted = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(shiftVal, TYP_INT), - NI_X86Base_ShiftRightLogical128BitLane, 
simdBaseJitType, simdSize); - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, opShifted, tmp, simdBaseJitType, simdSize); + NI_X86Base_ShiftRightLogical128BitLane, simdBaseType, simdSize); + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, opShifted, tmp, simdBaseType, simdSize); shiftVal = shiftVal / 2; } - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); + return gtNewSimdToScalarNode(type, op1, simdBaseType, simdSize); #elif defined(TARGET_ARM64) switch (simdBaseType) @@ -27272,8 +27141,8 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si case TYP_SHORT: case TYP_USHORT: { - tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize); - return gtNewSimdToScalarNode(type, tmp, simdBaseJitType, 8); + tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseType, simdSize); + return gtNewSimdToScalarNode(type, tmp, simdBaseType, 8); } case TYP_INT: @@ -27282,20 +27151,20 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si if (simdSize == 8) { tmp = fgMakeMultiUse(&op1); - tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize); + tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseType, simdSize); } else { - tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16); + tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseType, 16); } - return gtNewSimdToScalarNode(type, tmp, simdBaseJitType, 8); + return gtNewSimdToScalarNode(type, tmp, simdBaseType, 8); } case TYP_FLOAT: { if (simdSize == 8) { - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseType, simdSize); } else @@ -27306,11 +27175,11 @@ GenTree* Compiler::gtNewSimdSumNode(var_types 
type, GenTree* op1, CorInfoType si for (int i = 0; i < haddCount; i++) { tmp = fgMakeMultiUse(&op1); - op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType, + op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseType, simdSize); } } - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); + return gtNewSimdToScalarNode(type, op1, simdBaseType, simdSize); } case TYP_DOUBLE: @@ -27319,10 +27188,10 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si { if (simdSize == 16) { - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseType, simdSize); } - return gtNewSimdToScalarNode(type, op1, simdBaseJitType, 8); + return gtNewSimdToScalarNode(type, op1, simdBaseType, 8); } default: { @@ -27344,19 +27213,14 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si // op2 - The value of the second operand: 'B' // op3 - The value of the third operand: 'C' // op4 - The constant control byte used to determine how 'A', 'B', and 'C' are manipulated -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic +// simdBaseType - The base type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic // // Returns: // The created TernaryLogic node that performs the specified operation on 'A', 'B', and 'C'. 
// -GenTree* Compiler::gtNewSimdTernaryLogicNode(var_types type, - GenTree* op1, - GenTree* op2, - GenTree* op3, - GenTree* op4, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Compiler::gtNewSimdTernaryLogicNode( + var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, var_types simdBaseType, unsigned simdSize) { assert(compIsaSupportedDebugOnly(InstructionSet_AVX512)); @@ -27375,12 +27239,11 @@ GenTree* Compiler::gtNewSimdTernaryLogicNode(var_types type, assert(op4 != nullptr); assert(genActualType(op4) == TYP_INT); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_AVX512_TernaryLogic; - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, op4, intrinsic, simdBaseType, simdSize); } #endif // TARGET_XARCH @@ -27390,20 +27253,19 @@ GenTree* Compiler::gtNewSimdTernaryLogicNode(var_types type, // Arguments: // type - The return type of SIMD node being created. // op1 - The SIMD operand. -// simdBaseJitType - The base JIT type of SIMD type of the intrinsic. +// simdBaseType - The base type of SIMD type of the intrinsic. // simdSize - The size of the SIMD type of the intrinsic. // // Returns: // The created node that has the ToScalar implementation. 
// -GenTree* Compiler::gtNewSimdToScalarNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdToScalarNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsArithmetic(type)); assert(op1 != nullptr); assert(varTypeIsSIMD(op1)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27435,7 +27297,7 @@ GenTree* Compiler::gtNewSimdToScalarNode(var_types type, GenTree* op1, CorInfoTy #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } //------------------------------------------------------------------------ @@ -27444,13 +27306,13 @@ GenTree* Compiler::gtNewSimdToScalarNode(var_types type, GenTree* op1, CorInfoTy // Arguments: // type -- The type of the node // op1 -- The node to truncate -// simdBaseJitType -- the base jit type of the node +// simdBaseType -- the base type of the node // simdSize -- the simd size of the node // // Return Value: // The truncate node // -GenTree* Compiler::gtNewSimdTruncNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdTruncNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -27458,7 +27319,6 @@ GenTree* Compiler::gtNewSimdTruncNode(var_types type, GenTree* op1, CorInfoType assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsFloating(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27471,7 +27331,7 @@ GenTree* Compiler::gtNewSimdTruncNode(var_types type, GenTree* op1, CorInfoType else if (simdSize == 64) { GenTree* op2 =
gtNewIconNode(static_cast<int>(FloatRoundingMode::ToZero)); - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX512_RoundScale, simdBaseType, simdSize); } else { @@ -27491,11 +27351,11 @@ GenTree* Compiler::gtNewSimdTruncNode(var_types type, GenTree* op1, CorInfoType #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdUnOpNode( - genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) + genTreeOps op, var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -27503,7 +27363,6 @@ GenTree* Compiler::gtNewSimdUnOpNode( assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); #if defined(TARGET_ARM64) @@ -27513,29 +27372,25 @@ GenTree* Compiler::gtNewSimdUnOpNode( { case TYP_UBYTE: { - simdBaseJitType = CORINFO_TYPE_BYTE; - simdBaseType = TYP_BYTE; + simdBaseType = TYP_BYTE; break; } case TYP_USHORT: { - simdBaseJitType = CORINFO_TYPE_SHORT; - simdBaseType = TYP_SHORT; + simdBaseType = TYP_SHORT; break; } case TYP_UINT: { - simdBaseJitType = CORINFO_TYPE_INT; - simdBaseType = TYP_INT; + simdBaseType = TYP_INT; break; } case TYP_ULONG: { - simdBaseJitType = CORINFO_TYPE_LONG; - simdBaseType = TYP_LONG; + simdBaseType = TYP_LONG; break; } @@ -27552,7 +27407,7 @@ GenTree* Compiler::gtNewSimdUnOpNode( if (intrinsic != NI_Illegal) { - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } switch (op) @@ -27564,14 +27419,14
@@ GenTree* Compiler::gtNewSimdUnOpNode( { // op1 ^ -0.0 GenTree* negZero = gtNewDconNode(-0.0, simdBaseType); - negZero = gtNewSimdCreateBroadcastNode(type, negZero, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_XOR, type, op1, negZero, simdBaseJitType, simdSize); + negZero = gtNewSimdCreateBroadcastNode(type, negZero, simdBaseType, simdSize); + return gtNewSimdBinOpNode(GT_XOR, type, op1, negZero, simdBaseType, simdSize); } else { // Zero - op1 GenTree* zero = gtNewZeroConNode(type); - return gtNewSimdBinOpNode(GT_SUB, type, zero, op1, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_SUB, type, zero, op1, simdBaseType, simdSize); } } @@ -27579,7 +27434,7 @@ GenTree* Compiler::gtNewSimdUnOpNode( { // op1 ^ AllBitsSet GenTree* allBitsSet = gtNewAllBitsSetConNode(type); - return gtNewSimdBinOpNode(GT_XOR, type, op1, allBitsSet, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_XOR, type, op1, allBitsSet, simdBaseType, simdSize); } #endif // TARGET_XARCH @@ -27590,7 +27445,7 @@ GenTree* Compiler::gtNewSimdUnOpNode( } } -GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -27598,7 +27453,6 @@ GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27608,7 +27462,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo #if defined(TARGET_XARCH) if (simdSize == 64) { - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseJitType, simdSize); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, 
simdBaseType, simdSize); switch (simdBaseType) { @@ -27661,13 +27515,13 @@ GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseType, simdSize); } else if (simdSize == 32) { assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2)); - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseType, simdSize); switch (simdBaseType) { @@ -27705,7 +27559,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseType, simdSize); } else { @@ -27745,12 +27599,12 @@ GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } #elif defined(TARGET_ARM64) if (simdSize == 16) { - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseType, simdSize); } else { @@ -27773,11 +27627,11 @@ GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, intrinsic, simdBaseJitType, 8); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, intrinsic, simdBaseType, 8); if (simdSize == 8) { - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, tmp1, simdBaseJitType, 16); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, tmp1, simdBaseType, 16); } return tmp1; @@ -27786,7 +27640,7 @@ GenTree* 
Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfo #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) +GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, var_types simdBaseType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -27794,7 +27648,6 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo assert(op1 != nullptr); assert(op1->TypeIs(type)); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType)); NamedIntrinsic intrinsic = NI_Illegal; @@ -27804,7 +27657,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo #if defined(TARGET_XARCH) if (simdSize == 64) { - tmp1 = gtNewSimdGetUpperNode(TYP_SIMD32, op1, simdBaseJitType, simdSize); + tmp1 = gtNewSimdGetUpperNode(TYP_SIMD32, op1, simdBaseType, simdSize); switch (simdBaseType) { @@ -27857,13 +27710,13 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseType, simdSize); } else if (simdSize == 32) { assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2)); - tmp1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); + tmp1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseType, simdSize); switch (simdBaseType) { @@ -27901,7 +27754,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseType, 
simdSize); } else if (varTypeIsFloating(simdBaseType)) { @@ -27909,13 +27762,13 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo GenTree* op1Dup = fgMakeMultiUse(&op1); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_X86Base_MoveHighToLow, simdBaseJitType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp1, NI_X86Base_ConvertToVector128Double, simdBaseJitType, simdSize); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_X86Base_MoveHighToLow, simdBaseType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, NI_X86Base_ConvertToVector128Double, simdBaseType, simdSize); } else { tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_X86Base_ShiftRightLogical128BitLane, - simdBaseJitType, simdSize); + simdBaseType, simdSize); switch (simdBaseType) { @@ -27947,7 +27800,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseType, simdSize); } #elif defined(TARGET_ARM64) if (simdSize == 16) @@ -27967,7 +27820,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseType, simdSize); } else { @@ -27990,8 +27843,8 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo assert(intrinsic != NI_Illegal); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize); - return gtNewSimdGetUpperNode(TYP_SIMD8, tmp1, simdBaseJitType, 16); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseType, simdSize); + return gtNewSimdGetUpperNode(TYP_SIMD8, tmp1, simdBaseType, 16); } #else #error Unsupported platform @@ -27999,10 
+27852,9 @@ GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfo } GenTree* Compiler::gtNewSimdWithElementNode( - var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, GenTree* op3, var_types simdBaseType, unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement; - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); assert(varTypeIsArithmetic(op3)); @@ -28026,7 +27878,7 @@ GenTree* Compiler::gtNewSimdWithElementNode( case TYP_DOUBLE: if (simdSize == 8) { - return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseType, simdSize); } break; @@ -28062,7 +27914,7 @@ GenTree* Compiler::gtNewSimdWithElementNode( op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound); } - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseType, simdSize); } #ifdef TARGET_ARM64 @@ -28122,7 +27974,7 @@ GenTreeFieldList* Compiler::gtConvertParamOpToFieldList(GenTree* op, unsigned fi { CORINFO_FIELD_HANDLE fieldHandle = info.compCompHnd->getFieldInClass(clsHnd, fieldId); JitType2PreciseVarType(info.compCompHnd->getFieldType(fieldHandle, &structType)); - getBaseJitTypeAndSizeOfSIMDType(structType, &sizeBytes); + getBaseTypeAndSizeOfSIMDType(structType, &sizeBytes); var_types simdType = getSIMDTypeForSize(sizeBytes); GenTreeLclFld* fldNode = gtNewLclFldNode(lclNum, simdType, offset); @@ -28135,9 +27987,8 @@ GenTreeFieldList* Compiler::gtConvertParamOpToFieldList(GenTree* op, unsigned fi #endif // TARGET_ARM64 GenTree* Compiler::gtNewSimdWithLowerNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, 
GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsicId = NI_Illegal; @@ -28160,13 +28011,12 @@ GenTree* Compiler::gtNewSimdWithLowerNode( #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseType, simdSize); } GenTree* Compiler::gtNewSimdWithUpperNode( - var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) + var_types type, GenTree* op1, GenTree* op2, var_types simdBaseType, unsigned simdSize) { - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); NamedIntrinsic intrinsicId = NI_Illegal; @@ -28189,13 +28039,13 @@ GenTree* Compiler::gtNewSimdWithUpperNode( #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseType, simdSize); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID) { return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, TYP_UNKNOWN, 0); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID) @@ -28203,7 +28053,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree SetOpLclRelatedToSIMDIntrinsic(op1); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1); + GenTreeHWIntrinsic(type, 
getAllocator(CMK_ASTNode), hwIntrinsicID, TYP_UNKNOWN, 0, op1); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, @@ -28215,7 +28065,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, SetOpLclRelatedToSIMDIntrinsic(op2); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1, op2); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, TYP_UNKNOWN, 0, op1, op2); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode( @@ -28226,7 +28076,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode( SetOpLclRelatedToSIMDIntrinsic(op3); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1, op2, op3); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, TYP_UNKNOWN, 0, op1, op2, op3); } //------------------------------------------------------------------------ @@ -28998,7 +28848,7 @@ void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId) if (HWIntrinsicInfo::NeedsNormalizeSmallTypeToInt(intrinsicId) && varTypeIsSmall(simdBaseType)) { - SetSimdBaseJitType(varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UINT : CORINFO_TYPE_INT); + SetSimdBaseType(varTypeIsUnsigned(simdBaseType) ? 
TYP_UINT : TYP_INT); } #endif // TARGET_XARCH } @@ -31894,11 +31744,10 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree) assert(!optValnumCSE_phase); assert(opts.Tier0OptimizationEnabled()); - NamedIntrinsic ni = tree->GetHWIntrinsicId(); - var_types retType = tree->TypeGet(); - var_types simdBaseType = tree->GetSimdBaseType(); - CorInfoType simdBaseJitType = tree->GetSimdBaseJitType(); - unsigned int simdSize = tree->GetSimdSize(); + NamedIntrinsic ni = tree->GetHWIntrinsicId(); + var_types retType = tree->TypeGet(); + var_types simdBaseType = tree->GetSimdBaseType(); + unsigned int simdSize = tree->GetSimdSize(); simd_t simdVal = {}; @@ -32125,8 +31974,8 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree) // The bitwise operation is likely normalized to int or uint, while // the underlying convert ops may be a small type. We need to preserve // such a small type since that indicates how many elements are in the mask. - simdBaseJitType = cvtOp1->GetSimdBaseJitType(); - tree->SetSimdBaseJitType(simdBaseJitType); + simdBaseType = cvtOp1->GetSimdBaseType(); + tree->SetSimdBaseType(simdBaseType); tree->gtType = TYP_MASK; DEBUG_DESTROY_NODE(op1); @@ -32142,7 +31991,7 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree) } tree->SetMorphed(this); - tree = gtNewSimdCvtMaskToVectorNode(retType, tree, simdBaseJitType, simdSize)->AsHWIntrinsic(); + tree = gtNewSimdCvtMaskToVectorNode(retType, tree, simdBaseType, simdSize)->AsHWIntrinsic(); tree->SetMorphed(this); return tree; @@ -32210,7 +32059,7 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree) tree->gtType = TYP_MASK; tree->SetMorphed(this); - tree = gtNewSimdCvtMaskToVectorNode(retType, tree, simdBaseJitType, simdSize)->AsHWIntrinsic(); + tree = gtNewSimdCvtMaskToVectorNode(retType, tree, simdBaseType, simdSize)->AsHWIntrinsic(); tree->SetMorphed(this); op1 = tree->Op(1); op2 = nullptr; @@ -33799,7 +33648,7 @@ GenTree* 
Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree) if (varTypeIsMask(retType) && !varTypeIsMask(resultNode)) { - resultNode = gtNewSimdCvtVectorToMaskNode(retType, resultNode, simdBaseJitType, simdSize); + resultNode = gtNewSimdCvtVectorToMaskNode(retType, resultNode, simdBaseType, simdSize); return gtFoldExprHWIntrinsic(resultNode->AsHWIntrinsic()); } diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 794b6f66b09203..19639c7f357377 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -1521,10 +1521,10 @@ struct GenTree #if defined(TARGET_XARCH) bool isEvexCompatibleHWIntrinsic(Compiler* comp) const; bool isEmbeddedBroadcastCompatibleHWIntrinsic(Compiler* comp) const; - bool isEmbeddedMaskingCompatible(Compiler* comp, - unsigned tgtMaskSize, - CorInfoType& tgtSimdBaseJitType, - size_t* broadcastOpIndex = nullptr) const; + bool isEmbeddedMaskingCompatible(Compiler* comp, + unsigned tgtMaskSize, + var_types& tgtSimdBaseType, + size_t* broadcastOpIndex = nullptr) const; #endif // TARGET_XARCH bool isEmbeddedMaskingCompatible() const; #else @@ -6313,7 +6313,7 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp regNumberSmall gtOtherReg; // The second register for multi-reg intrinsics. MultiRegSpillFlags gtSpillFlags; // Spill flags for multi-reg intrinsics. unsigned char gtAuxiliaryJitType; // For intrinsics than need another type (e.g. Avx2.Gather* or SIMD (by element)) - unsigned char gtSimdBaseJitType; // SIMD vector base JIT type + unsigned char gtSimdBaseType; // SIMD vector base type + unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics NamedIntrinsic gtHWIntrinsicId; @@ -6419,47 +6419,18 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp var_types GetAuxiliaryType() const; - CorInfoType GetSimdBaseJitType() const + // The invariant here is that simdBaseType is a converted + // CorInfoType using JitType2PreciseVarType.
+ void SetSimdBaseType(var_types simdBaseType) { - return (CorInfoType)gtSimdBaseJitType; - } - - CorInfoType GetNormalizedSimdBaseJitType() const - { - CorInfoType simdBaseJitType = GetSimdBaseJitType(); - switch (simdBaseJitType) - { - case CORINFO_TYPE_NATIVEINT: - { -#ifdef TARGET_64BIT - return CORINFO_TYPE_LONG; -#else - return CORINFO_TYPE_INT; -#endif - } - - case CORINFO_TYPE_NATIVEUINT: - { -#ifdef TARGET_64BIT - return CORINFO_TYPE_ULONG; -#else - return CORINFO_TYPE_UINT; -#endif - } - - default: - return simdBaseJitType; - } - } - - void SetSimdBaseJitType(CorInfoType simdBaseJitType) - { - gtSimdBaseJitType = (unsigned char)simdBaseJitType; - assert(gtSimdBaseJitType == simdBaseJitType); + gtSimdBaseType = (unsigned char)simdBaseType; + assert(gtSimdBaseType == simdBaseType); } var_types GetSimdBaseType() const; + var_types GetSimdBaseTypeAsVarType() const; + unsigned char GetSimdSize() const { return gtSimdSize; @@ -6475,18 +6446,18 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp GenTreeJitIntrinsic(genTreeOps oper, var_types type, CompAllocator allocator, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize, Operands... operands) : GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...) 
, gtOtherReg(REG_NA) , gtSpillFlags(0) , gtAuxiliaryJitType(CORINFO_TYPE_UNDEF) - , gtSimdBaseJitType((unsigned char)simdBaseJitType) + , gtSimdBaseType((unsigned char)simdBaseType) , gtSimdSize((unsigned char)simdSize) , gtHWIntrinsicId(NI_Illegal) { - assert(gtSimdBaseJitType == simdBaseJitType); + assert(gtSimdBaseType == simdBaseType); assert(gtSimdSize == simdSize); } @@ -6498,11 +6469,8 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp #endif protected: - GenTreeJitIntrinsic(genTreeOps oper, - var_types type, - IntrinsicNodeBuilder&& nodeBuilder, - CorInfoType simdBaseJitType, - unsigned simdSize) + GenTreeJitIntrinsic( + genTreeOps oper, var_types type, IntrinsicNodeBuilder&& nodeBuilder, var_types simdBaseType, unsigned simdSize) : GenTreeMultiOp(oper, type, nodeBuilder.GetBuiltOperands(), @@ -6511,11 +6479,11 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp , gtOtherReg(REG_NA) , gtSpillFlags(0) , gtAuxiliaryJitType(CORINFO_TYPE_UNDEF) - , gtSimdBaseJitType((unsigned char)simdBaseJitType) + , gtSimdBaseType((unsigned char)simdBaseType) , gtSimdSize((unsigned char)simdSize) , gtHWIntrinsicId(NI_Illegal) { - assert(gtSimdBaseJitType == simdBaseJitType); + assert(gtSimdBaseType == simdBaseType); assert(gtSimdSize == simdSize); } @@ -6533,9 +6501,9 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic GenTreeHWIntrinsic(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize) - : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize) + : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseType, simdSize) { Initialize(hwIntrinsicID); } @@ -6544,10 +6512,10 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic GenTreeHWIntrinsic(var_types type, CompAllocator allocator, NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, + var_types simdBaseType, unsigned simdSize, Operands... 
operands) - : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...) + : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseType, simdSize, operands...) { Initialize(hwIntrinsicID); } diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp index 83909542992db3..beb9315395c184 100644 --- a/src/coreclr/jit/hwintrinsic.cpp +++ b/src/coreclr/jit/hwintrinsic.cpp @@ -900,9 +900,7 @@ uint8_t TernaryLogicInfo::GetTernaryControlByte(const TernaryLogicInfo& info, ui // Return Value: // The basetype of intrinsic of it can be fetched from 1st or 2nd argument, else return baseType unmodified. // -CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, - CORINFO_SIG_INFO* sig, - CorInfoType simdBaseJitType) +var_types Compiler::getBaseTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_SIG_INFO* sig, var_types simdBaseType) { if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic)) { @@ -914,22 +912,24 @@ CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, } CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg); - simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass); + simdBaseType = getBaseTypeAndSizeOfSIMDType(argClass); - if (simdBaseJitType == CORINFO_TYPE_UNDEF) // the argument is not a vector + if (simdBaseType == TYP_UNDEF) // the argument is not a vector { CORINFO_CLASS_HANDLE tmpClass; - simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)); + CorInfoType simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)); if (simdBaseJitType == CORINFO_TYPE_PTR) { simdBaseJitType = info.compCompHnd->getChildType(argClass, &tmpClass); } + + simdBaseType = JitType2PreciseVarType(simdBaseJitType); } - assert(simdBaseJitType != CORINFO_TYPE_UNDEF); + assert(simdBaseType != TYP_UNDEF); } - return simdBaseJitType; + return simdBaseType; } struct 
HWIntrinsicIsaRange @@ -1441,8 +1441,8 @@ unsigned HWIntrinsicInfo::lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORI typeHnd = sig->retTypeSigClass; } - CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(typeHnd, &simdSize); - assert((simdSize > 0) && (simdBaseJitType != CORINFO_TYPE_UNDEF)); + var_types simdBaseType = comp->getBaseTypeAndSizeOfSIMDType(typeHnd, &simdSize); + assert((simdSize > 0) && (simdBaseType != TYP_UNDEF)); return simdSize; } @@ -1503,7 +1503,7 @@ GenTree* Compiler::getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE if (!varTypeIsSIMD(argType)) { unsigned int argSizeBytes; - (void)getBaseJitTypeAndSizeOfSIMDType(argClass, &argSizeBytes); + (void)getBaseTypeAndSizeOfSIMDType(argClass, &argSizeBytes); argType = getSIMDTypeForSize(argSizeBytes); } assert(varTypeIsSIMD(argType)); @@ -1676,6 +1676,34 @@ static bool isSupportedBaseType(NamedIntrinsic intrinsic, CorInfoType baseJitTyp return false; } +static bool isSupportedBaseType(NamedIntrinsic intrinsic, var_types baseType) +{ + if (baseType == TYP_UNDEF) + { + return false; + } + + // We don't actually check the intrinsic outside of the false case as we expect + // the exposed managed signatures are either generic and support all types + // or they are explicit and support the type indicated. + + if (varTypeIsArithmetic(baseType)) + { + return true; + } + +#ifdef DEBUG + CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); +#ifdef TARGET_XARCH + assert((isa == InstructionSet_Vector512) || (isa == InstructionSet_Vector256) || (isa == InstructionSet_Vector128)); +#endif // TARGET_XARCH +#ifdef TARGET_ARM64 + assert((isa == InstructionSet_Vector64) || (isa == InstructionSet_Vector128)); +#endif // TARGET_ARM64 +#endif // DEBUG + return false; +} + // HWIntrinsicSignatureReader: a helper class that "reads" a list of hardware intrinsic arguments and stores // the corresponding argument type descriptors as the fields of the class instance. 
// @@ -1744,6 +1772,26 @@ struct HWIntrinsicSignatureReader final { return JITtype2varType(op4JitType); } + + var_types GetOp1TypeAsPrecise() const + { + return JitType2PreciseVarType(op1JitType); + } + + var_types GetOp2TypeAsPrecise() const + { + return JitType2PreciseVarType(op2JitType); + } + + var_types GetOp3TypeAsPrecise() const + { + return JitType2PreciseVarType(op3JitType); + } + + var_types GetOp4TypeAsPrecise() const + { + return JitType2PreciseVarType(op4JitType); + } }; //------------------------------------------------------------------------ @@ -1763,7 +1811,7 @@ struct HWIntrinsicSignatureReader final // returns true if immOp is within range. Otherwise false. // bool Compiler::CheckHWIntrinsicImmRange(NamedIntrinsic intrinsic, - CorInfoType simdBaseJitType, + var_types simdBaseType, GenTree* immOp, bool mustExpand, int immLowerBound, @@ -1826,7 +1874,6 @@ bool Compiler::CheckHWIntrinsicImmRange(NamedIntrinsic intrinsic, else if (HWIntrinsicInfo::MaybeNoJmpTableImm(intrinsic)) { #if defined(TARGET_X86) - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); if (varTypeIsLong(simdBaseType)) { @@ -1882,17 +1929,17 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, return nullptr; } - HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); - CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); - int numArgs = sig->numArgs; - var_types retType = genActualType(JITtype2varType(sig->retType)); - CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; - GenTree* retNode = nullptr; + HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); + CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); + int numArgs = sig->numArgs; + var_types retType = genActualType(JITtype2varType(sig->retType)); + var_types simdBaseType = TYP_UNDEF; + GenTree* retNode = nullptr; if (retType == TYP_STRUCT) { unsigned int sizeBytes; - simdBaseJitType = 
getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes); + simdBaseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes); if (HWIntrinsicInfo::IsMultiReg(intrinsic)) { @@ -1902,8 +1949,9 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, #ifdef TARGET_ARM64 else if ((intrinsic == NI_AdvSimd_LoadAndInsertScalar) || (intrinsic == NI_AdvSimd_Arm64_LoadAndInsertScalar)) { - CorInfoType pSimdBaseJitType = CORINFO_TYPE_UNDEF; - var_types retFieldType = impNormStructType(sig->retTypeSigClass, &pSimdBaseJitType); + var_types retFieldBaseType = TYP_UNDEF; + + var_types retFieldType = impNormStructType(sig->retTypeSigClass, &retFieldBaseType); if (retFieldType == TYP_STRUCT) { @@ -1911,12 +1959,12 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, unsigned int sizeBytes = 0; // LoadAndInsertScalar that returns 2,3 or 4 vectors - assert(pSimdBaseJitType == CORINFO_TYPE_UNDEF); + assert(retFieldBaseType == TYP_UNDEF); unsigned fieldCount = info.compCompHnd->getClassNumInstanceFields(sig->retTypeSigClass); assert(fieldCount > 1); CORINFO_FIELD_HANDLE fieldHandle = info.compCompHnd->getFieldInClass(sig->retTypeClass, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fieldHandle, &structType); - simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structType, &sizeBytes); + simdBaseType = getBaseTypeAndSizeOfSIMDType(structType, &sizeBytes); switch (fieldCount) { case 2: @@ -1938,7 +1986,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, else { assert((retFieldType == TYP_SIMD8) || (retFieldType == TYP_SIMD16)); - assert(isSupportedBaseType(intrinsic, simdBaseJitType)); + assert(isSupportedBaseType(intrinsic, simdBaseType)); retType = getSIMDTypeForSize(sizeBytes); } } @@ -1947,7 +1995,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, { // We want to return early here for cases where retType was TYP_STRUCT as per method signature and // rather than deferring the decision after getting the 
simdBaseJitType of arg. - if (!isSupportedBaseType(intrinsic, simdBaseJitType)) + if (!isSupportedBaseType(intrinsic, simdBaseType)) { return nullptr; } @@ -1957,28 +2005,28 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, } } - simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, sig, simdBaseJitType); + simdBaseType = getBaseTypeFromArgIfNeeded(intrinsic, sig, simdBaseType); unsigned simdSize = 0; - if (simdBaseJitType == CORINFO_TYPE_UNDEF) + if (simdBaseType == TYP_UNDEF) { if ((category == HW_Category_Scalar) || (category == HW_Category_Special)) { - simdBaseJitType = sig->retType; + simdBaseType = JitType2PreciseVarType(sig->retType); - if (simdBaseJitType == CORINFO_TYPE_VOID) + if (simdBaseType == TYP_VOID) { - simdBaseJitType = CORINFO_TYPE_UNDEF; + simdBaseType = TYP_UNDEF; } } else { unsigned int sizeBytes; - simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes); + simdBaseType = getBaseTypeAndSizeOfSIMDType(clsHnd, &sizeBytes); #ifdef TARGET_ARM64 - if (simdBaseJitType == CORINFO_TYPE_UNDEF && HWIntrinsicInfo::HasScalarInputVariant(intrinsic)) + if (simdBaseType == TYP_UNDEF && HWIntrinsicInfo::HasScalarInputVariant(intrinsic)) { // Did not find a valid vector type. The intrinsic has alternate scalar version. Switch to that. 
@@ -1987,10 +2035,10 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, category = HWIntrinsicInfo::lookupCategory(intrinsic); isa = HWIntrinsicInfo::lookupIsa(intrinsic); - simdBaseJitType = sig->retType; - assert(simdBaseJitType != CORINFO_TYPE_VOID); - assert(simdBaseJitType != CORINFO_TYPE_UNDEF); - assert(simdBaseJitType != CORINFO_TYPE_VALUECLASS); + simdBaseType = JitType2PreciseVarType(sig->retType); + assert(simdBaseType != TYP_VOID); + assert(simdBaseType != TYP_UNDEF); + assert(simdBaseType != TYP_STRUCT); } else #endif // TARGET_ARM64 @@ -2000,7 +2048,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, } } #ifdef TARGET_ARM64 - else if ((simdBaseJitType == CORINFO_TYPE_VALUECLASS) && (HWIntrinsicInfo::BaseTypeFromValueTupleArg(intrinsic))) + else if ((simdBaseType == TYP_STRUCT) && (HWIntrinsicInfo::BaseTypeFromValueTupleArg(intrinsic))) { // If HW_Flag_BaseTypeFromValueTupleArg is set, one of the base type position flags must be set. assert(HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic)); @@ -2021,29 +2069,24 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, CorInfoType fieldType = info.compCompHnd->getFieldType(fieldHandle, &classHnd); assert(isIntrinsicType(classHnd)); - simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(classHnd, &simdSize); + simdBaseType = getBaseTypeAndSizeOfSIMDType(classHnd, &simdSize); assert(simdSize > 0); } #endif // TARGET_ARM64 // Immediately return if the category is other than scalar/special and this is not a supported base type. 
if ((category != HW_Category_Special) && (category != HW_Category_Scalar) && - !isSupportedBaseType(intrinsic, simdBaseJitType)) + !isSupportedBaseType(intrinsic, simdBaseType)) { return nullptr; } - var_types simdBaseType = TYP_UNKNOWN; - - if (simdBaseJitType != CORINFO_TYPE_UNDEF) + if (simdBaseType != TYP_UNDEF) { - simdBaseType = JitType2PreciseVarType(simdBaseJitType); - #ifdef TARGET_XARCH if (HWIntrinsicInfo::NeedsNormalizeSmallTypeToInt(intrinsic) && varTypeIsSmall(simdBaseType)) { - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UINT : CORINFO_TYPE_INT; - simdBaseType = JitType2PreciseVarType(simdBaseJitType); + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UINT : TYP_INT; } #endif // TARGET_XARCH } @@ -2071,12 +2114,12 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, getHWIntrinsicImmTypes(intrinsic, sig, 2, &immSimdSize, &immSimdBaseType); HWIntrinsicInfo::lookupImmBounds(intrinsic, immSimdSize, immSimdBaseType, 2, &immLowerBound, &immUpperBound); - if (!CheckHWIntrinsicImmRange(intrinsic, simdBaseJitType, immOp2, mustExpand, immLowerBound, immUpperBound, - false, &useFallback)) + if (!CheckHWIntrinsicImmRange(intrinsic, simdBaseType, immOp2, mustExpand, immLowerBound, immUpperBound, false, + &useFallback)) { if (useFallback) { - return impNonConstFallback(intrinsic, retType, simdBaseJitType); + return impNonConstFallback(intrinsic, retType, simdBaseType); } else if (immOp2->IsCnsIntOrI()) { @@ -2122,12 +2165,12 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, hasFullRangeImm = HWIntrinsicInfo::HasFullRangeImm(intrinsic); #endif - if (!CheckHWIntrinsicImmRange(intrinsic, simdBaseJitType, immOp1, mustExpand, immLowerBound, immUpperBound, + if (!CheckHWIntrinsicImmRange(intrinsic, simdBaseType, immOp1, mustExpand, immLowerBound, immUpperBound, hasFullRangeImm, &useFallback)) { if (useFallback) { - return impNonConstFallback(intrinsic, retType, simdBaseJitType); + return impNonConstFallback(intrinsic, 
retType, simdBaseType); } else if (immOp1->IsCnsIntOrI()) { @@ -2241,7 +2284,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, case 0: { assert(!isScalar); - retNode = gtNewSimdHWIntrinsicNode(nodeRetType, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(nodeRetType, intrinsic, simdBaseType, simdSize); break; } @@ -2258,7 +2301,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, } retNode = isScalar ? gtNewScalarHWIntrinsicNode(nodeRetType, op1, intrinsic) - : gtNewSimdHWIntrinsicNode(nodeRetType, op1, intrinsic, simdBaseJitType, simdSize); + : gtNewSimdHWIntrinsicNode(nodeRetType, op1, intrinsic, simdBaseType, simdSize); #if defined(TARGET_XARCH) switch (intrinsic) @@ -2316,16 +2359,15 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, case 2: { - retNode = isScalar - ? gtNewScalarHWIntrinsicNode(nodeRetType, op1, op2, intrinsic) - : gtNewSimdHWIntrinsicNode(nodeRetType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = isScalar ? gtNewScalarHWIntrinsicNode(nodeRetType, op1, op2, intrinsic) + : gtNewSimdHWIntrinsicNode(nodeRetType, op1, op2, intrinsic, simdBaseType, simdSize); #ifdef TARGET_XARCH if ((intrinsic == NI_X86Base_Crc32) || (intrinsic == NI_X86Base_X64_Crc32)) { // TODO-XArch-Cleanup: currently we use the simdBaseJitType to bring the type of the second argument // to the code generator. May encode the overload info in other way. 
- retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType); + retNode->AsHWIntrinsic()->SetSimdBaseType(sigReader.GetOp2TypeAsPrecise()); } #elif defined(TARGET_ARM64) switch (intrinsic) @@ -2334,7 +2376,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, case NI_Crc32_ComputeCrc32C: case NI_Crc32_Arm64_ComputeCrc32: case NI_Crc32_Arm64_ComputeCrc32C: - retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType); + retNode->AsHWIntrinsic()->SetSimdBaseType(sigReader.GetOp2TypeAsPrecise()); break; case NI_AdvSimd_AddWideningUpper: @@ -2351,12 +2393,12 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, case NI_ArmBase_Arm64_MultiplyHigh: if (sig->retType == CORINFO_TYPE_ULONG) { - retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_ULONG); + retNode->AsHWIntrinsic()->SetSimdBaseType(TYP_ULONG); } else { assert(sig->retType == CORINFO_TYPE_LONG); - retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_LONG); + retNode->AsHWIntrinsic()->SetSimdBaseType(TYP_LONG); } break; @@ -2411,9 +2453,9 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, op3 = addRangeCheckIfNeeded(intrinsic, op3, immLowerBound, immUpperBound); } - retNode = isScalar ? gtNewScalarHWIntrinsicNode(nodeRetType, op1, op2, op3, intrinsic) - : gtNewSimdHWIntrinsicNode(nodeRetType, op1, op2, op3, intrinsic, simdBaseJitType, - simdSize); + retNode = isScalar + ? 
gtNewScalarHWIntrinsicNode(nodeRetType, op1, op2, op3, intrinsic) + : gtNewSimdHWIntrinsicNode(nodeRetType, op1, op2, op3, intrinsic, simdBaseType, simdSize); switch (intrinsic) { @@ -2468,8 +2510,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, case 4: { assert(!isScalar); - retNode = - gtNewSimdHWIntrinsicNode(nodeRetType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(nodeRetType, op1, op2, op3, op4, intrinsic, simdBaseType, simdSize); switch (intrinsic) { @@ -2496,7 +2537,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, } else { - retNode = impSpecialIntrinsic(intrinsic, clsHnd, method, sig R2RARG(entryPoint), simdBaseJitType, nodeRetType, + retNode = impSpecialIntrinsic(intrinsic, clsHnd, method, sig R2RARG(entryPoint), simdBaseType, nodeRetType, simdSize, mustExpand); #if defined(FEATURE_MASKED_HW_INTRINSICS) && defined(TARGET_ARM64) @@ -2525,7 +2566,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, { // HWInstrinsic requires a mask for op3 GenTree*& op = retNode->AsHWIntrinsic()->Op(3); - op = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op, simdBaseJitType, simdSize); + op = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op, simdBaseType, simdSize); FALLTHROUGH; } case NI_Sve_CreateBreakAfterMask: @@ -2539,14 +2580,14 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, { // HWInstrinsic requires a mask for op2 GenTree*& op = retNode->AsHWIntrinsic()->Op(2); - op = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op, simdBaseJitType, simdSize); + op = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op, simdBaseType, simdSize); FALLTHROUGH; } default: { // HWInstrinsic requires a mask for op1 GenTree*& op = retNode->AsHWIntrinsic()->Op(1); - op = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op, simdBaseJitType, simdSize); + op = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op, simdBaseType, simdSize); break; } } @@ -2568,8 +2609,8 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, { 
GenTree*& op1 = retNode->AsHWIntrinsic()->Op(1); GenTree*& op2 = retNode->AsHWIntrinsic()->Op(2); - op1 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op1, simdBaseJitType, simdSize); - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); + op1 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op1, simdBaseType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); break; } @@ -2581,7 +2622,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, if (nodeRetType == TYP_MASK) { // HWInstrinsic returns a mask, but all returns must be vectors, so convert mask to vector. - retNode = gtNewSimdCvtMaskToVectorNode(retType, retNode, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtMaskToVectorNode(retType, retNode, simdBaseType, simdSize); } #endif // FEATURE_MASKED_HW_INTRINSICS && TARGET_ARM64 diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index 7e73e3e2fee48c..9e94fb991a6aa1 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -262,7 +262,7 @@ void Compiler::getHWIntrinsicImmTypes(NamedIntrinsic intrinsic, case 2: { CORINFO_CLASS_HANDLE typeHnd = info.compCompHnd->getArgClass(sig, immArg); - getBaseJitTypeAndSizeOfSIMDType(typeHnd, immSimdSize); + getBaseTypeAndSizeOfSIMDType(typeHnd, immSimdSize); break; } default: @@ -273,12 +273,12 @@ void Compiler::getHWIntrinsicImmTypes(NamedIntrinsic intrinsic, { if (immNumber == 2) { - CORINFO_ARG_LIST_HANDLE immArg = sig->args; - immArg = info.compCompHnd->getArgNext(immArg); - immArg = info.compCompHnd->getArgNext(immArg); - CORINFO_CLASS_HANDLE typeHnd = info.compCompHnd->getArgClass(sig, immArg); - CorInfoType otherBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, immSimdSize); - *immSimdBaseType = JitType2PreciseVarType(otherBaseJitType); + CORINFO_ARG_LIST_HANDLE immArg = sig->args; + immArg = info.compCompHnd->getArgNext(immArg); + immArg = info.compCompHnd->getArgNext(immArg); + 
CORINFO_CLASS_HANDLE typeHnd = info.compCompHnd->getArgClass(sig, immArg); + var_types otherBaseType = getBaseTypeAndSizeOfSIMDType(typeHnd, immSimdSize); + *immSimdBaseType = otherBaseType; } // For imm1 use default simd sizes. } @@ -554,12 +554,12 @@ void HWIntrinsicInfo::lookupImmBounds( // Arguments: // intrinsic -- intrinsic ID // simdType -- Vector type -// simdBaseJitType -- base JIT type of the Vector64/128 +// simdBaseType -- base type of the Vector64/128 // // Return Value: // return the IR of semantic alternative on non-const imm-arg // -GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType) +GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types simdBaseType) { bool isRightShift = true; @@ -612,8 +612,8 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT unreached(); } - GenTree* tmpOp = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, genTypeSize(simdType)); - return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, fallbackIntrinsic, simdBaseJitType, + GenTree* tmpOp = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseType, genTypeSize(simdType)); + return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, fallbackIntrinsic, simdBaseType, genTypeSize(simdType)); } @@ -631,7 +631,7 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call. // entryPoint -- The entry point information required for R2R scenarios -// simdBaseJitType -- generic argument of the intrinsic. +// simdBaseType -- generic argument of the intrinsic. // retType -- return type of the intrinsic. 
// mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false // @@ -642,7 +642,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig R2RARG(CORINFO_CONST_LOOKUP* entryPoint), - CorInfoType simdBaseJitType, + var_types simdBaseType, var_types retType, unsigned simdSize, bool mustExpand) @@ -664,7 +664,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, bool isScalar = (category == HW_Category_Scalar); assert(numArgs >= 0); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); GenTree* retNode = nullptr; @@ -690,7 +689,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdAbsNode(retType, op1, simdBaseType, simdSize); break; } @@ -702,7 +701,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseType, simdSize); break; } @@ -716,7 +715,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseType, simdSize); } else { @@ -727,7 +726,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = NI_AdvSimd_AddSaturateScalar; } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } break; } @@ -746,8 +745,8 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - op2 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op2, simdBaseJitType, simdSize)); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); + op2 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op2, simdBaseType, simdSize)); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseType, simdSize); break; } @@ -763,8 +762,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - op2 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op2, simdBaseJitType, simdSize)); - retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize); + op2 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op2, simdBaseType, simdSize)); + retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseType, simdSize); break; } @@ -817,7 +816,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(retType == TYP_SIMD8); op1 = impSIMDPopStack(); - retNode = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseType, simdSize); break; } @@ -828,7 +827,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(retType == TYP_SIMD12); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); break; } @@ -857,15 +856,15 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, return vecCon; } - op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, 8); + op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector64_ToVector128Unsafe, simdBaseType, 8); GenTree* idx = gtNewIconNode(2, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - op1 = gtNewSimdWithElementNode(retType, 
op1, idx, zero, simdBaseJitType, 16); + op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseType, 16); idx = gtNewIconNode(3, TYP_INT); zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseType, 16); break; } @@ -885,11 +884,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, return vecCon; } - op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseJitType, 12); + op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseType, 12); GenTree* idx = gtNewIconNode(3, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseType, 16); break; } @@ -918,11 +917,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); assert(retType == TYP_SIMD16); - assert(simdBaseJitType == CORINFO_TYPE_FLOAT); + assert(simdBaseType == TYP_FLOAT); assert((simdSize == 8) || (simdSize == 12)); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseType, simdSize); break; } @@ -934,7 +933,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseType, simdSize); break; } @@ -946,7 +945,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize); + retNode = 
gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseType, simdSize); break; } @@ -962,7 +961,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCeilNode(retType, op1, simdBaseType, simdSize); break; } @@ -975,7 +974,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize); + retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseType, simdSize); break; } @@ -988,7 +987,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_ConvertToDoubleScalar : NI_AdvSimd_Arm64_ConvertToDouble; op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); break; } @@ -1009,7 +1008,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_INT, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_INT, simdBaseType, simdSize); break; } @@ -1030,7 +1029,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_DOUBLE); op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_LONG, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_LONG, simdBaseType, simdSize); break; } @@ -1041,7 +1040,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, 
simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseType, simdSize); break; } @@ -1062,7 +1061,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_UINT, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_UINT, simdBaseType, simdSize); break; } @@ -1083,7 +1082,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_DOUBLE); op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_ULONG, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_ULONG, simdBaseType, simdSize); break; } @@ -1093,7 +1092,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (sig->numArgs == 1) { op1 = impPopStack().val; - retNode = gtNewSimdCreateBroadcastNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateBroadcastNode(retType, op1, simdBaseType, simdSize); break; } @@ -1259,8 +1258,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } else { - retNode = - gtNewSimdHWIntrinsicNode(retType, std::move(nodeBuilder), intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, std::move(nodeBuilder), intrinsic, simdBaseType, simdSize); } break; } @@ -1271,7 +1269,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impPopStack().val; - retNode = gtNewSimdCreateScalarNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateScalarNode(retType, op1, simdBaseType, simdSize); break; } @@ -1292,7 +1290,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impPopStack().val; - retNode = gtNewSimdCreateSequenceNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = 
gtNewSimdCreateSequenceNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -1302,7 +1300,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impPopStack().val; - retNode = gtNewSimdCreateScalarUnsafeNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateScalarUnsafeNode(retType, op1, simdBaseType, simdSize); break; } @@ -1328,7 +1326,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1344,8 +1342,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdDotProdNode(simdType, op1, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdGetElementNode(retType, retNode, gtNewIconNode(0), simdBaseJitType, simdSize); + retNode = gtNewSimdDotProdNode(simdType, op1, op2, simdBaseType, simdSize); + retNode = gtNewSimdGetElementNode(retType, retNode, gtNewIconNode(0), simdBaseType, simdSize); } break; } @@ -1358,7 +1356,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1370,7 +1368,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1382,7 +1380,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1391,7 +1389,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); break; } @@ -1407,7 +1405,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdFloorNode(retType, op1, simdBaseType, simdSize); break; } @@ -1427,7 +1425,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseJitType, simdSize); + retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseType, simdSize); break; } @@ -1477,7 +1475,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector128_get_Indices: { assert(sig->numArgs == 0); - retNode = gtNewSimdGetIndicesNode(retType, simdBaseJitType, simdSize); + retNode = gtNewSimdGetIndicesNode(retType, simdBaseType, simdSize); break; } @@ -1628,7 +1626,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -1637,7 +1635,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, 
simdSize); + retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseType, simdSize); break; } @@ -1646,7 +1644,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseType, simdSize); break; } @@ -1658,7 +1656,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1670,7 +1668,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1682,7 +1680,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1694,7 +1692,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1706,7 +1704,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1718,7 +1716,7 @@ 
GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1735,7 +1733,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdIsEvenIntegerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsEvenIntegerNode(retType, op1, simdBaseType, simdSize); break; } @@ -1744,7 +1742,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsFiniteNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsFiniteNode(retType, op1, simdBaseType, simdSize); break; } @@ -1753,7 +1751,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsInfinityNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsInfinityNode(retType, op1, simdBaseType, simdSize); break; } @@ -1762,7 +1760,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsIntegerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsIntegerNode(retType, op1, simdBaseType, simdSize); break; } @@ -1771,7 +1769,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNaNNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNaNNode(retType, op1, simdBaseType, simdSize); break; } @@ -1780,7 +1778,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNegativeNode(retType, op1, simdBaseJitType, simdSize); + 
retNode = gtNewSimdIsNegativeNode(retType, op1, simdBaseType, simdSize); break; } @@ -1789,7 +1787,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNegativeInfinityNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNegativeInfinityNode(retType, op1, simdBaseType, simdSize); break; } @@ -1798,7 +1796,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNormalNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNormalNode(retType, op1, simdBaseType, simdSize); break; } @@ -1815,7 +1813,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdIsOddIntegerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsOddIntegerNode(retType, op1, simdBaseType, simdSize); break; } @@ -1824,7 +1822,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsPositiveNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsPositiveNode(retType, op1, simdBaseType, simdSize); break; } @@ -1833,7 +1831,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsPositiveInfinityNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsPositiveInfinityNode(retType, op1, simdBaseType, simdSize); break; } @@ -1842,7 +1840,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsSubnormalNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsSubnormalNode(retType, op1, simdBaseType, simdSize); break; } @@ -1851,7 +1849,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { 
assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsZeroNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsZeroNode(retType, op1, simdBaseType, simdSize); break; } @@ -1863,7 +1861,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1875,7 +1873,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1887,7 +1885,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1899,7 +1897,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1911,7 +1909,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1923,7 +1921,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, 
simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1956,7 +1954,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); } - retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdLoadNode(retType, op1, simdBaseType, simdSize); break; } @@ -1981,7 +1979,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseType, simdSize); break; } @@ -2006,7 +2004,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseType, simdSize); break; } @@ -2111,7 +2109,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2140,12 +2138,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseJitType, simdSize); + retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseType, simdSize); } else { - GenTree* mulNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdBinOpNode(GT_ADD, retType, mulNode, op3, simdBaseJitType, simdSize); + GenTree* mulNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseType, simdSize); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, 
mulNode, op3, simdBaseType, simdSize); } break; } @@ -2158,7 +2156,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -2172,25 +2170,25 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseType, simdSize); } else if (simdSize == 16) { intrinsic = NI_AdvSimd_ExtractNarrowingSaturateLower; - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, intrinsic, simdBaseJitType, 8); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, intrinsic, simdBaseType, 8); intrinsic = NI_AdvSimd_ExtractNarrowingSaturateUpper; - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } else { intrinsic = NI_Vector64_ToVector128Unsafe; - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseType, simdSize); - op1 = gtNewSimdWithUpperNode(TYP_SIMD16, op1, op2, simdBaseJitType, 16); + op1 = gtNewSimdWithUpperNode(TYP_SIMD16, op1, op2, simdBaseType, 16); intrinsic = NI_AdvSimd_ExtractNarrowingSaturateLower; - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); } break; } @@ -2200,7 +2198,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdUnOpNode(GT_NEG, 
retType, op1, simdBaseType, simdSize); break; } @@ -2209,7 +2207,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseType, simdSize); break; } @@ -2221,7 +2219,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2241,7 +2239,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2253,7 +2251,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2266,7 +2264,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2278,7 +2276,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2297,7 +2295,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdRoundNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdRoundNode(retType, op1, simdBaseType, simdSize); break; } @@ -2326,7 +2324,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = NI_AdvSimd_ShiftLogical; } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -2370,7 +2368,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetMethodHandle(this, method R2RARG(*entryPoint)); break; @@ -2381,7 +2379,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize, isShuffleNative); + retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseType, simdSize, isShuffleNative); } break; } @@ -2394,7 +2392,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdSqrtNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2435,7 +2433,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } else { @@ -2454,7 +2452,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = 
op1->gtGetOp1(); } - retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreNode(op1, op2, simdBaseType, simdSize); } break; } @@ -2496,7 +2494,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreNode(op2, op1, simdBaseType, simdSize); break; } @@ -2526,7 +2524,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseType, simdSize); break; } @@ -2556,7 +2554,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseType, simdSize); break; } @@ -2600,7 +2598,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = simdSize == 8 ? NI_AdvSimd_StoreVectorAndZip : NI_AdvSimd_Arm64_StoreVectorAndZip; info.compNeedsConsecutiveRegisters = true; - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -2648,7 +2646,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { // While storing from a single vector, both Vector128 and Vector64 API calls are in AdvSimd class. // Thus, we get simdSize as 8 for both of the calls. We re-calculate that simd size for such API calls. 
- getBaseJitTypeAndSizeOfSIMDType(argClass, &simdSize); + getBaseTypeAndSizeOfSIMDType(argClass, &simdSize); } assert(HWIntrinsicInfo::isImmOp(intrinsic, op3)); @@ -2667,7 +2665,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -2681,7 +2679,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseType, simdSize); } else { @@ -2692,7 +2690,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = NI_AdvSimd_SubtractSaturateScalar; } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } break; } @@ -2702,7 +2700,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdSumNode(retType, op1, simdBaseType, simdSize); break; } @@ -2718,7 +2716,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdTruncNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdTruncNode(retType, op1, simdBaseType, simdSize); break; } @@ -2729,7 +2727,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseType, simdSize); break; } @@ -2740,7 +2738,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseType, simdSize); break; } @@ -2763,7 +2761,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetMethodHandle(this, method R2RARG(*entryPoint)); break; @@ -2782,7 +2780,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, impPopStack(); // pop the indexOp that we already have. GenTree* vectorOp = impSIMDPopStack(); - retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize); + retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseType, simdSize); break; } @@ -2792,7 +2790,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -2802,7 +2800,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -2814,7 +2812,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2859,7 
+2857,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); - op1 = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); retNode = impStoreMultiRegValueToVar(op1, sig->retTypeSigClass DEBUGARG(CorInfoCallConvExtension::Managed)); break; } @@ -2910,7 +2908,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } // Was not able to generate a pattern, instead import a truemaskall - retNode = gtNewSimdHWIntrinsicNode(TYP_MASK, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(TYP_MASK, op1, intrinsic, simdBaseType, simdSize); break; } @@ -2938,7 +2936,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); assert(HWIntrinsicInfo::IsExplicitMaskedOperation(intrinsic)); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -2996,7 +2994,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = gtConvertParamOpToFieldList(op1, fieldCount, argClass); - op1 = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); retNode = impStoreMultiRegValueToVar(op1, sig->retTypeSigClass DEBUGARG(CorInfoCallConvExtension::Managed)); break; } @@ -3035,7 +3033,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(varTypeIsSIMD(op1->TypeGet())); } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } case NI_AdvSimd_VectorTableLookupExtension: @@ -3075,7 +3073,7 @@ 
GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(varTypeIsSIMD(op1->TypeGet())); } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -3129,7 +3127,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = getArgForHWIntrinsic(argType, argClass); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -3141,7 +3139,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_ARG_LIST_HANDLE arg = sig->args; arg = info.compCompHnd->getArgNext(arg); CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg); - CorInfoType ptrType = getBaseJitTypeAndSizeOfSIMDType(argClass); + CorInfoType ptrType = getBaseJitTypeOfSIMDType(argClass); CORINFO_CLASS_HANDLE tmpClass = NO_CLASS_HANDLE; // The size of narrowed target elements is determined from the second argument of StoreNarrowing(). 
@@ -3151,12 +3149,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, ptrType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)); assert(ptrType == CORINFO_TYPE_PTR); ptrType = info.compCompHnd->getChildType(argClass, &tmpClass); - assert(ptrType < simdBaseJitType); + assert(JitType2PreciseVarType(ptrType) < simdBaseType); op3 = impPopStack().val; op2 = impPopStack().val; op1 = impPopStack().val; - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(ptrType); break; } @@ -3207,9 +3205,9 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op3 = addRangeCheckIfNeeded(intrinsic, op3, immLowerBound, immUpperBound); retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic) - : gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + : gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); - retNode->AsHWIntrinsic()->SetSimdBaseJitType(simdBaseJitType); + retNode->AsHWIntrinsic()->SetSimdBaseType(simdBaseType); break; } @@ -3233,12 +3231,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // HWInstrinsic requires a mask for op2 if (!varTypeIsMask(op2)) { - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); - retNode->AsHWIntrinsic()->SetSimdBaseJitType(simdBaseJitType); + retNode->AsHWIntrinsic()->SetSimdBaseType(simdBaseType); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op1BaseJitType); break; } @@ -3291,7 +3289,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(varTypeIsIntegral(op2->TypeGet())); } #endif - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op2BaseJitType); } else @@ -3312,7 +3310,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impPopStack().val; assert(varTypeIsSIMD(op3->TypeGet())); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op3BaseJitType); } @@ -3343,7 +3341,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, retNode = gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic); - retNode->AsHWIntrinsic()->SetSimdBaseJitType(simdBaseJitType); + retNode->AsHWIntrinsic()->SetSimdBaseType(simdBaseType); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op2BaseJitType); break; } @@ -3370,7 +3368,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, retNode = gtNewScalarHWIntrinsicNode(retType, op1, op2, intrinsic); - retNode->AsHWIntrinsic()->SetSimdBaseJitType(simdBaseJitType); + retNode->AsHWIntrinsic()->SetSimdBaseType(simdBaseType); break; } @@ -3418,7 +3416,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, SetOpLclRelatedToSIMDIntrinsic(op4); SetOpLclRelatedToSIMDIntrinsic(op5); retNode = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(retType, getAllocator(CMK_ASTNode), intrinsic, - simdBaseJitType, simdSize, op1, op2, op3, op4, op5); + simdBaseType, simdSize, op1, op2, op3, op4, op5); break; } @@ -3433,7 +3431,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, var_types argType1 = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, 
&argClass))); var_types argType2 = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); CorInfoType op1BaseJitType = getBaseJitTypeOfSIMDType(argClass); op2 = impPopStack().val; @@ -3445,7 +3442,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, unsigned fieldCount = info.compCompHnd->getClassNumInstanceFields(argClass); op1 = gtConvertTableOpToFieldList(op1, fieldCount); } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op1BaseJitType); break; } @@ -3467,8 +3464,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impPopStack().val; CorInfoType op1BaseJitType = getBaseJitTypeOfSIMDType(argClass); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); - retNode->AsHWIntrinsic()->SetSimdBaseJitType(simdBaseJitType); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); + retNode->AsHWIntrinsic()->SetSimdBaseType(simdBaseType); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op1BaseJitType); break; } @@ -3495,11 +3492,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (isNative) { assert(!isMagnitude && !isNumber); - retNode = gtNewSimdMinMaxNativeNode(retType, op1, op2, simdBaseJitType, simdSize, isMax); + retNode = gtNewSimdMinMaxNativeNode(retType, op1, op2, simdBaseType, simdSize, isMax); } else { - retNode = gtNewSimdMinMaxNode(retType, op1, op2, simdBaseJitType, simdSize, isMax, isMagnitude, isNumber); + retNode = gtNewSimdMinMaxNode(retType, op1, op2, simdBaseType, simdSize, isMax, isMagnitude, isNumber); } } @@ -3512,17 +3509,16 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // gtNewSimdAllTrueMaskNode: Create a mask with all 
bits set to true // // Arguments: -// simdBaseJitType -- the base jit type of the nodes being masked +// simdBaseType -- the base type of the nodes being masked // // Return Value: // The mask // -GenTree* Compiler::gtNewSimdAllTrueMaskNode(CorInfoType simdBaseJitType) +GenTree* Compiler::gtNewSimdAllTrueMaskNode(var_types simdBaseType) { // Import as a constant mask - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); - GenTreeMskCon* mskCon = gtNewMskConNode(TYP_MASK); + GenTreeMskCon* mskCon = gtNewMskConNode(TYP_MASK); // TODO-SVE: For agnostic VL, vector type may not be simd16_t diff --git a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp index 978c24c19551c6..8f81cdd88fe5e5 100644 --- a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp @@ -1883,7 +1883,7 @@ void CodeGen::genBaseIntrinsic(GenTreeHWIntrinsic* node, insOpts instOptions) if (op1->OperIsLong()) { - node->SetSimdBaseJitType(CORINFO_TYPE_INT); + node->SetSimdBaseType(TYP_INT); bool canCombineLoad = false; GenTree* loPart = op1->gtGetOp1(); diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index e897198470c177..93c75e263682db 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -1127,7 +1127,7 @@ int HWIntrinsicInfo::lookupIval(Compiler* comp, NamedIntrinsic id, var_types sim // Return Value: // return the IR of semantic alternative on non-const imm-arg // -GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType) +GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types simdBaseType) { assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic) || HWIntrinsicInfo::MaybeNoJmpTableImm(intrinsic)); switch (intrinsic) @@ -1149,15 +1149,13 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT GenTree* op2 = 
impPopStack().val; GenTree* op1 = impSIMDPopStack(); - GenTree* tmpOp = gtNewSimdCreateScalarNode(TYP_SIMD16, op2, CORINFO_TYPE_INT, 16); - return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, simdBaseJitType, genTypeSize(simdType)); + GenTree* tmpOp = gtNewSimdCreateScalarNode(TYP_SIMD16, op2, TYP_INT, 16); + return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, simdBaseType, genTypeSize(simdType)); } case NI_AVX512_RotateLeft: case NI_AVX512_RotateRight: { - var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); - // These intrinsics have variants that take op2 in a simd register and read a unique shift per element intrinsic = static_cast(intrinsic + 1); @@ -1174,8 +1172,8 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT op2 = gtNewCastNode(TYP_LONG, op2, /* fromUnsigned */ true, TYP_LONG); } - GenTree* tmpOp = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, genTypeSize(simdType)); - return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, simdBaseJitType, genTypeSize(simdType)); + GenTree* tmpOp = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseType, genTypeSize(simdType)); + return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, simdBaseType, genTypeSize(simdType)); } default: @@ -1212,7 +1210,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig R2RARG(CORINFO_CONST_LOOKUP* entryPoint), - CorInfoType simdBaseJitType, + var_types simdBaseType, var_types retType, unsigned simdSize, bool mustExpand) @@ -1225,10 +1223,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); - var_types simdBaseType = TYP_UNKNOWN; if (simdSize != 0) { - simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); } @@ -1257,7 +1253,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdAbsNode(retType, op1, simdBaseType, simdSize); } break; } @@ -1274,7 +1270,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -1285,7 +1281,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { intrinsic = NI_AVX2_AndNotVector; simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig); - simdBaseType = JitType2PreciseVarType(simdBaseJitType); compFloatingPointUsed = true; } else @@ -1302,7 +1297,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 2); - if (simdBaseType != TYP_UNKNOWN) + if (simdSize != 0) { // We don't want to support creating AND_NOT nodes prior to LIR // as it can break important optimizations. 
We'll produces this @@ -1313,8 +1308,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - op1 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize)); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); + op1 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseType, simdSize)); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseType, simdSize); } else { @@ -1344,7 +1339,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseType, simdSize); } else if (varTypeIsSmall(simdBaseType)) { @@ -1362,7 +1357,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = NI_X86Base_AddSaturate; } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } else if (varTypeIsUnsigned(simdBaseType)) { @@ -1372,11 +1367,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* cns = gtNewAllBitsSetConNode(retType); GenTree* op1Dup1 = fgMakeMultiUse(&op1); - GenTree* tmp = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); + GenTree* tmp = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseType, simdSize); GenTree* tmpDup1 = fgMakeMultiUse(&tmp); - GenTree* msk = gtNewSimdCmpOpNode(GT_LT, retType, tmp, op1Dup1, simdBaseJitType, simdSize); + GenTree* msk = gtNewSimdCmpOpNode(GT_LT, retType, tmp, op1Dup1, simdBaseType, simdSize); - retNode = gtNewSimdCndSelNode(retType, msk, cns, tmpDup1, simdBaseJitType, simdSize); + retNode = gtNewSimdCndSelNode(retType, msk, cns, tmpDup1, simdBaseType, simdSize); } else { @@ -1418,13 +1413,13 @@ 
GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* op1Dup1 = fgMakeMultiUse(&op1); GenTree* op2Dup1 = fgMakeMultiUse(&op2); - GenTree* tmp = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); + GenTree* tmp = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseType, simdSize); GenTree* tmpDup1 = fgMakeMultiUse(&tmp); GenTree* tmpDup2 = gtCloneExpr(tmpDup1); - GenTree* msk = gtNewSimdIsNegativeNode(retType, tmpDup1, simdBaseJitType, simdSize); - GenTree* ovf = gtNewSimdCndSelNode(retType, msk, maxCns, minCns, simdBaseJitType, simdSize); + GenTree* msk = gtNewSimdIsNegativeNode(retType, tmpDup1, simdBaseType, simdSize); + GenTree* ovf = gtNewSimdCndSelNode(retType, msk, maxCns, minCns, simdBaseType, simdSize); // The mask we need is ((a ^ b) & ~(b ^ c)) < 0 @@ -1437,21 +1432,20 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // 0x18 = A ? norBC : andBC // a ? ~(b | c) : (b & c) msk = gtNewSimdTernaryLogicNode(retType, tmp, op1Dup1, op2Dup1, gtNewIconNode(0x18), - simdBaseJitType, simdSize); + simdBaseType, simdSize); } else { GenTree* op1Dup2 = gtCloneExpr(op1Dup1); - GenTree* msk2 = gtNewSimdBinOpNode(GT_XOR, retType, tmp, op1Dup1, simdBaseJitType, simdSize); - GenTree* msk3 = - gtNewSimdBinOpNode(GT_XOR, retType, op1Dup2, op2Dup1, simdBaseJitType, simdSize); + GenTree* msk2 = gtNewSimdBinOpNode(GT_XOR, retType, tmp, op1Dup1, simdBaseType, simdSize); + GenTree* msk3 = gtNewSimdBinOpNode(GT_XOR, retType, op1Dup2, op2Dup1, simdBaseType, simdSize); - msk = gtNewSimdBinOpNode(GT_AND_NOT, retType, msk2, msk3, simdBaseJitType, simdSize); + msk = gtNewSimdBinOpNode(GT_AND_NOT, retType, msk2, msk3, simdBaseType, simdSize); } - msk = gtNewSimdIsNegativeNode(retType, msk, simdBaseJitType, simdSize); - retNode = gtNewSimdCndSelNode(retType, msk, ovf, tmpDup2, simdBaseJitType, simdSize); + msk = gtNewSimdIsNegativeNode(retType, msk, simdBaseType, simdSize); + retNode = gtNewSimdCndSelNode(retType, msk, ovf, 
tmpDup2, simdBaseType, simdSize); } } break; @@ -1471,8 +1465,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - op2 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op2, simdBaseJitType, simdSize)); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); + op2 = gtFoldExpr(gtNewSimdUnOpNode(GT_NOT, retType, op2, simdBaseType, simdSize)); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1537,7 +1531,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert((retType == TYP_SIMD8) || (retType == TYP_SIMD12)); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); break; } @@ -1547,10 +1541,10 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(retType == TYP_SIMD16); assert(HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic)); - CorInfoType op1SimdBaseJitType = - getBaseJitTypeAndSizeOfSIMDType(info.compCompHnd->getArgClass(sig, sig->args), &simdSize); + var_types op1SimdBaseType = + getBaseTypeAndSizeOfSIMDType(info.compCompHnd->getArgClass(sig, sig->args), &simdSize); - assert(simdBaseJitType == op1SimdBaseJitType); + assert(simdBaseType == op1SimdBaseType); switch (getSIMDTypeForSize(simdSize)) { @@ -1571,15 +1565,15 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, return vecCon; } - op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseJitType, 8); + op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseType, 8); GenTree* idx = gtNewIconNode(2, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); + op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseType, 16); idx 
= gtNewIconNode(3, TYP_INT); zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseType, 16); break; } @@ -1599,11 +1593,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, return vecCon; } - op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseJitType, 12); + op1 = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseType, 12); GenTree* idx = gtNewIconNode(3, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseType, 16); break; } @@ -1627,7 +1621,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = simdSize == YMM_REGSIZE_BYTES ? NI_Vector256_GetLower : NI_Vector512_GetLower128; op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); break; } @@ -1644,11 +1638,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); assert(retType == TYP_SIMD16); - assert(simdBaseJitType == CORINFO_TYPE_FLOAT); + assert(simdBaseType == TYP_FLOAT); assert((simdSize == 8) || (simdSize == 12)); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_AsVector128Unsafe, simdBaseType, simdSize); break; } @@ -1758,7 +1752,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sizeFound); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, convertIntrinsic, simdBaseJitType, convertSize); + retNode = 
gtNewSimdHWIntrinsicNode(retType, op1, convertIntrinsic, simdBaseType, convertSize); break; } @@ -1772,7 +1766,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1785,7 +1779,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseType, simdSize); break; } @@ -1802,7 +1796,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCeilNode(retType, op1, simdBaseType, simdSize); break; } @@ -1816,7 +1810,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize); + retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseType, simdSize); break; } @@ -1843,7 +1837,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = NI_AVX512_ConvertToVector128Double; } op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); } break; } @@ -1856,7 +1850,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_INT, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNode(retType, op1, TYP_INT, simdBaseType, simdSize); break; } @@ -1873,7 +1867,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_INT, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_INT, simdBaseType, simdSize); break; } @@ -1887,7 +1881,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_LONG, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNode(retType, op1, TYP_LONG, simdBaseType, simdSize); } break; } @@ -1907,7 +1901,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_LONG, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_LONG, simdBaseType, simdSize); } break; } @@ -1956,7 +1950,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (intrinsic != NI_Illegal) { op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); } break; } @@ -1971,7 +1965,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_UINT, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNode(retType, op1, TYP_UINT, simdBaseType, simdSize); } break; } @@ -1991,7 +1985,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_UINT, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_UINT, 
simdBaseType, simdSize); } break; } @@ -2006,7 +2000,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_ULONG, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNode(retType, op1, TYP_ULONG, simdBaseType, simdSize); } break; } @@ -2026,7 +2020,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdCvtNativeNode(retType, op1, CORINFO_TYPE_ULONG, simdBaseJitType, simdSize); + retNode = gtNewSimdCvtNativeNode(retType, op1, TYP_ULONG, simdBaseType, simdSize); } break; } @@ -2038,7 +2032,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (sig->numArgs == 1) { op1 = impPopStack().val; - retNode = gtNewSimdCreateBroadcastNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateBroadcastNode(retType, op1, simdBaseType, simdSize); break; } @@ -2201,8 +2195,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } else { - retNode = - gtNewSimdHWIntrinsicNode(retType, std::move(nodeBuilder), intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, std::move(nodeBuilder), intrinsic, simdBaseType, simdSize); } break; } @@ -2214,7 +2207,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impPopStack().val; - retNode = gtNewSimdCreateScalarNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateScalarNode(retType, op1, simdBaseType, simdSize); break; } @@ -2225,7 +2218,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impPopStack().val; - retNode = gtNewSimdCreateScalarUnsafeNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateScalarUnsafeNode(retType, op1, simdBaseType, 
simdSize); break; } @@ -2256,7 +2249,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impPopStack().val; - retNode = gtNewSimdCreateSequenceNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCreateSequenceNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -2292,7 +2285,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseType, simdSize); break; } @@ -2317,13 +2310,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize == 64) || varTypeIsByte(simdBaseType) || varTypeIsLong(simdBaseType)) { // The lowering for Dot doesn't handle these cases, so import as Sum(left * right) - retNode = gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdSumNode(retType, retNode, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseType, simdSize); + retNode = gtNewSimdSumNode(retType, retNode, simdBaseType, simdSize); break; } - retNode = gtNewSimdDotProdNode(simdType, op1, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdToScalarNode(retType, retNode, simdBaseJitType, simdSize); + retNode = gtNewSimdDotProdNode(simdType, op1, op2, simdBaseType, simdSize); + retNode = gtNewSimdToScalarNode(retType, retNode, simdBaseType, simdSize); break; } @@ -2339,7 +2332,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2356,7 +2349,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2372,7 +2365,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2393,8 +2386,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { op1 = impSIMDPopStack(); - op1 = gtFoldExpr(gtNewSimdCvtVectorToMaskNode(TYP_MASK, op1, simdBaseJitType, simdSize)); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); + op1 = gtFoldExpr(gtNewSimdCvtVectorToMaskNode(TYP_MASK, op1, simdBaseType, simdSize)); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize); break; } @@ -2428,7 +2421,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case TYP_UINT: case TYP_FLOAT: { - simdBaseJitType = CORINFO_TYPE_FLOAT; + simdBaseType = TYP_FLOAT; op1 = impSIMDPopStack(); moveMaskIntrinsic = (simdSize == 32) ? NI_AVX_MoveMask : NI_X86Base_MoveMask; break; @@ -2438,7 +2431,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case TYP_ULONG: case TYP_DOUBLE: { - simdBaseJitType = CORINFO_TYPE_DOUBLE; + simdBaseType = TYP_DOUBLE; op1 = impSIMDPopStack(); moveMaskIntrinsic = (simdSize == 32) ? 
NI_AVX_MoveMask : NI_X86Base_MoveMask; break; @@ -2453,7 +2446,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(moveMaskIntrinsic != NI_Illegal); assert(op1 != nullptr); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, moveMaskIntrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, moveMaskIntrinsic, simdBaseType, simdSize); } break; } @@ -2471,7 +2464,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdFloorNode(retType, op1, simdBaseType, simdSize); break; } @@ -2488,7 +2481,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseJitType, simdSize); + retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseType, simdSize); } break; } @@ -2543,7 +2536,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector512_get_Indices: { assert(sig->numArgs == 0); - retNode = gtNewSimdGetIndicesNode(retType, simdBaseJitType, simdSize); + retNode = gtNewSimdGetIndicesNode(retType, simdBaseType, simdSize); break; } @@ -2703,7 +2696,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -2719,7 +2712,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2735,7 +2728,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = 
impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2751,7 +2744,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2768,7 +2761,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2784,7 +2777,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2800,7 +2793,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -2819,7 +2812,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdIsEvenIntegerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsEvenIntegerNode(retType, op1, simdBaseType, simdSize); break; } @@ -2832,7 +2825,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = 
gtNewSimdIsFiniteNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsFiniteNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2846,7 +2839,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdIsInfinityNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsInfinityNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2863,7 +2856,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdIsIntegerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsIntegerNode(retType, op1, simdBaseType, simdSize); break; } @@ -2873,7 +2866,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNaNNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNaNNode(retType, op1, simdBaseType, simdSize); break; } @@ -2886,7 +2879,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNegativeNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNegativeNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2900,7 +2893,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdIsNegativeInfinityNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNegativeInfinityNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2914,7 +2907,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = 
gtNewSimdIsNormalNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsNormalNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2933,7 +2926,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdIsOddIntegerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsOddIntegerNode(retType, op1, simdBaseType, simdSize); break; } @@ -2946,7 +2939,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdIsPositiveNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsPositiveNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2960,7 +2953,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdIsPositiveInfinityNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsPositiveInfinityNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2974,7 +2967,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdIsSubnormalNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsSubnormalNode(retType, op1, simdBaseType, simdSize); } break; } @@ -2985,7 +2978,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdIsZeroNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdIsZeroNode(retType, op1, simdBaseType, simdSize); break; } @@ -3001,7 +2994,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, 
simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3017,7 +3010,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3033,7 +3026,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3050,7 +3043,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3066,7 +3059,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3082,7 +3075,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3118,7 +3111,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); } - retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdLoadNode(retType, op1, simdBaseType, 
simdSize); break; } @@ -3136,7 +3129,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseType, simdSize); break; } @@ -3154,7 +3147,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseType, simdSize); break; } @@ -3277,7 +3270,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseType, simdSize); break; } @@ -3305,12 +3298,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType) && compExactlyDependsOn(InstructionSet_AVX2)) { - retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseJitType, simdSize); + retNode = gtNewSimdFmaNode(retType, op1, op2, op3, simdBaseType, simdSize); } else { - GenTree* mulNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdBinOpNode(GT_ADD, retType, mulNode, op3, simdBaseJitType, simdSize); + GenTree* mulNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseType, simdSize); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, mulNode, op3, simdBaseType, simdSize); } break; } @@ -3327,7 +3320,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseType, simdSize); } break; } @@ 
-3347,15 +3340,15 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (simdBaseType == TYP_DOUBLE) { // gtNewSimdNarrowNode uses the base type of the return for the simdBaseType - retNode = gtNewSimdNarrowNode(retType, op1, op2, CORINFO_TYPE_FLOAT, simdSize); + retNode = gtNewSimdNarrowNode(retType, op1, op2, TYP_FLOAT, simdSize); } else if ((simdSize == 16) && ((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_INT))) { // PackSignedSaturate uses the base type of the return for the simdBaseType - simdBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_BYTE : CORINFO_TYPE_SHORT; + simdBaseType = (simdBaseType == TYP_SHORT) ? TYP_BYTE : TYP_SHORT; intrinsic = NI_X86Base_PackSignedSaturate; - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } else if (compOpportunisticallyDependsOn(InstructionSet_AVX512)) { @@ -3365,8 +3358,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { intrinsic = NI_Vector256_ToVector512Unsafe; - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD64, op1, intrinsic, simdBaseJitType, simdSize); - op1 = gtNewSimdWithUpperNode(TYP_SIMD64, op1, op2, simdBaseJitType, simdSize * 2); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD64, op1, intrinsic, simdBaseType, simdSize); + op1 = gtNewSimdWithUpperNode(TYP_SIMD64, op1, op2, simdBaseType, simdSize * 2); } switch (simdBaseType) @@ -3418,8 +3411,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(simdSize == 16); intrinsic = NI_Vector128_ToVector256Unsafe; - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD32, op1, intrinsic, simdBaseJitType, simdSize); - op1 = gtNewSimdWithUpperNode(TYP_SIMD32, op1, op2, simdBaseJitType, simdSize * 2); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD32, op1, intrinsic, simdBaseType, simdSize); + op1 = gtNewSimdWithUpperNode(TYP_SIMD32, op1, op2, simdBaseType, simdSize * 2); switch (simdBaseType) { 
@@ -3456,20 +3449,20 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (simdSize == 64) { - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD32, op1, intrinsic, simdBaseJitType, simdSize); - op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD32, op2, intrinsic, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD32, op1, intrinsic, simdBaseType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD32, op2, intrinsic, simdBaseType, simdSize); - retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseType, simdSize); } else { - retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize * 2); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseType, simdSize * 2); } } else { // gtNewSimdNarrowNode uses the base type of the return for the simdBaseType - CorInfoType narrowSimdBaseJitType; + var_types narrowSimdBaseType; GenTreeVecCon* minCns = varTypeIsSigned(simdBaseType) ? 
gtNewVconNode(retType) : nullptr; GenTreeVecCon* maxCns = gtNewVconNode(retType); @@ -3481,14 +3474,14 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, minCns->EvaluateBroadcastInPlace(INT8_MIN); maxCns->EvaluateBroadcastInPlace(INT8_MAX); - narrowSimdBaseJitType = CORINFO_TYPE_BYTE; + narrowSimdBaseType = TYP_BYTE; break; } case TYP_USHORT: { maxCns->EvaluateBroadcastInPlace(UINT8_MAX); - narrowSimdBaseJitType = CORINFO_TYPE_UBYTE; + narrowSimdBaseType = TYP_UBYTE; break; } @@ -3497,14 +3490,14 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, minCns->EvaluateBroadcastInPlace(INT16_MIN); maxCns->EvaluateBroadcastInPlace(INT16_MAX); - narrowSimdBaseJitType = CORINFO_TYPE_SHORT; + narrowSimdBaseType = TYP_SHORT; break; } case TYP_UINT: { maxCns->EvaluateBroadcastInPlace(UINT16_MAX); - narrowSimdBaseJitType = CORINFO_TYPE_USHORT; + narrowSimdBaseType = TYP_USHORT; break; } @@ -3513,14 +3506,14 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, minCns->EvaluateBroadcastInPlace(INT32_MIN); maxCns->EvaluateBroadcastInPlace(INT32_MAX); - narrowSimdBaseJitType = CORINFO_TYPE_INT; + narrowSimdBaseType = TYP_INT; break; } case TYP_ULONG: { maxCns->EvaluateBroadcastInPlace(UINT32_MAX); - narrowSimdBaseJitType = CORINFO_TYPE_UINT; + narrowSimdBaseType = TYP_UINT; break; } @@ -3537,18 +3530,18 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (minCns != nullptr) { - op1 = gtNewSimdMinMaxNode(retType, op1, minCns, simdBaseJitType, simdSize, /* isMax */ true, + op1 = gtNewSimdMinMaxNode(retType, op1, minCns, simdBaseType, simdSize, /* isMax */ true, /* isMagnitude */ false, /* isNumber */ false); - op2 = gtNewSimdMinMaxNode(retType, op2, gtCloneExpr(minCns), simdBaseJitType, simdSize, + op2 = gtNewSimdMinMaxNode(retType, op2, gtCloneExpr(minCns), simdBaseType, simdSize, /* isMax */ true, /* isMagnitude */ false, /* isNumber */ false); } - op1 = gtNewSimdMinMaxNode(retType, op1, maxCns, simdBaseJitType, 
simdSize, /* isMax */ false, + op1 = gtNewSimdMinMaxNode(retType, op1, maxCns, simdBaseType, simdSize, /* isMax */ false, /* isMagnitude */ false, /* isNumber */ false); - op2 = gtNewSimdMinMaxNode(retType, op2, gtCloneExpr(maxCns), simdBaseJitType, simdSize, + op2 = gtNewSimdMinMaxNode(retType, op2, gtCloneExpr(maxCns), simdBaseType, simdSize, /* isMax */ false, /* isMagnitude */ false, /* isNumber */ false); - retNode = gtNewSimdNarrowNode(retType, op1, op2, narrowSimdBaseJitType, simdSize); + retNode = gtNewSimdNarrowNode(retType, op1, op2, narrowSimdBaseType, simdSize); } } break; @@ -3564,7 +3557,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, compOpportunisticallyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseType, simdSize); } break; } @@ -3575,7 +3568,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseType, simdSize); break; } @@ -3591,7 +3584,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3617,7 +3610,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3633,7 +3626,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = 
impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3651,7 +3644,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3667,7 +3660,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseType, simdSize); } break; } @@ -3688,7 +3681,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdRoundNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdRoundNode(retType, op1, simdBaseType, simdSize); break; } @@ -3720,7 +3713,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = NI_AVX2_ShiftLeftLogicalVariable; } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } break; } @@ -3772,7 +3765,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetMethodHandle(this, method R2RARG(*entryPoint)); break; @@ -3789,7 +3782,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = 
gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize, isShuffleNative); + retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseType, simdSize, isShuffleNative); } break; } @@ -3803,7 +3796,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { op1 = impSIMDPopStack(); - retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdSqrtNode(retType, op1, simdBaseType, simdSize); } break; } @@ -3824,7 +3817,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreNode(op1, op2, simdBaseType, simdSize); break; } @@ -3866,7 +3859,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreNode(op2, op1, simdBaseType, simdSize); break; } @@ -3889,7 +3882,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseType, simdSize); break; } @@ -3912,7 +3905,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseType, simdSize); break; } @@ -3930,7 +3923,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseType, simdSize); } else if (varTypeIsSmall(simdBaseType)) { @@ -3948,7 +3941,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, intrinsic = 
NI_X86Base_SubtractSaturate; } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } else if (varTypeIsUnsigned(simdBaseType)) { @@ -3958,11 +3951,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* cns = gtNewZeroConNode(retType); GenTree* op1Dup1 = fgMakeMultiUse(&op1); - GenTree* tmp = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); + GenTree* tmp = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseType, simdSize); GenTree* tmpDup1 = fgMakeMultiUse(&tmp); - GenTree* msk = gtNewSimdCmpOpNode(GT_GT, retType, tmp, op1Dup1, simdBaseJitType, simdSize); + GenTree* msk = gtNewSimdCmpOpNode(GT_GT, retType, tmp, op1Dup1, simdBaseType, simdSize); - retNode = gtNewSimdCndSelNode(retType, msk, cns, tmpDup1, simdBaseJitType, simdSize); + retNode = gtNewSimdCndSelNode(retType, msk, cns, tmpDup1, simdBaseType, simdSize); } else { @@ -4004,13 +3997,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* op1Dup1 = fgMakeMultiUse(&op1); GenTree* op2Dup1 = fgMakeMultiUse(&op2); - GenTree* tmp = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); + GenTree* tmp = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseType, simdSize); GenTree* tmpDup1 = fgMakeMultiUse(&tmp); GenTree* tmpDup2 = gtCloneExpr(tmpDup1); - GenTree* msk = gtNewSimdIsNegativeNode(retType, tmpDup1, simdBaseJitType, simdSize); - GenTree* ovf = gtNewSimdCndSelNode(retType, msk, maxCns, minCns, simdBaseJitType, simdSize); + GenTree* msk = gtNewSimdIsNegativeNode(retType, tmpDup1, simdBaseType, simdSize); + GenTree* ovf = gtNewSimdCndSelNode(retType, msk, maxCns, minCns, simdBaseType, simdSize); // The mask we need is ((a ^ b) & (b ^ c)) < 0 @@ -4023,21 +4016,20 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // 0x18 = B ? norAC : andAC // b ? 
~(a | c) : (a & c) msk = gtNewSimdTernaryLogicNode(retType, tmp, op1Dup1, op2Dup1, gtNewIconNode(0x24), - simdBaseJitType, simdSize); + simdBaseType, simdSize); } else { GenTree* op1Dup2 = gtCloneExpr(op1Dup1); - GenTree* msk2 = gtNewSimdBinOpNode(GT_XOR, retType, tmp, op1Dup1, simdBaseJitType, simdSize); - GenTree* msk3 = - gtNewSimdBinOpNode(GT_XOR, retType, op1Dup2, op2Dup1, simdBaseJitType, simdSize); + GenTree* msk2 = gtNewSimdBinOpNode(GT_XOR, retType, tmp, op1Dup1, simdBaseType, simdSize); + GenTree* msk3 = gtNewSimdBinOpNode(GT_XOR, retType, op1Dup2, op2Dup1, simdBaseType, simdSize); - msk = gtNewSimdBinOpNode(GT_AND, retType, msk2, msk3, simdBaseJitType, simdSize); + msk = gtNewSimdBinOpNode(GT_AND, retType, msk2, msk3, simdBaseType, simdSize); } - msk = gtNewSimdIsNegativeNode(retType, msk, simdBaseJitType, simdSize); - retNode = gtNewSimdCndSelNode(retType, msk, ovf, tmpDup2, simdBaseJitType, simdSize); + msk = gtNewSimdIsNegativeNode(retType, msk, simdBaseType, simdSize); + retNode = gtNewSimdCndSelNode(retType, msk, ovf, tmpDup2, simdBaseType, simdSize); } } break; @@ -4050,7 +4042,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdSumNode(retType, op1, simdBaseType, simdSize); break; } @@ -4061,7 +4053,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdToScalarNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdToScalarNode(retType, op1, simdBaseType, simdSize); break; } @@ -4078,7 +4070,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(); - retNode = gtNewSimdTruncNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdTruncNode(retType, op1, simdBaseType, simdSize); break; } @@ -4088,7 +4080,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(); - retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseType, simdSize); break; } @@ -4105,7 +4097,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); - retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseType, simdSize); } break; } @@ -4121,7 +4113,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { op1 = impSIMDPopStack(); - retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize); + retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseType, simdSize); } break; } @@ -4144,7 +4136,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* indexOp = impPopStack().val; GenTree* vectorOp = impSIMDPopStack(); - retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize); + retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseType, simdSize); break; } @@ -4155,7 +4147,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -4166,7 +4158,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize); + retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseType, simdSize); break; } @@ -4179,7 +4171,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, 
op2, simdBaseJitType, simdSize); + retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseType, simdSize); break; } @@ -4200,7 +4192,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 3); assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); assert(retType == TYP_STRUCT); - assert(simdBaseJitType != CORINFO_TYPE_UNDEF); + assert(simdBaseType != TYP_UNDEF); op3 = impPopStack().val; op2 = impPopStack().val; @@ -4209,7 +4201,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTreeHWIntrinsic* divRemIntrinsic = gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic); // Store the type from signature into SIMD base type for convenience - divRemIntrinsic->SetSimdBaseJitType(simdBaseJitType); + divRemIntrinsic->SetSimdBaseType(simdBaseType); retNode = impStoreMultiRegValueToVar(divRemIntrinsic, sig->retTypeSigClass DEBUGARG(CorInfoCallConvExtension::Managed)); @@ -4233,7 +4225,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - assert(varTypeIsFloating(JitType2PreciseVarType(simdBaseJitType))); + assert(varTypeIsFloating(simdBaseType)); if (supportsAvx) { @@ -4242,7 +4234,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, int ival = HWIntrinsicInfo::lookupIval(this, intrinsic, simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(ival), NI_AVX_CompareScalar, - simdBaseJitType, simdSize); + simdBaseType, simdSize); } else { @@ -4282,9 +4274,9 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } } - retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_X86Base_MoveScalar, - simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, 
retNode, NI_X86Base_MoveScalar, simdBaseType, + simdSize); } break; } @@ -4297,7 +4289,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); assert(JITtype2varType(sig->retType) == TYP_VOID); op1 = impPopStack().val; - retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, intrinsic, CORINFO_TYPE_UBYTE, 0); + retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, intrinsic, TYP_UBYTE, 0); break; } @@ -4325,11 +4317,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_ARG_LIST_HANDLE argList = info.compCompHnd->getArgNext(sig->args); CORINFO_CLASS_HANDLE argClass; - CorInfoType argJitType = strip(info.compCompHnd->getArgType(sig, argList, &argClass)); + var_types argType = JitType2PreciseVarType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op2 = impPopStack().val; op1 = impPopStack().val; - retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, NI_X86Base_StoreNonTemporal, argJitType, 0); + retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, NI_X86Base_StoreNonTemporal, argType, 0); break; } @@ -4344,7 +4336,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_AVX512v2_PermuteVar32x8: case NI_AVX512v2_PermuteVar64x8: { - simdBaseJitType = getBaseJitTypeOfSIMDType(sig->retTypeSigClass); + simdBaseType = getBaseTypeOfSIMDType(sig->retTypeSigClass); impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); @@ -4352,7 +4344,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* idxVector = impSIMDPopStack(); GenTree* srcVector = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, idxVector, srcVector, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, idxVector, srcVector, intrinsic, simdBaseType, simdSize); break; } @@ -4366,7 +4358,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = 
impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseType, simdSize); if (!retNode->isRMWHWIntrinsic(this)) { @@ -4793,7 +4785,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // GT_AND_NOT takes them as `op1 & ~op2` and x86 reorders them back to `~op1 & op2` // since the underlying andnps/andnpd/pandn instructions take them as such - return gtNewSimdBinOpNode(GT_AND_NOT, retType, *val3, *val2, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_AND_NOT, retType, *val3, *val2, simdBaseType, simdSize); } else { @@ -4841,7 +4833,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(!unusedVal2); assert(!unusedVal3); - return gtNewSimdBinOpNode(GT_AND, retType, *val2, *val3, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_AND, retType, *val2, *val3, simdBaseType, simdSize); } case TernaryLogicOperKind::Nand: @@ -4883,7 +4875,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(!unusedVal2); assert(!unusedVal3); - return gtNewSimdBinOpNode(GT_OR, retType, *val2, *val3, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_OR, retType, *val2, *val3, simdBaseType, simdSize); } case TernaryLogicOperKind::Nor: @@ -4925,7 +4917,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(!unusedVal2); assert(!unusedVal3); - return gtNewSimdBinOpNode(GT_XOR, retType, *val2, *val3, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_XOR, retType, *val2, *val3, simdBaseType, simdSize); } case TernaryLogicOperKind::Xnor: @@ -4967,7 +4959,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } } - retNode = gtNewSimdTernaryLogicNode(retType, *val1, *val2, *val3, op4, simdBaseJitType, simdSize); + retNode = gtNewSimdTernaryLogicNode(retType, *val1, *val2, *val3, op4, simdBaseType, simdSize); break; 
} } @@ -4976,7 +4968,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdTernaryLogicNode(retType, op1, op2, op3, op4, simdBaseJitType, simdSize); + retNode = gtNewSimdTernaryLogicNode(retType, op1, op2, op3, op4, simdBaseType, simdSize); break; } @@ -5001,19 +4993,17 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsSigned(simdBaseType)) { - simdBaseJitType = CORINFO_TYPE_BYTE; - simdBaseType = TYP_BYTE; + simdBaseType = TYP_BYTE; } else { - simdBaseJitType = CORINFO_TYPE_UBYTE; - simdBaseType = TYP_UBYTE; + simdBaseType = TYP_UBYTE; } } intrinsic = NI_AVX512_BlendVariableMask; - op3 = gtFoldExpr(gtNewSimdCvtVectorToMaskNode(TYP_MASK, op3, simdBaseJitType, simdSize)); + op3 = gtFoldExpr(gtNewSimdCvtVectorToMaskNode(TYP_MASK, op3, simdBaseType, simdSize)); } - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5035,7 +5025,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5077,11 +5067,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (op3 == nullptr) { - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); } else { - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); } break; } @@ -5102,7 +5092,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5122,7 +5112,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5141,7 +5131,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5161,7 +5151,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5180,7 +5170,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5199,7 +5189,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5218,7 +5208,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5237,7 +5227,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5256,7 +5246,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5275,7 +5265,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5294,7 +5284,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5313,7 +5303,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(); op1 = impSIMDPopStack(); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseType, simdSize); break; } @@ -5327,8 +5317,8 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); intrinsic = NI_AVX512_CompressMask; - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5348,8 +5338,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } intrinsic = NI_AVX512_CompressStoreMask; - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5363,8 +5353,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(); intrinsic = NI_AVX512_ExpandMask; - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5392,7 +5382,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(op3BaseJitType); break; } @@ -5413,8 +5403,8 @@ 
GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } intrinsic = NI_AVX512_ExpandLoadMask; - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5434,8 +5424,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } intrinsic = (intrinsic == NI_AVX512_MaskLoad) ? NI_AVX512_MaskLoadMask : NI_AVX512_MaskLoadAlignedMask; - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5455,8 +5445,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } intrinsic = (intrinsic == NI_AVX512_MaskStore) ? 
NI_AVX512_MaskStoreMask : NI_AVX512_MaskStoreAlignedMask; - op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseJitType, simdSize); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); + op2 = gtNewSimdCvtVectorToMaskNode(TYP_MASK, op2, simdBaseType, simdSize); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseType, simdSize); break; } @@ -5467,7 +5457,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE argClass; var_types argType = TYP_UNKNOWN; unsigned int sizeBytes; - simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes); + simdBaseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes); var_types retType = getSIMDTypeForSize(sizeBytes); assert(sig->numArgs == 5); @@ -5498,7 +5488,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, SetOpLclRelatedToSIMDIntrinsic(op1); retNode = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(retType, getAllocator(CMK_ASTNode), intrinsic, - simdBaseJitType, simdSize, op1, op2, op3, op4, op5); + simdBaseType, simdSize, op1, op2, op3, op4, op5); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(indexBaseJitType); break; } @@ -5546,7 +5536,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { retType = getSIMDTypeForSize(simdSize); assert(retType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass))); - retNode = gtNewSimdCvtMaskToVectorNode(retType, gtFoldExpr(retNode), simdBaseJitType, simdSize); + retNode = gtNewSimdCvtMaskToVectorNode(retType, gtFoldExpr(retNode), simdBaseType, simdSize); } else if (isMinMaxIntrinsic) { @@ -5564,12 +5554,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (isNative) { assert(!isMagnitude && !isNumber); - retNode = gtNewSimdMinMaxNativeNode(retType, op1, op2, simdBaseJitType, simdSize, isMax); + retNode = gtNewSimdMinMaxNativeNode(retType, op1, op2, simdBaseType, 
simdSize, isMax); } else if ((simdSize != 32) || varTypeIsFloating(simdBaseType) || compOpportunisticallyDependsOn(InstructionSet_AVX2)) { - retNode = gtNewSimdMinMaxNode(retType, op1, op2, simdBaseJitType, simdSize, isMax, isMagnitude, isNumber); + retNode = gtNewSimdMinMaxNode(retType, op1, op2, simdBaseType, simdSize, isMax, isMagnitude, isNumber); } } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 5d67351639e840..c2ee4dd061d65e 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1169,7 +1169,7 @@ GenTree* Compiler::impGetNodeAddr(GenTree* val, unsigned curLevel, GenTreeFlags* // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. // -var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) +var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); @@ -1186,14 +1186,14 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoTyp if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; - CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); - if (simdBaseJitType != CORINFO_TYPE_UNDEF) + var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes); + if (simdBaseType != TYP_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { - *pSimdBaseJitType = simdBaseJitType; + *pSimdBaseJitType = simdBaseType; } // Also indicate that we use floating point registers. 
compFloatingPointUsed = true; diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index f7915a7b98ef61..805caea284e050 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -4324,14 +4324,14 @@ GenTree* Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd, GenTree* op2 = impImplicitR4orR8Cast(impPopStack().val, callType); GenTree* op1 = impImplicitR4orR8Cast(impPopStack().val, callType); - op3 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op3, callJitType, 16); - op2 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op2, callJitType, 16); - op1 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, callJitType, 16); + op3 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op3, JitType2PreciseVarType(callJitType), 16); + op2 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op2, JitType2PreciseVarType(callJitType), 16); + op1 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, JitType2PreciseVarType(callJitType), 16); - retNode = - gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_AVX2_MultiplyAddScalar, callJitType, 16); + retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_AVX2_MultiplyAddScalar, + JitType2PreciseVarType(callJitType), 16); - retNode = gtNewSimdToScalarNode(callType, retNode, callJitType, 16); + retNode = gtNewSimdToScalarNode(callType, retNode, JitType2PreciseVarType(callJitType), 16); break; } #elif defined(TARGET_ARM64) @@ -4352,16 +4352,16 @@ GenTree* Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd, GenTree* op2 = impImplicitR4orR8Cast(impPopStack().val, callType); GenTree* op1 = impImplicitR4orR8Cast(impPopStack().val, callType); - op3 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, op3, callJitType, 8); - op2 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, op2, callJitType, 8); - op1 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, op1, callJitType, 8); + op3 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, op3, JitType2PreciseVarType(callJitType), 8); + op2 = 
gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, op2, JitType2PreciseVarType(callJitType), 8); + op1 = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, op1, JitType2PreciseVarType(callJitType), 8); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, - callJitType, 8); + JitType2PreciseVarType(callJitType), 8); - retNode = gtNewSimdToScalarNode(callType, retNode, callJitType, 8); + retNode = gtNewSimdToScalarNode(callType, retNode, JitType2PreciseVarType(callJitType), 8); break; #endif @@ -4968,11 +4968,13 @@ GenTree* Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd, if (isNative) { assert(!isMagnitude && !isNumber); - retNode = gtNewSimdMinMaxNativeNode(callType, op1, op2, callJitType, 0, isMax); + retNode = + gtNewSimdMinMaxNativeNode(callType, op1, op2, JitType2PreciseVarType(callJitType), 0, isMax); } else { - retNode = gtNewSimdMinMaxNode(callType, op1, op2, callJitType, 0, isMax, isMagnitude, isNumber); + retNode = gtNewSimdMinMaxNode(callType, op1, op2, JitType2PreciseVarType(callJitType), 0, isMax, + isMagnitude, isNumber); } #endif // FEATURE_HW_INTRINSICS @@ -5808,7 +5810,8 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, if (hwIntrinsicId != NI_Illegal) { op1 = impPopStack().val; - res = gtNewSimdHWIntrinsicNode(retType, op1, hwIntrinsicId, baseJitType, 16); + res = + gtNewSimdHWIntrinsicNode(retType, op1, hwIntrinsicId, JitType2PreciseVarType(baseJitType), 16); if (varTypeIsSmall(tgtType)) { @@ -5878,7 +5881,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, result = gtNewScalarHWIntrinsicNode(baseType, op1, op2, hwintrinsic); // We use the simdBaseJitType to bring the type of the second argument to codegen - result->AsHWIntrinsic()->SetSimdBaseJitType(baseJitType); + 
result->AsHWIntrinsic()->SetSimdBaseType(JitType2PreciseVarType(baseJitType)); #elif defined(TARGET_ARM64) if (compOpportunisticallyDependsOn(InstructionSet_Crc32)) { @@ -5890,7 +5893,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, baseType = TYP_INT; // We use the simdBaseJitType to bring the type of the second argument to codegen - result->AsHWIntrinsic()->SetSimdBaseJitType(baseJitType); + result->AsHWIntrinsic()->SetSimdBaseType(JitType2PreciseVarType(baseJitType)); } #endif // TARGET_* #endif // FEATURE_HW_INTRINSICS @@ -10023,19 +10026,21 @@ GenTree* Compiler::impEstimateIntrinsic(CORINFO_METHOD_HANDLE method, std::swap(op1, op3); } - op3 = gtNewSimdCreateScalarUnsafeNode(simdType, op3, callJitType, simdSize); - op2 = gtNewSimdCreateScalarUnsafeNode(simdType, op2, callJitType, simdSize); - op1 = gtNewSimdCreateScalarUnsafeNode(simdType, op1, callJitType, simdSize); + op3 = gtNewSimdCreateScalarUnsafeNode(simdType, op3, JitType2PreciseVarType(callJitType), simdSize); + op2 = gtNewSimdCreateScalarUnsafeNode(simdType, op2, JitType2PreciseVarType(callJitType), simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(simdType, op1, JitType2PreciseVarType(callJitType), simdSize); - op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, op3, intrinsicId, callJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, op3, intrinsicId, + JitType2PreciseVarType(callJitType), simdSize); break; } case 1: { assert(!swapOp1AndOp3); - op1 = gtNewSimdCreateScalarUnsafeNode(simdType, op1, callJitType, simdSize); - op1 = gtNewSimdHWIntrinsicNode(simdType, op1, intrinsicId, callJitType, simdSize); + op1 = gtNewSimdCreateScalarUnsafeNode(simdType, op1, JitType2PreciseVarType(callJitType), simdSize); + op1 = + gtNewSimdHWIntrinsicNode(simdType, op1, intrinsicId, JitType2PreciseVarType(callJitType), simdSize); break; } @@ -10045,7 +10050,7 @@ GenTree* Compiler::impEstimateIntrinsic(CORINFO_METHOD_HANDLE method, } } - return 
gtNewSimdToScalarNode(callType, op1, callJitType, simdSize); + return gtNewSimdToScalarNode(callType, op1, JitType2PreciseVarType(callJitType), simdSize); } assert(!swapOp1AndOp3); diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index 02adf29fcf4beb..67ebf886dee705 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -98,13 +98,13 @@ GenTree* Compiler::impExpandHalfConstEquals( #ifdef FEATURE_HW_INTRINSICS if (varTypeIsSIMD(type)) { - return gtNewSimdBinOpNode(oper, type, op1, op2, CORINFO_TYPE_NATIVEUINT, genTypeSize(type)); + return gtNewSimdBinOpNode(oper, type, op1, op2, TYP_U_IMPL, genTypeSize(type)); } if (varTypeIsSIMD(op1)) { // E.g. a comparison of SIMD ops returning TYP_INT; assert(varTypeIsSIMD(op2)); - return gtNewSimdCmpOpAllNode(oper, type, op1, op2, CORINFO_TYPE_NATIVEUINT, genTypeSize(op1)); + return gtNewSimdCmpOpAllNode(oper, type, op1, op2, TYP_U_IMPL, genTypeSize(op1)); } #endif return gtNewOperNode(oper, type, op1, op2); diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 4bda0cfcb4c827..54fc502b84be88 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -1733,8 +1733,8 @@ class LocalAddressVisitor final : public GenTreeVisitor { // Handle case 1 or the float field of case 2 GenTree* indexNode = m_compiler->gtNewIconNode(offset / genTypeSize(elementType)); - hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode, - CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); + hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode, TYP_FLOAT, + genTypeSize(varDsc)); break; } @@ -1743,7 +1743,7 @@ class LocalAddressVisitor final : public GenTreeVisitor // Handle the Vector3 field of case 2 assert(genTypeSize(varDsc) == 16); hwiNode = m_compiler->gtNewSimdHWIntrinsicNode(elementType, lclNode, NI_Vector128_AsVector3, - CORINFO_TYPE_FLOAT, 16); + TYP_FLOAT, 16); 
break; } @@ -1757,14 +1757,14 @@ class LocalAddressVisitor final : public GenTreeVisitor assert(genTypeSize(elementType) * 2 == genTypeSize(varDsc)); if (offset == 0) { - hwiNode = m_compiler->gtNewSimdGetLowerNode(elementType, lclNode, CORINFO_TYPE_FLOAT, - genTypeSize(varDsc)); + hwiNode = + m_compiler->gtNewSimdGetLowerNode(elementType, lclNode, TYP_FLOAT, genTypeSize(varDsc)); } else { assert(offset == genTypeSize(elementType)); - hwiNode = m_compiler->gtNewSimdGetUpperNode(elementType, lclNode, CORINFO_TYPE_FLOAT, - genTypeSize(varDsc)); + hwiNode = + m_compiler->gtNewSimdGetUpperNode(elementType, lclNode, TYP_FLOAT, genTypeSize(varDsc)); } break; @@ -1791,9 +1791,8 @@ class LocalAddressVisitor final : public GenTreeVisitor { // Handle case 1 or the float field of case 2 GenTree* indexNode = m_compiler->gtNewIconNode(offset / genTypeSize(elementType)); - hwiNode = - m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode, elementNode, - CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); + hwiNode = m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode, + elementNode, TYP_FLOAT, genTypeSize(varDsc)); break; } @@ -1806,17 +1805,17 @@ class LocalAddressVisitor final : public GenTreeVisitor // simdLclNode[3] as the new value. 
This gives us a new TYP_SIMD16 with all elements in the // right spots - elementNode = m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, elementNode, - NI_Vector128_AsVector128Unsafe, - CORINFO_TYPE_FLOAT, 12); + elementNode = + m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, elementNode, + NI_Vector128_AsVector128Unsafe, TYP_FLOAT, 12); GenTree* indexNode1 = m_compiler->gtNewIconNode(3, TYP_INT); - simdLclNode = m_compiler->gtNewSimdGetElementNode(TYP_FLOAT, simdLclNode, indexNode1, - CORINFO_TYPE_FLOAT, 16); + simdLclNode = + m_compiler->gtNewSimdGetElementNode(TYP_FLOAT, simdLclNode, indexNode1, TYP_FLOAT, 16); GenTree* indexNode2 = m_compiler->gtNewIconNode(3, TYP_INT); hwiNode = m_compiler->gtNewSimdWithElementNode(TYP_SIMD16, elementNode, indexNode2, simdLclNode, - CORINFO_TYPE_FLOAT, 16); + TYP_FLOAT, 16); break; } @@ -1831,13 +1830,13 @@ class LocalAddressVisitor final : public GenTreeVisitor if (offset == 0) { hwiNode = m_compiler->gtNewSimdWithLowerNode(varDsc->TypeGet(), simdLclNode, elementNode, - CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); + TYP_FLOAT, genTypeSize(varDsc)); } else { assert(offset == genTypeSize(elementType)); hwiNode = m_compiler->gtNewSimdWithUpperNode(varDsc->TypeGet(), simdLclNode, elementNode, - CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); + TYP_FLOAT, genTypeSize(varDsc)); } break; diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 109afd011d8190..8ad010191ea8b8 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -1445,10 +1445,10 @@ var_types Compiler::StructPromotionHelper::TryPromoteValueClassAsPrimitive(CORIN #ifdef FEATURE_SIMD if (compiler->isRuntimeIntrinsicsNamespace(namespaceName) || compiler->isNumericsNamespace(namespaceName)) { - unsigned simdSize; - CorInfoType simdBaseJitType = compiler->getBaseJitTypeAndSizeOfSIMDType(node.simdTypeHnd, &simdSize); + unsigned simdSize; + var_types simdBaseType = compiler->getBaseTypeAndSizeOfSIMDType(node.simdTypeHnd, &simdSize); // We 
will only promote fields of SIMD types that fit into a SIMD register. - if (simdBaseJitType != CORINFO_TYPE_UNDEF) + if (simdBaseType != TYP_UNDEF) { if (compiler->structSizeMightRepresentSIMDType(simdSize)) { diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 25e5db40c20c6c..faebb8949310dc 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -2611,11 +2611,9 @@ bool Lowering::LowerCallMemcmp(GenTreeCall* call, GenTree** next) if (GenTree::OperIsCmpCompare(oper)) { assert(type == TYP_INT); - return comp->gtNewSimdCmpOpAllNode(oper, TYP_INT, op1, op2, CORINFO_TYPE_NATIVEUINT, - genTypeSize(op1)); + return comp->gtNewSimdCmpOpAllNode(oper, TYP_INT, op1, op2, TYP_U_IMPL, genTypeSize(op1)); } - return comp->gtNewSimdBinOpNode(oper, op1->TypeGet(), op1, op2, CORINFO_TYPE_NATIVEUINT, - genTypeSize(op1)); + return comp->gtNewSimdBinOpNode(oper, op1->TypeGet(), op1, op2, TYP_U_IMPL, genTypeSize(op1)); } #endif return comp->gtNewOperNode(oper, type, op1, op2); @@ -11751,14 +11749,14 @@ void Lowering::ContainCheckConditionalCompare(GenTreeCCMP* cmp) // Remarks: // If the created node is a vector constant, op1 will be removed from the block range // -GenTree* Lowering::InsertNewSimdCreateScalarUnsafeNode(var_types simdType, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize) +GenTree* Lowering::InsertNewSimdCreateScalarUnsafeNode(var_types simdType, + GenTree* op1, + var_types simdBaseType, + unsigned simdSize) { assert(varTypeIsSIMD(simdType)); - GenTree* result = comp->gtNewSimdCreateScalarUnsafeNode(simdType, op1, simdBaseJitType, simdSize); + GenTree* result = comp->gtNewSimdCreateScalarUnsafeNode(simdType, op1, simdBaseType, simdSize); BlockRange().InsertAfter(op1, result); if (result->IsCnsVec()) diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h index 30d98f6880d51b..e72977324cecc9 100644 --- a/src/coreclr/jit/lower.h +++ b/src/coreclr/jit/lower.h @@ -463,10 +463,10 @@ class Lowering final : public 
Phase bool TryLowerAddForPossibleContainment(GenTreeOp* node, GenTree** next); void StoreFFRValue(GenTreeHWIntrinsic* node); #endif // !TARGET_XARCH && !TARGET_ARM64 - GenTree* InsertNewSimdCreateScalarUnsafeNode(var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize); + GenTree* InsertNewSimdCreateScalarUnsafeNode(var_types type, + GenTree* op1, + var_types simdBaseType, + unsigned simdSize); GenTree* NormalizeIndexToNativeSized(GenTree* index); #endif // FEATURE_HW_INTRINSICS diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp index a75814b38d3cc4..eb8ea9dee83a5e 100644 --- a/src/coreclr/jit/lowerarmarch.cpp +++ b/src/coreclr/jit/lowerarmarch.cpp @@ -1158,7 +1158,7 @@ GenTree* Lowering::LowerCnsMask(GenTreeMskCon* mask) BlockRange().InsertBefore(mask, vecCon); // Convert the vector constant to a mask - GenTree* convertedVec = comp->gtNewSimdCvtVectorToMaskNode(TYP_MASK, vecCon, CORINFO_TYPE_BYTE, 16); + GenTree* convertedVec = comp->gtNewSimdCvtVectorToMaskNode(TYP_MASK, vecCon, TYP_BYTE, 16); BlockRange().InsertBefore(mask, convertedVec->AsHWIntrinsic()->Op(1)); BlockRange().InsertBefore(mask, convertedVec); @@ -1691,10 +1691,9 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) if (isContainableMemory || !op2->OperIsConst()) { - unsigned simdSize = node->GetSimdSize(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + unsigned simdSize = node->GetSimdSize(); + var_types simdBaseType = node->GetSimdBaseTypeAsVarType(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); // We're either already loading from memory or we need to since // we don't know what actual index is going to be retrieved. 
@@ -1783,7 +1782,7 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) } // Finally we can indirect the memory address to get the actual value - GenTreeIndir* indir = comp->gtNewIndir(JITtype2varType(simdBaseJitType), addr); + GenTreeIndir* indir = comp->gtNewIndir(simdBaseType, addr); BlockRange().InsertBefore(node, indir); LIR::Use use; @@ -1854,7 +1853,7 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) BlockRange().InsertBefore(node, op3); LowerNode(op3); - node->SetSimdBaseJitType(CORINFO_TYPE_ULONG); + node->SetSimdBaseType(TYP_ULONG); node->ResetHWIntrinsicId(NI_AdvSimd_InsertScalar, comp, op1, op3, op2); break; } @@ -2010,12 +2009,11 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) // Wrap a conditional select around the embedded mask operation - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); bool foundUse = BlockRange().TryGetUse(node, &use); - GenTree* trueMask = comp->gtNewSimdAllTrueMaskNode(simdBaseJitType); + GenTree* trueMask = comp->gtNewSimdAllTrueMaskNode(node->GetSimdBaseType()); GenTree* falseVal = comp->gtNewZeroConNode(simdType); var_types nodeType = simdType; @@ -2029,7 +2027,7 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) GenTreeHWIntrinsic* condSelNode = comp->gtNewSimdHWIntrinsicNode(nodeType, trueMask, node, falseVal, NI_Sve_ConditionalSelect, - simdBaseJitType, simdSize); + node->GetSimdBaseType(), simdSize); BlockRange().InsertAfter(node, condSelNode); if (foundUse) { @@ -2094,11 +2092,10 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); 
- var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector64_op_Equality) || (intrinsicId == NI_Vector64_op_Inequality) || (intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality)); @@ -2151,8 +2148,8 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm GenTree* opClone = comp->gtClone(op); BlockRange().InsertAfter(op, opClone); - cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op, opClone, NI_AdvSimd_Arm64_MaxPairwise, CORINFO_TYPE_UINT, - simdSize); + cmp = + comp->gtNewSimdHWIntrinsicNode(simdType, op, opClone, NI_AdvSimd_Arm64_MaxPairwise, TYP_UINT, simdSize); BlockRange().InsertBefore(node, cmp); LowerNode(cmp); } @@ -2162,8 +2159,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm GenTree* zroCns = comp->gtNewIconNode(0, TYP_INT); BlockRange().InsertAfter(cmp, zroCns); - GenTree* val = - comp->gtNewSimdHWIntrinsicNode(TYP_LONG, cmp, zroCns, NI_AdvSimd_Extract, CORINFO_TYPE_ULONG, simdSize); + GenTree* val = comp->gtNewSimdHWIntrinsicNode(TYP_LONG, cmp, zroCns, NI_AdvSimd_Extract, TYP_ULONG, simdSize); BlockRange().InsertAfter(zroCns, val); LowerNode(val); @@ -2211,7 +2207,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm } } - GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, simdBaseJitType, simdSize); + GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, simdBaseType, simdSize); BlockRange().InsertBefore(node, cmp); LowerNode(cmp); @@ -2227,8 +2223,8 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps 
cm GenTree* insCns = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(idxCns, insCns); - GenTree* tmp = comp->gtNewSimdHWIntrinsicNode(simdType, cmp, idxCns, insCns, NI_AdvSimd_Insert, - CORINFO_TYPE_INT, simdSize); + GenTree* tmp = + comp->gtNewSimdHWIntrinsicNode(simdType, cmp, idxCns, insCns, NI_AdvSimd_Insert, TYP_INT, simdSize); BlockRange().InsertAfter(insCns, tmp); LowerNode(tmp); @@ -2247,8 +2243,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm GenTree* cmpClone = comp->gtClone(cmp); BlockRange().InsertAfter(cmp, cmpClone); - msk = comp->gtNewSimdHWIntrinsicNode(simdType, cmp, cmpClone, NI_AdvSimd_Arm64_MinPairwise, CORINFO_TYPE_UINT, - simdSize); + msk = comp->gtNewSimdHWIntrinsicNode(simdType, cmp, cmpClone, NI_AdvSimd_Arm64_MinPairwise, TYP_UINT, simdSize); BlockRange().InsertAfter(cmpClone, msk); LowerNode(msk); @@ -2258,8 +2253,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm GenTree* zroCns = comp->gtNewIconNode(0, TYP_INT); BlockRange().InsertAfter(cmp, zroCns); - GenTree* val = - comp->gtNewSimdHWIntrinsicNode(TYP_LONG, cmp, zroCns, NI_AdvSimd_Extract, CORINFO_TYPE_ULONG, simdSize); + GenTree* val = comp->gtNewSimdHWIntrinsicNode(TYP_LONG, cmp, zroCns, NI_AdvSimd_Extract, TYP_ULONG, simdSize); BlockRange().InsertAfter(zroCns, val); LowerNode(val); @@ -2301,12 +2295,11 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm // GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - var_types simdType = node->TypeGet(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - simd_t simdVal = {}; + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdType = node->TypeGet(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = 
node->GetSimdSize(); + simd_t simdVal = {}; if ((simdSize == 8) && (simdType == TYP_DOUBLE)) { @@ -2419,7 +2412,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp1 = Vector64.CreateScalarUnsafe(op1); // ... - GenTree* tmp1 = InsertNewSimdCreateScalarUnsafeNode(simdType, node->Op(1), simdBaseJitType, simdSize); + GenTree* tmp1 = InsertNewSimdCreateScalarUnsafeNode(simdType, node->Op(1), simdBaseType, simdSize); LowerNode(tmp1); // We will be constructing the following parts: @@ -2447,7 +2440,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // Place the insert as early as possible to avoid creating a lot of long lifetimes. GenTree* insertionPoint = LIR::LastNode(tmp1, opN); idx = comp->gtNewIconNode(N); - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, idx, opN, NI_AdvSimd_Insert, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, idx, opN, NI_AdvSimd_Insert, simdBaseType, simdSize); BlockRange().InsertAfter(insertionPoint, idx, tmp1); LowerNode(tmp1); } @@ -2472,11 +2465,10 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector64_Dot) || (intrinsicId == NI_Vector128_Dot)); assert(varTypeIsSIMD(simdType)); @@ -2517,7 +2509,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); - op1 = 
comp->gtNewSimdHWIntrinsicNode(simdType, op1, idx, tmp1, NI_AdvSimd_Insert, simdBaseJitType, simdSize); + op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, idx, tmp1, NI_AdvSimd_Insert, simdBaseType, simdSize); BlockRange().InsertAfter(tmp1, op1); LowerNode(op1); @@ -2528,7 +2520,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); - op2 = comp->gtNewSimdHWIntrinsicNode(simdType, op2, idx, tmp2, NI_AdvSimd_Insert, simdBaseJitType, simdSize); + op2 = comp->gtNewSimdHWIntrinsicNode(simdType, op2, idx, tmp2, NI_AdvSimd_Insert, simdBaseType, simdSize); BlockRange().InsertAfter(tmp2, op2); LowerNode(op2); } @@ -2553,7 +2545,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) } assert(!varTypeIsLong(simdBaseType)); - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); @@ -2599,7 +2591,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // var tmp1 = AdvSimd.AddPairwise(tmp1, tmp2); // ... - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType, + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseType, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); @@ -2625,7 +2617,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // var tmp1 = AdvSimd.Arm64.AddPairwise(tmp1, tmp2); // ... 
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType, + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, simdBaseType, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); @@ -2664,8 +2656,8 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, - simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, simdBaseType, + simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); } @@ -2712,8 +2704,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // var tmp2 = AdvSimd.AddPairwise(tmp1, tmp2); // ... - tmp1 = - comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseType, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); @@ -2732,8 +2723,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // var tmp2 = AdvSimd.Arm64.AddAcross(tmp1); // ... - tmp2 = - comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, tmp1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize); + tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, tmp1, NI_AdvSimd_Arm64_AddAcross, simdBaseType, simdSize); BlockRange().InsertAfter(tmp1, tmp2); LowerNode(tmp2); } @@ -3603,7 +3593,7 @@ bool Lowering::TryLowerAddSubToMulLongOp(GenTreeOp* op, GenTree** next) NamedIntrinsic intrinsicId = op->OperIs(GT_ADD) ? NI_ArmBase_Arm64_MultiplyLongAdd : NI_ArmBase_Arm64_MultiplyLongSub; GenTreeHWIntrinsic* outOp = comp->gtNewScalarHWIntrinsicNode(TYP_LONG, mul->gtOp1, mul->gtOp2, addVal, intrinsicId); - outOp->SetSimdBaseJitType(mul->IsUnsigned() ? 
CORINFO_TYPE_ULONG : CORINFO_TYPE_LONG); + outOp->SetSimdBaseType(mul->IsUnsigned() ? TYP_ULONG : TYP_LONG); BlockRange().InsertAfter(op, outOp); @@ -3676,7 +3666,7 @@ bool Lowering::TryLowerNegToMulLongOp(GenTreeOp* op, GenTree** next) // Able to optimise, create the new node and replace the original. GenTreeHWIntrinsic* outOp = comp->gtNewScalarHWIntrinsicNode(TYP_LONG, mul->gtOp1, mul->gtOp2, NI_ArmBase_Arm64_MultiplyLongNeg); - outOp->SetSimdBaseJitType(mul->IsUnsigned() ? CORINFO_TYPE_ULONG : CORINFO_TYPE_LONG); + outOp->SetSimdBaseType(mul->IsUnsigned() ? TYP_ULONG : TYP_LONG); BlockRange().InsertAfter(op, outOp); diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index 2f394610d4f877..651e6c1f5eb605 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -860,8 +860,8 @@ void Lowering::LowerCast(GenTree* tree) JITDUMP("LowerCast before:\n"); DISPTREERANGE(BlockRange(), tree); - CorInfoType srcBaseType = (srcType == TYP_FLOAT) ? CORINFO_TYPE_FLOAT : CORINFO_TYPE_DOUBLE; - LIR::Range castRange = LIR::EmptyRange(); + var_types srcBaseType = (srcType == TYP_FLOAT) ? TYP_FLOAT : TYP_DOUBLE; + LIR::Range castRange = LIR::EmptyRange(); // We'll be using SIMD instructions to fix up castOp before conversion. // @@ -907,7 +907,7 @@ void Lowering::LowerCast(GenTree* tree) // return int/long.MinValue for any overflow, which is correct for saturation of // negative, but the result must be replaced with MaxValue for positive overflow. 
- CorInfoType dstBaseType = CORINFO_TYPE_UNDEF; + var_types dstBaseType = TYP_UNDEF; NamedIntrinsic convertIntrinsic = NI_Illegal; GenTree* maxIntegralValue = nullptr; GenTree* maxFloatingValue = comp->gtNewVconNode(TYP_SIMD16); @@ -917,7 +917,7 @@ void Lowering::LowerCast(GenTree* tree) { case TYP_INT: { - dstBaseType = CORINFO_TYPE_INT; + dstBaseType = TYP_INT; maxIntegralValue = comp->gtNewIconNode(INT32_MAX); if (srcType == TYP_FLOAT) { @@ -933,7 +933,7 @@ void Lowering::LowerCast(GenTree* tree) } case TYP_UINT: { - dstBaseType = CORINFO_TYPE_UINT; + dstBaseType = TYP_UINT; maxIntegralValue = comp->gtNewIconNode(static_cast(UINT32_MAX)); if (srcType == TYP_FLOAT) { @@ -953,7 +953,7 @@ void Lowering::LowerCast(GenTree* tree) } case TYP_LONG: { - dstBaseType = CORINFO_TYPE_LONG; + dstBaseType = TYP_LONG; maxIntegralValue = comp->gtNewLconNode(INT64_MAX); if (srcType == TYP_FLOAT) { @@ -969,7 +969,7 @@ void Lowering::LowerCast(GenTree* tree) } case TYP_ULONG: { - dstBaseType = CORINFO_TYPE_ULONG; + dstBaseType = TYP_ULONG; maxIntegralValue = comp->gtNewLconNode(static_cast(UINT64_MAX)); if (srcType == TYP_FLOAT) { @@ -1130,9 +1130,8 @@ void Lowering::LowerCast(GenTree* tree) // This creates the equivalent of the following C# code: // convertResult = Sse41.BlendVariable(result, negated, result); - convertResult = - comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, result, negated, resultClone, - NI_X86Base_BlendVariable, CORINFO_TYPE_FLOAT, 16); + convertResult = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, result, negated, resultClone, + NI_X86Base_BlendVariable, TYP_FLOAT, 16); // Because the results are in a SIMD register, we need to ToScalar() them out. 
castRange.InsertAtEnd(convertResult); @@ -2619,11 +2618,10 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality) || (intrinsicId == NI_Vector256_op_Equality) || (intrinsicId == NI_Vector256_op_Inequality) || @@ -2651,8 +2649,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm // Changing the size is fine in some scenarios, such as comparison against Zero or // AllBitsSet, but not in other scenarios such as against an arbitrary mask. 
- CorInfoType maskBaseJitType = simdBaseJitType; - var_types maskBaseType = simdBaseType; + var_types maskBaseType = simdBaseType; if (op1Msk->OperIsConvertMaskToVector()) { @@ -2661,8 +2658,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm op1Msk = cvtMaskToVector->Op(1); assert(varTypeIsMask(op1Msk)); - maskBaseJitType = cvtMaskToVector->GetSimdBaseJitType(); - maskBaseType = cvtMaskToVector->GetSimdBaseType(); + maskBaseType = cvtMaskToVector->GetSimdBaseType(); } if (!varTypeIsFloating(simdBaseType) && (simdSize != 64) && !varTypeIsMask(op1Msk)) @@ -3028,15 +3024,15 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm GenTree* cnsNode; - maskNode = comp->gtNewSimdHWIntrinsicNode(TYP_MASK, maskNode, NI_AVX512_NotMask, - maskBaseJitType, simdSize); + maskNode = comp->gtNewSimdHWIntrinsicNode(TYP_MASK, maskNode, NI_AVX512_NotMask, maskBaseType, + simdSize); BlockRange().InsertBefore(node, maskNode); cnsNode = comp->gtNewIconNode(8 - count); BlockRange().InsertAfter(maskNode, cnsNode); maskNode = comp->gtNewSimdHWIntrinsicNode(TYP_MASK, maskNode, cnsNode, NI_AVX512_ShiftLeftMask, - maskBaseJitType, simdSize); + maskBaseType, simdSize); BlockRange().InsertAfter(cnsNode, maskNode); LowerNode(maskNode); @@ -3044,7 +3040,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm BlockRange().InsertAfter(maskNode, cnsNode); maskNode = comp->gtNewSimdHWIntrinsicNode(TYP_MASK, maskNode, cnsNode, NI_AVX512_ShiftRightMask, - maskBaseJitType, simdSize); + maskBaseType, simdSize); BlockRange().InsertAfter(cnsNode, maskNode); maskIntrinsicId = NI_AVX512_ShiftRightMask; @@ -3154,7 +3150,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm GenTree* vecCns = comp->gtNewSimdCreateBroadcastNode(simdType, broadcastOp, - nestedIntrin->GetSimdBaseJitType(), simdSize); + nestedIntrin->GetSimdBaseType(), simdSize); assert(vecCns->IsCnsVec()); 
BlockRange().InsertAfter(broadcastOp, vecCns); @@ -3177,18 +3173,15 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm if (varTypeIsUnsigned(simdBaseType)) { - simdBaseJitType = CORINFO_TYPE_UINT; - simdBaseType = TYP_UINT; + simdBaseType = TYP_UINT; } else { - simdBaseJitType = CORINFO_TYPE_INT; - simdBaseType = TYP_INT; + simdBaseType = TYP_INT; } - node->SetSimdBaseJitType(simdBaseJitType); + node->SetSimdBaseType(simdBaseType); - maskBaseJitType = simdBaseJitType; - maskBaseType = simdBaseType; + maskBaseType = simdBaseType; } BlockRange().Remove(op1); @@ -3234,7 +3227,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm // We're not consuming the underlying mask directly, even if one exists, // so ensure that we track the base type as the one we'll be producing // via the vector comparison introduced here. - maskBaseJitType = simdBaseJitType; + maskBaseType = simdBaseType; // We have `x == y` or `x != y` both of which where we want to find `AllBitsSet` in the mask since // we can directly do the relevant comparison. 
Given the above tables then when we have a full mask @@ -3275,7 +3268,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm { GenTreeHWIntrinsic* cc; - cc = comp->gtNewSimdHWIntrinsicNode(simdType, maskNode, NI_AVX512_KORTEST, maskBaseJitType, simdSize); + cc = comp->gtNewSimdHWIntrinsicNode(simdType, maskNode, NI_AVX512_KORTEST, maskBaseType, simdSize); BlockRange().InsertBefore(nextNode, cc); use.ReplaceWith(cc); @@ -3289,9 +3282,9 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm assert(simdSize != 64); NamedIntrinsic cmpIntrinsic; - CorInfoType cmpJitType; + var_types cmpType; NamedIntrinsic mskIntrinsic; - CorInfoType mskJitType; + var_types mskType; int mskConstant; switch (simdBaseType) @@ -3303,8 +3296,8 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm case TYP_INT: case TYP_UINT: { - cmpJitType = simdBaseJitType; - mskJitType = CORINFO_TYPE_UBYTE; + cmpType = simdBaseType; + mskType = TYP_UBYTE; if (simdSize == 32) { @@ -3326,8 +3319,8 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm case TYP_LONG: case TYP_ULONG: { - mskJitType = CORINFO_TYPE_UBYTE; - cmpJitType = simdBaseJitType; + mskType = TYP_UBYTE; + cmpType = simdBaseType; if (simdSize == 32) { @@ -3348,8 +3341,8 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm case TYP_FLOAT: { - cmpJitType = simdBaseJitType; - mskJitType = simdBaseJitType; + cmpType = simdBaseType; + mskType = simdBaseType; if (simdSize == 32) { @@ -3381,8 +3374,8 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm case TYP_DOUBLE: { - cmpJitType = simdBaseJitType; - mskJitType = simdBaseJitType; + cmpType = simdBaseType; + mskType = simdBaseType; if (simdSize == 32) { @@ -3407,11 +3400,11 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm } } - GenTree* cmp = 
comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, cmpJitType, simdSize); + GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, cmpType, simdSize); BlockRange().InsertBefore(node, cmp); LowerNode(cmp); - GenTree* msk = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, mskIntrinsic, mskJitType, simdSize); + GenTree* msk = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, mskIntrinsic, mskType, simdSize); BlockRange().InsertAfter(cmp, msk); LowerNode(msk); @@ -3453,10 +3446,9 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm // GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node) { - var_types simdType = node->gtType; - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + var_types simdType = node->gtType; + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); @@ -3500,12 +3492,12 @@ GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node) // vpblendmq. Changing the size is fine since CndSel itself is bitwise and the // the mask is just representing entire elements at a given size. 
- CorInfoType maskBaseJitType = cvtMaskToVector->GetSimdBaseJitType(); - node->SetSimdBaseJitType(maskBaseJitType); + var_types maskBaseType = cvtMaskToVector->GetSimdBaseType(); + node->SetSimdBaseType(maskBaseType); } else { - maskNode = comp->gtNewSimdCvtVectorToMaskNode(TYP_MASK, op1, simdBaseJitType, simdSize); + maskNode = comp->gtNewSimdCvtVectorToMaskNode(TYP_MASK, op1, simdBaseType, simdSize); BlockRange().InsertBefore(node, maskNode); } @@ -3520,12 +3512,12 @@ GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node) if (op3->IsVectorZero()) { - binOp = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseJitType, simdSize); + binOp = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseType, simdSize); BlockRange().Remove(op3); } else { - binOp = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, op1, simdBaseJitType, simdSize); + binOp = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, op1, simdBaseType, simdSize); BlockRange().Remove(op2); } @@ -3611,21 +3603,21 @@ GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node) // ... // tmp2 = op1 & op2 // ... - tmp2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseJitType, simdSize); + tmp2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseType, simdSize); BlockRange().InsertAfter(op2, tmp2); LowerNode(tmp2); // ... // tmp3 = op3 & ~tmp1 // ... - tmp3 = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, tmp1, simdBaseJitType, simdSize); + tmp3 = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, tmp1, simdBaseType, simdSize); BlockRange().InsertAfter(op3, tmp3); LowerNode(tmp3); // ... // tmp4 = tmp2 | tmp3 // ... 
- tmp4 = comp->gtNewSimdBinOpNode(GT_OR, simdType, tmp2, tmp3, simdBaseJitType, simdSize); + tmp4 = comp->gtNewSimdBinOpNode(GT_OR, simdType, tmp2, tmp3, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp4); LIR::Use use; @@ -3658,10 +3650,9 @@ GenTree* Lowering::LowerHWIntrinsicTernaryLogic(GenTreeHWIntrinsic* node) const uint8_t B = 0xCC; const uint8_t C = 0xAA; - var_types simdType = node->gtType; - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + var_types simdType = node->gtType; + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); @@ -3910,7 +3901,7 @@ GenTree* Lowering::LowerHWIntrinsicTernaryLogic(GenTreeHWIntrinsic* node) break; } - node->SetSimdBaseJitType(condition->AsHWIntrinsic()->GetSimdBaseJitType()); + node->SetSimdBaseType(condition->AsHWIntrinsic()->GetSimdBaseType()); node->ResetHWIntrinsicId(NI_AVX512_BlendVariableMask, comp, selectFalse, selectTrue, condition); BlockRange().Remove(op4); @@ -4110,12 +4101,11 @@ GenTree* Lowering::LowerHWIntrinsicTernaryLogic(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - var_types simdType = node->gtType; - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - simd_t simdVal = {}; + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdType = node->gtType; + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + simd_t simdVal = {}; if ((simdSize == 8) && (simdType == TYP_DOUBLE)) { @@ -4202,7 +4192,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // If we can't safely retype one of the above 
patterns and don't already have a cast to the // correct unsigned type, we will insert our own cast. - node->SetSimdBaseJitType(CORINFO_TYPE_INT); + node->SetSimdBaseType(TYP_INT); var_types unsignedType = varTypeToUnsigned(simdBaseType); @@ -4253,7 +4243,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp1 = Vector256.CreateScalarUnsafe(op1); // return Avx512.BroadcastScalarToVector512(tmp1); - tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseJitType, 16); + tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseType, 16); LowerNode(tmp1); node->ResetHWIntrinsicId(NI_AVX512_BroadcastScalarToVector512, tmp1); return LowerNode(node); @@ -4277,7 +4267,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp1 = Vector128.CreateScalarUnsafe(op1); // return Avx2.BroadcastScalarToVector256(tmp1); - tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseJitType, 16); + tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseType, 16); LowerNode(tmp1); node->ResetHWIntrinsicId(NI_AVX2_BroadcastScalarToVector256, tmp1); @@ -4306,7 +4296,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp3 = tmp2.ToVector256Unsafe(); // return tmp3.WithUpper(tmp1); - tmp1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, op1, simdBaseJitType, 16); + tmp1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, op1, simdBaseType, 16); BlockRange().InsertAfter(op1, tmp1); node->Op(1) = tmp1; @@ -4319,8 +4309,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); - tmp3 = - comp->gtNewSimdHWIntrinsicNode(TYP_SIMD32, tmp2, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16); + tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD32, tmp2, NI_Vector128_ToVector256Unsafe, simdBaseType, 16); BlockRange().InsertAfter(tmp2, tmp3); node->ResetHWIntrinsicId(NI_Vector256_WithUpper, comp, 
tmp3, tmp1); @@ -4340,10 +4329,10 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp1 = Vector128.CreateScalarUnsafe(op1); // ... - tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseJitType, 16); + tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseType, 16); LowerNode(tmp1); - if ((simdBaseJitType != CORINFO_TYPE_DOUBLE) && comp->compOpportunisticallyDependsOn(InstructionSet_AVX2)) + if ((simdBaseType != TYP_DOUBLE) && comp->compOpportunisticallyDependsOn(InstructionSet_AVX2)) { // We will be constructing the following parts: // ... @@ -4411,8 +4400,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_X86Base_UnpackLow, CORINFO_TYPE_USHORT, - simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_X86Base_UnpackLow, TYP_USHORT, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); @@ -4437,7 +4425,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) BlockRange().InsertAfter(tmp1, idx); node->ResetHWIntrinsicId(NI_X86Base_Shuffle, tmp1, idx); - node->SetSimdBaseJitType(CORINFO_TYPE_UINT); + node->SetSimdBaseType(TYP_UINT); break; } @@ -4511,7 +4499,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // return Sse3.MoveAndDuplicate(tmp1); node->ChangeHWIntrinsicId(NI_X86Base_MoveAndDuplicate, tmp1); - node->SetSimdBaseJitType(CORINFO_TYPE_DOUBLE); + node->SetSimdBaseType(TYP_DOUBLE); break; } @@ -4601,10 +4589,10 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) GenTree* hiInsertionPoint = LIR::LastNode(node->GetOperandArray(halfArgCnt), halfArgCnt); GenTree* lo = comp->gtNewSimdHWIntrinsicNode(halfType, node->GetOperandArray(), halfArgCnt, halfCreate, - simdBaseJitType, simdSize / 2); + simdBaseType, simdSize / 2); GenTree* hi = 
comp->gtNewSimdHWIntrinsicNode(halfType, node->GetOperandArray(halfArgCnt), halfArgCnt, - halfCreate, simdBaseJitType, simdSize / 2); + halfCreate, simdBaseType, simdSize / 2); node->ResetHWIntrinsicId(withUpper, comp, lo, hi); @@ -4628,7 +4616,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp1 = Vector128.CreateScalarUnsafe(op1); // ... - tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseJitType, 16); + tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op1, simdBaseType, 16); LowerNode(tmp1); switch (simdBaseType) @@ -4664,8 +4652,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // Place the insert as early as possible to avoid creating a lot of long lifetimes. GenTree* insertionPoint = LIR::LastNode(tmp1, opN); - tmp1 = - comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, opN, idx, insIntrinsic, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, opN, idx, insIntrinsic, simdBaseType, simdSize); BlockRange().InsertAfter(insertionPoint, idx, tmp1); LowerNode(tmp1); } @@ -4721,7 +4708,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) opN = node->Op(N + 1); - tmp2 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, opN, simdBaseJitType, 16); + tmp2 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, opN, simdBaseType, 16); LowerNode(tmp2); idx = comp->gtNewIconNode(N << 4, TYP_INT); @@ -4729,7 +4716,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // Place the insert as early as possible to avoid creating a lot of long lifetimes. 
GenTree* insertionPoint = LIR::LastNode(tmp1, tmp2); - tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, idx, NI_X86Base_Insert, simdBaseJitType, + tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, idx, NI_X86Base_Insert, simdBaseType, simdSize); BlockRange().InsertAfter(insertionPoint, idx, tmp3); @@ -4755,7 +4742,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) opN = node->Op(argCnt); - tmp2 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, opN, simdBaseJitType, 16); + tmp2 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, opN, simdBaseType, 16); LowerNode(tmp2); idx = comp->gtNewIconNode((argCnt - 1) << 4, TYP_INT); @@ -4817,7 +4804,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp2 = Vector128.CreateScalarUnsafe(op2); // return Sse.UnpackLow(tmp1, tmp2); - tmp2 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op2, simdBaseJitType, 16); + tmp2 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op2, simdBaseType, 16); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_X86Base_UnpackLow, tmp1, tmp2); @@ -4841,11 +4828,10 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - var_types simdType = node->gtType; - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdType = node->gtType; + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); assert(HWIntrinsicInfo::IsVectorGetElement(intrinsicId)); assert(!varTypeIsSIMD(simdType)); @@ -5027,7 +5013,7 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) BlockRange().InsertBefore(node, newAddr); GenTreeIndir* newIndir = - 
comp->gtNewIndir(JITtype2varType(simdBaseJitType), newAddr, (indir->gtFlags & GTF_IND_FLAGS)); + comp->gtNewIndir(node->GetSimdBaseTypeAsVarType(), newAddr, (indir->gtFlags & GTF_IND_FLAGS)); BlockRange().InsertBefore(node, newIndir); LIR::Use use; @@ -5078,7 +5064,7 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) if (lclDsc->lvDoNotEnregister && (lclOffs <= 0xFFFF) && ((lclOffs + elemSize) <= lclDsc->lvExactSize())) { - GenTree* lclFld = comp->gtNewLclFldNode(lclVar->GetLclNum(), JITtype2varType(simdBaseJitType), + GenTree* lclFld = comp->gtNewLclFldNode(lclVar->GetLclNum(), node->GetSimdBaseTypeAsVarType(), static_cast(lclOffs)); BlockRange().InsertBefore(node, lclFld); @@ -5135,7 +5121,7 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) // ... // op1 = op1.GetLower().GetLower(); - tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector512_GetLower128, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector512_GetLower128, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } @@ -5162,7 +5148,7 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) NamedIntrinsic extractIntrinsicId = NI_AVX512_ExtractVector128; - tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, extractIntrinsicId, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, extractIntrinsicId, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } @@ -5184,7 +5170,7 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) // ... 
// op1 = op1.GetLower(); - tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } @@ -5204,7 +5190,7 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) imm8 -= count / 2; - tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } @@ -5317,11 +5303,10 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - var_types simdType = node->TypeGet(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdType = node->TypeGet(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); @@ -5372,7 +5357,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX512)); // This copy of "node" will have the simd16 value we need. - result = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, intrinsicId, simdBaseJitType, 16); + result = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, intrinsicId, simdBaseType, 16); BlockRange().InsertBefore(node, result); // We will be constructing the following parts: @@ -5401,7 +5386,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) // ... 
// op1 = op1.GetLower().GetLower(); - tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector512_GetLower128, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector512_GetLower128, simdBaseType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } @@ -5428,7 +5413,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) NamedIntrinsic extractIntrinsicId = NI_AVX512_ExtractVector128; - tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, extractIntrinsicId, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, extractIntrinsicId, simdBaseType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } @@ -5455,7 +5440,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // This copy of "node" will have the simd16 value we need. - result = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, intrinsicId, simdBaseJitType, 16); + result = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, intrinsicId, simdBaseType, 16); BlockRange().InsertBefore(node, result); // We will be constructing the following parts: @@ -5484,7 +5469,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) // ... 
// op1 = op1.GetLower(); - tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } @@ -5504,7 +5489,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) imm8 -= count / 2; - tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } @@ -5550,7 +5535,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) // ... // tmp1 = Vector128.CreateScalarUnsafe(op3); - tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op3, CORINFO_TYPE_FLOAT, 16); + tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op3, TYP_FLOAT, 16); LowerNode(tmp1); imm8 = imm8 * 16; @@ -5582,7 +5567,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) // ... // tmp1 = Vector128.CreateScalarUnsafe(op3); - tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op3, CORINFO_TYPE_DOUBLE, 16); + tmp1 = InsertNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op3, TYP_DOUBLE, 16); LowerNode(tmp1); result->ResetHWIntrinsicId((imm8 == 0) ? 
NI_X86Base_MoveScalar : NI_X86Base_UnpackLow, op1, tmp1); @@ -5631,12 +5616,11 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); - unsigned simd16Count = comp->getSIMDVectorLength(16, simdBaseType); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + unsigned simd16Count = comp->getSIMDVectorLength(16, simdBaseType); assert((intrinsicId == NI_Vector128_Dot) || (intrinsicId == NI_Vector256_Dot)); assert(varTypeIsSIMD(simdType)); @@ -5702,8 +5686,8 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) idx = comp->gtNewIconNode(0xFF, TYP_INT); BlockRange().InsertBefore(node, idx); - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_AVX_DotProduct, simdBaseJitType, - simdSize); + tmp1 = + comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_AVX_DotProduct, simdBaseType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); @@ -5721,12 +5705,12 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp3, idx); - tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, NI_AVX_Permute2x128, simdBaseJitType, + tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, NI_AVX_Permute2x128, simdBaseType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); - tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, 
tmp1, tmp2, simdBaseType, simdSize); BlockRange().InsertAfter(tmp2, tmp1); // We're producing a vector result, so just return the result directly @@ -5813,8 +5797,8 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // However, doing that would break/limit CSE and requires a partial write so // it's better to just broadcast the value to the entire vector - tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_X86Base_DotProduct, - simdBaseJitType, simdSize); + tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_X86Base_DotProduct, simdBaseType, + simdSize); BlockRange().InsertAfter(idx, tmp3); LowerNode(tmp3); @@ -5854,8 +5838,8 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // However, doing that would break/limit CSE and requires a partial write so // it's better to just broadcast the value to the entire vector - tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_X86Base_DotProduct, - simdBaseJitType, simdSize); + tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_X86Base_DotProduct, simdBaseType, + simdSize); BlockRange().InsertAfter(idx, tmp3); LowerNode(tmp3); @@ -5921,7 +5905,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) memcpy(&vecCon1->gtSimdVal, &simd16Val, sizeof(simd16_t)); BlockRange().InsertAfter(op1, vecCon1); - op1 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon1, simdBaseJitType, simdSize); + op1 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon1, simdBaseType, simdSize); BlockRange().InsertAfter(vecCon1, op1); LowerNode(vecCon1); @@ -5949,7 +5933,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) memcpy(&vecCon2->gtSimdVal, &simd16Val, sizeof(simd16_t)); BlockRange().InsertAfter(op2, vecCon2); - op2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op2, vecCon2, simdBaseJitType, simdSize); + op2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op2, vecCon2, simdBaseType, simdSize); 
BlockRange().InsertAfter(vecCon2, op2); LowerNode(vecCon2); @@ -5967,7 +5951,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // var tmp1 = Isa.Multiply(op1, op2); // ... - tmp1 = comp->gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); @@ -6011,7 +5995,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // tmp1 = Isa.HorizontalAdd(tmp1, tmp2); // ... - tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, horizontalAdd, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, horizontalAdd, simdBaseType, simdSize); } else { @@ -6099,7 +6083,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) tmp3 = comp->gtClone(tmp2); BlockRange().InsertAfter(tmp2, tmp3); - tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, shuffle, simdBaseJitType, simdSize); + tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, shuffle, simdBaseType, simdSize); } else { @@ -6124,7 +6108,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // tmp2 = Isa.Shuffle(tmp1, shuffleConst); // ... 
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_X86Base_ShuffleLow, simdBaseJitType, + tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_X86Base_ShuffleLow, simdBaseType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); @@ -6132,7 +6116,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) idx = comp->gtNewIconNode(shuffleConst, TYP_INT); BlockRange().InsertAfter(tmp2, idx); - tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_X86Base_ShuffleHigh, simdBaseJitType, + tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_X86Base_ShuffleHigh, simdBaseType, simdSize); } else @@ -6152,8 +6136,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // tmp2 = Isa.Shuffle(tmp1, shuffleConst); // ... - tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_X86Base_Shuffle, CORINFO_TYPE_INT, - simdSize); + tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_X86Base_Shuffle, TYP_INT, simdSize); } } @@ -6172,7 +6155,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // tmp1 = Isa.Add(tmp1, tmp2); // ... - tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseType, simdSize); } BlockRange().InsertAfter(tmp2, tmp1); @@ -6219,11 +6202,11 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) NamedIntrinsic permute2x128 = (simdBaseType == TYP_DOUBLE) ? 
NI_AVX_Permute2x128 : NI_AVX2_Permute2x128; - tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, permute2x128, simdBaseJitType, simdSize); + tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, permute2x128, simdBaseType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); - tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize); + tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseType, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); } @@ -6252,11 +6235,10 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert(HWIntrinsicInfo::IsVectorToScalar(intrinsicId)); assert(varTypeIsSIMD(simdType)); @@ -6279,8 +6261,9 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node) { GenTreeIndir* indir = op1->AsIndir(); + // todo-xarc-simd-vartypes: this used jittype2vartype GenTreeIndir* newIndir = - comp->gtNewIndir(JITtype2varType(simdBaseJitType), indir->Addr(), (indir->gtFlags & GTF_IND_FLAGS)); + comp->gtNewIndir(node->GetSimdBaseTypeAsVarType(), indir->Addr(), (indir->gtFlags & GTF_IND_FLAGS)); BlockRange().InsertBefore(node, newIndir); LIR::Use use; @@ -6310,7 +6293,7 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node) if (lclDsc->lvDoNotEnregister && (lclOffs <= 0xFFFF) && ((lclOffs + elemSize) <= lclDsc->lvExactSize())) { GenTree* lclFld = - 
comp->gtNewLclFldNode(lclVar->GetLclNum(), JITtype2varType(simdBaseJitType), lclVar->GetLclOffs()); + comp->gtNewLclFldNode(lclVar->GetLclNum(), node->GetSimdBaseTypeAsVarType(), lclVar->GetLclOffs()); BlockRange().InsertBefore(node, lclFld); LIR::Use use; @@ -7376,18 +7359,18 @@ void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node) if (isContainable && varTypeIsSmall(simdBaseType)) { - CorInfoType baseJitType = varTypeIsByte(node) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_USHORT; + var_types baseType = varTypeIsByte(node) ? TYP_UBYTE : TYP_USHORT; if (intrinsicId == NI_Vector512_ToScalar) { op1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector512_GetLower128, - baseJitType, 64); + baseType, 64); BlockRange().InsertBefore(hwintrinsic, op1); LowerNode(op1); } else if (intrinsicId == NI_Vector256_ToScalar) { - op1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, baseJitType, 32); + op1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, baseType, 32); BlockRange().InsertBefore(hwintrinsic, op1); LowerNode(op1); } @@ -7397,7 +7380,7 @@ void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node) GenTree* zero = comp->gtNewZeroConNode(TYP_INT); BlockRange().InsertBefore(hwintrinsic, zero); - hwintrinsic->SetSimdBaseJitType(baseJitType); + hwintrinsic->SetSimdBaseType(baseType); hwintrinsic->SetSimdSize(16); hwintrinsic->ResetHWIntrinsicId(intrinsicId, op1, zero); zero->SetContained(); @@ -8914,25 +8897,25 @@ void Lowering::TryFoldCnsVecForEmbeddedBroadcast(GenTreeHWIntrinsic* parentNode, // // Likewise, we do not have to match the intrinsic's base type as long as the broadcast size is correct. 
- var_types simdBaseType = parentNode->GetSimdBaseType(); - instruction ins = HWIntrinsicInfo::lookupIns(parentNode->GetHWIntrinsicId(), simdBaseType, comp); - unsigned broadcastSize = CodeGenInterface::instInputSize(ins); - CorInfoType broadcastJitType = parentNode->GetSimdBaseJitType(); + var_types simdBaseType = parentNode->GetSimdBaseType(); + instruction ins = HWIntrinsicInfo::lookupIns(parentNode->GetHWIntrinsicId(), simdBaseType, comp); + unsigned broadcastSize = CodeGenInterface::instInputSize(ins); + var_types broadcastType = parentNode->GetSimdBaseTypeAsVarType(); if (broadcastSize != genTypeSize(simdBaseType)) { if (broadcastSize == 4) { - broadcastJitType = varTypeIsFloating(simdBaseType) ? CORINFO_TYPE_FLOAT : CORINFO_TYPE_INT; + broadcastType = varTypeIsFloating(simdBaseType) ? TYP_FLOAT : TYP_INT; } else { assert(broadcastSize == 8); - broadcastJitType = varTypeIsFloating(simdBaseType) ? CORINFO_TYPE_DOUBLE : CORINFO_TYPE_LONG; + broadcastType = varTypeIsFloating(simdBaseType) ? TYP_DOUBLE : TYP_LONG; } } - if (!cnsVec->IsBroadcast(JITtype2varType(broadcastJitType))) + if (!cnsVec->IsBroadcast(broadcastType)) { // Some bit-wise instructions have both 4-byte and 8-byte broadcast forms. // If the constant wasn't a match at 4 bytes, it might be at 8. @@ -8968,8 +8951,8 @@ void Lowering::TryFoldCnsVecForEmbeddedBroadcast(GenTreeHWIntrinsic* parentNode, if (canUse8ByteBroadcast) { - broadcastJitType = varTypeIsFloating(simdBaseType) ? CORINFO_TYPE_DOUBLE : CORINFO_TYPE_LONG; - parentNode->SetSimdBaseJitType(broadcastJitType); + broadcastType = varTypeIsFloating(simdBaseType) ? TYP_DOUBLE : TYP_LONG; + parentNode->SetSimdBaseType(broadcastType); } else { @@ -8998,8 +8981,8 @@ void Lowering::TryFoldCnsVecForEmbeddedBroadcast(GenTreeHWIntrinsic* parentNode, { if (cnsVec->IsBroadcast(TYP_INT)) { - broadcastJitType = varTypeIsFloating(simdBaseType) ? 
CORINFO_TYPE_FLOAT : CORINFO_TYPE_INT; - parentNode->SetSimdBaseJitType(broadcastJitType); + broadcastType = varTypeIsFloating(simdBaseType) ? TYP_FLOAT : TYP_INT; + parentNode->SetSimdBaseType(broadcastType); } break; } @@ -9032,7 +9015,7 @@ void Lowering::TryFoldCnsVecForEmbeddedBroadcast(GenTreeHWIntrinsic* parentNode, assert(simdType == TYP_SIMD16); } - switch (JITtype2varType(broadcastJitType)) + switch (broadcastType) { case TYP_FLOAT: { @@ -9069,7 +9052,7 @@ void Lowering::TryFoldCnsVecForEmbeddedBroadcast(GenTreeHWIntrinsic* parentNode, } GenTreeHWIntrinsic* broadcastNode = - comp->gtNewSimdHWIntrinsicNode(simdType, constScalar, broadcastName, broadcastJitType, genTypeSize(simdType)); + comp->gtNewSimdHWIntrinsicNode(simdType, constScalar, broadcastName, broadcastType, genTypeSize(simdType)); BlockRange().InsertBefore(parentNode, constScalar, broadcastNode); BlockRange().Remove(cnsVec); @@ -9147,12 +9130,11 @@ void Lowering::ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* ad // void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) { - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId); - size_t numArgs = node->GetOperandCount(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - uint32_t simdSize = node->GetSimdSize(); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId); + size_t numArgs = node->GetOperandCount(); + var_types simdBaseType = node->GetSimdBaseType(); + uint32_t simdSize = node->GetSimdSize(); if (!HWIntrinsicInfo::SupportsContainment(intrinsicId)) { @@ -10144,16 +10126,16 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) if (IsInvariantInRange(op2, node)) { - unsigned tgtMaskSize = simdSize / genTypeSize(simdBaseType); - CorInfoType tgtSimdBaseJitType = CORINFO_TYPE_UNDEF; - 
size_t broadcastOpIndex = 0; + unsigned tgtMaskSize = simdSize / genTypeSize(simdBaseType); + var_types tgtSimdBaseType = TYP_UNDEF; + size_t broadcastOpIndex = 0; - if (op2->isEmbeddedMaskingCompatible(comp, tgtMaskSize, tgtSimdBaseJitType, + if (op2->isEmbeddedMaskingCompatible(comp, tgtMaskSize, tgtSimdBaseType, &broadcastOpIndex)) { - if (tgtSimdBaseJitType != CORINFO_TYPE_UNDEF) + if (tgtSimdBaseType != TYP_UNDEF) { - op2->AsHWIntrinsic()->SetSimdBaseJitType(tgtSimdBaseJitType); + op2->AsHWIntrinsic()->SetSimdBaseType(tgtSimdBaseType); if (broadcastOpIndex != 0) { @@ -10183,7 +10165,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) GenTree* lconNode = comp->gtNewLconNode((lval << 32) | lval); - broadcastNode->SetSimdBaseJitType(CORINFO_TYPE_LONG); + broadcastNode->SetSimdBaseType(TYP_LONG); broadcastNode->Op(1) = lconNode; BlockRange().InsertBefore(broadcastNode, lconNode); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 48493499ff22b3..d50e7f23d50458 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -9309,11 +9309,10 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) } } - NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); - var_types retType = node->TypeGet(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); + var_types retType = node->TypeGet(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); switch (intrinsicId) { @@ -9419,7 +9418,7 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { var_types simdType = getSIMDTypeForSize(simdSize); - node = gtNewSimdSqrtNode(simdType, hwop1, simdBaseJitType, simdSize)->AsHWIntrinsic(); + node = gtNewSimdSqrtNode(simdType, hwop1, simdBaseType, simdSize)->AsHWIntrinsic(); DEBUG_DESTROY_NODE(sqrt); } else @@ 
-9594,7 +9593,7 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(node); - node = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize)->AsHWIntrinsic(); + node = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseType, simdSize)->AsHWIntrinsic(); #if defined(TARGET_XARCH) if (varTypeIsFloating(simdBaseType)) @@ -9855,10 +9854,9 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) genTreeOps op1Oper = op1Intrin->GetOperForHWIntrinsicId(&op1IsScalar, /* getEffectiveOp */ true); var_types op1RetType = op1Intrin->TypeGet(); - NamedIntrinsic op1Intrinsic = op1Intrin->GetHWIntrinsicId(); - CorInfoType op1SimdBaseJitType = op1Intrin->GetSimdBaseJitType(); - var_types op1SimdBaseType = op1Intrin->GetSimdBaseType(); - unsigned op1SimdSize = op1Intrin->GetSimdSize(); + NamedIntrinsic op1Intrinsic = op1Intrin->GetHWIntrinsicId(); + var_types op1SimdBaseType = op1Intrin->GetSimdBaseType(); + unsigned op1SimdSize = op1Intrin->GetSimdSize(); if (op1Oper == GT_NOT) { @@ -9898,7 +9896,7 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) assert(varTypeIsMask(lookupType)); op1Intrin->gtType = lookupType; - op1Intrin = gtNewSimdCvtMaskToVectorNode(retType, op1Intrin, op1SimdBaseJitType, op1SimdSize) + op1Intrin = gtNewSimdCvtMaskToVectorNode(retType, op1Intrin, op1SimdBaseType, op1SimdSize) ->AsHWIntrinsic(); } else if (cvtIntrin != nullptr) @@ -10081,11 +10079,10 @@ GenTree* Compiler::fgOptimizeHWIntrinsicAssociative(GenTreeHWIntrinsic* tree) // so that we can fold it down to `v1 op c3` assert(opts.OptimizationEnabled()); - NamedIntrinsic intrinsicId = tree->GetHWIntrinsicId(); - var_types retType = tree->TypeGet(); - CorInfoType simdBaseJitType = tree->GetSimdBaseJitType(); - var_types simdBaseType = tree->GetSimdBaseType(); - unsigned simdSize = tree->GetSimdSize(); + NamedIntrinsic intrinsicId = tree->GetHWIntrinsicId(); + var_types retType = tree->TypeGet(); + var_types 
simdBaseType = tree->GetSimdBaseType(); + unsigned simdSize = tree->GetSimdSize(); if (!varTypeIsSIMD(retType) && !varTypeIsMask(retType)) { @@ -10167,7 +10164,7 @@ GenTree* Compiler::fgOptimizeHWIntrinsicAssociative(GenTreeHWIntrinsic* tree) assert(cns1->TypeIs(retType)); assert(cns2->TypeIs(retType)); - GenTree* res = gtNewSimdHWIntrinsicNode(retType, cns1, cns2, intrinsicId, simdBaseJitType, simdSize); + GenTree* res = gtNewSimdHWIntrinsicNode(retType, cns1, cns2, intrinsicId, simdBaseType, simdSize); res = gtFoldExprHWIntrinsic(res->AsHWIntrinsic()); assert(res == cns1); @@ -11262,10 +11259,9 @@ GenTree* Compiler::fgMorphHWIntrinsic(GenTreeHWIntrinsic* tree) // Now do POST-ORDER processing // - var_types retType = tree->TypeGet(); - CorInfoType simdBaseJitType = tree->GetSimdBaseJitType(); - var_types simdBaseType = tree->GetSimdBaseType(); - unsigned simdSize = tree->GetSimdSize(); + var_types retType = tree->TypeGet(); + var_types simdBaseType = tree->GetSimdBaseType(); + unsigned simdSize = tree->GetSimdSize(); // Try to fold it, maybe we get lucky, GenTree* morphedTree = gtFoldExpr(tree); @@ -11356,11 +11352,10 @@ GenTree* Compiler::fgMorphHWIntrinsic(GenTreeHWIntrinsic* tree) // GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) { - NamedIntrinsic intrinsic = tree->GetHWIntrinsicId(); - var_types retType = tree->TypeGet(); - CorInfoType simdBaseJitType = tree->GetSimdBaseJitType(); - var_types simdBaseType = tree->GetSimdBaseType(); - unsigned simdSize = tree->GetSimdSize(); + NamedIntrinsic intrinsic = tree->GetHWIntrinsicId(); + var_types retType = tree->TypeGet(); + var_types simdBaseType = tree->GetSimdBaseType(); + unsigned simdSize = tree->GetSimdSize(); bool isScalar = false; genTreeOps oper = tree->GetOperForHWIntrinsicId(&isScalar); @@ -11432,12 +11427,12 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) if (op1Type == TYP_MASK) { #if defined(TARGET_XARCH) - newNode = gtNewSimdHWIntrinsicNode(op1Type, 
op1, NI_AVX512_NotMask, simdBaseJitType, simdSize); + newNode = gtNewSimdHWIntrinsicNode(op1Type, op1, NI_AVX512_NotMask, simdBaseType, simdSize); #endif // TARGET_XARCH } else { - newNode = gtNewSimdUnOpNode(GT_NOT, op1Type, op1, simdBaseJitType, simdSize); + newNode = gtNewSimdUnOpNode(GT_NOT, op1Type, op1, simdBaseType, simdSize); #if defined(TARGET_XARCH) newNode->AsHWIntrinsic()->Op(2)->SetMorphed(this); @@ -11455,11 +11450,11 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) if (retType == TYP_MASK) { - newNode = gtNewSimdCvtVectorToMaskNode(retType, newNode, simdBaseJitType, simdSize); + newNode = gtNewSimdCvtVectorToMaskNode(retType, newNode, simdBaseType, simdSize); } else { - newNode = gtNewSimdCvtMaskToVectorNode(retType, newNode, simdBaseJitType, simdSize); + newNode = gtNewSimdCvtMaskToVectorNode(retType, newNode, simdBaseType, simdSize); } } @@ -11497,7 +11492,7 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) assert(varTypeIsMask(lookupType)); tree->gtType = lookupType; - tree = gtNewSimdCvtMaskToVectorNode(retType, tree, simdBaseJitType, simdSize)->AsHWIntrinsic(); + tree = gtNewSimdCvtMaskToVectorNode(retType, tree, simdBaseType, simdSize)->AsHWIntrinsic(); return fgMorphHWIntrinsicRequired(tree); } } @@ -11596,7 +11591,7 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) { #if defined(TARGET_ARM64) // xarch doesn't have a native GT_NEG representation for integers and itself uses (Zero - v1) - op2 = gtNewSimdUnOpNode(GT_NEG, retType, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdUnOpNode(GT_NEG, retType, op2, simdBaseType, simdSize); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); @@ -11606,7 +11601,7 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) } else { - op2 = gtNewSimdUnOpNode(GT_NEG, retType, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdUnOpNode(GT_NEG, retType, op2, simdBaseType, simdSize); #if defined(TARGET_XARCH) if 
(varTypeIsFloating(simdBaseType)) @@ -11647,7 +11642,7 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) if (op2->IsVectorAllBitsSet()) { // xarch doesn't have a native GT_NOT representation and itself uses (v1 ^ AllBitsSet) - op1 = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseType, simdSize); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); @@ -11658,7 +11653,7 @@ GenTree* Compiler::fgMorphHWIntrinsicRequired(GenTreeHWIntrinsic* tree) if (varTypeIsFloating(simdBaseType) && op2->IsVectorNegativeZero(simdBaseType)) { // xarch doesn't have a native GT_NEG representation for floating-point and itself uses (v1 ^ -0.0) - op1 = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseType, simdSize); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); diff --git a/src/coreclr/jit/optimizemaskconversions.cpp b/src/coreclr/jit/optimizemaskconversions.cpp index 7396469770acae..9cd32c4ad10940 100644 --- a/src/coreclr/jit/optimizemaskconversions.cpp +++ b/src/coreclr/jit/optimizemaskconversions.cpp @@ -28,8 +28,8 @@ struct MaskConversionsWeight #endif // The simd types of the Lcl Store after conversion to vector. 
- CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; - unsigned simdSize = 0; + var_types simdBaseType = TYP_UNDEF; + unsigned simdSize = 0; void UpdateWeight(bool isStore, bool hasConvert, weight_t blockWeight); @@ -90,22 +90,22 @@ void MaskConversionsWeight::UpdateWeight(bool isStore, bool hasConvert, weight_t // void MaskConversionsWeight::CacheSimdTypes(GenTreeHWIntrinsic* op, unsigned lclnum) { - CorInfoType newSimdBaseJitType = op->GetSimdBaseJitType(); - unsigned newSimdSize = op->GetSimdSize(); + var_types newSimdBaseType = op->GetSimdBaseType(); + unsigned newSimdSize = op->GetSimdSize(); - assert((newSimdBaseJitType != CORINFO_TYPE_UNDEF)); + assert((newSimdBaseType != TYP_UNDEF)); - if (simdBaseJitType == CORINFO_TYPE_UNDEF) + if (simdBaseType == TYP_UNDEF) { // Types have not already been cached. Set them. - simdBaseJitType = newSimdBaseJitType; - simdSize = newSimdSize; + simdBaseType = newSimdBaseType; + simdSize = newSimdSize; } - else if ((simdBaseJitType != newSimdBaseJitType) || (simdSize != newSimdSize)) + else if ((simdBaseType != newSimdBaseType) || (simdSize != newSimdSize)) { // Type mismatch with existing cached type. - JITDUMP("Local V%02d has different types: (%d, %d) vs (%d, %d). ", lclnum, simdBaseJitType, simdSize, - newSimdBaseJitType, newSimdSize); + JITDUMP("Local V%02d has different types: (%d, %d) vs (%d, %d). 
", lclnum, simdBaseType, simdSize, + newSimdBaseType, newSimdSize); InvalidateWeight(); } } @@ -385,8 +385,8 @@ class MaskConversionsUpdateVisitor final : public GenTreeVisitorsimdBaseJitType != CORINFO_TYPE_UNDEF); - lclOp->Data() = m_compiler->gtNewSimdCvtVectorToMaskNode(TYP_MASK, lclOp->Data(), weight->simdBaseJitType, + assert(weight->simdBaseType != TYP_UNDEF); + lclOp->Data() = m_compiler->gtNewSimdCvtVectorToMaskNode(TYP_MASK, lclOp->Data(), weight->simdBaseType, weight->simdSize); } else if (isLocalUse && removeConversion) @@ -407,9 +407,8 @@ class MaskConversionsUpdateVisitor final : public GenTreeVisitorsimdBaseJitType != CORINFO_TYPE_UNDEF); - *use = - m_compiler->gtNewSimdCvtMaskToVectorNode(lclOrigType, lclOp, weight->simdBaseJitType, weight->simdSize); + assert(weight->simdBaseType != TYP_UNDEF); + *use = m_compiler->gtNewSimdCvtMaskToVectorNode(lclOrigType, lclOp, weight->simdBaseType, weight->simdSize); } JITDUMP("Updated %s V%02d at [%06u] to mask (%s conversion)\n", isLocalStore ? 
"store" : "use", diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp index dcec6f6c608bf3..c91d161aa673b3 100644 --- a/src/coreclr/jit/rationalize.cpp +++ b/src/coreclr/jit/rationalize.cpp @@ -121,11 +121,11 @@ void Rationalizer::RewriteNodeAsCall(GenTree** use, #if defined(FEATURE_MASKED_HW_INTRINSICS) // No managed call takes TYP_MASK, so convert it back to a TYP_SIMD - unsigned simdSize; - CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); + unsigned simdSize; + var_types simdBaseType = comp->getBaseTypeAndSizeOfSIMDType(clsHnd, &simdSize); assert(simdSize != 0); - GenTree* cvtNode = comp->gtNewSimdCvtMaskToVectorNode(sigTyp, operand, simdBaseJitType, simdSize); + GenTree* cvtNode = comp->gtNewSimdCvtMaskToVectorNode(sigTyp, operand, simdBaseType, simdSize); BlockRange().InsertAfter(operand, LIR::Range(comp->fgSetTreeSeq(cvtNode), cvtNode)); operand = cvtNode; #else @@ -182,11 +182,11 @@ void Rationalizer::RewriteNodeAsCall(GenTree** use, #if defined(FEATURE_MASKED_HW_INTRINSICS) // No managed call returns TYP_MASK, so convert it from a TYP_SIMD - unsigned simdSize; - CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(call->gtRetClsHnd, &simdSize); + unsigned simdSize; + var_types simdBaseType = comp->getBaseTypeAndSizeOfSIMDType(call->gtRetClsHnd, &simdSize); assert(simdSize != 0); - result = comp->gtNewSimdCvtVectorToMaskNode(TYP_MASK, result, simdBaseJitType, simdSize); + result = comp->gtNewSimdCvtVectorToMaskNode(TYP_MASK, result, simdBaseType, simdSize); if (tmpNum == BAD_VAR_NUM) { @@ -324,12 +324,11 @@ void Rationalizer::RewriteIntrinsicAsUserCall(GenTree** use, ArrayStack& parents) { - GenTreeHWIntrinsic* hwintrinsic = (*use)->AsHWIntrinsic(); - NamedIntrinsic intrinsicId = hwintrinsic->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = hwintrinsic->GetSimdBaseJitType(); - var_types simdBaseType = hwintrinsic->GetSimdBaseType(); - uint32_t simdSize = 
hwintrinsic->GetSimdSize(); - var_types retType = hwintrinsic->TypeGet(); + GenTreeHWIntrinsic* hwintrinsic = (*use)->AsHWIntrinsic(); + NamedIntrinsic intrinsicId = hwintrinsic->GetHWIntrinsicId(); + var_types simdBaseType = hwintrinsic->GetSimdBaseType(); + uint32_t simdSize = hwintrinsic->GetSimdSize(); + var_types retType = hwintrinsic->TypeGet(); GenTree** operands = hwintrinsic->GetOperandArray(); size_t operandCount = hwintrinsic->GetOperandCount(); @@ -368,7 +367,7 @@ void Rationalizer::RewriteHWIntrinsicAsUserCall(GenTree** use, ArrayStackgtNewSimdHWIntrinsicNode(retType, op1, op2, id, simdBaseJitType, simdSize); + result = comp->gtNewSimdHWIntrinsicNode(retType, op1, op2, id, simdBaseType, simdSize); break; } #endif // TARGET_XARCH @@ -414,7 +413,7 @@ void Rationalizer::RewriteHWIntrinsicAsUserCall(GenTree** use, ArrayStackgtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize, isShuffleNative); + result = comp->gtNewSimdShuffleNode(retType, op1, op2, simdBaseType, simdSize, isShuffleNative); break; } @@ -486,7 +485,7 @@ void Rationalizer::RewriteHWIntrinsicAsUserCall(GenTree** use, ArrayStackCheckHWIntrinsicImmRange(intrinsicId, simdBaseJitType, immOp2, mustExpand, immLowerBound, + if (comp->CheckHWIntrinsicImmRange(intrinsicId, simdBaseType, immOp2, mustExpand, immLowerBound, immUpperBound, hasFullRangeImm, &useFallback)) { // Set this as nullptr so we stay an intrinsic if both immediates are constant and in range @@ -504,7 +503,7 @@ void Rationalizer::RewriteHWIntrinsicAsUserCall(GenTree** use, ArrayStackCheckHWIntrinsicImmRange(intrinsicId, simdBaseJitType, immOp1, mustExpand, immLowerBound, + if (comp->CheckHWIntrinsicImmRange(intrinsicId, simdBaseType, immOp1, mustExpand, immLowerBound, immUpperBound, hasFullRangeImm, &useFallback)) { // We're already in the right shape, so just stop tracking ourselves as a user call @@ -629,11 +628,10 @@ void Rationalizer::RewriteHWIntrinsicBlendv(GenTree** use, Compiler::GenTreeStac // get to 
rationalization and we cannot take advantage of embedded masking // then we want to rewrite things to just directly produce TYP_SIMD instead. - NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); - var_types retType = node->TypeGet(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); + var_types retType = node->TypeGet(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); if (simdSize == 64) { @@ -652,18 +650,18 @@ void Rationalizer::RewriteHWIntrinsicBlendv(GenTree** use, Compiler::GenTreeStac if (scratchSideEffects.IsLirInvariantInRange(comp, op2, node)) { - unsigned tgtMaskSize = simdSize / genTypeSize(simdBaseType); - CorInfoType tgtSimdBaseJitType = CORINFO_TYPE_UNDEF; + unsigned tgtMaskSize = simdSize / genTypeSize(simdBaseType); + var_types tgtSimdBaseType = TYP_UNDEF; - if (op2->isEmbeddedMaskingCompatible(comp, tgtMaskSize, tgtSimdBaseJitType)) + if (op2->isEmbeddedMaskingCompatible(comp, tgtMaskSize, tgtSimdBaseType)) { // We are going to utilize the embedded mask, so we don't need to rewrite. However, - // we want to fixup the simdBaseJitType here since it simplifies lowering and allows + // we want to fixup the simdBaseType here since it simplifies lowering and allows // both embedded broadcast and the mask to be live simultaneously. - if (tgtSimdBaseJitType != CORINFO_TYPE_UNDEF) + if (tgtSimdBaseType != TYP_UNDEF) { - op2->AsHWIntrinsic()->SetSimdBaseJitType(tgtSimdBaseJitType); + op2->AsHWIntrinsic()->SetSimdBaseType(tgtSimdBaseType); } return; } @@ -698,7 +696,7 @@ void Rationalizer::RewriteHWIntrinsicBlendv(GenTree** use, Compiler::GenTreeStac if (HWIntrinsicInfo::NeedsNormalizeSmallTypeToInt(intrinsic) && varTypeIsSmall(simdBaseType)) { - node->SetSimdBaseJitType(varTypeIsUnsigned(simdBaseType) ? 
CORINFO_TYPE_UINT : CORINFO_TYPE_INT); + node->SetSimdBaseType(varTypeIsUnsigned(simdBaseType) ? TYP_UINT : TYP_INT); } node->ChangeHWIntrinsicId(intrinsic); } @@ -718,11 +716,10 @@ void Rationalizer::RewriteHWIntrinsicMaskOp(GenTree** use, Compiler::GenTreeStac // get to rationalization and we're just converting that back to TYP_SIMD, // then we want to rewrite things to just directly produce TYP_SIMD instead. - NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); - var_types retType = node->TypeGet(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); + NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); + var_types retType = node->TypeGet(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); if (simdSize == 64) { @@ -781,8 +778,8 @@ void Rationalizer::RewriteHWIntrinsicMaskOp(GenTree** use, Compiler::GenTreeStac case TYP_UINT: case TYP_FLOAT: { - simdBaseJitType = CORINFO_TYPE_FLOAT; - intrinsic = (simdSize == 32) ? NI_AVX_MoveMask : NI_X86Base_MoveMask; + simdBaseType = TYP_FLOAT; + intrinsic = (simdSize == 32) ? NI_AVX_MoveMask : NI_X86Base_MoveMask; break; } @@ -790,8 +787,8 @@ void Rationalizer::RewriteHWIntrinsicMaskOp(GenTree** use, Compiler::GenTreeStac case TYP_ULONG: case TYP_DOUBLE: { - simdBaseJitType = CORINFO_TYPE_DOUBLE; - intrinsic = (simdSize == 32) ? NI_AVX_MoveMask : NI_X86Base_MoveMask; + simdBaseType = TYP_DOUBLE; + intrinsic = (simdSize == 32) ? 
NI_AVX_MoveMask : NI_X86Base_MoveMask; break; } @@ -801,7 +798,7 @@ void Rationalizer::RewriteHWIntrinsicMaskOp(GenTree** use, Compiler::GenTreeStac } } - node->SetSimdBaseJitType(simdBaseJitType); + node->SetSimdBaseType(simdBaseType); node->ChangeHWIntrinsicId(intrinsic); } } @@ -856,12 +853,11 @@ void Rationalizer::RewriteHWIntrinsicToNonMask(GenTree** use, Compiler::GenTreeS case NI_AVX512_XnorMask: { - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); - GenTree* op1 = - comp->gtNewSimdBinOpNode(GT_XOR, simdType, node->Op(1), node->Op(2), simdBaseJitType, simdSize); + GenTree* op1 = comp->gtNewSimdBinOpNode(GT_XOR, simdType, node->Op(1), node->Op(2), simdBaseType, simdSize); BlockRange().InsertBefore(node, op1); node->Op(1) = op1; @@ -1299,11 +1295,10 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree { GenTreeHWIntrinsic* node = (*use)->AsHWIntrinsic(); - NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - var_types simdType = Compiler::getSIMDTypeForSize(simdSize); + NamedIntrinsic intrinsic = node->GetHWIntrinsicId(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); + var_types simdType = Compiler::getSIMDTypeForSize(simdSize); GenTree* op1 = node->Op(1); @@ -1322,8 +1317,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree case TYP_BYTE: case TYP_UBYTE: { - simdBaseType = TYP_UBYTE; - simdBaseJitType = CORINFO_TYPE_UBYTE; + simdBaseType = TYP_UBYTE; vecCon2->gtSimdVal.u64[0] = 0x8080808080808080; vecCon3->gtSimdVal.u64[0] = 
0x00FFFEFDFCFBFAF9; @@ -1339,8 +1333,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree case TYP_SHORT: case TYP_USHORT: { - simdBaseType = TYP_USHORT; - simdBaseJitType = CORINFO_TYPE_USHORT; + simdBaseType = TYP_USHORT; vecCon2->gtSimdVal.u64[0] = 0x8000800080008000; vecCon3->gtSimdVal.u64[0] = 0xFFF4FFF3FFF2FFF1; @@ -1357,8 +1350,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree case TYP_UINT: case TYP_FLOAT: { - simdBaseType = TYP_INT; - simdBaseJitType = CORINFO_TYPE_INT; + simdBaseType = TYP_INT; vecCon2->gtSimdVal.u64[0] = 0x8000000080000000; vecCon3->gtSimdVal.u64[0] = 0xFFFFFFE2FFFFFFE1; @@ -1375,8 +1367,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree case TYP_ULONG: case TYP_DOUBLE: { - simdBaseType = TYP_LONG; - simdBaseJitType = CORINFO_TYPE_LONG; + simdBaseType = TYP_LONG; vecCon2->gtSimdVal.u64[0] = 0x8000000000000000; vecCon3->gtSimdVal.u64[0] = 0xFFFFFFFFFFFFFFC1; @@ -1396,7 +1387,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree } BlockRange().InsertAfter(op1, vecCon2); - GenTree* tmp = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon2, simdBaseJitType, simdSize); + GenTree* tmp = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon2, simdBaseType, simdSize); BlockRange().InsertAfter(vecCon2, tmp); op1 = tmp; @@ -1410,7 +1401,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree } BlockRange().InsertAfter(op1, vecCon3); - tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, vecCon3, intrinsic, simdBaseJitType, simdSize); + tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, vecCon3, intrinsic, simdBaseType, simdSize); BlockRange().InsertAfter(vecCon3, tmp); op1 = tmp; @@ -1429,27 +1420,26 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree GenTree* op2 = comp->gtClone(op1); BlockRange().InsertAfter(op1, op2); - tmp = 
comp->gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_ZeroExtendWideningUpper, simdBaseJitType, 16); + tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_ZeroExtendWideningUpper, simdBaseType, 16); BlockRange().InsertBefore(op2, tmp); op1 = tmp; GenTree* icon = comp->gtNewIconNode(8); BlockRange().InsertBefore(op2, icon); - tmp = comp->gtNewSimdBinOpNode(GT_LSH, simdType, op1, icon, CORINFO_TYPE_USHORT, simdSize); + tmp = comp->gtNewSimdBinOpNode(GT_LSH, simdType, op1, icon, TYP_USHORT, simdSize); BlockRange().InsertBefore(op2, tmp); op1 = tmp; - tmp = comp->gtNewSimdGetLowerNode(TYP_SIMD8, op2, simdBaseJitType, 16); + tmp = comp->gtNewSimdGetLowerNode(TYP_SIMD8, op2, simdBaseType, 16); BlockRange().InsertAfter(op2, tmp); op2 = tmp; - tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_AddWideningLower, simdBaseJitType, 8); + tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_AddWideningLower, simdBaseType, 8); BlockRange().InsertAfter(op2, tmp); op1 = tmp; - simdBaseType = TYP_USHORT; - simdBaseJitType = CORINFO_TYPE_USHORT; + simdBaseType = TYP_USHORT; } // Sum the elements @@ -1467,22 +1457,21 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree GenTree* op2 = comp->gtClone(op1); BlockRange().InsertAfter(op1, op2); - tmp = - comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize); + tmp = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseType, simdSize); BlockRange().InsertAfter(op2, tmp); op1 = tmp; } else { - tmp = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize); + tmp = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseType, simdSize); BlockRange().InsertAfter(op1, tmp); op1 = tmp; } } else if (simdSize == 16) { - tmp = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, 
simdBaseJitType, - simdSize); + tmp = + comp->gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseType, simdSize); BlockRange().InsertAfter(op1, tmp); op1 = tmp; } @@ -1499,7 +1488,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree node->gtType = genActualType(simdBaseType); node->ChangeHWIntrinsicId(intrinsic); node->SetSimdSize(8); - node->SetSimdBaseJitType(simdBaseJitType); + node->SetSimdBaseType(simdBaseType); node->Op(1) = op1; if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) @@ -1522,7 +1511,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree parents.Push(castNode); } #elif defined(TARGET_XARCH) - simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; + simdBaseType = varTypeIsUnsigned(simdBaseType) ? TYP_UBYTE : TYP_BYTE; // We want to tightly pack the most significant byte of each short/ushort // and then zero the tightly packed least significant bytes @@ -1554,30 +1543,30 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree memcpy(&op2->AsVecCon()->gtSimdVal, &simdVal, simdSize); BlockRange().InsertAfter(op1, op2); - GenTree* tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, shuffleIntrinsic, simdBaseJitType, simdSize); + GenTree* tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, shuffleIntrinsic, simdBaseType, simdSize); BlockRange().InsertAfter(op2, tmp); op1 = tmp; if (simdSize == 32) { - CorInfoType simdOtherJitType; + var_types simdOtherType; // Since Vector256 is 2x128-bit lanes we need a full width permutation so we get the lower // 64-bits of each lane next to eachother. The upper bits should be zero, but also don't // matter so we can also then simplify down to a 128-bit move mask. - simdOtherJitType = (simdBaseType == TYP_UBYTE) ? CORINFO_TYPE_ULONG : CORINFO_TYPE_LONG; + simdOtherType = (simdBaseType == TYP_UBYTE) ? 
TYP_ULONG : TYP_LONG; GenTree* icon = comp->gtNewIconNode(0xD8); BlockRange().InsertAfter(op1, icon); - tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, icon, NI_AVX2_Permute4x64, simdOtherJitType, simdSize); + tmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, icon, NI_AVX2_Permute4x64, simdOtherType, simdSize); BlockRange().InsertAfter(icon, tmp); op1 = tmp; simdType = TYP_SIMD16; - tmp = comp->gtNewSimdGetLowerNode(simdType, op1, simdBaseJitType, simdSize); + tmp = comp->gtNewSimdGetLowerNode(simdType, op1, simdBaseType, simdSize); BlockRange().InsertAfter(op1, tmp); op1 = tmp; @@ -1586,7 +1575,7 @@ void Rationalizer::RewriteHWIntrinsicExtractMsb(GenTree** use, Compiler::GenTree node->ChangeHWIntrinsicId(NI_X86Base_MoveMask); node->SetSimdSize(simdSize); - node->SetSimdBaseJitType(simdBaseJitType); + node->SetSimdBaseType(simdBaseType); node->Op(1) = op1; #else unreached(); diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp index 7fdbd76afb0081..d818285410c919 100644 --- a/src/coreclr/jit/simd.cpp +++ b/src/coreclr/jit/simd.cpp @@ -52,9 +52,8 @@ int Compiler::getSIMDVectorLength(unsigned simdSize, var_types baseType) // int Compiler::getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd) { - unsigned sizeBytes = 0; - CorInfoType baseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); - var_types baseType = JitType2PreciseVarType(baseJitType); + unsigned sizeBytes = 0; + var_types baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return getSIMDVectorLength(sizeBytes, baseType); } @@ -153,6 +152,16 @@ unsigned Compiler::getFFRegisterVarNum() } #endif +var_types Compiler::getBaseTypeForPrimitiveNumericClass(CORINFO_CLASS_HANDLE cls) +{ + CorInfoType jitType = info.compCompHnd->getTypeForPrimitiveNumericClass(cls); + if (jitType == CORINFO_TYPE_UNDEF) + { + return TYP_UNDEF; + } + return JitType2PreciseVarType(jitType); +} + //---------------------------------------------------------------------------------- // Return the base 
type and size of SIMD vector type given its type handle. // @@ -177,6 +186,299 @@ unsigned Compiler::getFFRegisterVarNum() // this when we implement SIMD intrinsic identification for the final // product. // +var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */) +{ + if (m_simdHandleCache == nullptr) + { + if (impInlineInfo == nullptr) + { + m_simdHandleCache = new (this, CMK_Generic) SIMDHandlesCache(); + } + else + { + // Steal the inliner compiler's cache (create it if not available). + + if (impInlineInfo->InlineRoot->m_simdHandleCache == nullptr) + { + impInlineInfo->InlineRoot->m_simdHandleCache = new (this, CMK_Generic) SIMDHandlesCache(); + } + + m_simdHandleCache = impInlineInfo->InlineRoot->m_simdHandleCache; + } + } + + if (sizeBytes != nullptr) + { + *sizeBytes = 0; + } + + if ((typeHnd == nullptr) || !isIntrinsicType(typeHnd)) + { + return TYP_UNDEF; + } + + const char* namespaceName; + const char* className = getClassNameFromMetadata(typeHnd, &namespaceName); + + // fast path search using cached type handles of important types + var_types simdBaseType = TYP_UNDEF; + unsigned size = 0; + + if (isNumericsNamespace(namespaceName)) + { + switch (className[0]) + { + case 'P': + { + if (strcmp(className, "Plane") != 0) + { + return TYP_UNDEF; + } + + JITDUMP(" Known type Plane\n"); + m_simdHandleCache->PlaneHandle = typeHnd; + + simdBaseType = TYP_FLOAT; + size = 4 * genTypeSize(TYP_FLOAT); + break; + } + + case 'Q': + { + if (strcmp(className, "Quaternion") != 0) + { + return TYP_UNDEF; + } + + JITDUMP(" Known type Quaternion\n"); + m_simdHandleCache->QuaternionHandle = typeHnd; + + simdBaseType = TYP_FLOAT; + size = 4 * genTypeSize(TYP_FLOAT); + break; + } + + case 'V': + { + if (strncmp(className, "Vector", 6) != 0) + { + return TYP_UNDEF; + } + + switch (className[6]) + { + case '\0': + { + JITDUMP(" Found type Vector\n"); + m_simdHandleCache->VectorHandle = typeHnd; + break; + } + + case '2': 
+ { + if (className[7] != '\0') + { + return TYP_UNDEF; + } + + JITDUMP(" Found Vector2\n"); + m_simdHandleCache->Vector2Handle = typeHnd; + + simdBaseType = TYP_FLOAT; + size = 2 * genTypeSize(TYP_FLOAT); + break; + } + + case '3': + { + if (className[7] != '\0') + { + return TYP_UNDEF; + } + + JITDUMP(" Found Vector3\n"); + m_simdHandleCache->Vector3Handle = typeHnd; + + simdBaseType = TYP_FLOAT; + size = 3 * genTypeSize(TYP_FLOAT); + break; + } + + case '4': + { + if (className[7] != '\0') + { + return TYP_UNDEF; + } + + JITDUMP(" Found Vector4\n"); + m_simdHandleCache->Vector4Handle = typeHnd; + + simdBaseType = TYP_FLOAT; + size = 4 * genTypeSize(TYP_FLOAT); + break; + } + + case '`': + { + if ((className[7] != '1') || (className[8] != '\0')) + { + return TYP_UNDEF; + } + + CORINFO_CLASS_HANDLE typeArgHnd = info.compCompHnd->getTypeInstantiationArgument(typeHnd, 0); + simdBaseType = getBaseTypeForPrimitiveNumericClass(typeArgHnd); + + if ((simdBaseType < TYP_BYTE) || (simdBaseType > TYP_DOUBLE)) + { + return TYP_UNDEF; + } + + JITDUMP(" Found Vector<%s>\n", varTypeName(simdBaseType)); + size = getVectorTByteLength(); + + if (size == 0) + { + return TYP_UNDEF; + } + break; + } + + default: + { + return TYP_UNDEF; + } + } + break; + } + + default: + { + return TYP_UNDEF; + } + } + } +#ifdef FEATURE_HW_INTRINSICS + else + { + size = info.compCompHnd->getClassSize(typeHnd); + + switch (size) + { +#if defined(TARGET_ARM64) + case 8: + { + if (strcmp(className, "Vector64`1") != 0) + { + return TYP_UNDEF; + } + + CORINFO_CLASS_HANDLE typeArgHnd = info.compCompHnd->getTypeInstantiationArgument(typeHnd, 0); + simdBaseType = getBaseTypeForPrimitiveNumericClass(typeArgHnd); + + if ((simdBaseType < TYP_BYTE) || (simdBaseType > TYP_DOUBLE)) + { + return TYP_UNDEF; + } + + JITDUMP(" Found Vector64<%s>\n", varTypeName(simdBaseType)); + break; + } +#endif // TARGET_ARM64 + + case 16: + { + if (strcmp(className, "Vector128`1") != 0) + { + return TYP_UNDEF; + } + + 
CORINFO_CLASS_HANDLE typeArgHnd = info.compCompHnd->getTypeInstantiationArgument(typeHnd, 0); + simdBaseType = getBaseTypeForPrimitiveNumericClass(typeArgHnd); + + if ((simdBaseType < TYP_BYTE) || (simdBaseType > TYP_DOUBLE)) + { + return TYP_UNDEF; + } + + JITDUMP(" Found Vector128<%s>\n", varTypeName(simdBaseType)); + break; + } + +#if defined(TARGET_XARCH) + case 32: + { + if (strcmp(className, "Vector256`1") != 0) + { + return TYP_UNDEF; + } + + CORINFO_CLASS_HANDLE typeArgHnd = info.compCompHnd->getTypeInstantiationArgument(typeHnd, 0); + simdBaseType = getBaseTypeForPrimitiveNumericClass(typeArgHnd); + + if ((simdBaseType < TYP_BYTE) || (simdBaseType > TYP_DOUBLE)) + { + return TYP_UNDEF; + } + + if (!compOpportunisticallyDependsOn(InstructionSet_AVX)) + { + // We must treat as a regular struct if AVX isn't supported + return TYP_UNDEF; + } + + JITDUMP(" Found Vector256<%s>\n", varTypeName(simdBaseType)); + break; + } + + case 64: + { + if (strcmp(className, "Vector512`1") != 0) + { + return TYP_UNDEF; + } + + CORINFO_CLASS_HANDLE typeArgHnd = info.compCompHnd->getTypeInstantiationArgument(typeHnd, 0); + simdBaseType = getBaseTypeForPrimitiveNumericClass(typeArgHnd); + + if ((simdBaseType < TYP_BYTE) || (simdBaseType > TYP_DOUBLE)) + { + return TYP_UNDEF; + } + + if (!compOpportunisticallyDependsOn(InstructionSet_AVX512)) + { + // We must treat as a regular struct if AVX512 isn't supported + return TYP_UNDEF; + } + + JITDUMP(" Found Vector512<%s>\n", varTypeName(simdBaseType)); + break; + } +#endif // TARGET_XARCH + + default: + { + return TYP_UNDEF; + } + } + } +#endif // FEATURE_HW_INTRINSICS + + if (sizeBytes != nullptr) + { + *sizeBytes = size; + } + + if (simdBaseType != TYP_UNDEF) + { + assert(size == info.compCompHnd->getClassSize(typeHnd)); + setUsesSIMDTypes(true); + } + + return simdBaseType; +} + CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */) { if (m_simdHandleCache == 
nullptr) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 22b433629c26dc..1f3485e38b6492 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -2354,9 +2354,9 @@ ValueNum ValueNumStore::VNOneForSimdType(var_types simdType, var_types simdBaseT return VNBroadcastForSimdType(simdType, simdBaseType, oneVN); } -ValueNum ValueNumStore::VNForSimdType(unsigned simdSize, CorInfoType simdBaseJitType) +ValueNum ValueNumStore::VNForSimdType(unsigned simdSize, var_types simdBaseType) { - ValueNum baseTypeVN = VNForIntCon(INT32(simdBaseJitType)); + ValueNum baseTypeVN = VNForIntCon(INT32(simdBaseType)); ValueNum sizeVN = VNForIntCon(simdSize); ValueNum simdTypeVN = VNForFunc(TYP_REF, VNF_SimdType, sizeVN, baseTypeVN); @@ -13298,7 +13298,7 @@ void Compiler::fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree) else { VNFunc func = GetVNFuncForNode(tree); - ValueNum simdTypeVN = vnStore->VNForSimdType(tree->GetSimdSize(), tree->GetNormalizedSimdBaseJitType()); + ValueNum simdTypeVN = vnStore->VNForSimdType(tree->GetSimdSize(), tree->GetSimdBaseType()); ValueNumPair resultTypeVNPair(simdTypeVN, simdTypeVN); JITDUMP(" simdTypeVN is "); diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 77af89db114c84..738e8fac121674 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -701,7 +701,7 @@ class ValueNumStore ValueNum VNOneForSimdType(var_types simdType, var_types simdBaseType); // A helper function for constructing VNF_SimdType VNs. - ValueNum VNForSimdType(unsigned simdSize, CorInfoType simdBaseJitType); + ValueNum VNForSimdType(unsigned simdSize, var_types simdBaseType); // Returns if a value number represents NaN in all elements bool VNIsVectorNaN(var_types simdType, var_types simdBaseType, ValueNum valVN);