@@ -2238,7 +2238,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::ZERO_EXTEND,      MVT::v32i8,  Custom);
     setOperationAction(ISD::ANY_EXTEND,       MVT::v32i8,  Custom);

-    for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
+    for (auto VT : {MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16,
+                    MVT::v16f16, MVT::v8f16}) {
       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
       setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
     }
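
The hunk above adds v16f16 and v8f16 to the types registered for MLOAD/MSTORE, so masked half-precision loads and stores become Legal with AVX512VL and Custom-lowered otherwise. A minimal IRBuilder sketch (not part of the commit; emitMaskedHalfOps, Ptr, and Mask are hypothetical names) of the v8f16 masked intrinsics this registration covers:

// Sketch only: emits the @llvm.masked.load/store.v8f16 intrinsics that the
// registrations above make lowerable; uses only standard IRBuilder APIs.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

void emitMaskedHalfOps(Function &F, Value *Ptr, Value *Mask) {
  IRBuilder<> B(&F.getEntryBlock());
  auto *V8F16 = FixedVectorType::get(Type::getHalfTy(F.getContext()), 8);
  Value *PassThru = Constant::getNullValue(V8F16);
  // @llvm.masked.load.v8f16.*: reaches LowerMLOAD when VLX is unavailable.
  Value *Ld = B.CreateMaskedLoad(V8F16, Ptr, Align(16), Mask, PassThru);
  // @llvm.masked.store.v8f16.*: reaches LowerMSTORE when VLX is unavailable.
  B.CreateMaskedStore(Ld, Ptr, Align(16), Mask);
}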
@@ -33192,8 +33193,8 @@ static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
          "Cannot lower masked load op.");

   assert((ScalarVT.getSizeInBits() >= 32 ||
-          (Subtarget.hasBWI() &&
-              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
+          (Subtarget.hasBWI() && (ScalarVT == MVT::i8 || ScalarVT == MVT::i16 ||
+                                  ScalarVT == MVT::f16))) &&
          "Unsupported masked load op.");

   // This operation is legal for targets with VLX, but without
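
The comment closing this hunk refers to the widening path: without VLX, a 128- or 256-bit masked load is widened to 512 bits so the AVX512BW patterns apply. A hedged sketch of that rule (illustrative only; the commit's actual widening code sits further down in LowerMLOAD and is not shown here):

// Hypothetical helper, not in the commit: compute the 512-bit type a
// sub-512-bit vector is widened to, e.g. v8f16 -> v32f16.
#include "llvm/CodeGenTypes/MachineValueType.h" // llvm/Support/... in older trees

static llvm::MVT widenTo512Bits(llvm::MVT VT) {
  unsigned WideNumElts = 512 / VT.getScalarSizeInBits(); // f16: 512/16 = 32
  return llvm::MVT::getVectorVT(VT.getVectorElementType(), WideNumElts);
}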
@@ -33240,9 +33241,9 @@ static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
          "Cannot lower masked store op.");

   assert((ScalarVT.getSizeInBits() >= 32 ||
-          (Subtarget.hasBWI() &&
-              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
-         "Unsupported masked store op.");
+          (Subtarget.hasBWI() && (ScalarVT == MVT::i8 || ScalarVT == MVT::i16 ||
+                                  ScalarVT == MVT::f16))) &&
+         "Unsupported masked store op.");

   // This operation is legal for targets with VLX, but without
   // VLX the vector should be widened to 512 bit
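
Taken together, the two asserts now encode the same legality predicate for masked loads and stores. A sketch of that condition as a standalone check (hypothetical helper, restating only what the diff asserts):

// Elements of 32 bits or more are always lowerable; i8/i16/f16 additionally
// require AVX512BW, matching both asserts in this commit.
static bool isLowerableMaskedElt(llvm::MVT ScalarVT, bool HasBWI) {
  return ScalarVT.getSizeInBits() >= 32 ||
         (HasBWI && (ScalarVT == llvm::MVT::i8 || ScalarVT == llvm::MVT::i16 ||
                     ScalarVT == llvm::MVT::f16));
}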