 using namespace clang;
 using namespace clang::CIRGen;

-mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
-                                               const CallExpr *e) {
-  if (builtinID == Builtin::BI__builtin_cpu_is) {
-    cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_is");
+mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned BuiltinID,
+                                               const CallExpr *E) {
+  if (BuiltinID == Builtin::BI__builtin_cpu_is) {
+    cgm.errorNYI(E->getSourceRange(), "__builtin_cpu_is");
     return {};
   }
-  if (builtinID == Builtin::BI__builtin_cpu_supports) {
-    cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_supports");
+  if (BuiltinID == Builtin::BI__builtin_cpu_supports) {
+    cgm.errorNYI(E->getSourceRange(), "__builtin_cpu_supports");
     return {};
   }
-  if (builtinID == Builtin::BI__builtin_cpu_init) {
-    cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_init");
+  if (BuiltinID == Builtin::BI__builtin_cpu_init) {
+    cgm.errorNYI(E->getSourceRange(), "__builtin_cpu_init");
     return {};
   }

@@ -43,7 +43,56 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
   // Find out if any arguments are required to be integer constant expressions.
   assert(!cir::MissingFeatures::handleBuiltinICEArguments());

-  switch (builtinID) {
+  llvm::SmallVector<mlir::Value, 4> Ops;
+
+  // Find out if any arguments are required to be integer constant expressions.
+  unsigned ICEArguments = 0;
+  ASTContext::GetBuiltinTypeError Error;
+  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+  assert(Error == ASTContext::GE_None && "Should not codegen an error");
+  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+    Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E));
+  }
+
+  // OG has unordered comparisons as a form of optimization in addition to
+  // ordered comparisons, while CIR doesn't.
+  //
+  // This means that we can't encode the comparison code of UGT (unordered
+  // greater than), at least not at the CIR level.
+  //
+  // The boolean shouldInvert compensates for this.
+  // For example: to get the comparison code UGT, we pass in
+  // getVectorFCmpIR(OLE, shouldInvert = true) since OLE is the inverse of UGT.
+
+  // There are several ways to support this otherwise:
+  // - Register extra CmpOpKinds for the unordered comparison types and build
+  //   the translation code to go from the CIR to the LLVM dialect. (Notice we
+  //   get this naturally with shouldInvert, benefiting from existing
+  //   infrastructure, albeit having to generate an extra `not` at the CIR
+  //   level.)
+  // - Just add the extra comparison codes to a new VecCmpOpKind instead of
+  //   cluttering CmpOpKind.
+  // - Add a boolean to VecCmpOp to indicate whether it's doing an unordered
+  //   or an ordered comparison.
+  // - Just emit the intrinsic call instead of calling this helper; see how
+  //   the LLVM lowering handles this.
+  auto getVectorFCmpIR = [this, &Ops, &E](cir::CmpOpKind pred,
+                                          bool shouldInvert, bool isSignaling) {
+    assert(!cir::MissingFeatures::cgFPOptionsRAII());
+    auto loc = getLoc(E->getExprLoc());
+    mlir::Value cmp;
+    if (builder.getIsFPConstrained())
+      // TODO: Add the isSignaling boolean once emitConstrainedFPCall is
+      // implemented.
+      assert(cir::MissingFeatures::emitConstrainedFPCall());
+    else
+      cmp = builder.createVecCompare(loc, pred, Ops[0], Ops[1]);
+
+    mlir::Value bitCast = builder.createBitcast(
+        shouldInvert ? builder.createNot(cmp) : cmp, Ops[0].getType());
+    return bitCast;
+  };
+
+  switch (BuiltinID) {
   default:
     return {};
   case X86::BI_mm_prefetch:
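(Reviewer note: for intuition on the shouldInvert trick described in the comment above, here is a minimal standalone sketch, plain scalar C++ rather than CIR and with hypothetical helper names, showing that negating an ordered OLE compare yields exactly UGT semantics, NaN cases included.)

#include <cassert>
#include <cmath>

// ole: ordered "less than or equal" -- false whenever either operand is NaN.
static bool ole(double x, double y) { return x <= y; }

// ugt: "unordered or greater than" -- true if either operand is NaN or x > y.
// NOT(OLE) == UGT is the identity that shouldInvert relies on.
static bool ugt(double x, double y) { return !ole(x, y); }

int main() {
  double nan = std::nan("");
  assert(ugt(2.0, 1.0));  // greater than        -> true
  assert(!ugt(1.0, 2.0)); // less than           -> false
  assert(!ugt(1.0, 1.0)); // equal               -> false
  assert(ugt(nan, 1.0));  // unordered (NaN lhs) -> true
  assert(ugt(1.0, nan));  // unordered (NaN rhs) -> true
  return 0;
}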
@@ -710,10 +759,18 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
   case X86::BI__builtin_ia32_cmpunordpd:
   case X86::BI__builtin_ia32_cmpneqps:
   case X86::BI__builtin_ia32_cmpneqpd:
+    cgm.errorNYI(E->getSourceRange(),
+                 std::string("unimplemented X86 builtin call: ") +
+                     getContext().BuiltinInfo.getName(BuiltinID));
+    return {};
   case X86::BI__builtin_ia32_cmpnltps:
   case X86::BI__builtin_ia32_cmpnltpd:
+    return getVectorFCmpIR(cir::CmpOpKind::lt, /*shouldInvert=*/true,
+                           /*isSignaling=*/true);
   case X86::BI__builtin_ia32_cmpnleps:
   case X86::BI__builtin_ia32_cmpnlepd:
+    return getVectorFCmpIR(cir::CmpOpKind::le, /*shouldInvert=*/true,
+                           /*isSignaling=*/true);
   case X86::BI__builtin_ia32_cmpordps:
   case X86::BI__builtin_ia32_cmpordpd:
   case X86::BI__builtin_ia32_cmpph128_mask:
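(Reviewer note: as a sanity check of the cmpnlt mapping above: __builtin_ia32_cmpnltps is the builtin behind _mm_cmpnlt_ps, and "not less than" means "unordered or >=", so NaN lanes come back all-ones. A small host-side sketch with made-up lane values:)

#include <xmmintrin.h>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  // _mm_set_ps takes lanes high-to-low; read low-to-high, a = {3, 2, 1, NaN}.
  __m128 a = _mm_set_ps(std::nanf(""), 1.0f, 2.0f, 3.0f);
  __m128 b = _mm_set_ps(0.0f, 2.0f, 2.0f, 1.0f);
  // Per lane: all-ones where !(a < b), matching NOT(OLT) == UGE above.
  __m128 m = _mm_cmpnlt_ps(a, b);
  uint32_t lanes[4];
  std::memcpy(lanes, &m, sizeof(lanes));
  for (int i = 0; i < 4; ++i)
    std::printf("lane %d: %s\n", i, lanes[i] ? "set" : "clear");
  // Expected low-to-high: set (3 >= 1), set (2 >= 2), clear (1 < 2),
  // set (NaN vs 0 is unordered).
  return 0;
}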
@@ -798,9 +855,26 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
   case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
   case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
   case X86::BI__builtin_ia32_prefetchi:
-    cgm.errorNYI(e->getSourceRange(),
+    cgm.errorNYI(E->getSourceRange(),
                  std::string("unimplemented X86 builtin call: ") +
-                     getContext().BuiltinInfo.getName(builtinID));
+                     getContext().BuiltinInfo.getName(BuiltinID));
     return {};
   }
 }
+
+mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments,
+                                                        unsigned Idx,
+                                                        const CallExpr *E) {
+  mlir::Value Arg = {};
+  if ((ICEArguments & (1 << Idx)) == 0) {
+    Arg = emitScalarExpr(E->getArg(Idx));
+  } else {
+    // If this is required to be a constant, constant fold it so that we
+    // know that the generated intrinsic gets a ConstantInt.
+    std::optional<llvm::APSInt> Result =
+        E->getArg(Idx)->getIntegerConstantExpr(getContext());
+    assert(Result && "Expected argument to be a constant");
+    Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result);
+  }
+  return Arg;
+}
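(Reviewer note: a toy illustration of the ICEArguments contract that emitScalarOrConstFoldImmArg assumes, with a hypothetical mask: bit Idx set means argument Idx must be an integer constant expression and takes the constant-fold path.)

#include <cstdio>

int main() {
  // Hypothetical builtin: argument 1 must be an immediate (an ICE),
  // while argument 0 may be any runtime value.
  unsigned ICEArguments = 1u << 1;
  for (unsigned Idx = 0; Idx < 2; ++Idx) {
    if ((ICEArguments & (1u << Idx)) == 0)
      std::printf("arg %u: emitScalarExpr path (runtime value)\n", Idx);
    else
      std::printf("arg %u: getConstInt path (constant folded)\n", Idx);
  }
  return 0;
}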