From 850c35e42157c13e37c017279aae6f4b19d7e4b3 Mon Sep 17 00:00:00 2001
From: Arkar-Hema
Date: Wed, 2 Apr 2025 07:20:16 -0400
Subject: [PATCH 01/15] Reorder Relu and maxpool optimization

Signed-off-by: Arkar-Hema
---
 output.txt                                  | 86 +++++++++++++++++++++
 src/Dialect/ONNX/Transforms/Decompose.cpp   | 61 +++++++++++++++
 test/mlir/onnx/reorder_relu_to_maxpool.mlir | 33 ++++++++
 3 files changed, 180 insertions(+)
 create mode 100644 output.txt
 create mode 100644 test/mlir/onnx/reorder_relu_to_maxpool.mlir

diff --git a/output.txt b/output.txt
new file mode 100644
index 0000000000..6f9ec4e2f6
--- /dev/null
+++ b/output.txt
@@ -0,0 +1,86 @@
+diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp
+index d61a980e..762de75b 100644
+--- a/src/Dialect/ONNX/Transforms/Decompose.cpp
++++ b/src/Dialect/ONNX/Transforms/Decompose.cpp
+@@ -1249,6 +1249,55 @@ struct SumToAddPattern : public OpRewritePattern<ONNXSumOp> {
+   }
+ };
+ 
++/// reorder relu-> maxpool to maxpool->relu
++struct ReorderReLUToMaxPoolPattern : public OpRewritePattern<ONNXMaxPoolSingleOutOp> {
++  using OpRewritePattern<ONNXMaxPoolSingleOutOp>::OpRewritePattern;
++
++  LogicalResult matchAndRewrite(
++      ONNXMaxPoolSingleOutOp maxPoolOp, PatternRewriter &rewriter) const final {
++
++    // Get the input to MaxPool
++    Value maxPoolInput = maxPoolOp.getX();
++    Operation *inputOp = maxPoolInput.getDefiningOp();
++
++    // Check if the input to MaxPool is a ReLU layer
++    if (!inputOp || !isa<ONNXReluOp>(inputOp))
++      return failure(); // Only process if MaxPool follows a ReLU layer
++
++    auto reluOp = dyn_cast<ONNXReluOp>(inputOp);
++
++    // Create a new MaxPool operation using ReLU's output shape
++    Value newMaxPool = rewriter.create<ONNXMaxPoolSingleOutOp>(
++        maxPoolOp.getLoc(),
++        maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape
++        reluOp.getX(), // Original ReLU's input becomes MaxPool's input
++        maxPoolOp.getAutoPadAttr(), // Auto pad
++        maxPoolOp.getCeilModeAttr(), // Ceil mode
++        maxPoolOp.getDilationsAttr(), // Dilations
++        maxPoolOp.getKernelShapeAttr(), // Kernel shape
++        maxPoolOp.getPadsAttr(), // Pads
++        maxPoolOp.getStorageOrderAttr(),// Storage order
++        maxPoolOp.getStridesAttr() // Strides
++    );
++
++    // Create a new ReLU operation using MaxPool's output shape
++    Value newRelu = rewriter.create<ONNXReluOp>(
++        reluOp.getLoc(),
++        maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape
++        newMaxPool // New MaxPool output becomes ReLU's input
++    );
++    // Replace all uses of the old MaxPool output with the new ReLU output
++    maxPoolOp.getResult().replaceAllUsesWith(newRelu);
++
++    // Safely erase the old MaxPool (now unused)
++    rewriter.eraseOp(maxPoolOp);
++
++    // Replace the original ReLU output with the new ReLU output
++    rewriter.replaceOp(reluOp, newRelu);
++    return success();
++  }
++};
++
+ // =============================================================================
+ // Pattern for replacing CastLikeOp by CastOp.
+ // =============================================================================
+@@ -1385,6 +1434,17 @@ void DecomposeONNXToONNXPass::runOnOperation() {
+              op.getValueStringAttr() || op.getValueStringsAttr());
+   });
+ 
++  target.addDynamicallyLegalOp<ONNXReluOp>([](Operation *op) {
++    if (auto reluOp = dyn_cast<ONNXReluOp>(op)) {
++      for (auto user : reluOp.getResult().getUsers()) {
++        if (auto poolOp = dyn_cast<ONNXMaxPoolSingleOutOp>(user)) {
++          return false; // Reorder ReLU to MaxPool condition met
++        }
++      }
++    }
++    return true;
++  });
++
+   // Decompose CustomOp FusedMatMul introduced by onnxruntime:
+   // https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.FusedMatMul
+   target.addDynamicallyLegalOp<ONNXCustomOp>([](ONNXCustomOp op) {
+@@ -1440,6 +1500,7 @@ void onnx_mlir::getDecomposeONNXToONNXPatterns(
+   patterns.insert(context);
+ 
+   // TODO: consider whether to include SoftmaxPattern here
++  patterns.insert<ReorderReLUToMaxPoolPattern>(context);
+ }
+ 
+ /*!
diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp
index d61a980e15..762de75bf3 100644
--- a/src/Dialect/ONNX/Transforms/Decompose.cpp
+++ b/src/Dialect/ONNX/Transforms/Decompose.cpp
@@ -1249,6 +1249,55 @@ struct SumToAddPattern : public OpRewritePattern<ONNXSumOp> {
   }
 };
 
+/// reorder relu-> maxpool to maxpool->relu
+struct ReorderReLUToMaxPoolPattern : public OpRewritePattern<ONNXMaxPoolSingleOutOp> {
+  using OpRewritePattern<ONNXMaxPoolSingleOutOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(
+      ONNXMaxPoolSingleOutOp maxPoolOp, PatternRewriter &rewriter) const final {
+
+    // Get the input to MaxPool
+    Value maxPoolInput = maxPoolOp.getX();
+    Operation *inputOp = maxPoolInput.getDefiningOp();
+
+    // Check if the input to MaxPool is a ReLU layer
+    if (!inputOp || !isa<ONNXReluOp>(inputOp))
+      return failure(); // Only process if MaxPool follows a ReLU layer
+
+    auto reluOp = dyn_cast<ONNXReluOp>(inputOp);
+
+    // Create a new MaxPool operation using ReLU's output shape
+    Value newMaxPool = rewriter.create<ONNXMaxPoolSingleOutOp>(
+        maxPoolOp.getLoc(),
+        maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape
+        reluOp.getX(), // Original ReLU's input becomes MaxPool's input
+        maxPoolOp.getAutoPadAttr(), // Auto pad
+        maxPoolOp.getCeilModeAttr(), // Ceil mode
+        maxPoolOp.getDilationsAttr(), // Dilations
+        maxPoolOp.getKernelShapeAttr(), // Kernel shape
+        maxPoolOp.getPadsAttr(), // Pads
+        maxPoolOp.getStorageOrderAttr(),// Storage order
+        maxPoolOp.getStridesAttr() // Strides
+    );
+
+    // Create a new ReLU operation using MaxPool's output shape
+    Value newRelu = rewriter.create<ONNXReluOp>(
+        reluOp.getLoc(),
+        maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape
+        newMaxPool // New MaxPool output becomes ReLU's input
+    );
+    // Replace all uses of the old MaxPool output with the new ReLU output
+    maxPoolOp.getResult().replaceAllUsesWith(newRelu);
+
+    // Safely erase the old MaxPool (now unused)
+    rewriter.eraseOp(maxPoolOp);
+
+    // Replace the original ReLU output with the new ReLU output
+    rewriter.replaceOp(reluOp, newRelu);
+    return success();
+  }
+};
+
 // =============================================================================
 // Pattern for replacing CastLikeOp by CastOp.
 // =============================================================================
@@ -1385,6 +1434,17 @@ void DecomposeONNXToONNXPass::runOnOperation() {
             op.getValueStringAttr() || op.getValueStringsAttr());
  });
 
+  target.addDynamicallyLegalOp<ONNXReluOp>([](Operation *op) {
+    if (auto reluOp = dyn_cast<ONNXReluOp>(op)) {
+      for (auto user : reluOp.getResult().getUsers()) {
+        if (auto poolOp = dyn_cast<ONNXMaxPoolSingleOutOp>(user)) {
+          return false; // Reorder ReLU to MaxPool condition met
+        }
+      }
+    }
+    return true;
+  });
+
   // Decompose CustomOp FusedMatMul introduced by onnxruntime:
   // https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.FusedMatMul
   target.addDynamicallyLegalOp<ONNXCustomOp>([](ONNXCustomOp op) {
@@ -1440,6 +1500,7 @@ void onnx_mlir::getDecomposeONNXToONNXPatterns(
   patterns.insert(context);
 
   // TODO: consider whether to include SoftmaxPattern here
+  patterns.insert<ReorderReLUToMaxPoolPattern>(context);
 }
 
 /*!
diff --git a/test/mlir/onnx/reorder_relu_to_maxpool.mlir b/test/mlir/onnx/reorder_relu_to_maxpool.mlir
new file mode 100644
index 0000000000..133b65b8a5
--- /dev/null
+++ b/test/mlir/onnx/reorder_relu_to_maxpool.mlir
@@ -0,0 +1,33 @@
+// RUN: onnx-mlir --useOnnxModelTypes=false --EmitONNXIR --printIR %s | FileCheck %s
+
+func.func @test_reorder_relu_maxpool(%arg0: tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> {
+  %0 = "onnx.Relu"(%arg0) {onnx_node_name = "onnx.Relu_0"} : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
+  %1 = "onnx.MaxPoolSingleOut"(%0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_1", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32>
+  return %1 : tensor<1x64x16x16xf32>
+
+  // CHECK-LABEL: func @test_reorder_relu_maxpool
+  // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> {
+  // CHECK: [[VAR_0_:%.+]] = "onnx.MaxPoolSingleOut"([[PARAM_0_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_0", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32>
+  // CHECK: [[VAR_1_:%.+]] = "onnx.Relu"([[VAR_0_]]) {onnx_node_name = "onnx.Relu_1"} : (tensor<1x64x16x16xf32>) -> tensor<1x64x16x16xf32>
+  // CHECK-NEXT: return [[VAR_1_]] : tensor<1x64x16x16xf32>
+
+}
+
+func.func @test_reorder_relu_maxpool_conv(%arg0: tensor<1x3x32x32xf32>) -> tensor<1x16x15x15xf32> {
+  %0 = onnx.Constant dense<0.00999999977> : tensor<16x3x3x3xf32>
+  %1 = onnx.Constant dense<[-0.549453557, -0.827535748, -0.358648896, 0.968641698, -0.0196946431, 0.269008577, -0.445898831, 0.947227954, 0.384573817, 1.60240877, -0.970565319, 0.224884078, -1.80497575, 1.07463968, -0.368380129, -1.6080451]> : tensor<16xf32>
+  %2 = "onnx.Conv"(%arg0, %0, %1) {auto_pad = "NOTSET", group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "onnx.Conv_0", pads = [0, 0, 0, 0]} : (tensor<1x3x32x32xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<1x16x30x30xf32>
+  %3 = "onnx.Relu"(%2) {onnx_node_name = "onnx.Relu_1"} : (tensor<1x16x30x30xf32>) -> tensor<1x16x30x30xf32>
+  %4 = "onnx.MaxPoolSingleOut"(%3) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_2", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x16x30x30xf32>) -> tensor<1x16x15x15xf32>
+  return %4 : tensor<1x16x15x15xf32>
+
+  // CHECK-LABEL: func @test_reorder_relu_maxpool_conv
+  // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x32x32xf32>) -> tensor<1x16x15x15xf32> {
+  // CHECK: [[VAR_0_:%.+]] = onnx.Constant dense<{{.*}}> : tensor<16x3x3x3xf32>
+  // CHECK: [[VAR_1_:%.+]] = onnx.Constant dense<{{.*}}> : tensor<16xf32>
+  // CHECK: [[CONV_OUT_:%.+]] = "onnx.Conv"([[PARAM_0_]], [[VAR_0_]], [[VAR_1_]])
+  // CHECK-SAME: (tensor<1x3x32x32xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<1x16x30x30xf32>
+  // CHECK: [[VAR_2_:%.+]] = "onnx.MaxPoolSingleOut"([[CONV_OUT_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.Conv_0_onnx.MaxPoolSingleOut_2", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x16x30x30xf32>) -> tensor<1x16x15x15xf32>
+  // CHECK: [[VAR_3_:%.+]] = "onnx.Relu"([[VAR_2_]]) {onnx_node_name = "onnx.Relu_3"} : (tensor<1x16x15x15xf32>) -> tensor<1x16x15x15xf32>
+  // CHECK-NEXT: return [[VAR_3_]] : tensor<1x16x15x15xf32>
+}
\ No newline at end of file

From 368b24ba0cd20143116d45ea5e38e912d856b37f Mon Sep 17 00:00:00 2001
From: Arkar-Hema
Date: Wed, 2 Apr 2025 07:27:28 -0400
Subject: [PATCH 02/15] Removed output file

Signed-off-by: Arkar-Hema
---
 output.txt | 86 ------------------------------------------------------
 1 file changed, 86 deletions(-)
 delete mode 100644 output.txt

diff --git a/output.txt b/output.txt
deleted file mode 100644
index 6f9ec4e2f6..0000000000
--- a/output.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp
-index d61a980e..762de75b 100644
---- a/src/Dialect/ONNX/Transforms/Decompose.cpp
-+++ b/src/Dialect/ONNX/Transforms/Decompose.cpp
-@@ -1249,6 +1249,55 @@ struct SumToAddPattern : public OpRewritePattern<ONNXSumOp> {
-   }
- };
- 
-+/// reorder relu-> maxpool to maxpool->relu
-+struct ReorderReLUToMaxPoolPattern : public OpRewritePattern<ONNXMaxPoolSingleOutOp> {
-+  using OpRewritePattern<ONNXMaxPoolSingleOutOp>::OpRewritePattern;
-+
-+  LogicalResult matchAndRewrite(
-+      ONNXMaxPoolSingleOutOp maxPoolOp, PatternRewriter &rewriter) const final {
-+
-+    // Get the input to MaxPool
-+    Value maxPoolInput = maxPoolOp.getX();
-+    Operation *inputOp = maxPoolInput.getDefiningOp();
-+
-+    // Check if the input to MaxPool is a ReLU layer
-+    if (!inputOp || !isa<ONNXReluOp>(inputOp))
-+      return failure(); // Only process if MaxPool follows a ReLU layer
-+
-+    auto reluOp = dyn_cast<ONNXReluOp>(inputOp);
-+
-+    // Create a new MaxPool operation using ReLU's output shape
-+    Value newMaxPool = rewriter.create<ONNXMaxPoolSingleOutOp>(
-+        maxPoolOp.getLoc(),
-+        maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape
-+        reluOp.getX(), // Original ReLU's input becomes MaxPool's input
-+        maxPoolOp.getAutoPadAttr(), // Auto pad
-+        maxPoolOp.getCeilModeAttr(), // Ceil mode
-+        maxPoolOp.getDilationsAttr(), // Dilations
-+        maxPoolOp.getKernelShapeAttr(), // Kernel shape
-+        maxPoolOp.getPadsAttr(), // Pads
-+        maxPoolOp.getStorageOrderAttr(),// Storage order
-+        maxPoolOp.getStridesAttr() // Strides
-+    );
-+
-+    // Create a new ReLU operation using MaxPool's output shape
-+    Value newRelu = rewriter.create<ONNXReluOp>(
-+        reluOp.getLoc(),
-+        maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape
-+        newMaxPool // New MaxPool output becomes ReLU's input
-+    );
-+    // Replace all uses of the old MaxPool output with the new ReLU output
-+    maxPoolOp.getResult().replaceAllUsesWith(newRelu);
-+
-+    // Safely erase the old MaxPool (now unused)
-+    rewriter.eraseOp(maxPoolOp);
-+
-+    // Replace the original ReLU output with the new ReLU output
-+    rewriter.replaceOp(reluOp, newRelu);
-+    return success();
-+  }
-+};
-+
- // =============================================================================
- // Pattern for replacing CastLikeOp by CastOp.
- // ============================================================================= -@@ -1385,6 +1434,17 @@ void DecomposeONNXToONNXPass::runOnOperation() { - op.getValueStringAttr() || op.getValueStringsAttr()); - }); - -+ target.addDynamicallyLegalOp([](Operation *op) { -+ if (auto reluOp = dyn_cast(op)) { -+ for (auto user : reluOp.getResult().getUsers()) { -+ if (auto poolOp = dyn_cast(user)) { -+ return false; // Reorder ReLU to MaxPool condition met -+ } -+ } -+ } -+ return true; -+ }); -+ - // Decompose CustomOp FusedMatMul introduced by onnxruntime: - // https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.FusedMatMul - target.addDynamicallyLegalOp([](ONNXCustomOp op) { -@@ -1440,6 +1500,7 @@ void onnx_mlir::getDecomposeONNXToONNXPatterns( - patterns.insert(context); - - // TODO: consider whether to include SoftmaxPattern here -+ patterns.insert(context); - } - - /*! From 42cdb7af9c9eeb35de32d6f32d4246c0f832f61c Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Fri, 4 Apr 2025 04:57:36 -0400 Subject: [PATCH 03/15] Updated the onnx_hybrid_transform test file Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/Transforms/Decompose.cpp | 30 +++++++++++------------ test/mlir/onnx/onnx_hybrid_transform.mlir | 8 +++--- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp index 762de75bf3..f0d2997ce8 100644 --- a/src/Dialect/ONNX/Transforms/Decompose.cpp +++ b/src/Dialect/ONNX/Transforms/Decompose.cpp @@ -1250,7 +1250,8 @@ struct SumToAddPattern : public OpRewritePattern { }; /// reorder relu-> maxpool to maxpool->relu -struct ReorderReLUToMaxPoolPattern : public OpRewritePattern { +struct ReorderReLUToMaxPoolPattern + : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite( @@ -1267,24 +1268,23 @@ struct ReorderReLUToMaxPoolPattern : public OpRewritePattern(inputOp); // Create a new MaxPool operation using ReLU's output shape - Value newMaxPool = rewriter.create( - maxPoolOp.getLoc(), - maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape - reluOp.getX(), // Original ReLU's input becomes MaxPool's input - maxPoolOp.getAutoPadAttr(), // Auto pad - maxPoolOp.getCeilModeAttr(), // Ceil mode - maxPoolOp.getDilationsAttr(), // Dilations - maxPoolOp.getKernelShapeAttr(), // Kernel shape - maxPoolOp.getPadsAttr(), // Pads - maxPoolOp.getStorageOrderAttr(),// Storage order - maxPoolOp.getStridesAttr() // Strides + Value newMaxPool = + rewriter.create(maxPoolOp.getLoc(), + maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape + reluOp.getX(), // Original ReLU's input becomes MaxPool's input + maxPoolOp.getAutoPadAttr(), // Auto pad + maxPoolOp.getCeilModeAttr(), // Ceil mode + maxPoolOp.getDilationsAttr(), // Dilations + maxPoolOp.getKernelShapeAttr(), // Kernel shape + maxPoolOp.getPadsAttr(), // Pads + maxPoolOp.getStorageOrderAttr(), // Storage order + maxPoolOp.getStridesAttr() // Strides ); // Create a new ReLU operation using MaxPool's output shape - Value newRelu = rewriter.create( - reluOp.getLoc(), + Value newRelu = rewriter.create(reluOp.getLoc(), maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape - newMaxPool // New MaxPool output becomes ReLU's input + newMaxPool // New MaxPool output becomes ReLU's input ); // Replace all uses of the old MaxPool output with the new ReLU output maxPoolOp.getResult().replaceAllUsesWith(newRelu); diff --git 
a/test/mlir/onnx/onnx_hybrid_transform.mlir b/test/mlir/onnx/onnx_hybrid_transform.mlir index 9966f22c7e..a11e248678 100644 --- a/test/mlir/onnx/onnx_hybrid_transform.mlir +++ b/test/mlir/onnx/onnx_hybrid_transform.mlir @@ -243,8 +243,8 @@ func.func @test_inception_v2_6_snippet(%arg0: tensor<1x3x224x224xf32>, %arg1: te // DECOMPOSE-DAG: [[VAR_40_:%.+]] = "onnx.Mul"([[VAR_38_]], [[VAR_39_]]) : (tensor<1x64x112x112xf32>, tensor<64x1x1xf32>) -> tensor<1x64x112x112xf32> // DECOMPOSE-DAG: [[VAR_41_:%.+]] = "onnx.Unsqueeze"([[VAR_8_]], [[VAR_0_]]) : (tensor<64xf32>, tensor<2xi64>) -> tensor<64x1x1xf32> // DECOMPOSE: [[VAR_42_:%.+]] = "onnx.Add"([[VAR_40_]], [[VAR_41_]]) : (tensor<1x64x112x112xf32>, tensor<64x1x1xf32>) -> tensor<1x64x112x112xf32> -// DECOMPOSE: [[VAR_43_:%.+]] = "onnx.Relu"([[VAR_42_]]) : (tensor<1x64x112x112xf32>) -> tensor<1x64x112x112xf32> -// DECOMPOSE-DAG: [[VAR_44_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_43_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x112x112xf32>) -> tensor<1x64x56x56xf32> +// DECOMPOSE: [[VAR_43_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_42_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x112x112xf32>) -> tensor<1x64x56x56xf32> +// DECOMPOSE-DAG: [[VAR_44_:%.+]] = "onnx.Relu"([[VAR_43_]]) : (tensor<1x64x56x56xf32>) -> tensor<1x64x56x56xf32> // DECOMPOSE-DAG: [[VAR_45_:%.+]] = "onnx.Add"([[VAR_13_]], [[VAR_1_]]) : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_46_:%.+]] = "onnx.Sqrt"([[VAR_45_]]) : (tensor<64xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_47_:%.+]] = "onnx.Div"([[VAR_10_]], [[VAR_46_]]) : (tensor<64xf32>, tensor<64xf32>) -> tensor<64xf32> @@ -274,8 +274,8 @@ func.func @test_inception_v2_6_snippet(%arg0: tensor<1x3x224x224xf32>, %arg1: te // DECOMPOSE-DAG: [[VAR_69_:%.+]] = "onnx.Mul"([[VAR_67_]], [[VAR_68_]]) : (tensor<1x192x56x56xf32>, tensor<192x1x1xf32>) -> tensor<1x192x56x56xf32> // DECOMPOSE-DAG: [[VAR_70_:%.+]] = "onnx.Unsqueeze"([[VAR_22_]], [[VAR_0_]]) : (tensor<192xf32>, tensor<2xi64>) -> tensor<192x1x1xf32> // DECOMPOSE: [[VAR_71_:%.+]] = "onnx.Add"([[VAR_69_]], [[VAR_70_]]) : (tensor<1x192x56x56xf32>, tensor<192x1x1xf32>) -> tensor<1x192x56x56xf32> -// DECOMPOSE: [[VAR_72_:%.+]] = "onnx.Relu"([[VAR_71_]]) : (tensor<1x192x56x56xf32>) -> tensor<1x192x56x56xf32> -// DECOMPOSE-DAG: [[VAR_73_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_72_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x192x56x56xf32>) -> tensor<1x192x28x28xf32> +// DECOMPOSE: [[VAR_72_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_71_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x192x56x56xf32>) -> tensor<1x192x28x28xf32> +// DECOMPOSE-DAG: [[VAR_73_:%.+]] = "onnx.Relu"([[VAR_72_]]) : (tensor<1x192x28x28xf32>) -> tensor<1x192x28x28xf32> // DECOMPOSE-DAG: [[VAR_74_:%.+]] = "onnx.Add"([[VAR_27_]], [[VAR_1_]]) : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_75_:%.+]] = "onnx.Sqrt"([[VAR_74_]]) : (tensor<64xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_76_:%.+]] = "onnx.Div"([[VAR_24_]], [[VAR_75_]]) : (tensor<64xf32>, tensor<64xf32>) -> tensor<64xf32> From 41579e528d190d7c814c7f062460d1fd5221bfbf Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: 
Mon, 7 Apr 2025 04:44:09 -0400 Subject: [PATCH 04/15] Updated the gen_onnx_mlir file Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/Transforms/Decompose.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp index f0d2997ce8..250cbebb11 100644 --- a/src/Dialect/ONNX/Transforms/Decompose.cpp +++ b/src/Dialect/ONNX/Transforms/Decompose.cpp @@ -1270,21 +1270,21 @@ struct ReorderReLUToMaxPoolPattern // Create a new MaxPool operation using ReLU's output shape Value newMaxPool = rewriter.create(maxPoolOp.getLoc(), - maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape - reluOp.getX(), // Original ReLU's input becomes MaxPool's input - maxPoolOp.getAutoPadAttr(), // Auto pad - maxPoolOp.getCeilModeAttr(), // Ceil mode - maxPoolOp.getDilationsAttr(), // Dilations - maxPoolOp.getKernelShapeAttr(), // Kernel shape - maxPoolOp.getPadsAttr(), // Pads - maxPoolOp.getStorageOrderAttr(), // Storage order - maxPoolOp.getStridesAttr() // Strides + maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape + reluOp.getX(), // Original ReLU's input becomes MaxPool's input + maxPoolOp.getAutoPadAttr(), // Auto pad + maxPoolOp.getCeilModeAttr(), // Ceil mode + maxPoolOp.getDilationsAttr(), // Dilations + maxPoolOp.getKernelShapeAttr(), // Kernel shape + maxPoolOp.getPadsAttr(), // Pads + maxPoolOp.getStorageOrderAttr(), // Storage order + maxPoolOp.getStridesAttr() // Strides ); // Create a new ReLU operation using MaxPool's output shape Value newRelu = rewriter.create(reluOp.getLoc(), - maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape - newMaxPool // New MaxPool output becomes ReLU's input + maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape + newMaxPool // New MaxPool output becomes ReLU's input ); // Replace all uses of the old MaxPool output with the new ReLU output maxPoolOp.getResult().replaceAllUsesWith(newRelu); From ff482b56d4aede596e6af5981511976aa8d94776 Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Mon, 7 Apr 2025 04:44:59 -0400 Subject: [PATCH 05/15] Updated the gen_onnx_mlir file Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/ONNXOps.td.inc | 1 + utils/OpBuildTable.inc | 916 ++++++++++++++++++++++++++++++++ utils/gen_onnx_mlir.py | 1 + 3 files changed, 918 insertions(+) create mode 100644 utils/OpBuildTable.inc diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc index 666c0cde71..2729553b07 100644 --- a/src/Dialect/ONNX/ONNXOps.td.inc +++ b/src/Dialect/ONNX/ONNXOps.td.inc @@ -4387,6 +4387,7 @@ def ONNXMaxOp:ONNX_Op<"Max", def ONNXMaxPoolOp:ONNX_Op<"MaxPool", [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + let hasCanonicalizer = 1; let summary = "ONNX MaxPool operation"; let description = [{ MaxPool consumes an input tensor X and applies max pooling across diff --git a/utils/OpBuildTable.inc b/utils/OpBuildTable.inc new file mode 100644 index 0000000000..ed0c277f77 --- /dev/null +++ b/utils/OpBuildTable.inc @@ -0,0 +1,916 @@ +//******************************************************** +// Do not modify this file directly. +// This file is automatically generated via script. +// Details can be found in docs/ImportONNXDefs.md . 
+//******************************************************** + +op_dialect_version_map_["Abs"] = {13}; +op_dialect_version_map_["Acos"] = {22}; +op_dialect_version_map_["Acosh"] = {22}; +op_dialect_version_map_["Adagrad"] = {1}; +op_dialect_version_map_["Adam"] = {1}; +op_dialect_version_map_["Add"] = {14}; +op_dialect_version_map_["And"] = {7}; +op_dialect_version_map_["ArgMax"] = {13}; +op_dialect_version_map_["ArgMin"] = {13}; +op_dialect_version_map_["ArrayFeatureExtractor"] = {1}; +op_dialect_version_map_["Asin"] = {22}; +op_dialect_version_map_["Asinh"] = {22}; +op_dialect_version_map_["Atan"] = {22}; +op_dialect_version_map_["Atanh"] = {22}; +op_dialect_version_map_["AveragePool"] = {22}; +op_dialect_version_map_["BatchNormalization"] = {15}; +op_dialect_version_map_["Bernoulli"] = {22}; +op_dialect_version_map_["Binarizer"] = {1}; +op_dialect_version_map_["BitShift"] = {11}; +op_dialect_version_map_["BitwiseAnd"] = {18}; +op_dialect_version_map_["BitwiseNot"] = {18}; +op_dialect_version_map_["BitwiseOr"] = {18}; +op_dialect_version_map_["BitwiseXor"] = {18}; +op_dialect_version_map_["BlackmanWindow"] = {17}; +op_dialect_version_map_["Cast"] = {21}; +op_dialect_version_map_["CastLike"] = {19}; +op_dialect_version_map_["CastMap"] = {1}; +op_dialect_version_map_["CategoryMapper"] = {1}; +op_dialect_version_map_["Ceil"] = {13}; +op_dialect_version_map_["Celu"] = {12}; +op_dialect_version_map_["CenterCropPad"] = {18}; +op_dialect_version_map_["Clip"] = {13, 12, 11, 6}; +op_dialect_version_map_["Compress"] = {11}; +op_dialect_version_map_["Concat"] = {13}; +op_dialect_version_map_["ConcatFromSequence"] = {11}; +op_dialect_version_map_["Constant"] = {19}; +op_dialect_version_map_["ConstantOfShape"] = {20}; +op_dialect_version_map_["Conv"] = {22}; +op_dialect_version_map_["ConvInteger"] = {10}; +op_dialect_version_map_["ConvTranspose"] = {22}; +op_dialect_version_map_["Cos"] = {22}; +op_dialect_version_map_["Cosh"] = {22}; +op_dialect_version_map_["Col2Im"] = {18}; +op_dialect_version_map_["CumSum"] = {14}; +op_dialect_version_map_["DeformConv"] = {22}; +op_dialect_version_map_["DepthToSpace"] = {13}; +op_dialect_version_map_["DequantizeLinear"] = {19}; +op_dialect_version_map_["Det"] = {22}; +op_dialect_version_map_["DFT"] = {20, 17}; +op_dialect_version_map_["DictVectorizer"] = {1}; +op_dialect_version_map_["Div"] = {14}; +op_dialect_version_map_["Dropout"] = {22}; +op_dialect_version_map_["DynamicQuantizeLinear"] = {11}; +op_dialect_version_map_["Einsum"] = {12}; +op_dialect_version_map_["Elu"] = {22}; +op_dialect_version_map_["Equal"] = {19}; +op_dialect_version_map_["Erf"] = {13}; +op_dialect_version_map_["Exp"] = {13}; +op_dialect_version_map_["Expand"] = {13}; +op_dialect_version_map_["EyeLike"] = {22}; +op_dialect_version_map_["FeatureVectorizer"] = {1}; +op_dialect_version_map_["Flatten"] = {21}; +op_dialect_version_map_["Floor"] = {13}; +op_dialect_version_map_["GRU"] = {22}; +op_dialect_version_map_["Gather"] = {13}; +op_dialect_version_map_["GatherElements"] = {13}; +op_dialect_version_map_["GatherND"] = {13}; +op_dialect_version_map_["Gelu"] = {20}; +op_dialect_version_map_["Gemm"] = {13}; +op_dialect_version_map_["GlobalAveragePool"] = {22}; +op_dialect_version_map_["GlobalLpPool"] = {2}; +op_dialect_version_map_["GlobalMaxPool"] = {22}; +op_dialect_version_map_["Gradient"] = {1}; +op_dialect_version_map_["Greater"] = {13}; +op_dialect_version_map_["GreaterOrEqual"] = {16}; +op_dialect_version_map_["GridSample"] = {22, 16}; +op_dialect_version_map_["GroupNormalization"] = {21, 
18}; +op_dialect_version_map_["HammingWindow"] = {17}; +op_dialect_version_map_["HannWindow"] = {17}; +op_dialect_version_map_["HardSigmoid"] = {22}; +op_dialect_version_map_["Hardmax"] = {13}; +op_dialect_version_map_["HardSwish"] = {22}; +op_dialect_version_map_["Identity"] = {21}; +op_dialect_version_map_["If"] = {21}; +op_dialect_version_map_["Imputer"] = {1}; +op_dialect_version_map_["InstanceNormalization"] = {22}; +op_dialect_version_map_["IsInf"] = {20}; +op_dialect_version_map_["IsNaN"] = {20}; +op_dialect_version_map_["LayerNormalization"] = {17}; +op_dialect_version_map_["LRN"] = {13}; +op_dialect_version_map_["LSTM"] = {22}; +op_dialect_version_map_["LabelEncoder"] = {2}; +op_dialect_version_map_["LeakyRelu"] = {16}; +op_dialect_version_map_["Less"] = {13}; +op_dialect_version_map_["LessOrEqual"] = {16}; +op_dialect_version_map_["LinearClassifier"] = {1}; +op_dialect_version_map_["LinearRegressor"] = {1}; +op_dialect_version_map_["Log"] = {13}; +op_dialect_version_map_["LogSoftmax"] = {13}; +op_dialect_version_map_["Loop"] = {21}; +op_dialect_version_map_["LpNormalization"] = {22}; +op_dialect_version_map_["LpPool"] = {22}; +op_dialect_version_map_["MatMul"] = {13}; +op_dialect_version_map_["MatMulInteger"] = {10}; +op_dialect_version_map_["Max"] = {13}; +op_dialect_version_map_["MaxPool"] = {22}; +op_dialect_version_map_["MaxRoiPool"] = {22}; +op_dialect_version_map_["MaxUnpool"] = {22}; +op_dialect_version_map_["Mean"] = {13}; +op_dialect_version_map_["MeanVarianceNormalization"] = {13}; +op_dialect_version_map_["MelWeightMatrix"] = {17}; +op_dialect_version_map_["Min"] = {13}; +op_dialect_version_map_["Mish"] = {22}; +op_dialect_version_map_["Mod"] = {13}; +op_dialect_version_map_["Momentum"] = {1}; +op_dialect_version_map_["Mul"] = {14}; +op_dialect_version_map_["Multinomial"] = {22}; +op_dialect_version_map_["Neg"] = {13}; +op_dialect_version_map_["NegativeLogLikelihoodLoss"] = {22}; +op_dialect_version_map_["NonMaxSuppression"] = {11}; +op_dialect_version_map_["NonZero"] = {13}; +op_dialect_version_map_["Normalizer"] = {1}; +op_dialect_version_map_["Not"] = {1}; +op_dialect_version_map_["OneHot"] = {11}; +op_dialect_version_map_["OneHotEncoder"] = {1}; +op_dialect_version_map_["Optional"] = {15}; +op_dialect_version_map_["OptionalGetElement"] = {18}; +op_dialect_version_map_["OptionalHasElement"] = {18}; +op_dialect_version_map_["Or"] = {7}; +op_dialect_version_map_["PRelu"] = {16}; +op_dialect_version_map_["Pad"] = {21, 18, 13, 11, 2}; +op_dialect_version_map_["Pow"] = {15}; +op_dialect_version_map_["QLinearConv"] = {10}; +op_dialect_version_map_["QLinearMatMul"] = {10}; +op_dialect_version_map_["QuantizeLinear"] = {19}; +op_dialect_version_map_["RNN"] = {22}; +op_dialect_version_map_["RandomNormal"] = {22}; +op_dialect_version_map_["RandomNormalLike"] = {22}; +op_dialect_version_map_["RandomUniform"] = {22}; +op_dialect_version_map_["RandomUniformLike"] = {22}; +op_dialect_version_map_["Range"] = {11}; +op_dialect_version_map_["Reciprocal"] = {13}; +op_dialect_version_map_["ReduceL1"] = {18, 13}; +op_dialect_version_map_["ReduceL2"] = {18, 13}; +op_dialect_version_map_["ReduceLogSum"] = {18, 13}; +op_dialect_version_map_["ReduceLogSumExp"] = {18, 13}; +op_dialect_version_map_["ReduceMax"] = {20, 18, 13}; +op_dialect_version_map_["ReduceMean"] = {18, 13}; +op_dialect_version_map_["ReduceMin"] = {20, 18, 13}; +op_dialect_version_map_["ReduceProd"] = {18, 13}; +op_dialect_version_map_["ReduceSum"] = {13, 11}; +op_dialect_version_map_["ReduceSumSquare"] = {18, 13}; 
+op_dialect_version_map_["Relu"] = {14}; +op_dialect_version_map_["Reshape"] = {21}; +op_dialect_version_map_["Resize"] = {19, 18, 13, 11, 10}; +op_dialect_version_map_["ReverseSequence"] = {10}; +op_dialect_version_map_["RoiAlign"] = {22}; +op_dialect_version_map_["Round"] = {22}; +op_dialect_version_map_["SVMClassifier"] = {1}; +op_dialect_version_map_["SVMRegressor"] = {1}; +op_dialect_version_map_["Scaler"] = {1}; +op_dialect_version_map_["Scan"] = {21}; +op_dialect_version_map_["Scatter"] = {11}; +op_dialect_version_map_["ScatterElements"] = {18}; +op_dialect_version_map_["ScatterND"] = {18}; +op_dialect_version_map_["Selu"] = {22}; +op_dialect_version_map_["SequenceAt"] = {11}; +op_dialect_version_map_["SequenceConstruct"] = {11}; +op_dialect_version_map_["SequenceEmpty"] = {11}; +op_dialect_version_map_["SequenceErase"] = {11}; +op_dialect_version_map_["SequenceInsert"] = {11}; +op_dialect_version_map_["SequenceLength"] = {11}; +op_dialect_version_map_["SequenceMap"] = {17}; +op_dialect_version_map_["Shape"] = {21}; +op_dialect_version_map_["Shrink"] = {9}; +op_dialect_version_map_["Sigmoid"] = {13}; +op_dialect_version_map_["Sign"] = {13}; +op_dialect_version_map_["Sin"] = {22}; +op_dialect_version_map_["Sinh"] = {22}; +op_dialect_version_map_["Size"] = {21}; +op_dialect_version_map_["Slice"] = {13}; +op_dialect_version_map_["Softmax"] = {13, 11}; +op_dialect_version_map_["SoftmaxCrossEntropyLoss"] = {13}; +op_dialect_version_map_["Softplus"] = {22}; +op_dialect_version_map_["Softsign"] = {22}; +op_dialect_version_map_["SpaceToDepth"] = {13}; +op_dialect_version_map_["Split"] = {18, 13, 11}; +op_dialect_version_map_["SplitToSequence"] = {11}; +op_dialect_version_map_["Sqrt"] = {13}; +op_dialect_version_map_["Squeeze"] = {21, 11}; +op_dialect_version_map_["StringNormalizer"] = {10}; +op_dialect_version_map_["STFT"] = {17}; +op_dialect_version_map_["Sub"] = {14}; +op_dialect_version_map_["Sum"] = {13}; +op_dialect_version_map_["Tan"] = {22}; +op_dialect_version_map_["Tanh"] = {13}; +op_dialect_version_map_["TfIdfVectorizer"] = {9}; +op_dialect_version_map_["ThresholdedRelu"] = {22}; +op_dialect_version_map_["Tile"] = {13}; +op_dialect_version_map_["TopK"] = {11}; +op_dialect_version_map_["Transpose"] = {21}; +op_dialect_version_map_["Trilu"] = {14}; +op_dialect_version_map_["TreeEnsembleClassifier"] = {1}; +op_dialect_version_map_["TreeEnsembleRegressor"] = {1}; +op_dialect_version_map_["Unique"] = {11}; +op_dialect_version_map_["Unsqueeze"] = {21, 11}; +op_dialect_version_map_["Upsample"] = {10, 7}; +op_dialect_version_map_["Where"] = {16}; +op_dialect_version_map_["Xor"] = {7}; +op_dialect_version_map_["ZipMap"] = {1}; +import_handler_map_["Abs"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Acos"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Acosh"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Add"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["And"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ArgMax"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ArgMin"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Asin"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Asinh"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Atan"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
+import_handler_map_["Atanh"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["AveragePool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BatchNormalization"] = + &onnx_mlir::detail::FrontendGenImpl::ImportNodeBatchNormalization; +import_handler_map_["Bernoulli"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BitShift"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BitwiseAnd"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BitwiseNot"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BitwiseOr"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BitwiseXor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["BlackmanWindow"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Cast"] = + &onnx_mlir::detail::FrontendGenImpl::ImportNodeCast; +import_handler_map_["CastLike"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Ceil"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Celu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["CenterCropPad"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Clip"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ClipV12"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ClipV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ClipV6"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Col2Im"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Compress"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Concat"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ConcatFromSequence"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Constant"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ConstantOfShape"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Conv"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ConvInteger"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ConvTranspose"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Cos"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Cosh"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["CumSum"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["DFT"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["DFTV17"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["DeformConv"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["DepthToSpace"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["DequantizeLinear"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Det"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Div"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Dropout"] = + &onnx_mlir::detail::FrontendGenImpl::ImportNodeDropout; 
+import_handler_map_["DynamicQuantizeLinear"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Einsum"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Elu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Equal"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Erf"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Exp"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Expand"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["EyeLike"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Flatten"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Floor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GRU"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Gather"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GatherElements"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GatherND"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Gelu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Gemm"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GlobalAveragePool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GlobalLpPool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GlobalMaxPool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Greater"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GreaterOrEqual"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GridSample"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GridSampleV16"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GroupNormalization"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GroupNormalizationV18"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["HammingWindow"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["HannWindow"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["HardSigmoid"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["HardSwish"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Hardmax"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Identity"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["If"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["InstanceNormalization"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["IsInf"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["IsNaN"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LRN"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LSTM"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LayerNormalization"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LeakyRelu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
+import_handler_map_["Less"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LessOrEqual"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Log"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LogSoftmax"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Loop"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LpNormalization"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LpPool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["MatMul"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["MatMulInteger"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Max"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["MaxPool"] = + &onnx_mlir::detail::FrontendGenImpl::ImportNodeMaxPool; +import_handler_map_["MaxRoiPool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["MaxUnpool"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Mean"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["MeanVarianceNormalization"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["MelWeightMatrix"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Min"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Mish"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Mod"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Mul"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Multinomial"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Neg"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["NegativeLogLikelihoodLoss"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["NonMaxSuppression"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["NonZero"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Not"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["OneHot"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Optional"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["OptionalGetElement"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["OptionalHasElement"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Or"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["PRelu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Pad"] = + &onnx_mlir::detail::FrontendGenImpl::ImportNodePad; +import_handler_map_["PadV18"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["PadV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["PadV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["PadV2"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Pow"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["QLinearConv"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
+import_handler_map_["QLinearMatMul"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["QuantizeLinear"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["RNN"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["RandomNormal"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["RandomNormalLike"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["RandomUniform"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["RandomUniformLike"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Range"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Reciprocal"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceL1"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceL1V13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceL2"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceL2V13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceLogSum"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceLogSumV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceLogSumExp"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceLogSumExpV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMax"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMaxV18"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMaxV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMean"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMeanV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMin"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMinV18"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceMinV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceProd"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceProdV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceSum"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceSumV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceSumSquare"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ReduceSumSquareV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Relu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Reshape"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Resize"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ResizeV18"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ResizeV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ResizeV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ResizeV10"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
+import_handler_map_["ReverseSequence"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["RoiAlign"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Round"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["STFT"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Scan"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Scatter"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ScatterElements"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ScatterND"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Selu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceAt"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceConstruct"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceEmpty"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceErase"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceInsert"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceLength"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SequenceMap"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Shape"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Shrink"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Sigmoid"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Sign"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Sin"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Sinh"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Size"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Slice"] = + &onnx_mlir::detail::FrontendGenImpl::ImportNodeSlice; +import_handler_map_["Softmax"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SoftmaxV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SoftmaxCrossEntropyLoss"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Softplus"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Softsign"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SpaceToDepth"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Split"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SplitV13"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SplitV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SplitToSequence"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Sqrt"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Squeeze"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SqueezeV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["StringNormalizer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Sub"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
+import_handler_map_["Sum"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Tan"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Tanh"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["TfIdfVectorizer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ThresholdedRelu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Tile"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["TopK"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Transpose"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Trilu"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Unique"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Unsqueeze"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["UnsqueezeV11"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Upsample"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["UpsampleV7"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Where"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Xor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ArrayFeatureExtractor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Binarizer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["CastMap"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["CategoryMapper"] = + &onnx_mlir::detail::FrontendGenImpl::ImportCategoryMapper; +import_handler_map_["DictVectorizer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["FeatureVectorizer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Imputer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LabelEncoder"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LinearClassifier"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["LinearRegressor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Normalizer"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["OneHotEncoder"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SVMClassifier"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["SVMRegressor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Scaler"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["TreeEnsembleClassifier"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["TreeEnsembleRegressor"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["ZipMap"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Adagrad"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Adam"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Gradient"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["Momentum"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; +op_opsets_map_["Abs"] = {13, 6, 1}; 
+op_opsets_map_["Acos"] = {22, 7}; +op_opsets_map_["Acosh"] = {22, 9}; +op_opsets_map_["Add"] = {14, 13, 7, 6, 1}; +op_opsets_map_["AffineGrid"] = {20}; +op_opsets_map_["And"] = {7, 1}; +op_opsets_map_["ArgMax"] = {13, 12, 11, 1}; +op_opsets_map_["ArgMin"] = {13, 12, 11, 1}; +op_opsets_map_["Asin"] = {22, 7}; +op_opsets_map_["Asinh"] = {22, 9}; +op_opsets_map_["Atan"] = {22, 7}; +op_opsets_map_["Atanh"] = {22, 9}; +op_opsets_map_["AveragePool"] = {22, 19, 11, 10, 7, 1}; +op_opsets_map_["BatchNormalization"] = {15, 14, 9, 7, 6, 1}; +op_opsets_map_["Bernoulli"] = {22, 15}; +op_opsets_map_["BitShift"] = {11}; +op_opsets_map_["BitwiseAnd"] = {18}; +op_opsets_map_["BitwiseNot"] = {18}; +op_opsets_map_["BitwiseOr"] = {18}; +op_opsets_map_["BitwiseXor"] = {18}; +op_opsets_map_["BlackmanWindow"] = {17}; +op_opsets_map_["Cast"] = {21, 19, 13, 9, 6, 1}; +op_opsets_map_["CastLike"] = {21, 19, 15}; +op_opsets_map_["Ceil"] = {13, 6, 1}; +op_opsets_map_["Celu"] = {12}; +op_opsets_map_["CenterCropPad"] = {18}; +op_opsets_map_["Clip"] = {13, 12, 11, 6, 1}; +op_opsets_map_["Col2Im"] = {18}; +op_opsets_map_["Compress"] = {11, 9}; +op_opsets_map_["Concat"] = {13, 11, 4, 1}; +op_opsets_map_["ConcatFromSequence"] = {11}; +op_opsets_map_["Constant"] = {21, 19, 13, 12, 11, 9, 1}; +op_opsets_map_["ConstantOfShape"] = {21, 20, 9}; +op_opsets_map_["Conv"] = {22, 11, 1}; +op_opsets_map_["ConvInteger"] = {10}; +op_opsets_map_["ConvTranspose"] = {22, 11, 1}; +op_opsets_map_["Cos"] = {22, 7}; +op_opsets_map_["Cosh"] = {22, 9}; +op_opsets_map_["CumSum"] = {14, 11}; +op_opsets_map_["DFT"] = {20, 17}; +op_opsets_map_["DeformConv"] = {22, 19}; +op_opsets_map_["DepthToSpace"] = {13, 11, 1}; +op_opsets_map_["DequantizeLinear"] = {21, 19, 13, 10}; +op_opsets_map_["Det"] = {22, 11}; +op_opsets_map_["Div"] = {14, 13, 7, 6, 1}; +op_opsets_map_["Dropout"] = {22, 13, 12, 10, 7, 6, 1}; +op_opsets_map_["DynamicQuantizeLinear"] = {11}; +op_opsets_map_["Einsum"] = {12}; +op_opsets_map_["Elu"] = {22, 6, 1}; +op_opsets_map_["Equal"] = {19, 13, 11, 7, 1}; +op_opsets_map_["Erf"] = {13, 9}; +op_opsets_map_["Exp"] = {13, 6, 1}; +op_opsets_map_["Expand"] = {13, 8}; +op_opsets_map_["EyeLike"] = {22, 9}; +op_opsets_map_["Flatten"] = {21, 13, 11, 9, 1}; +op_opsets_map_["Floor"] = {13, 6, 1}; +op_opsets_map_["GRU"] = {22, 14, 7, 3, 1}; +op_opsets_map_["Gather"] = {13, 11, 1}; +op_opsets_map_["GatherElements"] = {13, 11}; +op_opsets_map_["GatherND"] = {13, 12, 11}; +op_opsets_map_["Gelu"] = {20}; +op_opsets_map_["Gemm"] = {13, 11, 9, 7, 6, 1}; +op_opsets_map_["GlobalAveragePool"] = {22, 1}; +op_opsets_map_["GlobalLpPool"] = {22, 2, 1}; +op_opsets_map_["GlobalMaxPool"] = {22, 1}; +op_opsets_map_["Greater"] = {13, 9, 7, 1}; +op_opsets_map_["GreaterOrEqual"] = {16, 12}; +op_opsets_map_["GridSample"] = {22, 20, 16}; +op_opsets_map_["GroupNormalization"] = {21, 18}; +op_opsets_map_["HammingWindow"] = {17}; +op_opsets_map_["HannWindow"] = {17}; +op_opsets_map_["HardSigmoid"] = {22, 6, 1}; +op_opsets_map_["HardSwish"] = {22, 14}; +op_opsets_map_["Hardmax"] = {13, 11, 1}; +op_opsets_map_["Identity"] = {21, 19, 16, 14, 13, 1}; +op_opsets_map_["If"] = {21, 19, 16, 13, 11, 1}; +op_opsets_map_["ImageDecoder"] = {20}; +op_opsets_map_["InstanceNormalization"] = {22, 6, 1}; +op_opsets_map_["IsInf"] = {20, 10}; +op_opsets_map_["IsNaN"] = {20, 13, 9}; +op_opsets_map_["LRN"] = {13, 1}; +op_opsets_map_["LSTM"] = {22, 14, 7, 1}; +op_opsets_map_["LayerNormalization"] = {17}; +op_opsets_map_["LeakyRelu"] = {16, 6, 1}; +op_opsets_map_["Less"] = {13, 9, 7, 1}; 
+op_opsets_map_["LessOrEqual"] = {16, 12}; +op_opsets_map_["Log"] = {13, 6, 1}; +op_opsets_map_["LogSoftmax"] = {13, 11, 1}; +op_opsets_map_["Loop"] = {21, 19, 16, 13, 11, 1}; +op_opsets_map_["LpNormalization"] = {22, 1}; +op_opsets_map_["LpPool"] = {22, 18, 11, 2, 1}; +op_opsets_map_["MatMul"] = {13, 9, 1}; +op_opsets_map_["MatMulInteger"] = {10}; +op_opsets_map_["Max"] = {13, 12, 8, 6, 1}; +op_opsets_map_["MaxPool"] = {22, 12, 11, 10, 8, 1}; +op_opsets_map_["MaxRoiPool"] = {22, 1}; +op_opsets_map_["MaxUnpool"] = {22, 11, 9}; +op_opsets_map_["Mean"] = {13, 8, 6, 1}; +op_opsets_map_["MeanVarianceNormalization"] = {13, 9}; +op_opsets_map_["MelWeightMatrix"] = {17}; +op_opsets_map_["Min"] = {13, 12, 8, 6, 1}; +op_opsets_map_["Mish"] = {22, 18}; +op_opsets_map_["Mod"] = {13, 10}; +op_opsets_map_["Mul"] = {14, 13, 7, 6, 1}; +op_opsets_map_["Multinomial"] = {22, 7}; +op_opsets_map_["Neg"] = {13, 6, 1}; +op_opsets_map_["NegativeLogLikelihoodLoss"] = {22, 13, 12}; +op_opsets_map_["NonMaxSuppression"] = {11, 10}; +op_opsets_map_["NonZero"] = {13, 9}; +op_opsets_map_["Not"] = {1}; +op_opsets_map_["OneHot"] = {11, 9}; +op_opsets_map_["Optional"] = {15}; +op_opsets_map_["OptionalGetElement"] = {18, 15}; +op_opsets_map_["OptionalHasElement"] = {18, 15}; +op_opsets_map_["Or"] = {7, 1}; +op_opsets_map_["PRelu"] = {16, 9, 7, 6, 1}; +op_opsets_map_["Pad"] = {21, 19, 18, 13, 11, 2, 1}; +op_opsets_map_["Pow"] = {15, 13, 12, 7, 1}; +op_opsets_map_["QLinearConv"] = {10}; +op_opsets_map_["QLinearMatMul"] = {21, 10}; +op_opsets_map_["QuantizeLinear"] = {21, 19, 13, 10}; +op_opsets_map_["RNN"] = {22, 14, 7, 1}; +op_opsets_map_["RandomNormal"] = {22, 1}; +op_opsets_map_["RandomNormalLike"] = {22, 1}; +op_opsets_map_["RandomUniform"] = {22, 1}; +op_opsets_map_["RandomUniformLike"] = {22, 1}; +op_opsets_map_["Range"] = {11}; +op_opsets_map_["Reciprocal"] = {13, 6, 1}; +op_opsets_map_["ReduceL1"] = {18, 13, 11, 1}; +op_opsets_map_["ReduceL2"] = {18, 13, 11, 1}; +op_opsets_map_["ReduceLogSum"] = {18, 13, 11, 1}; +op_opsets_map_["ReduceLogSumExp"] = {18, 13, 11, 1}; +op_opsets_map_["ReduceMax"] = {20, 18, 13, 12, 11, 1}; +op_opsets_map_["ReduceMean"] = {18, 13, 11, 1}; +op_opsets_map_["ReduceMin"] = {20, 18, 13, 12, 11, 1}; +op_opsets_map_["ReduceProd"] = {18, 13, 11, 1}; +op_opsets_map_["ReduceSum"] = {13, 11, 1}; +op_opsets_map_["ReduceSumSquare"] = {18, 13, 11, 1}; +op_opsets_map_["RegexFullMatch"] = {20}; +op_opsets_map_["Relu"] = {14, 13, 6, 1}; +op_opsets_map_["Reshape"] = {21, 19, 14, 13, 5, 1}; +op_opsets_map_["Resize"] = {19, 18, 13, 11, 10}; +op_opsets_map_["ReverseSequence"] = {10}; +op_opsets_map_["RoiAlign"] = {22, 16, 10}; +op_opsets_map_["Round"] = {22, 11}; +op_opsets_map_["STFT"] = {17}; +op_opsets_map_["Scan"] = {21, 19, 16, 11, 9, 8}; +op_opsets_map_["Scatter"] = {11, 9}; +op_opsets_map_["ScatterElements"] = {18, 16, 13, 11}; +op_opsets_map_["ScatterND"] = {18, 16, 13, 11}; +op_opsets_map_["Selu"] = {22, 6, 1}; +op_opsets_map_["SequenceAt"] = {11}; +op_opsets_map_["SequenceConstruct"] = {11}; +op_opsets_map_["SequenceEmpty"] = {11}; +op_opsets_map_["SequenceErase"] = {11}; +op_opsets_map_["SequenceInsert"] = {11}; +op_opsets_map_["SequenceLength"] = {11}; +op_opsets_map_["SequenceMap"] = {17}; +op_opsets_map_["Shape"] = {21, 19, 15, 13, 1}; +op_opsets_map_["Shrink"] = {9}; +op_opsets_map_["Sigmoid"] = {13, 6, 1}; +op_opsets_map_["Sign"] = {13, 9}; +op_opsets_map_["Sin"] = {22, 7}; +op_opsets_map_["Sinh"] = {22, 9}; +op_opsets_map_["Size"] = {21, 19, 13, 1}; +op_opsets_map_["Slice"] = {13, 11, 10, 
1}; +op_opsets_map_["Softmax"] = {13, 11, 1}; +op_opsets_map_["SoftmaxCrossEntropyLoss"] = {13, 12}; +op_opsets_map_["Softplus"] = {22, 1}; +op_opsets_map_["Softsign"] = {22, 1}; +op_opsets_map_["SpaceToDepth"] = {13, 1}; +op_opsets_map_["Split"] = {18, 13, 11, 2, 1}; +op_opsets_map_["SplitToSequence"] = {11}; +op_opsets_map_["Sqrt"] = {13, 6, 1}; +op_opsets_map_["Squeeze"] = {21, 13, 11, 1}; +op_opsets_map_["StringConcat"] = {20}; +op_opsets_map_["StringNormalizer"] = {10}; +op_opsets_map_["StringSplit"] = {20}; +op_opsets_map_["Sub"] = {14, 13, 7, 6, 1}; +op_opsets_map_["Sum"] = {13, 8, 6, 1}; +op_opsets_map_["Tan"] = {22, 7}; +op_opsets_map_["Tanh"] = {13, 6, 1}; +op_opsets_map_["TfIdfVectorizer"] = {9}; +op_opsets_map_["ThresholdedRelu"] = {22, 10}; +op_opsets_map_["Tile"] = {13, 6, 1}; +op_opsets_map_["TopK"] = {11, 10, 1}; +op_opsets_map_["Transpose"] = {21, 13, 1}; +op_opsets_map_["Trilu"] = {14}; +op_opsets_map_["Unique"] = {11}; +op_opsets_map_["Unsqueeze"] = {21, 13, 11, 1}; +op_opsets_map_["Upsample"] = {10, 9, 7, 1}; +op_opsets_map_["Where"] = {16, 9}; +op_opsets_map_["Xor"] = {7, 1}; +op_opsets_map_["ArrayFeatureExtractor"] = {1}; +op_opsets_map_["Binarizer"] = {1}; +op_opsets_map_["CastMap"] = {1}; +op_opsets_map_["CategoryMapper"] = {1}; +op_opsets_map_["DictVectorizer"] = {1}; +op_opsets_map_["FeatureVectorizer"] = {1}; +op_opsets_map_["Imputer"] = {1}; +op_opsets_map_["LabelEncoder"] = {4, 2, 1}; +op_opsets_map_["LinearClassifier"] = {1}; +op_opsets_map_["LinearRegressor"] = {1}; +op_opsets_map_["Normalizer"] = {1}; +op_opsets_map_["OneHotEncoder"] = {1}; +op_opsets_map_["SVMClassifier"] = {1}; +op_opsets_map_["SVMRegressor"] = {1}; +op_opsets_map_["Scaler"] = {1}; +op_opsets_map_["TreeEnsemble"] = {5}; +op_opsets_map_["TreeEnsembleClassifier"] = {5, 3, 1}; +op_opsets_map_["TreeEnsembleRegressor"] = {5, 3, 1}; +op_opsets_map_["ZipMap"] = {1}; +op_opsets_map_["Adagrad"] = {1}; +op_opsets_map_["Adam"] = {1}; +op_opsets_map_["Gradient"] = {1}; +op_opsets_map_["Momentum"] = {1}; diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py index 355569deab..dfa4f81c7b 100755 --- a/utils/gen_onnx_mlir.py +++ b/utils/gen_onnx_mlir.py @@ -344,6 +344,7 @@ "Less", "Loop", "LSTM", + "MaxPool", "Mul", "Or", "Pow", From 1e93d8d912639759975ed711804fa006e3884775 Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Mon, 7 Apr 2025 05:39:22 -0400 Subject: [PATCH 06/15] Merging nested concat ops Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/Transforms/Recompose.cpp | 61 +++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/src/Dialect/ONNX/Transforms/Recompose.cpp b/src/Dialect/ONNX/Transforms/Recompose.cpp index 5b57620635..c0e2b0bad4 100644 --- a/src/Dialect/ONNX/Transforms/Recompose.cpp +++ b/src/Dialect/ONNX/Transforms/Recompose.cpp @@ -602,6 +602,57 @@ struct RecomposeQLinearMatMulFromQuantizeLinearPattern } }; +/// Merges nested ONNXConcatOps +struct RecomposeConcatPattern + : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + // Helper function to check if an input is a mergeable Concat. 
+ static bool isMergeableConcat(Value input, int64_t axis) { + auto innerConcat = input.getDefiningOp(); + if (!innerConcat) + return false; + return (innerConcat.getAxis() == axis) && (innerConcat.getResult().hasOneUse()); + } + + LogicalResult matchAndRewrite( + ONNXConcatOp concatOp, PatternRewriter &rewriter) const final { + Location loc = concatOp.getLoc(); + auto inputs = concatOp.getOperands(); + SmallVector newInputs; + bool merged = false; + + // Flatten nested concat nodes. + for (auto input : inputs) { + newInputs.push_back(input); + if (isMergeableConcat(input, concatOp.getAxis())) { + merged = true; + // Remove the nested concat and append its inputs. + newInputs.pop_back(); + auto innerConcat = cast(input.getDefiningOp()); + newInputs.append(innerConcat.getOperands().begin(), + innerConcat.getOperands().end()); + } + } + + if (merged) { + // Create a new ONNXConcat op with the flattened inputs. + auto newConcat = rewriter.create(loc, + concatOp.getResult().getType(), newInputs, concatOp.getAxis()); + rewriter.replaceOp(concatOp, newConcat.getResult()); + return success(); + } + + // If there is only a single input, replace the concat with that input. + if (concatOp.getOperands().size() == 1) { + rewriter.replaceOp(concatOp, concatOp.getOperands()[0]); + return success(); + } + + return failure(); + } +}; + struct RecomposeONNXToONNXPass : public PassWrapper> { MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RecomposeONNXToONNXPass) @@ -656,6 +707,15 @@ void RecomposeONNXToONNXPass::runOnOperation() { return true; }); + target.addDynamicallyLegalOp([](ONNXConcatOp op) { + for (Value input : op.getOperands()) { + if (!RecomposeConcatPattern::isMergeableConcat(input, op.getAxis())) { + return true; // Op is legal if any input isn't a mergeable Concat. + } + } + return false; // Op is illegal (needs rewriting) if all inputs are mergeable. + }); + // Recompose QLinearMatMul, starting from QuantizeLinear. // Pattern: DequanizeLinear + MatMul + QuantizeLinear. target.addDynamicallyLegalOp( @@ -682,6 +742,7 @@ void onnx_mlir::getRecomposeONNXToONNXPatterns( patterns.insert(context); patterns.insert(context); patterns.insert(context); + patterns.insert(context); } /*! From 90f5c2e588325ce6ba52a5e4abde7673a5cc675d Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Tue, 8 Apr 2025 03:02:10 -0400 Subject: [PATCH 07/15] Clang format updated for merge concat Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/Transforms/Recompose.cpp | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Dialect/ONNX/Transforms/Recompose.cpp b/src/Dialect/ONNX/Transforms/Recompose.cpp index c0e2b0bad4..f3cbe2926f 100644 --- a/src/Dialect/ONNX/Transforms/Recompose.cpp +++ b/src/Dialect/ONNX/Transforms/Recompose.cpp @@ -603,8 +603,7 @@ struct RecomposeQLinearMatMulFromQuantizeLinearPattern }; /// Merges nested ONNXConcatOps -struct RecomposeConcatPattern - : public OpRewritePattern { +struct RecomposeConcatPattern : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; // Helper function to check if an input is a mergeable Concat. 
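The rewrite above relies on concatenation along a fixed axis being associative: Concat(Concat(A, B), C) on axis k yields the same tensor as Concat(A, B, C) on axis k, so a single-use inner Concat can be folded into its consumer. The standalone C++ sketch below is illustrative only and not part of the patch; the toy concat helper and the 1-D Segment type are assumptions made to keep the example self-contained.

// Illustrative sketch (not part of the patch): flattening nested "concat"
// nodes over plain vectors, mirroring the operand loop in
// RecomposeConcatPattern::matchAndRewrite.
#include <cassert>
#include <vector>

using Segment = std::vector<int>;

// Toy concat: join segments end to end (axis 0 of 1-D data).
static Segment concat(const std::vector<Segment> &inputs) {
  Segment out;
  for (const Segment &s : inputs)
    out.insert(out.end(), s.begin(), s.end());
  return out;
}

int main() {
  Segment a{1, 2}, b{3, 4}, c{5};

  // Nested form: concat(concat(a, b), c).
  Segment nested = concat({concat({a, b}), c});

  // Flattened form produced by the pattern: concat(a, b, c).
  Segment flat = concat({a, b, c});

  assert(nested == flat); // Same elements, one fewer concat node.
  return 0;
}

Note that the pattern only folds an inner Concat that has a single use; otherwise the inner node would still be needed by its other users and folding would duplicate rather than remove work.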
@@ -612,7 +611,8 @@ struct RecomposeConcatPattern auto innerConcat = input.getDefiningOp(); if (!innerConcat) return false; - return (innerConcat.getAxis() == axis) && (innerConcat.getResult().hasOneUse()); + return (innerConcat.getAxis() == axis) && + (innerConcat.getResult().hasOneUse()); } LogicalResult matchAndRewrite( @@ -621,7 +621,7 @@ struct RecomposeConcatPattern auto inputs = concatOp.getOperands(); SmallVector newInputs; bool merged = false; - + // Flatten nested concat nodes. for (auto input : inputs) { newInputs.push_back(input); @@ -630,15 +630,15 @@ struct RecomposeConcatPattern // Remove the nested concat and append its inputs. newInputs.pop_back(); auto innerConcat = cast(input.getDefiningOp()); - newInputs.append(innerConcat.getOperands().begin(), - innerConcat.getOperands().end()); + newInputs.append( + innerConcat.getOperands().begin(), innerConcat.getOperands().end()); } } - + if (merged) { // Create a new ONNXConcat op with the flattened inputs. - auto newConcat = rewriter.create(loc, - concatOp.getResult().getType(), newInputs, concatOp.getAxis()); + auto newConcat = rewriter.create( + loc, concatOp.getResult().getType(), newInputs, concatOp.getAxis()); rewriter.replaceOp(concatOp, newConcat.getResult()); return success(); } @@ -713,7 +713,8 @@ void RecomposeONNXToONNXPass::runOnOperation() { return true; // Op is legal if any input isn't a mergeable Concat. } } - return false; // Op is illegal (needs rewriting) if all inputs are mergeable. + return false; // Op is illegal (needs rewriting) if all inputs are + // mergeable. }); // Recompose QLinearMatMul, starting from QuantizeLinear. From 0c4707a06575ee925a2e9260953abe47b68ca2e7 Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Tue, 8 Apr 2025 05:03:42 -0400 Subject: [PATCH 08/15] Added test file Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/ONNXOps.td.inc | 1 - src/Dialect/ONNX/Transforms/Decompose.cpp | 60 --------------------- test/mlir/onnx/recompose_concat.mlir | 38 +++++++++++++ test/mlir/onnx/reorder_relu_to_maxpool.mlir | 33 ------------ utils/gen_onnx_mlir.py | 1 - 5 files changed, 38 insertions(+), 95 deletions(-) create mode 100644 test/mlir/onnx/recompose_concat.mlir delete mode 100644 test/mlir/onnx/reorder_relu_to_maxpool.mlir diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc index 2729553b07..666c0cde71 100644 --- a/src/Dialect/ONNX/ONNXOps.td.inc +++ b/src/Dialect/ONNX/ONNXOps.td.inc @@ -4387,7 +4387,6 @@ def ONNXMaxOp:ONNX_Op<"Max", def ONNXMaxPoolOp:ONNX_Op<"MaxPool", [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; let summary = "ONNX MaxPool operation"; let description = [{ MaxPool consumes an input tensor X and applies max pooling across diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp index 250cbebb11..45ca7127c6 100644 --- a/src/Dialect/ONNX/Transforms/Decompose.cpp +++ b/src/Dialect/ONNX/Transforms/Decompose.cpp @@ -1249,55 +1249,6 @@ struct SumToAddPattern : public OpRewritePattern { } }; -/// reorder relu-> maxpool to maxpool->relu -struct ReorderReLUToMaxPoolPattern - : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult matchAndRewrite( - ONNXMaxPoolSingleOutOp maxPoolOp, PatternRewriter &rewriter) const final { - - // Get the input to MaxPool - Value maxPoolInput = maxPoolOp.getX(); - Operation *inputOp = maxPoolInput.getDefiningOp(); - - // Check if the input to MaxPool is a ReLU layer - if (!inputOp || !isa(inputOp)) - return 
failure(); // Only process if MaxPool follows a ReLU layer - - auto reluOp = dyn_cast(inputOp); - - // Create a new MaxPool operation using ReLU's output shape - Value newMaxPool = - rewriter.create(maxPoolOp.getLoc(), - maxPoolOp.getResult().getType(), // MaxPool gets ReLU's output shape - reluOp.getX(), // Original ReLU's input becomes MaxPool's input - maxPoolOp.getAutoPadAttr(), // Auto pad - maxPoolOp.getCeilModeAttr(), // Ceil mode - maxPoolOp.getDilationsAttr(), // Dilations - maxPoolOp.getKernelShapeAttr(), // Kernel shape - maxPoolOp.getPadsAttr(), // Pads - maxPoolOp.getStorageOrderAttr(), // Storage order - maxPoolOp.getStridesAttr() // Strides - ); - - // Create a new ReLU operation using MaxPool's output shape - Value newRelu = rewriter.create(reluOp.getLoc(), - maxPoolOp.getResult().getType(), // ReLU gets MaxPool's output shape - newMaxPool // New MaxPool output becomes ReLU's input - ); - // Replace all uses of the old MaxPool output with the new ReLU output - maxPoolOp.getResult().replaceAllUsesWith(newRelu); - - // Safely erase the old MaxPool (now unused) - rewriter.eraseOp(maxPoolOp); - - // Replace the original ReLU output with the new ReLU output - rewriter.replaceOp(reluOp, newRelu); - return success(); - } -}; - // ============================================================================= // Pattern for replacing CastLikeOp by CastOp. // ============================================================================= @@ -1434,16 +1385,6 @@ void DecomposeONNXToONNXPass::runOnOperation() { op.getValueStringAttr() || op.getValueStringsAttr()); }); - target.addDynamicallyLegalOp([](Operation *op) { - if (auto reluOp = dyn_cast(op)) { - for (auto user : reluOp.getResult().getUsers()) { - if (auto poolOp = dyn_cast(user)) { - return false; // Reorder ReLU to MaxPool condition met - } - } - } - return true; - }); // Decompose CustomOp FusedMatMul introduced by onnxruntime: // https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.FusedMatMul @@ -1500,7 +1441,6 @@ void onnx_mlir::getDecomposeONNXToONNXPatterns( patterns.insert(context); // TODO: consider whether to include SoftmaxPattern here - patterns.insert(context); } /*! 
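For context on the ReorderReLUToMaxPoolPattern removed above: the reordering is value-preserving because ReLU is monotone non-decreasing, so relu(max(x_1, ..., x_n)) equals max(relu(x_1), ..., relu(x_n)) over every pooling window. The short C++ check below is illustrative only and not part of the patch; the window contents are arbitrary sample data.

// Illustrative check (not part of the patch): ReLU commutes with max over a
// pooling window, which is what made reordering ReLU and MaxPool
// value-preserving.
#include <algorithm>
#include <cassert>
#include <vector>

static float relu(float x) { return std::max(x, 0.0f); }

int main() {
  std::vector<float> window{-3.5f, -0.25f, 1.75f, 4.0f};

  // MaxPool first, then ReLU.
  float maxThenRelu = relu(*std::max_element(window.begin(), window.end()));

  // ReLU first, then MaxPool.
  std::vector<float> activated(window.size());
  std::transform(window.begin(), window.end(), activated.begin(), relu);
  float reluThenMax = *std::max_element(activated.begin(), activated.end());

  assert(maxThenRelu == reluThenMax);
  return 0;
}

Pooling before ReLU applies the activation to fewer elements, which is presumably what motivated the original reorder; the hybrid-transform test expectations later in this series change accordingly once the pattern is removed.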
diff --git a/test/mlir/onnx/recompose_concat.mlir b/test/mlir/onnx/recompose_concat.mlir new file mode 100644 index 0000000000..e536f37d7a --- /dev/null +++ b/test/mlir/onnx/recompose_concat.mlir @@ -0,0 +1,38 @@ +// RUN: onnx-mlir --useOnnxModelTypes=false --EmitONNXIR --printIR %s | FileCheck %s + +func.func @test_recompose_concat(%arg0: tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> { +%0 = onnx.Constant dense<0.00999999977> : tensor<6x3x3x3xf32> +%1 = "onnx.NoValue"() {onnx_node_name = "onnx.NoValue_0", value} : () -> none +%2 = "onnx.Conv"(%arg0, %0, %1) {auto_pad = "NOTSET", group = 1 : si64, onnx_node_name = "onnx.Conv_1", pads = [1, 1, 1, 1]} : (tensor<1x3x6x6xf32>, tensor<6x3x3x3xf32>, none) -> tensor<1x6x6x6xf32> +%3 = "onnx.Relu"(%2) {onnx_node_name = "onnx.Relu_2"} : (tensor<1x6x6x6xf32>) -> tensor<1x6x6x6xf32> +%4 = "onnx.Concat"(%arg0, %3) {axis = 1 : si64, onnx_node_name = "onnx.Concat_3"} : (tensor<1x3x6x6xf32>, tensor<1x6x6x6xf32>) -> tensor<1x9x6x6xf32> +%5 = "onnx.Concat"(%4, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_4"} : (tensor<1x9x6x6xf32>, tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> +return %5 : tensor<1x12x6x6xf32> + + // CHECK-LABEL: func @test_recompose_concat + // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> { + // CHECK: [[VAR_0_:%.+]] = onnx.Constant dense<{{.*}}> : tensor<6x3x3x3xf32> + + // CHECK: [[VAR_1_:%.+]] = "onnx.NoValue"() + + // CHECK: [[VAR_2_:%.+]] = "onnx.Conv"([[PARAM_0_]], [[VAR_0_]], [[VAR_1_]]) + // CHECK-SAME: : (tensor<1x3x6x6xf32>, tensor<6x3x3x3xf32>, none) -> tensor<1x6x6x6xf32> + // CHECK: [[VAR_3_:%.+]] = "onnx.Relu"([[VAR_2_]]) {onnx_node_name = "onnx.Relu_2"} : (tensor<1x6x6x6xf32>) -> tensor<1x6x6x6xf32> + // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[VAR_3_]], [[PARAM_0_]]) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x6x6xf32>, tensor<1x6x6x6xf32>, tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> + // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x6x6xf32> + +} + +func.func @test_recompose_concat_simple(%arg0: tensor<1x3x4xf32>, %arg1: tensor<1x3x4xf32> ) -> tensor<1x12x4xf32> { +%0 = "onnx.Concat"(%arg0, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x6x4xf32> +%1 = "onnx.Concat"(%0, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} : (tensor<1x6x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32> +%2 = "onnx.Concat"(%1, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_2"} : (tensor<1x9x4xf32>, tensor<1x3x4xf32>) -> tensor<1x12x4xf32> +return %2 : tensor<1x12x4xf32> + + // CHECK-LABEL: func @test_recompose_concat_simple + // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x4xf32>, [[PARAM_1_:%.+]]: tensor<1x3x4xf32>) -> tensor<1x12x4xf32> { + // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[PARAM_1_]], [[PARAM_0_]], [[PARAM_1_]]) + // CHECK-SAME: {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} + // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x4xf32> + +} \ No newline at end of file diff --git a/test/mlir/onnx/reorder_relu_to_maxpool.mlir b/test/mlir/onnx/reorder_relu_to_maxpool.mlir deleted file mode 100644 index 133b65b8a5..0000000000 --- a/test/mlir/onnx/reorder_relu_to_maxpool.mlir +++ /dev/null @@ -1,33 +0,0 @@ -// RUN: onnx-mlir --useOnnxModelTypes=false --EmitONNXIR --printIR %s | FileCheck %s - -func.func @test_reorder_relu_maxpool(%arg0: tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> { - %0 = "onnx.Relu"(%arg0) {onnx_node_name = "onnx.Relu_0"} : 
(tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32> - %1 = "onnx.MaxPoolSingleOut"(%0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_1", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> - return %1 : tensor<1x64x16x16xf32> - - // CHECK-LABEL: func @test_reorder_relu_maxpool - // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> { - // CHECK: [[VAR_0_:%.+]] = "onnx.MaxPoolSingleOut"([[PARAM_0_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_0", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> - // CHECK: [[VAR_1_:%.+]] = "onnx.Relu"([[VAR_0_]]) {onnx_node_name = "onnx.Relu_1"} : (tensor<1x64x16x16xf32>) -> tensor<1x64x16x16xf32> - // CHECK-NEXT: return [[VAR_1_]] : tensor<1x64x16x16xf32> - -} - -func.func @test_reorder_relu_maxpool_conv(%arg0: tensor<1x3x32x32xf32>) -> tensor<1x16x15x15xf32> { - %0 = onnx.Constant dense<0.00999999977> : tensor<16x3x3x3xf32> - %1 = onnx.Constant dense<[-0.549453557, -0.827535748, -0.358648896, 0.968641698, -0.0196946431, 0.269008577, -0.445898831, 0.947227954, 0.384573817, 1.60240877, -0.970565319, 0.224884078, -1.80497575, 1.07463968, -0.368380129, -1.6080451]> : tensor<16xf32> - %2 = "onnx.Conv"(%arg0, %0, %1) {auto_pad = "NOTSET", group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "onnx.Conv_0", pads = [0, 0, 0, 0]} : (tensor<1x3x32x32xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<1x16x30x30xf32> - %3 = "onnx.Relu"(%2) {onnx_node_name = "onnx.Relu_1"} : (tensor<1x16x30x30xf32>) -> tensor<1x16x30x30xf32> - %4 = "onnx.MaxPoolSingleOut"(%3) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_2", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x16x30x30xf32>) -> tensor<1x16x15x15xf32> - return %4 : tensor<1x16x15x15xf32> - - // CHECK-LABEL: func @test_reorder_relu_maxpool_conv - // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x32x32xf32>) -> tensor<1x16x15x15xf32> { - // CHECK: [[VAR_0_:%.+]] = onnx.Constant dense<{{.*}}> : tensor<16x3x3x3xf32> - // CHECK: [[VAR_1_:%.+]] = onnx.Constant dense<{{.*}}> : tensor<16xf32> - // CHECK: [[CONV_OUT_:%.+]] = "onnx.Conv"([[PARAM_0_]], [[VAR_0_]], [[VAR_1_]]) - // CHECK-SAME: (tensor<1x3x32x32xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<1x16x30x30xf32> - // CHECK: [[VAR_2_:%.+]] = "onnx.MaxPoolSingleOut"([[CONV_OUT_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.Conv_0_onnx.MaxPoolSingleOut_2", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x16x30x30xf32>) -> tensor<1x16x15x15xf32> - // CHECK: [[VAR_3_:%.+]] = "onnx.Relu"([[VAR_2_]]) {onnx_node_name = "onnx.Relu_3"} : (tensor<1x16x15x15xf32>) -> tensor<1x16x15x15xf32> - // CHECK-NEXT: return [[VAR_3_]] : tensor<1x16x15x15xf32> -} \ No newline at end of file diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py index dfa4f81c7b..355569deab 100755 --- a/utils/gen_onnx_mlir.py +++ b/utils/gen_onnx_mlir.py @@ -344,7 +344,6 @@ "Less", "Loop", "LSTM", - "MaxPool", "Mul", "Or", "Pow", From 9541fb64a7689efd17c6baaeeb9dfbf7d424bc6b Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Tue, 8 Apr 2025 07:51:52 -0400 Subject: [PATCH 09/15] Removed unwanted files Signed-off-by: Arkar-Hema --- test/mlir/onnx/onnx_hybrid_transform.mlir | 10 +- utils/OpBuildTable.inc | 916 ---------------------- 2 files 
changed, 5 insertions(+), 921 deletions(-) delete mode 100644 utils/OpBuildTable.inc diff --git a/test/mlir/onnx/onnx_hybrid_transform.mlir b/test/mlir/onnx/onnx_hybrid_transform.mlir index a11e248678..ca663bf945 100644 --- a/test/mlir/onnx/onnx_hybrid_transform.mlir +++ b/test/mlir/onnx/onnx_hybrid_transform.mlir @@ -243,8 +243,8 @@ func.func @test_inception_v2_6_snippet(%arg0: tensor<1x3x224x224xf32>, %arg1: te // DECOMPOSE-DAG: [[VAR_40_:%.+]] = "onnx.Mul"([[VAR_38_]], [[VAR_39_]]) : (tensor<1x64x112x112xf32>, tensor<64x1x1xf32>) -> tensor<1x64x112x112xf32> // DECOMPOSE-DAG: [[VAR_41_:%.+]] = "onnx.Unsqueeze"([[VAR_8_]], [[VAR_0_]]) : (tensor<64xf32>, tensor<2xi64>) -> tensor<64x1x1xf32> // DECOMPOSE: [[VAR_42_:%.+]] = "onnx.Add"([[VAR_40_]], [[VAR_41_]]) : (tensor<1x64x112x112xf32>, tensor<64x1x1xf32>) -> tensor<1x64x112x112xf32> -// DECOMPOSE: [[VAR_43_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_42_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x112x112xf32>) -> tensor<1x64x56x56xf32> -// DECOMPOSE-DAG: [[VAR_44_:%.+]] = "onnx.Relu"([[VAR_43_]]) : (tensor<1x64x56x56xf32>) -> tensor<1x64x56x56xf32> +// DECOMPOSE: [[VAR_43_:%.+]] = "onnx.Relu"([[VAR_42_]]) : (tensor<1x64x112x112xf32>) -> tensor<1x64x112x112xf32> +// DECOMPOSE-DAG: [[VAR_44_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_43_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x112x112xf32>) -> tensor<1x64x56x56xf32> // DECOMPOSE-DAG: [[VAR_45_:%.+]] = "onnx.Add"([[VAR_13_]], [[VAR_1_]]) : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_46_:%.+]] = "onnx.Sqrt"([[VAR_45_]]) : (tensor<64xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_47_:%.+]] = "onnx.Div"([[VAR_10_]], [[VAR_46_]]) : (tensor<64xf32>, tensor<64xf32>) -> tensor<64xf32> @@ -274,8 +274,8 @@ func.func @test_inception_v2_6_snippet(%arg0: tensor<1x3x224x224xf32>, %arg1: te // DECOMPOSE-DAG: [[VAR_69_:%.+]] = "onnx.Mul"([[VAR_67_]], [[VAR_68_]]) : (tensor<1x192x56x56xf32>, tensor<192x1x1xf32>) -> tensor<1x192x56x56xf32> // DECOMPOSE-DAG: [[VAR_70_:%.+]] = "onnx.Unsqueeze"([[VAR_22_]], [[VAR_0_]]) : (tensor<192xf32>, tensor<2xi64>) -> tensor<192x1x1xf32> // DECOMPOSE: [[VAR_71_:%.+]] = "onnx.Add"([[VAR_69_]], [[VAR_70_]]) : (tensor<1x192x56x56xf32>, tensor<192x1x1xf32>) -> tensor<1x192x56x56xf32> -// DECOMPOSE: [[VAR_72_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_71_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x192x56x56xf32>) -> tensor<1x192x28x28xf32> -// DECOMPOSE-DAG: [[VAR_73_:%.+]] = "onnx.Relu"([[VAR_72_]]) : (tensor<1x192x28x28xf32>) -> tensor<1x192x28x28xf32> +// DECOMPOSE: [[VAR_72_:%.+]] = "onnx.Relu"([[VAR_71_]]) : (tensor<1x192x56x56xf32>) -> tensor<1x192x56x56xf32> +// DECOMPOSE-DAG: [[VAR_73_:%.+]] = "onnx.MaxPoolSingleOut"([[VAR_72_]]) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x192x56x56xf32>) -> tensor<1x192x28x28xf32> // DECOMPOSE-DAG: [[VAR_74_:%.+]] = "onnx.Add"([[VAR_27_]], [[VAR_1_]]) : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_75_:%.+]] = "onnx.Sqrt"([[VAR_74_]]) : (tensor<64xf32>) -> tensor<64xf32> // DECOMPOSE: [[VAR_76_:%.+]] = "onnx.Div"([[VAR_24_]], [[VAR_75_]]) : (tensor<64xf32>, tensor<64xf32>) -> tensor<64xf32> 
@@ -335,4 +335,4 @@ func.func @test_inception_v2_6_snippet(%arg0: tensor<1x3x224x224xf32>, %arg1: te // CONSTPROP: } // LIMIT: Warning: onnx-hybrid-transform didn't converge with max-num-rewrites-offset=1, max-num-rewrites-multiplier=0.000000e+00 -// LIMIT-LABEL: func.func @test_inception_v2_6_snippet +// LIMIT-LABEL: func.func @test_inception_v2_6_snippet \ No newline at end of file diff --git a/utils/OpBuildTable.inc b/utils/OpBuildTable.inc deleted file mode 100644 index ed0c277f77..0000000000 --- a/utils/OpBuildTable.inc +++ /dev/null @@ -1,916 +0,0 @@ -//******************************************************** -// Do not modify this file directly. -// This file is automatically generated via script. -// Details can be found in docs/ImportONNXDefs.md . -//******************************************************** - -op_dialect_version_map_["Abs"] = {13}; -op_dialect_version_map_["Acos"] = {22}; -op_dialect_version_map_["Acosh"] = {22}; -op_dialect_version_map_["Adagrad"] = {1}; -op_dialect_version_map_["Adam"] = {1}; -op_dialect_version_map_["Add"] = {14}; -op_dialect_version_map_["And"] = {7}; -op_dialect_version_map_["ArgMax"] = {13}; -op_dialect_version_map_["ArgMin"] = {13}; -op_dialect_version_map_["ArrayFeatureExtractor"] = {1}; -op_dialect_version_map_["Asin"] = {22}; -op_dialect_version_map_["Asinh"] = {22}; -op_dialect_version_map_["Atan"] = {22}; -op_dialect_version_map_["Atanh"] = {22}; -op_dialect_version_map_["AveragePool"] = {22}; -op_dialect_version_map_["BatchNormalization"] = {15}; -op_dialect_version_map_["Bernoulli"] = {22}; -op_dialect_version_map_["Binarizer"] = {1}; -op_dialect_version_map_["BitShift"] = {11}; -op_dialect_version_map_["BitwiseAnd"] = {18}; -op_dialect_version_map_["BitwiseNot"] = {18}; -op_dialect_version_map_["BitwiseOr"] = {18}; -op_dialect_version_map_["BitwiseXor"] = {18}; -op_dialect_version_map_["BlackmanWindow"] = {17}; -op_dialect_version_map_["Cast"] = {21}; -op_dialect_version_map_["CastLike"] = {19}; -op_dialect_version_map_["CastMap"] = {1}; -op_dialect_version_map_["CategoryMapper"] = {1}; -op_dialect_version_map_["Ceil"] = {13}; -op_dialect_version_map_["Celu"] = {12}; -op_dialect_version_map_["CenterCropPad"] = {18}; -op_dialect_version_map_["Clip"] = {13, 12, 11, 6}; -op_dialect_version_map_["Compress"] = {11}; -op_dialect_version_map_["Concat"] = {13}; -op_dialect_version_map_["ConcatFromSequence"] = {11}; -op_dialect_version_map_["Constant"] = {19}; -op_dialect_version_map_["ConstantOfShape"] = {20}; -op_dialect_version_map_["Conv"] = {22}; -op_dialect_version_map_["ConvInteger"] = {10}; -op_dialect_version_map_["ConvTranspose"] = {22}; -op_dialect_version_map_["Cos"] = {22}; -op_dialect_version_map_["Cosh"] = {22}; -op_dialect_version_map_["Col2Im"] = {18}; -op_dialect_version_map_["CumSum"] = {14}; -op_dialect_version_map_["DeformConv"] = {22}; -op_dialect_version_map_["DepthToSpace"] = {13}; -op_dialect_version_map_["DequantizeLinear"] = {19}; -op_dialect_version_map_["Det"] = {22}; -op_dialect_version_map_["DFT"] = {20, 17}; -op_dialect_version_map_["DictVectorizer"] = {1}; -op_dialect_version_map_["Div"] = {14}; -op_dialect_version_map_["Dropout"] = {22}; -op_dialect_version_map_["DynamicQuantizeLinear"] = {11}; -op_dialect_version_map_["Einsum"] = {12}; -op_dialect_version_map_["Elu"] = {22}; -op_dialect_version_map_["Equal"] = {19}; -op_dialect_version_map_["Erf"] = {13}; -op_dialect_version_map_["Exp"] = {13}; -op_dialect_version_map_["Expand"] = {13}; -op_dialect_version_map_["EyeLike"] = {22}; 
-op_dialect_version_map_["FeatureVectorizer"] = {1}; -op_dialect_version_map_["Flatten"] = {21}; -op_dialect_version_map_["Floor"] = {13}; -op_dialect_version_map_["GRU"] = {22}; -op_dialect_version_map_["Gather"] = {13}; -op_dialect_version_map_["GatherElements"] = {13}; -op_dialect_version_map_["GatherND"] = {13}; -op_dialect_version_map_["Gelu"] = {20}; -op_dialect_version_map_["Gemm"] = {13}; -op_dialect_version_map_["GlobalAveragePool"] = {22}; -op_dialect_version_map_["GlobalLpPool"] = {2}; -op_dialect_version_map_["GlobalMaxPool"] = {22}; -op_dialect_version_map_["Gradient"] = {1}; -op_dialect_version_map_["Greater"] = {13}; -op_dialect_version_map_["GreaterOrEqual"] = {16}; -op_dialect_version_map_["GridSample"] = {22, 16}; -op_dialect_version_map_["GroupNormalization"] = {21, 18}; -op_dialect_version_map_["HammingWindow"] = {17}; -op_dialect_version_map_["HannWindow"] = {17}; -op_dialect_version_map_["HardSigmoid"] = {22}; -op_dialect_version_map_["Hardmax"] = {13}; -op_dialect_version_map_["HardSwish"] = {22}; -op_dialect_version_map_["Identity"] = {21}; -op_dialect_version_map_["If"] = {21}; -op_dialect_version_map_["Imputer"] = {1}; -op_dialect_version_map_["InstanceNormalization"] = {22}; -op_dialect_version_map_["IsInf"] = {20}; -op_dialect_version_map_["IsNaN"] = {20}; -op_dialect_version_map_["LayerNormalization"] = {17}; -op_dialect_version_map_["LRN"] = {13}; -op_dialect_version_map_["LSTM"] = {22}; -op_dialect_version_map_["LabelEncoder"] = {2}; -op_dialect_version_map_["LeakyRelu"] = {16}; -op_dialect_version_map_["Less"] = {13}; -op_dialect_version_map_["LessOrEqual"] = {16}; -op_dialect_version_map_["LinearClassifier"] = {1}; -op_dialect_version_map_["LinearRegressor"] = {1}; -op_dialect_version_map_["Log"] = {13}; -op_dialect_version_map_["LogSoftmax"] = {13}; -op_dialect_version_map_["Loop"] = {21}; -op_dialect_version_map_["LpNormalization"] = {22}; -op_dialect_version_map_["LpPool"] = {22}; -op_dialect_version_map_["MatMul"] = {13}; -op_dialect_version_map_["MatMulInteger"] = {10}; -op_dialect_version_map_["Max"] = {13}; -op_dialect_version_map_["MaxPool"] = {22}; -op_dialect_version_map_["MaxRoiPool"] = {22}; -op_dialect_version_map_["MaxUnpool"] = {22}; -op_dialect_version_map_["Mean"] = {13}; -op_dialect_version_map_["MeanVarianceNormalization"] = {13}; -op_dialect_version_map_["MelWeightMatrix"] = {17}; -op_dialect_version_map_["Min"] = {13}; -op_dialect_version_map_["Mish"] = {22}; -op_dialect_version_map_["Mod"] = {13}; -op_dialect_version_map_["Momentum"] = {1}; -op_dialect_version_map_["Mul"] = {14}; -op_dialect_version_map_["Multinomial"] = {22}; -op_dialect_version_map_["Neg"] = {13}; -op_dialect_version_map_["NegativeLogLikelihoodLoss"] = {22}; -op_dialect_version_map_["NonMaxSuppression"] = {11}; -op_dialect_version_map_["NonZero"] = {13}; -op_dialect_version_map_["Normalizer"] = {1}; -op_dialect_version_map_["Not"] = {1}; -op_dialect_version_map_["OneHot"] = {11}; -op_dialect_version_map_["OneHotEncoder"] = {1}; -op_dialect_version_map_["Optional"] = {15}; -op_dialect_version_map_["OptionalGetElement"] = {18}; -op_dialect_version_map_["OptionalHasElement"] = {18}; -op_dialect_version_map_["Or"] = {7}; -op_dialect_version_map_["PRelu"] = {16}; -op_dialect_version_map_["Pad"] = {21, 18, 13, 11, 2}; -op_dialect_version_map_["Pow"] = {15}; -op_dialect_version_map_["QLinearConv"] = {10}; -op_dialect_version_map_["QLinearMatMul"] = {10}; -op_dialect_version_map_["QuantizeLinear"] = {19}; -op_dialect_version_map_["RNN"] = {22}; 
-op_dialect_version_map_["RandomNormal"] = {22}; -op_dialect_version_map_["RandomNormalLike"] = {22}; -op_dialect_version_map_["RandomUniform"] = {22}; -op_dialect_version_map_["RandomUniformLike"] = {22}; -op_dialect_version_map_["Range"] = {11}; -op_dialect_version_map_["Reciprocal"] = {13}; -op_dialect_version_map_["ReduceL1"] = {18, 13}; -op_dialect_version_map_["ReduceL2"] = {18, 13}; -op_dialect_version_map_["ReduceLogSum"] = {18, 13}; -op_dialect_version_map_["ReduceLogSumExp"] = {18, 13}; -op_dialect_version_map_["ReduceMax"] = {20, 18, 13}; -op_dialect_version_map_["ReduceMean"] = {18, 13}; -op_dialect_version_map_["ReduceMin"] = {20, 18, 13}; -op_dialect_version_map_["ReduceProd"] = {18, 13}; -op_dialect_version_map_["ReduceSum"] = {13, 11}; -op_dialect_version_map_["ReduceSumSquare"] = {18, 13}; -op_dialect_version_map_["Relu"] = {14}; -op_dialect_version_map_["Reshape"] = {21}; -op_dialect_version_map_["Resize"] = {19, 18, 13, 11, 10}; -op_dialect_version_map_["ReverseSequence"] = {10}; -op_dialect_version_map_["RoiAlign"] = {22}; -op_dialect_version_map_["Round"] = {22}; -op_dialect_version_map_["SVMClassifier"] = {1}; -op_dialect_version_map_["SVMRegressor"] = {1}; -op_dialect_version_map_["Scaler"] = {1}; -op_dialect_version_map_["Scan"] = {21}; -op_dialect_version_map_["Scatter"] = {11}; -op_dialect_version_map_["ScatterElements"] = {18}; -op_dialect_version_map_["ScatterND"] = {18}; -op_dialect_version_map_["Selu"] = {22}; -op_dialect_version_map_["SequenceAt"] = {11}; -op_dialect_version_map_["SequenceConstruct"] = {11}; -op_dialect_version_map_["SequenceEmpty"] = {11}; -op_dialect_version_map_["SequenceErase"] = {11}; -op_dialect_version_map_["SequenceInsert"] = {11}; -op_dialect_version_map_["SequenceLength"] = {11}; -op_dialect_version_map_["SequenceMap"] = {17}; -op_dialect_version_map_["Shape"] = {21}; -op_dialect_version_map_["Shrink"] = {9}; -op_dialect_version_map_["Sigmoid"] = {13}; -op_dialect_version_map_["Sign"] = {13}; -op_dialect_version_map_["Sin"] = {22}; -op_dialect_version_map_["Sinh"] = {22}; -op_dialect_version_map_["Size"] = {21}; -op_dialect_version_map_["Slice"] = {13}; -op_dialect_version_map_["Softmax"] = {13, 11}; -op_dialect_version_map_["SoftmaxCrossEntropyLoss"] = {13}; -op_dialect_version_map_["Softplus"] = {22}; -op_dialect_version_map_["Softsign"] = {22}; -op_dialect_version_map_["SpaceToDepth"] = {13}; -op_dialect_version_map_["Split"] = {18, 13, 11}; -op_dialect_version_map_["SplitToSequence"] = {11}; -op_dialect_version_map_["Sqrt"] = {13}; -op_dialect_version_map_["Squeeze"] = {21, 11}; -op_dialect_version_map_["StringNormalizer"] = {10}; -op_dialect_version_map_["STFT"] = {17}; -op_dialect_version_map_["Sub"] = {14}; -op_dialect_version_map_["Sum"] = {13}; -op_dialect_version_map_["Tan"] = {22}; -op_dialect_version_map_["Tanh"] = {13}; -op_dialect_version_map_["TfIdfVectorizer"] = {9}; -op_dialect_version_map_["ThresholdedRelu"] = {22}; -op_dialect_version_map_["Tile"] = {13}; -op_dialect_version_map_["TopK"] = {11}; -op_dialect_version_map_["Transpose"] = {21}; -op_dialect_version_map_["Trilu"] = {14}; -op_dialect_version_map_["TreeEnsembleClassifier"] = {1}; -op_dialect_version_map_["TreeEnsembleRegressor"] = {1}; -op_dialect_version_map_["Unique"] = {11}; -op_dialect_version_map_["Unsqueeze"] = {21, 11}; -op_dialect_version_map_["Upsample"] = {10, 7}; -op_dialect_version_map_["Where"] = {16}; -op_dialect_version_map_["Xor"] = {7}; -op_dialect_version_map_["ZipMap"] = {1}; -import_handler_map_["Abs"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Acos"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Acosh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Add"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["And"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ArgMax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ArgMin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Asin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Asinh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Atan"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Atanh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["AveragePool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BatchNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeBatchNormalization; -import_handler_map_["Bernoulli"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitShift"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseAnd"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseNot"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseOr"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseXor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BlackmanWindow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Cast"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeCast; -import_handler_map_["CastLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Ceil"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Celu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CenterCropPad"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Clip"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ClipV12"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ClipV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ClipV6"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Col2Im"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Compress"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Concat"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConcatFromSequence"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Constant"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConstantOfShape"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Conv"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConvInteger"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConvTranspose"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Cos"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Cosh"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CumSum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DFT"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DFTV17"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DeformConv"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DepthToSpace"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DequantizeLinear"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Det"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Div"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Dropout"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeDropout; -import_handler_map_["DynamicQuantizeLinear"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Einsum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Elu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Equal"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Erf"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Exp"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Expand"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["EyeLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Flatten"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Floor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GRU"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gather"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GatherElements"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GatherND"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gemm"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GlobalAveragePool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GlobalLpPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GlobalMaxPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Greater"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GreaterOrEqual"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GridSample"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GridSampleV16"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GroupNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GroupNormalizationV18"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HammingWindow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HannWindow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HardSigmoid"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HardSwish"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Hardmax"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Identity"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["If"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["InstanceNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["IsInf"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["IsNaN"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LRN"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LSTM"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LayerNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LeakyRelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Less"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LessOrEqual"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Log"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LogSoftmax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Loop"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LpNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LpPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MatMul"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MatMulInteger"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Max"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MaxPool"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeMaxPool; -import_handler_map_["MaxRoiPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MaxUnpool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mean"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MeanVarianceNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MelWeightMatrix"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Min"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mish"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mod"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mul"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Multinomial"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Neg"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["NegativeLogLikelihoodLoss"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["NonMaxSuppression"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["NonZero"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Not"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OneHot"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Optional"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OptionalGetElement"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OptionalHasElement"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Or"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PRelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Pad"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodePad; -import_handler_map_["PadV18"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PadV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PadV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PadV2"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Pow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["QLinearConv"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["QLinearMatMul"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["QuantizeLinear"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RNN"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomNormal"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomNormalLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomUniform"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomUniformLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Range"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Reciprocal"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL1"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL1V13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL2"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL2V13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSumV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSumExp"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSumExpV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMaxV18"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMaxV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMean"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMeanV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMinV18"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMinV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceProd"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceProdV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSumV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["ReduceSumSquare"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSumSquareV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Relu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Reshape"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Resize"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV18"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV10"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReverseSequence"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RoiAlign"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Round"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["STFT"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Scan"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Scatter"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ScatterElements"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ScatterND"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Selu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceAt"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceConstruct"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceEmpty"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceErase"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceInsert"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceLength"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceMap"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Shape"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Shrink"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sigmoid"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sign"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sinh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Size"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Slice"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeSlice; -import_handler_map_["Softmax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SoftmaxV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SoftmaxCrossEntropyLoss"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Softplus"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Softsign"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SpaceToDepth"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["Split"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SplitV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SplitV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SplitToSequence"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sqrt"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Squeeze"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SqueezeV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["StringNormalizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sub"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Tan"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Tanh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TfIdfVectorizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ThresholdedRelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Tile"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TopK"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Transpose"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Trilu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Unique"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Unsqueeze"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["UnsqueezeV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Upsample"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["UpsampleV7"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Where"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Xor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ArrayFeatureExtractor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Binarizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CastMap"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CategoryMapper"] = - &onnx_mlir::detail::FrontendGenImpl::ImportCategoryMapper; -import_handler_map_["DictVectorizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["FeatureVectorizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Imputer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LabelEncoder"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LinearClassifier"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LinearRegressor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Normalizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OneHotEncoder"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SVMClassifier"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SVMRegressor"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Scaler"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TreeEnsembleClassifier"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TreeEnsembleRegressor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ZipMap"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Adagrad"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Adam"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gradient"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Momentum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -op_opsets_map_["Abs"] = {13, 6, 1}; -op_opsets_map_["Acos"] = {22, 7}; -op_opsets_map_["Acosh"] = {22, 9}; -op_opsets_map_["Add"] = {14, 13, 7, 6, 1}; -op_opsets_map_["AffineGrid"] = {20}; -op_opsets_map_["And"] = {7, 1}; -op_opsets_map_["ArgMax"] = {13, 12, 11, 1}; -op_opsets_map_["ArgMin"] = {13, 12, 11, 1}; -op_opsets_map_["Asin"] = {22, 7}; -op_opsets_map_["Asinh"] = {22, 9}; -op_opsets_map_["Atan"] = {22, 7}; -op_opsets_map_["Atanh"] = {22, 9}; -op_opsets_map_["AveragePool"] = {22, 19, 11, 10, 7, 1}; -op_opsets_map_["BatchNormalization"] = {15, 14, 9, 7, 6, 1}; -op_opsets_map_["Bernoulli"] = {22, 15}; -op_opsets_map_["BitShift"] = {11}; -op_opsets_map_["BitwiseAnd"] = {18}; -op_opsets_map_["BitwiseNot"] = {18}; -op_opsets_map_["BitwiseOr"] = {18}; -op_opsets_map_["BitwiseXor"] = {18}; -op_opsets_map_["BlackmanWindow"] = {17}; -op_opsets_map_["Cast"] = {21, 19, 13, 9, 6, 1}; -op_opsets_map_["CastLike"] = {21, 19, 15}; -op_opsets_map_["Ceil"] = {13, 6, 1}; -op_opsets_map_["Celu"] = {12}; -op_opsets_map_["CenterCropPad"] = {18}; -op_opsets_map_["Clip"] = {13, 12, 11, 6, 1}; -op_opsets_map_["Col2Im"] = {18}; -op_opsets_map_["Compress"] = {11, 9}; -op_opsets_map_["Concat"] = {13, 11, 4, 1}; -op_opsets_map_["ConcatFromSequence"] = {11}; -op_opsets_map_["Constant"] = {21, 19, 13, 12, 11, 9, 1}; -op_opsets_map_["ConstantOfShape"] = {21, 20, 9}; -op_opsets_map_["Conv"] = {22, 11, 1}; -op_opsets_map_["ConvInteger"] = {10}; -op_opsets_map_["ConvTranspose"] = {22, 11, 1}; -op_opsets_map_["Cos"] = {22, 7}; -op_opsets_map_["Cosh"] = {22, 9}; -op_opsets_map_["CumSum"] = {14, 11}; -op_opsets_map_["DFT"] = {20, 17}; -op_opsets_map_["DeformConv"] = {22, 19}; -op_opsets_map_["DepthToSpace"] = {13, 11, 1}; -op_opsets_map_["DequantizeLinear"] = {21, 19, 13, 10}; -op_opsets_map_["Det"] = {22, 11}; -op_opsets_map_["Div"] = {14, 13, 7, 6, 1}; -op_opsets_map_["Dropout"] = {22, 13, 12, 10, 7, 6, 1}; -op_opsets_map_["DynamicQuantizeLinear"] = {11}; -op_opsets_map_["Einsum"] = {12}; -op_opsets_map_["Elu"] = {22, 6, 1}; -op_opsets_map_["Equal"] = {19, 13, 11, 7, 1}; -op_opsets_map_["Erf"] = {13, 9}; -op_opsets_map_["Exp"] = {13, 6, 1}; -op_opsets_map_["Expand"] = {13, 8}; -op_opsets_map_["EyeLike"] = {22, 9}; -op_opsets_map_["Flatten"] = {21, 13, 11, 9, 1}; -op_opsets_map_["Floor"] = {13, 6, 1}; -op_opsets_map_["GRU"] = {22, 14, 7, 3, 1}; -op_opsets_map_["Gather"] = {13, 11, 1}; -op_opsets_map_["GatherElements"] = {13, 11}; -op_opsets_map_["GatherND"] = {13, 12, 11}; -op_opsets_map_["Gelu"] = {20}; -op_opsets_map_["Gemm"] = {13, 11, 9, 7, 6, 1}; -op_opsets_map_["GlobalAveragePool"] = {22, 1}; -op_opsets_map_["GlobalLpPool"] = {22, 2, 1}; -op_opsets_map_["GlobalMaxPool"] = {22, 1}; -op_opsets_map_["Greater"] = {13, 9, 7, 1}; 
-op_opsets_map_["GreaterOrEqual"] = {16, 12}; -op_opsets_map_["GridSample"] = {22, 20, 16}; -op_opsets_map_["GroupNormalization"] = {21, 18}; -op_opsets_map_["HammingWindow"] = {17}; -op_opsets_map_["HannWindow"] = {17}; -op_opsets_map_["HardSigmoid"] = {22, 6, 1}; -op_opsets_map_["HardSwish"] = {22, 14}; -op_opsets_map_["Hardmax"] = {13, 11, 1}; -op_opsets_map_["Identity"] = {21, 19, 16, 14, 13, 1}; -op_opsets_map_["If"] = {21, 19, 16, 13, 11, 1}; -op_opsets_map_["ImageDecoder"] = {20}; -op_opsets_map_["InstanceNormalization"] = {22, 6, 1}; -op_opsets_map_["IsInf"] = {20, 10}; -op_opsets_map_["IsNaN"] = {20, 13, 9}; -op_opsets_map_["LRN"] = {13, 1}; -op_opsets_map_["LSTM"] = {22, 14, 7, 1}; -op_opsets_map_["LayerNormalization"] = {17}; -op_opsets_map_["LeakyRelu"] = {16, 6, 1}; -op_opsets_map_["Less"] = {13, 9, 7, 1}; -op_opsets_map_["LessOrEqual"] = {16, 12}; -op_opsets_map_["Log"] = {13, 6, 1}; -op_opsets_map_["LogSoftmax"] = {13, 11, 1}; -op_opsets_map_["Loop"] = {21, 19, 16, 13, 11, 1}; -op_opsets_map_["LpNormalization"] = {22, 1}; -op_opsets_map_["LpPool"] = {22, 18, 11, 2, 1}; -op_opsets_map_["MatMul"] = {13, 9, 1}; -op_opsets_map_["MatMulInteger"] = {10}; -op_opsets_map_["Max"] = {13, 12, 8, 6, 1}; -op_opsets_map_["MaxPool"] = {22, 12, 11, 10, 8, 1}; -op_opsets_map_["MaxRoiPool"] = {22, 1}; -op_opsets_map_["MaxUnpool"] = {22, 11, 9}; -op_opsets_map_["Mean"] = {13, 8, 6, 1}; -op_opsets_map_["MeanVarianceNormalization"] = {13, 9}; -op_opsets_map_["MelWeightMatrix"] = {17}; -op_opsets_map_["Min"] = {13, 12, 8, 6, 1}; -op_opsets_map_["Mish"] = {22, 18}; -op_opsets_map_["Mod"] = {13, 10}; -op_opsets_map_["Mul"] = {14, 13, 7, 6, 1}; -op_opsets_map_["Multinomial"] = {22, 7}; -op_opsets_map_["Neg"] = {13, 6, 1}; -op_opsets_map_["NegativeLogLikelihoodLoss"] = {22, 13, 12}; -op_opsets_map_["NonMaxSuppression"] = {11, 10}; -op_opsets_map_["NonZero"] = {13, 9}; -op_opsets_map_["Not"] = {1}; -op_opsets_map_["OneHot"] = {11, 9}; -op_opsets_map_["Optional"] = {15}; -op_opsets_map_["OptionalGetElement"] = {18, 15}; -op_opsets_map_["OptionalHasElement"] = {18, 15}; -op_opsets_map_["Or"] = {7, 1}; -op_opsets_map_["PRelu"] = {16, 9, 7, 6, 1}; -op_opsets_map_["Pad"] = {21, 19, 18, 13, 11, 2, 1}; -op_opsets_map_["Pow"] = {15, 13, 12, 7, 1}; -op_opsets_map_["QLinearConv"] = {10}; -op_opsets_map_["QLinearMatMul"] = {21, 10}; -op_opsets_map_["QuantizeLinear"] = {21, 19, 13, 10}; -op_opsets_map_["RNN"] = {22, 14, 7, 1}; -op_opsets_map_["RandomNormal"] = {22, 1}; -op_opsets_map_["RandomNormalLike"] = {22, 1}; -op_opsets_map_["RandomUniform"] = {22, 1}; -op_opsets_map_["RandomUniformLike"] = {22, 1}; -op_opsets_map_["Range"] = {11}; -op_opsets_map_["Reciprocal"] = {13, 6, 1}; -op_opsets_map_["ReduceL1"] = {18, 13, 11, 1}; -op_opsets_map_["ReduceL2"] = {18, 13, 11, 1}; -op_opsets_map_["ReduceLogSum"] = {18, 13, 11, 1}; -op_opsets_map_["ReduceLogSumExp"] = {18, 13, 11, 1}; -op_opsets_map_["ReduceMax"] = {20, 18, 13, 12, 11, 1}; -op_opsets_map_["ReduceMean"] = {18, 13, 11, 1}; -op_opsets_map_["ReduceMin"] = {20, 18, 13, 12, 11, 1}; -op_opsets_map_["ReduceProd"] = {18, 13, 11, 1}; -op_opsets_map_["ReduceSum"] = {13, 11, 1}; -op_opsets_map_["ReduceSumSquare"] = {18, 13, 11, 1}; -op_opsets_map_["RegexFullMatch"] = {20}; -op_opsets_map_["Relu"] = {14, 13, 6, 1}; -op_opsets_map_["Reshape"] = {21, 19, 14, 13, 5, 1}; -op_opsets_map_["Resize"] = {19, 18, 13, 11, 10}; -op_opsets_map_["ReverseSequence"] = {10}; -op_opsets_map_["RoiAlign"] = {22, 16, 10}; -op_opsets_map_["Round"] = {22, 11}; -op_opsets_map_["STFT"] = 
{17}; -op_opsets_map_["Scan"] = {21, 19, 16, 11, 9, 8}; -op_opsets_map_["Scatter"] = {11, 9}; -op_opsets_map_["ScatterElements"] = {18, 16, 13, 11}; -op_opsets_map_["ScatterND"] = {18, 16, 13, 11}; -op_opsets_map_["Selu"] = {22, 6, 1}; -op_opsets_map_["SequenceAt"] = {11}; -op_opsets_map_["SequenceConstruct"] = {11}; -op_opsets_map_["SequenceEmpty"] = {11}; -op_opsets_map_["SequenceErase"] = {11}; -op_opsets_map_["SequenceInsert"] = {11}; -op_opsets_map_["SequenceLength"] = {11}; -op_opsets_map_["SequenceMap"] = {17}; -op_opsets_map_["Shape"] = {21, 19, 15, 13, 1}; -op_opsets_map_["Shrink"] = {9}; -op_opsets_map_["Sigmoid"] = {13, 6, 1}; -op_opsets_map_["Sign"] = {13, 9}; -op_opsets_map_["Sin"] = {22, 7}; -op_opsets_map_["Sinh"] = {22, 9}; -op_opsets_map_["Size"] = {21, 19, 13, 1}; -op_opsets_map_["Slice"] = {13, 11, 10, 1}; -op_opsets_map_["Softmax"] = {13, 11, 1}; -op_opsets_map_["SoftmaxCrossEntropyLoss"] = {13, 12}; -op_opsets_map_["Softplus"] = {22, 1}; -op_opsets_map_["Softsign"] = {22, 1}; -op_opsets_map_["SpaceToDepth"] = {13, 1}; -op_opsets_map_["Split"] = {18, 13, 11, 2, 1}; -op_opsets_map_["SplitToSequence"] = {11}; -op_opsets_map_["Sqrt"] = {13, 6, 1}; -op_opsets_map_["Squeeze"] = {21, 13, 11, 1}; -op_opsets_map_["StringConcat"] = {20}; -op_opsets_map_["StringNormalizer"] = {10}; -op_opsets_map_["StringSplit"] = {20}; -op_opsets_map_["Sub"] = {14, 13, 7, 6, 1}; -op_opsets_map_["Sum"] = {13, 8, 6, 1}; -op_opsets_map_["Tan"] = {22, 7}; -op_opsets_map_["Tanh"] = {13, 6, 1}; -op_opsets_map_["TfIdfVectorizer"] = {9}; -op_opsets_map_["ThresholdedRelu"] = {22, 10}; -op_opsets_map_["Tile"] = {13, 6, 1}; -op_opsets_map_["TopK"] = {11, 10, 1}; -op_opsets_map_["Transpose"] = {21, 13, 1}; -op_opsets_map_["Trilu"] = {14}; -op_opsets_map_["Unique"] = {11}; -op_opsets_map_["Unsqueeze"] = {21, 13, 11, 1}; -op_opsets_map_["Upsample"] = {10, 9, 7, 1}; -op_opsets_map_["Where"] = {16, 9}; -op_opsets_map_["Xor"] = {7, 1}; -op_opsets_map_["ArrayFeatureExtractor"] = {1}; -op_opsets_map_["Binarizer"] = {1}; -op_opsets_map_["CastMap"] = {1}; -op_opsets_map_["CategoryMapper"] = {1}; -op_opsets_map_["DictVectorizer"] = {1}; -op_opsets_map_["FeatureVectorizer"] = {1}; -op_opsets_map_["Imputer"] = {1}; -op_opsets_map_["LabelEncoder"] = {4, 2, 1}; -op_opsets_map_["LinearClassifier"] = {1}; -op_opsets_map_["LinearRegressor"] = {1}; -op_opsets_map_["Normalizer"] = {1}; -op_opsets_map_["OneHotEncoder"] = {1}; -op_opsets_map_["SVMClassifier"] = {1}; -op_opsets_map_["SVMRegressor"] = {1}; -op_opsets_map_["Scaler"] = {1}; -op_opsets_map_["TreeEnsemble"] = {5}; -op_opsets_map_["TreeEnsembleClassifier"] = {5, 3, 1}; -op_opsets_map_["TreeEnsembleRegressor"] = {5, 3, 1}; -op_opsets_map_["ZipMap"] = {1}; -op_opsets_map_["Adagrad"] = {1}; -op_opsets_map_["Adam"] = {1}; -op_opsets_map_["Gradient"] = {1}; -op_opsets_map_["Momentum"] = {1}; From e331e7eaaa8d532150dd104bf1b5ab75c2bac5a6 Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Tue, 15 Apr 2025 05:25:01 -0400 Subject: [PATCH 10/15] Recompose merge concat in canonicalize.cpp Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/ONNXOps.td.inc | 1 + src/Dialect/ONNX/ONNXOps/Canonicalize.cpp | 62 ++++++++++++++++++++++ src/Dialect/ONNX/Transforms/Decompose.cpp | 1 - src/Dialect/ONNX/Transforms/Recompose.cpp | 64 +---------------------- test/mlir/onnx/onnx_canonicalization.mlir | 16 ++++++ test/mlir/onnx/onnx_hybrid_transform.mlir | 2 +- test/mlir/onnx/recompose_concat.mlir | 38 -------------- utils/gen_onnx_mlir.py | 1 + 8 files changed, 82 insertions(+), 103 deletions(-) 
delete mode 100644 test/mlir/onnx/recompose_concat.mlir diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc index 666c0cde71..ff79b0e7d5 100644 --- a/src/Dialect/ONNX/ONNXOps.td.inc +++ b/src/Dialect/ONNX/ONNXOps.td.inc @@ -1244,6 +1244,7 @@ def ONNXCompressOp:ONNX_Op<"Compress", def ONNXConcatOp:ONNX_Op<"Concat", [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + let hasCanonicalizer = 1; let summary = "ONNX Concat operation"; let description = [{ Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. diff --git a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp index 33c2fdec8b..05b90b371c 100644 --- a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp +++ b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp @@ -1489,6 +1489,62 @@ class FuseTwoReshapesPattern : public OpRewritePattern { } }; +// ============================================================================= +// Rewrite pattern concat +// ============================================================================= + +struct RecomposeConcatPattern : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + // Helper function to check if an input is a mergeable Concat. + static bool isMergeableConcat(Value input, int64_t axis) { + ONNXConcatOp innerConcat = input.getDefiningOp(); + if (!innerConcat) + return false; + return (innerConcat.getAxis() == axis) && + (innerConcat.getResult().hasOneUse()); + } + + LogicalResult matchAndRewrite( + ONNXConcatOp concatOp, PatternRewriter &rewriter) const final { + Location loc = concatOp.getLoc(); + auto inputs = concatOp.getOperands(); + + // If there is only a single input, replace the concat with that input. + if (inputs.size() == 1) { + rewriter.replaceOp(concatOp, inputs[0]); + return success(); + } + + SmallVector newInputs; + bool merged = false; + + // Flatten nested concat nodes. + for (Value input : inputs) { + if (isMergeableConcat(input, concatOp.getAxis())) { + // Remove the nested concat and append its inputs. + ONNXConcatOp innerConcat = cast(input.getDefiningOp()); + newInputs.append(innerConcat.getOperands().begin(), + innerConcat.getOperands().end()); + merged = true; + } else { + // Push non-mergeable input. + newInputs.push_back(input); + } + } + + if (merged) { + // Create a new ONNXConcat op with the flattened inputs. + auto newConcat = rewriter.create( + loc, concatOp.getResult().getType(), newInputs, concatOp.getAxis()); + rewriter.replaceOp(concatOp, newConcat.getResult()); + return success(); + } + + return failure(); + } +}; + // ============================================================================= // Rewrite pattern LayerNormalization // ============================================================================= @@ -1709,6 +1765,12 @@ void ONNXCastOp::getCanonicalizationPatterns( // result.insert(context); } +/// on the ONNXConcatOp. +void ONNXConcatOp::getCanonicalizationPatterns( + RewritePatternSet &results, MLIRContext *context) { + results.insert(context); +} + /// on the ONNXConstantOp. 
void ONNXConstantOp::getCanonicalizationPatterns( RewritePatternSet &results, MLIRContext *context) {} diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp index 45ca7127c6..d61a980e15 100644 --- a/src/Dialect/ONNX/Transforms/Decompose.cpp +++ b/src/Dialect/ONNX/Transforms/Decompose.cpp @@ -1385,7 +1385,6 @@ void DecomposeONNXToONNXPass::runOnOperation() { op.getValueStringAttr() || op.getValueStringsAttr()); }); - // Decompose CustomOp FusedMatMul introduced by onnxruntime: // https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.FusedMatMul target.addDynamicallyLegalOp([](ONNXCustomOp op) { diff --git a/src/Dialect/ONNX/Transforms/Recompose.cpp b/src/Dialect/ONNX/Transforms/Recompose.cpp index f3cbe2926f..9a13f26c71 100644 --- a/src/Dialect/ONNX/Transforms/Recompose.cpp +++ b/src/Dialect/ONNX/Transforms/Recompose.cpp @@ -602,57 +602,6 @@ struct RecomposeQLinearMatMulFromQuantizeLinearPattern } }; -/// Merges nested ONNXConcatOps -struct RecomposeConcatPattern : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - // Helper function to check if an input is a mergeable Concat. - static bool isMergeableConcat(Value input, int64_t axis) { - auto innerConcat = input.getDefiningOp(); - if (!innerConcat) - return false; - return (innerConcat.getAxis() == axis) && - (innerConcat.getResult().hasOneUse()); - } - - LogicalResult matchAndRewrite( - ONNXConcatOp concatOp, PatternRewriter &rewriter) const final { - Location loc = concatOp.getLoc(); - auto inputs = concatOp.getOperands(); - SmallVector newInputs; - bool merged = false; - - // Flatten nested concat nodes. - for (auto input : inputs) { - newInputs.push_back(input); - if (isMergeableConcat(input, concatOp.getAxis())) { - merged = true; - // Remove the nested concat and append its inputs. - newInputs.pop_back(); - auto innerConcat = cast(input.getDefiningOp()); - newInputs.append( - innerConcat.getOperands().begin(), innerConcat.getOperands().end()); - } - } - - if (merged) { - // Create a new ONNXConcat op with the flattened inputs. - auto newConcat = rewriter.create( - loc, concatOp.getResult().getType(), newInputs, concatOp.getAxis()); - rewriter.replaceOp(concatOp, newConcat.getResult()); - return success(); - } - - // If there is only a single input, replace the concat with that input. - if (concatOp.getOperands().size() == 1) { - rewriter.replaceOp(concatOp, concatOp.getOperands()[0]); - return success(); - } - - return failure(); - } -}; - struct RecomposeONNXToONNXPass : public PassWrapper> { MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RecomposeONNXToONNXPass) @@ -705,17 +654,7 @@ void RecomposeONNXToONNXPass::runOnOperation() { return false; return true; - }); - - target.addDynamicallyLegalOp([](ONNXConcatOp op) { - for (Value input : op.getOperands()) { - if (!RecomposeConcatPattern::isMergeableConcat(input, op.getAxis())) { - return true; // Op is legal if any input isn't a mergeable Concat. - } - } - return false; // Op is illegal (needs rewriting) if all inputs are - // mergeable. - }); + }); // Recompose QLinearMatMul, starting from QuantizeLinear. // Pattern: DequanizeLinear + MatMul + QuantizeLinear. @@ -743,7 +682,6 @@ void onnx_mlir::getRecomposeONNXToONNXPatterns( patterns.insert(context); patterns.insert(context); patterns.insert(context); - patterns.insert(context); } /*! 
diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir index bdaa8f4919..d0371d5d3c 100644 --- a/test/mlir/onnx/onnx_canonicalization.mlir +++ b/test/mlir/onnx/onnx_canonicalization.mlir @@ -1922,3 +1922,19 @@ func.func @test_remove_where_equal_5(%arg0: tensor, %arg1: tensor<1xi64 // CHECK: onnx.Return [[VAR_6_]] : tensor<2xi64> // CHECK: } } + +// ----- + +func.func @test_recompose_concat(%arg0: tensor<1x3x4xf32>, %arg1: tensor<1x3x4xf32> ) -> tensor<1x12x4xf32> { +%0 = "onnx.Concat"(%arg0, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x6x4xf32> +%1 = "onnx.Concat"(%0, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} : (tensor<1x6x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32> +%2 = "onnx.Concat"(%1, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_2"} : (tensor<1x9x4xf32>, tensor<1x3x4xf32>) -> tensor<1x12x4xf32> +return %2 : tensor<1x12x4xf32> + + // CHECK-LABEL: func @test_recompose_concat + // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x4xf32>, [[PARAM_1_:%.+]]: tensor<1x3x4xf32>) -> tensor<1x12x4xf32> { + // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[PARAM_1_]], [[PARAM_0_]], [[PARAM_1_]]) + // CHECK-SAME: {axis = 1 : si64} + // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x4xf32> + +} \ No newline at end of file diff --git a/test/mlir/onnx/onnx_hybrid_transform.mlir b/test/mlir/onnx/onnx_hybrid_transform.mlir index ca663bf945..9966f22c7e 100644 --- a/test/mlir/onnx/onnx_hybrid_transform.mlir +++ b/test/mlir/onnx/onnx_hybrid_transform.mlir @@ -335,4 +335,4 @@ func.func @test_inception_v2_6_snippet(%arg0: tensor<1x3x224x224xf32>, %arg1: te // CONSTPROP: } // LIMIT: Warning: onnx-hybrid-transform didn't converge with max-num-rewrites-offset=1, max-num-rewrites-multiplier=0.000000e+00 -// LIMIT-LABEL: func.func @test_inception_v2_6_snippet \ No newline at end of file +// LIMIT-LABEL: func.func @test_inception_v2_6_snippet diff --git a/test/mlir/onnx/recompose_concat.mlir b/test/mlir/onnx/recompose_concat.mlir deleted file mode 100644 index e536f37d7a..0000000000 --- a/test/mlir/onnx/recompose_concat.mlir +++ /dev/null @@ -1,38 +0,0 @@ -// RUN: onnx-mlir --useOnnxModelTypes=false --EmitONNXIR --printIR %s | FileCheck %s - -func.func @test_recompose_concat(%arg0: tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> { -%0 = onnx.Constant dense<0.00999999977> : tensor<6x3x3x3xf32> -%1 = "onnx.NoValue"() {onnx_node_name = "onnx.NoValue_0", value} : () -> none -%2 = "onnx.Conv"(%arg0, %0, %1) {auto_pad = "NOTSET", group = 1 : si64, onnx_node_name = "onnx.Conv_1", pads = [1, 1, 1, 1]} : (tensor<1x3x6x6xf32>, tensor<6x3x3x3xf32>, none) -> tensor<1x6x6x6xf32> -%3 = "onnx.Relu"(%2) {onnx_node_name = "onnx.Relu_2"} : (tensor<1x6x6x6xf32>) -> tensor<1x6x6x6xf32> -%4 = "onnx.Concat"(%arg0, %3) {axis = 1 : si64, onnx_node_name = "onnx.Concat_3"} : (tensor<1x3x6x6xf32>, tensor<1x6x6x6xf32>) -> tensor<1x9x6x6xf32> -%5 = "onnx.Concat"(%4, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_4"} : (tensor<1x9x6x6xf32>, tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> -return %5 : tensor<1x12x6x6xf32> - - // CHECK-LABEL: func @test_recompose_concat - // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> { - // CHECK: [[VAR_0_:%.+]] = onnx.Constant dense<{{.*}}> : tensor<6x3x3x3xf32> - - // CHECK: [[VAR_1_:%.+]] = "onnx.NoValue"() - - // CHECK: [[VAR_2_:%.+]] = "onnx.Conv"([[PARAM_0_]], [[VAR_0_]], [[VAR_1_]]) - // CHECK-SAME: : (tensor<1x3x6x6xf32>, 
tensor<6x3x3x3xf32>, none) -> tensor<1x6x6x6xf32> - // CHECK: [[VAR_3_:%.+]] = "onnx.Relu"([[VAR_2_]]) {onnx_node_name = "onnx.Relu_2"} : (tensor<1x6x6x6xf32>) -> tensor<1x6x6x6xf32> - // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[VAR_3_]], [[PARAM_0_]]) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x6x6xf32>, tensor<1x6x6x6xf32>, tensor<1x3x6x6xf32>) -> tensor<1x12x6x6xf32> - // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x6x6xf32> - -} - -func.func @test_recompose_concat_simple(%arg0: tensor<1x3x4xf32>, %arg1: tensor<1x3x4xf32> ) -> tensor<1x12x4xf32> { -%0 = "onnx.Concat"(%arg0, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x6x4xf32> -%1 = "onnx.Concat"(%0, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} : (tensor<1x6x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32> -%2 = "onnx.Concat"(%1, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_2"} : (tensor<1x9x4xf32>, tensor<1x3x4xf32>) -> tensor<1x12x4xf32> -return %2 : tensor<1x12x4xf32> - - // CHECK-LABEL: func @test_recompose_concat_simple - // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x4xf32>, [[PARAM_1_:%.+]]: tensor<1x3x4xf32>) -> tensor<1x12x4xf32> { - // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[PARAM_1_]], [[PARAM_0_]], [[PARAM_1_]]) - // CHECK-SAME: {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} - // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x4xf32> - -} \ No newline at end of file diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py index 355569deab..b072d25b17 100755 --- a/utils/gen_onnx_mlir.py +++ b/utils/gen_onnx_mlir.py @@ -330,6 +330,7 @@ "Add", "And", "Cast", + "Concat", "Constant", "DepthToSpace", "DequantizeLinear", From 6738155ad2ac99ef6a4fd03382a580d4d2d76142 Mon Sep 17 00:00:00 2001 From: Arkar-Hema Date: Tue, 15 Apr 2025 05:31:09 -0400 Subject: [PATCH 11/15] Modified recompose concat for clang format Signed-off-by: Arkar-Hema --- src/Dialect/ONNX/ONNXOps/Canonicalize.cpp | 5 +++-- src/Dialect/ONNX/Transforms/Recompose.cpp | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp index 05b90b371c..1b8e2cfa1f 100644 --- a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp +++ b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp @@ -1505,8 +1505,8 @@ struct RecomposeConcatPattern : public OpRewritePattern { (innerConcat.getResult().hasOneUse()); } - LogicalResult matchAndRewrite( - ONNXConcatOp concatOp, PatternRewriter &rewriter) const final { + LogicalResult matchAndRewrite(ONNXConcatOp concatOp, + PatternRewriter &rewriter) const final { Location loc = concatOp.getLoc(); auto inputs = concatOp.getOperands(); @@ -1545,6 +1545,7 @@ struct RecomposeConcatPattern : public OpRewritePattern { } }; + // ============================================================================= // Rewrite pattern LayerNormalization // ============================================================================= diff --git a/src/Dialect/ONNX/Transforms/Recompose.cpp b/src/Dialect/ONNX/Transforms/Recompose.cpp index 9a13f26c71..5b57620635 100644 --- a/src/Dialect/ONNX/Transforms/Recompose.cpp +++ b/src/Dialect/ONNX/Transforms/Recompose.cpp @@ -654,7 +654,7 @@ void RecomposeONNXToONNXPass::runOnOperation() { return false; return true; - }); + }); // Recompose QLinearMatMul, starting from QuantizeLinear. // Pattern: DequanizeLinear + MatMul + QuantizeLinear. 
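A minimal sketch of the rewrite performed by the RecomposeConcatPattern introduced above; the value names (%a, %b, %c, %d) and tensor shapes below are placeholders chosen for illustration, not taken from the patches or the test suite. An inner Concat is folded into its user only when it has exactly one use and the same axis as the outer Concat; otherwise it is left untouched.

// Before: nested Concats on the same axis, and %inner has a single use.
%inner = "onnx.Concat"(%a, %b) {axis = 1 : si64} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x6x4xf32>
%outer = "onnx.Concat"(%inner, %c) {axis = 1 : si64} : (tensor<1x6x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32>

// After canonicalization: a single flat Concat over the original operands.
%outer = "onnx.Concat"(%a, %b, %c) {axis = 1 : si64} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32>

// Not merged: the inner Concat uses a different axis, so isMergeableConcat rejects it.
%inner2 = "onnx.Concat"(%a, %b) {axis = 0 : si64} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<2x3x4xf32>
%outer2 = "onnx.Concat"(%inner2, %d) {axis = 1 : si64} : (tensor<2x3x4xf32>, tensor<2x3x4xf32>) -> tensor<2x6x4xf32>

The hasOneUse() guard keeps the rewrite a strict simplification: a multi-use inner Concat would have to stay alive for its other users, so flattening it would duplicate operands instead of removing an op.
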
From 283ddcfc4b5c0b21d6c7c60e7c912764dd36e3ef Mon Sep 17 00:00:00 2001
From: Arkar-Hema
Date: Tue, 15 Apr 2025 05:39:47 -0400
Subject: [PATCH 12/15] Resolved conflicts

Signed-off-by: Arkar-Hema
---
 test/mlir/onnx/onnx_canonicalization.mlir | 32 +++++++++++------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir
index d0371d5d3c..4cea2435b1 100644
--- a/test/mlir/onnx/onnx_canonicalization.mlir
+++ b/test/mlir/onnx/onnx_canonicalization.mlir
@@ -1899,6 +1899,22 @@ func.func @test_remove_where_equal_4(%arg0: tensor) -> tensor<2xi64> {
 
 // -----
 
+func.func @test_recompose_concat(%arg0: tensor<1x3x4xf32>, %arg1: tensor<1x3x4xf32> ) -> tensor<1x12x4xf32> {
+%0 = "onnx.Concat"(%arg0, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x6x4xf32>
+%1 = "onnx.Concat"(%0, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} : (tensor<1x6x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32>
+%2 = "onnx.Concat"(%1, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_2"} : (tensor<1x9x4xf32>, tensor<1x3x4xf32>) -> tensor<1x12x4xf32>
+return %2 : tensor<1x12x4xf32>
+
+  // CHECK-LABEL: func @test_recompose_concat
+  // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x4xf32>, [[PARAM_1_:%.+]]: tensor<1x3x4xf32>) -> tensor<1x12x4xf32> {
+  // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[PARAM_1_]], [[PARAM_0_]], [[PARAM_1_]])
+  // CHECK-SAME: {axis = 1 : si64}
+  // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x4xf32>
+
+}
+
+// -----
+
 // Not rewriting since the operand in ConcatOp is neither DimOp nor ConstantOp.
 func.func @test_remove_where_equal_5(%arg0: tensor, %arg1: tensor<1xi64>, %arg2: tensor<1xi64>) -> tensor<2xi64> {
   %0 = onnx.Constant dense<-1> : tensor<2xi64>
@@ -1922,19 +1938,3 @@ func.func @test_remove_where_equal_5(%arg0: tensor, %arg1: tensor<1xi64
 // CHECK: onnx.Return [[VAR_6_]] : tensor<2xi64>
 // CHECK: }
 }
-
-// -----
-
-func.func @test_recompose_concat(%arg0: tensor<1x3x4xf32>, %arg1: tensor<1x3x4xf32> ) -> tensor<1x12x4xf32> {
-%0 = "onnx.Concat"(%arg0, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_0"} : (tensor<1x3x4xf32>, tensor<1x3x4xf32>) -> tensor<1x6x4xf32>
-%1 = "onnx.Concat"(%0, %arg0) {axis = 1 : si64, onnx_node_name = "onnx.Concat_1"} : (tensor<1x6x4xf32>, tensor<1x3x4xf32>) -> tensor<1x9x4xf32>
-%2 = "onnx.Concat"(%1, %arg1) {axis = 1 : si64, onnx_node_name = "onnx.Concat_2"} : (tensor<1x9x4xf32>, tensor<1x3x4xf32>) -> tensor<1x12x4xf32>
-return %2 : tensor<1x12x4xf32>
-
-  // CHECK-LABEL: func @test_recompose_concat
-  // CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1x3x4xf32>, [[PARAM_1_:%.+]]: tensor<1x3x4xf32>) -> tensor<1x12x4xf32> {
-  // CHECK: [[FINAL_OUT:%.+]] = "onnx.Concat"([[PARAM_0_]], [[PARAM_1_]], [[PARAM_0_]], [[PARAM_1_]])
-  // CHECK-SAME: {axis = 1 : si64}
-  // CHECK-NEXT: return [[FINAL_OUT]] : tensor<1x12x4xf32>
-
-}
\ No newline at end of file

From 4e279a528b8d114667ddbdc1c68ba766ba46e3ef Mon Sep 17 00:00:00 2001
From: Arkar-Hema
Date: Tue, 15 Apr 2025 05:44:26 -0400
Subject: [PATCH 13/15] Resolved clang format

Signed-off-by: Arkar-Hema
---
 src/Dialect/ONNX/ONNXOps/Canonicalize.cpp | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
index 1b8e2cfa1f..bacc200f4c 100644
--- a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
+++ b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
@@ -1505,8 +1505,8 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
            (innerConcat.getResult().hasOneUse());
   }
 
-  LogicalResult matchAndRewrite(ONNXConcatOp concatOp,
-      PatternRewriter &rewriter) const final {
+  LogicalResult matchAndRewrite(
+      ONNXConcatOp concatOp, PatternRewriter &rewriter) const final {
     Location loc = concatOp.getLoc();
     auto inputs = concatOp.getOperands();
 
@@ -1524,8 +1524,8 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
       if (isMergeableConcat(input, concatOp.getAxis())) {
         // Remove the nested concat and append its inputs.
         ONNXConcatOp innerConcat = cast<ONNXConcatOp>(input.getDefiningOp());
-        newInputs.append(innerConcat.getOperands().begin(),
-            innerConcat.getOperands().end());
+        newInputs.append(
+            innerConcat.getOperands().begin(), innerConcat.getOperands().end());
         merged = true;
       } else {
         // Push non-mergeable input.
@@ -1545,7 +1545,6 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
   }
 };
 
-
 // =============================================================================
 // Rewrite pattern LayerNormalization
 // =============================================================================

From cd413ce0779effd41769c6544af37f1e4eec795b Mon Sep 17 00:00:00 2001
From: Arkar-Hema
Date: Tue, 15 Apr 2025 23:18:02 -0400
Subject: [PATCH 14/15] Made minor changes to merge concat

Signed-off-by: Arkar-Hema
---
 src/Dialect/ONNX/ONNXOps/Canonicalize.cpp | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
index bacc200f4c..dd3814dc2d 100644
--- a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
+++ b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
@@ -1498,17 +1498,18 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
 
   // Helper function to check if an input is a mergeable Concat.
   static bool isMergeableConcat(Value input, int64_t axis) {
-    ONNXConcatOp innerConcat = input.getDefiningOp<ONNXConcatOp>();
-    if (!innerConcat)
+    ONNXConcatOp concatOp = input.getDefiningOp<ONNXConcatOp>();
+    if (!concatOp)
       return false;
-    return (innerConcat.getAxis() == axis) &&
-           (innerConcat.getResult().hasOneUse());
+    return (concatOp.getAxis() == axis) &&
+           (concatOp.getResult().hasOneUse());
   }
 
   LogicalResult matchAndRewrite(
       ONNXConcatOp concatOp, PatternRewriter &rewriter) const final {
     Location loc = concatOp.getLoc();
-    auto inputs = concatOp.getOperands();
+    ValueRange inputs = concatOp.getOperands();
+    int64_t axis = concatOp.getAxis();
 
     // If there is only a single input, replace the concat with that input.
     if (inputs.size() == 1) {
@@ -1521,7 +1522,7 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
 
     // Flatten nested concat nodes.
     for (Value input : inputs) {
-      if (isMergeableConcat(input, concatOp.getAxis())) {
+      if (isMergeableConcat(input, axis)) {
         // Remove the nested concat and append its inputs.
         ONNXConcatOp innerConcat = cast<ONNXConcatOp>(input.getDefiningOp());
         newInputs.append(
@@ -1536,7 +1537,7 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
     if (merged) {
       // Create a new ONNXConcat op with the flattened inputs.
       auto newConcat = rewriter.create<ONNXConcatOp>(
-          loc, concatOp.getResult().getType(), newInputs, concatOp.getAxis());
+          loc, concatOp.getResult().getType(), newInputs, axis);
       rewriter.replaceOp(concatOp, newConcat.getResult());
       return success();
     }

From ef7f2e08761f47b4bab36b945236bd3f47a1646b Mon Sep 17 00:00:00 2001
From: Arkar-Hema
Date: Tue, 15 Apr 2025 23:20:25 -0400
Subject: [PATCH 15/15] Made minor changes for clang format

Signed-off-by: Arkar-Hema
---
 src/Dialect/ONNX/ONNXOps/Canonicalize.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
index dd3814dc2d..59c71d373e 100644
--- a/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
+++ b/src/Dialect/ONNX/ONNXOps/Canonicalize.cpp
@@ -1501,8 +1501,7 @@ struct RecomposeConcatPattern : public OpRewritePattern<ONNXConcatOp> {
     ONNXConcatOp concatOp = input.getDefiningOp<ONNXConcatOp>();
     if (!concatOp)
       return false;
-    return (concatOp.getAxis() == axis) &&
-           (concatOp.getResult().hasOneUse());
+    return (concatOp.getAxis() == axis) && (concatOp.getResult().hasOneUse());
   }
 
   LogicalResult matchAndRewrite(