@@ -26,15 +26,15 @@ def run(f):
26
26
# CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>,
27
27
# CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>,
28
28
# CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>) -> !torch.vtensor<[?,?,3],f32> {
29
- # CHECK: %[[S0:.+]] = torch.symbolic_int "s35 " {min_val = 5, max_val = 10} : !torch.int
30
- # CHECK: %[[S1:.+]] = torch.symbolic_int "s16 " {min_val = {{[0-9]+}}, max_val = 100} : !torch.int
31
- # CHECK: %[[S2:.+]] = torch.symbolic_int "s43 " {min_val = {{[0-9]+}}, max_val = 50} : !torch.int
32
- # CHECK: %[[S3:.+]] = torch.symbolic_int "s23 " {min_val = {{[0-9]+}}, max_val = {{[0-9]+}}} : !torch.int
33
- # CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S1]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
29
+ # CHECK: %[[S0:.+]] = torch.symbolic_int "{{[a-z0-9]+}} " {min_val = 5, max_val = 10} : !torch.int
30
+ # CHECK: %[[S1:.+]] = torch.symbolic_int "{{[a-z0-9]+}} " {min_val = {{[0-9]+}}, max_val = 100} : !torch.int
31
+ # CHECK: %[[S2:.+]] = torch.symbolic_int "{{[a-z0-9]+}} " {min_val = {{[0-9]+}}, max_val = 50} : !torch.int
32
+ # CHECK: %[[S3:.+]] = torch.symbolic_int "{{[a-z0-9]+}} " {min_val = {{[0-9]+}}, max_val = {{[0-9]+}}} : !torch.int
33
+ # CHECK-DISABLED : torch.bind_symbolic_shape %[[ARG0]], [%[[S1]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
34
34
# CHECK: torch.bind_symbolic_shape %[[ARG1]], [%[[S0]], %[[S2]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
35
- # CHECK: torch.bind_symbolic_shape %[[ARG2]], [%[[S3]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
35
+ # CHECK-DISABLED : torch.bind_symbolic_shape %[[ARG2]], [%[[S3]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
36
36
# CHECK: %[[OP:.+]] = torch.operator "torch.my_custom_library.tanh_sigmoid_cat_op"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>) -> !torch.vtensor<[?,?,3],f32>
37
- # CHECK: torch.bind_symbolic_shape %[[OP]], [%[[S1]], %[[S3]], %[[S0]], %[[S2]]], affine_map<()[s0, s1, s2, s3] -> (s2, s1 + s3 + s0 * 2, 3)> : !torch.vtensor<[?,?,3],f32>
37
+ # CHECK-DISABLED : torch.bind_symbolic_shape %[[OP]], [%[[S1]], %[[S3]], %[[S0]], %[[S2]]], affine_map<()[s0, s1, s2, s3] -> (s2, s1 + s3 + s0 * 2, 3)> : !torch.vtensor<[?,?,3],f32>
38
38
# CHECK: return %[[OP]] : !torch.vtensor<[?,?,3],f32>
39
39
def test_tanh_sigmoid_cat_custom_op ():
40
40
@@ -89,7 +89,7 @@ def forward(self, x, y, z):
89
89
@run
90
90
# CHECK-LABEL: test_custom_op_array_output
91
91
# CHECK: func.func @main(%[[ARG0:[a-zA-Z0-9]+]]: !torch.vtensor<[?,3],f32>)
92
- # CHECK: %[[S0:.+]] = torch.symbolic_int "s35 " {min_val = {{[0-9]+}}, max_val = 10} : !torch.int
92
+ # CHECK: %[[S0:.+]] = torch.symbolic_int "{{[a-z0-9]+}} " {min_val = {{[0-9]+}}, max_val = 10} : !torch.int
93
93
# CHECK: %[[int:.+]] = torch.constant.int 4
94
94
# CHECK: %[[V0:.+]] = torch.operator "torch.my_custom_library.array_output_op"(%[[int]], %[[ARG0]]) : (!torch.int, !torch.vtensor<[?,3],f32>) -> !torch.list<vtensor>
95
95
# CHECK: %[[V1:.+]]:4 = torch.prim.ListUnpack %[[V0]] : !torch.list<vtensor> -> !torch.vtensor<[?,3],f32>, !torch.vtensor<[?,3],f32>, !torch.vtensor<[?,3],f32>, !torch.vtensor<[?,3],f32>
0 commit comments