Commit ae4d558

add tests and rebase
1 parent d8509fa commit ae4d558

File tree

5 files changed: +310 −14 lines changed


examples/dynamo/hierarchical_partitioner_example.py

Lines changed: 0 additions & 1 deletion
@@ -73,7 +73,6 @@ def main():
     # 1. Partition the model into blocks that can be executed by different backends
     partitioned_model, op_support = hierarchical_adjacency_partition(
         gm,
-        verbose=True,
         min_block_size=1,
         backend_priority=["inductor", "tensorrt"],
         backend_support_map={
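
For callers of the example, the only change at this call site is dropping the `verbose` argument; the operator-support overview is now printed unconditionally inside `hierarchical_adjacency_partition` (see the final hunk below). A minimal migration sketch, assuming `gm` and `backend_support_map` are prepared exactly as earlier in the example file (the map's contents are truncated in this diff, so it is referenced by name only), with the import path inferred from this commit's file layout:

from torch_tensorrt.dynamo.partitioning._hierarchical_partitioner import (
    hierarchical_adjacency_partition,  # path inferred from the file layout in this commit
)

# Before: hierarchical_adjacency_partition(gm, verbose=True, min_block_size=1, ...)
# After: omit `verbose`; the remaining arguments are unchanged.
partitioned_model, op_support = hierarchical_adjacency_partition(
    gm,                                          # FX GraphModule built earlier in the example (assumed)
    min_block_size=1,
    backend_priority=["inductor", "tensorrt"],
    backend_support_map=backend_support_map,     # mapping defined earlier in the example (assumed)
)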

py/torch_tensorrt/dynamo/partitioning/_adjacency_partitioner.py

Lines changed: 0 additions & 1 deletion
@@ -261,7 +261,6 @@ def partition(

    Args:
        gm: FX GraphModule to partition
-       verbose: Bool representing whether to print operator support
        min_block_size: Minimum number of operators per TRT-Engine Block
        torch_executed_ops: Collection of operations to run in Torch, regardless of converter coverage
        require_full_compilation: Require that all computational operators be run in TRT

py/torch_tensorrt/dynamo/partitioning/_global_partitioner.py

Lines changed: 0 additions & 1 deletion
@@ -210,7 +210,6 @@ def partition(

    Args:
        gm: FX GraphModule to partition
-       verbose: Bool representing whether to print operator support
        min_block_size: Minimum number of operators per TRT-Engine Block
        torch_executed_ops: Collection of operations to run in Torch, regardless of converter coverage
        require_full_compilation: Whether to require that all operators be run in TRT

py/torch_tensorrt/dynamo/partitioning/_hierarchical_partitioner.py

Lines changed: 7 additions & 11 deletions
@@ -18,7 +18,6 @@
     is_node_output_tensor,
 )
 from torch_tensorrt.dynamo._defaults import (
-    DEBUG,
     MIN_BLOCK_SIZE,
     REQUIRE_FULL_COMPILATION,
 )
@@ -390,8 +389,8 @@ def put_nodes_into_subgraphs(self) -> list[Subgraph]:

         return subgraphs

-    def tag(self, subgraphs: list[Subgraph]):
-        self.tags = []
+    def tag(self, subgraphs: list[Subgraph]) -> None:
+        self.tags: list[str] = []
         for subgraph in subgraphs:
             tag = (
                 f"_run_on_acc_{subgraph.backend}_{len(self.tags)}"
@@ -403,7 +402,7 @@ def tag(self, subgraphs: list[Subgraph]):
             if hasattr(node, "tag"):
                 raise FxNetSplitterInternalError(f"Node {node} was already tagged")

-            node.tag = tag  # type: ignore[attr-defined]
+            node.tag = tag
             self._node_submodule_map[node.name] = tag

@@ -433,7 +432,7 @@ def __init__(
         self.allow_non_tensor = allow_non_tensor
         self.acc_nodes: NodeSet = set()

-    def reduce_acc_nodes_non_tensor_input_helper(self, cpu_worklist: NodeList):
+    def reduce_acc_nodes_non_tensor_input_helper(self, cpu_worklist: NodeList) -> None:
         """
         Transitively excludes nodes from ACC supported set.
         For every node in the worklist:
@@ -450,7 +449,7 @@ def reduce_acc_nodes_non_tensor_input_helper(self, cpu_worklist: NodeList):
                     if not is_node_output_tensor(user):
                         cpu_worklist.append(user)

-    def reduce_acc_nodes_non_tensor_input(self):
+    def reduce_acc_nodes_non_tensor_input(self) -> None:
         """
         Excludes nodes from ACC supported set that have direct
         upstream CPU nodes that produce non-tensor outputs.
@@ -468,7 +467,7 @@ def reduce_acc_nodes_non_tensor_input(self):

         self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)

-    def reduce_acc_nodes_non_tensor_output(self):
+    def reduce_acc_nodes_non_tensor_output(self) -> None:
         """
         Excludes nodes from ACC supported set that produce non-tensor
         outputs and have downstream CPU nodes.
@@ -527,7 +526,6 @@ class FxNetSplitterInternalError(Exception):

 def hierarchical_adjacency_partition(
     gm: torch.fx.GraphModule,
-    verbose: bool = DEBUG,
     min_block_size: int = MIN_BLOCK_SIZE,
     torch_executed_ops: Collection[Target] = set(),
     backend_support_map: Optional[Dict[str, Collection[Target]]] = None,
@@ -540,7 +538,6 @@ def hierarchical_adjacency_partition(

     Args:
         gm: FX GraphModule to partition
-        verbose: Bool representing whether to print operator support
         min_block_size: Minimum number of operators per TRT-Engine Block
         backend_support_map: Dictionary mapping backend names to sets of supported operators
         backend_priority: Ordered list of backend names, from highest to lowest priority
@@ -583,7 +580,6 @@ def hierarchical_adjacency_partition(

     partitioned_graph = partitioner.partition_graph()

-    if verbose:
-        supported_ops.print_support_overview(partitioner.num_accelerated_subgraphs)
+    supported_ops.print_support_overview(partitioner.num_accelerated_subgraphs)

     return partitioned_graph, supported_ops
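
Taken together, the last two hunks mean the support overview is no longer gated behind `verbose`: `print_support_overview` runs on every call, and the function still returns the partitioned graph together with the supported-ops object. A rough usage sketch under those assumptions, where `gm` stands in for an FX GraphModule prepared as in the example and the submodule naming is expected to follow the `_run_on_acc_{backend}_{index}` tag pattern from the `tag()` hunk above:

# Support overview prints unconditionally now; there is no `verbose` flag to pass.
partitioned_graph, supported_ops = hierarchical_adjacency_partition(
    gm,                # FX GraphModule from upstream tracing (assumed)
    min_block_size=1,
)

# Inspect the per-backend submodules the partitioner produced; names are expected
# to follow the _run_on_acc_<backend>_<index> tagging scheme shown in tag().
for name, submodule in partitioned_graph.named_children():
    print(name, type(submodule).__name__)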
