Commit 945b610

Post-rebase fixes
1 parent f0c7a92 commit 945b610

12 files changed
+36 -783 lines changed

Deeploy/CommonExtensions/CodeTransformationPasses/Closure.py

Lines changed: 1 addition & 1 deletion
@@ -155,7 +155,7 @@ def apply(self,
               executionBlock: ExecutionBlock,
               name: str,
               verbose: CodeGenVerbosity = _NoVerbosity) -> Tuple[NetworkContext, ExecutionBlock]:
-        # Add underscore to avoid name issues when beginning with problematic characters (like numbers)
+        # Prepend underscore to avoid name issues when beginning with problematic characters (like numbers)
         self.closureName = "_" + name + self.closureSuffix
         self.functionCall = executionBlock.generate(ctxt)
         self._generateClosureStruct(ctxt, executionBlock)
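
Note on this hunk: the leading underscore matters because closureName is emitted as a C function identifier, and C identifiers may not begin with a digit. A minimal sketch of the case it guards against (the node name here is hypothetical):

    # Hypothetical node name as produced by an ONNX exporter:
    name = "0_conv"
    closureSuffix = "_closure"
    closureName = "_" + name + closureSuffix   # "_0_conv_closure"
    # Without the underscore the generated C function would be named
    # "0_conv_closure", which is not a valid C identifier.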

Deeploy/CommonExtensions/OptimizationPasses/TopologyOptimizationPasses/LoweringOptimizationPasses.py

Lines changed: 7 additions & 75 deletions
@@ -124,11 +124,8 @@ def _transformLayoutPermutation(dims: int, spatialDims: int, targetChannelsFirst


 # Calculate permutation q = p^(-1) s.t. q(p(i)) = i
-def _invertPermutation(permutation: List[int]) -> List[int]:
-    inverse = [0] * len(permutation)
-    for idx, permIdx in enumerate(permutation):
-        inverse[permIdx] = idx
-    return inverse
+def _invertPermutation(permutation: Sequence[int]) -> List[int]:
+    return [permutation.index(i) for i in range(len(permutation))]


 T = TypeVar('T')
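
A quick worked check of the identity q(p(i)) = i with the new index-based implementation (the permutation value is illustrative):

    from typing import List, Sequence

    def _invertPermutation(permutation: Sequence[int]) -> List[int]:
        return [permutation.index(i) for i in range(len(permutation))]

    p = [0, 3, 1, 2]              # illustrative 4D axis permutation
    q = _invertPermutation(p)     # [0, 2, 3, 1]
    assert all(q[p[i]] == i for i in range(len(p)))

The index-based form is quadratic in the permutation length, which is harmless at tensor rank; in exchange it accepts any Sequence and raises ValueError if the input is not a valid permutation.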
@@ -283,31 +280,10 @@ def __init__(self, default_channels_first: bool = True):
         super().__init__(graph, partial(_NCHWtoNHWC_fun, default_channels_first = default_channels_first), name)


-@contextagnostic
-class NCHWtoNHWCPass(SequentialPass):
-
-    def __init__(self, default_channels_first: bool = True):
-        passes = [
-            NCHWtoNHWCPadPass(default_channels_first),
-            NCHWtoNHWCMaxPoolPass(default_channels_first),
-            NCHWtoNHWCConvPass(default_channels_first),
-            NCHWtoNHWCRequantizedConvPass(default_channels_first),
-        ]
-        super().__init__(*passes)
-
-
-def _PULPDWNCHWtoNHWC_fun(graph: gs.Graph, match: Match, name: str, default_channels_first: bool = True):
-
-    matched_nodes = [m for k, m in match.nodes_map.items()]
-    opNode = matched_nodes[0]
-    node_op = opNode.op
-
-    if 'group' in opNode.attrs and opNode.attrs['group'] == 1:
 def _NCWHtoNHWC_dw_fun(graph: gs.Graph, match: Match, name: str, default_channels_first: bool) -> gs.Graph:
     node = next(iter((match.nodes_map.values())))

     if not _isDepthwise(node):
-        if opNode.attrs.get('group', 1) == 1:
         return graph

     channels_first = node.attrs.get("channels_first", True)
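
_isDepthwise is defined elsewhere in this file and is not part of the diff. Judging only from the deleted check, which bailed out when group == 1, a plausible sketch is the following; this is an assumption for illustration, not the actual Deeploy helper:

    import onnx_graphsurgeon as gs

    def _isDepthwise(node: gs.Node) -> bool:
        # Assumption: group == 1 means a dense convolution (as in the deleted
        # code); the real helper may compare 'group' to the channel count.
        return node.attrs.get('group', 1) != 1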
@@ -340,47 +316,10 @@ def _NCWHtoNHWC_dw_fun(graph: gs.Graph, match: Match, name: str, default_channel
 class NCHWtoNHWCDwConvPass(ReplaceSequentialPatternPass):

     def __init__(self, default_channels_first: bool = True):
-        # Define pattern graph
-        graph = gs.Graph()
-
-        _input = gs.Variable(name = 'input_1')
-        output = graph.layer(inputs = [_input], outputs = ['convOut'], op = 'RequantizedConv', name = 'requantizedConv')
-
-        graph.outputs.append(output)
-        graph.inputs.append(_input)
-
-        # Define name
+        graph = _singleNodePattern(op = "Conv|RequantizedConv")
         name = "_NCHW_TO_NHWC_DW_CONV_PASS"
-
-        # Initialize Pass
-        super().__init__(pattern = graph,
-                         replacement_fn = partial(_PULPDWNCHWtoNHWC_fun,
-                                                  default_channels_first = default_channels_first),
-                         name = name)
-
-
-# Float DW Conv
-@contextagnostic
-class PULPFPDWConvPass(ReplaceSequentialPatternPass):
-
-    def __init__(self, default_channels_first: bool = True):
-        # Define pattern graph
-        graph = gs.Graph()
-
-        _input = gs.Variable(name = 'input_1')
-        output = graph.layer(inputs = [_input], outputs = ['convOut'], op = 'Conv', name = 'conv')
-
-        graph.outputs.append(output)
-        graph.inputs.append(_input)
-
-        # Define name
-        name = "_NCHW_TO_NHWC_FP_DW_CONV_PASS"
-
-        # Initialize Pass
-        super().__init__(pattern = graph,
-                         replacement_fn = partial(_PULPDWNCHWtoNHWC_fun,
-                                                  default_channels_first = default_channels_first),
-                         name = name)
+        super().__init__(graph, partial(_NCWHtoNHWC_dw_fun, default_channels_first = default_channels_first), name,
+                         NonBranchingMatcher(regex_op = True))


 def _PULP_NCHWtoNHWC_dw_fun(graph: gs.Graph, match: Match, name: str, default_channels_first: bool = True):
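
_singleNodePattern replaces the hand-built one-node pattern graphs deleted above; its definition is not shown in this diff. A sketch of what it plausibly does, mirroring the deleted construction (an assumption, not the actual helper); the op string is matched as a regular expression because the pass installs NonBranchingMatcher(regex_op = True):

    import onnx_graphsurgeon as gs

    def _singleNodePattern(op: str) -> gs.Graph:
        # One-node pattern graph; "Conv|RequantizedConv" matches either op
        # once the matcher interprets op names as regexes.
        graph = gs.Graph()
        _input = gs.Variable(name = 'input_1')
        output = graph.layer(inputs = [_input], outputs = ['out'], op = op, name = 'node')
        graph.outputs.append(output)
        graph.inputs.append(_input)
        return graph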
@@ -425,10 +364,8 @@ def __init__(self, default_channels_first: bool = True):
         passes = [
             NCHWtoNHWCPadPass(default_channels_first),
             NCHWtoNHWCMaxPoolPass(default_channels_first),
-            PULPDWConvPass(default_channels_first),
-            PULPFPDWConvPass(default_channels_first),
-            PULPNCHWtoNHWCDenseConvPass(default_channels_first),
-            PULPNCHWtoNHWCDenseRequantizedConvPass(default_channels_first),
+            NCHWtoNHWCDwConvPass(default_channels_first),
+            NCHWtoNHWCConvPass(default_channels_first),
         ]
         super().__init__(*passes)

@@ -494,11 +431,6 @@ def _requantized_gemm_to_pw_fun(graph: gs.Graph, match: Match, name: str):
     matrixAExpandDimsNode, pwIn = _appendExpandDims(matrixA, name, axis = expandAxis)
     graph.nodes.append(matrixAExpandDimsNode)

-    # If transB is set then the matrix is of shape [N x K] and it doesn't need to be transposed, otherwise its shape is [K x N] and it has to be transposed
-    if not 'transB' in requantizedGemm.attrs or requantizedGemm.attrs['transB'] == 0:
-        # matrixBTransposed, shape [N x K]
-        matrixBTransposeNode, matrixB = _appendTransposeNode(matrixB, name, _permutationLastTwoDims(len(matrixB.shape)))
-        graph.nodes.append(matrixBTransposeNode)
     # pwWeight, shape [N x 1 x 1 x K]
     matrixBExpandDimsNode, pwWeight = _appendExpandDims(matrixB, name, axis = (1, 2))
     graph.nodes.append(matrixBExpandDimsNode)
Deeploy/DeeployTypes.py

Lines changed: 1 addition & 15 deletions
@@ -398,17 +398,6 @@ def sizeInBytes(self) -> int:
         """
         return (math.prod(self.shape) * (self._type.referencedType.typeWidth)) // 8

-    def sizeInBytes(self) -> int:
-        """Returns the size of this VariableBuffer in bytes
-
-        Returns
-        -------
-        int
-            Size of this VariableBuffer in bytes
-
-        """
-        return (math.prod(self.shape) * (self._type.referencedType.typeWidth)) // 8
-

 class TransientBuffer(VariableBuffer):
     """Class to represent memory space required by kernels that is not covered by input and output tensors, e.g. im2col buffers in convolutions
@@ -444,9 +433,6 @@ def __repr__(self) -> str:
     def sizeInBytes(self) -> int:
         return int(self.size)

-    def sizeInBytes(self) -> int:
-        return int(self.size)
-

 class ConstantBuffer(VariableBuffer):
     """Class to represent compile-time constant tensors (weights, biases, other parameters) within Deeploy.
@@ -890,7 +876,7 @@ def is_buffer(self, value: Any) -> bool:
         obj = self.lookup(value)
         return isinstance(obj, VariableBuffer)

-    def hoistTransientBuffer(self, name: str, size: Union[int, str]) -> str:
+    def hoistTransientBuffer(self, name: str, size: int) -> str:
         """Registers a new TransientBuffer in the local context

         Parameters
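
Narrowing size from Union[int, str] to int means callers must resolve any symbolic size expression to a concrete byte count before hoisting. A hypothetical call under the new signature (buffer name and size are illustrative):

    # ctxt is a NetworkContext inside a code-generation pass
    bufName = ctxt.hoistTransientBuffer("_im2col_buffer", size = 4096)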

Deeploy/MemoryLevelExtension/NetworkDeployers/MemoryLevelDeployer.py

Lines changed: 0 additions & 15 deletions
@@ -128,18 +128,6 @@ def getTargetMemoryLevelMapping(self) -> TargetMemoryLevelMapping:
             f"Platform should be a MemoryPlatform or MemoryPlatformWrapper! Got {type(self.Platform).__name__}"
         return TargetMemoryLevelMapping(self.graph, self.Platform, self.ctxt)

-    def _parseNode(self, node: ONNXLayer, ctxt: NetworkContext,
-                   default_channels_first: bool) -> Tuple[NetworkContext, bool]:
-
-        newCtxt, parsePass = super()._parseNode(node, ctxt, default_channels_first)
-
-        if not parsePass:
-            return ctxt, False
-
-        newCtxt, self.graph = self.memoryLevelAnnotationOptimizer.optimize(newCtxt, self.graph)
-
-        return newCtxt, parsePass
-
     def bind(self):
         log.info("- Perform Memory Level Annotation")
         # LMACAN: Annotate before bind because during binding (specifically alignToContext) templates
@@ -150,7 +138,6 @@ def bind(self):
         if not ret:
             return False

-        log.info("- Perform Memory Level Annotation")
         # SCHEREMO: There might be hoisting; reassign memoryLevel preferences
         self.ctxt, self.graph = self.memoryLevelAnnotationOptimizer.optimize(self.ctxt, self.graph)

@@ -195,7 +182,6 @@ def bind(self):
         if not ret:
             return False

-        log.info("- Perform Memory Level Annotation")
         # SCHEREMO: There might be hoisting; reassign memoryLevel preferences
         self.ctxt, self.graph = self.memoryLevelAnnotationOptimizer.optimize(self.ctxt, self.graph)

@@ -231,7 +217,6 @@ def bind(self):
         if not ret:
             return False

-        log.info("- Perform Memory Level Annotation")
         # SCHEREMO: There might be hoisting; reassign memoryLevel preferences
         self.ctxt, self.graph = self.memoryLevelAnnotationOptimizer.optimize(self.ctxt, self.graph)
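
After these deletions, all three bind() variants share the same shape: one annotation pass before binding (templates consulted during alignToContext need memory levels) and one afterwards to cover buffers hoisted during binding. Schematically, as a condensed sketch rather than the verbatim method:

    def bind(self) -> bool:
        log.info("- Perform Memory Level Annotation")
        # Annotate before binding: alignToContext may consult memory levels
        self.ctxt, self.graph = self.memoryLevelAnnotationOptimizer.optimize(self.ctxt, self.graph)
        if not super().bind():
            return False
        # There might be hoisting; reassign memoryLevel preferences
        self.ctxt, self.graph = self.memoryLevelAnnotationOptimizer.optimize(self.ctxt, self.graph)
        return True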

Deeploy/Targets/Generic/Parsers.py

Lines changed: 4 additions & 30 deletions
@@ -1068,36 +1068,10 @@ def parseNodeCtxt(self,
                      node: gs.Node,
                      channels_first: bool = True) -> Tuple[NetworkContext, bool]:

-        # Define names of node inputs and outputs, according to the ONNX standard
-        inputs = ['data_in', 'shape']
-        outputs = ['data_out']
-
-        # Map inputs and outputs to their corresponding names in the operator representation
-        for idx, inputNode in enumerate(node.inputs):
-            self.operatorRepresentation[inputs[idx]] = ctxt.lookup(inputNode.name).name
-        for idx, outputNode in enumerate(node.outputs):
-            self.operatorRepresentation[outputs[idx]] = ctxt.lookup(outputNode.name).name
-
-        # Update alias_of parameter for the output node
-        output_node = ctxt.lookup(node.outputs[outputs.index("data_out")].name)
-        input_node = ctxt.lookup(node.inputs[inputs.index("data_in")].name)
-
-        # Prepare new aliases
-        new_output_node_aliases = input_node.get_aliases_of()
-        new_output_node_aliases.append(input_node.name)
-
-        # Add new aliases to output node
-        output_node.add_aliases(aliases_to_add = new_output_node_aliases)
-
-        # Add output node as alias to its aliases (alias relationship is symmetric)
-        for alias in output_node.get_aliases_of():
-            alias_node = ctxt.lookup(alias)
-            alias_node.add_aliases(aliases_to_add = [
-                output_node.name,
-            ])
-
-        # Compute data size
-        self.operatorRepresentation['size'] = np.prod(ctxt.lookup(node.inputs[0].name).shape)
+        for tensor, symName in zip(node.inputs, ['data_in', 'shape']):
+            self.operatorRepresentation[symName] = ctxt.lookup(tensor.name).name
+        for tensor, symName in zip(node.outputs, ['data_out']):
+            self.operatorRepresentation[symName] = ctxt.lookup(tensor.name).name

         return ctxt, True
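
Because zip stops at the shorter of its arguments, the rewritten mapping tolerates nodes that omit the optional shape input: a single-input node simply maps data_in and skips shape. A quick illustration:

    names = ['data_in', 'shape']
    node_inputs = ['x']                     # hypothetical single-input node
    print(list(zip(node_inputs, names)))    # [('x', 'data_in')]

Note that the deleted code also populated operatorRepresentation['size']; any template that still reads size for this parser now needs it supplied elsewhere.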
