Merged

84 commits
27dc08c
Added files for Norm and Transpose. Added debug for Pad layers
Aldrago98 Jun 6, 2025
9220d4a
Floating point support for 1D convolution padding
Aldrago98 Jun 10, 2025
8543928
MaxPool1D and Convolution1D floating point support
Aldrago98 Jun 11, 2025
edf7b51
Fix computeShapes in ConvTranspose
Aldrago98 Jun 13, 2025
a1de3a6
Add base class for ConvTranspose
Aldrago98 Jun 13, 2025
85e1f7c
Added Conv1D parameters and implemented the C function
Aldrago98 Jun 17, 2025
ef72f61
Modify index in Batch C function
Aldrago98 Jun 18, 2025
d6bbc8f
Adjust pointer increments
Aldrago98 Jun 18, 2025
28cb26b
debug tensors
Aldrago98 Jun 18, 2025
0e8e901
Fixed Conv parameters
Aldrago98 Jun 19, 2025
9995c34
Code cleaned
Aldrago98 Jun 24, 2025
def9e3c
modified CI file
Aldrago98 Jun 25, 2025
c87e7f6
Fixed conv template and format
Aldrago98 Jun 25, 2025
d290086
Fixed ConvTemplate and DeeployTypes
Aldrago98 Jun 25, 2025
c37cd73
Fixed Conv1D parameters
Aldrago98 Jun 26, 2025
b5f192c
Fixed submodules inclusion
Aldrago98 Jun 27, 2025
6103bcf
Fixed Include in DeeployBasicMath
Aldrago98 Jun 27, 2025
6f76e15
Changed comment
Aldrago98 Jun 27, 2025
c002e61
Update changelog
Aldrago98 Jun 27, 2025
e6e826d
Update CI file
Aldrago98 Jun 27, 2025
eb471cf
Add template in binding.py and format
Aldrago98 Jun 27, 2025
06ac67e
Removed Italian comments and updated changelog.md
Aldrago98 Jul 1, 2025
50a6a42
comment format
Aldrago98 Jul 1, 2025
24d26ac
Suggested changes for PR
Aldrago98 Jul 3, 2025
9d9dabf
Added files for Norm and Transpose. Added debug for Pad layers
Aldrago98 Jun 6, 2025
8d11c56
Floating point support for 1D convolution padding
Aldrago98 Jun 10, 2025
243cdac
MaxPool1D and Convolution1D floating point support
Aldrago98 Jun 11, 2025
3c83739
Fix computeShapes in ConvTranspose
Aldrago98 Jun 13, 2025
0fe881e
Add base class for ConvTranspose
Aldrago98 Jun 13, 2025
b2be48b
Added Conv1D parameters and implemented the C function
Aldrago98 Jun 17, 2025
2f6526c
Modify index in Batch C function
Aldrago98 Jun 18, 2025
cb65611
Adjust pointer increments
Aldrago98 Jun 18, 2025
45ebf16
debug tensors
Aldrago98 Jun 18, 2025
25345d9
Fixed Conv parameters
Aldrago98 Jun 19, 2025
614b297
Code cleaned
Aldrago98 Jun 24, 2025
262e977
modified CI file
Aldrago98 Jun 25, 2025
fc8f3d4
Fixed conv template and format
Aldrago98 Jun 25, 2025
59ad506
Fixed ConvTemplate and DeeployTypes
Aldrago98 Jun 25, 2025
d706543
Fixed Conv1D parameters
Aldrago98 Jun 26, 2025
c5aafd3
Fixed submodules inclusion
Aldrago98 Jun 27, 2025
30b972b
Fixed Include in DeeployBasicMath
Aldrago98 Jun 27, 2025
87f14e6
Changed comment
Aldrago98 Jun 27, 2025
2b8913b
Update changelog
Aldrago98 Jun 27, 2025
78df5df
Removed Italian comments and updated changelog.md
Aldrago98 Jul 1, 2025
79c7049
comment format
Aldrago98 Jul 1, 2025
85b18b2
Format Bindings and TypeCheckers
Aldrago98 Jul 3, 2025
5463ff7
Prepare v0.2.0 Release (#102)
Xeratec Jul 8, 2025
6a0d40d
Added files for Norm and Transpose. Added debug for Pad layers
Aldrago98 Jun 6, 2025
4bcf8f7
MaxPool1D and Convolution1D floating point support
Aldrago98 Jun 11, 2025
ca13c16
Code cleaned
Aldrago98 Jun 24, 2025
da73f15
modified CI file
Aldrago98 Jun 25, 2025
3c3347d
Fixed conv template and format
Aldrago98 Jun 25, 2025
3992962
Added files for Norm and Transpose. Added debug for Pad layers
Aldrago98 Jun 6, 2025
c83db1c
Floating point support for 1D convolution padding
Aldrago98 Jun 10, 2025
ffe6cd8
MaxPool1D and Convolution1D floating point support
Aldrago98 Jun 11, 2025
7f5ef39
Fix computeShapes in ConvTranspose
Aldrago98 Jun 13, 2025
2cb59c5
Added Conv1D parameters and implemented the C function
Aldrago98 Jun 17, 2025
1f2f47d
Modify index in Batch C function
Aldrago98 Jun 18, 2025
b65b64e
Adjust pointer increments
Aldrago98 Jun 18, 2025
28c44d9
Code cleaned
Aldrago98 Jun 24, 2025
5997669
modified CI file
Aldrago98 Jun 25, 2025
5be590c
Fixed conv template and format
Aldrago98 Jun 25, 2025
1786812
Fixed ConvTemplate and DeeployTypes
Aldrago98 Jun 25, 2025
398cd34
Fixed Conv1D parameters
Aldrago98 Jun 26, 2025
c673d84
Fixed submodules inclusion
Aldrago98 Jun 27, 2025
3f2ea92
Added imports and formatting
Aldrago98 Jul 15, 2025
0747770
Update DeeployBasicMath.h
Aldrago98 Jul 23, 2025
d21c6f9
Remove math.h from DeeployBasicMath.h
Aldrago98 Jul 23, 2025
ad25164
Format DeeployBasicMath.h
Aldrago98 Jul 24, 2025
08ad563
Put changes on top in changelog.md
Aldrago98 Jul 24, 2025
d1d5539
Remove unnecessary code and format
Aldrago98 Jul 31, 2025
17c1fdf
Update launch.json
Aldrago98 Aug 5, 2025
b0151f1
Update DeeployTypes.py
Aldrago98 Aug 5, 2025
4a451c8
Indent DeeployTypes.py
Aldrago98 Aug 5, 2025
bdf4d36
Indentation and format of DeeployTypes
Aldrago98 Aug 5, 2025
0234fb3
Blank lines in ConvTemplate, passes, and CMake
Aldrago98 Aug 5, 2025
a4af405
json blank
Aldrago98 Aug 5, 2025
61396c6
Format fixes
diaconuccalin Oct 17, 2025
55cb3bf
Quickfix
diaconuccalin Oct 17, 2025
0e1927d
Added copyright info in the files where it was missing
diaconuccalin Oct 17, 2025
3ab0478
Post-merge duplicate removals
diaconuccalin Oct 21, 2025
c0753ea
Removed unnecessary new ConvTranspose type checker. Added the generic…
diaconuccalin Oct 21, 2025
2709d18
Implement remaining changes
Xeratec Oct 24, 2025
8fc7cc6
Reduce code duplication
Xeratec Oct 24, 2025
20 changes: 10 additions & 10 deletions .vscode/c_cpp_properties.json
@@ -1,12 +1,12 @@
{
"configurations": [
{
"name": "cMake",
"configurationProvider": "ms-vscode.cmake-tools",
"compileCommands": [
"${workspaceFolder}/DeeployTest/TEST_RECENT/build/compile_commands.json"
]
}
],
"version": 4
"configurations": [
{
"name": "cMake",
"configurationProvider": "ms-vscode.cmake-tools",
"compileCommands": [
"${workspaceFolder}/DeeployTest/TEST_RECENT/build/compile_commands.json"
]
}
],
"version": 4
}
10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -4,6 +4,7 @@ This file contains the changelog for the Deeploy project. The changelog is divid
## Unreleased (Planned Release Target: v0.2.1)

### List of Pull Requests
- Support for 1D Autoencoder [#98](https://github.com/pulp-platform/Deeploy/pull/98)
- Refactor Logging for Improved Debugging [#115](https://github.com/pulp-platform/Deeploy/pull/115)
- Add reuse-tool as an SPDX license header linter [#113](https://github.com/pulp-platform/Deeploy/pull/113)
- Bug fixes, API Cleanup and Reduce Compiler Warning on PULP [#112](https://github.com/pulp-platform/Deeploy/pull/112)
@@ -158,6 +159,13 @@ This release containing major architectural changes, new platform support, enhan


### Added
- BatchNorm kernel
- ConvTranspose kernel
- MaxPool1D kernel
- Template for 1D Convolution
- Support for float32 data type in the previous kernels
- Float binding for Pad1D kernel
- Test for Autoencoder1D in the CI pipeline
- ChimeraDeployer, currently mainly a placeholder
- Allocate templates for Chimera
- ChimeraPlatform, using appropriate allocation templates and using the generic Parser + Binding for the Add node
@@ -291,6 +299,8 @@ This release containing major architectural changes, new platform support, enhan
- `dev-requirements.txt` tracking the dependencies of the build system, linting, documentation, and QOL.

### Changed
- FloatConvTemplate file
- Platform.py file
- Bump the CMake version to 3.24 as required for the chimera-sdk
- Bump GVSoC's version and add chimera simulation target
- Rename the generic source util to utils to avoid name collision with chimera-sdk
15 changes: 10 additions & 5 deletions Deeploy/CommonExtensions/TypeCheckers/SignPropTypeChecker.py
@@ -53,10 +53,15 @@ def typeInferOutput(self, ctxt: NetworkContext, node: gs.Node,
for obj, nLevel, sign in zip(outputs, nLevels, signedness):
obj.nLevels = nLevel
obj._signed = sign

if issubclass(obj._type.referencedType, IntegerImmediate) and not obj._type.fitsNumLevels(nLevel):
log.warning(
f"{obj.name} has {nLevel} levels, but {obj._type.referencedType.typeName} only supports {obj._type.referencedType.nLevels} levels."
)
else:
if issubclass(obj._type.referencedType, IntegerImmediate) and not obj._type.fitsNumLevels(nLevel):
log.warning(
f"{obj.name} has {nLevel} levels, but {obj._type.referencedType.typeName} only supports {obj._type.referencedType.nLevels} levels."
)

if issubclass(obj._type.referencedType, IntegerImmediate) and not obj._type.fitsNumLevels(nLevel):
log.warning(
f"{obj.name} has {nLevel} levels, but {obj._type.referencedType.typeName} only supports {obj._type.referencedType.nLevels} levels."
)

return ctxt
1 change: 0 additions & 1 deletion Deeploy/DeeployTypes.py
@@ -1643,7 +1643,6 @@ def typeCheck(self, ctxt: NetworkContext, node: gs.Node,
matches the node

"""

newCtxt, ret = self.typeChecker.typeCheck(ctxt.copy(), node, operatorRepresentation)
if ret:
log.debug(f" {SUCCESS_MARK} Type check passed for {self}")
73 changes: 58 additions & 15 deletions Deeploy/Targets/Generic/Bindings.py
@@ -11,19 +11,20 @@
int8_t, int32_t, uint8_t
from Deeploy.DeeployTypes import CodeTransformation, NodeBinding
from Deeploy.FutureExtension.CodeTransformationPasses.FutureCodeTransformation import FutureGeneration
from Deeploy.Targets.Generic.Templates import AddTemplate, ConcatTemplate, ConvTemplate, DebugPrintTemplate, \
DequantTemplate, DummyTemplate, DWConvTemplate, FloatAddTemplate, FloatConvTemplate, FloatDivTemplate, \
FloatDWConvTemplate, FloatGELUTemplate, FloatGemmTemplate, FloatLayernormTemplate, FloatMatMulTemplate, \
FloatMaxPoolTemplate, FloatMulTemplate, FloatPadTemplate, FloatReduceMeanTemplate, FloatReluTemplate, \
FloatSoftmaxTemplate, GatherTemplate, GemmTemplate, IntegerDivTemplate, ITAMaxTemplate, ITAPartialMaxTemplate, \
MatMulTemplate, MaxPoolTemplate, MulTemplate, PadTemplate, QuantTemplate, ReduceMeanTemplate, ReduceSumTemplate, \
RequantShiftTemplate, ReshapeTemplate, RQIntegerDivTemplate, RQSiGELUTemplate, SliceTemplate, TransposeTemplate, \
iGELUTemplate, iLayernormTemplate, iRMSNormTemplate, iSoftmaxTemplate
from Deeploy.Targets.Generic.TypeCheckers import AddChecker, ConcatChecker, ConvChecker, DebugPrintChecker, \
DequantChecker, DivChecker, DummyChecker, GatherChecker, GELUChecker, GEMMChecker, LayerNormChecker, \
MatMulChecker, MaxPoolChecker, MulChecker, PadChecker, QuantChecker, ReduceMeanChecker, ReduceSumChecker, \
ReluChecker, RequantShiftChecker, ReshapeChecker, RQIntegerDivChecker, SliceChecker, SoftmaxChecker, \
TransposeChecker
from Deeploy.Targets.Generic.Templates import AddTemplate, BatchNormalizationTemplate, ConcatTemplate, ConvTemplate, \
ConvTransposeTemplate, DebugPrintTemplate, DequantTemplate, DummyTemplate, DWConvTemplate, FloatAddTemplate, \
FloatConvTemplate, FloatDivTemplate, FloatDWConvTemplate, FloatGELUTemplate, FloatGemmTemplate, \
FloatLayernormTemplate, FloatMatMulTemplate, FloatMaxPoolTemplate, FloatMulTemplate, FloatPadTemplate, \
FloatReduceMeanTemplate, FloatReluTemplate, FloatSoftmaxTemplate, GatherTemplate, GemmTemplate, \
IntegerDivTemplate, ITAMaxTemplate, ITAPartialMaxTemplate, MatMulTemplate, MaxPoolTemplate, MulTemplate, \
PadTemplate, QuantTemplate, ReduceMeanTemplate, ReduceSumTemplate, RequantShiftTemplate, ReshapeTemplate, \
RQIntegerDivTemplate, RQSiGELUTemplate, SliceTemplate, TransposeTemplate, iGELUTemplate, iLayernormTemplate, \
iRMSNormTemplate, iSoftmaxTemplate
from Deeploy.Targets.Generic.TypeCheckers import AddChecker, BatchNormChecker, ConcatChecker, ConvChecker, \
ConvTransposeChecker, DebugPrintChecker, DequantChecker, DivChecker, DummyChecker, GatherChecker, GELUChecker, \
GEMMChecker, LayerNormChecker, MatMulChecker, MaxPoolChecker, MulChecker, PadChecker, QuantChecker, \
ReduceMeanChecker, ReduceSumChecker, ReluChecker, RequantShiftChecker, ReshapeChecker, RQIntegerDivChecker, \
SliceChecker, SoftmaxChecker, TransposeChecker

BasicTransformer = CodeTransformation([ArgumentStructGeneration(), MemoryManagementGeneration(), FutureGeneration()])

@@ -53,8 +54,14 @@
FloatAddTemplate.referenceTemplate, BasicTransformer)
]

BasicConv1DBinding = NodeBinding(ConvChecker([PointerClass(int8_t), PointerClass(int8_t)], [PointerClass(int32_t)]),
ConvTemplate.reference1DTemplate, BasicTransformer)
BasicConv1DBindings = [
NodeBinding(ConvChecker(
[PointerClass(type), PointerClass(type), PointerClass(type)], [PointerClass(type)]),
FloatConvTemplate.reference1DTemplate, BasicTransformer) for type in FloatDataTypes
] + [
NodeBinding(ConvChecker([PointerClass(int8_t), PointerClass(int8_t)], [PointerClass(int32_t)]),
ConvTemplate.reference1DTemplate, BasicTransformer)
]

BasicDWConv1DBinding = NodeBinding(ConvChecker([PointerClass(int8_t), PointerClass(int8_t)], [PointerClass(int32_t)]),
DWConvTemplate.reference1DTemplate, BasicTransformer)
@@ -147,6 +154,11 @@
FloatMatMulTemplate.referenceTemplate, BasicTransformer)
]

BasicMaxPool1DBindings = [
NodeBinding(MaxPoolChecker([PointerClass(type)], [PointerClass(type)]), FloatMaxPoolTemplate.reference1DTemplate,
BasicTransformer) for type in FloatDataTypes
]

BasicMaxPool2DBindings = [
NodeBinding(MaxPoolChecker([PointerClass(int8_t)], [PointerClass(int8_t)]), MaxPoolTemplate.referenceTemplate,
BasicTransformer)
@@ -167,7 +179,11 @@
BasicPad1DBindings = [
NodeBinding(PadChecker([PointerClass(type)], [PointerClass(type)]), PadTemplate.reference1DTemplate,
BasicTransformer) for type in SignedIntegerDataTypes
] + [
NodeBinding(PadChecker([PointerClass(type)], [PointerClass(type)]), FloatPadTemplate.reference1DTemplate,
BasicTransformer) for type in FloatDataTypes
]

BasicPad2DBindings = [
NodeBinding(PadChecker([PointerClass(type)], [PointerClass(type)]), PadTemplate.reference2DTemplate,
BasicTransformer) for type in SignedIntegerDataTypes
@@ -266,3 +282,30 @@
NodeBinding(DequantChecker([PointerClass(int32_t)], [PointerClass(float32_t)]), DequantTemplate.referenceTemplate,
BasicTransformer),
]

BasicBatchNormBindings = [
NodeBinding(
BatchNormChecker(
[PointerClass(type),
PointerClass(type),
PointerClass(type),
PointerClass(type),
PointerClass(type)], [PointerClass(type)]), BatchNormalizationTemplate.referenceTemplate, BasicTransformer)
for type in FloatDataTypes
]

BasicConvTransposeBindings = [
NodeBinding(
ConvTransposeChecker(
[PointerClass(type), PointerClass(type), PointerClass(type)], # input, weight, bias
[PointerClass(type)]),
ConvTransposeTemplate.referenceTemplate,
BasicTransformer) for type in FloatDataTypes
] + [
NodeBinding(
ConvTransposeChecker(
[PointerClass(type), PointerClass(type)], # input, weight
[PointerClass(type)]),
ConvTransposeTemplate.referenceTemplate,
BasicTransformer) for type in FloatDataTypes
]
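
The two comprehensions above cover the ONNX ConvTranspose signature both with and without its optional bias input, emitting one binding per float type. As a rough sketch of what they expand to, assuming `FloatDataTypes` contains only `float32_t`:

```python
# Illustrative expansion only; the identifiers are the ones imported in Bindings.py.
BasicConvTransposeBindings = [
    # with bias: input, weight, bias -> output
    NodeBinding(
        ConvTransposeChecker(
            [PointerClass(float32_t), PointerClass(float32_t), PointerClass(float32_t)],
            [PointerClass(float32_t)]),
        ConvTransposeTemplate.referenceTemplate, BasicTransformer),
    # without bias: input, weight -> output
    NodeBinding(
        ConvTransposeChecker([PointerClass(float32_t), PointerClass(float32_t)],
                             [PointerClass(float32_t)]),
        ConvTransposeTemplate.referenceTemplate, BasicTransformer),
]
```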
64 changes: 64 additions & 0 deletions Deeploy/Targets/Generic/Layers.py
@@ -618,3 +618,67 @@ class DequantLayer(ONNXLayer):

def __init__(self, maps: List[NodeMapper]):
super().__init__(maps)


class BatchNormalizationLayer(ONNXLayer):

def __init__(self, maps: List[NodeMapper]):
super().__init__(maps)

def computeOps(self):
# 5 operations per element: sub, mul, add, sqrt, div
B = self.mapper.parser.operatorRepresentation['batch_size']
C = self.mapper.parser.operatorRepresentation['channel_size']
W = self.mapper.parser.operatorRepresentation['window_size']
return B * C * W * 5
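
The "5 operations per element" count corresponds to the standard inference-time BatchNorm expression. A minimal numpy sketch of that reference computation (illustrative only, not the Deeploy kernel; the names `x`, `scale`, `bias`, `mean`, `var`, `eps` are assumptions):

```python
import numpy as np


def batchnorm_1d_reference(x, scale, bias, mean, var, eps = 1e-5):
    # x: [B, C, W]; scale, bias, mean, var: [C], broadcast over batch and width.
    # Per element this is the sub, mul, add, sqrt, div counted above.
    c = (1, x.shape[1], 1)
    return scale.reshape(c) * (x - mean.reshape(c)) / np.sqrt(var.reshape(c) + eps) \
        + bias.reshape(c)


# With B = 1, C = 4, W = 16, computeOps() reports 1 * 4 * 16 * 5 = 320 operations.
```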


class ConvTransposeLayer(ONNXLayer):

def __init__(self, maps: List[NodeMapper]):
super().__init__(maps)

def computeShapes(self, inputShapes: Shape, outputShapes: Shape, operatorRepresentation,
channels_first) -> Tuple[Shape, Shape]:
"""
Infers output shapes for ConvTranspose using only static info.
- inputShapes[0]: input tensor shape (e.g., [N, C_in, W] for 1D, [N, C_in, H, W] for 2D)
- inputShapes[1]: weight tensor shape (e.g., [C_in, C_out // group, kW] for 1D)
- outputShapes[0]: output tensor shape (to be updated)
"""
newInputShapes = list(inputShapes)
newOutputShapes = list(outputShapes)
group = operatorRepresentation.get('group', 1)
weight_shape = inputShapes[1]

if newOutputShapes and len(newOutputShapes[0]) >= 2:
if channels_first:
# For 1D: weight_shape = [C_in, C_out // group, kW]
# For 2D: weight_shape = [C_in, C_out // group, kH, kW]
ch_out = weight_shape[1] * group
newOutputShapes[0][1] = ch_out
else:
# For 1D: weight_shape = [C_in, C_out // group, kW]
# For 2D: weight_shape = [C_in, C_out // group, kH, kW]
ch_out = weight_shape[-2] * group
newOutputShapes[0][-1] = ch_out

return newInputShapes, newOutputShapes
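
A quick worked example of this inference, under the channels_first layout the docstring assumes (hypothetical shapes, for illustration only):

```python
# input [N, C_in, W] and weight [C_in, C_out // group, kW], group = 1
inputShapes = [[1, 4, 16], [4, 8, 3]]
outputShapes = [[1, 0, 16]]              # channel dim still to be patched

ch_out = inputShapes[1][1] * 1           # weight_shape[1] * group = 8
outputShapes[0][1] = ch_out              # -> [1, 8, 16]
```

Only the channel dimension is patched here; the spatial dimension is taken from the shape already present in `outputShapes[0]`.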

def computeOps(self):
opRep = self.mapper.parser.operatorRepresentation

groups = opRep.get('group', 1)
kernel_shape = np.prod(opRep['kernel_shape'])  # e.g. [3, 3] -> 9
ch_in = opRep['ch_im_in']
ch_out = opRep['ch_im_out']

opsPerPx = int(kernel_shape * ch_in * ch_out / groups) * 2

# ConvTranspose upscales the spatial dims, so the pixel count comes from the output
if 'dim_im_out_y' in opRep:
numPx = opRep['dim_im_out_x'] * opRep['dim_im_out_y']
else:
numPx = opRep['dim_im_out_x']

return numPx * opsPerPx
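
As a sanity check on this op count, a small worked example with hypothetical parameter values (not taken from the PR's tests):

```python
# 1D ConvTranspose: kernel_shape = [3], ch_im_in = 4, ch_im_out = 8,
# group = 1, dim_im_out_x = 16 (no dim_im_out_y in the 1D case).
opsPerPx = int(3 * 4 * 8 / 1) * 2    # each MAC counted as 2 ops -> 192
numPx = 16                           # pixels come from the (upscaled) output
total = numPx * opsPerPx             # -> 3072 operations
```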