Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
93 commits
Select commit Hold shift + click to select a range
9ec13f9
Add classifier training support
runwangdl Mar 17, 2025
f1a0491
Fix L3 DMA and Maxpool Bugs
runwangdl Mar 3, 2025
29baf2c
WIP Static Memory Allocation of IOs
Victor-Jung Mar 3, 2025
25be229
Temporary fix broken float softmax
Victor-Jung Mar 4, 2025
da56cbe
Fix lifetime of aliased input buffers
Victor-Jung Mar 4, 2025
721f747
Fix output buffer lifetime
Victor-Jung Mar 4, 2025
78685e5
Linting
Victor-Jung Mar 4, 2025
02b5435
WIP fix output buffer lifetime
Victor-Jung Mar 4, 2025
a2d67a0
Change RQHardswish dim due to compiler bug
Victor-Jung Mar 4, 2025
bdd92de
Fix typo
Victor-Jung Mar 4, 2025
20b1f8b
Fix duplicated IO in memory allocation visualization
Victor-Jung Mar 4, 2025
c708069
Fix the Constant Tensor offset to not take into account IO since they…
Victor-Jung Mar 4, 2025
b6e2448
Add new attribute to Variable and Transient buffer to annotate if the…
Victor-Jung Mar 7, 2025
7e96f18
Adapt calculateLifetime to use buffer I/O annotation
Victor-Jung Mar 7, 2025
b923520
Fix typo
Victor-Jung Mar 7, 2025
f4cb9e0
Remove IO buffer name and refactor var name
Victor-Jung Mar 13, 2025
435cc9d
Linting
Victor-Jung Mar 13, 2025
731f39f
Test the correctness of the memory map after memory allocation
Victor-Jung Mar 17, 2025
dd1370c
Allocate memory arena first
Victor-Jung Mar 17, 2025
8bfdb13
correct DMA lengh of copy assertion
runwangdl Mar 18, 2025
f01eb7f
Align memory allocation test
Victor-Jung Mar 18, 2025
031dc79
delete redundant shell scripts
runwangdl Mar 19, 2025
58e18da
Merge branch 'devel' into PULPCCTL3_16_16_64
runwangdl Mar 19, 2025
ac2d879
Update node with multioutput to single output
runwangdl Mar 19, 2025
6a7198b
add softmaxcrossentropygrad tiling
runwangdl Mar 19, 2025
360aef7
Add softmaxcrossentropylossgrad tiling
runwangdl Mar 20, 2025
bc48582
Merge branch 'PULPCCTL3_16_16_64' into GEMM_training_tiled
runwangdl Mar 20, 2025
b6542ba
Fix CI issue
runwangdl Mar 20, 2025
fe208d0
Fix CI bugs
runwangdl Mar 20, 2025
4a21359
update CI
runwangdl Mar 20, 2025
a0dcb6d
Improve memory alloc visualization
Victor-Jung Mar 20, 2025
91f12f0
Add and pass test for CCT gemmtraining 1_16_16_8 to 128
runwangdl Mar 20, 2025
d1e1ebf
update CI with 8-128 dim CCT last gemm training test
runwangdl Mar 20, 2025
86a2e99
Add SGD support for PULP Open
runwangdl Mar 20, 2025
bdacd2f
Update CCT training test with sgd
runwangdl Mar 20, 2025
b5421cc
Multi-level profiling + Linting
Victor-Jung Mar 21, 2025
99035f0
Update Changelog
runwangdl Mar 23, 2025
62e87d3
Merge branch 'devel' into GEMM_training_tiled
runwangdl Mar 23, 2025
15ea3ec
Solved issues caused by merging conflicts
runwangdl Mar 23, 2025
a644fdf
Solved Review Comments
runwangdl Mar 28, 2025
643e160
Resolving conflicts
runwangdl Mar 28, 2025
80a9518
Reresolve the conflict
runwangdl Mar 28, 2025
501775d
Solving CI issues
runwangdl Mar 28, 2025
65a56b7
fix linting errors
runwangdl Mar 28, 2025
03c3f4a
gelu sigmoid approximation
runwangdl Mar 24, 2025
7e141fd
gelu parallel + unroll
runwangdl Mar 24, 2025
c3ee783
Float Matmul Parallel on M
runwangdl Mar 24, 2025
47d8c19
Softmax Parallel and Softmax Op Support
runwangdl Mar 24, 2025
ccba380
conv parallel without im2col
runwangdl Mar 25, 2025
fafcedf
PULP Layernorm Parallel
runwangdl Mar 25, 2025
147e68f
Fixed CI issues
runwangdl Mar 28, 2025
6e07dc9
fixing linting
runwangdl Mar 28, 2025
8b2f685
Merge branch 'devel' into devel_CCT_Optim
runwangdl Apr 8, 2025
9c0b8f6
Enlarge CI floatconv tiling L1 size for 8 core and delete CCT 128 tes…
runwangdl Apr 8, 2025
4c36de2
matmul 1*4 unrolling
runwangdl Apr 24, 2025
28ec2ca
Add computeOp support for CCT necessary kernels
runwangdl Apr 24, 2025
bf1f8ae
Add openlibm expf
runwangdl Apr 13, 2025
deac9ce
add relu, mul, maxpool ops num
runwangdl May 4, 2025
3b12187
Optimize parallel for multiple kernels
runwangdl May 4, 2025
49da947
Merge branch 'devel' into devel_CCT_Optim
runwangdl May 4, 2025
47961b9
Merge branch 'devel' into devel_CCT_Optim
runwangdl May 6, 2025
8907532
Change ConvTileConstraint to only tile on outchannel
runwangdl May 6, 2025
133f9ae
Fix error in gelu
runwangdl May 6, 2025
f25127d
Fix Linting Issues
runwangdl May 6, 2025
6f3f585
Merge branch 'devel' into devel_CCT_Optim
runwangdl May 8, 2025
4ffea9b
Change CI tests
runwangdl May 8, 2025
81c3460
profilling string change to const static
runwangdl May 8, 2025
4af69de
Fix profiling dual loop issue
runwangdl May 8, 2025
e819626
Add RV32IMF Picolibc support for Siracusa platform
runwangdl May 8, 2025
fa0cc37
Build Docker for new gvsoc for testing
runwangdl May 8, 2025
ac56ca2
Gvsoc Small test
runwangdl May 8, 2025
fd6c99d
Add Redmule Platform, Engline, Tiler, and Deployer
runwangdl May 8, 2025
2862f29
Add rv32imf.txt to build docker
runwangdl May 8, 2025
9ef9cc2
Update GVSOC hash
runwangdl May 9, 2025
10de9f6
matmul delicate constraints for Redmule
runwangdl May 9, 2025
efab54c
Merge branch 'devel_CCT_Optim' into redmule_platform
runwangdl May 9, 2025
37670e6
conv with redmule
runwangdl May 9, 2025
08b7e23
Add CCT 32 test
runwangdl May 9, 2025
e42b3d6
xtensor gvsoc docker build
runwangdl May 9, 2025
823d847
add softmaxgrad tileconstraint
runwangdl May 10, 2025
212ff3c
LayernormGrad and CCT MLP Training Graph
runwangdl May 11, 2025
d7346a5
Merge branch 'devel' into exp/heterogeneous-memory-placement
runwangdl May 12, 2025
c51694b
Fix Layernormgrad
runwangdl May 12, 2025
3efa661
Add Gelugrad
runwangdl May 16, 2025
aee7651
Merge branch 'exp/heterogeneous-memory-placement' into AttentionTraining
runwangdl May 16, 2025
b40cbd7
GEMM with Redmule
runwangdl May 18, 2025
203f095
Efficient GEMM
runwangdl May 18, 2025
7835c5a
reducesum tileconstraint
runwangdl Jun 9, 2025
21294bb
temporary deactiate transposesplit otherwise kq training failed
runwangdl Jun 9, 2025
90689e2
merge devel
runwangdl Jun 13, 2025
5c3f287
gemm no bias + input in name issue for codegenerate
runwangdl Jun 19, 2025
3271c3a
Parallelization and Optimization of CCT Inference and Training Kernel…
runwangdl Jun 12, 2025
7f99f2c
Adapation for Merging Devel
runwangdl Jun 20, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
2 changes: 1 addition & 1 deletion .github/workflows/BuildDocker.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,4 +38,4 @@ jobs:
file: Container/Dockerfile
push: true
# JUNGVI: If you operate from a fork and want to build a new docker make sure to replace 'pulp-platform' by your uname.
tags: ghcr.io/pulp-platform/deeploy:main
tags: ghcr.io/runwangdl/deeploy:redmule
74 changes: 49 additions & 25 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ on:
- cron: "0 1 */6 * *"

env:
DOCKER_IMAGE: ghcr.io/pulp-platform/deeploy:main
DOCKER_IMAGE: ghcr.io/runwangdl/deeploy:redmule

jobs:

Expand Down Expand Up @@ -290,7 +290,7 @@ jobs:
MLPerf/ImageClassification
MLPerf/AnomalyDetection
CCT/CCT_1_16_16_8
testTrainCCT/CCT_Classifier_Training/CCT_1_16_16_8
testTrainCCT/CCT1_Classifier_Training/CCT_1_16_16_8
num-cores: 8

siracusa-kernels-tiled-singlebuffer-L2:
Expand Down Expand Up @@ -338,7 +338,7 @@ jobs:
},
{
"name": "testFloat2DConvolution",
"L1": [2000]
"L1": [8000]
},
{
"name": "testFloatLayerNorm",
Expand Down Expand Up @@ -412,15 +412,15 @@ jobs:
},
{
"name": "RQHardswish",
"L1": [750]
"L1": [800]
},
{
"name": "testFloatGEMM",
"L1": [8000]
},
{
"name": "testFloat2DConvolution",
"L1": [4000]
"L1": [15000]
},
{
"name": "testFloatLayerNorm",
Expand Down Expand Up @@ -481,7 +481,7 @@ jobs:
L1: [64000]
- name: "CCT/CCT_1_16_16_8"
L1: [64000]
- name: "testTrainCCT/CCT_Classifier_Training/CCT_1_16_16_8"
- name: "testTrainCCT/CCT1_Classifier_Training/CCT_1_16_16_8"
L1: [64000]
num-cores:
- 8
Expand Down Expand Up @@ -510,15 +510,9 @@ jobs:
L1: [60000, 30000, 15000]
- name: "microLlama/microLlama1"
L1: [60000, 10000, 5000]
- name: "CCT/CCT_1_32_32_8"
L1: [64000]
- name: "CCT/CCT_1_16_16_64"
L1: [64000]
- name: "CCT/CCT_1_16_16_128"
L1: [64000]
- name: "testTrainCCT/CCT_Classifier_Training/CCT_1_16_16_64"
L1: [64000]
- name: "testTrainCCT/CCT_Classifier_Training/CCT_1_16_16_128"
- name: "CCT/CCT_2_32_32_128"
L1: [128000]
- name: "testTrainCCT/CCT1_Classifier_Training/CCT_1_16_16_128"
L1: [64000]
num-cores:
- 8
Expand Down Expand Up @@ -555,15 +549,9 @@ jobs:
L1: [60000, 20000, 10000]
- name: "microLlama/microLlama8_parallel"
L1: [60000, 20000, 10000]
- name: "CCT/CCT_1_32_32_8"
L1: [64000]
- name: "CCT/CCT_1_16_16_64"
L1: [64000]
- name: "CCT/CCT_1_16_16_128"
L1: [64000]
- name: "testTrainCCT/CCT_Classifier_Training/CCT_1_16_16_64"
L1: [64000]
- name: "testTrainCCT/CCT_Classifier_Training/CCT_1_16_16_128"
- name: "CCT/CCT_2_32_32_128"
L1: [128000]
- name: "testTrainCCT/CCT1_Classifier_Training/CCT_1_16_16_128"
L1: [64000]
num-cores:
- 8
Expand Down Expand Up @@ -748,6 +736,42 @@ jobs:
default-memory-level: ${{ matrix.default-memory-level }}
neureka-wmem: ${{ matrix.neureka-wmem }}

siracusa-redmule-kernels-tiled-singlebuffer-L2:
strategy:
fail-fast: false
matrix:
test-data:
- name: "testFloatMatmul"
L1: [8000]
num-cores:
- 8
uses: ./.github/workflows/TestRunnerTiledSiracusaWithRedmule.yml
needs: select-docker-image
with:
docker-image: ${{ needs.select-docker-image.outputs.image }}
test-name: ${{ matrix.test-data.name }}
num-cores: ${{ matrix.num-cores }}
L1: ${{ toJson(matrix.test-data.L1) }}

siracusa-redmule-kernels-tiled-doublebuffer-L2:
strategy:
fail-fast: false
matrix:
test-data:
- name: "testFloatMatmul"
L1: [8000]
num-cores:
- 8
double-buffer:
- true
uses: ./.github/workflows/TestRunnerTiledSiracusaWithRedmule.yml
needs: select-docker-image
with:
docker-image: ${{ needs.select-docker-image.outputs.image }}
test-name: ${{ matrix.test-data.name }}
num-cores: ${{ matrix.num-cores }}
L1: ${{ toJson(matrix.test-data.L1) }}
double-buffer: ${{ matrix.double-buffer }}

### Deeploy Extension and Internal Tests ###
deeploy-memory-allocation:
Expand All @@ -766,7 +790,7 @@ jobs:
run: |
cd DeeployTest
python testMVP.py -t Tests/CCT/CCT_1_16_16_8 -p Siracusa --defaultMemLevel=L2 --l1=64000 --l2=75000 --memAllocStrategy=MiniMalloc
python testMVP.py -t Tests/CCT/CCT_1_16_16_8 -p Siracusa --defaultMemLevel=L2 --l1=64000 --l2=70000 --memAllocStrategy=MiniMalloc --shouldFail
python testMVP.py -t Tests/CCT/CCT_1_16_16_8 -p Siracusa --defaultMemLevel=L2 --l1=64000 --l2=60000 --memAllocStrategy=MiniMalloc --shouldFail
python testMVP.py -t Tests/CCT/CCT_1_16_16_8 -p Siracusa --defaultMemLevel=L2 --l1=64000 --l2=90000 --memAllocStrategy=TetrisRandom
python testMVP.py -t Tests/CCT/CCT_1_16_16_8 -p Siracusa --defaultMemLevel=L2 --l1=64000 --l2=75000 --memAllocStrategy=TetrisRandom --shouldFail

Expand Down
72 changes: 72 additions & 0 deletions .github/workflows/TestRunnerTiledSiracusaWithRedmule.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# Reusable test-runner workflow for tiled Siracusa tests with the RedMulE accelerator.
# Invoked from CI.yml via `workflow_call`; each caller supplies a test name and a JSON
# list of L1 memory sizes that is fanned out through the job matrix below.
# NOTE(review): the original `name:` read "TestRunnerTiledSiracusa", a copy-paste of the
# non-RedMulE runner that makes the two workflows indistinguishable in the Actions UI;
# renamed to match this file.
name: TestRunnerTiledSiracusaWithRedmule

on:
  workflow_call:
    inputs:
      # Container image the test job runs in (selected by the caller).
      docker-image:
        required: true
        type: string
      # Test directory under DeeployTest/Tests/.
      test-name:
        required: true
        type: string
      num-cores:
        required: false
        default: 8
        type: number
      # JSON-encoded list of L1 sizes; expanded with fromJSON into the matrix.
      L1:
        required: false
        default: "[64000]"
        type: string
      default-memory-level:
        required: false
        default: "L2"
        type: string
      double-buffer:
        required: false
        default: false
        type: boolean
      memory-allocation-strategy:
        required: false
        default: "MiniMalloc"
        type: string
      search-strategy:
        required: false
        default: "random-max"
        type: string

jobs:

  test-runner-siracusa-tiled:
    strategy:
      fail-fast: false
      matrix:
        # One job per L1 size in the caller-provided JSON list.
        L1: ${{ fromJSON(inputs.L1) }}
    runs-on: ubuntu-22.04
    container:
      image: ${{ inputs.docker-image }}
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Build Deeploy
        run: pip install -e .
      - name: Cache ccache
        id: ccache-cache
        uses: actions/cache@v4
        with:
          path: /app/.ccache
          key: ${{ runner.os }}-ccache
      - name: Run Test
        # Retry on timeout: works around sporadic freezes at the compilation stage
        # (see the retry step used by the other Siracusa runners).
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 15
          max_attempts: 3
          retry_on: timeout
          command: |
            cd DeeployTest
            mkdir -p /app/.ccache
            export CCACHE_DIR=/app/.ccache
            python testRunner_tiled_siracusa_w_redmule.py -t Tests/${{ inputs.test-name }} --cores=${{ inputs.num-cores }} --l1 ${{ matrix.L1 }} --defaultMemLevel=${{ inputs.default-memory-level }} ${{ inputs.double-buffer && '--doublebuffer' || '' }} --memAllocStrategy=${{ inputs.memory-allocation-strategy }} --searchStrategy=${{ inputs.search-strategy }}
        shell: bash

71 changes: 66 additions & 5 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -267,19 +267,80 @@ Change main.c to use OUTPUTTYPE instead of float
- LLVM Compiler RT for rv32im, rv32ima, and rv32imafd.
- Appropriate linking of picolibc and compiler RT.
- Build and install a flow for XTensor, XTL, and XSIMD. These libraries are used in some GVSoC models, and they used to live in the PULP SDK, as a header-only library. Keeping only the library headers in the PULP SDK makes it hard to bump new versions.
- Add RV32IMF Picolibc to the toolchain
- Generic float DW Conv2D kernel and bindings.
- Bias handling and computation for regular and DW Conv2D.
- Empty bias handling for generic regular and DW Conv2D.
- Tests for Conv2D regular and DW, with and without bias (and included them in the CI pipeline).
- `BuildDockerToolchain.yml` to build Toolchain Docker container
- `BuildDockerDeeploy.yml` to build Deeploy Docker container
- Add support for `linux/arm64` containers
- Added caching to speed up container builds
- Makefile to simplify local container build
- Add helper script to generate a baseline changelog.
- SoftHier Deeploy Targets, including Deployer, Platform, and Templates
- SoftHier cmake compilation flow
- SoftHier CI task
- Parallel implementations of the following operators on Siracusa: Matmul, Softmax, Gelu, Conv, Layernorm, Maxpool, Add, Mul, and Relu
- Gelu with Sigmoid implementation
- ComputeOp support for multiple float kernels: Maxpool, Relu, and Mul



### Changed
- Officially deprecate Banshee as a simulator for Snitch Cluster in the CI. Maintaining this is a burden and unnecessary, as GVSoC is now the standard simulator. Additionally, newer versions of the Snitch runtime don't support Banshee anymore.
- Bump XTensor's version to `0.25.0` to fix a bug with Intel's SSE.
- Update snitch cluster patch to link to picolibc and add explicit target.
- Update README to include Snitch in the Getting Started and the D&T Journal.
- The ISA for the Siracusa platform has been updated from rv32imc_zfinx_xpulpv2 to rv32imf_xpulpv2.
- All floating-point comparison tasks in deeploytest.c are now offloaded to Cluster 0 for execution.
- Split the original build flow into two container
- Refactor changelog for better readability
- Reformatted all C files
- Extended testRunner flow for SoftHier
- Extended Dockerfile for SoftHier GVSoC simulator
- Minor change on `Util.cmake` for easier debug with assembly

### Fixed
- Fix the PULP Deployer where outputs were unnecessarily loaded in L3
- Fix the lifetime computation of aliased buffers
- Removed unsupported `-MMD` compiler flag in LLVM-based toolchains.
- Fix `DebugPrint` topology pass
- Fix `PrintInput` code transformations to work with global variables
- RequantShift when log2d is 0
- missing math.h headers
- clang on mac doesn't support `-Wl,--gc-sections` flag, moved it into each target and for host it's checking now for host system
- `--ffast-math` caused numerical errors on generic so moved into each target and removed from that one since I'm imagining it as the _debug_ target
- Gather kernel on generic target
- Update the link of the Docker container used to run the CI with the Docker published by this repo instead of my fork.
- Add a retry on timeout step for large network tests. This is a temporary fix to address the sporadic freeze happening at the compilation stage, see [this issue](https://github.com/pulp-platform/Deeploy/issues/9).
- Float bug on Testslice, CMSIS TestUtil, DivInteger
- AbstractDataType Float Bugs
- Change main.c to use OUTPUTTYPE instead of float
- MaxPool Padding Extract Pass for float and integer
- Testinput, testoutput, weight type casted from double to float warning
- Relaxed the error threshold between expected and actual values in deeploytest.
- CycleMeasure Pass for Siracusa Untiled Profiling
- GEMM Tiling Constraints `transA` and `transB` not supported
- MatMul layer Multi-Dimensional Input Issue
- Add Layer for Broadcasted Bias
- Resolved an issue where concatenation of float32 with f caused inf errors during code generation
- Fixed a bug in the MemoryScheduler where the CP problem was solved more times than needed.
- Updated printinput nodetemplate for float handling.
- Fix `testMVP.py` to get a proper should fail test.
- Maxpool Tile Calculation Error: The last dimension padding was incorrectly calculated due to L3 wraptiling solution. This has been fixed by updating serializeTilingSolution of Maxpool to avoid incorrect padding of Maxpool and prevent potential DMA 3D transfer issues of Maxpool.
- DMA 1D Copy Assertion Issue: Updated the DMA length datatype from uint16 to uint32 to avoid assertion failures when dealing with large block transfers.
- Deeploy subdirectories installed when installing Deeploy with pip install
- Fix linking TEST_RECENT on MacOS
- Fixed broken VSCode launch configuration
- Fixed broken `pulp-sdk` hash
- Fix issue with building `banshee` on `linux/arm64`
- Removed `i3c` related files from the `pulp-sdk` CMake flow
- Fixed C-code linting stage in CI

### Removed
- Remove the link to the precompiled LLVM 12 in the `testRunner` for Snitch and in the CI.
- Remove the sourcing of the cursed PULP SDK script.

## rv32imf_xpulpv2 ISA support for Siracusa platform

### Changed
- The ISA for the Siracusa platform has been updated from rv32imc_zfinx_xpulpv2 to rv32imf_xpulpv2.
- All floating-point comparison tasks in deeploytest.c are now offloaded to Cluster 0 for execution.
## Release v0.1.0 (2024-08-08)
This release contains the first version of Deeploy, which includes the initial implementation of the Deeploy framework, support for various platforms, and basic functionality for deploying deep learning models on PULP-based systems.
10 changes: 6 additions & 4 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ if(TOOLCHAIN STREQUAL GCC)
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
endif()

set(platform MemPool CACHE STRING "Platform (MemPool, QEMU, Siracusa, Siracusa_w_neureka, PULP-Open, Generic, Snitch)")
set_property(CACHE platform PROPERTY STRINGS MemPool QEMU Siracusa Siracusa_w_neureka PULP-Open Generic Snitch)
set(platform MemPool CACHE STRING "Platform (MemPool, QEMU, Siracusa, Siracusa_w_neureka, Siracusa_w_redmule, PULP-Open, Generic, Snitch)")
set_property(CACHE platform PROPERTY STRINGS MemPool QEMU Siracusa Siracusa_w_neureka Siracusa_w_redmule PULP-Open Generic Snitch)

if(platform STREQUAL MemPool)
message(STATUS "Building for platform 'MemPool'")
Expand All @@ -26,6 +26,8 @@ elseif(platform STREQUAL Siracusa)
message(STATUS "Building for platform 'Siracusa'")
elseif(platform STREQUAL Siracusa_w_neureka)
message(STATUS "Building for platform 'Siracusa_w_neureka'")
elseif(platform STREQUAL Siracusa_w_redmule)
message(STATUS "Building for platform 'Siracusa_w_redmule'")
elseif(platform STREQUAL PULPOpen)
message(STATUS "Building for platform 'PULP-Open'")
elseif(platform STREQUAL Generic)
Expand Down Expand Up @@ -148,7 +150,7 @@ if(platform STREQUAL QEMU-ARM)

endif()

if(platform STREQUAL Siracusa OR platform STREQUAL Siracusa_w_neureka OR platform STREQUAL PULPOpen)
if(platform STREQUAL Siracusa OR platform STREQUAL Siracusa_w_neureka OR platform STREQUAL Siracusa_w_redmule OR platform STREQUAL PULPOpen)

if(TOOLCHAIN STREQUAL LLVM)
set(CMAKE_TOOLCHAIN_FILE ${CMAKE_CURRENT_LIST_DIR}/cmake/pulp/toolchain_llvm.cmake)
Expand All @@ -158,7 +160,7 @@ if(platform STREQUAL Siracusa OR platform STREQUAL Siracusa_w_neureka OR platfor

include(${CMAKE_CURRENT_LIST_DIR}/cmake/pulp/pulp.cmake)

if(platform STREQUAL Siracusa OR platform STREQUAL Siracusa_w_neureka)
if(platform STREQUAL Siracusa OR platform STREQUAL Siracusa_w_neureka OR platform STREQUAL Siracusa_w_redmule)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/pulp/siracusa/siracusa.cmake)
elseif(platform STREQUAL PULPOpen)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/pulp/pulp-open/pulp-open.cmake)
Expand Down
4 changes: 3 additions & 1 deletion Container/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,9 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y git-lfs \
libsdl2-ttf-dev \
gcc-multilib \
wget \
clang-format
clang-format \
libxtensor-dev \
libxsimd-dev

# Install cmake 3.31.1
RUN wget https://github.com/Kitware/CMake/releases/download/v3.31.1/cmake-3.31.1-linux-x86_64.sh && \
Expand Down
Loading