Skip to content
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
Show all changes
62 commits
Select commit Hold shift + click to select a range
9e97e26
Add TensorRT decoder plugin for quantum error correction
wsttiger Sep 30, 2025
79a7e19
Formatting
wsttiger Sep 30, 2025
d88452f
Removed hardcoded paths to TensorRT installation
wsttiger Oct 1, 2025
e7ec736
Merge branch 'main' into add_trt_decoder
wsttiger Oct 1, 2025
7cbbeb1
Incorrect URL
wsttiger Oct 1, 2025
5287b09
Fixed up the references to cuda in CMake
wsttiger Oct 2, 2025
88b3cc1
Switched to finding cuda toolkit instead of hardcoding cuda headers
wsttiger Oct 3, 2025
1bfbb3d
Disabled trt_decoder for ARM
wsttiger Oct 6, 2025
4c040dc
Redo platform check for x86
wsttiger Oct 6, 2025
e01de62
Added include directory for the Arm64 arch
wsttiger Oct 7, 2025
ce0f24c
Removed cudaqx namespace
wsttiger Oct 7, 2025
e641f49
Added copyright notice
wsttiger Oct 7, 2025
f3d7a95
Added CUDAQ logging + minor details
wsttiger Oct 7, 2025
6533797
Handled CUDA (potential) errors + formatting
wsttiger Oct 8, 2025
83e957b
Removed block_size from trt_decoder logic (there's no parity check ma…
wsttiger Oct 8, 2025
cdb1754
Default initialization + formatting
wsttiger Oct 8, 2025
b6cfa6f
Added LFS (no assets yet), added training for E2E test with test AI d…
wsttiger Oct 12, 2025
aea8d56
Added test AI model (onnx)
wsttiger Oct 12, 2025
036b331
Formatting
wsttiger Oct 12, 2025
1deb4f5
Added test_trt_decoder.py - for the python path
wsttiger Oct 14, 2025
5db5c88
Added trt-decoder optional dependency to cudaq_qec pyproject.toml
wsttiger Oct 15, 2025
2fb89c7
Added platform detection to test_trt_decoder.py
wsttiger Oct 15, 2025
8fa4ef8
Modified platform checks
wsttiger Oct 15, 2025
4f133f9
Formatting
wsttiger Oct 15, 2025
fb16b36
Merge branch 'main' into add_trt_decoder
wsttiger Oct 16, 2025
c9e563f
DCO Remediation Commit for Scott Thornton <[email protected]>
wsttiger Oct 16, 2025
392f5de
Added installation of TensorRT to build_wheels.yaml
wsttiger Oct 17, 2025
42c2b32
Made minor mods to build_wheels.yaml
wsttiger Oct 17, 2025
5ad505b
Hardcoding TensorRT package name for now
wsttiger Oct 21, 2025
2d08b88
Added debugging info
wsttiger Oct 21, 2025
4defcfd
Edits to build_wheel.sh
wsttiger Oct 21, 2025
62cdbac
more edits of build_wheel.yaml for debugging
wsttiger Oct 21, 2025
d4e79a9
Added TensorRT library path to LD_LIBRARY_PATH for auditwheel
wsttiger Oct 21, 2025
d8489f7
modified test_wheels to check for GPU's
wsttiger Oct 21, 2025
6ba9191
Merge from main - fixed conflict in test_wheels.sh
wsttiger Oct 22, 2025
eea3198
Removed the hardcoding of TensorRT version and path from the build_wh…
wsttiger Oct 22, 2025
33359f5
Added all optional dependencies to cudaq_qec
wsttiger Oct 22, 2025
5e38f6a
Fixed small bug in pyproject.toml for QEC
wsttiger Oct 23, 2025
be6a52f
Added extra NVIDIA index for installing dependencies (wheels)
wsttiger Oct 23, 2025
f152a6c
Fixes for the build_wheels.yaml CI/CD pipeline
wsttiger Oct 24, 2025
55ae990
Fixed RPATH in trt_decoder (plugin)
wsttiger Oct 24, 2025
07ed875
Remove extra bloat
wsttiger Oct 24, 2025
108a8b9
Installed and enabled Git LFS for trt_decoder tests
wsttiger Oct 25, 2025
d090aca
Moved build_engine_from_onnx.py to scripts directory
wsttiger Oct 27, 2025
2e727c0
Parameterize CUDA version for TensorRT installation in CI workflows
wsttiger Oct 27, 2025
626ee9a
Map CUDA 12.6 to 12.9 for TensorRT downloads in build_wheels workflow
wsttiger Oct 27, 2025
65fdcee
Fixed minor bug
wsttiger Oct 27, 2025
ac18d2b
Removed the x86_64 constraint on the trt_decoder tests, also removed …
wsttiger Oct 28, 2025
b645807
Formatting
wsttiger Oct 28, 2025
8e0ab06
Re-enabled building trt_decoder for ARM architecture
wsttiger Oct 28, 2025
60e703d
Added try/catch around instantiating decoder
wsttiger Oct 28, 2025
e9825b3
Formatting
wsttiger Oct 28, 2025
c2c61db
Re-enabled ARM in pyproject.toml.cu13
wsttiger Oct 28, 2025
2b39242
no tensorrt-cu13
wsttiger Oct 28, 2025
663ba48
tensorrt-cu13 is back in
wsttiger Oct 28, 2025
9626a12
Re-calibrated the warning messages
wsttiger Oct 31, 2025
13e5c69
Created a more focused set of test data
wsttiger Oct 31, 2025
6ea6ec9
Added check for CUDA
wsttiger Oct 31, 2025
32e8e64
Formatting
wsttiger Oct 31, 2025
3642fc8
Merge branch 'main' into add_trt_decoder
wsttiger Oct 31, 2025
30da7ce
Reduced the number of syndromes to 30
wsttiger Oct 31, 2025
5389216
nvidia-cublas-cuXXX -> nvidia-cublas
wsttiger Nov 1, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .github/workflows/all_libs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,12 @@ jobs:

- name: Install build requirements
run: |
apt install -y --no-install-recommends gfortran libblas-dev
apt install -y --no-install-recommends gfortran libblas-dev wget
wget https://developer.download.nvidia.com/compute/tensorrt/10.13.3/local_installers/nv-tensorrt-local-repo-ubuntu2404-10.13.3-cuda-12.9_1.0-1_amd64.deb
dpkg -i nv-tensorrt-local-repo-ubuntu2404-10.13.3-cuda-12.9_1.0-1_amd64.deb
cp /var/nv-tensorrt-local-repo-ubuntu2404-10.13.3-cuda-12.9/nv-tensorrt-local-4B177B4F-keyring.gpg /usr/share/keyrings/
apt update
apt install -y tensorrt-dev

- name: Build
id: build
Expand Down
8 changes: 8 additions & 0 deletions .github/workflows/lib_qec.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,14 @@ jobs:
# ========================================================================
# Build library
# ========================================================================
- name: Install build requirements
run: |
apt install -y --no-install-recommends gfortran libblas-dev wget
wget https://developer.download.nvidia.com/compute/tensorrt/10.13.3/local_installers/nv-tensorrt-local-repo-ubuntu2404-10.13.3-cuda-12.9_1.0-1_amd64.deb
dpkg -i nv-tensorrt-local-repo-ubuntu2404-10.13.3-cuda-12.9_1.0-1_amd64.deb
cp /var/nv-tensorrt-local-repo-ubuntu2404-10.13.3-cuda-12.9/nv-tensorrt-local-4B177B4F-keyring.gpg /usr/share/keyrings/
apt update
apt install -y tensorrt-dev

- name: Build
id: build
Expand Down
56 changes: 56 additions & 0 deletions libs/qec/include/cudaq/qec/trt_decoder_internal.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
/*******************************************************************************
* Copyright (c) 2024 - 2025 NVIDIA Corporation & Affiliates. *
* All rights reserved. *
* *
* This source code and the accompanying materials are made available under *
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/

// Internal helper declarations for the TensorRT-based QEC decoder plugin.
// The `trt_decoder_internal` namespace (note the `_internal` suffix) holds
// the ONNX-to-engine build pipeline used by the plugin; it is not intended
// as public decoder API.
// NOTE(review): this header requires the TensorRT headers (NvInfer.h,
// NvOnnxParser.h) on the include path — see the plugin's CMakeLists.txt.

#pragma once

#include "cudaq/qec/decoder.h"
#include <memory>
#include <string>
#include <vector>

#include "NvInfer.h"
#include "NvOnnxParser.h"

namespace cudaq::qec::trt_decoder_internal {

/// @brief Validates TRT decoder parameters
/// @param params The parameter map to validate
/// @throws std::runtime_error if parameters are invalid
void validate_trt_decoder_parameters(const cudaqx::heterogeneous_map &params);

/// @brief Loads a binary file into memory
/// @param filename Path to the file to load
/// @return Vector containing the file contents (raw bytes)
/// @throws std::runtime_error if file cannot be opened
std::vector<char> load_file(const std::string &filename);

/// @brief Builds a TensorRT engine from an ONNX model
/// @param onnx_model_path Path to the ONNX model file
/// @param params Configuration parameters
/// @param logger TensorRT logger instance
/// @return Unique pointer to the built TensorRT engine
/// @throws std::runtime_error if engine building fails
std::unique_ptr<nvinfer1::ICudaEngine>
build_engine_from_onnx(const std::string &onnx_model_path,
                       const cudaqx::heterogeneous_map &params,
                       nvinfer1::ILogger &logger);

/// @brief Saves a TensorRT engine to a file
/// @param engine The engine to save
/// @param file_path Path where to save the engine
/// @throws std::runtime_error if saving fails
void save_engine_to_file(nvinfer1::ICudaEngine *engine,
                         const std::string &file_path);

/// @brief Parses and configures precision settings for TensorRT
/// @param precision The precision string (fp16, bf16, int8, fp8, noTF32, best)
/// @param config TensorRT builder config instance
void parse_precision(const std::string &precision,
                     nvinfer1::IBuilderConfig *config);

} // namespace cudaq::qec::trt_decoder_internal
1 change: 1 addition & 0 deletions libs/qec/lib/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ add_library(${LIBRARY_NAME} SHARED
)

add_subdirectory(decoders/plugins/example)
add_subdirectory(decoders/plugins/trt_decoder)
add_subdirectory(codes)
add_subdirectory(device)

Expand Down
143 changes: 143 additions & 0 deletions libs/qec/lib/decoders/plugins/trt_decoder/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
# ============================================================================ #
# Copyright (c) 2024 - 2025 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #

cmake_minimum_required(VERSION 3.28 FATAL_ERROR)

set(MODULE_NAME "cudaq-qec-trt-decoder")

project(${MODULE_NAME})

# TENSORRT_ROOT may be supplied on the command line (-DTENSORRT_ROOT=...) to
# point at a local TensorRT installation. The previous version of this check
# printed "using default: ${TENSORRT_ROOT}" without ever setting a default,
# so the message showed an empty path and no default actually existed.
# Provide a real default (one of the conventional install locations searched
# below) so the message is truthful and the RPATH setup later in this file
# has a non-empty ${TENSORRT_ROOT}/lib to reference.
if(NOT DEFINED TENSORRT_ROOT)
  set(TENSORRT_ROOT "/usr/local/tensorrt" CACHE PATH
      "Root directory of the TensorRT installation")
  message(STATUS "TENSORRT_ROOT not provided, using default: ${TENSORRT_ROOT}")
else()
  message(STATUS "Using TENSORRT_ROOT: ${TENSORRT_ROOT}")
endif()

# Source files for the plugin
set(PLUGIN_SRC
  trt_decoder.cpp
  # Add additional source files here as needed
)

find_package(CUDAToolkit REQUIRED)

# Create the shared plugin library
add_library(${MODULE_NAME} SHARED ${PLUGIN_SRC})

# Locate TensorRT headers and libraries.
# Use ${CMAKE_LIBRARY_ARCHITECTURE} (e.g. x86_64-linux-gnu or
# aarch64-linux-gnu) for the Debian/Ubuntu multi-arch paths so the search
# works on both x86_64 and ARM instead of hardcoding the x86_64 triple; the
# literal x86_64 path is kept as a fallback for environments where
# CMAKE_LIBRARY_ARCHITECTURE is unset.
find_path(TENSORRT_INCLUDE_DIR NvInfer.h
  PATHS
    ${TENSORRT_ROOT}/include
    /usr/include/${CMAKE_LIBRARY_ARCHITECTURE}
    /usr/include/x86_64-linux-gnu
    /usr/local/cuda/include
    /usr/local/tensorrt/include
    /opt/tensorrt/include
  NO_DEFAULT_PATH
)

find_library(TENSORRT_LIBRARY nvinfer
  PATHS
    ${TENSORRT_ROOT}/lib
    /usr/lib/${CMAKE_LIBRARY_ARCHITECTURE}
    /usr/lib/x86_64-linux-gnu
    /usr/local/cuda/lib64
    /usr/local/tensorrt/lib
    /opt/tensorrt/lib
  NO_DEFAULT_PATH
)

find_library(TENSORRT_ONNX_LIBRARY nvonnxparser
  PATHS
    ${TENSORRT_ROOT}/lib
    /usr/lib/${CMAKE_LIBRARY_ARCHITECTURE}
    /usr/lib/x86_64-linux-gnu
    /usr/local/cuda/lib64
    /usr/local/tensorrt/lib
    /opt/tensorrt/lib
  NO_DEFAULT_PATH
)

if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY AND TENSORRT_ONNX_LIBRARY)
message(STATUS "TensorRT found: ${TENSORRT_INCLUDE_DIR}")
message(STATUS "TensorRT library: ${TENSORRT_LIBRARY}")
message(STATUS "TensorRT ONNX parser: ${TENSORRT_ONNX_LIBRARY}")
target_compile_definitions(${MODULE_NAME} PRIVATE TENSORRT_AVAILABLE)
else()
message(WARNING "TensorRT not found. Building decoder without TensorRT support.")
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think the build succeeds if TensorRT is installed. At least it doesn't on my machine. Is the whole build supposed to fail if TRT is not found? I would advocate for making this a top-level CMake flag.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this resolved by #331?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this resolved by #331?

Sort of. #331 allows the user to disable the TRT decoder at cmake time by specifically disabling it, but if they leave it enabled at cmake time (which is the default), then there is still a build failure about missing include files despite this message making it sound like it should just build the decoder without TensorRT support.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Got it. Makes sense now.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This may be addressed by #332. @wsttiger take a look and let me know what you think.

message(WARNING "TENSORRT_INCLUDE_DIR: ${TENSORRT_INCLUDE_DIR}")
message(WARNING "TENSORRT_LIBRARY: ${TENSORRT_LIBRARY}")
message(WARNING "TENSORRT_ONNX_LIBRARY: ${TENSORRT_ONNX_LIBRARY}")
endif()

# Public headers consumed by (and re-exported to) users of this plugin:
# the QEC and core library include trees.
set(plugin_public_includes
  ${CMAKE_SOURCE_DIR}/libs/qec/include
  ${CMAKE_SOURCE_DIR}/libs/core/include
)
target_include_directories(${MODULE_NAME} PUBLIC ${plugin_public_includes})

# TensorRT and CUDA toolkit headers are a private build requirement; they are
# only added when a TensorRT installation was actually located above.
if(TENSORRT_INCLUDE_DIR)
  set(plugin_private_includes
    ${TENSORRT_INCLUDE_DIR}
    ${CUDAToolkit_INCLUDE_DIRS}
  )
  target_include_directories(${MODULE_NAME} PRIVATE ${plugin_private_includes})
endif()

# Core CUDA-QX / CUDA-Q dependencies. PUBLIC entries appear in this plugin's
# public interface; PRIVATE entries are implementation-only.
target_link_libraries(${MODULE_NAME} PUBLIC cudaqx-core cudaq::cudaq-operator)
target_link_libraries(${MODULE_NAME} PRIVATE cudaq::cudaq-common cudaq-qec)

# TensorRT inference + ONNX parser libraries and the CUDA runtime are linked
# only when both TensorRT libraries were found above.
if(TENSORRT_LIBRARY AND TENSORRT_ONNX_LIBRARY)
  target_link_libraries(${MODULE_NAME}
    PRIVATE ${TENSORRT_LIBRARY} ${TENSORRT_ONNX_LIBRARY} CUDA::cudart)
endif()

# Emit the plugin into the build tree's decoder-plugins directory so it can
# be discovered at runtime alongside the other decoder plugins.
set_target_properties(${MODULE_NAME} PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/decoder-plugins
)

# RPATH configuration
# ==============================================================================

if (NOT SKBUILD)
# Regular (non-wheel) build: search next to the plugin and in the TensorRT
# installation's lib directory at build time; after install, search next to
# the plugin and in a bundled tensorrt_libs directory.
set_target_properties(${MODULE_NAME} PROPERTIES
BUILD_RPATH "$ORIGIN:${TENSORRT_ROOT}/lib"
INSTALL_RPATH "$ORIGIN:$ORIGIN/../../../tensorrt_libs"
)

# Let CMake automatically add paths of linked libraries to the RPATH:
set_target_properties(${MODULE_NAME} PROPERTIES
INSTALL_RPATH_USE_LINK_PATH TRUE)
else()
# Python wheel build (scikit-build): the TensorRT runtime libraries are
# shipped by the tensorrt pip package in a `tensorrt_libs` directory inside
# site-packages, three levels up from where this plugin is installed —
# hence the $ORIGIN/../../../tensorrt_libs entry below.
set_target_properties(${MODULE_NAME} PROPERTIES
INSTALL_RPATH "$ORIGIN:$ORIGIN/../../../tensorrt_libs"
)
endif()

# Install
# ==============================================================================

# Install the plugin shared library under <libdir>/decoder-plugins as part of
# the qec-lib-plugins component, matching the build-tree layout above.
install(TARGETS ${MODULE_NAME}
COMPONENT qec-lib-plugins
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}/decoder-plugins
)



Loading
Loading