diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2de5674..f111a8c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,43 +6,58 @@ cmake_minimum_required(VERSION 3.12)
-option(JAM_COMPATIBLE "Build compatible with JAM-codec" OFF)
-option(CUSTOM_CONFIG_SUPPORT "Support custom config of coder" OFF)
-set(MAX_AGGREGATE_FIELDS 20 CACHE STRING "Max number of aggregates fields (1..1000); for generation")
-
-option(BUILD_TESTS "Whether to include the test suite in build" OFF)
-
-if (PACKAGE_MANAGER)
-    if(PACKAGE_MANAGER NOT MATCHES "^(hunter|vcpkg)$")
-        message(FATAL_ERROR "PACKAGE_MANAGER must be set to 'hunter', 'vcpkg' or isn't set")
-    endif ()
-else ()
-    set(PACKAGE_MANAGER "hunter")
-    if (CMAKE_TOOLCHAIN_FILE)
-        get_filename_component(ACTUAL_NAME ${CMAKE_TOOLCHAIN_FILE} NAME)
-        if(ACTUAL_NAME STREQUAL "vcpkg.cmake")
-            message(STATUS "vcpkg will be used because vcpkg.cmake has found")
-            set(PACKAGE_MANAGER "vcpkg")
-        endif ()
-    endif ()
-endif ()
+# Select package manager
+if(PACKAGE_MANAGER)
+  if(NOT PACKAGE_MANAGER MATCHES "^(hunter|vcpkg)$")
+    message(FATAL_ERROR "PACKAGE_MANAGER must be set to 'hunter', 'vcpkg' or isn't set")
+  endif()
+else()
+  set(PACKAGE_MANAGER "hunter")
+  if(CMAKE_TOOLCHAIN_FILE)
+    get_filename_component(ACTUAL_NAME ${CMAKE_TOOLCHAIN_FILE} NAME)
+    if(ACTUAL_NAME STREQUAL "vcpkg.cmake")
+      message(STATUS "vcpkg will be used because vcpkg.cmake has found")
+      set(PACKAGE_MANAGER "vcpkg")
+    endif()
+  endif()
+endif()
 
 message(STATUS "Selected package manager: ${PACKAGE_MANAGER}")
 
-if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.27")
+if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.27")
+  # find_package() uses upper-case <PACKAGENAME>_ROOT variables.
   cmake_policy(SET CMP0144 NEW)
-endif ()
+endif()
 
-if (PACKAGE_MANAGER STREQUAL "hunter")
-    include("cmake/Hunter/init.cmake")
-endif ()
+if(PACKAGE_MANAGER STREQUAL "hunter")
+  include("cmake/Hunter/init.cmake")
+else()
+  set(HUNTER_ENABLED OFF)
+endif()
 
-if(BUILD_TESTS)
-    if (PACKAGE_MANAGER STREQUAL "vcpkg")
-        list(APPEND VCPKG_MANIFEST_FEATURES scale-tests)
-    endif()
+# Adjust vcpkg features by custom defined option (for deploy possible dependencies)
+if(PACKAGE_MANAGER STREQUAL "vcpkg")
+  if(BUILD_TESTS AND NOT "scale-tests" IN_LIST VCPKG_MANIFEST_FEATURES)
+    list(APPEND VCPKG_MANIFEST_FEATURES "scale-tests")
+  endif()
 endif()
 
-project(Scale LANGUAGES CXX VERSION 2.0.0)
+project(Scale LANGUAGES CXX VERSION 2.0.1)
+
+include(cmake/feature_option.cmake)
+
+# Init options
+feature_option(JAM_COMPATIBLE "jam-compatibility" "Build compatible with JAM-codec" OFF)
+feature_option(CUSTOM_CONFIG_SUPPORT "configurable-coding" "Support custom config of coder" OFF)
+feature_option(BUILD_TESTS "scale-tests" "Whether to include the test suite in build" OFF)
+option(ASAN "Build tests with address sanitizer" OFF)
+option(TSAN "Build tests with thread sanitizer" OFF)
+option(UBSAN "Build tests with undefined behavior sanitizer" OFF)
+
+if((ASAN OR TSAN OR UBSAN) AND NOT BUILD_TESTS)
+  message(FATAL_ERROR "Since SCALE is header-only, sanitizers should only be enabled for tests")
+endif()
+
+set(MAX_AGGREGATE_FIELDS 20 CACHE STRING "Max number of aggregates fields (1..1000); for generation")
 
 set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
@@ -50,28 +65,56 @@ set(CMAKE_CXX_EXTENSIONS OFF)
 
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
-if (PACKAGE_MANAGER STREQUAL "hunter")
-    hunter_add_package(Boost)
-    find_package(Boost)
+if(PACKAGE_MANAGER STREQUAL "hunter")
+  hunter_add_package(Boost)
+  find_package(Boost)
 else()
-    find_package(Boost CONFIG REQUIRED COMPONENTS endian multiprecision)
-endif ()
+  find_package(Boost CONFIG REQUIRED COMPONENTS endian multiprecision)
+endif()
 
-if (PACKAGE_MANAGER STREQUAL "hunter")
-    hunter_add_package(qtils)
-endif ()
+if(PACKAGE_MANAGER STREQUAL "hunter")
+  hunter_add_package(qtils)
+endif()
 find_package(qtils CONFIG REQUIRED)
 
-SET(JAM_COMPATIBILITY_ENABLED "${JAM_COMPATIBLE}")
+set(JAM_COMPATIBILITY_ENABLED "${JAM_COMPATIBLE}")
 set(CUSTOM_CONFIG_ENABLED "${CUSTOM_CONFIG_SUPPORT}")
 configure_file("${CMAKE_SOURCE_DIR}/include/scale/definitions.hpp.in" "${CMAKE_BINARY_DIR}/include/scale/definitions.hpp")
 
-add_subdirectory(src)
+if(ASAN)
+  message(STATUS "Address sanitizer will be used")
+  add_compile_options(-fsanitize=address -fsanitize-address-use-after-scope -fno-omit-frame-pointer)
+  add_link_options(-fsanitize=address -fsanitize-address-use-after-scope -fno-omit-frame-pointer)
+endif()
+if(TSAN)
+  message(STATUS "Thread sanitizer will be used")
+  add_compile_options(-fsanitize=thread -fno-omit-frame-pointer)
+  add_link_options(-fsanitize=thread -fno-omit-frame-pointer)
+endif()
+if(UBSAN)
+  message(STATUS "Undefined behavior sanitizer will be used")
+  add_compile_options(-fsanitize=undefined -fno-omit-frame-pointer)
+  add_link_options(-fsanitize=undefined -fno-omit-frame-pointer)
+endif()
+
+include(cmake/generate_decompose_and_apply.cmake)
 
-if (BUILD_TESTS)
-    enable_testing()
-    add_subdirectory(test ${CMAKE_BINARY_DIR}/test_bin)
-endif ()
+add_library(scale INTERFACE
+  ${DECOMPOSE_AND_APPLY_HPP}
+)
+target_include_directories(scale PUBLIC INTERFACE
+  $<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/include>
+  $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>
+  $<INSTALL_INTERFACE:include>
+)
+target_link_libraries(scale INTERFACE
+  Boost::boost
+)
+
+if(BUILD_TESTS)
+  enable_testing()
+  add_subdirectory(test ${CMAKE_BINARY_DIR}/test_bin)
+endif()
 
 ###############################################################################
 # INSTALLATION
@@ -79,16 +122,8 @@ endif ()
 
 include(GNUInstallDirs)
 
-install(TARGETS scale EXPORT scaleConfig
-    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
-    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
-    INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
-    PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
-    FRAMEWORK DESTINATION ${CMAKE_INSTALL_PREFIX}
-)
-
-install(TARGETS scale_append EXPORT scaleConfig
+install(
+    TARGETS scale EXPORT scaleConfig
     LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
     ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
     RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
diff --git a/cmake/feature_option.cmake b/cmake/feature_option.cmake
new file mode 100644
index 0000000..e33c8c6
--- /dev/null
+++ b/cmake/feature_option.cmake
@@ -0,0 +1,14 @@
+#
+# Copyright Quadrivium LLC
+# All Rights Reserved
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# Connects CMake options with vcpkg features
+function (feature_option variable feature_name help_text default)
+  if(PACKAGE_MANAGER STREQUAL "vcpkg" AND ${feature_name} IN_LIST VCPKG_MANIFEST_FEATURES)
+    set(${variable} ON CACHE BOOL ${help_text} FORCE)
+  else()
+    set(${variable} ${default} CACHE BOOL ${help_text})
+  endif()
+endfunction()
diff --git a/src/CMakeLists.txt b/cmake/generate_decompose_and_apply.cmake
similarity index 54%
rename from src/CMakeLists.txt
rename to cmake/generate_decompose_and_apply.cmake
index 2746c1f..99094db 100644
--- a/src/CMakeLists.txt
+++ b/cmake/generate_decompose_and_apply.cmake
@@ -1,3 +1,9 @@
+#
+# Copyright Quadrivium LLC
+# All Rights Reserved
+# SPDX-License-Identifier: Apache-2.0
+#
+
"${CMAKE_SOURCE_DIR}/scripts/generate_decompose_and_apply_hpp.sh") set(DECOMPOSE_AND_APPLY_HPP_IN "${CMAKE_SOURCE_DIR}/include/scale/detail/decompose_and_apply.hpp.in") set(DECOMPOSE_AND_APPLY_HPP "${CMAKE_BINARY_DIR}/include/scale/detail/decompose_and_apply.hpp") @@ -9,28 +15,3 @@ add_custom_command( COMMENT "Generating include/scale/detail/decompose_and_apply.hpp" VERBATIM ) - -add_library(scale - scale_error.cpp - ${DECOMPOSE_AND_APPLY_HPP} - ) -target_include_directories(scale PUBLIC - $ - $ - $ - ) -target_link_libraries(scale - Boost::boost - ) - -add_library(scale_append - encode_append.cpp -) -target_include_directories(scale_append PUBLIC - $ - $ - $ -) -target_link_libraries(scale_append - scale -) diff --git a/include/scale/bit_vector.hpp b/include/scale/bit_vector.hpp index 3ad4cb8..c27e353 100644 --- a/include/scale/bit_vector.hpp +++ b/include/scale/bit_vector.hpp @@ -1263,16 +1263,16 @@ namespace scale { if (sbf_ and new_size > arr_.size() * CHAR_BIT) { switch_to_vector(); } - if (not sbf_) { - if (new_size != vec_.size() * CHAR_BIT) { - vec_.resize((new_size + CHAR_BIT - 1) / CHAR_BIT, 0); - } - } if (new_size <= size_) { auto data = sbf_ ? arr_.data() : vec_.data(); data[new_size / CHAR_BIT] &= static_cast(-1) >> (CHAR_BIT - (new_size % CHAR_BIT)); } + if (not sbf_) { + if (new_size != vec_.size() * CHAR_BIT) { + vec_.resize((new_size + CHAR_BIT - 1) / CHAR_BIT, 0); + } + } if (sbf_ and new_size > size_) { for (auto i = (size_ + CHAR_BIT - 1) / CHAR_BIT; i <= new_size / CHAR_BIT; diff --git a/include/scale/encode_append.hpp b/include/scale/encode_append.hpp index 0522846..3f83144 100644 --- a/include/scale/encode_append.hpp +++ b/include/scale/encode_append.hpp @@ -40,6 +40,52 @@ namespace scale { * \param self_encoded * @return success if input was appended to self_encoded, failure otherwise */ - outcome::result append_or_new_vec(std::vector &self_encoded, - ConstSpanOfBytes input); + inline outcome::result append_or_new_vec( + std::vector &self_encoded, ConstSpanOfBytes input) { + EncodeOpaqueValue opaque_value{.v = input}; + + // No data present, just encode the given input data. 
+    if (self_encoded.empty()) {
+      backend::ToBytes encoder(self_encoded);
+      encode(std::vector{opaque_value}, encoder);
+      return outcome::success();
+    }
+
+    // Take old size, calculate old size length and encode new size
+    OUTCOME_TRY(size, impl::memory::decode<Compact<uint32_t>>(self_encoded));
+    auto old_size = untagged(size);
+    auto new_size = old_size + 1;
+    auto encoded_old_size_len = lengthOfEncodedCompactInteger(old_size);
+    OUTCOME_TRY(encoded_new_size, impl::memory::encode(as_compact(new_size)));
+
+    const auto old_data_size = self_encoded.size();
+    const auto encoded_new_size_len = encoded_new_size.size();
+    const auto shift_size = encoded_new_size_len - encoded_old_size_len;
+
+    // if old and new encoded size length is equal, no need to shift data
+    if (encoded_old_size_len != encoded_new_size_len) {
+      // reserve place for new size length, old vector and new vector
+      self_encoded.reserve(old_data_size + shift_size + opaque_value.v.size());
+
+      // increase size to make space for new size encoding
+      self_encoded.resize(old_data_size + shift_size);
+
+      // shift existing data
+      std::memmove(self_encoded.data() + encoded_new_size_len,
+                   self_encoded.data() + encoded_old_size_len,
+                   old_data_size - encoded_old_size_len);
+    } else {
+      // reserve place for existing and new vector
+      self_encoded.reserve(old_data_size + opaque_value.v.size());
+    }
+
+    // copy new size bytes at the beginning
+    std::memmove(
+        self_encoded.data(), encoded_new_size.data(), encoded_new_size.size());
+    // append new data bytes
+    self_encoded.insert(
+        self_encoded.end(), opaque_value.v.begin(), opaque_value.v.end());
+    return outcome::success();
+  }
+
 }  // namespace scale
diff --git a/include/scale/scale_error.hpp b/include/scale/scale_error.hpp
index 75187a2..8d91074 100644
--- a/include/scale/scale_error.hpp
+++ b/include/scale/scale_error.hpp
@@ -49,3 +49,50 @@
 
 OUTCOME_HPP_DECLARE_ERROR(scale, EncodeError)
 OUTCOME_HPP_DECLARE_ERROR(scale, DecodeError)
+
+/**
+ * @brief Defines the error category for SCALE encoding errors.
+ * @param e The specific encoding error.
+ * @return A string describing the error.
+ */
+inline OUTCOME_CPP_DEFINE_CATEGORY(scale, EncodeError, e) {
+  using scale::EncodeError;
+  switch (e) {
+    case EncodeError::NEGATIVE_INTEGER:
+      return "SCALE encode: negative integers is not supported";
+    case EncodeError::VALUE_TOO_BIG_FOR_COMPACT_REPRESENTATION:
+      return "SCALE decode: value too big for compact representation";
+    case EncodeError::DEREF_NULLPOINTER:
+      return "SCALE encode: attempt to dereference a nullptr";
+  }
+  return "unknown EncodeError";
+}
+
+/**
+ * @brief Defines the error category for SCALE decoding errors.
+ * @param e The specific decoding error.
+ * @return A string describing the error.
+ */
+inline OUTCOME_CPP_DEFINE_CATEGORY(scale, DecodeError, e) {
+  using scale::DecodeError;
+  switch (e) {
+    case DecodeError::NOT_ENOUGH_DATA:
+      return "SCALE decode: not enough data to decode";
+    case DecodeError::UNEXPECTED_VALUE:
+      return "SCALE decode: unexpected value occurred";
+    case DecodeError::TOO_MANY_ITEMS:
+      return "SCALE decode: collection has too many items or memory is out or "
+             "data is damaged, unable to unpack";
+    case DecodeError::WRONG_TYPE_INDEX:
+      return "SCALE decode: wrong type index, cannot decode variant";
+    case DecodeError::INVALID_ENUM_VALUE:
+      return "SCALE decode: decoded enum value does not belong to the enum";
+    case DecodeError::UNUSED_BITS_ARE_SET:
+      return "SCALE decode: bits which must be unused have set";
+    case DecodeError::REDUNDANT_COMPACT_ENCODING:
+      return "SCALE decode: redundant bytes in compact encoding";
+    case DecodeError::DECODED_VALUE_OVERFLOWS_TARGET:
+      return "SCALE decode: encoded value overflows target type";
+  }
+  return "unknown SCALE DecodeError";
+}
diff --git a/src/encode_append.cpp b/src/encode_append.cpp
deleted file mode 100644
index 93011ca..0000000
--- a/src/encode_append.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Copyright Quadrivium LLC
- * All Rights Reserved
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include <scale/encode_append.hpp>
-#include <scale/scale.hpp>
-
-#include <cstring>
-
-namespace scale {
-
-  outcome::result<void> append_or_new_vec(std::vector<uint8_t> &self_encoded,
-                                          ConstSpanOfBytes input) {
-    EncodeOpaqueValue opaque_value{.v = input};
-
-    // No data present, just encode the given input data.
-    if (self_encoded.empty()) {
-      backend::ToBytes encoder(self_encoded);
-      encode(std::vector{opaque_value}, encoder);
-      return outcome::success();
-    }
-
-    // Take old size, calculate old size length and encode new size
-    OUTCOME_TRY(size, impl::memory::decode<Compact<uint32_t>>(self_encoded));
-    auto old_size = untagged(size);
-    auto new_size = old_size + 1;
-    auto encoded_old_size_len = lengthOfEncodedCompactInteger(old_size);
-    OUTCOME_TRY(encoded_new_size, impl::memory::encode(as_compact(new_size)));
-
-    // If old and new encoded size length is equal, we don't need to copy the
-    // already encoded data.
-    if (encoded_old_size_len != encoded_new_size.size()) {
-      // reserve place for new size length, old vector and new vector
-      self_encoded.reserve(encoded_new_size.size()
-                           + (self_encoded.size() - encoded_old_size_len)
-                           + opaque_value.v.size());
-
-      // shift the data bytes in a container to give space for the new Compact
-      // encoded length prefix
-      const auto shift_size = encoded_new_size.size() - encoded_old_size_len;
-      self_encoded.resize(self_encoded.size() + shift_size);
-      std::memmove(self_encoded.data() + encoded_new_size.size(),
-                   self_encoded.data() + encoded_old_size_len,
-                   self_encoded.size() - shift_size);
-    } else {
-      // reserve place for existing and new vector
-      self_encoded.reserve(self_encoded.size() + opaque_value.v.size());
-    }
-    // copy new size bytes
-    std::memmove(
-        self_encoded.data(), encoded_new_size.data(), encoded_new_size.size());
-    // copy new data bytes
-    self_encoded.insert(
-        self_encoded.end(), opaque_value.v.begin(), opaque_value.v.end());
-    return outcome::success();
-  }
-}  // namespace scale
diff --git a/src/scale_error.cpp b/src/scale_error.cpp
deleted file mode 100644
index cd06dac..0000000
--- a/src/scale_error.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Copyright Quadrivium LLC
- * All Rights Reserved
- * SPDX-License-Identifier: Apache-2.0
- */
-
-/**
- * @brief Defines error categories for SCALE encoding and decoding.
- *
- * This file contains the implementation of error categories for
- * SCALE encoding and decoding errors, allowing them to be
- * used with the Outcome error handling library.
- */
-
-#include <scale/scale_error.hpp>
-
-/**
- * @brief Defines the error category for SCALE encoding errors.
- * @param e The specific encoding error.
- * @return A string describing the error.
- */
-OUTCOME_CPP_DEFINE_CATEGORY(scale, EncodeError, e) {
-  using scale::EncodeError;
-  switch (e) {
-    case EncodeError::NEGATIVE_INTEGER:
-      return "SCALE encode: negative integers is not supported";
-    case EncodeError::VALUE_TOO_BIG_FOR_COMPACT_REPRESENTATION:
-      return "SCALE decode: value too big for compact representation";
-    case EncodeError::DEREF_NULLPOINTER:
-      return "SCALE encode: attempt to dereference a nullptr";
-  }
-  return "unknown EncodeError";
-}
-
-/**
- * @brief Defines the error category for SCALE decoding errors.
- * @param e The specific decoding error.
- * @return A string describing the error.
- */
-OUTCOME_CPP_DEFINE_CATEGORY(scale, DecodeError, e) {
-  using scale::DecodeError;
-  switch (e) {
-    case DecodeError::NOT_ENOUGH_DATA:
-      return "SCALE decode: not enough data to decode";
-    case DecodeError::UNEXPECTED_VALUE:
-      return "SCALE decode: unexpected value occurred";
-    case DecodeError::TOO_MANY_ITEMS:
-      return "SCALE decode: collection has too many items or memory is out or "
-             "data is damaged, unable to unpack";
-    case DecodeError::WRONG_TYPE_INDEX:
-      return "SCALE decode: wrong type index, cannot decode variant";
-    case DecodeError::INVALID_ENUM_VALUE:
-      return "SCALE decode: decoded enum value does not belong to the enum";
-    case DecodeError::UNUSED_BITS_ARE_SET:
-      return "SCALE decode: bits which must be unused have set";
-    case DecodeError::REDUNDANT_COMPACT_ENCODING:
-      return "SCALE decode: redundant bytes in compact encoding";
-    case DecodeError::DECODED_VALUE_OVERFLOWS_TARGET:
-      return "SCALE decode: encoded value overflows target type";
-  }
-  return "unknown SCALE DecodeError";
-}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 91feea7..2aaccf6 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -59,7 +59,7 @@ addtest(scale_encode_append_test
     scale_encode_append_test.cpp
 )
 target_link_libraries(scale_encode_append_test
-    scale_append
+    scale
 )
 
 addtest(scale_compact_integer_test
diff --git a/vcpkg.json b/vcpkg.json
index 804fb10..8a05792 100644
--- a/vcpkg.json
+++ b/vcpkg.json
@@ -1,12 +1,18 @@
 {
   "name": "scale",
-  "version": "2.0.0",
+  "version": "2.0.1",
   "dependencies": [
     "qtils",
     "boost-multiprecision",
     "boost-endian"
   ],
   "features": {
+    "jam-compatibility": {
+      "description": "Encoding/decoding compatible with JAM"
+    },
+    "configurable-coding": {
+      "description": "Enable configurable encoding/decoding"
+    },
     "scale-tests": {
       "description": "Test of scale encoding/decoding",
       "dependencies": [