3 changes: 2 additions & 1 deletion cpp/bench/prims/core/copy.cu
@@ -22,7 +22,8 @@
 #include <raft/core/host_mdarray.hpp>
 #include <raft/core/mdspan_types.hpp>
 #include <raft/core/memory_type.hpp>
-#include <raft/thirdparty/mdspan/include/experimental/mdspan>
+
+#include <cuda/std/mdspan>
 
 #include <cstdint>
 #include <numeric>
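Note: this file's only change is the include swap shown above; the vendored `raft/thirdparty` mdspan header gives way to libcu++'s `<cuda/std/mdspan>`. A minimal standalone sketch of the replacement type, assuming a CCCL-enabled toolchain; everything below is illustrative, not part of this PR:

```cpp
// Illustrative sketch, not part of this PR.
#include <cuda/std/mdspan>

#include <cstddef>
#include <cstdio>

int main()
{
  float data[6] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
  // 2 x 3 view over the flat buffer; layout_right (row-major) is the default.
  cuda::std::mdspan<float, cuda::std::dextents<int, 2>> view(data, 2, 3);
  // The layout mapping turns (row, col) into a linear offset: 1 * 3 + 2 == 5.
  std::size_t off = static_cast<std::size_t>(view.mapping()(1, 2));
  std::printf("offset %zu holds %.1f\n", off, static_cast<double>(data[off]));
  return 0;
}
```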
7 changes: 4 additions & 3 deletions cpp/include/raft/core/detail/fail_container_policy.hpp
@@ -18,7 +18,8 @@
 #include <raft/core/error.hpp>
 #include <raft/core/logger.hpp>
 #include <raft/core/resources.hpp>
-#include <raft/thirdparty/mdspan/include/experimental/mdspan>
+
+#include <cuda/std/mdspan>
 
 #include <stddef.h>
 
@@ -122,8 +123,8 @@ struct fail_container_policy {
   using reference = typename container_type::reference;
   using const_reference = typename container_type::const_reference;
 
-  using accessor_policy = std::experimental::default_accessor<element_type>;
-  using const_accessor_policy = std::experimental::default_accessor<element_type const>;
+  using accessor_policy = cuda::std::default_accessor<element_type>;
+  using const_accessor_policy = cuda::std::default_accessor<element_type const>;
 
   auto create(raft::resources const& res, size_t n) -> container_type { return container_type(n); }
 
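The accessor aliases here (and in the container policies below) swap `std::experimental::default_accessor` for `cuda::std::default_accessor`. The two are behaviorally identical; a standalone sketch of the accessor contract, assuming only `<cuda/std/mdspan>` (not RAFT code):

```cpp
// Sketch of the default_accessor contract: access(p, i) is p[i] and
// offset(p, i) is p + i, so the alias swap changes no behavior.
#include <cuda/std/mdspan>

#include <cassert>

int main()
{
  int data[4] = {10, 20, 30, 40};
  cuda::std::default_accessor<int> acc;
  assert(acc.access(data, 2) == 30);
  assert(acc.offset(data, 3) == data + 3);
  return 0;
}
```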
12 changes: 6 additions & 6 deletions cpp/include/raft/core/detail/mdspan_util.cuh
@@ -16,27 +16,27 @@
 #pragma once
 
 #include <raft/core/detail/macros.hpp>
-#include <raft/thirdparty/mdspan/include/experimental/mdspan>
+
 #include <cstdint>
 #include <tuple>
 #include <utility>
 
 namespace raft::detail {
 
 template <class T, std::size_t N, std::size_t... Idx>
-MDSPAN_INLINE_FUNCTION constexpr auto arr_to_tup(T (&arr)[N], std::index_sequence<Idx...>)
+RAFT_INLINE_FUNCTION constexpr auto arr_to_tup(T (&arr)[N], std::index_sequence<Idx...>)
 {
   return std::make_tuple(arr[Idx]...);
 }
 
 template <class T, std::size_t N>
-MDSPAN_INLINE_FUNCTION constexpr auto arr_to_tup(T (&arr)[N])
+RAFT_INLINE_FUNCTION constexpr auto arr_to_tup(T (&arr)[N])
 {
   return arr_to_tup(arr, std::make_index_sequence<N>{});
 }
 
 template <typename T>
-MDSPAN_INLINE_FUNCTION auto native_popc(T v) -> int32_t
+RAFT_INLINE_FUNCTION auto native_popc(T v) -> int32_t
 {
   int c = 0;
   for (; v != 0; v &= v - 1) {
@@ -45,7 +45,7 @@ MDSPAN_INLINE_FUNCTION auto native_popc(T v) -> int32_t
   return c;
 }
 
-MDSPAN_INLINE_FUNCTION auto popc(uint32_t v) -> int32_t
+RAFT_INLINE_FUNCTION auto popc(uint32_t v) -> int32_t
 {
 #if defined(__CUDA_ARCH__)
   return __popc(v);
@@ -56,7 +56,7 @@ MDSPAN_INLINE_FUNCTION auto popc(uint32_t v) -> int32_t
 #endif  // compiler
 }
 
-MDSPAN_INLINE_FUNCTION auto popc(uint64_t v) -> int32_t
+RAFT_INLINE_FUNCTION auto popc(uint64_t v) -> int32_t
 {
 #if defined(__CUDA_ARCH__)
   return __popcll(v);
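Only the inline-function macro changes in this file; the popcount logic is untouched. For context, the `native_popc` fallback above is Kernighan's bit-clearing loop, which `__popc`/`__popcll` replace on device. A standalone sketch of the same logic with a few checks (illustrative, not part of this PR):

```cpp
// Standalone sketch of the native_popc fallback: v &= v - 1 clears the
// lowest set bit, so the loop body runs once per set bit.
#include <cassert>
#include <cstdint>

int popcount_fallback(std::uint32_t v)
{
  int c = 0;
  for (; v != 0; v &= v - 1) {
    c++;
  }
  return c;
}

int main()
{
  assert(popcount_fallback(0u) == 0);
  assert(popcount_fallback(0xffu) == 8);
  assert(popcount_fallback(0x80000001u) == 2);
  return 0;
}
```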
4 changes: 2 additions & 2 deletions cpp/include/raft/core/device_container_policy.hpp
@@ -159,8 +159,8 @@ class device_uvector_policy {
   using reference = device_reference<element_type>;
   using const_reference = device_reference<element_type const>;
 
-  using accessor_policy = std::experimental::default_accessor<element_type>;
-  using const_accessor_policy = std::experimental::default_accessor<element_type const>;
+  using accessor_policy = cuda::std::default_accessor<element_type>;
+  using const_accessor_policy = cuda::std::default_accessor<element_type const>;
 
  public:
   auto create(raft::resources const& res, size_t n) -> container_type
11 changes: 5 additions & 6 deletions cpp/include/raft/core/device_mdspan.hpp
@@ -33,7 +33,7 @@ using device_accessor = host_device_accessor<AccessorPolicy, memory_type::device
 template <typename ElementType,
           typename Extents,
           typename LayoutPolicy = layout_c_contiguous,
-          typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
+          typename AccessorPolicy = cuda::std::default_accessor<ElementType>>
 using device_mdspan = mdspan<ElementType, Extents, LayoutPolicy, device_accessor<AccessorPolicy>>;
 
 template <typename T, bool B>
@@ -121,7 +121,7 @@ using device_aligned_matrix_view =
   device_mdspan<ElementType,
                 matrix_extent<IndexType>,
                 LayoutPolicy,
-                std::experimental::aligned_accessor<ElementType, detail::alignment::value>>;
+                cuda::std::aligned_accessor<ElementType, detail::alignment::value>>;
 
 /**
  * @brief Create a 2-dim 128 byte aligned mdspan instance for device pointer. It's
@@ -140,8 +140,7 @@
 auto constexpr make_device_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
 {
   using data_handle_type =
-    typename std::experimental::aligned_accessor<ElementType,
-                                                 detail::alignment::value>::data_handle_type;
+    typename cuda::std::aligned_accessor<ElementType, detail::alignment::value>::data_handle_type;
   static_assert(std::is_same<LayoutPolicy, layout_left_padded<ElementType>>::value ||
                 std::is_same<LayoutPolicy, layout_right_padded<ElementType>>::value);
   assert(reinterpret_cast<std::uintptr_t>(ptr) ==
@@ -217,7 +216,7 @@ auto constexpr make_device_strided_matrix_view(ElementType* ptr,
   assert(is_row_major ? stride0 >= n_cols : stride1 >= n_rows);
   matrix_extent<IndexType> extents{n_rows, n_cols};
 
-  auto layout = make_strided_layout(extents, std::array<IndexType, 2>{stride0, stride1});
+  auto layout = make_strided_layout(extents, cuda::std::array<IndexType, 2>{stride0, stride1});
   return device_matrix_view<ElementType, IndexType, layout_stride>{ptr, layout};
 }
 
@@ -273,6 +272,6 @@ auto constexpr make_device_vector_view(
 template <typename IndexType>
 auto make_vector_strided_layout(IndexType n, IndexType stride)
 {
-  return make_strided_layout(vector_extent<IndexType>{n}, std::array<IndexType, 1>{stride});
+  return make_strided_layout(vector_extent<IndexType>{n}, cuda::std::array<IndexType, 1>{stride});
 }
 }  // end namespace raft
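The strided factories now pass `cuda::std::array` strides into `make_strided_layout`. A standalone sketch of the `layout_stride` mapping this produces, assuming CCCL's mdspan and array headers (the 2 x 3 matrix with a padded row stride of 4 is illustrative):

```cpp
// Standalone sketch: the layout_stride mapping that the strided factories
// build internally, with cuda::std::array strides as in this PR.
#include <cuda/std/array>
#include <cuda/std/mdspan>

#include <cstdio>

int main()
{
  float data[8] = {};  // 2 x 3 matrix stored with a padded row stride of 4
  cuda::std::dextents<int, 2> extents{2, 3};
  cuda::std::layout_stride::mapping mapping{extents, cuda::std::array<int, 2>{4, 1}};
  cuda::std::mdspan view{data, mapping};
  // Element (1, 2) lives at 1 * 4 + 2 * 1 == 6 in the padded buffer.
  std::printf("offset of (1, 2): %d\n", static_cast<int>(view.mapping()(1, 2)));
  return 0;
}
```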
2 changes: 1 addition & 1 deletion cpp/include/raft/core/device_span.hpp
@@ -28,7 +28,7 @@ namespace raft {
 /**
  * @brief A span class for device pointer.
  */
-template <typename T, size_t extent = std::experimental::dynamic_extent>
+template <typename T, size_t extent = cuda::std::dynamic_extent>
 using device_span = span<T, true, extent>;
 
 /**
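The default span extent now comes from `cuda::std::dynamic_extent`. A standalone sketch of the two extent modes using plain `cuda::std::span` (`raft::device_span` itself wraps device memory, so this host-side stand-in is illustrative):

```cpp
// Sketch: dynamic vs. fixed extents, now sourced from cuda::std.
#include <cuda/std/span>

#include <cassert>

int main()
{
  int data[5] = {1, 2, 3, 4, 5};
  cuda::std::span<int> dyn{data};       // extent == cuda::std::dynamic_extent
  cuda::std::span<int, 5> fixed{data};  // extent baked into the type
  static_assert(decltype(dyn)::extent == cuda::std::dynamic_extent);
  assert(dyn.size() == fixed.size());
  return 0;
}
```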
4 changes: 2 additions & 2 deletions cpp/include/raft/core/host_container_policy.hpp
@@ -41,8 +41,8 @@ class host_vector_policy {
   using const_pointer = typename container_type::const_pointer;
   using reference = element_type&;
   using const_reference = element_type const&;
-  using accessor_policy = std::experimental::default_accessor<element_type>;
-  using const_accessor_policy = std::experimental::default_accessor<element_type const>;
+  using accessor_policy = cuda::std::default_accessor<element_type>;
+  using const_accessor_policy = cuda::std::default_accessor<element_type const>;
 
  public:
   auto create(raft::resources const&, size_t n) -> container_type { return container_type(n); }
9 changes: 4 additions & 5 deletions cpp/include/raft/core/host_mdspan.hpp
@@ -33,7 +33,7 @@ using host_accessor = host_device_accessor<AccessorPolicy, memory_type::host>;
 template <typename ElementType,
           typename Extents,
           typename LayoutPolicy = layout_c_contiguous,
-          typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
+          typename AccessorPolicy = cuda::std::default_accessor<ElementType>>
 using host_mdspan = mdspan<ElementType, Extents, LayoutPolicy, host_accessor<AccessorPolicy>>;
 
 template <typename T, bool B>
@@ -118,7 +118,7 @@ using host_aligned_matrix_view =
   host_mdspan<ElementType,
               matrix_extent<IndexType>,
               LayoutPolicy,
-              std::experimental::aligned_accessor<ElementType, detail::alignment::value>>;
+              cuda::std::aligned_accessor<ElementType, detail::alignment::value>>;
 
 /**
  * @brief Create a 2-dim 128 byte aligned mdspan instance for host pointer. It's
@@ -137,8 +137,7 @@
 auto constexpr make_host_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
 {
   using data_handle_type =
-    typename std::experimental::aligned_accessor<ElementType,
-                                                 detail::alignment::value>::data_handle_type;
+    typename cuda::std::aligned_accessor<ElementType, detail::alignment::value>::data_handle_type;
 
   static_assert(std::is_same<LayoutPolicy, layout_left_padded<ElementType>>::value ||
                 std::is_same<LayoutPolicy, layout_right_padded<ElementType>>::value);
@@ -214,7 +213,7 @@ auto constexpr make_host_strided_matrix_view(ElementType* ptr,
   assert(is_row_major ? stride0 >= n_cols : stride1 >= n_rows);
   matrix_extent<IndexType> extents{n_rows, n_cols};
 
-  auto layout = make_strided_layout(extents, std::array<IndexType, 2>{stride0, stride1});
+  auto layout = make_strided_layout(extents, cuda::std::array<IndexType, 2>{stride0, stride1});
   return host_matrix_view<ElementType, IndexType, layout_stride>{ptr, layout};
 }
 
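`aligned_accessor` moves to `cuda::std::` as well. The runtime `assert` in the factories above exists because forming the aligned view from an under-aligned pointer is undefined. A standalone sketch of the contract, assuming `aligned_accessor` is available in the targeted CCCL (buffer and extents are illustrative):

```cpp
// Sketch: an over-aligned buffer viewed through cuda::std::aligned_accessor.
// The accessor encodes a 128-byte alignment promise in the mdspan type.
#include <cuda/std/mdspan>

#include <cassert>
#include <cstdint>

int main()
{
  alignas(128) static float buffer[32] = {};
  // Mirror of the factories' runtime check: the pointer must satisfy the
  // promised alignment before the aligned view is formed.
  assert(reinterpret_cast<std::uintptr_t>(buffer) % 128 == 0);
  using acc_t = cuda::std::aligned_accessor<float, 128>;
  cuda::std::mdspan<float, cuda::std::extents<int, 4, 8>, cuda::std::layout_right, acc_t>
    view{buffer};
  assert(view.extent(0) == 4 && view.extent(1) == 8);
  return 0;
}
```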
2 changes: 1 addition & 1 deletion cpp/include/raft/core/host_span.hpp
@@ -28,7 +28,7 @@ namespace raft {
 /**
  * @brief A span class for host pointer.
  */
-template <typename T, size_t extent = std::experimental::dynamic_extent>
+template <typename T, size_t extent = cuda::std::dynamic_extent>
 using host_span = span<T, false, extent>;
 
 /**
4 changes: 2 additions & 2 deletions cpp/include/raft/core/managed_container_policy.hpp
@@ -39,8 +39,8 @@ class managed_uvector_policy {
   using reference = device_reference<element_type>;
   using const_reference = device_reference<element_type const>;
 
-  using accessor_policy = std::experimental::default_accessor<element_type>;
-  using const_accessor_policy = std::experimental::default_accessor<element_type const>;
+  using accessor_policy = cuda::std::default_accessor<element_type>;
+  using const_accessor_policy = cuda::std::default_accessor<element_type const>;
 
   auto create(raft::resources const& res, size_t n) -> container_type
   {
7 changes: 3 additions & 4 deletions cpp/include/raft/core/managed_mdspan.hpp
@@ -33,7 +33,7 @@ using managed_accessor = host_device_accessor<AccessorPolicy, memory_type::manag
 template <typename ElementType,
           typename Extents,
           typename LayoutPolicy = layout_c_contiguous,
-          typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
+          typename AccessorPolicy = cuda::std::default_accessor<ElementType>>
 using managed_mdspan = mdspan<ElementType, Extents, LayoutPolicy, managed_accessor<AccessorPolicy>>;
 
 template <typename T, bool B>
@@ -122,7 +122,7 @@ using managed_aligned_matrix_view =
   managed_mdspan<ElementType,
                  matrix_extent<IndexType>,
                  LayoutPolicy,
-                 std::experimental::aligned_accessor<ElementType, detail::alignment::value>>;
+                 cuda::std::aligned_accessor<ElementType, detail::alignment::value>>;
 
 /**
  * @brief Create a 2-dim 128 byte aligned mdspan instance for managed pointer. It's
@@ -143,8 +143,7 @@ auto constexpr make_managed_aligned_matrix_view(ElementType* ptr,
                                                 IndexType n_cols)
 {
   using data_handle_type =
-    typename std::experimental::aligned_accessor<ElementType,
-                                                 detail::alignment::value>::data_handle_type;
+    typename cuda::std::aligned_accessor<ElementType, detail::alignment::value>::data_handle_type;
   static_assert(std::is_same<LayoutPolicy, layout_left_padded<ElementType>>::value ||
                 std::is_same<LayoutPolicy, layout_right_padded<ElementType>>::value);
   assert(reinterpret_cast<std::uintptr_t>(ptr) ==
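The managed variant mirrors the device and host changes. What the type encodes is that the allocation is reachable from both host and device. A hypothetical standalone .cu sketch of that property using a raw `cuda::std::mdspan` over a `cudaMallocManaged` buffer (kernel name and sizes are illustrative; element access goes through the mapping rather than RAFT's paren operator):

```cpp
// Hypothetical sketch, compiled as a .cu file with nvcc; not part of this PR.
#include <cuda/std/mdspan>
#include <cuda_runtime.h>

#include <cstdio>

using matrix_t = cuda::std::mdspan<float, cuda::std::dextents<int, 2>>;

__global__ void fill_kernel(matrix_t m)
{
  // Index through the layout mapping to stay agnostic of the element-access
  // operator; (i, j) maps to the linear offset i * extent(1) + j here.
  for (int i = 0; i < m.extent(0); ++i) {
    for (int j = 0; j < m.extent(1); ++j) {
      m.data_handle()[m.mapping()(i, j)] = static_cast<float>(i * m.extent(1) + j);
    }
  }
}

int main()
{
  float* ptr = nullptr;
  cudaMallocManaged(&ptr, 2 * 3 * sizeof(float));  // reachable from host and device
  fill_kernel<<<1, 1>>>(matrix_t{ptr, 2, 3});
  cudaDeviceSynchronize();
  std::printf("element (1, 2) = %.1f\n", static_cast<double>(ptr[1 * 3 + 2]));  // 5.0
  cudaFree(ptr);
  return 0;
}
```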