Skip to content

Commit a370bbf

Browse files
codablockUdjinM6
authored andcommitted
Update immer library to current master (0a718d2d76bab6ebdcf43de943bd6c7d2dbfe2f9) (#2821)
* Update immer library to current master (0a718d2d76bab6ebdcf43de943bd6c7d2dbfe2f9) * Temporary fix for alignof(std::max_align_t) on MinGW 32bit builds See arximboldi/immer#78
1 parent 9f04855 commit a370bbf

28 files changed

+1066
-324
lines changed

src/immer/array.hpp

+9-10
Original file line numberDiff line numberDiff line change
@@ -35,10 +35,6 @@ class array_transient;
3535
* of doubt, measure. For basic types, using an `array` when
3636
* :math:`n < 100` is a good heuristic.
3737
*
38-
* .. warning:: The current implementation depends on
39-
* ``boost::intrusive_ptr`` and does not support :doc:`memory
40-
* policies<memory>`. This will be fixed soon.
41-
*
4238
* @endrst
4339
*/
4440
template <typename T, typename MemoryPolicy = default_memory_policy>
@@ -73,23 +69,26 @@ class array
7369
array() = default;
7470

7571
/*!
76-
* Constructs a vector containing the elements in `values`.
72+
* Constructs an array containing the elements in `values`.
7773
*/
7874
array(std::initializer_list<T> values)
7975
: impl_{impl_t::from_initializer_list(values)}
8076
{}
8177

8278
/*!
83-
* Constructs a vector containing the elements in the range
84-
* defined by the input iterators `first` and `last`.
79+
* Constructs an array containing the elements in the range
80+
* defined by the forward iterator `first` and range sentinel `last`.
8581
*/
86-
template <typename Iter>
87-
array(Iter first, Iter last)
82+
template <typename Iter, typename Sent,
83+
std::enable_if_t
84+
<detail::compatible_sentinel_v<Iter, Sent>
85+
&& detail::is_forward_iterator_v<Iter>, bool> = true>
86+
array(Iter first, Sent last)
8887
: impl_{impl_t::from_range(first, last)}
8988
{}
9089

9190
/*!
92-
* Constructs a vector containing the element `val` repeated `n`
91+
* Constructs an array containing the element `val` repeated `n`
9392
* times.
9493
*/
9594
array(size_type n, T v = {})

src/immer/atom.hpp

+259
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,259 @@
1+
//
2+
// immer: immutable data structures for C++
3+
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
4+
//
5+
// This software is distributed under the Boost Software License, Version 1.0.
6+
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
7+
//
8+
9+
#pragma once
10+
11+
#include <immer/box.hpp>
12+
#include <immer/refcount/no_refcount_policy.hpp>
13+
14+
#include <atomic>
15+
#include <type_traits>
16+
17+
namespace immer {
18+
19+
namespace detail {
20+
21+
template <typename T, typename MemoryPolicy>
22+
struct refcount_atom_impl
23+
{
24+
using box_type = box<T, MemoryPolicy>;
25+
using value_type = T;
26+
using memory_policy = MemoryPolicy;
27+
using spinlock_t = typename MemoryPolicy::refcount::spinlock_type;
28+
using scoped_lock_t = typename spinlock_t::scoped_lock;
29+
30+
refcount_atom_impl(const refcount_atom_impl&) = delete;
31+
refcount_atom_impl(refcount_atom_impl&&) = delete;
32+
refcount_atom_impl& operator=(const refcount_atom_impl&) = delete;
33+
refcount_atom_impl& operator=(refcount_atom_impl&&) = delete;
34+
35+
refcount_atom_impl(box_type b)
36+
: impl_{std::move(b)}
37+
{}
38+
39+
box_type load() const
40+
{
41+
scoped_lock_t lock{lock_};
42+
return impl_;
43+
}
44+
45+
void store(box_type b)
46+
{
47+
scoped_lock_t lock{lock_};
48+
impl_ = std::move(b);
49+
}
50+
51+
box_type exchange(box_type b)
52+
{
53+
{
54+
scoped_lock_t lock{lock_};
55+
swap(b, impl_);
56+
}
57+
return std::move(b);
58+
}
59+
60+
template <typename Fn>
61+
box_type update(Fn&& fn)
62+
{
63+
while (true) {
64+
auto oldv = load();
65+
auto newv = oldv.update(fn);
66+
{
67+
scoped_lock_t lock{lock_};
68+
if (oldv.impl_ == impl_.impl_) {
69+
impl_ = newv;
70+
return { newv };
71+
}
72+
}
73+
}
74+
}
75+
76+
private:
77+
mutable spinlock_t lock_;
78+
box_type impl_;
79+
};
80+
81+
template <typename T, typename MemoryPolicy>
82+
struct gc_atom_impl
83+
{
84+
using box_type = box<T, MemoryPolicy>;
85+
using value_type = T;
86+
using memory_policy = MemoryPolicy;
87+
88+
static_assert(
89+
std::is_same<typename MemoryPolicy::refcount,
90+
no_refcount_policy>::value,
91+
"gc_atom_impl can only be used when there is no refcount!");
92+
93+
gc_atom_impl(const gc_atom_impl&) = delete;
94+
gc_atom_impl(gc_atom_impl&&) = delete;
95+
gc_atom_impl& operator=(const gc_atom_impl&) = delete;
96+
gc_atom_impl& operator=(gc_atom_impl&&) = delete;
97+
98+
gc_atom_impl(box_type b)
99+
: impl_{b.impl_}
100+
{}
101+
102+
box_type load() const
103+
{ return {impl_.load()}; }
104+
105+
void store(box_type b)
106+
{ impl_.store(b.impl_); }
107+
108+
box_type exchange(box_type b)
109+
{ return {impl_.exchange(b.impl_)}; }
110+
111+
template <typename Fn>
112+
box_type update(Fn&& fn)
113+
{
114+
while (true) {
115+
auto oldv = box_type{impl_.load()};
116+
auto newv = oldv.update(fn);
117+
if (impl_.compare_exchange_weak(oldv.impl_, newv.impl_))
118+
return { newv };
119+
}
120+
}
121+
122+
private:
123+
std::atomic<typename box_type::holder*> impl_;
124+
};
125+
126+
} // namespace detail
127+
128+
/*!
129+
* Stores for boxed values of type `T` in a thread-safe manner.
130+
*
131+
* @see box
132+
*
133+
* @rst
134+
*
135+
* .. warning:: If memory policy used includes thread unsafe reference counting,
136+
* no thread safety is assumed, and the atom becomes thread unsafe too!
137+
*
138+
* .. note:: ``box<T>`` provides a value based box of type ``T``, this is, we can
139+
* think about it as a value-based version of ``std::shared_ptr``. In a
140+
* similar fashion, ``atom<T>`` is in spirit the value-based equivalent of
141+
* C++20 ``std::atomic_shared_ptr``. However, the API does not follow
142+
* ``std::atomic`` interface closely, since it attempts to be a higher level
143+
* construction, most similar to Clojure's ``(atom)``. It is remarkable in
144+
* particular that, since ``box<T>`` underlying object is immutable, using
145+
* ``atom<T>`` is fully thread-safe in ways that ``std::atomic_shared_ptr`` is
146+
* not. This is so because dereferencing the underlying pointer in a
147+
* ``std::atomic_shared_ptr`` may require further synchronization, in particular
148+
* when invoking non-const methods.
149+
*
150+
* @endrst
151+
*/
152+
template <typename T,
153+
typename MemoryPolicy = default_memory_policy>
154+
class atom
155+
{
156+
public:
157+
using box_type = box<T, MemoryPolicy>;
158+
using value_type = T;
159+
using memory_policy = MemoryPolicy;
160+
161+
atom(const atom&) = delete;
162+
atom(atom&&) = delete;
163+
void operator=(const atom&) = delete;
164+
void operator=(atom&&) = delete;
165+
166+
/*!
167+
* Constructs an atom holding the value `v`.
168+
*/
169+
atom(box_type v={})
170+
: impl_{std::move(v)}
171+
{}
172+
173+
/*!
174+
* Sets a new value in the atom.
175+
*/
176+
atom& operator=(box_type b)
177+
{
178+
impl_.store(std::move(b));
179+
return *this;
180+
}
181+
182+
/*!
183+
* Reads the currently stored value in a thread-safe manner.
184+
*/
185+
operator box_type() const
186+
{ return impl_.load(); }
187+
188+
/*!
189+
* Reads the currently stored value in a thread-safe manner.
190+
*/
191+
operator value_type() const
192+
{ return *impl_.load(); }
193+
194+
/*!
195+
* Reads the currently stored value in a thread-safe manner.
196+
*/
197+
box_type load() const
198+
{ return impl_.load(); }
199+
200+
/*!
201+
* Stores a new value in a thread-safe manner.
202+
*/
203+
void store(box_type b)
204+
{ impl_.store(std::move(b)); }
205+
206+
/*!
207+
* Stores a new value and returns the old value, in a thread-safe manner.
208+
*/
209+
box_type exchange(box_type b)
210+
{ return impl_.exchange(std::move(b)); }
211+
212+
/*!
213+
* Stores the result of applying `fn` to the current value atomically and
214+
* returns the new resulting value.
215+
*
216+
* @rst
217+
*
218+
* .. warning:: ``fn`` must be a pure function and have no side effects! The
219+
* function might be evaluated multiple times when multiple threads
220+
* contend to update the value.
221+
*
222+
* @endrst
223+
*/
224+
template <typename Fn>
225+
box_type update(Fn&& fn)
226+
{ return impl_.update(std::forward<Fn>(fn)); }
227+
228+
private:
229+
struct get_refcount_atom_impl
230+
{
231+
template <typename U, typename MP>
232+
struct apply
233+
{
234+
using type = detail::refcount_atom_impl<U, MP>;
235+
};
236+
};
237+
238+
struct get_gc_atom_impl
239+
{
240+
template <typename U, typename MP>
241+
struct apply
242+
{
243+
using type = detail::gc_atom_impl<U, MP>;
244+
};
245+
};
246+
247+
// If we are using "real" garbage collection (we assume this when we use
248+
// `no_refcount_policy`), we just store the pointer in an atomic. If we use
249+
// reference counting, we rely on the reference counting spinlock.
250+
using impl_t = typename std::conditional_t<
251+
std::is_same<typename MemoryPolicy::refcount, no_refcount_policy>::value,
252+
get_gc_atom_impl,
253+
get_refcount_atom_impl
254+
>::template apply<T, MemoryPolicy>::type;
255+
256+
impl_t impl_;
257+
};
258+
259+
}

src/immer/box.hpp

+14-1
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,16 @@
1313

1414
namespace immer {
1515

16+
namespace detail {
17+
18+
template <typename U, typename MP>
19+
struct gc_atom_impl;
20+
21+
template <typename U, typename MP>
22+
struct refcount_atom_impl;
23+
24+
} // namespace detail
25+
1626
/*!
1727
* Immutable box for a single value of type `T`.
1828
*
@@ -21,9 +31,12 @@ namespace immer {
2131
* moving just copy the underlying pointers.
2232
*/
2333
template <typename T,
24-
typename MemoryPolicy = default_memory_policy>
34+
typename MemoryPolicy = default_memory_policy>
2535
class box
2636
{
37+
friend struct detail::gc_atom_impl<T, MemoryPolicy>;
38+
friend struct detail::refcount_atom_impl<T, MemoryPolicy>;
39+
2740
struct holder : MemoryPolicy::refcount
2841
{
2942
T value;

src/immer/config.hpp

+10-2
Original file line numberDiff line numberDiff line change
@@ -35,12 +35,20 @@
3535
#define IMMER_TRACE_E(expr) \
3636
IMMER_TRACE(" " << #expr << " = " << (expr))
3737

38+
#if defined(_MSC_VER)
39+
#define IMMER_UNREACHABLE __assume(false)
40+
#define IMMER_LIKELY(cond) cond
41+
#define IMMER_UNLIKELY(cond) cond
42+
#define IMMER_FORCEINLINE __forceinline
43+
#define IMMER_PREFETCH(p)
44+
#else
3845
#define IMMER_UNREACHABLE __builtin_unreachable()
3946
#define IMMER_LIKELY(cond) __builtin_expect(!!(cond), 1)
4047
#define IMMER_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
41-
// #define IMMER_PREFETCH(p) __builtin_prefetch(p)
42-
#define IMMER_PREFETCH(p)
4348
#define IMMER_FORCEINLINE inline __attribute__ ((always_inline))
49+
#define IMMER_PREFETCH(p)
50+
// #define IMMER_PREFETCH(p) __builtin_prefetch(p)
51+
#endif
4452

4553
#define IMMER_DESCENT_DEEP 0
4654

src/immer/detail/arrays/no_capacity.hpp

+7-3
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88

99
#pragma once
1010

11+
#include <immer/algorithm.hpp>
1112
#include <immer/detail/arrays/node.hpp>
1213

1314
namespace immer {
@@ -90,10 +91,13 @@ struct no_capacity
9091
T* data() { return ptr->data(); }
9192
const T* data() const { return ptr->data(); }
9293

93-
template <typename Iter>
94-
static no_capacity from_range(Iter first, Iter last)
94+
template <typename Iter, typename Sent,
95+
std::enable_if_t
96+
<is_forward_iterator_v<Iter>
97+
&& compatible_sentinel_v<Iter, Sent>, bool> = true>
98+
static no_capacity from_range(Iter first, Sent last)
9599
{
96-
auto count = static_cast<size_t>(std::distance(first, last));
100+
auto count = static_cast<size_t>(distance(first, last));
97101
return {
98102
node_t::copy_n(count, first, last),
99103
count,

0 commit comments

Comments
 (0)