-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcache.h
374 lines (332 loc) · 14.8 KB
/
cache.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
// Header-only C++ implementation of LRU cache with all the gems.
// Features: thread safety, serialization, memory monitoring, statistics, etc.
// SPDX-FileCopyrightText: Copyright © 2024 Anatoly Petrov <[email protected]>
// SPDX-License-Identifier: MIT
// Non thread-safe LRU cache.
// For thread-safe implementation see lru_cache/thread_safe_cache.h
#ifndef LRU_CACHE_CACHE_H
#define LRU_CACHE_CACHE_H
#include <algorithm>
#include <cstddef>
#include <format>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <typeinfo>
#include <utility>

#include "lru_cache/detail/utils.h"
#include "lru_cache/serde.h"
#include "lru_cache/stats.h"
#include "lru_cache/traits.h"
namespace lru {
// Non thread-safe LRU cache (see also lru_cache/thread_safe_cache.h).
// The interface mimics the Memcached text protocol (where it makes sense).
// We support the following Memcached-like commands:
// - set, add, replace, get, delete, stats, flush (the last one equal to clear)
// The following Memcached analogous are not provided:
// - append/prepend/incr/decr (this cache is typed, just modify the value)
// - cas/gets (if you need synchronization, use SafeCache)
// - stats items/slabs/sizes (send cache to stream; it prints the content)
// Also, our implementation provides some extended functionality:
// - cache serialization/deserialization (via dump and load commands)
// - limiting of size/memory during execution (via maxsize/maxmem commands)
// - item iteration (pointer dereferencing doesn't touch LRU)
// - printing of cache content to the std::ostream (feature for debugging)
template<
typename Key,
typename Value,
typename Hash = std::hash<Key>,
typename KeyEqual = std::equal_to<Key>,
typename Allocator = std::allocator<std::pair<const Key, Value> > >
class Cache {
public:
// Cache traits.
using Traits = CacheTraits<Key, Value, Hash, KeyEqual, Allocator>;
using Item = typename Traits::Item;
using KeyMem = typename Traits::KeyMem;
using ValueMem = typename Traits::ValueMem;
using Buf = typename Traits::Buf;
using Iter = typename Traits::Iter;
using ConstIter = typename Traits::ConstIter;
using ReverseIter = typename Traits::ReverseIter;
using ConstReverseIter = typename Traits::ConstReverseIter;
using Table = typename Traits::Table;
static constexpr size_t kItemMem = Traits::kItemMem;
// Writes a string representation of the cache to the stream.
template<typename K, typename V>
friend std::ostream &operator<<(std::ostream &, const Cache<K, V> &);
// Returns true if cache items and their LRU order are equal.
// Non-optimized implementation. Use only for debugging/testing.
bool operator ==(const Cache &other) const {
return buf_ == other.buf_;
}
// Returns true if cache items or their LRU order are not equal.
// Non-optimized implementation. Use only for debugging/testing.
bool operator !=(const Cache &other) const {
return buf_ != other.buf_;
}
// Creates a new cache.
// 1) If maxsize and maxmem are not specified, the cache turns to the unbounded cache.
// However, the performance of such an unbounded cache will not be ideal because of LRU.
// 2) For accurate memory monitoring, the client may provide key_mem and value_mem hint functions
// that return the actual size of the dynamic buffer allocated by Key and Value.
// The return value should not include the size of the Key/Value type itself
// because it is already calculated by the cache with the sizeof() function.
explicit Cache(const size_t maxsize = nval,
const size_t maxmem = nval,
KeyMem key_mem = nullptr,
ValueMem value_mem = nullptr): stats_{
0, 0, maxsize, 0, maxmem, 0
}, key_mem_(std::move(key_mem)), value_mem_(std::move(value_mem)) {
}
// Returns an iterator to the beginning of the cache buffer.
Iter begin() { return buf_.begin(); }
// Returns a const iterator to the beginning of the cache buffer.
ConstIter begin() const { return buf_.begin(); }
// Returns an iterator to the end of the cache buffer.
Iter end() { return buf_.end(); }
// Returns a const iterator to the end of the cache buffer.
ConstIter end() const { return buf_.end(); }
// Returns a reverse iterator to the beginning of the cache buffer.
ReverseIter rbegin() { return buf_.rbegin(); }
// Returns a const reverse iterator to the beginning of the cache buffer.
ConstReverseIter rbegin() const { return buf_.rbegin(); }
// Returns a reverse iterator to the end of the cache buffer.
ReverseIter rend() { return buf_.rend(); }
// Returns a const reverse iterator to the end of the cache buffer.
ConstReverseIter rend() const { return buf_.rend(); }
// Stores data, possibly overwriting any existing data.
// New items are at the top of the LRU.
template<typename V>
void Set(const Key &key, V &&value) {
auto it = table_.find(key);
if (it == table_.end()) {
// missed
Push(key, std::forward<V>(value));
} else {
// exists
Update(it->second, std::forward<V>(value));
}
}
// Stores data, only if it does not already exist.
// New items are at the top of the LRU. If an item already exists and an add fails,
// it promotes the item to the front of the LRU anyway.
// Returns true if addition succeeded.
template<typename V>
bool Add(const Key &key, V &&value) {
auto it = table_.find(key);
if (it == table_.end()) {
// missed
Push(key, std::forward<V>(value));
return true;
}
// exists
Touch(it->second);
return false;
}
// Stores this data, but only if the data already exists.
// Returns true if replacement succeeded.
template<typename V>
bool Replace(const Key &key, V &&value) {
auto it = table_.find(key);
// missed
if (it == table_.end()) { return false; }
// exists
Update(it->second, std::forward<V>(value));
return true;
}
// Retrieves data by the key.
// Returns empty optional if an item was not found.
std::optional<std::reference_wrapper<Value> > Get(const Key &key) {
auto it = table_.find(key);
if (it == table_.end()) {
stats_.misses++;
return {};
}
// exists
Touch(it->second);
stats_.hits++;
return it->second->second;
}
// Removes an item from the cache, if it exists.
// Returns true if deletion succeeded.
bool Delete(const Key &key) {
auto it = table_.find(key);
if (it == table_.end()) { return false; }
// exists
const size_t mem = CalcItemMem(*it->second);
buf_.erase(it->second);
table_.erase(it);
stats_.currsize--;
stats_.currmem -= mem;
return true;
}
// Clears the cache without touching hits/misses statistics.
void Flush() {
buf_.clear();
table_.clear();
stats_.currsize = 0;
stats_.currmem = 0;
}
// Returns current item count.
[[nodiscard]] size_t Size() const { return stats_.currsize; }
// Returns current memory usage.
[[nodiscard]] size_t Memory() const { return stats_.currmem; }
// Returns upper limit of item count.
[[nodiscard]] size_t Maxsize() const { return stats_.maxsize; }
// Returns upper limit of memory usage.
[[nodiscard]] size_t Maxmem() const { return stats_.maxmem; }
// Limits maximum item count.
// It also shrinks the cache to the new limit (if needed).
void Maxsize(size_t items) {
if (items == nval) {
stats_.maxsize = items;
return;
}
// not nval
auto start = std::next(buf_.cbegin(),
std::min(items, buf_.size()));
auto stop = buf_.cend();
size_t mem = 0;
std::for_each(start, stop,
[this, &mem](const auto &item) {
mem += CalcItemMem(item);
table_.erase(item.first);
}
);
buf_.erase(start, stop);
stats_.maxsize = items;
if (stats_.currsize > items) { stats_.currsize = items; }
// stats_.maxmem unchanged
stats_.currmem -= mem;
}
// Limits maximum memory usage.
// It also shrinks the cache to the new limit (if needed).
void Maxmem(const size_t bytes) {
// No need to check for bytes == nval
while (stats_.currmem > bytes) { Pop(); }
// stats_.maxsize unchanged
// stats_.currsize already changed in Pop()
stats_.maxmem = bytes;
// stats_.currmem already changed in Pop()
}
// Returns cache statistics.
[[nodiscard]] CacheInfo Stats() const { return stats_; }
// Serializes cached items to the user-provided output iterator.
// Single-pass iterators are also suitable. But don't use std::ostream_iterator
// because it performs formatting and corrupts the binary data.
// Use std::ostreambuf_iterator instead.
template<serde::output_byte_iterator Iter>
void Dump(Iter it) const {
// We load deserialized items with Cache.Set() which affects LRU,
// so at serialization stage items go in the revert order.
using iter = serde::aux::SerializingIterator<Traits>;
auto first = iter(buf_.rbegin());
auto last = iter(buf_.rend());
std::for_each(first, last, [&it](auto bytes) {
std::copy(bytes.begin(), bytes.end(), it);
});
}
// Serializes cached items to the user-provided container.
template<serde::byte_sequence_container Container>
void Dump(Container &c) const {
// We load deserialized items with Cache.Set() which affects LRU,
// so at serialization stage items go in the revert order.
using iter = serde::aux::SerializingIterator<Traits>;
auto first = iter(buf_.rbegin());
auto last = iter(buf_.rend());
std::for_each(first, last, [&c](auto bytes) {
// Here we choose the most optimal way to insert row bytes.
if constexpr (aux::HasInsertMethodV<Container, decltype(first
)>) {
c.insert(c.end(), bytes.begin(), bytes.end());
} else {
std::copy(bytes.begin(), bytes.end(),
std::back_inserter(c));
}
});
}
// Deserializes cached items from the user-provided range.
// Single-pass iterators are also suitable. But don't use std::istream_iterator
// because it performs formatting and corrupts the binary data.
// Use std::istreambuf_iterator instead.
template<serde::input_byte_iterator Iter>
void Load(Iter first, Iter last) {
Flush();
using iter = serde::aux::DeserializingIterator<Traits, Iter>;
std::for_each(iter(first), iter(last), [this](auto &&item) {
Set(item.first, std::move(item.second));
});
}
// Deserializes cached items from the user-provided container.
template<serde::byte_sequence_container Container>
void Load(const Container &c) {
Flush();
using iter = serde::aux::DeserializingIterator<Traits, typename
Container::const_iterator>;
auto first = iter(c.begin());
auto last = iter(c.end());
std::for_each(first, last, [this](auto &&item) {
Set(item.first, std::move(item.second));
});
}
private:
Buf buf_;
Table table_;
mutable CacheInfo stats_;
KeyMem key_mem_;
ValueMem value_mem_;
void Touch(Iter it) { buf_.splice(buf_.cbegin(), buf_, it); }
template<typename V>
void Push(const Key &key, V &&value) {
buf_.emplace_front(key, std::forward<V>(value));
table_.emplace(key, buf_.begin());
stats_.currsize += 1;
stats_.currmem += CalcItemMem(buf_.front());
if (stats_.currsize > stats_.maxsize
or stats_.currmem > stats_.maxmem) { Pop(); }
}
void Pop() {
if (buf_.empty()) return;
// not empty
const size_t mem = CalcItemMem(buf_.back());
const Key &last = buf_.back().first;
table_.erase(last);
buf_.pop_back();
stats_.currsize -= 1;
stats_.currmem -= mem;
}
template<typename V>
void Update(Iter it, V &&value) {
const size_t mem = value_mem_
? stats_.currmem
- value_mem_(it->second)
+ value_mem_(value)
: stats_.currmem;
it->second = std::forward<V>(value);
stats_.currmem = mem;
Touch(it);
}
size_t CalcItemMem(const Item &item) {
size_t size = kItemMem;
// We store two copies of Key (one in Table, one in Buf).
if (key_mem_) { size += key_mem_(item.first) * 2; }
if (value_mem_) { size += value_mem_(item.second); }
return size;
}
};
// Writes a string representation of the cache to the stream.
// BUGFIX/generalization: templated on all five Cache parameters. The previous
// two-parameter form only matched caches instantiated with the default
// Hash/KeyEqual/Allocator; custom instantiations had no matching operator.
// Only public members (Stats, begin/end) are used, so no friendship is needed.
template<typename Key, typename Value, typename Hash,
         typename KeyEqual, typename Allocator>
std::ostream &operator<<(std::ostream &out,
                         const Cache<Key, Value, Hash, KeyEqual, Allocator> &cache) {
    // Header line: type (implementation-defined typeid names) and address.
    const std::string name = std::format("lru::Cache<Key={}, Value={}>",
                                         typeid(Key).name(),
                                         typeid(Value).name());
    out << name << " at " << &cache << '\n';
    out << cache.Stats() << '\n';
    // Items in LRU order; iteration does not touch the LRU.
    size_t n = 0;
    for (const auto &item: cache) {
        out << aux::ItemToStr(item, n) << '\n';
        n++;
    }
    return out << std::flush;
}
}
#endif // LRU_CACHE_CACHE_H