/*
 * rv32emu is freely redistributable under the MIT License. See the file
 * "LICENSE" for information on usage and redistribution of this file.
 */

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "cache.h"

#define MIN(a, b) (((a) < (b)) ? (a) : (b))
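
/* Multiplicative hashing: multiplying the key by the 32-bit golden-ratio
 * constant and keeping the top cache_size_bits bits spreads consecutive
 * keys evenly across the hash table; the final mask keeps the index within
 * cache_size. */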
#define GOLDEN_RATIO_32 0x61C88647
#define HASH(val) \
    ((((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1))

static uint32_t cache_size, cache_size_bits;

/*
 * ARC improves the basic LRU strategy by splitting the cache into two lists,
 * T1 and T2: the former holds recently referenced entries, the latter
 * frequently referenced ones. Each list is extended with a ghost list (B1 or
 * B2) attached to its bottom. A cache hit in B1 indicates that T1's capacity
 * is too small, so we increase T1's size at the expense of T2; conversely, a
 * hit in B2 increases the size of T2 and decreases that of T1.
 */
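/* In the code below, LRU_list corresponds to T1, LFU_list to T2, and
 * LRU_ghost_list / LFU_ghost_list to the ghost lists B1 / B2. */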
typedef enum {
    LRU_list,
    LFU_list,
    LRU_ghost_list,
    LFU_ghost_list,
    N_CACHE_LIST_TYPES
} cache_list_t;

struct list_head {
    struct list_head *prev, *next;
};

struct hlist_head {
    struct hlist_node *first;
};

struct hlist_node {
    struct hlist_node *next, **pprev;
};

typedef struct {
    void *value;
    uint32_t key;
    cache_list_t arc_type;
    struct list_head list;
    struct hlist_node ht_list;
} arc_entry_t;

typedef struct {
    struct hlist_head *ht_list_head;
} hashtable_t;

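/* A cache instance. "capacity" is the total number of resident entries ("c"
 * in the ARC paper), while "lru_capacity" is the adaptive target size of the
 * recency list (the paper's parameter "p"), tuned on ghost-list hits below. */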
typedef struct cache {
    struct list_head *list_table[N_CACHE_LIST_TYPES];
    uint32_t list_size[N_CACHE_LIST_TYPES];
    hashtable_t *map;
    uint32_t capacity;
    uint32_t lru_capacity;
} cache_t;

static inline void INIT_LIST_HEAD(struct list_head *head)
{
    head->next = head;
    head->prev = head;
}

static inline void list_add(struct list_head *node, struct list_head *head)
{
    struct list_head *next = head->next;

    next->prev = node;
    node->next = next;
    node->prev = head;
    head->next = node;
}

static inline void list_del(struct list_head *node)
{
    struct list_head *next = node->next;
    struct list_head *prev = node->prev;

    next->prev = prev;
    prev->next = next;
}

static inline void list_del_init(struct list_head *node)
{
    list_del(node);
    INIT_LIST_HEAD(node);
}

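/* list_entry()/hlist_entry() below rely on container_of(), which is expected
 * to be provided by "cache.h". A guarded Linux-style fallback (an assumption,
 * in case the header does not define it) is given here; offsetof() comes from
 * <stddef.h>, included above. */
#ifndef container_of
#define container_of(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))
#endif
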
#define list_entry(node, type, member) container_of(node, type, member)

#define list_last_entry(head, type, member) \
    list_entry((head)->prev, type, member)

#ifdef __HAVE_TYPEOF
#define list_for_each_entry_safe(entry, safe, head, member)                \
    for (entry = list_entry((head)->next, __typeof__(*entry), member),     \
        safe = list_entry(entry->member.next, __typeof__(*entry), member); \
         &entry->member != (head); entry = safe,                           \
        safe = list_entry(safe->member.next, __typeof__(*entry), member))
#else
#define list_for_each_entry_safe(entry, safe, head, member, type)        \
    for (entry = list_entry((head)->next, type, member),                  \
         safe = list_entry(entry->member.next, type, member);             \
         &entry->member != (head);                                        \
         entry = safe, safe = list_entry(safe->member.next, type, member))
#endif

#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
    h->next = NULL;
    h->pprev = NULL;
}

static inline int hlist_empty(const struct hlist_head *h)
{
    return !h->first;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    struct hlist_node *first = h->first;
    n->next = first;
    if (first)
        first->pprev = &n->next;

    h->first = n;
    n->pprev = &h->first;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
    return !h->pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
    struct hlist_node *next = n->next;
    struct hlist_node **pprev = n->pprev;

    *pprev = next;
    if (next)
        next->pprev = pprev;
}

static inline void hlist_del_init(struct hlist_node *n)
{
    if (hlist_unhashed(n))
        return;
    hlist_del(n);
    INIT_HLIST_NODE(n);
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#ifdef __HAVE_TYPEOF
#define hlist_entry_safe(ptr, type, member)                  \
    ({                                                       \
        typeof(ptr) ____ptr = (ptr);                         \
        ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
    })
#else
#define hlist_entry_safe(ptr, type, member) \
    (ptr) ? hlist_entry(ptr, type, member) : NULL
#endif

#ifdef __HAVE_TYPEOF
#define hlist_for_each_entry(pos, head, member)                              \
    for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); pos; \
         pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
#else
#define hlist_for_each_entry(pos, head, member, type)              \
    for (pos = hlist_entry_safe((head)->first, type, member); pos; \
         pos = hlist_entry_safe((pos)->member.next, type, member))
#endif

cache_t *cache_create(int size_bits)
{
    cache_t *cache = malloc(sizeof(cache_t));
    if (!cache)
        return NULL;
    cache_size_bits = size_bits;
    cache_size = 1 << size_bits;

    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        cache->list_table[i] = malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(cache->list_table[i]);
        cache->list_size[i] = 0;
    }

    cache->map = malloc(sizeof(hashtable_t));
    if (!cache->map) {
        /* list_table is an array member of the struct, so free its
         * elements, not the array itself. */
        for (int i = 0; i < N_CACHE_LIST_TYPES; i++)
            free(cache->list_table[i]);
        free(cache);
        return NULL;
    }
    cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
    if (!cache->map->ht_list_head) {
        free(cache->map);
        for (int i = 0; i < N_CACHE_LIST_TYPES; i++)
            free(cache->list_table[i]);
        free(cache);
        return NULL;
    }
    for (uint32_t i = 0; i < cache_size; i++) {
        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
    }

    cache->capacity = cache_size;
    cache->lru_capacity = cache_size / 2;
    return cache;
}

/* Rules of ARC:
 * 1. size of LRU_list + size of LFU_list <= c
 * 2. size of LRU_list + size of LRU_ghost_list <= c
 * 3. size of LFU_list + size of LFU_ghost_list <= 2c
 * 4. size of LRU_list + size of LFU_list + size of LRU_ghost_list + size of
 *    LFU_ghost_list <= 2c
 */
#define CACHE_ASSERT(cache)                                                  \
    assert(cache->list_size[LRU_list] + cache->list_size[LFU_list] <=        \
           cache->capacity);                                                 \
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=  \
           cache->capacity);                                                 \
    assert(cache->list_size[LFU_list] + cache->list_size[LFU_ghost_list] <=  \
           2 * cache->capacity);                                             \
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] +   \
           cache->list_size[LFU_list] +                                      \
           cache->list_size[LFU_ghost_list] <=                               \
           2 * cache->capacity);

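/* Move an entry to the MRU (head) position of the target list, updating the
 * per-list size counters and the entry's list tag. */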
static inline void move_to_mru(cache_t *cache,
                               arc_entry_t *entry,
                               const cache_list_t arc_type)
{
    cache->list_size[entry->arc_type]--;
    cache->list_size[arc_type]++;
    entry->arc_type = arc_type;
    list_del_init(&entry->list);
    list_add(&entry->list, cache->list_table[arc_type]);
}
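/* Demote the LRU (tail) entry of whichever resident list is over its target
 * size to the corresponding ghost list, freeing up a resident slot. */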
static inline void replace_list(cache_t *cache)
{
    if (cache->list_size[LRU_list] >= cache->lru_capacity)
        move_to_mru(
            cache,
            list_last_entry(cache->list_table[LRU_list], arc_entry_t, list),
            LRU_ghost_list);
    else if (cache->list_size[LFU_list] >=
             (cache->capacity - cache->lru_capacity))
        move_to_mru(
            cache,
            list_last_entry(cache->list_table[LFU_list], arc_entry_t, list),
            LFU_ghost_list);
}

void *cache_get(cache_t *cache, uint32_t key)
{
    if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
        return NULL;

    arc_entry_t *entry = NULL;
#ifdef __HAVE_TYPEOF
    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
#else
    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
                          arc_entry_t)
#endif
    {
        if (entry->key == key)
            break;
    }
    if (!entry || entry->key != key)
        return NULL;

    /* cache hit in LRU_list */
    if (entry->arc_type == LRU_list) {
        replace_list(cache);
        move_to_mru(cache, entry, LFU_list);
    }

    /* cache hit in LFU_list */
    if (entry->arc_type == LFU_list)
        move_to_mru(cache, entry, LFU_list);

    /* cache hit in LRU_ghost_list */
    if (entry->arc_type == LRU_ghost_list) {
        cache->lru_capacity = MIN(cache->lru_capacity + 1, cache->capacity);
        replace_list(cache);
        move_to_mru(cache, entry, LFU_list);
    }

    /* cache hit in LFU_ghost_list */
    if (entry->arc_type == LFU_ghost_list) {
        cache->lru_capacity = cache->lru_capacity ? cache->lru_capacity - 1 : 0;
        replace_list(cache);
        move_to_mru(cache, entry, LFU_list);
    }
    CACHE_ASSERT(cache);
    /* at this point we always have a hit; misses returned NULL above */
    return entry->value;
}

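/* Insert a new key/value pair; new entries always enter LRU_list (T1).
 * Returns the value displaced to make room (owned by the caller afterwards),
 * or NULL when nothing was evicted. */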
void *cache_put(cache_t *cache, uint32_t key, void *value)
{
    void *delete_value = NULL;
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
           cache->capacity);
    /* Before adding a new element to the cache, we check the current status
     * of the cache and evict an entry if necessary.
     */
    if ((cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list]) ==
        cache->capacity) {
        if (cache->list_size[LRU_list] < cache->capacity) {
            arc_entry_t *delete_target = list_last_entry(
                cache->list_table[LRU_ghost_list], arc_entry_t, list);
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
            free(delete_target);
            cache->list_size[LRU_ghost_list]--;
            replace_list(cache);
        } else {
            arc_entry_t *delete_target =
                list_last_entry(cache->list_table[LRU_list], arc_entry_t, list);
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
            free(delete_target);
            cache->list_size[LRU_list]--;
        }
    } else {
        assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <
               cache->capacity);
        uint32_t size =
            cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] +
            cache->list_size[LFU_list] + cache->list_size[LFU_ghost_list];
        if (size == cache->capacity * 2) {
            arc_entry_t *delete_target = list_last_entry(
                cache->list_table[LFU_ghost_list], arc_entry_t, list);
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
            free(delete_target);
            cache->list_size[LFU_ghost_list]--;
        }
        replace_list(cache);
    }
    arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
    if (!new_entry) /* allocation failure: give up on caching this key */
        return delete_value;
    new_entry->key = key;
    new_entry->value = value;
    new_entry->arc_type = LRU_list;
    list_add(&new_entry->list, cache->list_table[LRU_list]);
    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
    cache->list_size[LRU_list]++;
    CACHE_ASSERT(cache);
    return delete_value;
}

void cache_free(cache_t *cache, void (*release_entry)(void *))
{
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        arc_entry_t *entry, *safe;
#ifdef __HAVE_TYPEOF
        list_for_each_entry_safe (entry, safe, cache->list_table[i], list)
#else
        list_for_each_entry_safe (entry, safe, cache->list_table[i], list,
                                  arc_entry_t)
#endif
        {
            release_entry(entry->value);
            /* release the entry itself, not just the cached value */
            free(entry);
        }
        free(cache->list_table[i]);
    }
    free(cache->map->ht_list_head);
    free(cache->map);
    free(cache);
}
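
/*
 * Minimal usage sketch (illustrative only; "block_t", "block_free", "pc",
 * and "block" are hypothetical caller-side names, not part of this API):
 *
 *     cache_t *cache = cache_create(10);            // 2^10 = 1024 buckets
 *     void *evicted = cache_put(cache, pc, block);  // may displace a value
 *     if (evicted)
 *         block_free(evicted);
 *     block_t *hit = cache_get(cache, pc);          // NULL on a miss
 *     cache_free(cache, block_free);
 */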