+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ #include "cache.h"
+
+ #define min(a, b) ((a) < (b) ? (a) : (b))
+ #define max(a, b) ((a) > (b) ? (a) : (b))
+ #define BITS 10
+ #define SIZE 1024
+ #define GOLDEN_RATIO_32 0x61C88647
+ /* multiplicative hash: spread the key with the 32-bit golden ratio and keep
+  * the top BITS bits as the bucket index */
+ #define HASH(val) ((((val) * GOLDEN_RATIO_32) >> (32 - BITS)) % SIZE)
+
+ typedef struct arc_entry {
+     void *value;
+     uint32_t key;
+     arc_type_t arc_type;      /* which of the four ARC lists the entry is on */
+     struct list_head list;    /* node in the T1/T2/B1/B2 list */
+     struct list_head ht_list; /* node in the hash-table bucket chain */
+ } arc_entry_t;
+
+ typedef struct hashtable {
+     struct list_head *ht_list_head; /* array of SIZE bucket heads */
+ } hashtable_t;
+
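+ /* Allocate an ARC cache with the four ARC lists (T1, T2, B1, B2) empty, a
+  * SIZE-bucket hash table for key lookup, and the adaptive target p starting
+  * at half of the capacity c.
+  */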
+ cache_t *cache_create()
+ {
+     cache_t *cache = (cache_t *) malloc(sizeof(cache_t));
+     for (int i = 0; i < 4; i++) {
+         cache->list_table[i] =
+             (struct list_head *) malloc(sizeof(struct list_head));
+         INIT_LIST_HEAD(cache->list_table[i]);
+         cache->list_size[i] = 0;
+     }
+     cache->map = (hashtable_t *) malloc(sizeof(hashtable_t));
+     cache->map->ht_list_head =
+         (struct list_head *) malloc(SIZE * sizeof(struct list_head));
+
+     for (int i = 0; i < SIZE; i++) {
+         INIT_LIST_HEAD(&cache->map->ht_list_head[i]);
+     }
+
+     cache->c = SIZE;
+     cache->p = SIZE / 2;
+ #if RV32_HAS(ARCACHE_INFO)
+     cache->get_time = 0;
+     cache->hit_time = 0;
+ #endif
+     return cache;
+ }
+
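+ /* Free every entry on the four lists (invoking release_entry on each stored
+  * value), then the list heads, the hash table, and the cache itself.
+  */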
+ void cache_free(cache_t *cache, void (*release_entry)(void *))
+ {
+     for (int i = 0; i < 4; i++) {
+         arc_entry_t *entry, *safe;
+         list_for_each_entry_safe (entry, safe, cache->list_table[i], list) {
+             release_entry(entry->value);
+             free(entry); /* the entry itself was allocated in cache_put */
+         }
+         free(cache->list_table[i]);
+     }
+     free(cache->map->ht_list_head);
+     free(cache->map);
+     free(cache);
+ }
+
+ /* Rules of ARC:
+  * 1. size of T1 + size of T2 <= c
+  * 2. size of T1 + size of B1 <= c
+  * 3. size of T2 + size of B2 <= 2c
+  * 4. size of T1 + size of T2 + size of B1 + size of B2 <= 2c
+  */
+ #if RV32_HAS(ARCACHE_INFO)
+ void assert_cache(cache_t *cache)
+ {
+     assert(cache->list_size[T1] + cache->list_size[T2] <= cache->c);
+     assert(cache->list_size[T1] + cache->list_size[B1] <= cache->c);
+     assert(cache->list_size[T2] + cache->list_size[B2] <= 2 * cache->c);
+     assert(cache->list_size[T1] + cache->list_size[B1] + cache->list_size[T2] +
+                cache->list_size[B2] <=
+            2 * cache->c);
+ }
+ #endif
+
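+ /* Move an entry to the MRU (head) position of the list identified by
+  * arc_type, keeping the per-list size counters in sync.
+  */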
+ void move_to_mru(cache_t *cache, arc_entry_t *entry, const arc_type_t arc_type)
+ {
+     cache->list_size[entry->arc_type]--;
+     cache->list_size[arc_type]++;
+     entry->arc_type = arc_type;
+     list_move(&entry->list, cache->list_table[arc_type]);
+ }
+
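+ /* Demote the LRU entry of T1 into the ghost list B1 (resp. T2 into B2) once
+  * the list has grown past its share of the capacity, which is governed by
+  * the adaptive target p.
+  */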
+ void replaceT1(cache_t *cache)
+ {
+     if (cache->list_size[T1] >= cache->p)
+         move_to_mru(cache,
+                     list_last_entry(cache->list_table[T1], arc_entry_t, list),
+                     B1);
+ }
+
+ void replaceT2(cache_t *cache)
+ {
+     if (cache->list_size[T2] >= (cache->c - cache->p))
+         move_to_mru(cache,
+                     list_last_entry(cache->list_table[T2], arc_entry_t, list),
+                     B2);
+ }
+
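+ /* Look up key in the hash table. Any entry that is found is promoted to the
+  * MRU position of T2 and its value returned; hits in the ghost lists B1/B2
+  * additionally adjust the adaptive target p. Returns NULL when the key is
+  * not tracked at all.
+  */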
+ void *cache_get(cache_t *cache, uint32_t key)
+ {
+     if (cache->c <= 0 || list_empty(&cache->map->ht_list_head[HASH(key)]))
+         return NULL;
+
+     arc_entry_t *entry = NULL, *iter;
+     list_for_each_entry (iter, &cache->map->ht_list_head[HASH(key)], ht_list)
+     {
+         if (iter->key == key) {
+             entry = iter;
+             break;
+         }
+     }
+ #if RV32_HAS(ARCACHE_INFO)
+     cache->get_time++;
+ #endif
+     /* cache miss: the key is not tracked in any of the four lists */
+     if (!entry)
+         return NULL;
+
+     /* cache hit in T1: promote the entry into T2 */
+     if (entry->arc_type == T1) {
+ #if RV32_HAS(ARCACHE_INFO)
+         cache->hit_time++;
+ #endif
+         replaceT2(cache);
+         move_to_mru(cache, entry, T2);
+     }
+     /* cache hit in T2: refresh the entry at the MRU position of T2 */
+     else if (entry->arc_type == T2) {
+ #if RV32_HAS(ARCACHE_INFO)
+         cache->hit_time++;
+ #endif
+         move_to_mru(cache, entry, T2);
+     }
+     /* hit in ghost list B1: favor recency by growing the target size p */
+     else if (entry->arc_type == B1) {
+         cache->p = min(cache->p + 1, cache->c);
+         replaceT2(cache);
+         move_to_mru(cache, entry, T2);
+     }
+     /* hit in ghost list B2: favor frequency by shrinking the target size p */
+     else if (entry->arc_type == B2) {
+         cache->p = max(cache->p - 1, 0);
+         replaceT1(cache);
+         move_to_mru(cache, entry, T2);
+     }
+ #if RV32_HAS(ARCACHE_INFO)
+     assert_cache(cache);
+ #endif
+     /* the entry now sits at the MRU position of T2; return its value */
+     return entry->value;
+ }
+
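+ /* Insert a new key/value pair at the MRU position of T1, evicting an old
+  * entry first when the ARC invariants require it. Returns the value of the
+  * evicted entry (if any) so the caller can release it, or NULL.
+  */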
+ void *cache_put(cache_t *cache, uint32_t key, void *value)
+ {
+ #if RV32_HAS(ARCACHE_INFO)
+     cache->get_time++;
+ #endif
+     void *delete_value = NULL;
+ #if RV32_HAS(ARCACHE_INFO)
+     assert(cache->list_size[T1] + cache->list_size[B1] <= cache->c);
+ #endif
+     /* Before adding the new entry to the cache, evict if necessary so that
+      * the ARC size invariants above keep holding.
+      */
+     if ((cache->list_size[T1] + cache->list_size[B1]) == cache->c) {
+         if (cache->list_size[T1] < cache->c) {
+             /* T1 + B1 is full but T1 still has room: drop the LRU ghost
+              * entry of B1, then let replaceT1 demote from T1 if needed. */
+             arc_entry_t *delete_target =
+                 list_last_entry(cache->list_table[B1], arc_entry_t, list);
+             list_del_init(&delete_target->list);
+             list_del_init(&delete_target->ht_list);
+             delete_value = delete_target->value;
+             free(delete_target);
+             cache->list_size[B1]--;
+             replaceT1(cache);
+         } else {
+             /* T1 alone is full: evict its LRU entry directly */
+             arc_entry_t *delete_target =
+                 list_last_entry(cache->list_table[T1], arc_entry_t, list);
+             list_del_init(&delete_target->list);
+             list_del_init(&delete_target->ht_list);
+             delete_value = delete_target->value;
+             free(delete_target);
+             cache->list_size[T1]--;
+         }
+     } else {
+ #if RV32_HAS(ARCACHE_INFO)
+         assert(cache->list_size[T1] + cache->list_size[B1] < cache->c);
+ #endif
+         uint32_t size = cache->list_size[T1] + cache->list_size[B1] +
+                         cache->list_size[T2] + cache->list_size[B2];
+         /* all four lists together may hold at most 2c entries */
+         if (size == cache->c * 2) {
+             arc_entry_t *delete_target =
+                 list_last_entry(cache->list_table[B2], arc_entry_t, list);
+             list_del_init(&delete_target->list);
+             list_del_init(&delete_target->ht_list);
+             delete_value = delete_target->value;
+             free(delete_target);
+             cache->list_size[B2]--;
+         }
+         if (cache->list_size[T1] + cache->list_size[T2] >= cache->c &&
+             cache->list_size[T1] < cache->p)
+             replaceT2(cache);
+         else
+             replaceT1(cache);
+     }
+     /* the new entry always enters at the MRU position of T1 */
+     arc_entry_t *new_entry = (arc_entry_t *) malloc(sizeof(arc_entry_t));
+     new_entry->key = key;
+     new_entry->value = value;
+     new_entry->arc_type = T1;
+     list_add(&new_entry->list, cache->list_table[T1]);
+     list_add(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+     cache->list_size[T1]++;
+ #if RV32_HAS(ARCACHE_INFO)
+     assert_cache(cache);
+ #endif
+     return delete_value;
+ }
+
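+ /* Print the request/hit counters collected when ARCACHE_INFO is enabled. */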
+ #if RV32_HAS(ARCACHE_INFO)
+ void cache_print_stats(cache_t *cache)
+ {
+     printf(
+         "requests: %12lu\n"
+         "hits: %12lu\n"
+         "ratio: %lf%%\n",
+         cache->get_time, cache->hit_time,
+         cache->hit_time * 100 / (double) cache->get_time);
+ }
+ #endif