GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/auxiliary/util/u_cache.c
/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Improved cache implementation.
 *
 * Fixed size array with linear probing on collision and LRU eviction
 * on full.
 *
 * @author Jose Fonseca <[email protected]>
 */
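
/*
 * Example usage (illustrative sketch only, not part of the upstream file):
 * the hash, compare and destroy callbacks below are hypothetical stand-ins;
 * any functions with matching signatures can be passed to
 * util_cache_create().  The block is wrapped in "#if 0" so it is never
 * compiled.
 */
#if 0
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "util/u_cache.h"

/* Toy string hash (djb2-style); a real caller would use a stronger hash. */
static uint32_t
example_hash(const void *key)
{
   const unsigned char *s = key;
   uint32_t h = 5381;
   while (*s)
      h = h * 33 + *s++;
   return h;
}

static int
example_compare(const void *key1, const void *key2)
{
   return strcmp(key1, key2);
}

/* Called by the cache when a pair is evicted, removed or cleared. */
static void
example_destroy(void *key, void *value)
{
   free(key);
   free(value);
}

static void
example(void)
{
   /* Room for 64 (key, value) pairs before LRU eviction kicks in. */
   struct util_cache *cache =
      util_cache_create(example_hash, example_compare, example_destroy, 64);

   util_cache_set(cache, strdup("answer"), strdup("42"));
   assert(strcmp(util_cache_get(cache, "answer"), "42") == 0);

   util_cache_destroy(cache);
}
#endif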


#include "pipe/p_compiler.h"
#include "util/u_debug.h"

#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_cache.h"
#include "util/simple_list.h"


struct util_cache_entry
{
   enum { EMPTY = 0, FILLED, DELETED } state;
   uint32_t hash;

   struct util_cache_entry *next;
   struct util_cache_entry *prev;

   void *key;
   void *value;

#ifdef DEBUG
   unsigned count;
#endif
};


struct util_cache
{
   /** Hash function */
   uint32_t (*hash)(const void *key);

   /** Compare two keys */
   int (*compare)(const void *key1, const void *key2);

   /** Destroy a (key, value) pair */
   void (*destroy)(void *key, void *value);

   /** Max entries in the cache */
   uint32_t size;

   /** Array [size] of entries */
   struct util_cache_entry *entries;

   /** Number of entries in the cache */
   unsigned count;

   /** Head of list, sorted from Least-recently used to Most-recently used */
   struct util_cache_entry lru;
};


static void
ensure_sanity(const struct util_cache *cache);

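/* Over-allocation factor for the entry array: util_cache_create() allocates
 * ALPHA times the requested number of entries, and util_cache_set() starts
 * evicting the least-recently-used entry once count reaches size/ALPHA, so
 * at most 1/ALPHA of the slots hold live (FILLED) entries at any time.
 */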
#define CACHE_DEFAULT_ALPHA 2

/**
 * Create a new cache with 'size' entries. Also provide functions for
 * hashing keys, comparing keys and destroying (key,value) pairs.
 */
struct util_cache *
util_cache_create(uint32_t (*hash)(const void *key),
                  int (*compare)(const void *key1, const void *key2),
                  void (*destroy)(void *key, void *value),
                  uint32_t size)
{
   struct util_cache *cache;

   cache = CALLOC_STRUCT(util_cache);
   if (!cache)
      return NULL;

   cache->hash = hash;
   cache->compare = compare;
   cache->destroy = destroy;

   make_empty_list(&cache->lru);

   size *= CACHE_DEFAULT_ALPHA;
   cache->size = size;

   cache->entries = CALLOC(size, sizeof(struct util_cache_entry));
   if (!cache->entries) {
      FREE(cache);
      return NULL;
   }

   ensure_sanity(cache);
   return cache;
}


/**
 * Try to find a cache entry, given the key and hash of the key.
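 *
 * Returns the matching FILLED entry if present.  Otherwise returns the first
 * non-FILLED slot seen before an EMPTY slot is reached (a candidate slot for
 * a new insertion), or NULL if the probe sequence never hits an EMPTY slot.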
 */
static struct util_cache_entry *
util_cache_entry_get(struct util_cache *cache,
                     uint32_t hash,
                     const void *key)
{
   struct util_cache_entry *first_unfilled = NULL;
   uint32_t index = hash % cache->size;
   uint32_t probe;

   /* Probe until we find either a matching FILLED entry or an EMPTY
    * slot (which has never been occupied).
    *
    * Deleted or non-matching slots are not indicative of completion
    * as a previous linear probe for the same key could have continued
    * past this point.
    */
   for (probe = 0; probe < cache->size; probe++) {
      uint32_t i = (index + probe) % cache->size;
      struct util_cache_entry *current = &cache->entries[i];

      if (current->state == FILLED) {
         if (current->hash == hash &&
             cache->compare(key, current->key) == 0)
            return current;
      }
      else {
         if (!first_unfilled)
            first_unfilled = current;

         if (current->state == EMPTY)
            return first_unfilled;
      }
   }

   return NULL;
}

static inline void
util_cache_entry_destroy(struct util_cache *cache,
                         struct util_cache_entry *entry)
{
   void *key = entry->key;
   void *value = entry->value;

   entry->key = NULL;
   entry->value = NULL;

   if (entry->state == FILLED) {
      remove_from_list(entry);
      cache->count--;

      if (cache->destroy)
         cache->destroy(key, value);

      entry->state = DELETED;
   }
}


/**
 * Insert an entry in the cache, given the key and value.
 */
void
util_cache_set(struct util_cache *cache,
               void *key,
               void *value)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return;
   hash = cache->hash(key);
   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      entry = cache->lru.prev;

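   /* If the cache is at capacity, evict the least-recently-used entry (the
    * tail of the LRU list).  The second destroy below releases whatever the
    * chosen slot currently holds before it is overwritten.
    */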
   if (cache->count >= cache->size / CACHE_DEFAULT_ALPHA)
      util_cache_entry_destroy(cache, cache->lru.prev);

   util_cache_entry_destroy(cache, entry);

#ifdef DEBUG
   ++entry->count;
#endif

   entry->key = key;
   entry->hash = hash;
   entry->value = value;
   entry->state = FILLED;
   insert_at_head(&cache->lru, entry);
   cache->count++;

   ensure_sanity(cache);
}


/**
 * Search the cache for an entry with the given key. Return the corresponding
 * value or NULL if not found.
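 * A successful lookup also moves the entry to the most-recently-used end of
 * the LRU list.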
 */
void *
util_cache_get(struct util_cache *cache,
               const void *key)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return NULL;
   hash = cache->hash(key);
   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      return NULL;

   if (entry->state == FILLED)
      move_to_head(&cache->lru, entry);

   return entry->value;
}


/**
 * Remove all entries from the cache. The 'destroy' function will be called
 * for each entry's (key, value).
 */
void
util_cache_clear(struct util_cache *cache)
{
   uint32_t i;

   assert(cache);
   if (!cache)
      return;

   for (i = 0; i < cache->size; ++i) {
      util_cache_entry_destroy(cache, &cache->entries[i]);
      cache->entries[i].state = EMPTY;
   }

   assert(cache->count == 0);
   assert(is_empty_list(&cache->lru));
   ensure_sanity(cache);
}


/**
 * Destroy the cache and all entries.
 */
void
util_cache_destroy(struct util_cache *cache)
{
   assert(cache);
   if (!cache)
      return;

#ifdef DEBUG
   if (cache->count >= 20*cache->size) {
      /* Normal approximation of the Poisson distribution */
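      /* Each slot's fill count is roughly Poisson distributed with mean
       * count/size; for a large mean this is close to a normal distribution
       * with standard deviation sqrt(mean), so a z-score above 6 should be
       * vanishingly unlikely unless the hash spreads keys badly.
       */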
      double mean = (double)cache->count/(double)cache->size;
      double stddev = sqrt(mean);
      unsigned i;
      for (i = 0; i < cache->size; ++i) {
         double z = fabs(cache->entries[i].count - mean)/stddev;
         /* This assert should not fail 99.9999998027% of the times, unless
          * the hash function is a poor one */
         assert(z <= 6.0);
      }
   }
#endif

   util_cache_clear(cache);

   FREE(cache->entries);
   FREE(cache);
}


/**
 * Remove the cache entry which matches the given key.
 */
void
util_cache_remove(struct util_cache *cache,
                  const void *key)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return;

   hash = cache->hash(key);

   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      return;

   if (entry->state == FILLED)
      util_cache_entry_destroy(cache, entry);

   ensure_sanity(cache);
}


static void
ensure_sanity(const struct util_cache *cache)
{
#ifdef DEBUG
   unsigned i, cnt = 0;

   assert(cache);
   for (i = 0; i < cache->size; i++) {
      struct util_cache_entry *header = &cache->entries[i];

      assert(header);
      assert(header->state == FILLED ||
             header->state == EMPTY ||
             header->state == DELETED);
      if (header->state == FILLED) {
         cnt++;
         assert(header->hash == cache->hash(header->key));
      }
   }

   assert(cnt == cache->count);
   assert(cache->size >= cnt);

   if (cache->count == 0) {
      assert (is_empty_list(&cache->lru));
   }
   else {
      struct util_cache_entry *header = cache->lru.next;

      assert (header);
      assert (!is_empty_list(&cache->lru));

      for (i = 0; i < cache->count; i++)
         header = header->next;

      assert(header == &cache->lru);
   }
#endif

   (void)cache;
}