GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/auxiliary/pipebuffer/pb_slab.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pb_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"

/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};


static void
pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   list_del(&entry->head); /* remove from reclaim list */
   list_add(&entry->head, &slab->free);
   slab->num_free++;

   /* Add slab to the group's list if it isn't already linked. */
   if (!list_is_linked(&slab->head)) {
      struct pb_slab_group *group = &slabs->groups[entry->group_index];
      list_addtail(&slab->head, &group->slabs);
   }

   if (slab->num_free >= slab->num_entries) {
      list_del(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}

static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
   while (!list_is_empty(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);

      if (!slabs->can_reclaim(slabs->priv, entry))
         break;

      pb_slab_reclaim(slabs, entry);
   }
}

/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to re-use entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;
   unsigned entry_size = 1 << order;
   bool three_fourths = false;

   /* If the size is <= 3/4 of the entry size, use a slab with entries using
    * 3/4 sizes to reduce overallocation.
    */
   if (slabs->allow_three_fourths_allocations && size <= entry_size * 3 / 4) {
      entry_size = entry_size * 3 / 4;
      three_fourths = true;
   }

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);

   group_index = (heap * slabs->num_orders + (order - slabs->min_order)) *
                 (1 + slabs->allow_three_fourths_allocations) + three_fourths;
   group = &slabs->groups[group_index];

   mtx_lock(&slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (list_is_empty(&group->slabs) ||
       list_is_empty(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
      pb_slabs_reclaim_locked(slabs);

   /* Remove slabs without free entries. */
   while (!list_is_empty(&group->slabs)) {
      slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
      if (!list_is_empty(&slab->free))
         break;

      list_del(&slab->head);
   }

   if (list_is_empty(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      mtx_unlock(&slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, entry_size, group_index);
      if (!slab)
         return NULL;
      mtx_lock(&slabs->mutex);

      list_add(&slab->head, &group->slabs);
   }

   entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
   list_del(&entry->head);
   slab->num_free--;

   mtx_unlock(&slabs->mutex);

   return entry;
}
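
/* Worked example of the bucket selection above (editorial note, not part of
 * the original file; min_order = 8 is an assumed value): a request of
 * size = 5000 gives util_logbase2_ceil(5000) = 13, so entry_size starts as
 * 1 << 13 = 8192. Since 5000 <= 8192 * 3 / 4 = 6144, a manager created with
 * allow_three_fourths_allocations hands out a 6144-byte entry from the 3/4
 * bucket instead, overallocating by ~23% of the request rather than ~64%.
 */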

/* Free the given slab entry.
 *
 * The entry may still be in use e.g. by in-flight command submissions. The
 * can_reclaim callback function will be called to determine whether the entry
 * can be handed out again by pb_slab_alloc.
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   mtx_lock(&slabs->mutex);
   list_addtail(&entry->head, &slabs->reclaim);
   mtx_unlock(&slabs->mutex);
}
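
/* Editorial sketch (not part of the original file): a typical can_reclaim
 * callback checks whether the GPU is done with the entry, e.g. by testing a
 * fence. Everything named "my_*" below is a hypothetical placeholder shown
 * only to illustrate the callback shape implied by the call in
 * pb_slabs_reclaim_locked.
 */
#if 0
struct my_slab_entry {
   struct pb_slab_entry base;  /* passed to pb_slab_alloc/pb_slab_free */
   struct my_fence *fence;     /* last GPU use of this entry */
};

static bool my_fence_is_signalled(struct my_fence *fence);

static bool
my_can_reclaim(void *priv, struct pb_slab_entry *entry)
{
   struct my_slab_entry *e = (struct my_slab_entry *)entry;

   /* The entry may be reused only once the hardware no longer touches it;
    * pb_slabs_reclaim_locked stops at the first entry for which this
    * returns false.
    */
   return my_fence_is_signalled(e->fence);
}
#endif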

/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
 *
 * This may end up freeing some slabs and is therefore useful to try to reclaim
 * some no longer used memory. However, calling this function is not strictly
 * required since pb_slab_alloc will eventually do the same thing.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
   mtx_lock(&slabs->mutex);
   pb_slabs_reclaim_locked(slabs);
   mtx_unlock(&slabs->mutex);
}

/* Initialize the slabs manager.
 *
 * The minimum and maximum size of slab entries are 2^min_order and
 * 2^max_order, respectively.
 *
 * priv will be passed to the given callback functions.
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps, bool allow_three_fourth_allocations,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free)
{
   unsigned num_groups;
   unsigned i;

   assert(min_order <= max_order);
   assert(max_order < sizeof(unsigned) * 8 - 1);

   slabs->min_order = min_order;
   slabs->num_orders = max_order - min_order + 1;
   slabs->num_heaps = num_heaps;
   slabs->allow_three_fourths_allocations = allow_three_fourth_allocations;

   slabs->priv = priv;
   slabs->can_reclaim = can_reclaim;
   slabs->slab_alloc = slab_alloc;
   slabs->slab_free = slab_free;

   list_inithead(&slabs->reclaim);

   num_groups = slabs->num_orders * slabs->num_heaps *
                (1 + allow_three_fourth_allocations);
   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
   if (!slabs->groups)
      return false;

   for (i = 0; i < num_groups; ++i) {
      struct pb_slab_group *group = &slabs->groups[i];
      list_inithead(&group->slabs);
   }

   (void) mtx_init(&slabs->mutex, mtx_plain);

   return true;
}
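
/* Editorial sketch (not part of the original file): how a caller might wire
 * up the manager. The "my_*" names and struct my_screen are hypothetical,
 * and the order/heap values are examples only (entries from 2^6 = 64 bytes
 * to 2^16 = 64 KiB across two heaps). The callback prototypes follow the
 * way this file invokes slab_alloc, slab_free and can_reclaim.
 */
#if 0
static struct pb_slab *my_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size, unsigned group_index);
static void my_slab_free(void *priv, struct pb_slab *slab);
static bool my_can_reclaim(void *priv, struct pb_slab_entry *entry);

static bool
my_init(struct my_screen *screen)
{
   return pb_slabs_init(&screen->slabs,
                        6, 16,        /* min_order, max_order */
                        2,            /* num_heaps */
                        true,         /* allow 3/4-size entries */
                        screen,       /* priv passed to the callbacks */
                        my_can_reclaim,
                        my_slab_alloc,
                        my_slab_free);
}
#endif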

/* Shut down the slab manager.
 *
 * This will free all allocated slabs and internal structures, even if some
 * of the slab entries are still in flight (i.e. if can_reclaim would return
 * false).
 */
void
pb_slabs_deinit(struct pb_slabs *slabs)
{
   /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
   while (!list_is_empty(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
      pb_slab_reclaim(slabs, entry);
   }

   FREE(slabs->groups);
   mtx_destroy(&slabs->mutex);
}
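
/* Editorial sketch (not part of the original file): the expected call pattern
 * around pb_slab_alloc/pb_slab_free/pb_slabs_deinit, assuming a manager set
 * up as in the pb_slabs_init sketch above. "my_*" names remain hypothetical.
 */
#if 0
static void
my_usage_example(struct my_screen *screen)
{
   /* Request 5000 bytes from heap 0; the entry handed out is bucket-sized
    * (see the worked example after pb_slab_alloc).
    */
   struct pb_slab_entry *entry = pb_slab_alloc(&screen->slabs, 5000, 0);
   if (!entry)
      return;

   /* ... use the buffer backing this entry ... */

   /* Freeing only queues the entry for reclaim; it becomes reusable once
    * my_can_reclaim reports that the GPU is done with it.
    */
   pb_slab_free(&screen->slabs, entry);

   /* Optionally release idle memory early; pb_slab_alloc would otherwise
    * reclaim lazily on the next allocation.
    */
   pb_slabs_reclaim(&screen->slabs);

   /* At teardown, free everything, including entries still in flight. */
   pb_slabs_deinit(&screen->slabs);
}
#endif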