GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/i915/i915_gem_evict.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *    Chris Wilson <[email protected]>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
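
/*
 * Add @obj to the eviction candidate set: take a reference, put the object
 * on the caller's unwind list and offer its GTT node to the drm_mm scanner.
 * Returns true once the scanner has accumulated enough blocks to assemble
 * a hole of the requested size.
 */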
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	list_add(&obj->exec_list, unwind);
	drm_gem_object_reference(&obj->base);
	return drm_mm_scan_add_block(obj->gtt_space);
}
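
/**
 * i915_gem_evict_something - make room in the GTT
 * @dev: drm device
 * @min_size: minimum size (in bytes) of the region required
 * @alignment: required alignment of the free region, 0 for none
 * @mappable: if true, the region must lie within the CPU-mappable aperture
 *
 * Returns 0 when a suitable region is free (possibly after evicting objects
 * in LRU order), -ENOSPC when no eviction candidates could be found, or the
 * error encountered while unbinding a victim.
 */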
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	} else {
		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
				       min_size, alignment, 0))
			return 0;
	}

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the
	 * (per-ring) active list that do not have an outstanding flush.
	 * Once the hardware reports completion (the seqno is updated after
	 * the batchbuffer has been finished) the clean buffer objects are
	 * retired to the inactive list. Any dirty objects are added to the
	 * tail of the flushing list. So after processing the clean active
	 * objects we need to emit an MI_FLUSH to retire the flushing list,
	 * hence the retirement order of the flushing list is in advance of
	 * the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
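
	/*
	 * A note on the drm_mm scan API used below: after a scan is
	 * initialised, candidate blocks are offered via
	 * drm_mm_scan_add_block() (inside mark_free()) until it reports
	 * that a hole of the requested size can be assembled. Every
	 * offered block must then be withdrawn with
	 * drm_mm_scan_remove_block(), which returns true for the blocks
	 * that make up the hole and therefore must be evicted. The unwind
	 * list is LIFO, so blocks are withdrawn in the reverse order they
	 * were added, as the scanner requires.
	 */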

	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
					    alignment, 0,
					    dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Finally add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
		if (obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}
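
	/* Lastly add the dirty objects still on the active lists (step 4
	 * above); unbinding will emit the flushes they require. */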
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (!obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);

		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store the to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list);
			continue;
		}
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}
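
/*
 * Illustrative sketch of the expected caller pattern (the function and
 * helper names here are hypothetical, for exposition only): a binding path
 * first tries to find free space itself, evicts on failure and retries.
 */
#if 0
static int example_bind_retry(struct drm_device *dev, int size,
			      unsigned alignment, bool mappable)
{
	int ret;

	for (;;) {
		/* try_find_space() is a hypothetical search helper. */
		if (try_find_space(dev, size, alignment, mappable))
			return 0;	/* enough room already */

		/* Make room in LRU order, then retry the search. */
		ret = i915_gem_evict_something(dev, size, alignment, mappable);
		if (ret)
			return ret;	/* -ENOSPC or unbind failure */
	}
}
#endif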
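
/**
 * i915_gem_evict_everything - evict all unpinned objects from the GTT
 * @dev: drm device
 * @purgeable_only: only evict objects not marked I915_MADV_WILLNEED
 *
 * Idles the GPU so that every object ends up on the inactive list, then
 * unbinds them. Returns -ENOSPC when there is nothing to evict, or the
 * first error encountered while idling or unbinding.
 */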
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev, purgeable_only);

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	return i915_gem_evict_inactive(dev, purgeable_only);
}

/** Unbinds all inactive objects, optionally only those marked purgeable. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list) {
		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
			int ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	return 0;
}