GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/asahi/lib/agx_device.c
/*
 * Copyright (C) 2021 Alyssa Rosenzweig <[email protected]>
 * Copyright 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <inttypes.h>
#include "agx_device.h"
#include "agx_bo.h"
#include "decode.h"

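/* Handle and address counters for the non-Apple fallback path below. Without
 * a real kernel driver, BO handles and GPU virtual addresses are synthesized
 * from these so the rest of the driver can still run (see the "Faked software
 * path" note in agx_bo_alloc).
 */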
unsigned AGX_FAKE_HANDLE = 0;
uint64_t AGX_FAKE_LO = 0;
uint64_t AGX_FAKE_HI = (1ull << 32);

static void
agx_bo_free(struct agx_device *dev, struct agx_bo *bo)
{
#if __APPLE__
   const uint64_t handle = bo->handle;

   kern_return_t ret = IOConnectCallScalarMethod(dev->fd,
                                                 AGX_SELECTOR_FREE_MEM,
                                                 &handle, 1, NULL, NULL);

   if (ret)
      fprintf(stderr, "error freeing BO mem: %u\n", ret);
#else
   free(bo->ptr.cpu);
#endif

   /* Reset the handle */
   memset(bo, 0, sizeof(*bo));
}

void
agx_shmem_free(struct agx_device *dev, unsigned handle)
{
#if __APPLE__
   const uint64_t input = handle;
   kern_return_t ret = IOConnectCallScalarMethod(dev->fd,
                                                 AGX_SELECTOR_FREE_SHMEM,
                                                 &input, 1, NULL, NULL);

   if (ret)
      fprintf(stderr, "error freeing shmem: %u\n", ret);
#else
#endif
}

struct agx_bo
agx_shmem_alloc(struct agx_device *dev, size_t size, bool cmdbuf)
{
   struct agx_bo bo;

#if __APPLE__
   struct agx_create_shmem_resp out = {};
   size_t out_sz = sizeof(out);

   uint64_t inputs[2] = {
      size,
      cmdbuf ? 1 : 0 // 2 - error reporting, 1 - no error reporting
   };

   kern_return_t ret = IOConnectCallMethod(dev->fd,
                                           AGX_SELECTOR_CREATE_SHMEM, inputs, 2, NULL, 0, NULL,
                                           NULL, &out, &out_sz);

   assert(ret == 0);
   assert(out_sz == sizeof(out));
   assert(out.size == size);
   assert(out.map != 0);

   bo = (struct agx_bo) {
      .type = cmdbuf ? AGX_ALLOC_CMDBUF : AGX_ALLOC_MEMMAP,
      .handle = out.id,
      .ptr.cpu = out.map,
      .size = out.size,
      .guid = 0, /* TODO? */
   };
#else
   bo = (struct agx_bo) {
      .type = cmdbuf ? AGX_ALLOC_CMDBUF : AGX_ALLOC_MEMMAP,
      .handle = AGX_FAKE_HANDLE++,
      .ptr.cpu = calloc(1, size),
      .size = size,
      .guid = 0, /* TODO? */
   };
#endif

   if (dev->debug & AGX_DBG_TRACE)
      agxdecode_track_alloc(&bo);

   return bo;
}

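/* agx_shmem_alloc provides the shared-memory blocks the kernel interface
 * expects; agx_open_device below uses it for the command buffer and memory
 * map, e.g.:
 *
 *    dev->cmdbuf = agx_shmem_alloc(dev, 0x4000, true);
 *    dev->memmap = agx_shmem_alloc(dev, 0x4000, false);
 */
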
static struct agx_bo *
agx_bo_alloc(struct agx_device *dev, size_t size,
             uint32_t flags)
{
   struct agx_bo *bo;
   unsigned handle = 0;

#if __APPLE__
   bool write_combine = false;
   uint32_t mode = 0x430; // shared, ?

   uint32_t args_in[24] = { 0 };
   args_in[1] = write_combine ? 0x400 : 0x0;
   args_in[2] = 0x2580320; //0x18000; // unk
   args_in[3] = 0x1; // unk;
   args_in[4] = 0x4000101; //0x1000101; // unk
   args_in[5] = mode;
   args_in[16] = size;
   args_in[20] = flags;
   args_in[21] = 0x3;

   uint64_t out[10] = { 0 };
   size_t out_sz = sizeof(out);

   kern_return_t ret = IOConnectCallMethod(dev->fd,
                                           AGX_SELECTOR_ALLOCATE_MEM, NULL, 0, args_in,
                                           sizeof(args_in), NULL, 0, out, &out_sz);

   assert(ret == 0);
   assert(out_sz == sizeof(out));
   handle = (out[3] >> 32ull);
#else
   /* Faked software path until we have a DRM driver */
   handle = (++AGX_FAKE_HANDLE);
#endif

   pthread_mutex_lock(&dev->bo_map_lock);
   bo = agx_lookup_bo(dev, handle);
   pthread_mutex_unlock(&dev->bo_map_lock);

   /* Fresh handle */
   assert(!memcmp(bo, &((struct agx_bo) {}), sizeof(*bo)));

   bo->type = AGX_ALLOC_REGULAR;
   bo->size = size;
   bo->flags = flags;
   bo->dev = dev;
   bo->handle = handle;

   ASSERTED bool lo = (flags & 0x08000000);

#if __APPLE__
   bo->ptr.gpu = out[0];
   bo->ptr.cpu = (void *) out[1];
   bo->guid = out[5];
#else
   if (lo) {
      bo->ptr.gpu = AGX_FAKE_LO;
      AGX_FAKE_LO += bo->size;
   } else {
      bo->ptr.gpu = AGX_FAKE_HI;
      AGX_FAKE_HI += bo->size;
   }

   bo->ptr.gpu = (((uint64_t) bo->handle) << (lo ? 16 : 24));
   bo->ptr.cpu = calloc(1, bo->size);
#endif

   assert(bo->ptr.gpu < (1ull << (lo ? 32 : 40)));

   return bo;
}

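/* BOs are reference counted: agx_bo_create returns a BO holding one
 * reference, agx_bo_reference takes an additional one, and the BO is freed
 * once agx_bo_unreference drops the count to zero.
 */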
void
agx_bo_reference(struct agx_bo *bo)
{
   if (bo) {
      ASSERTED int count = p_atomic_inc_return(&bo->refcnt);
      assert(count != 1);
   }
}

void
agx_bo_unreference(struct agx_bo *bo)
{
   if (!bo)
      return;

   /* Don't return to cache if there are still references */
   if (p_atomic_dec_return(&bo->refcnt))
      return;

   struct agx_device *dev = bo->dev;

   pthread_mutex_lock(&dev->bo_map_lock);

   /* Someone might have imported this BO while we were waiting for the
    * lock, let's make sure it's still not referenced before freeing it.
    */
   if (p_atomic_read(&bo->refcnt) == 0) {
      if (dev->debug & AGX_DBG_TRACE)
         agxdecode_track_free(bo);

      /* TODO: cache */
      agx_bo_free(dev, bo);
   }
   pthread_mutex_unlock(&dev->bo_map_lock);
}

struct agx_bo *
agx_bo_create(struct agx_device *dev, unsigned size, unsigned flags)
{
   struct agx_bo *bo;
   assert(size > 0);

   /* To maximize BO cache usage, don't allocate tiny BOs */
   size = ALIGN_POT(size, 4096);

   /* TODO: Cache fetch */
   bo = agx_bo_alloc(dev, size, flags);

   if (!bo) {
      fprintf(stderr, "BO creation failed\n");
      return NULL;
   }

   p_atomic_set(&bo->refcnt, 1);

   if (dev->debug & AGX_DBG_TRACE)
      agxdecode_track_alloc(bo);

   return bo;
}

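/* A minimal usage sketch (illustrative, not part of this file): allocate a
 * BO on an open device, touch its CPU mapping, then drop the reference.
 * The size and memory type below are arbitrary.
 *
 *    struct agx_bo *bo = agx_bo_create(dev, 4096, AGX_MEMORY_TYPE_FRAMEBUFFER);
 *    memset(bo->ptr.cpu, 0, bo->size);   // CPU-visible mapping
 *    uint64_t gpu_va = bo->ptr.gpu;      // address for GPU-side use
 *    agx_bo_unreference(bo);             // frees once the last ref drops
 */
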
static void
agx_get_global_ids(struct agx_device *dev)
{
#if __APPLE__
   uint64_t out[2] = {};
   size_t out_sz = sizeof(out);

   ASSERTED kern_return_t ret = IOConnectCallStructMethod(dev->fd,
                                                          AGX_SELECTOR_GET_GLOBAL_IDS,
                                                          NULL, 0, &out, &out_sz);

   assert(ret == 0);
   assert(out_sz == sizeof(out));
   assert(out[1] > out[0]);

   dev->next_global_id = out[0];
   dev->last_global_id = out[1];
#else
   dev->next_global_id = 0;
   dev->last_global_id = 0x1000000;
#endif
}

uint64_t
agx_get_global_id(struct agx_device *dev)
{
   if (unlikely(dev->next_global_id >= dev->last_global_id)) {
      agx_get_global_ids(dev);
   }

   return dev->next_global_id++;
}

/* Tries to open an AGX device, returns true if successful */

bool
agx_open_device(void *memctx, struct agx_device *dev)
{
#if __APPLE__
   kern_return_t ret;

   /* TODO: Support other models */
   CFDictionaryRef matching = IOServiceNameMatching("AGXAcceleratorG13G_B0");

   io_service_t service =
      IOServiceGetMatchingService(kIOMasterPortDefault, matching);

   if (!service)
      return false;

   ret = IOServiceOpen(service, mach_task_self(), AGX_SERVICE_TYPE, &dev->fd);

   if (ret)
      return false;

   const char *api = "Equestria";
   char in[16] = { 0 };
   assert(strlen(api) < sizeof(in));
   memcpy(in, api, strlen(api));

   ret = IOConnectCallStructMethod(dev->fd, AGX_SELECTOR_SET_API, in,
                                   sizeof(in), NULL, NULL);

   /* Oddly, the return codes are flipped for SET_API */
   if (ret != 1)
      return false;
#else
   /* Only open a fake AGX device on other operating systems if forced */
   if (!getenv("AGX_FAKE_DEVICE"))
      return false;
#endif

   dev->memctx = memctx;
   util_sparse_array_init(&dev->bo_map, sizeof(struct agx_bo), 512);

   /* XXX: why do BO ids below 6 mess things up..? */
   for (unsigned i = 0; i < 6; ++i)
      agx_bo_alloc(dev, 4096, AGX_MEMORY_TYPE_FRAMEBUFFER);

   dev->queue = agx_create_command_queue(dev);
   dev->cmdbuf = agx_shmem_alloc(dev, 0x4000, true); // length becomes kernelCommandDataSize
   dev->memmap = agx_shmem_alloc(dev, 0x4000, false);
   agx_get_global_ids(dev);

   return true;
}

void
agx_close_device(struct agx_device *dev)
{
   util_sparse_array_finish(&dev->bo_map);

#if __APPLE__
   kern_return_t ret = IOServiceClose(dev->fd);

   if (ret)
      fprintf(stderr, "Error from IOServiceClose: %u\n", ret);
#endif
}

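/* A minimal bring-up sketch (illustrative): open the device, do work, then
 * close it. Passing NULL as the memory context is an assumption made for the
 * example.
 *
 *    struct agx_device dev = { 0 };
 *    if (!agx_open_device(NULL, &dev))
 *       return;  // no AGX device, or AGX_FAKE_DEVICE unset off macOS
 *    ...
 *    agx_close_device(&dev);
 */
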
#if __APPLE__
static struct agx_notification_queue
agx_create_notification_queue(mach_port_t connection)
{
   struct agx_create_notification_queue_resp resp;
   size_t resp_size = sizeof(resp);
   assert(resp_size == 0x10);

   ASSERTED kern_return_t ret = IOConnectCallStructMethod(connection,
                                                          AGX_SELECTOR_CREATE_NOTIFICATION_QUEUE,
                                                          NULL, 0, &resp, &resp_size);

   assert(resp_size == sizeof(resp));
   assert(ret == 0);

   mach_port_t notif_port = IODataQueueAllocateNotificationPort();
   IOConnectSetNotificationPort(connection, 0, notif_port, resp.unk2);

   return (struct agx_notification_queue) {
      .port = notif_port,
      .queue = resp.queue,
      .id = resp.unk2
   };
}
#endif

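/* Creates a command queue together with a notification queue that completion
 * events are delivered on (consumed by agx_wait_queue below). The two
 * trailing IOKit calls use unidentified selectors (0x1D, 0x29), passing the
 * new queue id alongside the notification queue id and a 0x1ffffffff
 * constant.
 */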
struct agx_command_queue
agx_create_command_queue(struct agx_device *dev)
{
#if __APPLE__
   struct agx_command_queue queue = {};

   {
      uint8_t buffer[1024 + 8] = { 0 };
      const char *path = "/tmp/a.out";
      assert(strlen(path) < 1022);
      memcpy(buffer + 0, path, strlen(path));

      /* Copy to the end */
      unsigned END_LEN = MIN2(strlen(path), 1024 - strlen(path));
      unsigned SKIP = strlen(path) - END_LEN;
      unsigned OFFS = 1024 - END_LEN;
      memcpy(buffer + OFFS, path + SKIP, END_LEN);

      buffer[1024] = 0x2;

      struct agx_create_command_queue_resp out = {};
      size_t out_sz = sizeof(out);

      ASSERTED kern_return_t ret = IOConnectCallStructMethod(dev->fd,
                                                             AGX_SELECTOR_CREATE_COMMAND_QUEUE,
                                                             buffer, sizeof(buffer),
                                                             &out, &out_sz);

      assert(ret == 0);
      assert(out_sz == sizeof(out));

      queue.id = out.id;
      assert(queue.id);
   }

   queue.notif = agx_create_notification_queue(dev->fd);

   {
      uint64_t scalars[2] = {
         queue.id,
         queue.notif.id
      };

      ASSERTED kern_return_t ret = IOConnectCallScalarMethod(dev->fd,
                                                             0x1D,
                                                             scalars, 2, NULL, NULL);

      assert(ret == 0);
   }

   {
      uint64_t scalars[2] = {
         queue.id,
         0x1ffffffffull
      };

      ASSERTED kern_return_t ret = IOConnectCallScalarMethod(dev->fd,
                                                             0x29,
                                                             scalars, 2, NULL, NULL);

      assert(ret == 0);
   }

   return queue;
#else
   return (struct agx_command_queue) {
      0
   };
#endif
}

void
agx_submit_cmdbuf(struct agx_device *dev, unsigned cmdbuf, unsigned mappings, uint64_t scalar)
{
#if __APPLE__
   struct agx_submit_cmdbuf_req req = {
      .unk0 = 0x10,
      .unk1 = 0x1,
      .cmdbuf = cmdbuf,
      .mappings = mappings,
      .user_0 = (void *) ((uintptr_t) 0xABCD), // Passed in the notif queue
      .user_1 = (void *) ((uintptr_t) 0x1234), // Maybe pick better
      .unk2 = 0x0,
      .unk3 = 0x1,
   };

   assert(sizeof(req) == 40);

   ASSERTED kern_return_t ret = IOConnectCallMethod(dev->fd,
                                                    AGX_SELECTOR_SUBMIT_COMMAND_BUFFERS,
                                                    &scalar, 1,
                                                    &req, sizeof(req),
                                                    NULL, 0, NULL, 0);
   assert(ret == 0);
   return;
#endif
}

void
agx_wait_queue(struct agx_command_queue queue)
{
#if __APPLE__
   IOReturn ret = IODataQueueWaitForAvailableData(queue.notif.queue, queue.notif.port);

   uint64_t data[4];
   unsigned sz = sizeof(data);
   ret = IODataQueueDequeue(queue.notif.queue, data, &sz);
   assert(sz == sizeof(data));
   assert(data[0] == 0xABCD);

   ret = IODataQueueWaitForAvailableData(queue.notif.queue, queue.notif.port);
   ret = IODataQueueDequeue(queue.notif.queue, data, &sz);
   assert(sz == sizeof(data));
   assert(data[0] == 0x1234);

   assert(!IODataQueueDataAvailable(queue.notif.queue));
#endif
}

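/* A minimal submission sketch (illustrative; the scalar value is an
 * assumption, and real callers must populate the command buffer and memory
 * map contents first):
 *
 *    agx_submit_cmdbuf(dev, dev->cmdbuf.handle, dev->memmap.handle, 0);
 *    agx_wait_queue(dev->queue);  // blocks until both completion events arrive
 */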