Path: blob/21.2-virgl/src/intel/vulkan/anv_gem.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/intel_defines.h"
#include "common/intel_gem.h"
#include "drm-uapi/sync_file.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

uint32_t
anv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
                       uint32_t num_regions,
                       struct drm_i915_gem_memory_class_instance *regions)
{
   struct drm_i915_gem_create_ext_memory_regions ext_regions = {
      .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
      .num_regions = num_regions,
      .regions = (uintptr_t)regions,
   };

   struct drm_i915_gem_create_ext gem_create = {
      .size = anv_bo_size,
      .extensions = (uintptr_t) &ext_regions,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE_EXT,
                         &gem_create);
   if (ret != 0) {
      return 0;
   }

   return gem_create.handle;
}
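
/* Illustrative sketch (not part of this file): the expected lifecycle of a
 * BO allocated through the wrappers above, with a made-up size and with
 * error handling reduced to the minimum.
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    if (handle == 0)
 *       return 0;   (allocation failed; valid gem handles are never 0)
 *    ...use the BO...
 *    anv_gem_close(device, handle);
 */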

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on
 * error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = (flags & I915_MMAP_WC) ?
               I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}

static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP or DRM_IOCTL_I915_GEM_MMAP_OFFSET,
 * depending on kernel support. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   void *map;
   if (device->physical->has_mmap_offset)
      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 0 (idle), 1 (busy), or negative on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}
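
/* Illustrative sketch (not part of this file): because anv_gem_wait() writes
 * the remaining time back through timeout_ns, a caller can wait on several
 * BOs against one overall budget. Hypothetical usage:
 *
 *    int64_t timeout_ns = 1000000000;   (one second total)
 *    for (uint32_t i = 0; i < bo_count; i++) {
 *       if (anv_gem_wait(device, handles[i], &timeout_ns) != 0)
 *          return VK_TIMEOUT;
 *    }
 */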

int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Return -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
   struct drm_get_cap cap = {
      .capability = capability,
   };

   intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
   return cap.value;
}
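
/* Illustrative sketch (not part of this file): anv_gem_get_param() folds
 * every failure into a 0 return, so it is only safe for parameters where 0
 * is not a meaningful value. Hypothetical usage:
 *
 *    if (anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE))
 *       ...the kernel can return a sync_file out-fence from execbuffer...
 */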

bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:
   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     INTEL_CONTEXT_MEDIUM_PRIORITY);
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}
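
/* Illustrative sketch (not part of this file): the engines buffer passed via
 * I915_CONTEXT_PARAM_ENGINES below is laid out as a __u64 extensions field
 * followed by packed (class, instance) __u16 pairs, i.e. the equivalent of:
 *
 *    struct {
 *       __u64 extensions;
 *       struct { __u16 engine_class, engine_instance; } engines[];
 *    };
 *
 * anv_gem_create_context_engines() builds this buffer by hand with malloc
 * and pointer arithmetic rather than declaring such a struct.
 */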

int
anv_gem_create_context_engines(struct anv_device *device,
                               const struct drm_i915_query_engine_info *info,
                               int num_engines, uint16_t *engine_classes)
{
   const size_t engine_inst_sz = 2 * sizeof(__u16); /* 1 class, 1 instance */
   const size_t engines_param_size =
      sizeof(__u64) /* extensions */ + num_engines * engine_inst_sz;

   void *engines_param = malloc(engines_param_size);
   assert(engines_param);
   *(__u64*)engines_param = 0;
   __u16 *class_inst_ptr = (__u16*)(((__u64*)engines_param) + 1);

   /* For each type of drm_i915_gem_engine_class of interest, we keep track
    * of the previous engine instance used.
    */
   int last_engine_idx[] = {
      [I915_ENGINE_CLASS_RENDER] = -1,
   };

   int i915_engine_counts[] = {
      [I915_ENGINE_CLASS_RENDER] =
         anv_gem_count_engines(info, I915_ENGINE_CLASS_RENDER),
   };

   /* For each queue, we look for the next instance that matches the class
    * we need.
    */
   for (int i = 0; i < num_engines; i++) {
      uint16_t engine_class = engine_classes[i];
      if (i915_engine_counts[engine_class] <= 0) {
         free(engines_param);
         return -1;
      }

      /* Run through the engines reported by the kernel looking for the next
       * matching instance. We wrap around in case we want to create multiple
       * contexts on an engine instance.
       */
      int engine_instance = -1;
      for (int j = 0; j < info->num_engines; j++) {
         int *idx = &last_engine_idx[engine_class];
         if (++(*idx) >= info->num_engines)
            *idx = 0;
         if (info->engines[*idx].engine.engine_class == engine_class) {
            engine_instance = info->engines[*idx].engine.engine_instance;
            break;
         }
      }
      if (engine_instance < 0) {
         free(engines_param);
         return -1;
      }

      *class_inst_ptr++ = engine_class;
      *class_inst_ptr++ = engine_instance;
   }

   assert((uintptr_t)engines_param + engines_param_size ==
          (uintptr_t)class_inst_ptr);

   struct drm_i915_gem_context_create_ext_setparam set_engines = {
      .base = {
         .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
      },
      .param = {
         .param = I915_CONTEXT_PARAM_ENGINES,
         .value = (uintptr_t)engines_param,
         .size = engines_param_size,
      }
   };
   struct drm_i915_gem_context_create_ext create = {
      .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
      .extensions = (uintptr_t)&set_engines,
   };
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
   free(engines_param);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}

int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

int
anv_gem_context_get_reset_stats(int fd, int context,
                                uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = context,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | DRM_RDWR,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}

int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   struct sync_merge_data args = {
      .name = "anv merge fence",
      .fd2 = fd2,
      .fence = -1,
   };

   int ret = intel_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}
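
/* Illustrative sketch (not part of this file): SYNC_IOC_MERGE is issued on
 * fd1 and yields a third sync file that signals once both inputs have
 * signaled. Hypothetical usage, with fence_a and fence_b standing in for
 * two sync file fds:
 *
 *    int merged = anv_gem_sync_file_merge(device, fence_a, fence_b);
 *    if (merged >= 0) {
 *       ...hand merged to a waiter or import it into a syncobj...
 *       close(merged);
 *    }
 */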
args.fence;599}600601uint32_t602anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)603{604struct drm_syncobj_create args = {605.flags = flags,606};607608int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);609if (ret)610return 0;611612return args.handle;613}614615void616anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)617{618struct drm_syncobj_destroy args = {619.handle = handle,620};621622intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);623}624625int626anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)627{628struct drm_syncobj_handle args = {629.handle = handle,630};631632int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);633if (ret)634return -1;635636return args.fd;637}638639uint32_t640anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)641{642struct drm_syncobj_handle args = {643.fd = fd,644};645646int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);647if (ret)648return 0;649650return args.handle;651}652653int654anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)655{656struct drm_syncobj_handle args = {657.handle = handle,658.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,659};660661int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);662if (ret)663return -1;664665return args.fd;666}667668int669anv_gem_syncobj_import_sync_file(struct anv_device *device,670uint32_t handle, int fd)671{672struct drm_syncobj_handle args = {673.handle = handle,674.fd = fd,675.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,676};677678return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);679}680681void682anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)683{684struct drm_syncobj_array args = {685.handles = (uint64_t)(uintptr_t)&handle,686.count_handles = 1,687};688689intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);690}691692bool693anv_gem_supports_syncobj_wait(int fd)694{695return intel_gem_supports_syncobj_wait(fd);696}697698int699anv_gem_syncobj_wait(struct anv_device *device,700const uint32_t *handles, uint32_t num_handles,701int64_t abs_timeout_ns, bool wait_all)702{703struct drm_syncobj_wait args = {704.handles = (uint64_t)(uintptr_t)handles,705.count_handles = num_handles,706.timeout_nsec = abs_timeout_ns,707.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,708};709710if (wait_all)711args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;712713return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);714}715716int717anv_gem_syncobj_timeline_wait(struct anv_device *device,718const uint32_t *handles, const uint64_t *points,719uint32_t num_items, int64_t abs_timeout_ns,720bool wait_all, bool wait_materialize)721{722assert(device->physical->has_syncobj_wait_available);723724struct drm_syncobj_timeline_wait args = {725.handles = (uint64_t)(uintptr_t)handles,726.points = (uint64_t)(uintptr_t)points,727.count_handles = num_items,728.timeout_nsec = abs_timeout_ns,729.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,730};731732if (wait_all)733args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;734if (wait_materialize)735args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;736737return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);738}739740int741anv_gem_syncobj_timeline_signal(struct anv_device *device,742const uint32_t *handles, const uint64_t *points,743uint32_t num_items)744{745assert(device->physical->has_syncobj_wait_available);746747struct drm_syncobj_timeline_array args = 

int
anv_gem_syncobj_timeline_signal(struct anv_device *device,
                                const uint32_t *handles, const uint64_t *points,
                                uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}

int
anv_gem_syncobj_timeline_query(struct anv_device *device,
                               const uint32_t *handles, uint64_t *points,
                               uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}

struct drm_i915_query_engine_info *
anv_gem_get_engine_info(int fd)
{
   return intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO);
}

int
anv_gem_count_engines(const struct drm_i915_query_engine_info *info,
                      uint16_t engine_class)
{
   int count = 0;
   for (int i = 0; i < info->num_engines; i++) {
      if (info->engines[i].engine.engine_class == engine_class)
         count++;
   }
   return count;
}
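
/* Illustrative sketch (not part of this file): typical pairing of the two
 * engine helpers above. intel_i915_query_alloc() hands back an allocated
 * buffer that the caller is expected to free. Hypothetical usage:
 *
 *    struct drm_i915_query_engine_info *info = anv_gem_get_engine_info(fd);
 *    if (info) {
 *       int rcs_count = anv_gem_count_engines(info, I915_ENGINE_CLASS_RENDER);
 *       ...
 *       free(info);
 *    }
 */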