Path: blob/21.2-virgl/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
/*
 * Copyright © 2011 Marek Olšák <[email protected]>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "util/os_time.h"

#include "frontend/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <inttypes.h>

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags);

static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
   return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
   struct list_head list;
   uint64_t offset;
   uint64_t size;
};

static bool radeon_real_bo_is_busy(struct radeon_bo *bo)
{
   struct drm_radeon_gem_busy args = {0};

   args.handle = bo->handle;
   return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                              &args, sizeof(args)) != 0;
}

static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
   unsigned num_idle;
   bool busy = false;

   if (bo->handle)
      return radeon_real_bo_is_busy(bo);

   mtx_lock(&bo->rws->bo_fence_lock);
   for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
      if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
         busy = true;
         break;
      }
      radeon_ws_bo_reference(&bo->u.slab.fences[num_idle], NULL);
   }
   memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
           (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
   bo->u.slab.num_fences -= num_idle;
   mtx_unlock(&bo->rws->bo_fence_lock);

   return busy;
}

static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
{
   struct drm_radeon_gem_wait_idle args = {0};

   args.handle = bo->handle;
   while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                          &args, sizeof(args)) == -EBUSY);
}

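/* Wait until a buffer is idle. Real BOs are waited on directly through the
 * WAIT_IDLE ioctl; slab entries instead wait on each fence BO recorded for
 * the entry, dropping the fence lock while each wait is in progress.
 */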
static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
   if (bo->handle) {
      radeon_real_bo_wait_idle(bo);
   } else {
      mtx_lock(&bo->rws->bo_fence_lock);
      while (bo->u.slab.num_fences) {
         struct radeon_bo *fence = NULL;
         radeon_ws_bo_reference(&fence, bo->u.slab.fences[0]);
         mtx_unlock(&bo->rws->bo_fence_lock);

         /* Wait without holding the fence lock. */
         radeon_real_bo_wait_idle(fence);

         mtx_lock(&bo->rws->bo_fence_lock);
         if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
            radeon_ws_bo_reference(&bo->u.slab.fences[0], NULL);
            memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
                    (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
            bo->u.slab.num_fences--;
         }
         radeon_ws_bo_reference(&fence, NULL);
      }
      mtx_unlock(&bo->rws->bo_fence_lock);
   }
}

static bool radeon_bo_wait(struct radeon_winsys *rws,
                           struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct radeon_bo *bo = radeon_bo(_buf);
   int64_t abs_timeout;

   /* No timeout. Just query. */
   if (timeout == 0)
      return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

   abs_timeout = os_time_get_absolute_timeout(timeout);

   /* Wait if any ioctl is being submitted with this buffer. */
   if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
      return false;

   /* Infinite timeout. */
   if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
      radeon_bo_wait_idle(bo);
      return true;
   }

   /* Other timeouts need to be emulated with a loop. */
   while (radeon_bo_is_busy(bo)) {
      if (os_time_get_nano() >= abs_timeout)
         return false;
      os_time_sleep(10);
   }

   return true;
}

static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
   /* Zero domains the driver doesn't understand. */
   domain &= RADEON_DOMAIN_VRAM_GTT;

   /* If no domain is set, we must set something... */
   if (!domain)
      domain = RADEON_DOMAIN_VRAM_GTT;

   return domain;
}

static enum radeon_bo_domain radeon_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   struct radeon_bo *bo = (struct radeon_bo*)buf;
   struct drm_radeon_gem_op args;

   if (bo->rws->info.drm_minor < 38)
      return RADEON_DOMAIN_VRAM_GTT;

   memset(&args, 0, sizeof(args));
   args.handle = bo->handle;
   args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

   if (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                           &args, sizeof(args))) {
      fprintf(stderr, "radeon: failed to get initial domain: %p 0x%08X\n",
              bo, bo->handle);
      /* Default domain as returned by get_valid_domain. */
      return RADEON_DOMAIN_VRAM_GTT;
   }

   /* GEM domains and winsys domains are defined the same. */
   return get_valid_domain(args.value);
}

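/* First-fit allocator for GPU virtual address space. The request is rounded
 * up to the GART page size, then the hole list is scanned for the first free
 * range that satisfies the size and alignment, splitting or consuming the
 * hole as needed. If no hole fits, the range is taken from the unused space
 * at heap->start, and 0 is returned when the heap is exhausted.
 */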
static uint64_t radeon_bomgr_find_va(const struct radeon_info *info,
                                     struct radeon_vm_heap *heap,
                                     uint64_t size, uint64_t alignment)
{
   struct radeon_bo_va_hole *hole, *n;
   uint64_t offset = 0, waste = 0;

   /* All VM address space holes will implicitly start aligned to the
    * size alignment, so we don't need to sanitize the alignment here
    */
   size = align(size, info->gart_page_size);

   mtx_lock(&heap->mutex);
   /* first look for a hole */
   LIST_FOR_EACH_ENTRY_SAFE(hole, n, &heap->holes, list) {
      offset = hole->offset;
      waste = offset % alignment;
      waste = waste ? alignment - waste : 0;
      offset += waste;
      if (offset >= (hole->offset + hole->size)) {
         continue;
      }
      if (!waste && hole->size == size) {
         offset = hole->offset;
         list_del(&hole->list);
         FREE(hole);
         mtx_unlock(&heap->mutex);
         return offset;
      }
      if ((hole->size - waste) > size) {
         if (waste) {
            n = CALLOC_STRUCT(radeon_bo_va_hole);
            n->size = waste;
            n->offset = hole->offset;
            list_add(&n->list, &hole->list);
         }
         hole->size -= (size + waste);
         hole->offset += size + waste;
         mtx_unlock(&heap->mutex);
         return offset;
      }
      if ((hole->size - waste) == size) {
         hole->size = waste;
         mtx_unlock(&heap->mutex);
         return offset;
      }
   }

   offset = heap->start;
   waste = offset % alignment;
   waste = waste ? alignment - waste : 0;

   if (offset + waste + size > heap->end) {
      mtx_unlock(&heap->mutex);
      return 0;
   }

   if (waste) {
      n = CALLOC_STRUCT(radeon_bo_va_hole);
      n->size = waste;
      n->offset = offset;
      list_add(&n->list, &heap->holes);
   }
   offset += waste;
   heap->start += size + waste;
   mtx_unlock(&heap->mutex);
   return offset;
}

static uint64_t radeon_bomgr_find_va64(struct radeon_drm_winsys *ws,
                                       uint64_t size, uint64_t alignment)
{
   uint64_t va = 0;

   /* Try to allocate from the 64-bit address space first.
    * If it doesn't exist (start = 0) or if it doesn't have enough space,
    * fall back to the 32-bit address space.
    */
   if (ws->vm64.start)
      va = radeon_bomgr_find_va(&ws->info, &ws->vm64, size, alignment);
   if (!va)
      va = radeon_bomgr_find_va(&ws->info, &ws->vm32, size, alignment);
   return va;
}

static void radeon_bomgr_free_va(const struct radeon_info *info,
                                 struct radeon_vm_heap *heap,
                                 uint64_t va, uint64_t size)
{
   struct radeon_bo_va_hole *hole = NULL;

   size = align(size, info->gart_page_size);

   mtx_lock(&heap->mutex);
   if ((va + size) == heap->start) {
      heap->start = va;
      /* Delete uppermost hole if it reaches the new top */
      if (!list_is_empty(&heap->holes)) {
         hole = container_of(heap->holes.next, struct radeon_bo_va_hole, list);
         if ((hole->offset + hole->size) == va) {
            heap->start = hole->offset;
            list_del(&hole->list);
            FREE(hole);
         }
      }
   } else {
      struct radeon_bo_va_hole *next;

      hole = container_of(&heap->holes, struct radeon_bo_va_hole, list);
      LIST_FOR_EACH_ENTRY(next, &heap->holes, list) {
         if (next->offset < va)
            break;
         hole = next;
      }

      if (&hole->list != &heap->holes) {
         /* Grow upper hole if it's adjacent */
         if (hole->offset == (va + size)) {
            hole->offset = va;
            hole->size += size;
            /* Merge lower hole if it's adjacent */
            if (next != hole && &next->list != &heap->holes &&
                (next->offset + next->size) == va) {
               next->size += hole->size;
               list_del(&hole->list);
               FREE(hole);
            }
            goto out;
         }
      }

      /* Grow lower hole if it's adjacent */
      if (next != hole && &next->list != &heap->holes &&
          (next->offset + next->size) == va) {
         next->size += size;
         goto out;
      }

      /* FIXME on allocation failure we just lose virtual address space
       * maybe print a warning
       */
      next = CALLOC_STRUCT(radeon_bo_va_hole);
      if (next) {
         next->size = size;
         next->offset = va;
         list_add(&next->list, &hole->list);
      }
   }
out:
   mtx_unlock(&heap->mutex);
}

void radeon_bo_destroy(void *winsys, struct pb_buffer *_buf)
{
   struct radeon_bo *bo = radeon_bo(_buf);
   struct radeon_drm_winsys *rws = bo->rws;
   struct drm_gem_close args;

   assert(bo->handle && "must not be called for slab entries");

   memset(&args, 0, sizeof(args));

   mtx_lock(&rws->bo_handles_mutex);
   _mesa_hash_table_remove_key(rws->bo_handles, (void*)(uintptr_t)bo->handle);
   if (bo->flink_name) {
      _mesa_hash_table_remove_key(rws->bo_names,
                                  (void*)(uintptr_t)bo->flink_name);
   }
   mtx_unlock(&rws->bo_handles_mutex);

   if (bo->u.real.ptr)
      os_munmap(bo->u.real.ptr, bo->base.size);

   if (rws->info.r600_has_virtual_memory) {
      if (rws->va_unmap_working) {
         struct drm_radeon_gem_va va;

         va.handle = bo->handle;
         va.vm_id = 0;
         va.operation = RADEON_VA_UNMAP;
         va.flags = RADEON_VM_PAGE_READABLE |
                    RADEON_VM_PAGE_WRITEABLE |
                    RADEON_VM_PAGE_SNOOPED;
         va.offset = bo->va;

         if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                 sizeof(va)) != 0 &&
             va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size : %"PRIu64" bytes\n", bo->base.size);
            fprintf(stderr, "radeon: va : 0x%"PRIx64"\n", bo->va);
         }
      }

      radeon_bomgr_free_va(&rws->info,
                           bo->va < rws->vm32.end ? &rws->vm32 : &rws->vm64,
                           bo->va, bo->base.size);
   }

   /* Close object. */
   args.handle = bo->handle;
   drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

   mtx_destroy(&bo->u.real.map_mutex);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);

   if (bo->u.real.map_count >= 1) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         bo->rws->mapped_vram -= bo->base.size;
      else
         bo->rws->mapped_gtt -= bo->base.size;
      bo->rws->num_mapped_buffers--;
   }

   FREE(bo);
}

static void radeon_bo_destroy_or_cache(void *winsys, struct pb_buffer *_buf)
{
   struct radeon_bo *bo = radeon_bo(_buf);

   assert(bo->handle && "must not be called for slab entries");

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(&bo->u.real.cache_entry);
   else
      radeon_bo_destroy(NULL, _buf);
}

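/* CPU-map a buffer. User-pointer BOs simply return the user pointer, and slab
 * entries are redirected to their backing real BO with the entry's offset
 * added. The real BO is mapped at most once (GEM_MMAP followed by mmap); the
 * mapping is cached in u.real.ptr and reference-counted via u.real.map_count.
 */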
void *radeon_bo_do_map(struct radeon_bo *bo)
{
   struct drm_radeon_gem_mmap args = {0};
   void *ptr;
   unsigned offset;

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   if (bo->handle) {
      offset = 0;
   } else {
      offset = bo->va - bo->u.slab.real->va;
      bo = bo->u.slab.real;
   }

   /* Map the buffer. */
   mtx_lock(&bo->u.real.map_mutex);
   /* Return the pointer if it's already mapped. */
   if (bo->u.real.ptr) {
      bo->u.real.map_count++;
      mtx_unlock(&bo->u.real.map_mutex);
      return (uint8_t*)bo->u.real.ptr + offset;
   }
   args.handle = bo->handle;
   args.offset = 0;
   args.size = (uint64_t)bo->base.size;
   if (drmCommandWriteRead(bo->rws->fd,
                           DRM_RADEON_GEM_MMAP,
                           &args,
                           sizeof(args))) {
      mtx_unlock(&bo->u.real.map_mutex);
      fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
              bo, bo->handle);
      return NULL;
   }

   ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 bo->rws->fd, args.addr_ptr);
   if (ptr == MAP_FAILED) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&bo->rws->bo_cache);

      ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                    bo->rws->fd, args.addr_ptr);
      if (ptr == MAP_FAILED) {
         mtx_unlock(&bo->u.real.map_mutex);
         fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
         return NULL;
      }
   }
   bo->u.real.ptr = ptr;
   bo->u.real.map_count = 1;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->rws->mapped_vram += bo->base.size;
   else
      bo->rws->mapped_gtt += bo->base.size;
   bo->rws->num_mapped_buffers++;

   mtx_unlock(&bo->u.real.map_mutex);
   return (uint8_t*)bo->u.real.ptr + offset;
}

static void *radeon_bo_map(struct radeon_winsys *rws,
                           struct pb_buffer *buf,
                           struct radeon_cmdbuf *rcs,
                           enum pipe_map_flags usage)
{
   struct radeon_bo *bo = (struct radeon_bo*)buf;
   struct radeon_drm_cs *cs = rcs ? radeon_drm_cs(rcs) : NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_MAP_DONTBLOCK) {
         if (!(usage & PIPE_MAP_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
               cs->flush_cs(cs->flush_data,
                            RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
               return NULL;
            }

            if (!radeon_bo_wait(rws, (struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data,
                            RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
               return NULL;
            }

            if (!radeon_bo_wait(rws, (struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_MAP_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
               cs->flush_cs(cs->flush_data,
                            RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
            }
            radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data,
                               RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
               } else {
                  /* Try to avoid busy-waiting in radeon_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     radeon_drm_cs_sync_flush(rcs);
               }
            }

            radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->rws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   return radeon_bo_do_map(bo);
}

static void radeon_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *_buf)
{
   struct radeon_bo *bo = (struct radeon_bo*)_buf;

   if (bo->user_ptr)
      return;

   if (!bo->handle)
      bo = bo->u.slab.real;

   mtx_lock(&bo->u.real.map_mutex);
   if (!bo->u.real.ptr) {
      mtx_unlock(&bo->u.real.map_mutex);
      return; /* it's not been mapped */
   }

   assert(bo->u.real.map_count);
   if (--bo->u.real.map_count) {
      mtx_unlock(&bo->u.real.map_mutex);
      return; /* it's been mapped multiple times */
   }

   os_munmap(bo->u.real.ptr, bo->base.size);
   bo->u.real.ptr = NULL;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->rws->mapped_vram -= bo->base.size;
   else
      bo->rws->mapped_gtt -= bo->base.size;
   bo->rws->num_mapped_buffers--;

   mtx_unlock(&bo->u.real.map_mutex);
}

static const struct pb_vtbl radeon_bo_vtbl = {
   radeon_bo_destroy_or_cache
   /* other functions are never called */
};

static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
                                          unsigned initial_domains,
                                          unsigned flags,
                                          int heap)
{
   struct radeon_bo *bo;
   struct drm_radeon_gem_create args;
   int r;

   memset(&args, 0, sizeof(args));

   assert(initial_domains);
   assert((initial_domains &
           ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

   args.size = size;
   args.alignment = alignment;
   args.initial_domain = initial_domains;
   args.flags = 0;

   /* If VRAM is just stolen system memory, allow both VRAM and
    * GTT, whichever has free space. If a buffer is evicted from
    * VRAM to GTT, it will stay there.
    */
   if (!rws->info.has_dedicated_vram)
      args.initial_domain |= RADEON_DOMAIN_GTT;

   if (flags & RADEON_FLAG_GTT_WC)
      args.flags |= RADEON_GEM_GTT_WC;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      args.flags |= RADEON_GEM_NO_CPU_ACCESS;

   if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                           &args, sizeof(args))) {
      fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
      fprintf(stderr, "radeon: size : %u bytes\n", size);
      fprintf(stderr, "radeon: alignment : %u bytes\n", alignment);
      fprintf(stderr, "radeon: domains : %u\n", args.initial_domain);
      fprintf(stderr, "radeon: flags : %u\n", args.flags);
      return NULL;
   }

   assert(args.handle != 0);

   bo = CALLOC_STRUCT(radeon_bo);
   if (!bo)
      return NULL;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment_log2 = util_logbase2(alignment);
   bo->base.usage = 0;
   bo->base.size = size;
   bo->base.vtbl = &radeon_bo_vtbl;
   bo->rws = rws;
   bo->handle = args.handle;
   bo->va = 0;
   bo->initial_domain = initial_domains;
   bo->hash = __sync_fetch_and_add(&rws->next_bo_hash, 1);
   (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

   if (heap >= 0) {
      pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                          heap);
   }

   if (rws->info.r600_has_virtual_memory) {
      struct drm_radeon_gem_va va;
      unsigned va_gap_size;

      va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;

      if (flags & RADEON_FLAG_32BIT) {
         bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm32,
                                       size + va_gap_size, alignment);
         assert(bo->va + size < rws->vm32.end);
      } else {
         bo->va = radeon_bomgr_find_va64(rws, size + va_gap_size, alignment);
      }

      va.handle = bo->handle;
      va.vm_id = 0;
      va.operation = RADEON_VA_MAP;
      va.flags = RADEON_VM_PAGE_READABLE |
                 RADEON_VM_PAGE_WRITEABLE |
                 RADEON_VM_PAGE_SNOOPED;
      va.offset = bo->va;
      r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
      if (r && va.operation == RADEON_VA_RESULT_ERROR) {
         fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
         fprintf(stderr, "radeon: size : %d bytes\n", size);
         fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
         fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
         fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
         radeon_bo_destroy(NULL, &bo->base);
         return NULL;
      }
      mtx_lock(&rws->bo_handles_mutex);
      if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
         struct pb_buffer *b = &bo->base;
         struct radeon_bo *old_bo =
            _mesa_hash_table_u64_search(rws->bo_vas, va.offset);

         mtx_unlock(&rws->bo_handles_mutex);
         pb_reference(&b, &old_bo->base);
         return radeon_bo(b);
      }

      _mesa_hash_table_u64_insert(rws->bo_vas, bo->va, bo);
      mtx_unlock(&rws->bo_handles_mutex);
   }

   if (initial_domains & RADEON_DOMAIN_VRAM)
      rws->allocated_vram += align(size, rws->info.gart_page_size);
   else if (initial_domains & RADEON_DOMAIN_GTT)
      rws->allocated_gtt += align(size, rws->info.gart_page_size);

   return bo;
}

bool radeon_bo_can_reclaim(void *winsys, struct pb_buffer *_buf)
{
   struct radeon_bo *bo = radeon_bo(_buf);

   if (radeon_bo_is_referenced_by_any_cs(bo))
      return false;

   return radeon_bo_wait(winsys, _buf, 0, RADEON_USAGE_READWRITE);
}

bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct radeon_bo *bo = container_of(entry, struct radeon_bo, u.slab.entry);

   return radeon_bo_can_reclaim(NULL, &bo->base);
}

static void radeon_bo_slab_destroy(void *winsys, struct pb_buffer *_buf)
{
   struct radeon_bo *bo = radeon_bo(_buf);

   assert(!bo->handle);

   pb_slab_free(&bo->rws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl radeon_winsys_bo_slab_vtbl = {
   radeon_bo_slab_destroy
   /* other functions are never called */
};

struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
   struct radeon_drm_winsys *ws = priv;
   struct radeon_slab *slab = CALLOC_STRUCT(radeon_slab);
   enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
   enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
   unsigned base_hash;

   if (!slab)
      return NULL;

   slab->buffer = radeon_bo(radeon_winsys_bo_create(&ws->base,
                                                    64 * 1024, 64 * 1024,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   assert(slab->buffer->handle);

   slab->base.num_entries = slab->buffer->base.size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   list_inithead(&slab->base.free);

   base_hash = __sync_fetch_and_add(&ws->next_bo_hash, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct radeon_bo *bo = &slab->entries[i];

      bo->base.alignment_log2 = util_logbase2(entry_size);
      bo->base.usage = slab->buffer->base.usage;
      bo->base.size = entry_size;
      bo->base.vtbl = &radeon_winsys_bo_slab_vtbl;
      bo->rws = ws;
      bo->va = slab->buffer->va + i * entry_size;
      bo->initial_domain = domains;
      bo->hash = base_hash + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.entry.entry_size = entry_size;
      bo->u.slab.real = slab->buffer;

      list_addtail(&bo->u.slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail_buffer:
   radeon_ws_bo_reference(&slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}

void radeon_bo_slab_free(void *priv, struct pb_slab *pslab)
{
   struct radeon_slab *slab = (struct radeon_slab *)pslab;

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct radeon_bo *bo = &slab->entries[i];
      for (unsigned j = 0; j < bo->u.slab.num_fences; ++j)
         radeon_ws_bo_reference(&bo->u.slab.fences[j], NULL);
      FREE(bo->u.slab.fences);
   }

   FREE(slab->entries);
   radeon_ws_bo_reference(&slab->buffer, NULL);
   FREE(slab);
}

static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0: tile_split = 64; break;
   case 1: tile_split = 128; break;
   case 2: tile_split = 256; break;
   case 3: tile_split = 512; break;
   default:
   case 4: tile_split = 1024; break;
   case 5: tile_split = 2048; break;
   case 6: tile_split = 4096; break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64: return 0;
   case 128: return 1;
   case 256: return 2;
   case 512: return 3;
   default:
   case 1024: return 4;
   case 2048: return 5;
   case 4096: return 6;
   }
}

static void radeon_bo_get_metadata(struct radeon_winsys *rws,
                                   struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md,
                                   struct radeon_surf *surf)
{
   struct radeon_bo *bo = radeon_bo(_buf);
   struct drm_radeon_gem_set_tiling args;

   assert(bo->handle && "must not be called for slab entries");

   memset(&args, 0, sizeof(args));

   args.handle = bo->handle;

   drmCommandWriteRead(bo->rws->fd,
                       DRM_RADEON_GEM_GET_TILING,
                       &args,
                       sizeof(args));

   if (surf) {
      if (args.tiling_flags & RADEON_TILING_MACRO)
         md->mode = RADEON_SURF_MODE_2D;
      else if (args.tiling_flags & RADEON_TILING_MICRO)
         md->mode = RADEON_SURF_MODE_1D;
      else
         md->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

      surf->u.legacy.bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
      surf->u.legacy.bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
      surf->u.legacy.tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
      surf->u.legacy.tile_split = eg_tile_split(surf->u.legacy.tile_split);
      surf->u.legacy.mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;

      if (bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT))
         surf->flags |= RADEON_SURF_SCANOUT;
      else
         surf->flags &= ~RADEON_SURF_SCANOUT;
      return;
   }

   md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
   md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
   if (args.tiling_flags & RADEON_TILING_MICRO)
      md->u.legacy.microtile = RADEON_LAYOUT_TILED;
   else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
      md->u.legacy.microtile = RADEON_LAYOUT_SQUARETILED;

   if (args.tiling_flags & RADEON_TILING_MACRO)
      md->u.legacy.macrotile = RADEON_LAYOUT_TILED;

   md->u.legacy.bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
   md->u.legacy.bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
   md->u.legacy.tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
   md->u.legacy.mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
   md->u.legacy.tile_split = eg_tile_split(md->u.legacy.tile_split);
   md->u.legacy.scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}

static void radeon_bo_set_metadata(struct radeon_winsys *rws,
                                   struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md,
                                   struct radeon_surf *surf)
{
   struct radeon_bo *bo = radeon_bo(_buf);
   struct drm_radeon_gem_set_tiling args;

   assert(bo->handle && "must not be called for slab entries");

   memset(&args, 0, sizeof(args));

   os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

   if (surf) {
      if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
         args.tiling_flags |= RADEON_TILING_MICRO;
      if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
         args.tiling_flags |= RADEON_TILING_MACRO;

      args.tiling_flags |= (surf->u.legacy.bankw & RADEON_TILING_EG_BANKW_MASK) <<
         RADEON_TILING_EG_BANKW_SHIFT;
      args.tiling_flags |= (surf->u.legacy.bankh & RADEON_TILING_EG_BANKH_MASK) <<
         RADEON_TILING_EG_BANKH_SHIFT;
      if (surf->u.legacy.tile_split) {
         args.tiling_flags |= (eg_tile_split_rev(surf->u.legacy.tile_split) &
                               RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
      }
      args.tiling_flags |= (surf->u.legacy.mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
         RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

      if (bo->rws->gen >= DRV_SI && !(surf->flags & RADEON_SURF_SCANOUT))
         args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

      args.pitch = surf->u.legacy.level[0].nblk_x * surf->bpe;
   } else {
      if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
         args.tiling_flags |= RADEON_TILING_MICRO;
      else if (md->u.legacy.microtile == RADEON_LAYOUT_SQUARETILED)
         args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

      if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
         args.tiling_flags |= RADEON_TILING_MACRO;

      args.tiling_flags |= (md->u.legacy.bankw & RADEON_TILING_EG_BANKW_MASK) <<
         RADEON_TILING_EG_BANKW_SHIFT;
      args.tiling_flags |= (md->u.legacy.bankh & RADEON_TILING_EG_BANKH_MASK) <<
         RADEON_TILING_EG_BANKH_SHIFT;
      if (md->u.legacy.tile_split) {
         args.tiling_flags |= (eg_tile_split_rev(md->u.legacy.tile_split) &
                               RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
      }
      args.tiling_flags |= (md->u.legacy.mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
         RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

      if (bo->rws->gen >= DRV_SI && !md->u.legacy.scanout)
         args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

      args.pitch = md->u.legacy.stride;
   }

   args.handle = bo->handle;

   drmCommandWriteRead(bo->rws->fd,
                       DRM_RADEON_GEM_SET_TILING,
                       &args,
                       sizeof(args));
}

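/* Create (or reuse) a buffer. Small requests without RADEON_FLAG_NO_SUBALLOC
 * are sub-allocated from 64 KiB slabs when virtual memory is available.
 * Otherwise, buffers marked RADEON_FLAG_NO_INTERPROCESS_SHARING are first
 * reclaimed from the pb_cache; failing that, a new GEM buffer is created,
 * with one retry after reclaiming the slab and cache pools.
 */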
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct radeon_bo *bo;
   int heap = -1;

   assert(!(flags & RADEON_FLAG_SPARSE)); /* not supported */

   /* Only 32-bit sizes are supported. */
   if (size > UINT_MAX)
      return NULL;

   /* VRAM implies WC. This is not optional. */
   if (domain & RADEON_DOMAIN_VRAM)
      flags |= RADEON_FLAG_GTT_WC;
   /* NO_CPU_ACCESS is valid with VRAM only. */
   if (domain != RADEON_DOMAIN_VRAM)
      flags &= ~RADEON_FLAG_NO_CPU_ACCESS;

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
       size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
       ws->info.r600_has_virtual_memory &&
       alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
      int heap = radeon_get_heap_index(domain, flags);

      if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
         goto no_slab;

      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&ws->bo_cache);

         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      }
      if (!entry)
         return NULL;

      bo = container_of(entry, struct radeon_bo, u.slab.entry);

      pipe_reference_init(&bo->base.reference, 1);

      return &bo->base;
   }
no_slab:

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_NO_SUBALLOC;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;

   /* Shared resources don't use cached heaps. */
   if (use_reusable_pool) {
      heap = radeon_get_heap_index(domain, flags);
      assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);

      bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                             0, heap));
      if (bo)
         return &bo->base;
   }

   bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
   if (!bo) {
      /* Clear the cache and try again. */
      if (ws->info.r600_has_virtual_memory)
         pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
      if (!bo)
         return NULL;
   }

   bo->u.real.use_reusable_pool = use_reusable_pool;

   mtx_lock(&ws->bo_handles_mutex);
   _mesa_hash_table_insert(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
   mtx_unlock(&ws->bo_handles_mutex);

   return &bo->base;
}

static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, uint64_t size)
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct drm_radeon_gem_userptr args;
   struct radeon_bo *bo;
   int r;

   bo = CALLOC_STRUCT(radeon_bo);
   if (!bo)
      return NULL;

   memset(&args, 0, sizeof(args));
   args.addr = (uintptr_t)pointer;
   args.size = align(size, ws->info.gart_page_size);
   args.flags = RADEON_GEM_USERPTR_ANONONLY |
                RADEON_GEM_USERPTR_VALIDATE |
                RADEON_GEM_USERPTR_REGISTER;
   if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                           &args, sizeof(args))) {
      FREE(bo);
      return NULL;
   }

   assert(args.handle != 0);

   mtx_lock(&ws->bo_handles_mutex);

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->handle = args.handle;
   bo->base.alignment_log2 = 0;
   bo->base.size = size;
   bo->base.vtbl = &radeon_bo_vtbl;
   bo->rws = ws;
   bo->user_ptr = pointer;
   bo->va = 0;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
   (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

   _mesa_hash_table_insert(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

   mtx_unlock(&ws->bo_handles_mutex);

   if (ws->info.r600_has_virtual_memory) {
      struct drm_radeon_gem_va va;

      bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);

      va.handle = bo->handle;
      va.operation = RADEON_VA_MAP;
      va.vm_id = 0;
      va.offset = bo->va;
      va.flags = RADEON_VM_PAGE_READABLE |
                 RADEON_VM_PAGE_WRITEABLE |
                 RADEON_VM_PAGE_SNOOPED;
      va.offset = bo->va;
      r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
      if (r && va.operation == RADEON_VA_RESULT_ERROR) {
         fprintf(stderr, "radeon: Failed to assign virtual address space\n");
         radeon_bo_destroy(NULL, &bo->base);
         return NULL;
      }
      mtx_lock(&ws->bo_handles_mutex);
      if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
         struct pb_buffer *b = &bo->base;
         struct radeon_bo *old_bo =
            _mesa_hash_table_u64_search(ws->bo_vas, va.offset);

         mtx_unlock(&ws->bo_handles_mutex);
         pb_reference(&b, &old_bo->base);
         return b;
      }

      _mesa_hash_table_u64_insert(ws->bo_vas, bo->va, bo);
      mtx_unlock(&ws->bo_handles_mutex);
   }

   ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

   return (struct pb_buffer*)bo;
}

static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned vm_alignment)
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct radeon_bo *bo;
   int r;
   unsigned handle;
   uint64_t size = 0;

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   mtx_lock(&ws->bo_handles_mutex);

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      /* First check if there already is an existing bo for the handle. */
      bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      /* We must first get the GEM handle, as fds are unreliable keys */
      r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
      if (r)
         goto fail;
      bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto fail;
   }

   if (bo) {
      /* Increase the refcount. */
      struct pb_buffer *b = NULL;
      pb_reference(&b, &bo->base);
      goto done;
   }

   /* There isn't, create a new one. */
   bo = CALLOC_STRUCT(radeon_bo);
   if (!bo) {
      goto fail;
   }

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      struct drm_gem_open open_arg = {};
      memset(&open_arg, 0, sizeof(open_arg));
      /* Open the BO. */
      open_arg.name = whandle->handle;
      if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(bo);
         goto fail;
      }
      handle = open_arg.handle;
      size = open_arg.size;
      bo->flink_name = whandle->handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      size = lseek(whandle->handle, 0, SEEK_END);
      /*
       * Could check errno to determine whether the kernel is new enough, but
       * it doesn't really matter why this failed, just that it failed.
       */
      if (size == (off_t)-1) {
         FREE(bo);
         goto fail;
      }
      lseek(whandle->handle, 0, SEEK_SET);
   }

   assert(handle != 0);

   bo->handle = handle;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment_log2 = 0;
   bo->base.size = (unsigned) size;
   bo->base.vtbl = &radeon_bo_vtbl;
   bo->rws = ws;
   bo->va = 0;
   bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
   (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

   if (bo->flink_name)
      _mesa_hash_table_insert(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

   _mesa_hash_table_insert(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
   mtx_unlock(&ws->bo_handles_mutex);

   if (ws->info.r600_has_virtual_memory && !bo->va) {
      struct drm_radeon_gem_va va;

      bo->va = radeon_bomgr_find_va64(ws, bo->base.size, vm_alignment);

      va.handle = bo->handle;
      va.operation = RADEON_VA_MAP;
      va.vm_id = 0;
      va.offset = bo->va;
      va.flags = RADEON_VM_PAGE_READABLE |
                 RADEON_VM_PAGE_WRITEABLE |
                 RADEON_VM_PAGE_SNOOPED;
      va.offset = bo->va;
      r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
      if (r && va.operation == RADEON_VA_RESULT_ERROR) {
         fprintf(stderr, "radeon: Failed to assign virtual address space\n");
         radeon_bo_destroy(NULL, &bo->base);
         return NULL;
      }
      mtx_lock(&ws->bo_handles_mutex);
      if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
         struct pb_buffer *b = &bo->base;
         struct radeon_bo *old_bo =
            _mesa_hash_table_u64_search(ws->bo_vas, va.offset);

         mtx_unlock(&ws->bo_handles_mutex);
         pb_reference(&b, &old_bo->base);
         return b;
      }

      _mesa_hash_table_u64_insert(ws->bo_vas, bo->va, bo);
      mtx_unlock(&ws->bo_handles_mutex);
   }

   bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

   return (struct pb_buffer*)bo;

fail:
   mtx_unlock(&ws->bo_handles_mutex);
   return NULL;
}

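/* Export a buffer as a winsys handle: a GEM flink name (SHARED), the raw GEM
 * handle (KMS), or a dma-buf fd (FD). An exported buffer is taken out of the
 * reusable pool so it is never recycled while it may be shared.
 */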
static bool radeon_winsys_bo_get_handle(struct radeon_winsys *rws,
                                        struct pb_buffer *buffer,
                                        struct winsys_handle *whandle)
{
   struct drm_gem_flink flink;
   struct radeon_bo *bo = radeon_bo(buffer);
   struct radeon_drm_winsys *ws = bo->rws;

   /* Don't allow exports of slab entries. */
   if (!bo->handle)
      return false;

   memset(&flink, 0, sizeof(flink));

   bo->u.real.use_reusable_pool = false;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!bo->flink_name) {
         flink.handle = bo->handle;

         if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return false;
         }

         bo->flink_name = flink.name;

         mtx_lock(&ws->bo_handles_mutex);
         _mesa_hash_table_insert(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
         mtx_unlock(&ws->bo_handles_mutex);
      }
      whandle->handle = bo->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = bo->handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return false;
   }

   return true;
}

static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct radeon_bo*)buf)->user_ptr != NULL;
}

static bool radeon_winsys_bo_is_suballocated(struct pb_buffer *buf)
{
   return !((struct radeon_bo*)buf)->handle;
}

static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
   return ((struct radeon_bo*)buf)->va;
}

static unsigned radeon_winsys_bo_get_reloc_offset(struct pb_buffer *buf)
{
   struct radeon_bo *bo = radeon_bo(buf);

   if (bo->handle)
      return 0;

   return bo->va - bo->u.slab.real->va;
}

void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
   ws->base.buffer_set_metadata = radeon_bo_set_metadata;
   ws->base.buffer_get_metadata = radeon_bo_get_metadata;
   ws->base.buffer_map = radeon_bo_map;
   ws->base.buffer_unmap = radeon_bo_unmap;
   ws->base.buffer_wait = radeon_bo_wait;
   ws->base.buffer_create = radeon_winsys_bo_create;
   ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
   ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
   ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
   ws->base.buffer_is_suballocated = radeon_winsys_bo_is_suballocated;
   ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
   ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
   ws->base.buffer_get_reloc_offset = radeon_winsys_bo_get_reloc_offset;
   ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}