/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
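
/*
 * Example (sketch): how userspace typically drives the handle/name scheme
 * described above through the GEM ioctls implemented below.  The drm_fd,
 * other_fd and handle variables and all error handling are assumed; the
 * ioctl numbers and argument structs are the ones from the DRM uapi headers.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(drm_fd, DRM_IOCTL_GEM_FLINK, &flink);	 publish a global name
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(other_fd, DRM_IOCTL_GEM_OPEN, &op);	 name -> new handle + size
 *
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(other_fd, DRM_IOCTL_GEM_CLOSE, &cl);	 drop that handle again
 */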

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return -ENOMEM;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
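
/*
 * Example (sketch): a driver's buffer-create path typically combines
 * drm_gem_object_alloc() and drm_gem_handle_create() as below, then drops
 * its own reference so the handle is left holding the object.  The dev,
 * file_priv, size and args variables and the error handling are assumed.
 *
 *	obj = drm_gem_object_alloc(dev, roundup(size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */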

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
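
/*
 * Example (sketch): the driver-side wiring that makes drm_gem_mmap() useful.
 * The foo_* names are hypothetical; drm_gem_vm_open()/drm_gem_vm_close() are
 * the helpers defined above, and foo_gem_fault() is the driver's fault
 * handler that actually populates the mapping.
 *
 *	static struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * The driver points .gem_vm_ops in its struct drm_driver at &foo_gem_vm_ops
 * and installs drm_gem_mmap() as the mmap handler of its device file, so
 * that faults on the fake mmap offsets set up at GTT-map time are delivered
 * to foo_gem_fault().
 */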