// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <[email protected]>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <[email protected]>, Rob Clark <[email protected]> and
 * Daniel Vetter <[email protected]> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

static DEFINE_MUTEX(dmabuf_list_mutex);
static LIST_HEAD(dmabuf_list);

static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&dmabuf_list_mutex);
	list_add(&dmabuf->list_node, &dmabuf_list);
	mutex_unlock(&dmabuf_list_mutex);
}

static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&dmabuf_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&dmabuf_list_mutex);
}

/**
 * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
 *
 * Returns the first buffer in the global list of DMA-bufs that's not in the
 * process of being destroyed. Increments that buffer's reference count to
 * prevent buffer destruction. Callers must release the reference, either by
 * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * First buffer from global list, with refcount elevated
 * * NULL if no active buffers are present
 */
struct dma_buf *dma_buf_iter_begin(void)
{
	struct dma_buf *ret = NULL, *dmabuf;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

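/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * caller can walk the global list with dma_buf_iter_begin() and
 * dma_buf_iter_next() (defined below), dropping the elevated reference by
 * either continuing the iteration or calling dma_buf_put() when bailing out
 * early; inspect_buffer() is a hypothetical helper:
 *
 *	struct dma_buf *buf;
 *
 *	for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf)) {
 *		if (inspect_buffer(buf) < 0) {
 *			dma_buf_put(buf);
 *			break;
 *		}
 *	}
 */
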
/**
 * dma_buf_iter_next - continue iteration through global list of all DMA buffers
 * @dmabuf: [in] pointer to dma_buf
 *
 * Decrements the reference count on the provided buffer. Returns the next
 * buffer from the remainder of the global list of DMA-bufs with its reference
 * count incremented. Callers must release the reference, either by continuing
 * iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * Next buffer from global list, with refcount elevated
 * * NULL if no additional active buffers are present
 */
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
	struct dma_buf *ret = NULL;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	dma_buf_put(dmabuf);
	list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	if (!is_dma_buf_file(file))
		return -EINVAL;

	__dma_buf_list_del(file->private_data);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using &dma_buf_sync_file_export.
 */

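/*
 * Illustrative userspace sketch (an assumption, not part of the original
 * file): given a dma-buf file descriptor dmabuf_fd obtained from an exporting
 * driver, waiting for the most recent write to complete could look like this:
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,
 *		.events = POLLIN,
 *	};
 *
 *	poll(&pfd, 1, -1);
 *
 * POLLIN returns once the most recent write fence has signaled; POLLOUT would
 * wait for all attached fences instead. Cache maintenance (DMA_BUF_IOCTL_SYNC)
 * is still required before the CPU actually touches the buffer.
 */
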
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in] dmabuf buffer that will be renamed.
 * @buf:    [in] A piece of userspace memory that contains the name of
 *               the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

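/*
 * Illustrative userspace sketch (an assumption, not part of the original
 * file): the two ioctls above convert between the implicit fences of a
 * dma-buf and an explicit sync_file fd. Snapshotting all outstanding writers
 * of a buffer could look like this:
 *
 *	struct dma_buf_export_sync_file arg = {
 *		.flags = DMA_BUF_SYNC_READ,
 *	};
 *
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) == 0)
 *		sync_fd = arg.fd;
 *
 * DMA_BUF_SYNC_READ asks for a fence covering the pending writes only;
 * DMA_BUF_SYNC_WRITE or DMA_BUF_SYNC_RW would cover all attached fences.
 */
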
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with the unique and dmabuffs specific
	 * value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

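/*
 * Illustrative sketch (an assumption, not part of the original file) of the
 * exporter side of step 1 above; my_ops and my_buffer are hypothetical:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 * The fd is then handed to userspace, which passes it to the importing
 * drivers as described in step 2.
 */
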
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info: [in] holds all the export related information provided
 *                 by the exporter. see &struct dma_buf_export_info
 *                 for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops or an error in allocating the struct dma_buf, it will return a
 * negative error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_list_add(dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf: [in] pointer to dma_buf for which fd is required.
 * @flags:  [in] flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	return FD_ADD(flags, dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd: [in] fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf: [in] buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone on unmap
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

static bool
dma_buf_pin_on_map(struct dma_buf_attachment *attach)
{
	return attach->dmabuf->ops->pin &&
	       (!dma_buf_attachment_is_dynamic(attach) ||
		!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exports and importers,
 * all dma-buf API users must follow the common dma-buf locking convention.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_pin()
 * - dma_buf_unpin()
 * - dma_buf_map_attachment()
 * - dma_buf_unmap_attachment()
 * - dma_buf_vmap()
 * - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_attach()
 * - dma_buf_dynamic_attach()
 * - dma_buf_detach()
 * - dma_buf_export()
 * - dma_buf_fd()
 * - dma_buf_get()
 * - dma_buf_put()
 * - dma_buf_mmap()
 * - dma_buf_begin_cpu_access()
 * - dma_buf_end_cpu_access()
 * - dma_buf_map_attachment_unlocked()
 * - dma_buf_unmap_attachment_unlocked()
 * - dma_buf_vmap_unlocked()
 * - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
 *    reservation and exporter can take the lock:
 *
 * - &dma_buf_ops.attach()
 * - &dma_buf_ops.detach()
 * - &dma_buf_ops.release()
 * - &dma_buf_ops.begin_cpu_access()
 * - &dma_buf_ops.end_cpu_access()
 * - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
 *    reservation and exporter can't take the lock:
 *
 * - &dma_buf_ops.pin()
 * - &dma_buf_ops.unpin()
 * - &dma_buf_ops.map_dma_buf()
 * - &dma_buf_ops.unmap_dma_buf()
 * - &dma_buf_ops.vmap()
 * - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_move_notify()
 */

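/*
 * Illustrative sketch (an assumption, not part of the original file): an
 * importer that already holds the reservation lock calls the locked variants
 * directly, everybody else uses the _unlocked wrappers which take the lock
 * internally; dmabuf and attach are assumed to exist:
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 * is equivalent to:
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dmabuf->resv);
 */
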
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:        [in] buffer to attach device to.
 * @dev:           [in] device to be attached.
 * @importer_ops:  [in] importer operations for the attachment
 * @importer_priv: [in] importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf: [in] buffer to attach device to.
 * @dev:    [in] device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf: [in] buffer to detach from.
 * @attach: [in] attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach: [in] attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!attach->importer_ops);

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach: [in] attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!attach->importer_ops);

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");

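/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * dynamic importer pinning a buffer for scanout under the reservation lock,
 * and releasing it again later (error handling elided):
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	if (!ret)
 *		sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	dma_resv_unlock(dmabuf->resv);
 *
 *	...
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *	dma_buf_unpin(attach);
 *	dma_resv_unlock(dmabuf->resv);
 */
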
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:    [in] attachment whose scatterlist is to be returned
 * @direction: [in] direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	if (dma_buf_pin_on_map(attach)) {
		ret = attach->dmabuf->ops->pin(attach);
		/*
		 * Catch exporters making buffers inaccessible even when
		 * attachments preventing that exist.
		 */
		WARN_ON_ONCE(ret == -EBUSY);
		if (ret)
			return ERR_PTR(ret);
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);
	if (IS_ERR(sg_table))
		goto error_unpin;

	/*
	 * Importers with static attachments don't wait for fences.
	 */
	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0)
			goto error_unmap;
	}
	mangle_sg_table(sg_table);

#ifdef CONFIG_DMA_API_DEBUG
	{
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;

error_unmap:
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	sg_table = ERR_PTR(ret);

error_unpin:
	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");

/**
 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:    [in] attachment whose scatterlist is to be returned
 * @direction: [in] direction of DMA transfer
 *
 * Unlocked variant of dma_buf_map_attachment().
 */
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:    [in] attachment to unmap buffer from
 * @sg_table:  [in] scatterlist info of the buffer to unmap
 * @direction: [in] direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	mangle_sg_table(sg_table);
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");

/**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:    [in] attachment to unmap buffer from
 * @sg_table:  [in] scatterlist info of the buffer to unmap
 * @direction: [in] direction of DMA transfer
 *
 * Unlocked variant of dma_buf_unmap_attachment().
 */
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf: [in] buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces:
 *
 *   .. code-block:: c
 *
 *      void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *      void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this, and mmap support was needed for DMA
 *   buffer file descriptors to replace ION buffers.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following:
 *
 *   - mmap dma-buf fd
 *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *     want (with the new data being consumed by say the GPU or the scanout
 *     device)
 *   - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer is rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface:
 *
 *   .. code-block:: c
 *
 *      int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */

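/*
 * Illustrative userspace sketch (an assumption, not part of the original
 * file) of the SYNC_START/SYNC_END bracketing described in the section above:
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	(read from or write to ptr)
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 * Both ioctl calls must be restarted if they fail with -EAGAIN or -EINTR.
 */
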
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:    [in] buffer to prepare cpu access for.
 * @direction: [in] direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:    [in] buffer to complete cpu access for.
 * @direction: [in] direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");


/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf: [in] buffer that should back the vma
 * @vma:    [in] vma for the mmap
 * @pgoff:  [in] offset in pages where this mmap should start within the
 *               dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf: [in] buffer to vmap
 * @map:    [out] returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_assert_held(dmabuf->resv);

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		return ret;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");

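/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * kernel user mapping the whole buffer and bracketing the CPU access
 * (error handling mostly elided):
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	(access the buffer through map.vaddr or map.vaddr_iomem)
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 *
 * dma_buf_vmap_unlocked()/dma_buf_vunmap_unlocked() are defined below; the
 * locked variants require the caller to hold dmabuf->resv instead.
 */
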
/**
 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf: [in] buffer to vmap
 * @map:    [out] returns the vmap pointer
 *
 * Unlocked version of dma_buf_vmap()
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_lock(dmabuf->resv, NULL);
	ret = dma_buf_vmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf: [in] buffer to vunmap
 * @map:    [in] vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_assert_held(dmabuf->resv);

	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");

/**
 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf: [in] buffer to vunmap
 * @map:    [in] vmap pointer to vunmap
 */
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_vunmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&dmabuf_list_mutex);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &dmabuf_list, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;


		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&dmabuf_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);