// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <[email protected]>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <[email protected]>, Rob Clark <[email protected]> and
 * Daniel Vetter <[email protected]> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

static DEFINE_MUTEX(dmabuf_list_mutex);
static LIST_HEAD(dmabuf_list);

static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&dmabuf_list_mutex);
	list_add(&dmabuf->list_node, &dmabuf_list);
	mutex_unlock(&dmabuf_list_mutex);
}

static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&dmabuf_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&dmabuf_list_mutex);
}

/**
 * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
 *
 * Returns the first buffer in the global list of DMA-bufs that's not in the
 * process of being destroyed. Increments that buffer's reference count to
 * prevent buffer destruction. Callers must release the reference, either by
 * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * First buffer from global list, with refcount elevated
 * * NULL if no active buffers are present
 */
struct dma_buf *dma_buf_iter_begin(void)
{
	struct dma_buf *ret = NULL, *dmabuf;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

/**
 * dma_buf_iter_next - continue iteration through global list of all DMA buffers
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Decrements the reference count on the provided buffer. Returns the next
 * buffer from the remainder of the global list of DMA-bufs with its reference
 * count incremented. Callers must release the reference, either by continuing
 * iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * Next buffer from global list, with refcount elevated
 * * NULL if no additional active buffers are present
 */
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
	struct dma_buf *ret = NULL;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	dma_buf_put(dmabuf);
	list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	if (!is_dma_buf_file(file))
		return -EINVAL;

	__dma_buf_list_del(file->private_data);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 *   most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on DMA buffer can be
 * exported as a &sync_file using &dma_buf_sync_file_export.
 */

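/*
 * Illustrative userspace sketch of the polling described above (not part of
 * this file's build): "dmabuf_fd" is a hypothetical dma-buf file descriptor
 * obtained from an exporter; checking POLLIN waits for the most recent
 * write/exclusive fence to signal.
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	// Blocks until the tracked write fences have signalled.
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		// DMA writes are complete; cache flushing / CPU access
 *		// preparation may still be required.
 *	}
 */
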
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf:	[in]	dmabuf buffer that will be renamed.
 * @buf:	[in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

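/*
 * Illustrative userspace sketch (not compiled here): extracting the implicit
 * fences as a sync_file with DMA_BUF_IOCTL_EXPORT_SYNC_FILE. "dmabuf_fd" is a
 * hypothetical dma-buf file descriptor; the returned arg.fd can be waited on
 * or handed to drivers that consume sync_files.
 *
 *	struct dma_buf_export_sync_file arg = { .flags = DMA_BUF_SYNC_READ };
 *
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) == 0) {
 *		// arg.fd now holds a sync_file fd snapshotting the fences
 *	}
 */
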
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with the unique and dmabuffs specific
	 * value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines his exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

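/*
 * Illustrative importer-side sketch of the sequence above (hypothetical
 * driver code, error handling elided). "fd" and "dev" are assumed to come
 * from the importing driver; the _unlocked wrappers are used so the caller
 * does not need to hold the dma-resv lock.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt =
 *		dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... device DMA using the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
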
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or error in allocating struct dma_buf, will return negative
 * error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_list_add(dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");

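/*
 * Illustrative exporter-side sketch (hypothetical driver code, error handling
 * elided). "my_buf" and "my_dmabuf_ops" are assumptions for the example;
 * DEFINE_DMA_BUF_EXPORT_INFO() pre-fills exp_name and owner.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);	// wrap the private buffer
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);	// hand it to userspace
 */
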
/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone on unmap
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

static bool
dma_buf_pin_on_map(struct dma_buf_attachment *attach)
{
	return attach->dmabuf->ops->pin &&
		(!dma_buf_attachment_is_dynamic(attach) ||
		 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exports and importers,
 * all dma-buf API users must follow the common dma-buf locking convention.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *    - dma_buf_pin()
 *    - dma_buf_unpin()
 *    - dma_buf_map_attachment()
 *    - dma_buf_unmap_attachment()
 *    - dma_buf_vmap()
 *    - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *    - dma_buf_attach()
 *    - dma_buf_dynamic_attach()
 *    - dma_buf_detach()
 *    - dma_buf_export()
 *    - dma_buf_fd()
 *    - dma_buf_get()
 *    - dma_buf_put()
 *    - dma_buf_mmap()
 *    - dma_buf_begin_cpu_access()
 *    - dma_buf_end_cpu_access()
 *    - dma_buf_map_attachment_unlocked()
 *    - dma_buf_unmap_attachment_unlocked()
 *    - dma_buf_vmap_unlocked()
 *    - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
 *    reservation and exporter can take the lock:
 *
 *    - &dma_buf_ops.attach()
 *    - &dma_buf_ops.detach()
 *    - &dma_buf_ops.release()
 *    - &dma_buf_ops.begin_cpu_access()
 *    - &dma_buf_ops.end_cpu_access()
 *    - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
 *    reservation and exporter can't take the lock:
 *
 *    - &dma_buf_ops.pin()
 *    - &dma_buf_ops.unpin()
 *    - &dma_buf_ops.map_dma_buf()
 *    - &dma_buf_ops.unmap_dma_buf()
 *    - &dma_buf_ops.vmap()
 *    - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *    - dma_buf_move_notify()
 */

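/*
 * Illustrative sketch of the importer convention above (hypothetical code):
 * either take the reservation lock around the locked variants, or use the
 * _unlocked wrappers which take it internally.
 *
 *	struct iosys_map map;
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	dma_buf_vmap(dmabuf, &map);		// lock held by the caller
 *	dma_buf_vunmap(dmabuf, &map);
 *	dma_resv_unlock(dmabuf->resv);
 *
 *	dma_buf_vmap_unlocked(dmabuf, &map);	// lock taken internally
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 */
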
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!attach->importer_ops);

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!attach->importer_ops);

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	if (dma_buf_pin_on_map(attach)) {
		ret = attach->dmabuf->ops->pin(attach);
		/*
		 * Catch exporters making buffers inaccessible even when
		 * attachments preventing that exist.
		 */
		WARN_ON_ONCE(ret == -EBUSY);
		if (ret)
			return ERR_PTR(ret);
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);
	if (IS_ERR(sg_table))
		goto error_unpin;

	/*
	 * Importers with static attachments don't wait for fences.
	 */
	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0)
			goto error_unmap;
	}
	mangle_sg_table(sg_table);

#ifdef CONFIG_DMA_API_DEBUG
	{
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;

error_unmap:
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	sg_table = ERR_PTR(ret);

error_unpin:
	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");

/**
 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_map_attachment().
 */
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	mangle_sg_table(sg_table);
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");

/**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_unmap_attachment().
 */
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces:
 *
 *   .. code-block:: c
 *
 *      int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *      void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface:
 *
 *   .. code-block:: c
 *
 *      int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */

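/*
 * Illustrative userspace sketch of the SYNC_START/SYNC_END bracketing
 * described above (hypothetical code, error and -EINTR/-EAGAIN retry
 * handling elided). "dmabuf_fd" and "size" are assumptions for the example.
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	// ... CPU reads/writes through ptr ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
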
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");


/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_assert_held(dmabuf->resv);

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		return ret;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");

/**
 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * Unlocked version of dma_buf_vmap()
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_lock(dmabuf->resv, NULL);
	ret = dma_buf_vmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_assert_held(dmabuf->resv);

	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");

/**
 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_vunmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&dmabuf_list_mutex);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &dmabuf_list, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&dmabuf_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);