/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers, and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker. Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}
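
/*
 * Example (illustrative sketch, not part of this header): a driver's
 * ->tx_submit() hook typically assigns the cookie under the channel lock,
 * and the locked completion path later retires the descriptor with
 * dma_cookie_complete() before any callback runs. The foo_* names,
 * to_foo_chan()/to_foo_desc() and the lock/pending/node fields below are
 * hypothetical, shown only to illustrate the expected call pattern:
 *
 *	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
 *	{
 *		struct foo_chan *fc = to_foo_chan(tx->chan);
 *		unsigned long flags;
 *		dma_cookie_t cookie;
 *
 *		spin_lock_irqsave(&fc->lock, flags);
 *		cookie = dma_cookie_assign(tx);
 *		list_add_tail(&to_foo_desc(tx)->node, &fc->pending);
 *		spin_unlock_irqrestore(&fc->lock, flags);
 *
 *		return cookie;
 *	}
 */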

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL.
 * No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used, complete;

	used = chan->cookie;
	complete = chan->completed_cookie;
	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
		state->in_flight_bytes = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
					   u32 in_flight_bytes)
{
	if (state)
		state->in_flight_bytes = in_flight_bytes;
}

struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed-in cb struct with what's available in the passed-in
 * tx descriptor struct.
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = tx->callback;
	cb->callback_result = tx->callback_result;
	cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	struct dmaengine_result dummy_result = {
		.result = DMA_TRANS_NOERROR,
		.residue = 0
	};

	if (cb->callback_result) {
		if (!result)
			result = &dummy_result;
		cb->callback_result(cb->callback_param, result);
	} else if (cb->callback) {
		cb->callback(cb->callback_param);
	}
}

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 *					then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
				   const struct dmaengine_result *result)
{
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(tx, &cb);
	dmaengine_desc_callback_invoke(&cb, result);
}

/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return true if at least one callback is set in the cb struct,
 * false otherwise.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return cb->callback || cb->callback_result;
}
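
/*
 * Example (illustrative sketch, not part of this header): a typical
 * ->device_tx_status() implementation reports completion via
 * dma_cookie_status() and only computes a residue for descriptors that
 * are still in flight; a completion handler then pairs
 * dmaengine_desc_get_callback() with dmaengine_desc_callback_invoke()
 * (or uses dmaengine_desc_get_callback_invoke() as shorthand). The
 * foo_* names and the txd field are hypothetical:
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *					     dma_cookie_t cookie,
 *					     struct dma_tx_state *state)
 *	{
 *		enum dma_status status;
 *
 *		status = dma_cookie_status(chan, cookie, state);
 *		if (status == DMA_COMPLETE)
 *			return status;
 *
 *		dma_set_residue(state, foo_get_residue(chan, cookie));
 *		return status;
 *	}
 *
 *	static void foo_complete_desc(struct foo_desc *fd)
 *	{
 *		struct dmaengine_desc_callback cb;
 *
 *		dma_cookie_complete(&fd->txd);
 *		dmaengine_desc_get_callback(&fd->txd, &cb);
 *		dmaengine_desc_callback_invoke(&cb, NULL);
 *	}
 */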

struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return dma_dev->dbg_dev_root;
}
#else
struct dentry;
static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return NULL;
}
#endif /* CONFIG_DEBUG_FS */

#endif /* DMAENGINE_H */