/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. */

#ifndef _RDMA_ITER_H_
#define _RDMA_ITER_H_

#include <linux/bits.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/**
 * struct ib_block_iter - IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};

void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each
 * block's aligned DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the memory block
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz).
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)		\
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);	\
	     __rdma_umem_block_iter_next(biter);)

#endif /* _RDMA_ITER_H_ */
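/*
 * Usage sketches (illustrative only, not part of this header's contract;
 * "pas", "pgsz_bitmap" and "virt_addr" are hypothetical driver-side names):
 *
 * 1) Walking a umem, e.g. to build a HW page list. The driver first picks
 *    the best HW-supported page size with ib_umem_find_best_pgsz(), then
 *    stores each block's aligned DMA address:
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *	size_t i = 0;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, pgsz_bitmap, virt_addr);
 *	if (!pgsz)
 *		return -EINVAL;
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		pas[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
 *
 * 2) Walking a raw DMA-mapped sglist in PAGE_SIZE blocks:
 *
 *	rdma_for_each_block(sgt->sgl, &biter, sgt->nents, PAGE_SIZE) {
 *		dma_addr_t addr = rdma_block_iter_dma_address(&biter);
 *		... program addr into the device ...
 *	}
 */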