/* Path: sys/ofed/include/rdma/ib_umem_odp.h */
/*-1* SPDX-License-Identifier: BSD-2-Clause OR GPL-2.02*3* Copyright (c) 2014 Mellanox Technologies. All rights reserved.4*5* This software is available to you under a choice of one of two6* licenses. You may choose to be licensed under the terms of the GNU7* General Public License (GPL) Version 2, available from the file8* COPYING in the main directory of this source tree, or the9* OpenIB.org BSD license below:10*11* Redistribution and use in source and binary forms, with or12* without modification, are permitted provided that the following13* conditions are met:14*15* - Redistributions of source code must retain the above16* copyright notice, this list of conditions and the following17* disclaimer.18*19* - Redistributions in binary form must reproduce the above20* copyright notice, this list of conditions and the following21* disclaimer in the documentation and/or other materials22* provided with the distribution.23*24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE31* SOFTWARE.32*/3334#ifndef IB_UMEM_ODP_H35#define IB_UMEM_ODP_H3637#include <linux/rbtree.h>3839#include <rdma/ib_umem.h>40#include <rdma/ib_verbs.h>4142struct umem_odp_node {43u64 __subtree_last;44struct rb_node rb;45};4647struct ib_umem_odp {48/*49* An array of the pages included in the on-demand paging umem.50* Indices of pages that are currently not mapped into the device will51* contain NULL.52*/53struct page **page_list;54/*55* An array of the same size as page_list, with DMA addresses mapped56* for pages the pages in page_list. The lower two bits designate57* access permissions. 
See ODP_READ_ALLOWED_BIT and58* ODP_WRITE_ALLOWED_BIT.59*/60dma_addr_t *dma_list;61/*62* The umem_mutex protects the page_list and dma_list fields of an ODP63* umem, allowing only a single thread to map/unmap pages. The mutex64* also protects access to the mmu notifier counters.65*/66struct mutex umem_mutex;67void *private; /* for the HW driver to use. */6869/* When false, use the notifier counter in the ucontext struct. */70bool mn_counters_active;71int notifiers_seq;72int notifiers_count;7374/* A linked list of umems that don't have private mmu notifier75* counters yet. */76struct list_head no_private_counters;77struct ib_umem *umem;7879/* Tree tracking */80struct umem_odp_node interval_tree;8182struct completion notifier_completion;83int dying;84};8586#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING8788int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);8990void ib_umem_odp_release(struct ib_umem *umem);9192/*93* The lower 2 bits of the DMA address signal the R/W permissions for94* the entry. To upgrade the permissions, provide the appropriate95* bitmask to the map_dma_pages function.96*97* Be aware that upgrading a mapped address might result in change of98* the DMA address for the page.99*/100#define ODP_READ_ALLOWED_BIT (1<<0ULL)101#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)102103#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))104105int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,106u64 access_mask, unsigned long current_seq);107108void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,109u64 bound);110111void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);112void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);113typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,114void *cookie);115/*116* Call the callback on each ib_umem in the range. 
Returns the logical or of117* the return values of the functions called.118*/119int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,120umem_call_back cb, void *cookie);121122struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,123u64 start, u64 last);124struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,125u64 start, u64 last);126127static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,128unsigned long mmu_seq)129{130/*131* This code is strongly based on the KVM code from132* mmu_notifier_retry. Should be called with133* the relevant locks taken (item->odp_data->umem_mutex134* and the ucontext umem_mutex semaphore locked for read).135*/136137/* Do not allow page faults while the new ib_umem hasn't seen a state138* with zero notifiers yet, and doesn't have its own valid set of139* private counters. */140if (!item->odp_data->mn_counters_active)141return 1;142143if (unlikely(item->odp_data->notifiers_count))144return 1;145if (item->odp_data->notifiers_seq != mmu_seq)146return 1;147return 0;148}149150#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */151152static inline int ib_umem_odp_get(struct ib_ucontext *context,153struct ib_umem *umem)154{155return -EINVAL;156}157158static inline void ib_umem_odp_release(struct ib_umem *umem) {}159160#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */161162#endif /* IB_UMEM_ODP_H */163164165