Path: blob/master/drivers/accel/habanalabs/common/memory_mgr.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 *                       the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store and return a pointer to its descriptor.
 * Increase the buffer refcount. If not found - return NULL.
 */
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
		return NULL;
	}
	kref_get(&buf->refcount);
	spin_unlock(&mmg->lock);
	return buf;
}

/**
 * hl_mmap_mem_buf_destroy - destroy the unused buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Internal function, used as the final step of buffer release. Shall be
 * invoked only when the buffer is no longer in use (removed from the idr).
 * Will call the release callback (if applicable), and free the memory.
 */
static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
{
	if (buf->behavior->release)
		buf->behavior->release(buf);

	kfree(buf);
}

/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used as a kref release callback, when the last user of
 * the buffer is released. Shall not be called from an interrupt context.
 */
static void hl_mmap_mem_buf_release(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	spin_lock(&buf->mmg->lock);
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&buf->mmg->lock);

	hl_mmap_mem_buf_destroy(buf);
}

/**
 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used for kref put by handle. Assumes mmg lock is taken.
 * Will remove the buffer from the idr, without destroying it.
 */
static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
}

/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context.
 */
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
	return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}
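/*
 * Editor's note: a minimal usage sketch of the get/put pair above, assuming a
 * caller that already holds a valid handle. Only hl_mmap_mem_buf_get() and
 * hl_mmap_mem_buf_put() are real driver functions; the surrounding code is
 * illustrative:
 *
 *	struct hl_mmap_mem_buf *buf;
 *
 *	buf = hl_mmap_mem_buf_get(mmg, handle);
 *	if (!buf)
 *		return -EINVAL;		// stale or foreign handle
 *
 *	// ... use the buffer; the taken reference keeps it alive even if
 *	// another thread concurrently drops the idr reference ...
 *
 *	hl_mmap_mem_buf_put(buf);	// destroyed here if last reference
 */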
/**
 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
 *                              given handle.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context.
 * Return -EINVAL if the handle was not found, else return the put outcome
 * (0 or 1).
 */
int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev,
			"Buff put failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
		spin_unlock(&mmg->lock);
		hl_mmap_mem_buf_destroy(buf);
		return 1;
	}

	spin_unlock(&mmg->lock);
	return 0;
}

/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer's polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the given memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
		      struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
		      void *args)
{
	struct hl_mmap_mem_buf *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	spin_lock(&mmg->lock);
	rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
	spin_unlock(&mmg->lock);
	if (rc < 0) {
		dev_err(mmg->dev,
			"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
			behavior->topic, rc);
		goto free_buf;
	}

	buf->mmg = mmg;
	buf->behavior = behavior;
	buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
	kref_init(&buf->refcount);

	rc = buf->behavior->alloc(buf, gfp, args);
	if (rc) {
		dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
			behavior->topic, rc);
		goto remove_idr;
	}

	return buf;

remove_idr:
	spin_lock(&mmg->lock);
	idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&mmg->lock);
free_buf:
	kfree(buf);
	return NULL;
}
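/*
 * Editor's note: an illustrative allocation sketch. The behavior object and
 * its callbacks are hypothetical stand-ins for the real CB/TS behaviors
 * defined elsewhere in the driver; only hl_mmap_mem_buf_alloc() and the
 * HL_MMAP_TYPE_* tag are real:
 *
 *	static struct hl_mmap_mem_buf_behavior my_behavior = {
 *		.topic = "MYBUF",
 *		.mem_id = HL_MMAP_TYPE_TS_BUFF,
 *		.alloc = my_buf_alloc,		// fills buf->mappable_size
 *		.release = my_buf_release,
 *		.mmap = my_buf_mmap,
 *	};
 *
 *	buf = hl_mmap_mem_buf_alloc(mmg, &my_behavior, GFP_KERNEL, NULL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * The returned buf->handle packs the idr id and the HL_MMAP_TYPE_* tag into
 * the bits above PAGE_SHIFT, so lookups can recover the idr id with
 * lower_32_bits(handle >> PAGE_SHIFT) and the handle can double as an mmap
 * offset.
 */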
/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * @vma: the vma object for which mmap was closed.
 *
 * Put the memory buffer if it is no longer mapped.
 */
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
	struct hl_mmap_mem_buf *buf =
		(struct hl_mmap_mem_buf *)vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		buf->real_mapped_size = new_mmap_size;
		return;
	}

	atomic_set(&buf->mmap, 0);
	hl_mmap_mem_buf_put(buf);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
	.close = hl_mmap_mem_buf_vm_close
};

/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 *
 * @mmg: unified memory manager
 * @vma: the vma object to map the buffer to.
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma.
 */
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
		    void *args)
{
	struct hl_mmap_mem_buf *buf;
	u64 user_mem_size;
	u64 handle;
	int rc;

	/* We use the page offset to hold the idr handle, and thus we need to
	 * clear it before doing the mmap itself
	 */
	handle = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = 0;

	/* Reference was taken here */
	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf) {
		dev_err(mmg->dev,
			"Memory mmap failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_mem_size = vma->vm_end - vma->vm_start;
	if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
			buf->behavior->topic, user_mem_size, buf->mappable_size);
		rc = -EINVAL;
		goto put_mem;
	}

#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
	if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#else
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#endif
		dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
			buf->behavior->topic, vma->vm_start);

		rc = -EINVAL;
		goto put_mem;
	}

	if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
		dev_err(mmg->dev,
			"%s, Memory mmap failed, already mapped to user\n",
			buf->behavior->topic);
		rc = -EINVAL;
		goto put_mem;
	}

	vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

	/* Note: We're transferring the memory reference to vma->vm_private_data here. */

	vma->vm_private_data = buf;

	rc = buf->behavior->mmap(buf, vma, args);
	if (rc) {
		atomic_set(&buf->mmap, 0);
		goto put_mem;
	}

	buf->real_mapped_size = buf->mappable_size;
	vma->vm_pgoff = handle >> PAGE_SHIFT;

	return 0;

put_mem:
	hl_mmap_mem_buf_put(buf);
	return rc;
}

/**
 * hl_mem_mgr_init - initialize unified memory manager
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of the unified memory manager.
 */
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
	mmg->dev = dev;
	spin_lock_init(&mmg->lock);
	idr_init(&mmg->handles);
}

static void hl_mem_mgr_fini_stats_reset(struct hl_mem_mgr_fini_stats *stats)
{
	if (!stats)
		return;

	memset(stats, 0, sizeof(*stats));
}

static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)
{
	if (!stats)
		return;

	switch (mem_id) {
	case HL_MMAP_TYPE_CB:
		++stats->n_busy_cb;
		break;
	case HL_MMAP_TYPE_TS_BUFF:
		++stats->n_busy_ts;
		break;
	default:
		/* we currently store only CB/TS so this shouldn't happen */
		++stats->n_busy_other;
	}
}
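/*
 * Editor's note: a sketch of how hl_mem_mgr_mmap() above is expected to be
 * driven from a file_operations::mmap handler. The "my_ctx" container and
 * "my_mmap" handler are hypothetical; only hl_mem_mgr_mmap() is real:
 *
 *	static int my_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = filp->private_data;
 *
 *		// userspace passed the buffer handle as the mmap offset,
 *		// so vma->vm_pgoff == handle >> PAGE_SHIFT on entry
 *		return hl_mem_mgr_mmap(&ctx->mmg, vma, NULL);
 *	}
 */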
/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 * @stats: if non-NULL, will return some counters for handles that could not be removed.
 *
 * Release the unified memory manager. Shall not be called from an interrupt
 * context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats)
{
	struct hl_mmap_mem_buf *buf;
	struct idr *idp;
	const char *topic;
	u64 mem_id;
	u32 id;

	hl_mem_mgr_fini_stats_reset(stats);

	idp = &mmg->handles;

	idr_for_each_entry(idp, buf, id) {
		topic = buf->behavior->topic;
		mem_id = buf->behavior->mem_id;
		if (hl_mmap_mem_buf_put(buf) != 1) {
			dev_err(mmg->dev,
				"%s: Buff handle %u for CTX is still alive\n",
				topic, id);
			hl_mem_mgr_fini_stats_inc(mem_id, stats);
		}
	}
}

/**
 * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
 * @mmg: parent unified memory manager
 *
 * Destroy the memory manager IDR.
 * Shall be called when the IDR is empty and no memory buffers are in use.
 */
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
{
	if (!idr_is_empty(&mmg->handles))
		dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");

	idr_destroy(&mmg->handles);
}
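/*
 * Editor's note: the expected manager lifecycle, as a sketch. hl_mem_mgr_fini()
 * drops the idr reference of every surviving buffer, and
 * hl_mem_mgr_idr_destroy() may only run once the idr is empty:
 *
 *	struct hl_mem_mgr mmg;
 *	struct hl_mem_mgr_fini_stats stats;
 *
 *	hl_mem_mgr_init(dev, &mmg);
 *	// ... hl_mmap_mem_buf_alloc() / hl_mem_mgr_mmap() / put ...
 *	hl_mem_mgr_fini(&mmg, &stats);	// stats counts still-busy handles
 *	hl_mem_mgr_idr_destroy(&mmg);
 */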