Path: drivers/accel/ivpu/ivpu_mmu_context.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO           BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
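
/*
 * Page table layout, as encoded in the index masks above: each context
 * uses a four-level table where every level is one 4 KB page of 64-bit
 * entries indexed by a 9-bit slice of the VPU virtual address:
 *
 *   bits 47:39 - PGD index
 *   bits 38:30 - PUD index
 *   bits 29:21 - PMD index
 *   bits 20:12 - PTE index
 *   bits 11:0  - byte offset within the 4 KB page
 *
 * Leaf entries combine the DMA address with the IVPU_MMU_ENTRY_FLAG_*
 * bits; the contiguous hint (bit 52) marks 64 KB blocks built from 16
 * consecutive 4 KB pages.
 */
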
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
        dma_addr_t dma_addr;
        struct page *page;
        void *cpu;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
                return NULL;

        set_pages_array_wc(&page, 1);

        dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(vdev->drm.dev, dma_addr))
                goto err_free_page;

        cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!cpu)
                goto err_dma_unmap_page;

        *dma = dma_addr;
        return cpu;

err_dma_unmap_page:
        dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
        put_page(page);
        return NULL;
}

static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        struct page *page;

        if (cpu_addr) {
                page = vmalloc_to_page(cpu_addr);
                vunmap(cpu_addr);
                dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                set_pages_array_wb(&page, 1);
                put_page(page);
        }
}

static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
        pgtable->pgd_dma_ptr = NULL;
        pgtable->pgd_dma = 0;
}
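
/*
 * struct ivpu_mmu_pgtable keeps two views of every table page: the
 * hardware-visible entries (DMA address plus IVPU_MMU_ENTRY_* flags
 * written into the parent level) and CPU-side shadow arrays
 * (pud_ptrs/pmd_ptrs/pte_ptrs) holding the kernel virtual address of
 * each table page so it can be walked and freed. The
 * ivpu_mmu_ensure_*() helpers below allocate a level on first use and
 * update both views.
 */
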
static u64*
ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
        dma_addr_t pgd_dma;

        if (pgd_dma_ptr)
                return pgd_dma_ptr;

        pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
        if (!pgd_dma_ptr)
                return NULL;

        pgtable->pgd_dma_ptr = pgd_dma_ptr;
        pgtable->pgd_dma = pgd_dma;

        return pgd_dma_ptr;
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);

        /* Allocate PGD - first level page table if needed */
        if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
                return -ENOMEM;

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}
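
/*
 * 64 KB contiguous mappings: when both the VPU and DMA addresses are
 * 64 KB aligned, sixteen consecutive PTEs are written with
 * IVPU_MMU_ENTRY_FLAG_CONT set, hinting to the MMU that the range can
 * be cached as a single translation. Setting ivpu_disable_mmu_cont_pages
 * disables this optimization.
 */
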
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                         u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}

static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                        u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                            u64 vpu_addr)
{
        u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
        u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
        u64 offset = 0;

        ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

        while (start + offset < end) {
                ivpu_mmu_context_split_page(vdev, ctx, start + offset);
                offset += IVPU_MMU_PAGE_SIZE;
        }
}

int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              size_t size)
{
        u64 end = vpu_addr + size;
        size_t size_left = size;
        int ret;

        if (size == 0)
                return 0;

        if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
                return -EINVAL;

        mutex_lock(&ctx->lock);

        ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
                 ctx->id, vpu_addr, size);

        if (!ivpu_disable_mmu_cont_pages) {
                /* Split 64K contiguous page at the beginning if needed */
                if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
                        ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

                /* Split 64K contiguous page at the end if needed */
                if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
                        ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
        }

        while (size_left) {
                if (vpu_addr < end)
                        ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size_left -= IVPU_MMU_PAGE_SIZE;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

        return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}
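
/*
 * Mapping a scatter-gather table: each DMA segment is extended to
 * cover its in-page offset and mapped with ivpu_mmu_context_map_pages().
 * On the first successful map for a context the context descriptor is
 * programmed (ivpu_mmu_cd_set()), then a write barrier flushes the
 * write-combined page table updates to memory before the TLB is
 * invalidated, so the device never walks stale entries.
 */
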
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                         struct sg_table *sgt, size_t bo_size, bool llc_coherent, bool read_only)
{
        size_t start_vpu_addr = vpu_addr;
        struct scatterlist *sg;
        size_t sgt_size = 0;
        int ret;
        u64 prot;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return -EINVAL;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;

        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
        if (read_only)
                prot |= IVPU_MMU_ENTRY_FLAG_RO;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                if (sgt_size + size > bo_size) {
                        ivpu_err(vdev, "Scatter-gather table size exceeds buffer object size\n");
                        ret = -EINVAL;
                        goto err_unmap_pages;
                }

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        goto err_unmap_pages;
                }
                vpu_addr += size;
                sgt_size += size;
        }

        if (sgt_size < bo_size) {
                ivpu_err(vdev, "Scatter-gather table size too small to cover buffer object size\n");
                ret = -EINVAL;
                goto err_unmap_pages;
        }

        if (!ctx->is_cd_valid) {
                ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
                if (ret) {
                        ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
                                 ctx->id, ret);
                        goto err_unmap_pages;
                }
                ctx->is_cd_valid = true;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret) {
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
                goto err_unmap_pages;
        }

        mutex_unlock(&ctx->lock);
        return 0;

err_unmap_pages:
        ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, sgt_size);
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n",
                                      ctx->id, ret);
}

int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
                             u64 size, struct drm_mm_node *node)
{
        int ret;

        WARN_ON(!range);

        mutex_lock(&ctx->lock);
        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                  range->start, range->end, DRM_MM_INSERT_BEST);
                if (!ret)
                        goto unlock;
        }

        ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                          range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        mutex_lock(&ctx->lock);
        drm_mm_remove_node(node);
        mutex_unlock(&ctx->lock);
}
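
/*
 * Context setup and teardown: ivpu_mmu_context_init() prepares the
 * per-context lock and a drm_mm allocator covering the VPU address
 * range assigned to the given SSID. The reserved context programs its
 * context descriptor eagerly in ivpu_mmu_reserved_context_init();
 * other contexts do so lazily on their first mapping (is_cd_valid),
 * and the descriptor is cleared again in ivpu_mmu_context_fini().
 */
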
void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;

        mutex_init(&ctx->lock);

        if (!context_id) {
                start = vdev->hw->ranges.runtime.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
                end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;
}

void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (ctx->is_cd_valid) {
                ivpu_mmu_cd_clear(vdev, ctx->id);
                ctx->is_cd_valid = false;
        }

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);
}

void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
        int ret;

        ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);

        mutex_lock(&vdev->rctx.lock);

        if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
                ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
                ret = -ENOMEM;
                goto err_ctx_fini;
        }

        ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
                goto err_ctx_fini;
        }

        mutex_unlock(&vdev->rctx.lock);
        return ret;

err_ctx_fini:
        mutex_unlock(&vdev->rctx.lock);
        ivpu_mmu_context_fini(vdev, &vdev->rctx);
        return ret;
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
        ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
        ivpu_mmu_context_fini(vdev, &vdev->rctx);
}