Path: blob/master/drivers/accel/ivpu/ivpu_mmu_context.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO           BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *cpu;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	set_pages_array_wc(&page, 1);

	dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(vdev->drm.dev, dma_addr))
		goto err_free_page;

	cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!cpu)
		goto err_dma_unmap_page;

	*dma = dma_addr;
	return cpu;

err_dma_unmap_page:
	dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
	put_page(page);
	return NULL;
}

static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
	struct page *page;

	if (cpu_addr) {
		page = vmalloc_to_page(cpu_addr);
		vunmap(cpu_addr);
		dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		set_pages_array_wb(&page, 1);
		put_page(page);
	}
}
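
/*
 * Free every page-table page owned by a context: walk the PGD -> PUD ->
 * PMD -> PTE hierarchy, releasing the DMA-mapped pages and the CPU-side
 * pointer arrays that shadow each level.
 */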
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;
	dma_addr_t pud_dma, pmd_dma, pte_dma;
	u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
		pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

		if (!pud_dma_ptr)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
			pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
			pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

			if (!pmd_dma_ptr)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
				pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

				ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
			}

			kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
			ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
		}

		kfree(pgtable->pmd_ptrs[pgd_idx]);
		kfree(pgtable->pte_ptrs[pgd_idx]);
		ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	}

	ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
	pgtable->pgd_dma_ptr = NULL;
	pgtable->pgd_dma = 0;
}

static u64*
ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
	dma_addr_t pgd_dma;

	if (pgd_dma_ptr)
		return pgd_dma_ptr;

	pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
	if (!pgd_dma_ptr)
		return NULL;

	pgtable->pgd_dma_ptr = pgd_dma_ptr;
	pgtable->pgd_dma = pgd_dma;

	return pgd_dma_ptr;
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
	dma_addr_t pud_dma;

	if (pud_dma_ptr)
		return pud_dma_ptr;

	pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
	if (!pud_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
	pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pmd_ptrs[pgd_idx])
		goto err_free_pud_dma_ptr;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
	pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx])
		goto err_free_pmd_ptrs;

	pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
	pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

	return pud_dma_ptr;

err_free_pmd_ptrs:
	kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
	ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
		    int pud_idx)
{
	u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
	dma_addr_t pmd_dma;

	if (pmd_dma_ptr)
		return pmd_dma_ptr;

	pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
	if (!pmd_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
	pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
		goto err_free_pmd_dma_ptr;

	pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
	pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

	return pmd_dma_ptr;

err_free_pmd_dma_ptr:
	ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
	dma_addr_t pte_dma;

	if (pte_dma_ptr)
		return pte_dma_ptr;

	pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
	if (!pte_dma_ptr)
		return NULL;

	pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
	pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

	return pte_dma_ptr;
}
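
/*
 * Map a single 4K page: split the VPU address into PGD/PUD/PMD/PTE indices,
 * allocate any missing intermediate tables on demand and write the final
 * PTE as the DMA address combined with the protection flags.
 */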
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);

	/* Allocate PGD - first level page table if needed */
	if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
		return -ENOMEM;

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      dma_addr_t dma_addr, u64 prot)
{
	size_t size = IVPU_MMU_CONT_PAGES_SIZE;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;

	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int map_size;
	int ret;

	while (size) {
		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_CONT_PAGES_SIZE;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}

static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					 u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}
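
/*
 * The two helpers below clear the contiguous-mapping hint
 * (IVPU_MMU_ENTRY_FLAG_CONT) from PTEs so that a 64K contiguous mapping can
 * be split back into individual 4K pages before a sub-range is modified.
 */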
static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					    u64 vpu_addr)
{
	u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 offset = 0;

	ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

	while (start + offset < end) {
		ivpu_mmu_context_split_page(vdev, ctx, start + offset);
		offset += IVPU_MMU_PAGE_SIZE;
	}
}

int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      size_t size)
{
	u64 end = vpu_addr + size;
	size_t size_left = size;
	int ret;

	if (size == 0)
		return 0;

	if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
		return -EINVAL;

	mutex_lock(&ctx->lock);

	ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
		 ctx->id, vpu_addr, size);

	if (!ivpu_disable_mmu_cont_pages) {
		/* Split 64K contiguous page at the beginning if needed */
		if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

		/* Split 64K contiguous page at the end if needed */
		if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
	}

	while (size_left) {
		if (vpu_addr < end)
			ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size_left -= IVPU_MMU_PAGE_SIZE;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);
	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}
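
/*
 * Map a whole scatter-gather table into the context address space. The
 * context descriptor is programmed lazily on the first successful mapping;
 * page table updates are flushed with wmb() before the TLB is invalidated.
 */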
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
	size_t start_vpu_addr = vpu_addr;
	struct scatterlist *sg;
	int ret;
	u64 prot;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return -EINVAL;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;

	if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			goto err_unmap_pages;
		}
		vpu_addr += size;
	}

	if (!ctx->is_cd_valid) {
		ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
		if (ret) {
			ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
				 ctx->id, ret);
			goto err_unmap_pages;
		}
		ctx->is_cd_valid = true;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret) {
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
		goto err_unmap_pages;
	}

	mutex_unlock(&ctx->lock);
	return 0;

err_unmap_pages:
	ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
			     u64 size, struct drm_mm_node *node)
{
	int ret;

	WARN_ON(!range);

	mutex_lock(&ctx->lock);
	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
		ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
						  range->start, range->end, DRM_MM_INSERT_BEST);
		if (!ret)
			goto unlock;
	}

	ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
					  range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	mutex_lock(&ctx->lock);
	drm_mm_remove_node(node);
	mutex_unlock(&ctx->lock);
}

void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;

	mutex_init(&ctx->lock);

	if (!context_id) {
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
		start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
		end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;
}

void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	if (ctx->is_cd_valid) {
		ivpu_mmu_cd_clear(vdev, ctx->id);
		ctx->is_cd_valid = false;
	}

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);
}

void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	ivpu_mmu_context_fini(vdev, &vdev->gctx);
}
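
/*
 * The reserved context is set up eagerly: its root page table is allocated
 * and its context descriptor programmed at init time, unlike regular
 * contexts whose descriptor is set on first mapping.
 */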
err_ctx_fini;615}616617ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);618if (ret) {619ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");620goto err_ctx_fini;621}622623mutex_unlock(&vdev->rctx.lock);624return ret;625626err_ctx_fini:627mutex_unlock(&vdev->rctx.lock);628ivpu_mmu_context_fini(vdev, &vdev->rctx);629return ret;630}631632void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)633{634ivpu_mmu_cd_clear(vdev, vdev->rctx.id);635ivpu_mmu_context_fini(vdev, &vdev->rctx);636}637638639