Path: drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <[email protected]>
 */

#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
	[AMDGPU_HW_IP_VPE]	= 1,
};

bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
	case AMDGPU_CTX_PRIORITY_UNSET:
		/* UNSET priority is not valid and we don't carry that
		 * around, but set it to NORMAL in the only place this
		 * function is called, amdgpu_ctx_ioctl().
		 */
		return false;
	}
}

static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_LOW;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_LOW;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we sanitized userspace provided priority
	 * already, WARN if this happens.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}

}

static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}
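
/*
 * Note that context priority is handled at two levels:
 * amdgpu_ctx_to_drm_sched_prio() above picks the software priority used by
 * the DRM GPU scheduler, while the helpers below map the same
 * AMDGPU_CTX_PRIORITY_* value to a hardware queue priority. LOW/VERY_LOW and
 * HIGH/VERY_HIGH collapse to the same DRM scheduler level, but can still
 * select different hardware levels; see
 * amdgpu_ctx_sched_prio_to_ring_prio(), where HIGH and VERY_HIGH differ.
 */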

static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	int32_t ctx_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence;

	if (!fence)
		return ns_to_ktime(0);

	/* When the fence is not even scheduled it can't have spent time */
	s_fence = to_drm_sched_fence(fence);
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return ns_to_ktime(0);

	/* When it is still running, account how much time was already spent */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}

static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
				      struct amdgpu_ctx_entity *centity)
{
	ktime_t res = ns_to_ktime(0);
	uint32_t i;

	spin_lock(&ctx->ring_lock);
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
	}
	spin_unlock(&ctx->ring_lock);
	return res;
}
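
/*
 * Entities are created lazily, the first time userspace submits work to a
 * given (hw_ip, ring) pair; see amdgpu_ctx_get_entity(). Each entity embeds
 * a ring buffer of amdgpu_sched_jobs fence pointers (hence the struct_size()
 * allocation below), which is what amdgpu_ctx_entity_time() above walks to
 * account GPU time for fences that are still in flight.
 */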

static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	enum drm_sched_priority drm_prio;
	unsigned int hw_prio, num_scheds;
	int32_t ctx_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->hw_ip = hw_ip;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);

	if (!(adev)->xcp_mgr) {
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
	} else {
		struct amdgpu_fpriv *fpriv;

		fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr);
		r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
					     &num_scheds, &scheds);
		if (r)
			goto cleanup_entity;
	}

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
				      struct amdgpu_ctx_entity *entity)
{
	ktime_t res = ns_to_ktime(0);
	int i;

	if (!entity)
		return res;

	for (i = 0; i < amdgpu_sched_jobs; ++i) {
		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
		dma_fence_put(entity->fences[i]);
	}

	amdgpu_xcp_release_sched(adev, entity);

	kfree(entity);
	return res;
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	u32 current_stable_pstate;
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r)
		return r;

	if (mgr->adev->pm.stable_pstate_ctx)
		ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
	else
		ctx->stable_pstate = current_stable_pstate;

	ctx->ctx_mgr = &(fpriv->ctx_mgr);
	return 0;
}
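
/*
 * Only one context per device may force a stable power state at a time;
 * adev->pm.stable_pstate_ctx records the owner, and any other context
 * requesting a state gets -EBUSY below. On release, amdgpu_ctx_fini()
 * restores the state saved at context creation, which also drops the
 * reservation again when that state maps back to AMD_DPM_FORCED_LEVEL_AUTO.
 */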

static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level level;
	u32 current_stable_pstate;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r || (stable_pstate == current_stable_pstate))
		goto done;

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
	struct amdgpu_device *adev = mgr->adev;
	unsigned i, j, idx;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			ktime_t spend;

			spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
			atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
		}
	}

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
		drm_dev_exit(idx);
	}

	kfree(ctx);
}

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;
	struct drm_sched_entity *ctx_entity;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	ctx_entity = &ctx->entities[hw_ip][ring]->entity;
	r = drm_sched_entity_error(ctx_entity);
	if (r) {
		DRM_DEBUG("error entity %p\n", ctx_entity);
		return r;
	}

	*entity = ctx_entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}
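
/*
 * Two generations of the state query follow: amdgpu_ctx_query() only reports
 * whether any GPU reset happened since the last query, while
 * amdgpu_ctx_query2() additionally reports separate flags for VRAM loss,
 * guilt, a reset in progress, and changes in the cached RAS error counters.
 */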
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (amdgpu_in_reset(adev))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}
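
/*
 * Entry point for DRM_IOCTL_AMDGPU_CTX. As a rough sketch of how userspace
 * reaches the ALLOC/FREE ops below through libdrm's amdgpu wrapper (names
 * from libdrm, not from this file):
 *
 *	amdgpu_context_handle ctx;
 *	r = amdgpu_cs_ctx_create(dev, &ctx);	// AMDGPU_CTX_OP_ALLOC_CTX
 *	...					// submit work against ctx
 *	r = amdgpu_cs_ctx_free(ctx);		// AMDGPU_CTX_OP_FREE_CTX
 */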
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility, we need to accept ioctls with garbage
	 * in the priority field. Garbage values in the priority field result
	 * in the priority being set to NORMAL.
	 */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
		     &ctx->mgr->time_spend[centity->hw_ip]);

	dma_fence_put(other);
	return seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
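
/*
 * The per-entity fence ring buffer above relies on amdgpu_sched_jobs being a
 * power of two: seq & (amdgpu_sched_jobs - 1) is then equivalent to
 * seq % amdgpu_sched_jobs. For example, with amdgpu_sched_jobs == 32,
 * sequence numbers 1, 33, 65, ... all map to slot 1, so a new fence
 * overwrites the one submitted 32 jobs earlier; amdgpu_ctx_add_fence() warns
 * if that old fence has not signaled yet, and amdgpu_ctx_get_fence() returns
 * NULL for sequence numbers old enough to have been overwritten.
 */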
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	unsigned int i;

	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init_base(&mgr->ctx_handles, 1);

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		atomic64_set(&mgr->time_spend[i], 0);
}

long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
		kref_put(&ctx->refcount, amdgpu_ctx_fini);
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	amdgpu_ctx_mgr_entity_fini(mgr);
	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}
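
/*
 * Usage accounting is split in two parts: time for fences that have been
 * evicted from the ring buffers or freed is folded into mgr->time_spend[]
 * (by amdgpu_ctx_add_fence() and amdgpu_ctx_fini()), while fences still held
 * in the per-entity ring buffers are walked on demand below. This is what
 * the fdinfo code uses to report per-engine GPU time for a client.
 */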
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
			  ktime_t usage[AMDGPU_HW_IP_NUM])
{
	struct amdgpu_ctx *ctx;
	unsigned int hw_ip, i;
	uint32_t id;

	/*
	 * This is a little bit racy because a ctx or a fence can be destroyed
	 * at the very moment we try to account them. But that is OK since
	 * exactly that case is explicitly allowed by the interface.
	 */
	mutex_lock(&mgr->lock);
	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
		uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);

		usage[hw_ip] = ns_to_ktime(ns);
	}

	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
		for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
			for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
				struct amdgpu_ctx_entity *centity;
				ktime_t spend;

				centity = ctx->entities[hw_ip][i];
				if (!centity)
					continue;
				spend = amdgpu_ctx_entity_time(ctx, centity);
				usage[hw_ip] = ktime_add(usage[hw_ip], spend);
			}
		}
	}
	mutex_unlock(&mgr->lock);
}