Path: drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_MQD_MANAGER_H_
#define KFD_MQD_MANAGER_H_

#include "kfd_priv.h"

#define KFD_MAX_NUM_SE 8
#define KFD_MAX_NUM_SH_PER_SE 2

/**
 * struct mqd_manager
 *
 * @allocate_mqd: Allocates the MQD buffer in local GPU memory.
 *
 * @init_mqd: Initializes an allocated MQD buffer from the queue properties.
 *
 * @load_mqd: Loads the MQD into a concrete HQD slot. Used only in no-cp
 * scheduling mode.
 *
 * @update_mqd: Handles an update call for the MQD.
 *
 * @destroy_mqd: Destroys the HQD slot and thereby preempts the relevant queue.
 * Used only in no-cp scheduling mode.
 *
 * @free_mqd: Releases the MQD buffer from local GPU memory.
 *
 * @is_occupied: Checks whether the relevant HQD slot is occupied.
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 *
 * @mqd_mutex: MQD manager mutex.
 *
 * @dev: The kfd device structure coupled with this module.
 *
 * MQD stands for Memory Queue Descriptor, which represents the current queue
 * state in memory and initiates the HQD (Hardware Queue Descriptor) state.
 * This structure is in effect a base class for the different MQD structures
 * of the various ASICs that should be supported in the future.
 * The base class also contains all the MQD-specific operations.
 * Another important point is that each queue has an MQD that keeps its state
 * (or context) across each preemption or reassignment.
 * Basically, there is an instance of the mqd manager class per MQD type per
 * ASIC. Currently the kfd driver supports only Kaveri, so there are instances
 * per KFD_MQD_TYPE for each device.
 */
extern int pipe_priority_map[];

struct mqd_manager {
	struct kfd_mem_obj*	(*allocate_mqd)(struct kfd_node *kfd,
					struct queue_properties *q);

	void	(*init_mqd)(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q);

	int	(*load_mqd)(struct mqd_manager *mm, void *mqd,
				uint32_t pipe_id, uint32_t queue_id,
				struct queue_properties *p,
				struct mm_struct *mms);

	void	(*update_mqd)(struct mqd_manager *mm, void *mqd,
				struct queue_properties *q,
				struct mqd_update_info *minfo);

	int	(*destroy_mqd)(struct mqd_manager *mm, void *mqd,
				enum kfd_preempt_type type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	void	(*free_mqd)(struct mqd_manager *mm, void *mqd,
				struct kfd_mem_obj *mqd_mem_obj);

	bool	(*is_occupied)(struct mqd_manager *mm, void *mqd,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id);

	int	(*get_wave_state)(struct mqd_manager *mm, void *mqd,
				  struct queue_properties *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);

	void	(*get_checkpoint_info)(struct mqd_manager *mm, void *mqd,
				uint32_t *ctl_stack_size);

	void	(*checkpoint_mqd)(struct mqd_manager *mm,
				  void *mqd,
				  void *mqd_dst,
				  void *ctl_stack_dst);

	void	(*restore_mqd)(struct mqd_manager *mm, void **mqd,
				struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
				struct queue_properties *p,
				const void *mqd_src,
				const void *ctl_stack_src,
				const u32 ctl_stack_size);

#if defined(CONFIG_DEBUG_FS)
	int	(*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
	bool	(*check_preemption_failed)(struct mqd_manager *mm, void *mqd);
	uint64_t (*mqd_stride)(struct mqd_manager *mm,
				struct queue_properties *p);

	struct mutex	mqd_mutex;
	struct kfd_node	*dev;
	uint32_t mqd_size;
};
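
/*
 * Example only (not part of the header): a minimal sketch of the intended
 * calling order of the ops above for a single queue when the CP scheduler is
 * not used (no-cp scheduling mode). It assumes the caller already holds a
 * valid mqd_manager, kfd_node, queue_properties, pipe/queue ids and
 * mm_struct; the preempt type, timeout and NULL update info are arbitrary
 * choices made for illustration.
 */
#if 0
static int example_queue_lifecycle(struct mqd_manager *mm,
				   struct kfd_node *kfd,
				   struct queue_properties *q,
				   uint32_t pipe_id, uint32_t queue_id,
				   struct mm_struct *mms)
{
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_addr;
	void *mqd;
	int r;

	/* Reserve the MQD buffer, then fill it from the queue properties. */
	mqd_mem_obj = mm->allocate_mqd(kfd, q);
	if (!mqd_mem_obj)
		return -ENOMEM;
	mm->init_mqd(mm, &mqd, mqd_mem_obj, &gart_addr, q);

	/* Program the chosen HQD slot from the MQD. */
	r = mm->load_mqd(mm, mqd, pipe_id, queue_id, q, mms);
	if (r)
		goto out_free;

	/* Later changes to the queue properties are folded back into the MQD. */
	mm->update_mqd(mm, mqd, q, NULL);

	/* Preempt the queue and release the HQD slot before freeing the MQD. */
	r = mm->destroy_mqd(mm, mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
			    100 /* ms, arbitrary */, pipe_id, queue_id);

out_free:
	mm->free_mqd(mm, mqd, mqd_mem_obj);
	return r;
}
#endif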

struct mqd_user_context_save_area_header {
	/* Byte offset from start of user context
	 * save area to the last saved top (lowest
	 * address) of control stack data. Must be
	 * 4 byte aligned.
	 */
	uint32_t control_stack_offset;

	/* Byte size of the last saved control stack
	 * data. Must be 4 byte aligned.
	 */
	uint32_t control_stack_size;

	/* Byte offset from start of user context save
	 * area to the last saved base (lowest address)
	 * of wave state data. Must be 4 byte aligned.
	 */
	uint32_t wave_state_offset;

	/* Byte size of the last saved wave state data.
	 * Must be 4 byte aligned.
	 */
	uint32_t wave_state_size;
};
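
/*
 * Example only (not part of the header): a minimal sketch of how a userspace
 * consumer of the context save area could locate the two regions described
 * by the header above. It assumes the header sits at the very start of the
 * save area and that both offsets are relative to that start, as the field
 * comments state; example_locate_saved_state and its parameters are
 * hypothetical names used for illustration.
 */
#if 0
static inline void example_locate_saved_state(void *save_area,
					      void **ctl_stack,
					      uint32_t *ctl_stack_size,
					      void **wave_state,
					      uint32_t *wave_state_size)
{
	struct mqd_user_context_save_area_header *hdr = save_area;

	/* Control stack: last saved top of control stack data and its size. */
	*ctl_stack = (char *)save_area + hdr->control_stack_offset;
	*ctl_stack_size = hdr->control_stack_size;

	/* Wave state: last saved base of wave state data and its size. */
	*wave_state = (char *)save_area + hdr->wave_state_offset;
	*wave_state_size = hdr->wave_state_size;
}
#endif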

struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev,
				struct queue_properties *q);

struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
				struct queue_properties *q);
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
				struct kfd_mem_obj *mqd_mem_obj);

void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
		const uint32_t *cu_mask, uint32_t cu_mask_count,
		uint32_t *se_mask, uint32_t inst);

int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms);

int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id);

void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
		struct kfd_mem_obj *mqd_mem_obj);

bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
		uint64_t queue_address, uint32_t pipe_id,
		uint32_t queue_id);

int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms);

int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id);

bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
		uint64_t queue_address, uint32_t pipe_id,
		uint32_t queue_id);

void kfd_get_hiq_xcc_mqd(struct kfd_node *dev,
		struct kfd_mem_obj *mqd_mem_obj, uint32_t virtual_xcc_id);

uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev);
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
			struct queue_properties *q);
bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
				   uint32_t inst);

#endif /* KFD_MQD_MANAGER_H_ */
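
/*
 * Example only (appended after the header body purely for illustration): a
 * minimal sketch of how a per-ASIC backend might wire the shared CP helpers
 * declared above into a struct mqd_manager for a compute queue type. The
 * example_* callbacks are hypothetical placeholders for ASIC-specific
 * implementations (the real ones live in the per-ASIC kfd_mqd_manager_*.c
 * sources); kzalloc/GFP_KERNEL are assumed available via <linux/slab.h>.
 */
#if 0
static struct mqd_manager *example_mqd_manager_init_cp(struct kfd_node *dev)
{
	struct mqd_manager *mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);

	if (!mqd)
		return NULL;

	mqd->dev = dev;
	/* ASIC-specific callbacks (hypothetical placeholders). */
	mqd->allocate_mqd = example_allocate_mqd;
	mqd->init_mqd = example_init_mqd;
	mqd->load_mqd = example_load_mqd;
	mqd->update_mqd = example_update_mqd;
	/* Generic CP helpers shared across ASICs, declared in this header. */
	mqd->destroy_mqd = kfd_destroy_mqd_cp;
	mqd->free_mqd = kfd_free_mqd_cp;
	mqd->is_occupied = kfd_is_occupied_cp;
	mqd->mqd_stride = kfd_mqd_stride;

	return mqd;
}
#endif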