Path: blob/master/drivers/accel/habanalabs/common/device.c
26450 views
// SPDX-License-Identifier: GPL-2.012/*3* Copyright 2016-2022 HabanaLabs, Ltd.4* All Rights Reserved.5*/67#define pr_fmt(fmt) "habanalabs: " fmt89#include <uapi/drm/habanalabs_accel.h>10#include "habanalabs.h"1112#include <linux/pci.h>13#include <linux/hwmon.h>14#include <linux/vmalloc.h>1516#include <drm/drm_accel.h>17#include <drm/drm_drv.h>1819#include <trace/events/habanalabs.h>2021#define HL_RESET_DELAY_USEC 10000 /* 10ms */2223#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 302425enum dma_alloc_type {26DMA_ALLOC_COHERENT,27DMA_ALLOC_POOL,28};2930#define MEM_SCRUB_DEFAULT_VAL 0x11223344556677883132static void hl_device_heartbeat(struct work_struct *work);3334/*35* hl_set_dram_bar- sets the bar to allow later access to address36*37* @hdev: pointer to habanalabs device structure.38* @addr: the address the caller wants to access.39* @region: the PCI region.40* @new_bar_region_base: the new BAR region base address.41*42* @return: the old BAR base address on success, U64_MAX for failure.43* The caller should set it back to the old address after use.44*45* In case the bar space does not cover the whole address space,46* the bar base address should be set to allow access to a given address.47* This function can be called also if the bar doesn't need to be set,48* in that case it just won't change the base.49*/50static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,51u64 *new_bar_region_base)52{53struct asic_fixed_properties *prop = &hdev->asic_prop;54u64 bar_base_addr, old_base;5556if (is_power_of_2(prop->dram_pci_bar_size))57bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);58else59bar_base_addr = region->region_base +60div64_u64((addr - region->region_base), prop->dram_pci_bar_size) *61prop->dram_pci_bar_size;6263old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);6465/* in case of success we need to update the new BAR base */66if ((old_base != U64_MAX) && new_bar_region_base)67*new_bar_region_base = bar_base_addr;6869return old_base;70}7172int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,73enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)74{75struct pci_mem_region *region = &hdev->pci_mem_region[region_type];76u64 old_base = 0, rc, bar_region_base = region->region_base;77void __iomem *acc_addr;7879if (set_dram_bar) {80old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);81if (old_base == U64_MAX)82return -EIO;83}8485acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +86(addr - bar_region_base);8788switch (acc_type) {89case DEBUGFS_READ8:90*val = readb(acc_addr);91break;92case DEBUGFS_WRITE8:93writeb(*val, acc_addr);94break;95case DEBUGFS_READ32:96*val = readl(acc_addr);97break;98case DEBUGFS_WRITE32:99writel(*val, acc_addr);100break;101case DEBUGFS_READ64:102*val = readq(acc_addr);103break;104case DEBUGFS_WRITE64:105writeq(*val, acc_addr);106break;107}108109if (set_dram_bar) {110rc = hl_set_dram_bar(hdev, old_base, region, NULL);111if (rc == U64_MAX)112return -EIO;113}114115return 0;116}117118static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,119gfp_t flag, enum dma_alloc_type alloc_type,120const char *caller)121{122void *ptr = NULL;123124switch (alloc_type) {125case DMA_ALLOC_COHERENT:126ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);127break;128case DMA_ALLOC_POOL:129ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);130break;131}132133if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))134trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,135size, caller);136137return ptr;138}139140static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,141dma_addr_t dma_handle, enum dma_alloc_type alloc_type,142const char *caller)143{144/* this is needed to avoid warning on using freed pointer */145u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;146147switch (alloc_type) {148case DMA_ALLOC_COHERENT:149hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);150break;151case DMA_ALLOC_POOL:152hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);153break;154}155156trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);157}158159void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,160gfp_t flag, const char *caller)161{162return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);163}164165void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,166dma_addr_t dma_handle, const char *caller)167{168hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);169}170171void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,172dma_addr_t *dma_handle, const char *caller)173{174return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);175}176177void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,178const char *caller)179{180hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);181}182183void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)184{185return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);186}187188void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)189{190hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);191}192193int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,194enum dma_data_direction dir, const char *caller)195{196struct asic_fixed_properties *prop = &hdev->asic_prop;197struct scatterlist *sg;198int rc, i;199200rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);201if (rc)202return rc;203204if (!trace_habanalabs_dma_map_page_enabled())205return 0;206207for_each_sgtable_dma_sg(sgt, sg, i)208trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,209page_to_phys(sg_page(sg)),210sg->dma_address - prop->device_dma_offset_for_host_access,211#ifdef CONFIG_NEED_SG_DMA_LENGTH212sg->dma_length,213#else214sg->length,215#endif216dir, caller);217218return 0;219}220221int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,222enum dma_data_direction dir)223{224struct asic_fixed_properties *prop = &hdev->asic_prop;225struct scatterlist *sg;226int rc, i;227228rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);229if (rc)230return rc;231232/* Shift to the device's base physical address of host memory if necessary */233if (prop->device_dma_offset_for_host_access)234for_each_sgtable_dma_sg(sgt, sg, i)235sg->dma_address += prop->device_dma_offset_for_host_access;236237return 0;238}239240void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,241enum dma_data_direction dir, const char *caller)242{243struct asic_fixed_properties *prop = &hdev->asic_prop;244struct scatterlist *sg;245int i;246247hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);248249if (trace_habanalabs_dma_unmap_page_enabled()) {250for_each_sgtable_dma_sg(sgt, sg, i)251trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,252page_to_phys(sg_page(sg)),253sg->dma_address - prop->device_dma_offset_for_host_access,254#ifdef CONFIG_NEED_SG_DMA_LENGTH255sg->dma_length,256#else257sg->length,258#endif259dir, caller);260}261}262263void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,264enum dma_data_direction dir)265{266struct asic_fixed_properties *prop = &hdev->asic_prop;267struct scatterlist *sg;268int i;269270/* Cancel the device's base physical address of host memory if necessary */271if (prop->device_dma_offset_for_host_access)272for_each_sgtable_dma_sg(sgt, sg, i)273sg->dma_address -= prop->device_dma_offset_for_host_access;274275dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);276}277278/*279* hl_access_cfg_region - access the config region280*281* @hdev: pointer to habanalabs device structure282* @addr: the address to access283* @val: the value to write from or read to284* @acc_type: the type of access (read/write 64/32)285*/286int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,287enum debugfs_access_type acc_type)288{289struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];290u32 val_h, val_l;291292if (!IS_ALIGNED(addr, sizeof(u32))) {293dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));294return -EINVAL;295}296297switch (acc_type) {298case DEBUGFS_READ32:299*val = RREG32(addr - cfg_region->region_base);300break;301case DEBUGFS_WRITE32:302WREG32(addr - cfg_region->region_base, *val);303break;304case DEBUGFS_READ64:305val_l = RREG32(addr - cfg_region->region_base);306val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);307308*val = (((u64) val_h) << 32) | val_l;309break;310case DEBUGFS_WRITE64:311WREG32(addr - cfg_region->region_base, lower_32_bits(*val));312WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));313break;314default:315dev_err(hdev->dev, "access type %d is not supported\n", acc_type);316return -EOPNOTSUPP;317}318319return 0;320}321322/*323* hl_access_dev_mem - access device memory324*325* @hdev: pointer to habanalabs device structure326* @region_type: the type of the region the address belongs to327* @addr: the address to access328* @val: the value to write from or read to329* @acc_type: the type of access (r/w, 32/64)330*/331int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,332u64 addr, u64 *val, enum debugfs_access_type acc_type)333{334switch (region_type) {335case PCI_REGION_CFG:336return hl_access_cfg_region(hdev, addr, val, acc_type);337case PCI_REGION_SRAM:338case PCI_REGION_DRAM:339return hl_access_sram_dram_region(hdev, addr, val, acc_type,340region_type, (region_type == PCI_REGION_DRAM));341default:342return -EFAULT;343}344345return 0;346}347348void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)349{350va_list args;351int str_size;352353va_start(args, fmt);354/* Calculate formatted string length. Assuming each string is null terminated, hence355* increment result by 1356*/357str_size = vsnprintf(NULL, 0, fmt, args) + 1;358va_end(args);359360if ((e->actual_size + str_size) < e->allocated_buf_size) {361va_start(args, fmt);362vsnprintf(e->buf + e->actual_size, str_size, fmt, args);363va_end(args);364}365366/* Need to update the size even when not updating destination buffer to get the exact size367* of all input strings368*/369e->actual_size += str_size;370}371372enum hl_device_status hl_device_status(struct hl_device *hdev)373{374enum hl_device_status status;375376if (hdev->device_fini_pending) {377status = HL_DEVICE_STATUS_MALFUNCTION;378} else if (hdev->reset_info.in_reset) {379if (hdev->reset_info.in_compute_reset)380status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;381else382status = HL_DEVICE_STATUS_IN_RESET;383} else if (hdev->reset_info.needs_reset) {384status = HL_DEVICE_STATUS_NEEDS_RESET;385} else if (hdev->disabled) {386status = HL_DEVICE_STATUS_MALFUNCTION;387} else if (!hdev->init_done) {388status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;389} else {390status = HL_DEVICE_STATUS_OPERATIONAL;391}392393return status;394}395396bool hl_device_operational(struct hl_device *hdev,397enum hl_device_status *status)398{399enum hl_device_status current_status;400401current_status = hl_device_status(hdev);402if (status)403*status = current_status;404405switch (current_status) {406case HL_DEVICE_STATUS_MALFUNCTION:407case HL_DEVICE_STATUS_IN_RESET:408case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:409case HL_DEVICE_STATUS_NEEDS_RESET:410return false;411case HL_DEVICE_STATUS_OPERATIONAL:412case HL_DEVICE_STATUS_IN_DEVICE_CREATION:413default:414return true;415}416}417418bool hl_ctrl_device_operational(struct hl_device *hdev,419enum hl_device_status *status)420{421enum hl_device_status current_status;422423current_status = hl_device_status(hdev);424if (status)425*status = current_status;426427switch (current_status) {428case HL_DEVICE_STATUS_MALFUNCTION:429return false;430case HL_DEVICE_STATUS_IN_RESET:431case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:432case HL_DEVICE_STATUS_NEEDS_RESET:433case HL_DEVICE_STATUS_OPERATIONAL:434case HL_DEVICE_STATUS_IN_DEVICE_CREATION:435default:436return true;437}438}439440static void print_idle_status_mask(struct hl_device *hdev, const char *message,441u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])442{443if (idle_mask[3])444dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",445dev_name(&hdev->pdev->dev), message,446idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);447else if (idle_mask[2])448dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",449dev_name(&hdev->pdev->dev), message,450idle_mask[2], idle_mask[1], idle_mask[0]);451else if (idle_mask[1])452dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",453dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);454else455dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,456idle_mask[0]);457}458459static void hpriv_release(struct kref *ref)460{461u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};462bool reset_device, device_is_idle = true;463struct hl_fpriv *hpriv;464struct hl_device *hdev;465466hpriv = container_of(ref, struct hl_fpriv, refcount);467468hdev = hpriv->hdev;469470hdev->asic_funcs->send_device_activity(hdev, false);471472hl_debugfs_remove_file(hpriv);473474mutex_destroy(&hpriv->ctx_lock);475mutex_destroy(&hpriv->restore_phase_mutex);476477/* There should be no memory buffers at this point and handles IDR can be destroyed */478hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);479480/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending481* reset that waits for device release.482*/483reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;484485/* Check the device idle status and reset if not idle.486* Skip it if already in reset, or if device is going to be reset in any case.487*/488if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)489device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,490HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);491if (!device_is_idle) {492print_idle_status_mask(hdev, "device is not idle after user context is closed",493idle_mask);494reset_device = true;495}496497/* We need to remove the user from the list to make sure the reset process won't498* try to kill the user process. Because, if we got here, it means there are no499* more driver/device resources that the user process is occupying so there is500* no need to kill it501*502* However, we can't set the compute_ctx to NULL at this stage. This is to prevent503* a race between the release and opening the device again. We don't want to let504* a user open the device while there a reset is about to happen.505*/506mutex_lock(&hdev->fpriv_list_lock);507list_del(&hpriv->dev_node);508mutex_unlock(&hdev->fpriv_list_lock);509510put_pid(hpriv->taskpid);511512if (reset_device) {513hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);514} else {515/* Scrubbing is handled within hl_device_reset(), so here need to do it directly */516int rc = hdev->asic_funcs->scrub_device_mem(hdev);517518if (rc) {519dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);520hl_device_reset(hdev, HL_DRV_RESET_HARD);521}522}523524/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different525* thread, we don't care because the in_reset is marked so if a user will try to open526* the device it will fail on that, even if compute_ctx is false.527*/528mutex_lock(&hdev->fpriv_list_lock);529hdev->is_compute_ctx_active = false;530mutex_unlock(&hdev->fpriv_list_lock);531532hdev->compute_ctx_in_release = 0;533534/* release the eventfd */535if (hpriv->notifier_event.eventfd)536eventfd_ctx_put(hpriv->notifier_event.eventfd);537538mutex_destroy(&hpriv->notifier_event.lock);539540kfree(hpriv);541}542543void hl_hpriv_get(struct hl_fpriv *hpriv)544{545kref_get(&hpriv->refcount);546}547548int hl_hpriv_put(struct hl_fpriv *hpriv)549{550return kref_put(&hpriv->refcount, hpriv_release);551}552553static void print_device_in_use_info(struct hl_device *hdev,554struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)555{556u32 active_cs_num, dmabuf_export_cnt;557bool unknown_reason = true;558char buf[128];559size_t size;560int offset;561562size = sizeof(buf);563offset = 0;564565active_cs_num = hl_get_active_cs_num(hdev);566if (active_cs_num) {567unknown_reason = false;568offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);569}570571dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);572if (dmabuf_export_cnt) {573unknown_reason = false;574offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",575dmabuf_export_cnt);576}577578if (mm_fini_stats->n_busy_cb) {579unknown_reason = false;580offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",581mm_fini_stats->n_busy_cb);582}583584if (unknown_reason)585scnprintf(buf + offset, size - offset, " [unknown reason]");586587dev_notice(hdev->dev, "%s%s\n", message, buf);588}589590/*591* hl_device_release() - release function for habanalabs device.592* @ddev: pointer to DRM device structure.593* @file: pointer to DRM file private data structure.594*595* Called when process closes an habanalabs device596*/597void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)598{599struct hl_fpriv *hpriv = file_priv->driver_priv;600struct hl_device *hdev = to_hl_device(ddev);601struct hl_mem_mgr_fini_stats mm_fini_stats;602603if (!hdev) {604pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");605put_pid(hpriv->taskpid);606}607608hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);609610/* Memory buffers might be still in use at this point and thus the handles IDR destruction611* is postponed to hpriv_release().612*/613hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);614615hdev->compute_ctx_in_release = 1;616617if (!hl_hpriv_put(hpriv)) {618print_device_in_use_info(hdev, &mm_fini_stats,619"User process closed FD but device still in use");620hl_device_reset(hdev, HL_DRV_RESET_HARD);621}622623hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;624}625626static int hl_device_release_ctrl(struct inode *inode, struct file *filp)627{628struct hl_fpriv *hpriv = filp->private_data;629struct hl_device *hdev = hpriv->hdev;630631filp->private_data = NULL;632633if (!hdev) {634pr_err("Closing FD after device was removed\n");635goto out;636}637638mutex_lock(&hdev->fpriv_ctrl_list_lock);639list_del(&hpriv->dev_node);640mutex_unlock(&hdev->fpriv_ctrl_list_lock);641out:642put_pid(hpriv->taskpid);643644kfree(hpriv);645646return 0;647}648649static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)650{651struct hl_device *hdev = hpriv->hdev;652unsigned long vm_pgoff;653654if (!hdev) {655pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");656return -ENODEV;657}658659vm_pgoff = vma->vm_pgoff;660661switch (vm_pgoff & HL_MMAP_TYPE_MASK) {662case HL_MMAP_TYPE_BLOCK:663vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);664return hl_hw_block_mmap(hpriv, vma);665666case HL_MMAP_TYPE_CB:667case HL_MMAP_TYPE_TS_BUFF:668return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);669}670return -EINVAL;671}672673/*674* hl_mmap - mmap function for habanalabs device675*676* @*filp: pointer to file structure677* @*vma: pointer to vm_area_struct of the process678*679* Called when process does an mmap on habanalabs device. Call the relevant mmap680* function at the end of the common code.681*/682int hl_mmap(struct file *filp, struct vm_area_struct *vma)683{684struct drm_file *file_priv = filp->private_data;685struct hl_fpriv *hpriv = file_priv->driver_priv;686687return __hl_mmap(hpriv, vma);688}689690static const struct file_operations hl_ctrl_ops = {691.owner = THIS_MODULE,692.open = hl_device_open_ctrl,693.release = hl_device_release_ctrl,694.unlocked_ioctl = hl_ioctl_control,695.compat_ioctl = hl_ioctl_control696};697698static void device_release_func(struct device *dev)699{700kfree(dev);701}702703/*704* device_init_cdev - Initialize cdev and device for habanalabs device705*706* @hdev: pointer to habanalabs device structure707* @class: pointer to the class object of the device708* @minor: minor number of the specific device709* @fops: file operations to install for this device710* @name: name of the device as it will appear in the filesystem711* @cdev: pointer to the char device object that will be initialized712* @dev: pointer to the device object that will be initialized713*714* Initialize a cdev and a Linux device for habanalabs's device.715*/716static int device_init_cdev(struct hl_device *hdev, const struct class *class,717int minor, const struct file_operations *fops,718char *name, struct cdev *cdev,719struct device **dev)720{721cdev_init(cdev, fops);722cdev->owner = THIS_MODULE;723724*dev = kzalloc(sizeof(**dev), GFP_KERNEL);725if (!*dev)726return -ENOMEM;727728device_initialize(*dev);729(*dev)->devt = MKDEV(hdev->major, minor);730(*dev)->class = class;731(*dev)->release = device_release_func;732dev_set_drvdata(*dev, hdev);733dev_set_name(*dev, "%s", name);734735return 0;736}737738static int cdev_sysfs_debugfs_add(struct hl_device *hdev)739{740const struct class *accel_class = hdev->drm.accel->kdev->class;741char name[32];742int rc;743744hdev->cdev_idx = hdev->drm.accel->index;745746/* Initialize cdev and device structures for the control device */747snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);748rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,749&hdev->cdev_ctrl, &hdev->dev_ctrl);750if (rc)751return rc;752753rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);754if (rc) {755dev_err(hdev->dev_ctrl,756"failed to add an accel control char device to the system\n");757goto free_ctrl_device;758}759760rc = hl_sysfs_init(hdev);761if (rc) {762dev_err(hdev->dev, "failed to initialize sysfs\n");763goto delete_ctrl_cdev_device;764}765766hl_debugfs_add_device(hdev);767768hdev->cdev_sysfs_debugfs_created = true;769770return 0;771772delete_ctrl_cdev_device:773cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);774free_ctrl_device:775put_device(hdev->dev_ctrl);776return rc;777}778779static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)780{781if (!hdev->cdev_sysfs_debugfs_created)782return;783784hl_sysfs_fini(hdev);785786cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);787put_device(hdev->dev_ctrl);788}789790static void device_hard_reset_pending(struct work_struct *work)791{792struct hl_device_reset_work *device_reset_work =793container_of(work, struct hl_device_reset_work, reset_work.work);794struct hl_device *hdev = device_reset_work->hdev;795u32 flags;796int rc;797798flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;799800rc = hl_device_reset(hdev, flags);801802if ((rc == -EBUSY) && !hdev->device_fini_pending) {803struct hl_ctx *ctx = hl_get_compute_ctx(hdev);804805if (ctx) {806/* The read refcount value should subtracted by one, because the read is807* protected with hl_get_compute_ctx().808*/809dev_info(hdev->dev,810"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",811kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);812hl_ctx_put(ctx);813} else {814dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",815HL_PENDING_RESET_PER_SEC);816}817818queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,819secs_to_jiffies(HL_PENDING_RESET_PER_SEC));820}821}822823static void device_release_watchdog_func(struct work_struct *work)824{825struct hl_device_reset_work *watchdog_work =826container_of(work, struct hl_device_reset_work, reset_work.work);827struct hl_device *hdev = watchdog_work->hdev;828u32 flags;829830dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");831832flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;833834hl_device_reset(hdev, flags);835}836837/*838* device_early_init - do some early initialization for the habanalabs device839*840* @hdev: pointer to habanalabs device structure841*842* Install the relevant function pointers and call the early_init function,843* if such a function exists844*/845static int device_early_init(struct hl_device *hdev)846{847int i, rc;848char workq_name[32];849850switch (hdev->asic_type) {851case ASIC_GOYA:852goya_set_asic_funcs(hdev);853strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));854break;855case ASIC_GAUDI:856gaudi_set_asic_funcs(hdev);857strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));858break;859case ASIC_GAUDI_SEC:860gaudi_set_asic_funcs(hdev);861strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));862break;863case ASIC_GAUDI2:864gaudi2_set_asic_funcs(hdev);865strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));866break;867case ASIC_GAUDI2B:868gaudi2_set_asic_funcs(hdev);869strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));870break;871case ASIC_GAUDI2C:872gaudi2_set_asic_funcs(hdev);873strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));874break;875case ASIC_GAUDI2D:876gaudi2_set_asic_funcs(hdev);877strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));878break;879default:880dev_err(hdev->dev, "Unrecognized ASIC type %d\n",881hdev->asic_type);882return -EINVAL;883}884885rc = hdev->asic_funcs->early_init(hdev);886if (rc)887return rc;888889rc = hl_asid_init(hdev);890if (rc)891goto early_fini;892893if (hdev->asic_prop.completion_queues_count) {894hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,895sizeof(struct workqueue_struct *),896GFP_KERNEL);897if (!hdev->cq_wq) {898rc = -ENOMEM;899goto asid_fini;900}901}902903for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {904snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);905hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);906if (hdev->cq_wq[i] == NULL) {907dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");908rc = -ENOMEM;909goto free_cq_wq;910}911}912913snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);914hdev->eq_wq = create_singlethread_workqueue(workq_name);915if (hdev->eq_wq == NULL) {916dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");917rc = -ENOMEM;918goto free_cq_wq;919}920921snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);922hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);923if (!hdev->cs_cmplt_wq) {924dev_err(hdev->dev,925"Failed to allocate CS completions workqueue\n");926rc = -ENOMEM;927goto free_eq_wq;928}929930snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);931hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);932if (!hdev->ts_free_obj_wq) {933dev_err(hdev->dev,934"Failed to allocate Timestamp registration free workqueue\n");935rc = -ENOMEM;936goto free_cs_cmplt_wq;937}938939snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);940hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);941if (!hdev->prefetch_wq) {942dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");943rc = -ENOMEM;944goto free_ts_free_wq;945}946947hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);948if (!hdev->hl_chip_info) {949rc = -ENOMEM;950goto free_prefetch_wq;951}952953rc = hl_mmu_if_set_funcs(hdev);954if (rc)955goto free_chip_info;956957hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);958959snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);960hdev->reset_wq = create_singlethread_workqueue(workq_name);961if (!hdev->reset_wq) {962rc = -ENOMEM;963dev_err(hdev->dev, "Failed to create device reset WQ\n");964goto free_cb_mgr;965}966967INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);968969INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);970hdev->device_reset_work.hdev = hdev;971hdev->device_fini_pending = 0;972973INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,974device_release_watchdog_func);975hdev->device_release_watchdog_work.hdev = hdev;976977mutex_init(&hdev->send_cpu_message_lock);978mutex_init(&hdev->debug_lock);979INIT_LIST_HEAD(&hdev->cs_mirror_list);980spin_lock_init(&hdev->cs_mirror_lock);981spin_lock_init(&hdev->reset_info.lock);982INIT_LIST_HEAD(&hdev->fpriv_list);983INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);984mutex_init(&hdev->fpriv_list_lock);985mutex_init(&hdev->fpriv_ctrl_list_lock);986mutex_init(&hdev->clk_throttling.lock);987988return 0;989990free_cb_mgr:991hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);992hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);993free_chip_info:994kfree(hdev->hl_chip_info);995free_prefetch_wq:996destroy_workqueue(hdev->prefetch_wq);997free_ts_free_wq:998destroy_workqueue(hdev->ts_free_obj_wq);999free_cs_cmplt_wq:1000destroy_workqueue(hdev->cs_cmplt_wq);1001free_eq_wq:1002destroy_workqueue(hdev->eq_wq);1003free_cq_wq:1004for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)1005if (hdev->cq_wq[i])1006destroy_workqueue(hdev->cq_wq[i]);1007kfree(hdev->cq_wq);1008asid_fini:1009hl_asid_fini(hdev);1010early_fini:1011if (hdev->asic_funcs->early_fini)1012hdev->asic_funcs->early_fini(hdev);10131014return rc;1015}10161017/*1018* device_early_fini - finalize all that was done in device_early_init1019*1020* @hdev: pointer to habanalabs device structure1021*1022*/1023static void device_early_fini(struct hl_device *hdev)1024{1025int i;10261027mutex_destroy(&hdev->debug_lock);1028mutex_destroy(&hdev->send_cpu_message_lock);10291030mutex_destroy(&hdev->fpriv_list_lock);1031mutex_destroy(&hdev->fpriv_ctrl_list_lock);10321033mutex_destroy(&hdev->clk_throttling.lock);10341035hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);1036hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);10371038kfree(hdev->hl_chip_info);10391040destroy_workqueue(hdev->prefetch_wq);1041destroy_workqueue(hdev->ts_free_obj_wq);1042destroy_workqueue(hdev->cs_cmplt_wq);1043destroy_workqueue(hdev->eq_wq);1044destroy_workqueue(hdev->reset_wq);10451046for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)1047destroy_workqueue(hdev->cq_wq[i]);1048kfree(hdev->cq_wq);10491050hl_asid_fini(hdev);10511052if (hdev->asic_funcs->early_fini)1053hdev->asic_funcs->early_fini(hdev);1054}10551056static bool is_pci_link_healthy(struct hl_device *hdev)1057{1058u16 device_id;10591060if (!hdev->pdev)1061return false;10621063pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id);10641065return (device_id == hdev->pdev->device);1066}10671068static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)1069{1070struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;1071u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;1072struct asic_fixed_properties *prop = &hdev->asic_prop;10731074if (!prop->cpucp_info.eq_health_check_supported)1075return true;10761077if (!hdev->eq_heartbeat_received) {1078dev_err(hdev->dev, "EQ heartbeat event was not received!\n");10791080dev_err(hdev->dev,1081"EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",1082hdev->event_queue.ci,1083heartbeat_debug_info->heartbeat_event_counter,1084&hdev->heartbeat_debug_info.last_eq_heartbeat_ts,1085hdev->kernel_queues[cpu_q_id].pi,1086atomic_read(&hdev->kernel_queues[cpu_q_id].ci),1087atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,1088&hdev->heartbeat_debug_info.last_pq_heartbeat_ts);10891090hl_eq_dump(hdev, &hdev->event_queue);10911092return false;1093}10941095hdev->eq_heartbeat_received = false;10961097return true;1098}10991100static void hl_device_heartbeat(struct work_struct *work)1101{1102struct hl_device *hdev = container_of(work, struct hl_device,1103work_heartbeat.work);1104struct hl_info_fw_err_info info = {0};1105u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;11061107/* Start heartbeat checks only after driver has enabled events from FW */1108if (!hl_device_operational(hdev, NULL) || !hdev->init_done)1109goto reschedule;11101111/*1112* For EQ health check need to check if driver received the heartbeat eq event1113* in order to validate the eq is working.1114* Only if both the EQ is healthy and we managed to send the next heartbeat reschedule.1115*/1116if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))1117goto reschedule;11181119if (hl_device_operational(hdev, NULL))1120dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",1121is_pci_link_healthy(hdev) ? "healthy" : "broken");11221123info.err_type = HL_INFO_FW_HEARTBEAT_ERR;1124info.event_mask = &event_mask;1125hl_handle_fw_err(hdev, &info);1126hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);11271128return;11291130reschedule:1131/*1132* prev_reset_trigger tracks consecutive fatal h/w errors until first1133* heartbeat immediately post reset.1134* If control reached here, then at least one heartbeat work has been1135* scheduled since last reset/init cycle.1136* So if the device is not already in reset cycle, reset the flag1137* prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR1138* status for at least one heartbeat. From this point driver restarts1139* tracking future consecutive fatal errors.1140*/1141if (!hdev->reset_info.in_reset)1142hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;11431144schedule_delayed_work(&hdev->work_heartbeat,1145usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));1146}11471148/*1149* device_late_init - do late stuff initialization for the habanalabs device1150*1151* @hdev: pointer to habanalabs device structure1152*1153* Do stuff that either needs the device H/W queues to be active or needs1154* to happen after all the rest of the initialization is finished1155*/1156static int device_late_init(struct hl_device *hdev)1157{1158int rc;11591160if (hdev->asic_funcs->late_init) {1161rc = hdev->asic_funcs->late_init(hdev);1162if (rc) {1163dev_err(hdev->dev,1164"failed late initialization for the H/W\n");1165return rc;1166}1167}11681169hdev->high_pll = hdev->asic_prop.high_pll;1170hdev->late_init_done = true;11711172return 0;1173}11741175/*1176* device_late_fini - finalize all that was done in device_late_init1177*1178* @hdev: pointer to habanalabs device structure1179*1180*/1181static void device_late_fini(struct hl_device *hdev)1182{1183if (!hdev->late_init_done)1184return;11851186if (hdev->asic_funcs->late_fini)1187hdev->asic_funcs->late_fini(hdev);11881189hdev->late_init_done = false;1190}11911192int hl_device_utilization(struct hl_device *hdev, u32 *utilization)1193{1194u64 max_power, curr_power, dc_power, dividend, divisor;1195int rc;11961197max_power = hdev->max_power;1198dc_power = hdev->asic_prop.dc_power_default;1199divisor = max_power - dc_power;1200if (!divisor) {1201dev_warn(hdev->dev, "device utilization is not supported\n");1202return -EOPNOTSUPP;1203}1204rc = hl_fw_cpucp_power_get(hdev, &curr_power);12051206if (rc)1207return rc;12081209curr_power = clamp(curr_power, dc_power, max_power);12101211dividend = (curr_power - dc_power) * 100;1212*utilization = (u32) div_u64(dividend, divisor);12131214return 0;1215}12161217int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)1218{1219int rc = 0;12201221mutex_lock(&hdev->debug_lock);12221223if (!enable) {1224if (!hdev->in_debug) {1225dev_err(hdev->dev,1226"Failed to disable debug mode because device was not in debug mode\n");1227rc = -EFAULT;1228goto out;1229}12301231if (!hdev->reset_info.hard_reset_pending)1232hdev->asic_funcs->halt_coresight(hdev, ctx);12331234hdev->in_debug = 0;12351236goto out;1237}12381239if (hdev->in_debug) {1240dev_err(hdev->dev,1241"Failed to enable debug mode because device is already in debug mode\n");1242rc = -EFAULT;1243goto out;1244}12451246hdev->in_debug = 1;12471248out:1249mutex_unlock(&hdev->debug_lock);12501251return rc;1252}12531254static void take_release_locks(struct hl_device *hdev)1255{1256/* Flush anyone that is inside the critical section of enqueue1257* jobs to the H/W1258*/1259hdev->asic_funcs->hw_queues_lock(hdev);1260hdev->asic_funcs->hw_queues_unlock(hdev);12611262/* Flush processes that are sending message to CPU */1263mutex_lock(&hdev->send_cpu_message_lock);1264mutex_unlock(&hdev->send_cpu_message_lock);12651266/* Flush anyone that is inside device open */1267mutex_lock(&hdev->fpriv_list_lock);1268mutex_unlock(&hdev->fpriv_list_lock);1269mutex_lock(&hdev->fpriv_ctrl_list_lock);1270mutex_unlock(&hdev->fpriv_ctrl_list_lock);1271}12721273static void hl_abort_waiting_for_completions(struct hl_device *hdev)1274{1275hl_abort_waiting_for_cs_completions(hdev);12761277/* Release all pending user interrupts, each pending user interrupt1278* holds a reference to a user context.1279*/1280hl_release_pending_user_interrupts(hdev);1281}12821283static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,1284bool skip_wq_flush)1285{1286if (hard_reset) {1287if (hdev->heartbeat)1288cancel_delayed_work_sync(&hdev->work_heartbeat);12891290device_late_fini(hdev);1291}12921293/*1294* Halt the engines and disable interrupts so we won't get any more1295* completions from H/W and we won't have any accesses from the1296* H/W to the host machine1297*/1298hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);12991300/* Go over all the queues, release all CS and their jobs */1301hl_cs_rollback_all(hdev, skip_wq_flush);13021303/* flush the MMU prefetch workqueue */1304flush_workqueue(hdev->prefetch_wq);13051306hl_abort_waiting_for_completions(hdev);1307}13081309/*1310* hl_device_suspend - initiate device suspend1311*1312* @hdev: pointer to habanalabs device structure1313*1314* Puts the hw in the suspend state (all asics).1315* Returns 0 for success or an error on failure.1316* Called at driver suspend.1317*/1318int hl_device_suspend(struct hl_device *hdev)1319{1320int rc;13211322pci_save_state(hdev->pdev);13231324/* Block future CS/VM/JOB completion operations */1325spin_lock(&hdev->reset_info.lock);1326if (hdev->reset_info.in_reset) {1327spin_unlock(&hdev->reset_info.lock);1328dev_err(hdev->dev, "Can't suspend while in reset\n");1329return -EIO;1330}1331hdev->reset_info.in_reset = 1;1332spin_unlock(&hdev->reset_info.lock);13331334/* This blocks all other stuff that is not blocked by in_reset */1335hdev->disabled = true;13361337take_release_locks(hdev);13381339rc = hdev->asic_funcs->suspend(hdev);1340if (rc)1341dev_err(hdev->dev,1342"Failed to disable PCI access of device CPU\n");13431344/* Shut down the device */1345pci_disable_device(hdev->pdev);1346pci_set_power_state(hdev->pdev, PCI_D3hot);13471348return 0;1349}13501351/*1352* hl_device_resume - initiate device resume1353*1354* @hdev: pointer to habanalabs device structure1355*1356* Bring the hw back to operating state (all asics).1357* Returns 0 for success or an error on failure.1358* Called at driver resume.1359*/1360int hl_device_resume(struct hl_device *hdev)1361{1362int rc;13631364pci_set_power_state(hdev->pdev, PCI_D0);1365pci_restore_state(hdev->pdev);1366rc = pci_enable_device_mem(hdev->pdev);1367if (rc) {1368dev_err(hdev->dev,1369"Failed to enable PCI device in resume\n");1370return rc;1371}13721373pci_set_master(hdev->pdev);13741375rc = hdev->asic_funcs->resume(hdev);1376if (rc) {1377dev_err(hdev->dev, "Failed to resume device after suspend\n");1378goto disable_device;1379}138013811382/* 'in_reset' was set to true during suspend, now we must clear it in order1383* for hard reset to be performed1384*/1385spin_lock(&hdev->reset_info.lock);1386hdev->reset_info.in_reset = 0;1387spin_unlock(&hdev->reset_info.lock);13881389rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);1390if (rc) {1391dev_err(hdev->dev, "Failed to reset device during resume\n");1392goto disable_device;1393}13941395return 0;13961397disable_device:1398pci_disable_device(hdev->pdev);13991400return rc;1401}14021403static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)1404{1405struct task_struct *task = NULL;1406struct list_head *hpriv_list;1407struct hl_fpriv *hpriv;1408struct mutex *hpriv_lock;1409u32 pending_cnt;14101411hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;1412hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;14131414/* Giving time for user to close FD, and for processes that are inside1415* hl_device_open to finish1416*/1417if (!list_empty(hpriv_list))1418ssleep(1);14191420if (timeout) {1421pending_cnt = timeout;1422} else {1423if (hdev->process_kill_trial_cnt) {1424/* Processes have been already killed */1425pending_cnt = 1;1426goto wait_for_processes;1427} else {1428/* Wait a small period after process kill */1429pending_cnt = HL_PENDING_RESET_PER_SEC;1430}1431}14321433mutex_lock(hpriv_lock);14341435/* This section must be protected because we are dereferencing1436* pointers that are freed if the process exits1437*/1438list_for_each_entry(hpriv, hpriv_list, dev_node) {1439task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);1440if (task) {1441dev_info(hdev->dev, "Killing user process pid=%d\n",1442task_pid_nr(task));1443send_sig(SIGKILL, task, 1);1444usleep_range(1000, 10000);14451446put_task_struct(task);1447} else {1448dev_dbg(hdev->dev,1449"Can't get task struct for user process %d, process was killed from outside the driver\n",1450pid_nr(hpriv->taskpid));1451}1452}14531454mutex_unlock(hpriv_lock);14551456/*1457* We killed the open users, but that doesn't mean they are closed.1458* It could be that they are running a long cleanup phase in the driver1459* e.g. MMU unmappings, or running other long teardown flow even before1460* our cleanup.1461* Therefore we need to wait again to make sure they are closed before1462* continuing with the reset.1463*/14641465wait_for_processes:1466while ((!list_empty(hpriv_list)) && (pending_cnt)) {1467dev_dbg(hdev->dev,1468"Waiting for all unmap operations to finish before hard reset\n");14691470pending_cnt--;14711472ssleep(1);1473}14741475/* All processes exited successfully */1476if (list_empty(hpriv_list))1477return 0;14781479/* Give up waiting for processes to exit */1480if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)1481return -ETIME;14821483hdev->process_kill_trial_cnt++;14841485return -EBUSY;1486}14871488static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)1489{1490struct list_head *hpriv_list;1491struct hl_fpriv *hpriv;1492struct mutex *hpriv_lock;14931494hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;1495hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;14961497mutex_lock(hpriv_lock);1498list_for_each_entry(hpriv, hpriv_list, dev_node)1499hpriv->hdev = NULL;1500mutex_unlock(hpriv_lock);1501}15021503static void send_disable_pci_access(struct hl_device *hdev, u32 flags)1504{1505/* If reset is due to heartbeat, device CPU is no responsive in1506* which case no point sending PCI disable message to it.1507*/1508if ((flags & HL_DRV_RESET_HARD) &&1509!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {1510/* Disable PCI access from device F/W so he won't send1511* us additional interrupts. We disable MSI/MSI-X at1512* the halt_engines function and we can't have the F/W1513* sending us interrupts after that. We need to disable1514* the access here because if the device is marked1515* disable, the message won't be send. Also, in case1516* of heartbeat, the device CPU is marked as disable1517* so this message won't be sent1518*/1519if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))1520return;15211522/* disable_irq also generates sync irq, this verifies that last EQs are handled1523* before disabled is set. The IRQ will be enabled again in request_irq call.1524*/1525if (hdev->cpu_queues_enable)1526disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));1527}1528}15291530static void handle_reset_trigger(struct hl_device *hdev, u32 flags)1531{1532u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;15331534/* No consecutive mechanism when user context exists */1535if (hdev->is_compute_ctx_active)1536return;15371538/*1539* 'reset cause' is being updated here, because getting here1540* means that it's the 1st time and the last time we're here1541* ('in_reset' makes sure of it). This makes sure that1542* 'reset_cause' will continue holding its 1st recorded reason!1543*/1544if (flags & HL_DRV_RESET_HEARTBEAT) {1545hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;1546cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;1547} else if (flags & HL_DRV_RESET_TDR) {1548hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;1549cur_reset_trigger = HL_DRV_RESET_TDR;1550} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {1551hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;1552cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;1553} else {1554hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;1555}15561557/*1558* If reset cause is same twice, then reset_trigger_repeated1559* is set and if this reset is due to a fatal FW error1560* device is set to an unstable state.1561*/1562if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {1563hdev->reset_info.prev_reset_trigger = cur_reset_trigger;1564hdev->reset_info.reset_trigger_repeated = 0;1565} else {1566hdev->reset_info.reset_trigger_repeated = 1;1567}1568}15691570static void reset_heartbeat_debug_info(struct hl_device *hdev)1571{1572hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;1573hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;1574hdev->heartbeat_debug_info.heartbeat_event_counter = 0;1575}15761577static inline void device_heartbeat_schedule(struct hl_device *hdev)1578{1579if (!hdev->heartbeat)1580return;15811582reset_heartbeat_debug_info(hdev);15831584/*1585* Before scheduling the heartbeat driver will check if eq event has received.1586* for the first schedule we need to set the indication as true then for the next1587* one this indication will be true only if eq event was sent by FW.1588*/1589hdev->eq_heartbeat_received = true;15901591schedule_delayed_work(&hdev->work_heartbeat,1592usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));1593}15941595/*1596* hl_device_reset - reset the device1597*1598* @hdev: pointer to habanalabs device structure1599* @flags: reset flags.1600*1601* Block future CS and wait for pending CS to be enqueued1602* Call ASIC H/W fini1603* Flush all completions1604* Re-initialize all internal data structures1605* Call ASIC H/W init, late_init1606* Test queues1607* Enable device1608*1609* Returns 0 for success or an error on failure.1610*/1611int hl_device_reset(struct hl_device *hdev, u32 flags)1612{1613bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,1614schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;1615u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};1616struct hl_ctx *ctx;1617int i, rc, hw_fini_rc;16181619if (!hdev->init_done) {1620dev_err(hdev->dev, "Can't reset before initialization is done\n");1621return 0;1622}16231624hard_reset = !!(flags & HL_DRV_RESET_HARD);1625from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);1626fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);1627from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);1628delay_reset = !!(flags & HL_DRV_RESET_DELAY);1629from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);1630reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;16311632if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {1633dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");1634return 0;1635}16361637if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {1638dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");1639hard_reset = true;1640}16411642if (reset_upon_device_release) {1643if (hard_reset) {1644dev_crit(hdev->dev,1645"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");1646return -EINVAL;1647}16481649goto do_reset;1650}16511652if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {1653dev_dbg(hdev->dev,1654"asic doesn't allow inference soft reset - do hard-reset instead\n");1655hard_reset = true;1656}16571658do_reset:1659/* Re-entry of reset thread */1660if (from_hard_reset_thread && hdev->process_kill_trial_cnt)1661goto kill_processes;16621663/*1664* Prevent concurrency in this function - only one reset should be1665* done at any given time. We need to perform this only if we didn't1666* get here from a dedicated hard reset thread.1667*/1668if (!from_hard_reset_thread) {1669/* Block future CS/VM/JOB completion operations */1670spin_lock(&hdev->reset_info.lock);1671if (hdev->reset_info.in_reset) {1672/* We allow scheduling of a hard reset only during a compute reset */1673if (hard_reset && hdev->reset_info.in_compute_reset)1674hdev->reset_info.hard_reset_schedule_flags = flags;1675spin_unlock(&hdev->reset_info.lock);1676return 0;1677}16781679/* This still allows the completion of some KDMA ops1680* Update this before in_reset because in_compute_reset implies we are in reset1681*/1682hdev->reset_info.in_compute_reset = !hard_reset;16831684hdev->reset_info.in_reset = 1;16851686spin_unlock(&hdev->reset_info.lock);16871688/* Cancel the device release watchdog work if required.1689* In case of reset-upon-device-release while the release watchdog work is1690* scheduled due to a hard-reset, do hard-reset instead of compute-reset.1691*/1692if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {1693struct hl_device_reset_work *watchdog_work =1694&hdev->device_release_watchdog_work;16951696hdev->reset_info.watchdog_active = 0;1697if (!from_watchdog_thread)1698cancel_delayed_work_sync(&watchdog_work->reset_work);16991700if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {1701hdev->reset_info.in_compute_reset = 0;1702flags |= HL_DRV_RESET_HARD;1703flags &= ~HL_DRV_RESET_DEV_RELEASE;1704hard_reset = true;1705}1706}17071708if (delay_reset)1709usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);17101711escalate_reset_flow:1712handle_reset_trigger(hdev, flags);1713send_disable_pci_access(hdev, flags);17141715/* This also blocks future CS/VM/JOB completion operations */1716hdev->disabled = true;17171718take_release_locks(hdev);17191720if (hard_reset)1721dev_info(hdev->dev, "Going to reset device\n");1722else if (reset_upon_device_release)1723dev_dbg(hdev->dev, "Going to reset device after release by user\n");1724else1725dev_dbg(hdev->dev, "Going to reset engines of inference device\n");1726}17271728if ((hard_reset) && (!from_hard_reset_thread)) {1729hdev->reset_info.hard_reset_pending = true;17301731hdev->process_kill_trial_cnt = 0;17321733hdev->device_reset_work.flags = flags;17341735/*1736* Because the reset function can't run from heartbeat work,1737* we need to call the reset function from a dedicated work.1738*/1739queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);17401741return 0;1742}17431744cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);17451746kill_processes:1747if (hard_reset) {1748/* Kill processes here after CS rollback. This is because the1749* process can't really exit until all its CSs are done, which1750* is what we do in cs rollback1751*/1752rc = device_kill_open_processes(hdev, 0, false);17531754if (rc == -EBUSY) {1755if (hdev->device_fini_pending) {1756dev_crit(hdev->dev,1757"%s Failed to kill all open processes, stopping hard reset\n",1758dev_name(&(hdev)->pdev->dev));1759goto out_err;1760}17611762/* signal reset thread to reschedule */1763return rc;1764}17651766if (rc) {1767dev_crit(hdev->dev,1768"%s Failed to kill all open processes, stopping hard reset\n",1769dev_name(&(hdev)->pdev->dev));1770goto out_err;1771}17721773/* Flush the Event queue workers to make sure no other thread is1774* reading or writing to registers during the reset1775*/1776flush_workqueue(hdev->eq_wq);1777}17781779/* Reset the H/W. It will be in idle state after this returns */1780hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);17811782if (hard_reset) {1783hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;17841785/* Release kernel context */1786if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)1787hdev->kernel_ctx = NULL;17881789hl_vm_fini(hdev);1790hl_mmu_fini(hdev);1791hl_eq_reset(hdev, &hdev->event_queue);1792}17931794/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */1795hl_hw_queue_reset(hdev, hard_reset);1796for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)1797hl_cq_reset(hdev, &hdev->completion_queue[i]);17981799/* Make sure the context switch phase will run again */1800ctx = hl_get_compute_ctx(hdev);1801if (ctx) {1802atomic_set(&ctx->thread_ctx_switch_token, 1);1803ctx->thread_ctx_switch_wait_token = 0;1804hl_ctx_put(ctx);1805}18061807if (hw_fini_rc) {1808rc = hw_fini_rc;1809goto out_err;1810}1811/* Finished tear-down, starting to re-initialize */18121813if (hard_reset) {1814hdev->device_cpu_disabled = false;1815hdev->reset_info.hard_reset_pending = false;18161817/*1818* Put the device in an unusable state if there are 2 back to back resets due to1819* fatal errors.1820*/1821if (hdev->reset_info.reset_trigger_repeated &&1822(hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR ||1823hdev->reset_info.prev_reset_trigger ==1824HL_DRV_RESET_HEARTBEAT)) {1825dev_crit(hdev->dev,1826"%s Consecutive fatal errors, stopping hard reset\n",1827dev_name(&(hdev)->pdev->dev));1828rc = -EIO;1829goto out_err;1830}18311832if (hdev->kernel_ctx) {1833dev_crit(hdev->dev,1834"%s kernel ctx was alive during hard reset, something is terribly wrong\n",1835dev_name(&(hdev)->pdev->dev));1836rc = -EBUSY;1837goto out_err;1838}18391840rc = hl_mmu_init(hdev);1841if (rc) {1842dev_err(hdev->dev,1843"Failed to initialize MMU S/W after hard reset\n");1844goto out_err;1845}18461847/* Allocate the kernel context */1848hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),1849GFP_KERNEL);1850if (!hdev->kernel_ctx) {1851rc = -ENOMEM;1852hl_mmu_fini(hdev);1853goto out_err;1854}18551856hdev->is_compute_ctx_active = false;18571858rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);1859if (rc) {1860dev_err(hdev->dev,1861"failed to init kernel ctx in hard reset\n");1862kfree(hdev->kernel_ctx);1863hdev->kernel_ctx = NULL;1864hl_mmu_fini(hdev);1865goto out_err;1866}1867}18681869/* Device is now enabled as part of the initialization requires1870* communication with the device firmware to get information that1871* is required for the initialization itself1872*/1873hdev->disabled = false;18741875/* F/W security enabled indication might be updated after hard-reset */1876if (hard_reset) {1877rc = hl_fw_read_preboot_status(hdev);1878if (rc)1879goto out_err;1880}18811882rc = hdev->asic_funcs->hw_init(hdev);1883if (rc) {1884dev_err(hdev->dev, "failed to initialize the H/W after reset\n");1885goto out_err;1886}18871888/* If device is not idle fail the reset process */1889if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,1890HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {1891print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);1892rc = -EIO;1893goto out_err;1894}18951896/* Check that the communication with the device is working */1897rc = hdev->asic_funcs->test_queues(hdev);1898if (rc) {1899dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");1900goto out_err;1901}19021903if (hard_reset) {1904rc = device_late_init(hdev);1905if (rc) {1906dev_err(hdev->dev, "Failed late init after hard reset\n");1907goto out_err;1908}19091910rc = hl_vm_init(hdev);1911if (rc) {1912dev_err(hdev->dev, "Failed to init memory module after hard reset\n");1913goto out_err;1914}19151916if (!hdev->asic_prop.fw_security_enabled)1917hl_fw_set_max_power(hdev);1918} else {1919rc = hdev->asic_funcs->compute_reset_late_init(hdev);1920if (rc) {1921if (reset_upon_device_release)1922dev_err(hdev->dev,1923"Failed late init in reset after device release\n");1924else1925dev_err(hdev->dev, "Failed late init after compute reset\n");1926goto out_err;1927}1928}19291930rc = hdev->asic_funcs->scrub_device_mem(hdev);1931if (rc) {1932dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);1933goto out_err;1934}19351936spin_lock(&hdev->reset_info.lock);1937hdev->reset_info.in_compute_reset = 0;19381939/* Schedule hard reset only if requested and if not already in hard reset.1940* We keep 'in_reset' enabled, so no other reset can go in during the hard1941* reset schedule1942*/1943if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)1944schedule_hard_reset = true;1945else1946hdev->reset_info.in_reset = 0;19471948spin_unlock(&hdev->reset_info.lock);19491950hdev->reset_info.needs_reset = false;19511952if (hard_reset)1953dev_info(hdev->dev,1954"Successfully finished resetting the %s device\n",1955dev_name(&(hdev)->pdev->dev));1956else1957dev_dbg(hdev->dev,1958"Successfully finished resetting the %s device\n",1959dev_name(&(hdev)->pdev->dev));19601961if (hard_reset) {1962hdev->reset_info.hard_reset_cnt++;19631964device_heartbeat_schedule(hdev);19651966/* After reset is done, we are ready to receive events from1967* the F/W. We can't do it before because we will ignore events1968* and if those events are fatal, we won't know about it and1969* the device will be operational although it shouldn't be1970*/1971hdev->asic_funcs->enable_events_from_fw(hdev);1972} else {1973if (!reset_upon_device_release)1974hdev->reset_info.compute_reset_cnt++;19751976if (schedule_hard_reset) {1977dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");1978flags = hdev->reset_info.hard_reset_schedule_flags;1979hdev->reset_info.hard_reset_schedule_flags = 0;1980hard_reset = true;1981goto escalate_reset_flow;1982}1983}19841985return 0;19861987out_err:1988hdev->disabled = true;19891990spin_lock(&hdev->reset_info.lock);1991hdev->reset_info.in_compute_reset = 0;19921993if (hard_reset) {1994dev_err(hdev->dev,1995"%s Failed to reset! Device is NOT usable\n",1996dev_name(&(hdev)->pdev->dev));1997hdev->reset_info.hard_reset_cnt++;1998} else {1999if (reset_upon_device_release) {2000dev_err(hdev->dev, "Failed to reset device after user release\n");2001flags &= ~HL_DRV_RESET_DEV_RELEASE;2002} else {2003dev_err(hdev->dev, "Failed to do compute reset\n");2004hdev->reset_info.compute_reset_cnt++;2005}20062007spin_unlock(&hdev->reset_info.lock);2008flags |= HL_DRV_RESET_HARD;2009hard_reset = true;2010goto escalate_reset_flow;2011}20122013hdev->reset_info.in_reset = 0;20142015spin_unlock(&hdev->reset_info.lock);20162017return rc;2018}20192020/*2021* hl_device_cond_reset() - conditionally reset the device.2022* @hdev: pointer to habanalabs device structure.2023* @reset_flags: reset flags.2024* @event_mask: events to notify user about.2025*2026* Conditionally reset the device, or alternatively schedule a watchdog work to reset the device2027* unless another reset precedes it.2028*/2029int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)2030{2031struct hl_ctx *ctx = NULL;20322033/* F/W reset cannot be postponed */2034if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)2035goto device_reset;20362037/* Device release watchdog is relevant only if user exists and gets a reset notification */2038if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {2039dev_err(hdev->dev, "Resetting device without a reset indication to user\n");2040goto device_reset;2041}20422043ctx = hl_get_compute_ctx(hdev);2044if (!ctx)2045goto device_reset;20462047/*2048* There is no point in postponing the reset if user is not registered for events.2049* However if no eventfd_ctx exists but the device release watchdog is already scheduled, it2050* just implies that user has unregistered as part of handling a previous event. In this2051* case an immediate reset is not required.2052*/2053if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)2054goto device_reset;20552056/* Schedule the device release watchdog work unless reset is already in progress or if the2057* work is already scheduled.2058*/2059spin_lock(&hdev->reset_info.lock);2060if (hdev->reset_info.in_reset) {2061spin_unlock(&hdev->reset_info.lock);2062goto device_reset;2063}20642065if (hdev->reset_info.watchdog_active) {2066hdev->device_release_watchdog_work.flags |= flags;2067goto out;2068}20692070hdev->device_release_watchdog_work.flags = flags;2071dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",2072hdev->device_release_watchdog_timeout_sec);2073schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,2074secs_to_jiffies(hdev->device_release_watchdog_timeout_sec));2075hdev->reset_info.watchdog_active = 1;2076out:2077spin_unlock(&hdev->reset_info.lock);20782079hl_notifier_event_send_all(hdev, event_mask);20802081hl_ctx_put(ctx);20822083hl_abort_waiting_for_completions(hdev);20842085return 0;20862087device_reset:2088if (event_mask)2089hl_notifier_event_send_all(hdev, event_mask);2090if (ctx)2091hl_ctx_put(ctx);20922093return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD);2094}20952096static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)2097{2098mutex_lock(¬ifier_event->lock);2099notifier_event->events_mask |= event_mask;21002101if (notifier_event->eventfd)2102eventfd_signal(notifier_event->eventfd);21032104mutex_unlock(¬ifier_event->lock);2105}21062107/*2108* hl_notifier_event_send_all - notify all user processes via eventfd2109*2110* @hdev: pointer to habanalabs device structure2111* @event_mask: the occurred event/s2112* Returns 0 for success or an error on failure.2113*/2114void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)2115{2116struct hl_fpriv *hpriv;21172118if (!event_mask) {2119dev_warn(hdev->dev, "Skip sending zero event");2120return;2121}21222123mutex_lock(&hdev->fpriv_list_lock);21242125list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)2126hl_notifier_event_send(&hpriv->notifier_event, event_mask);21272128mutex_unlock(&hdev->fpriv_list_lock);2129}21302131/*2132* hl_device_init - main initialization function for habanalabs device2133*2134* @hdev: pointer to habanalabs device structure2135*2136* Allocate an id for the device, do early initialization and then call the2137* ASIC specific initialization functions. Finally, create the cdev and the2138* Linux device to expose it to the user2139*/2140int hl_device_init(struct hl_device *hdev)2141{2142int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;2143struct hl_ts_free_jobs *free_jobs_data;2144bool expose_interfaces_on_err = false;2145void *p;21462147/* Initialize ASIC function pointers and perform early init */2148rc = device_early_init(hdev);2149if (rc)2150goto out_disabled;21512152user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +2153hdev->asic_prop.user_interrupt_count;21542155if (user_interrupt_cnt) {2156hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),2157GFP_KERNEL);2158if (!hdev->user_interrupt) {2159rc = -ENOMEM;2160goto early_fini;2161}21622163/* Timestamp records supported only if CQ supported in device */2164if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {2165for (i = 0 ; i < user_interrupt_cnt ; i++) {2166p = vzalloc(TIMESTAMP_FREE_NODES_NUM *2167sizeof(struct timestamp_reg_free_node));2168if (!p) {2169rc = -ENOMEM;2170goto free_usr_intr_mem;2171}2172free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;2173free_jobs_data->free_nodes_pool = p;2174free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;2175free_jobs_data->next_avail_free_node_idx = 0;2176}2177}2178}21792180free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;2181p = vzalloc(TIMESTAMP_FREE_NODES_NUM *2182sizeof(struct timestamp_reg_free_node));2183if (!p) {2184rc = -ENOMEM;2185goto free_usr_intr_mem;2186}21872188free_jobs_data->free_nodes_pool = p;2189free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;2190free_jobs_data->next_avail_free_node_idx = 0;21912192/*2193* Start calling ASIC initialization. First S/W then H/W and finally2194* late init2195*/2196rc = hdev->asic_funcs->sw_init(hdev);2197if (rc)2198goto free_common_usr_intr_mem;219922002201/* initialize completion structure for multi CS wait */2202hl_multi_cs_completion_init(hdev);22032204/*2205* Initialize the H/W queues. Must be done before hw_init, because2206* there the addresses of the kernel queue are being written to the2207* registers of the device2208*/2209rc = hl_hw_queues_create(hdev);2210if (rc) {2211dev_err(hdev->dev, "failed to initialize kernel queues\n");2212goto sw_fini;2213}22142215cq_cnt = hdev->asic_prop.completion_queues_count;22162217/*2218* Initialize the completion queues. Must be done before hw_init,2219* because there the addresses of the completion queues are being2220* passed as arguments to request_irq2221*/2222if (cq_cnt) {2223hdev->completion_queue = kcalloc(cq_cnt,2224sizeof(*hdev->completion_queue),2225GFP_KERNEL);22262227if (!hdev->completion_queue) {2228dev_err(hdev->dev,2229"failed to allocate completion queues\n");2230rc = -ENOMEM;2231goto hw_queues_destroy;2232}2233}22342235for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {2236rc = hl_cq_init(hdev, &hdev->completion_queue[i],2237hdev->asic_funcs->get_queue_id_for_cq(hdev, i));2238if (rc) {2239dev_err(hdev->dev,2240"failed to initialize completion queue\n");2241goto cq_fini;2242}2243hdev->completion_queue[i].cq_idx = i;2244}22452246hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,2247sizeof(struct hl_cs *), GFP_KERNEL);2248if (!hdev->shadow_cs_queue) {2249rc = -ENOMEM;2250goto cq_fini;2251}22522253/*2254* Initialize the event queue. Must be done before hw_init,2255* because there the address of the event queue is being2256* passed as argument to request_irq2257*/2258rc = hl_eq_init(hdev, &hdev->event_queue);2259if (rc) {2260dev_err(hdev->dev, "failed to initialize event queue\n");2261goto free_shadow_cs_queue;2262}22632264/* MMU S/W must be initialized before kernel context is created */2265rc = hl_mmu_init(hdev);2266if (rc) {2267dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");2268goto eq_fini;2269}22702271/* Allocate the kernel context */2272hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);2273if (!hdev->kernel_ctx) {2274rc = -ENOMEM;2275goto mmu_fini;2276}22772278hdev->is_compute_ctx_active = false;22792280hdev->asic_funcs->state_dump_init(hdev);22812282hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;22832284hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;22852286rc = hl_debugfs_device_init(hdev);2287if (rc) {2288dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");2289kfree(hdev->kernel_ctx);2290goto mmu_fini;2291}22922293/* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after2294* hl_debugfs_device_init().2295*/2296rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);2297if (rc) {2298dev_err(hdev->dev, "failed to initialize kernel context\n");2299kfree(hdev->kernel_ctx);2300goto debugfs_device_fini;2301}23022303rc = hl_cb_pool_init(hdev);2304if (rc) {2305dev_err(hdev->dev, "failed to initialize CB pool\n");2306goto release_ctx;2307}23082309rc = hl_dec_init(hdev);2310if (rc) {2311dev_err(hdev->dev, "Failed to initialize the decoder module\n");2312goto cb_pool_fini;2313}23142315/*2316* From this point, override rc (=0) in case of an error to allow debugging2317* (by adding char devices and creating sysfs/debugfs files as part of the error flow).2318*/2319expose_interfaces_on_err = true;23202321/* Device is now enabled as part of the initialization requires2322* communication with the device firmware to get information that2323* is required for the initialization itself2324*/2325hdev->disabled = false;23262327rc = hdev->asic_funcs->hw_init(hdev);2328if (rc) {2329dev_err(hdev->dev, "failed to initialize the H/W\n");2330rc = 0;2331goto out_disabled;2332}23332334/* Check that the communication with the device is working */2335rc = hdev->asic_funcs->test_queues(hdev);2336if (rc) {2337dev_err(hdev->dev, "Failed to detect if device is alive\n");2338rc = 0;2339goto out_disabled;2340}23412342rc = device_late_init(hdev);2343if (rc) {2344dev_err(hdev->dev, "Failed late initialization\n");2345rc = 0;2346goto out_disabled;2347}23482349dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",2350hdev->asic_name,2351hdev->asic_prop.dram_size / SZ_1G);23522353rc = hl_vm_init(hdev);2354if (rc) {2355dev_err(hdev->dev, "Failed to initialize memory module\n");2356rc = 0;2357goto out_disabled;2358}23592360/*2361* Expose devices and sysfs/debugfs files to user.2362* From here there is no need to expose them in case of an error.2363*/2364expose_interfaces_on_err = false;23652366rc = drm_dev_register(&hdev->drm, 0);2367if (rc) {2368dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);2369rc = 0;2370goto out_disabled;2371}23722373rc = cdev_sysfs_debugfs_add(hdev);2374if (rc) {2375dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");2376rc = 0;2377goto out_disabled;2378}23792380/* Need to call this again because the max power might change,2381* depending on card type for certain ASICs2382*/2383if (hdev->asic_prop.set_max_power_on_device_init &&2384!hdev->asic_prop.fw_security_enabled)2385hl_fw_set_max_power(hdev);23862387/*2388* hl_hwmon_init() must be called after device_late_init(), because only2389* there we get the information from the device about which2390* hwmon-related sensors the device supports.2391* Furthermore, it must be done after adding the device to the system.2392*/2393rc = hl_hwmon_init(hdev);2394if (rc) {2395dev_err(hdev->dev, "Failed to initialize hwmon\n");2396rc = 0;2397goto out_disabled;2398}23992400/* Scheduling the EQ heartbeat thread must come after driver is done with all2401* initializations, as we want to make sure the FW gets enough time to be prepared2402* to respond to heartbeat packets.2403*/2404device_heartbeat_schedule(hdev);24052406dev_notice(hdev->dev,2407"Successfully added device %s to habanalabs driver\n",2408dev_name(&(hdev)->pdev->dev));24092410/* After initialization is done, we are ready to receive events from2411* the F/W. We can't do it before because we will ignore events and if2412* those events are fatal, we won't know about it and the device will2413* be operational although it shouldn't be2414*/2415hdev->asic_funcs->enable_events_from_fw(hdev);24162417hdev->init_done = true;24182419return 0;24202421cb_pool_fini:2422hl_cb_pool_fini(hdev);2423release_ctx:2424if (hl_ctx_put(hdev->kernel_ctx) != 1)2425dev_err(hdev->dev,2426"kernel ctx is still alive on initialization failure\n");2427debugfs_device_fini:2428hl_debugfs_device_fini(hdev);2429mmu_fini:2430hl_mmu_fini(hdev);2431eq_fini:2432hl_eq_fini(hdev, &hdev->event_queue);2433free_shadow_cs_queue:2434kfree(hdev->shadow_cs_queue);2435cq_fini:2436for (i = 0 ; i < cq_ready_cnt ; i++)2437hl_cq_fini(hdev, &hdev->completion_queue[i]);2438kfree(hdev->completion_queue);2439hw_queues_destroy:2440hl_hw_queues_destroy(hdev);2441sw_fini:2442hdev->asic_funcs->sw_fini(hdev);2443free_common_usr_intr_mem:2444vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);2445free_usr_intr_mem:2446if (user_interrupt_cnt) {2447for (i = 0 ; i < user_interrupt_cnt ; i++) {2448if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)2449break;2450vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);2451}2452kfree(hdev->user_interrupt);2453}2454early_fini:2455device_early_fini(hdev);2456out_disabled:2457hdev->disabled = true;2458if (expose_interfaces_on_err) {2459drm_dev_register(&hdev->drm, 0);2460cdev_sysfs_debugfs_add(hdev);2461}24622463pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",2464hdev->cdev_idx, dev_name(&hdev->pdev->dev));24652466return rc;2467}24682469/*2470* hl_device_fini - main tear-down function for habanalabs device2471*2472* @hdev: pointer to habanalabs device structure2473*2474* Destroy the device, call ASIC fini functions and release the id2475*/2476void hl_device_fini(struct hl_device *hdev)2477{2478u32 user_interrupt_cnt;2479bool device_in_reset;2480ktime_t timeout;2481u64 reset_sec;2482int i, rc;24832484dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));24852486hdev->device_fini_pending = 1;2487flush_delayed_work(&hdev->device_reset_work.reset_work);24882489if (hdev->pldm)2490reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;2491else2492reset_sec = HL_HARD_RESET_MAX_TIMEOUT;24932494/*2495* This function is competing with the reset function, so try to2496* take the reset atomic and if we are already in middle of reset,2497* wait until reset function is finished. Reset function is designed2498* to always finish. However, in Gaudi, because of all the network2499* ports, the hard reset could take between 10-30 seconds2500*/25012502timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);25032504spin_lock(&hdev->reset_info.lock);2505device_in_reset = !!hdev->reset_info.in_reset;2506if (!device_in_reset)2507hdev->reset_info.in_reset = 1;2508spin_unlock(&hdev->reset_info.lock);25092510while (device_in_reset) {2511usleep_range(50, 200);25122513spin_lock(&hdev->reset_info.lock);2514device_in_reset = !!hdev->reset_info.in_reset;2515if (!device_in_reset)2516hdev->reset_info.in_reset = 1;2517spin_unlock(&hdev->reset_info.lock);25182519if (ktime_compare(ktime_get(), timeout) > 0) {2520dev_crit(hdev->dev,2521"%s Failed to remove device because reset function did not finish\n",2522dev_name(&(hdev)->pdev->dev));2523return;2524}2525}25262527cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);25282529/* Disable PCI access from device F/W so it won't send us additional2530* interrupts. We disable MSI/MSI-X at the halt_engines function and we2531* can't have the F/W sending us interrupts after that. We need to2532* disable the access here because if the device is marked disable, the2533* message won't be send. Also, in case of heartbeat, the device CPU is2534* marked as disable so this message won't be sent2535*/2536hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);25372538/* Mark device as disabled */2539hdev->disabled = true;25402541take_release_locks(hdev);25422543hdev->reset_info.hard_reset_pending = true;25442545hl_hwmon_fini(hdev);25462547cleanup_resources(hdev, true, false, false);25482549/* Kill processes here after CS rollback. This is because the process2550* can't really exit until all its CSs are done, which is what we2551* do in cs rollback2552*/2553dev_info(hdev->dev,2554"Waiting for all processes to exit (timeout of %u seconds)",2555HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);25562557hdev->process_kill_trial_cnt = 0;2558rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);2559if (rc) {2560dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);2561device_disable_open_processes(hdev, false);2562}25632564hdev->process_kill_trial_cnt = 0;2565rc = device_kill_open_processes(hdev, 0, true);2566if (rc) {2567dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);2568device_disable_open_processes(hdev, true);2569}25702571hl_cb_pool_fini(hdev);25722573/* Reset the H/W. It will be in idle state after this returns */2574rc = hdev->asic_funcs->hw_fini(hdev, true, false);2575if (rc)2576dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);25772578hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;25792580/* Release kernel context */2581if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))2582dev_err(hdev->dev, "kernel ctx is still alive\n");25832584hl_dec_fini(hdev);25852586hl_vm_fini(hdev);25872588hl_mmu_fini(hdev);25892590vfree(hdev->captured_err_info.page_fault_info.user_mappings);25912592hl_eq_fini(hdev, &hdev->event_queue);25932594kfree(hdev->shadow_cs_queue);25952596for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)2597hl_cq_fini(hdev, &hdev->completion_queue[i]);2598kfree(hdev->completion_queue);25992600user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +2601hdev->asic_prop.user_interrupt_count;26022603if (user_interrupt_cnt) {2604if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {2605for (i = 0 ; i < user_interrupt_cnt ; i++)2606vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);2607}26082609kfree(hdev->user_interrupt);2610}26112612vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);26132614hl_hw_queues_destroy(hdev);26152616/* Call ASIC S/W finalize function */2617hdev->asic_funcs->sw_fini(hdev);26182619device_early_fini(hdev);26202621/* Hide devices and sysfs/debugfs files from user */2622cdev_sysfs_debugfs_remove(hdev);2623drm_dev_unregister(&hdev->drm);26242625hl_debugfs_device_fini(hdev);26262627pr_info("removed device successfully\n");2628}26292630/*2631* MMIO register access helper functions.2632*/26332634/*2635* hl_rreg - Read an MMIO register2636*2637* @hdev: pointer to habanalabs device structure2638* @reg: MMIO register offset (in bytes)2639*2640* Returns the value of the MMIO register we are asked to read2641*2642*/2643inline u32 hl_rreg(struct hl_device *hdev, u32 reg)2644{2645u32 val = readl(hdev->rmmio + reg);26462647if (unlikely(trace_habanalabs_rreg32_enabled()))2648trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val);26492650return val;2651}26522653/*2654* hl_wreg - Write to an MMIO register2655*2656* @hdev: pointer to habanalabs device structure2657* @reg: MMIO register offset (in bytes)2658* @val: 32-bit value2659*2660* Writes the 32-bit value into the MMIO register2661*2662*/2663inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)2664{2665if (unlikely(trace_habanalabs_wreg32_enabled()))2666trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val);26672668writel(val, hdev->rmmio + reg);2669}26702671void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,2672u8 flags)2673{2674struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;26752676if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {2677dev_err(hdev->dev,2678"Number of possible razwi initiators (%u) exceeded limit (%u)\n",2679num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);2680return;2681}26822683/* In case it's the first razwi since the device was opened, capture its parameters */2684if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))2685return;26862687razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());2688razwi_info->razwi.addr = addr;2689razwi_info->razwi.num_of_possible_engines = num_of_engines;2690memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],2691num_of_engines * sizeof(u16));2692razwi_info->razwi.flags = flags;26932694razwi_info->razwi_info_available = true;2695}26962697void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,2698u8 flags, u64 *event_mask)2699{2700hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);27012702if (event_mask)2703*event_mask |= HL_NOTIFIER_EVENT_RAZWI;2704}27052706static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)2707{2708struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;2709struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;2710struct hl_vm_hash_node *hnode;2711struct hl_userptr *userptr;2712enum vm_type *vm_type;2713struct hl_ctx *ctx;2714u32 map_idx = 0;2715int i;27162717/* Reset previous session count*/2718pgf_info->num_of_user_mappings = 0;27192720ctx = hl_get_compute_ctx(hdev);2721if (!ctx) {2722dev_err(hdev->dev, "Can't get user context for user mappings\n");2723return;2724}27252726mutex_lock(&ctx->mem_hash_lock);2727hash_for_each(ctx->mem_hash, i, hnode, node) {2728vm_type = hnode->ptr;2729if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||2730((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))2731pgf_info->num_of_user_mappings++;27322733}27342735if (!pgf_info->num_of_user_mappings)2736goto finish;27372738/* In case we already allocated in previous session, need to release it before2739* allocating new buffer.2740*/2741vfree(pgf_info->user_mappings);2742pgf_info->user_mappings =2743vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));2744if (!pgf_info->user_mappings) {2745pgf_info->num_of_user_mappings = 0;2746goto finish;2747}27482749hash_for_each(ctx->mem_hash, i, hnode, node) {2750vm_type = hnode->ptr;2751if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {2752userptr = hnode->ptr;2753pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;2754pgf_info->user_mappings[map_idx].size = userptr->size;2755map_idx++;2756} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {2757phys_pg_pack = hnode->ptr;2758pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;2759pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;2760map_idx++;2761}2762}2763finish:2764mutex_unlock(&ctx->mem_hash_lock);2765hl_ctx_put(ctx);2766}27672768void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)2769{2770struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;27712772/* Capture only the first page fault */2773if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))2774return;27752776pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());2777pgf_info->page_fault.addr = addr;2778pgf_info->page_fault.engine_id = eng_id;2779hl_capture_user_mappings(hdev, is_pmmu);27802781pgf_info->page_fault_info_available = true;2782}27832784void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,2785u64 *event_mask)2786{2787hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);27882789if (event_mask)2790*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;2791}27922793static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)2794{2795struct hw_err_info *info = &hdev->captured_err_info.hw_err;27962797/* Capture only the first HW err */2798if (atomic_cmpxchg(&info->event_detected, 0, 1))2799return;28002801info->event.timestamp = ktime_to_ns(ktime_get());2802info->event.event_id = event_id;28032804info->event_info_available = true;2805}28062807void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)2808{2809hl_capture_hw_err(hdev, event_id);28102811if (event_mask)2812*event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;2813}28142815static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)2816{2817struct fw_err_info *info = &hdev->captured_err_info.fw_err;28182819/* Capture only the first FW error */2820if (atomic_cmpxchg(&info->event_detected, 0, 1))2821return;28222823info->event.timestamp = ktime_to_ns(ktime_get());2824info->event.err_type = fw_info->err_type;2825if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)2826info->event.event_id = fw_info->event_id;28272828info->event_info_available = true;2829}28302831void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)2832{2833hl_capture_fw_err(hdev, info);28342835if (info->event_mask)2836*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;2837}28382839void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)2840{2841struct engine_err_info *info = &hdev->captured_err_info.engine_err;28422843/* Capture only the first engine error */2844if (atomic_cmpxchg(&info->event_detected, 0, 1))2845return;28462847info->event.timestamp = ktime_to_ns(ktime_get());2848info->event.engine_id = engine_id;2849info->event.error_count = error_count;2850info->event_info_available = true;2851}28522853void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)2854{2855vfree(captured_err_info->page_fault_info.user_mappings);2856memset(captured_err_info, 0, sizeof(struct hl_error_info));2857atomic_set(&captured_err_info->cs_timeout.write_enable, 1);2858captured_err_info->undef_opcode.write_enable = true;2859}28602861void hl_init_cpu_for_irq(struct hl_device *hdev)2862{2863#ifdef CONFIG_NUMA2864struct cpumask *available_mask = &hdev->irq_affinity_mask;2865int numa_node = hdev->pdev->dev.numa_node, i;2866static struct cpumask cpu_mask;28672868if (numa_node < 0)2869return;28702871if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) {2872dev_err(hdev->dev, "No available affinities in current numa node\n");2873return;2874}28752876/* Remove HT siblings */2877for_each_cpu(i, &cpu_mask)2878cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask);2879#endif2880}28812882void hl_set_irq_affinity(struct hl_device *hdev, int irq)2883{2884if (cpumask_empty(&hdev->irq_affinity_mask)) {2885dev_dbg(hdev->dev, "affinity mask is empty\n");2886return;2887}28882889if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))2890dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);2891}28922893void hl_eq_heartbeat_event_handle(struct hl_device *hdev)2894{2895hdev->heartbeat_debug_info.heartbeat_event_counter++;2896hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();2897hdev->eq_heartbeat_received = true;2898}28992900void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask)2901{2902struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling;2903ktime_t zero_time = ktime_set(0, 0);29042905mutex_lock(&clk_throttle->lock);29062907switch (event_type) {2908case EQ_EVENT_POWER_EVT_START:2909clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER;2910clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER;2911clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();2912clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;2913dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");2914break;29152916case EQ_EVENT_POWER_EVT_END:2917clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER;2918clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();2919dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");2920break;29212922case EQ_EVENT_THERMAL_EVT_START:2923clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL;2924clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL;2925clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();2926clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;2927*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;2928dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");2929break;29302931case EQ_EVENT_THERMAL_EVT_END:2932clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL;2933clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();2934*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;2935dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");2936break;29372938default:2939dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);2940break;2941}29422943mutex_unlock(&clk_throttle->lock);2944}294529462947