Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
26517 views
1
/*
2
* Copyright 2014 Advanced Micro Devices, Inc.
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice shall be included in
12
* all copies or substantial portions of the Software.
13
*
14
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20
* OTHER DEALINGS IN THE SOFTWARE.
21
*/
22
23
/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
24
25
#ifndef AMDGPU_AMDKFD_H_INCLUDED
26
#define AMDGPU_AMDKFD_H_INCLUDED
27
28
#include <linux/list.h>
29
#include <linux/types.h>
30
#include <linux/mm.h>
31
#include <linux/kthread.h>
32
#include <linux/workqueue.h>
33
#include <linux/mmu_notifier.h>
34
#include <linux/memremap.h>
35
#include <kgd_kfd_interface.h>
36
#include <drm/drm_client.h>
37
#include "amdgpu_sync.h"
38
#include "amdgpu_vm.h"
39
#include "amdgpu_xcp.h"
40
41
extern uint64_t amdgpu_amdkfd_total_mem_size;
42
43
/* Granularity of the GPU TLB invalidation requested when changing VM
 * mappings. Exact behaviour of each level is ASIC-specific — confirm
 * against the per-ASIC flush implementations.
 */
enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};
48
49
struct amdgpu_device;
50
struct kfd_process_device;
51
struct amdgpu_reset_context;
52
53
/* How a KFD buffer object is attached/DMA-mapped for one specific GPU */
enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};
59
60
/* One per-device attachment of a kgd_mem buffer object. A single
 * kgd_mem may carry several of these (presumably linked on
 * kgd_mem->attachments — the list head matches; verify against users).
 */
struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;				/* currently mapped into the GPUVM */
	struct amdgpu_bo_va *bo_va;		/* per-VM BO association */
	struct amdgpu_device *adev;		/* device this attachment targets */
	uint64_t va;				/* GPU virtual address of the mapping */
	uint64_t pte_flags;			/* PTE flags used for this mapping */
};
69
70
/* A KFD-owned buffer object: wraps an amdgpu BO together with its
 * per-GPU attachments and the bookkeeping shared with the owning
 * KFD process (see struct amdkfd_process_info).
 */
struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	/* NOTE(review): presumably the HMM range for userptr BOs — confirm */
	struct hmm_range *range;
	struct list_head attachments;	/* list of struct kfd_mem_attachment */
	/* protected by amdkfd_process_info.lock */
	struct list_head validate_list;
	uint32_t domain;		/* memory domain to validate into */
	unsigned int mapped_to_gpu_memory;	/* number of GPU mappings */
	uint64_t va;			/* requested GPU virtual address */

	uint32_t alloc_flags;		/* flags passed at allocation time */

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	uint32_t gem_handle;		/* GEM handle in the KFD DRM client */
	bool aql_queue;
	bool is_imported;		/* BO was imported (e.g. via dmabuf) */
};
93
94
/* KFD Memory Eviction fence: a dma_fence used to trigger eviction of
 * a KFD process' BOs, tied to the process' address space (mm).
 */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;	/* address space the fence belongs to */
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;	/* non-NULL for SVM range BO fences */
};
102
103
/* KFD-related state carried per amdgpu device */
struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	int64_t vram_used[MAX_XCP];		/* VRAM accounting per partition */
	uint64_t vram_used_aligned[MAX_XCP];	/* as above, alignment-padded */
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;

	/* Client for KFD BO GEM handle allocations */
	struct drm_client_dev client;
};
116
117
/* HW engine identifiers, used e.g. to select which firmware version to
 * report (see amdgpu_amdkfd_get_fw_version()).
 */
enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};
128
129
130
/* Per-KFD-process bookkeeping shared by all of the process' VMs/BOs */
struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;			/* count of currently evicted BOs */
	/* deferred work to revalidate invalidated userptr BOs */
	struct delayed_work restore_userptr_work;
	struct pid *pid;			/* owning process */
	/* NOTE(review): appears to suppress MMU-notifier handling, e.g.
	 * around CRIU resume (see amdgpu_amdkfd_block_mmu_notifications /
	 * amdgpu_amdkfd_criu_resume below) — confirm against the .c file.
	 */
	bool block_mmu_notifications;
};
153
154
int amdgpu_amdkfd_init(void);
155
void amdgpu_amdkfd_fini(void);
156
157
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
158
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
159
void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
160
int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
161
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
162
const void *ih_ring_entry);
163
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
164
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
165
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
166
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
167
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
168
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
169
enum kgd_engine_type engine,
170
uint32_t vmid, uint64_t gpu_addr,
171
uint32_t *ib_cmd, uint32_t ib_len);
172
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
173
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
174
175
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
176
177
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev,
178
struct amdgpu_reset_context *reset_context);
179
180
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
181
182
void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);
183
184
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
185
int queue_bit);
186
187
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
188
struct mm_struct *mm,
189
struct svm_range_bo *svm_bo);
190
191
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
192
#if defined(CONFIG_DEBUG_FS)
193
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
194
#endif
195
#if IS_ENABLED(CONFIG_HSA_AMD)
196
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
197
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
198
void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
199
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
200
unsigned long cur_seq, struct kgd_mem *mem);
201
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
202
uint32_t domain,
203
struct dma_fence *fence);
204
#else
205
/* No-op stubs used when amdkfd is not built (!CONFIG_HSA_AMD) */
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}

static inline
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	return 0;
}
235
#endif
236
/* Shared API */
237
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
238
void **mem_obj, uint64_t *gpu_addr,
239
void **cpu_ptr, bool mqd_gfx9);
240
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
241
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
242
void **mem_obj);
243
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
244
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
245
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
246
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
247
enum kgd_engine_type type);
248
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
249
struct kfd_local_mem_info *mem_info,
250
struct amdgpu_xcp *xcp);
251
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);
252
253
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
254
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
255
struct amdgpu_device **dmabuf_adev,
256
uint64_t *bo_size, void *metadata_buffer,
257
size_t buffer_size, uint32_t *metadata_size,
258
uint32_t *flags, int8_t *xcp_id);
259
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
260
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
261
uint32_t *payload);
262
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
263
u32 inst);
264
int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
265
int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
266
int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
267
bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
268
bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
269
270
271
/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 *
 * Evaluates to true when *(wptr) was successfully copied into (dst).
 * Evaluates to false when mmptr/wptr is NULL, when get_user() fails,
 * or when mmptr is a foreign mm and the caller is not a kthread (only
 * kthreads may temporarily adopt another mm via kthread_use_mm()).
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->flags & PF_KTHREAD) { \
				kthread_use_mm(mmptr);		\
				valid = !get_user((dst), (wptr)); \
				kthread_unuse_mm(mmptr);	\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})
293
294
/* GPUVM API */
295
/* Convert the opaque drm_priv cookie (a struct drm_file *) into the
 * amdgpu_vm embedded in its driver-private amdgpu_fpriv.
 */
#define drm_priv_to_vm(drm_priv)				\
	(&((struct amdgpu_fpriv *)				\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
298
299
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
300
struct amdgpu_vm *avm,
301
void **process_info,
302
struct dma_fence **ef);
303
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
304
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
305
uint8_t xcp_id);
306
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
307
struct amdgpu_device *adev, uint64_t va, uint64_t size,
308
void *drm_priv, struct kgd_mem **mem,
309
uint64_t *offset, uint32_t flags, bool criu_resume);
310
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
311
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
312
uint64_t *size);
313
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
314
struct kgd_mem *mem, void *drm_priv);
315
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
316
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
317
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
318
int amdgpu_amdkfd_gpuvm_sync_memory(
319
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
320
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
321
void **kptr, uint64_t *size);
322
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
323
324
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);
325
326
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
327
struct dma_fence __rcu **ef);
328
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
329
struct kfd_vm_fault_info *info);
330
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
331
uint64_t va, void *drm_priv,
332
struct kgd_mem **mem, uint64_t *size,
333
uint64_t *mmap_offset);
334
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
335
struct dma_buf **dmabuf);
336
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
337
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
338
struct tile_config *config);
339
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
340
enum amdgpu_ras_block block, uint32_t reset);
341
342
void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
343
enum amdgpu_ras_block block, uint16_t pasid,
344
pasid_notify pasid_fn, void *data, uint32_t reset);
345
346
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
347
bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
348
void amdgpu_amdkfd_block_mmu_notifications(void *p);
349
int amdgpu_amdkfd_criu_resume(void *p);
350
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
351
uint64_t size, u32 alloc_flag, int8_t xcp_id);
352
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
353
uint64_t size, u32 alloc_flag, int8_t xcp_id);
354
355
u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);
356
357
/* Memory partition id backing the given compute partition (XCP), or -1
 * when the device has no XCP manager or xcp_id is invalid.
 */
#define KFD_XCP_MEM_ID(adev, xcp_id) \
		((adev)->xcp_mgr && (xcp_id) >= 0 ?\
		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

/* Memory size available to the given XCP on this device */
#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
362
363
364
#if IS_ENABLED(CONFIG_HSA_AMD)
365
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
366
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
367
struct amdgpu_vm *vm);
368
369
/**
370
 * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
371
*
372
* Allows KFD to release its resources associated with the GEM object.
373
*/
374
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
375
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
376
#else
377
/* No-op stubs used when amdkfd is not built (!CONFIG_HSA_AMD) */
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
392
#endif
393
394
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
395
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
396
#else
397
/* No-op stub when SVM support is not built (!CONFIG_HSA_AMD_SVM) */
static inline
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	return 0;
}
402
#endif
403
404
/* KGD2KFD callbacks */
405
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
406
int kgd2kfd_resume_mm(struct mm_struct *mm);
407
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
408
struct dma_fence *fence);
409
#if IS_ENABLED(CONFIG_HSA_AMD)
410
int kgd2kfd_init(void);
411
void kgd2kfd_exit(void);
412
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
413
bool kgd2kfd_device_init(struct kfd_dev *kfd,
414
const struct kgd2kfd_shared_resources *gpu_resources);
415
void kgd2kfd_device_exit(struct kfd_dev *kfd);
416
void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
417
int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
418
void kgd2kfd_suspend_process(struct kfd_dev *kfd);
419
int kgd2kfd_resume_process(struct kfd_dev *kfd);
420
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
421
struct amdgpu_reset_context *reset_context);
422
int kgd2kfd_post_reset(struct kfd_dev *kfd);
423
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
424
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
425
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
426
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
427
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
428
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
429
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
430
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
431
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
432
bool retry_fault);
433
434
#else
435
/* No-op KGD2KFD stubs used when amdkfd is not built (!CONFIG_HSA_AMD).
 * kgd2kfd_init() returns -ENOENT so callers can tell KFD is absent;
 * the remaining stubs report success, no device, or inactivity.
 */
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
				const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
	return 0;
}

static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
{
}

static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd,
				    struct amdgpu_reset_context *reset_context)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}

static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
{
	return 0;
}

static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
	return 0;
}

static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
	return false;
}

static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
					     bool retry_fault)
{
	return false;
}
534
535
#endif
536
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
537
538