/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include "drm_hashtab.h"
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/bitmap.h>

struct ttm_bo_device;

struct drm_mm_node;


/**
 * struct ttm_placement
 *
 * @fpfn: first valid page frame number to put the object
 * @lpfn: last valid page frame number to put the object
 * @num_placement: number of preferred placements
 * @placement: preferred placements
 * @num_busy_placement: number of preferred placements when the buffer must be evicted
 * @busy_placement: preferred placements when the buffer must be evicted
 *
 * Structure indicating the placement you request for an object.
 */
struct ttm_placement {
	unsigned	fpfn;
	unsigned	lpfn;
	unsigned	num_placement;
	const uint32_t	*placement;
	unsigned	num_busy_placement;
	const uint32_t	*busy_placement;
};
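
/*
 * Illustrative sketch (not part of the original API): how the placement
 * and busy_placement arrays pair with their counts. The helper name is
 * hypothetical; the placement flag values themselves are defined
 * elsewhere (ttm_placement.h) and are simply passed in by the caller.
 */
static inline void ttm_placement_example_init(struct ttm_placement *place,
					      const uint32_t *flags,
					      unsigned num_flags)
{
	place->fpfn = 0;	/* no lower page-frame restriction */
	place->lpfn = 0;	/* 0 is conventionally "no upper limit" */
	place->num_placement = num_flags;
	place->placement = flags;
	/* fall back to the same domains when the buffer must be evicted */
	place->num_busy_placement = num_flags;
	place->busy_placement = flags;
}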

/**
 * struct ttm_bus_placement
 *
 * @addr: mapped virtual address
 * @base: bus base address
 * @is_iomem: is this io memory?
 * @size: size in bytes
 * @offset: offset from the base address
 * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
 * @io_reserved_count: Refcounting the number of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
struct ttm_bus_placement {
	void		*addr;
	unsigned long	base;
	unsigned long	size;
	unsigned long	offset;
	bool		is_iomem;
	bool		io_reserved_vm;
	uint64_t	io_reserved_count;
};


/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */

struct ttm_mem_reg {
	void *mm_node;
	unsigned long start;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
	struct ttm_bus_placement bus;
};

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_user: These are user-space memory areas that are made
 * available to the GPU by mapping the buffer pages into the GPU aperture
 * space. These buffers cannot be mmapped from the device address space.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 */

enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_user,
	ttm_bo_type_kernel
};

struct ttm_tt;

/**
 * struct ttm_buffer_object
 *
 * @bdev: Pointer to the buffer object device structure.
 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
 * buffers.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * Lru lists may keep one refcount, the delayed delete list keeps one, and
 * kref != 0 keeps one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @event_queue: Queue for processes waiting on buffer object status change.
 * @mem: structure describing current placement.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writers: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for the swap LRU list.
 * @val_seq: Sequence of the validation holding the @reserved lock.
 * Used to avoid starvation when many processes compete to validate the
 * buffer. This member is protected by the bo_device::lru_lock.
 * @seq_valid: The value of @val_seq is valid. This value is protected by
 * the bo_device::lru_lock.
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj_arg: Opaque argument to synchronization object function.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
 * @vm_rb: Rb node for the vm rb tree.
 * @vm_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 *
 * Base class for a TTM buffer object, which deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member, together with the API visibility of this object,
 * makes it possible to derive driver-specific types.
 */

struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	unsigned long buffer_start;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	uint64_t addr_space_offset;
	size_t acc_size;

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;
	wait_queue_head_t event_queue;

	/**
	 * Members protected by the bo::reserved lock.
	 */

	struct ttm_mem_reg mem;
	struct file *persistent_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	struct list_head io_reserve_lru;
	uint32_t val_seq;
	bool seq_valid;

	/**
	 * Members protected by the bdev::lru_lock
	 * only when written to.
	 */

	atomic_t reserved;

	/**
	 * Members protected by struct ttm_bo_device::fence_lock.
	 * In addition, setting sync_obj to anything else
	 * than NULL requires bo::reserved to be held. This allows for
	 * checking NULL while reserved but not holding the mentioned lock.
	 */

	void *sync_obj_arg;
	void *sync_obj;
	unsigned long priv_flags;

	/**
	 * Members protected by the bdev::vm_lock
	 */

	struct rb_node vm_rb;
	struct drm_mm_node *vm_node;


	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	unsigned long offset;
	uint32_t cur_placement;
};
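
/*
 * Illustrative sketch (not part of the original header): deriving a
 * driver-specific buffer object type, as the comment above describes.
 * The structure and helper below are hypothetical; a real driver would
 * also supply a destroy() callback that frees the enclosing structure.
 */
struct ttm_example_bo {
	struct ttm_buffer_object bo;	/* must remain the first member */
	/* driver-private state goes here */
};

static inline struct ttm_example_bo *
to_ttm_example_bo(struct ttm_buffer_object *bo)
{
	/* valid because @bo is the first member of struct ttm_example_bo */
	return (struct ttm_example_bo *)bo;
}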

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap	= 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap		= 2,
		ttm_bo_map_kmap		= 3,
		ttm_bo_map_premapped	= 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
	return bo;
}

/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
		       bool interruptible, bool no_wait);
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
			   bool interruptible, bool no_wait_reserve,
			   bool no_wait_gpu);
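
/*
 * Illustrative sketch (not part of the original API): moving a reserved
 * bo to a single preferred placement. The helper name is hypothetical
 * and the placement flag is supplied by the caller, since the flag
 * definitions live outside this header.
 */
static inline int ttm_bo_example_move(struct ttm_buffer_object *bo,
				      const uint32_t *flag)
{
	struct ttm_placement placement = {
		.fpfn = 0,
		.lpfn = 0,
		.num_placement = 1,
		.placement = flag,
		.num_busy_placement = 1,
		.busy_placement = flag,
	};

	/* The caller must already hold a reservation on @bo. */
	return ttm_bo_validate(bo, &placement, true, false, false);
}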

/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);
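
/*
 * Illustrative sketch (not part of the original API): the usual get/put
 * pattern around temporary use of a bo. The helper name is hypothetical.
 */
static inline void ttm_bo_example_get_put(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *tmp;

	tmp = ttm_bo_reference(bo);	/* take an extra refcount */
	/* ... use @tmp while the reference is held ... */
	ttm_bo_unref(&tmp);		/* drop it; @tmp is set to NULL */
}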


/**
 * ttm_bo_list_ref_sub
 *
 * @bo: The buffer object.
 * @count: The number of references with which to decrease @bo::list_kref.
 * @never_free: The refcount should not reach zero with this operation.
 *
 * Release @count lru list references to this buffer object.
 */
extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
				bool never_free);

/**
 * ttm_bo_add_to_lru
 *
 * @bo: The buffer object.
 *
 * Add this bo to the relevant mem type lru and, if it's backed by
 * system pages (ttms), to the swap list.
 * This function must be called with struct ttm_bo_global::lru_lock held, and
 * is typically called immediately prior to unreserving a bo.
 */
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_del_from_lru
 *
 * @bo: The buffer object.
 *
 * Remove this bo from all lru lists used to lookup and reserve an object.
 * This function must be called with struct ttm_bo_global::lru_lock held,
 * and is usually called immediately after the bo has been reserved to
 * avoid recursive reservation from lru lists.
 */
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);


/**
 * ttm_bo_lock_delayed_workqueue
 *
 * Prevent the delayed workqueue from running.
 * Returns
 * True if the workqueue was queued at the time.
 */
extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);

/**
 * ttm_bo_unlock_delayed_workqueue
 *
 * Allows the delayed workqueue to run.
 */
extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
					    int resched);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * blocking command submission that affects the buffer and
 * waiting for buffer idle. This lock is recursive.
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTARTSYS if interrupted by a signal.
 */

extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
 * ttm_bo_synccpu_write_release
 *
 * @bo: The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
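
/*
 * Illustrative sketch (not part of the original API): the typical
 * grab/release bracket around CPU access to a bo. The helper name is
 * hypothetical.
 */
static inline int ttm_bo_example_cpu_access(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_synccpu_write_grab(bo, false);	/* wait for idle */
	if (ret)
		return ret;

	/* ... touch the buffer contents from the CPU here ... */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}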

/**
 * ttm_bo_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * If a failure occurs, the function will call the @destroy function, or
 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
 * illegal and will likely cause memory corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */

extern int ttm_bo_init(struct ttm_bo_device *bdev,
		       struct ttm_buffer_object *bo,
		       unsigned long size,
		       enum ttm_bo_type type,
		       struct ttm_placement *placement,
		       uint32_t page_alignment,
		       unsigned long buffer_start,
		       bool interruptible,
		       struct file *persistent_swap_storage,
		       size_t acc_size,
		       void (*destroy) (struct ttm_buffer_object *));
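
/*
 * Illustrative sketch (not part of the original API): initializing a
 * pre-allocated, kernel-internal bo embedded in a driver structure.
 * The helper name is hypothetical; @destroy is whatever the driver uses
 * to free the enclosing object (or NULL to let TTM use kfree()), and
 * memory accounting (@acc_size) is omitted here for brevity.
 */
static inline int
ttm_bo_example_init_kernel_bo(struct ttm_bo_device *bdev,
			      struct ttm_buffer_object *bo,
			      unsigned long size,
			      struct ttm_placement *placement,
			      void (*destroy)(struct ttm_buffer_object *))
{
	return ttm_bo_init(bdev, bo, size, ttm_bo_type_kernel, placement,
			   1 /* page_alignment, in pages */,
			   0 /* buffer_start: no user backing */,
			   true /* interruptible */,
			   NULL /* persistent_swap_storage */,
			   0 /* acc_size: accounting skipped in this sketch */,
			   destroy);
}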
/**
 * ttm_bo_create
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
 * on that object. The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
 */

extern int ttm_bo_create(struct ttm_bo_device *bdev,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 unsigned long buffer_start,
			 bool interruptible,
			 struct file *persistent_swap_storage,
			 struct ttm_buffer_object **p_bo);
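
/*
 * Illustrative sketch (not part of the original API): creating a kernel
 * bo in one call. The helper name is hypothetical; @placement must
 * already be set up by the caller.
 */
static inline int ttm_bo_example_create(struct ttm_bo_device *bdev,
					unsigned long size,
					struct ttm_placement *placement,
					struct ttm_buffer_object **p_bo)
{
	return ttm_bo_create(bdev, size, ttm_bo_type_kernel, placement,
			     1 /* page_alignment, in pages */,
			     0 /* buffer_start */, true /* interruptible */,
			     NULL /* persistent_swap_storage */, p_bo);
}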

/**
 * ttm_bo_check_placement
 *
 * @bo: the buffer object.
 * @placement: placements
 *
 * Performs minimal validity checking on an intended change of
 * placement flags.
 * Returns
 * -EINVAL: Intended change is invalid or not allowed.
 */
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement);

/**
 * ttm_bo_init_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 * @p_size: size of the managed area in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			  unsigned long p_size);
/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should have been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
 * This is normally part of a VT switch or an
 * out-of-memory-space-due-to-fragmentation handler.
 * The caller must make sure that there are no other processes
 * currently validating buffers, and can do that by taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: Invalid or uninitialized memory type.
 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
 * evict a buffer.
 */

extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by the iowriteXX() and similar
 * functions.
 */

static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */

extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct ttm_bo_kmap_obj *map);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
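
/*
 * Illustrative sketch (not part of the original API): mapping the first
 * @num_pages pages of a bo, checking whether the mapping is io memory,
 * and unmapping again. The helper name is hypothetical.
 */
static inline int ttm_bo_example_kmap_probe(struct ttm_buffer_object *bo,
					    unsigned long num_pages)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	/*
	 * If is_iomem is true, access @virtual only through
	 * ioreadXX()/iowriteXX(); otherwise it is ordinary kernel memory.
	 */
	(void)virtual;

	ttm_bo_kunmap(&map);
	return 0;
}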

#if 0
#endif

/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
			  struct ttm_buffer_object *bo);

/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method,
 * if the device address space is to be backed by the bo manager.
 */

extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		       struct ttm_bo_device *bdev);

/**
 * ttm_bo_io
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @filp: Pointer to the struct file attempting to read / write.
 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
 * @rbuf: User-space pointer to address of buffer to read into.
 * NULL on write.
 * @count: Number of bytes to read / write.
 * @f_pos: Pointer to current file position.
 * @write: 1 for write, 0 for read.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to be called from the fops::read and fops::write methods.
 * Returns:
 * See man (2) write, man (2) read. In particular,
 * the function may return -ERESTARTSYS if
 * interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
			 const char __user *wbuf, char __user *rbuf,
			 size_t count, loff_t *f_pos, bool write);

extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

#endif