GitHub Repository: torvalds/linux
Path: blob/master/io_uring/kbuf.h
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING = 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC = 2,
};

struct io_buffer_list {
	/*
	 * If the IOBL_BUF_RING flag is set, then buf_ring is used. If not, then
	 * these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	/* count of classic/legacy buffers in buffer list */
	int nbufs;

	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	struct io_mapped_region region;
};

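/* A classic (legacy) provided buffer, tracked on ->buf_list above. */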
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND = 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE = 2,
};

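/*
 * Arguments for selecting provided buffers into an iovec array, used by
 * io_buffers_select() and io_buffers_peek() below; KBUF_MODE_* flags go
 * in ->mode.
 */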
struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
	unsigned short buf_group;
	unsigned short partial_map;
};

struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
				  unsigned buf_group, unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      struct io_br_sel *sel, unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
		    struct io_br_sel *sel);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);

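/*
 * io_uring_register() handlers for IORING_REGISTER_PBUF_RING,
 * IORING_UNREGISTER_PBUF_RING and IORING_REGISTER_PBUF_STATUS.
 */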
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
			    int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);

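/*
 * Ring-provided buffers are only consumed when the completion is committed
 * via io_kbuf_commit(), so recycling one just clears the buffer flags on the
 * request; the buffer itself stays available in the ring.
 */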
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
					struct io_buffer_list *bl)
{
	if (bl) {
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

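/*
 * True if the request asked for buffer selection (REQ_F_BUFFER_SELECT) but
 * no buffer has been picked for it yet.
 */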
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

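/*
 * Return an unused selected buffer so it can be handed out again, unless the
 * request has opted out of recycling. Dispatches to the ring or legacy path
 * depending on how the buffer was selected.
 */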
static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
				   unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req, bl);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	return false;
}

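/*
 * Release the provided buffer(s) attached to a request and return the CQE
 * buffer flags to report to userspace, or 0 if no provided buffer was used.
 * io_put_kbufs() below is the multi-buffer variant.
 */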
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       struct io_buffer_list *bl)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, bl, len, 1);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					struct io_buffer_list *bl, int nbufs)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, bl, len, nbufs);
}
#endif