GitHub Repository: torvalds/linux
Path: blob/master/io_uring/kbuf.h
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING = 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC = 2,
};

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	/* count of classic/legacy buffers in buffer list */
	int nbufs;

	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	struct io_mapped_region region;
};
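
/*
 * A minimal sketch of how the union above is meant to be consumed,
 * following the discriminant the struct comment documents
 * (->buf_nr_pages). The branch itself is hypothetical; only the field
 * and type names come from this file and the UAPI header:
 *
 *	if (bl->buf_nr_pages) {
 *		// ring mapped: index the shared ring by head & mask
 *		struct io_uring_buf *b = &bl->buf_ring->bufs[bl->head & bl->mask];
 *	} else {
 *		// classic provided buffers: take the first list entry
 *		struct io_buffer *kbuf = list_first_entry(&bl->buf_list,
 *							  struct io_buffer, list);
 *	}
 */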

struct io_buffer {
	struct list_head list;
	__u64 addr;	/* userspace address of the buffer */
	__u32 len;	/* length of the buffer */
	__u16 bid;	/* buffer ID within the group */
	__u16 bgid;	/* buffer group ID */
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND = 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE = 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
	unsigned short buf_group;
	unsigned short partial_map;
};
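
/*
 * A minimal sketch of filling struct buf_sel_arg for io_buffers_select()
 * (declared below). The iovec array size, length cap and buffer group
 * are placeholder values, not taken from any particular opcode handler:
 *
 *	struct iovec iovs[8];
 *	struct buf_sel_arg arg = {
 *		.iovs		= iovs,
 *		.nr_iovs	= ARRAY_SIZE(iovs),
 *		.max_len	= INT_MAX,
 *		.buf_group	= bgid,
 *		.mode		= KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *	int ret = io_buffers_select(req, &arg, issue_flags);
 *	// ret: number of iovecs populated, or a negative error
 */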

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned buf_group, unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
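
/*
 * The three hooks above back io_uring_register(2). A minimal userspace
 * sketch of registering a provided buffer ring, using only UAPI types;
 * allocation and mapping of the ring memory (`br`, `ring_fd`) are elided:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (__u64)(unsigned long)br,
 *		.ring_entries	= 8,	// must be a power of two
 *		.bgid		= 0,	// buffer group id used in SQEs
 *	};
 *	if (syscall(__NR_io_uring_register, ring_fd,
 *		    IORING_REGISTER_PBUF_RING, &reg, 1) < 0)
 *		perror("IORING_REGISTER_PBUF_RING");
 */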

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

/* true if the request wants buffer selection and none is attached yet */
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
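
/*
 * A minimal sketch of the selection pattern an opcode handler follows
 * with the helper above; `len` and `bgid` are handler-local placeholders:
 *
 *	if (io_do_buffer_select(req)) {
 *		void __user *buf = io_buffer_select(req, &len, bgid,
 *						    issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *		// import buf/len into the request's data iterator
 *	}
 */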

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
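
/*
 * A minimal sketch of when io_kbuf_recycle() matters: if a handler
 * selected a buffer but has to retry, it hands the buffer back first so
 * nothing is leaked across the requeue (do_some_io() is hypothetical):
 *
 *	ret = do_some_io(req, issue_flags);
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */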

static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, len, 1);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, len, nbufs);
}
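
/*
 * The value returned by io_put_kbuf()/io_put_kbufs() is a CQE flags
 * word (IORING_CQE_F_BUFFER plus the buffer id shifted by
 * IORING_CQE_BUFFER_SHIFT in the UAPI). A minimal completion-side
 * sketch, assuming `ret` holds the transfer result:
 *
 *	io_req_set_res(req, ret, io_put_kbuf(req, ret, issue_flags));
 */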
#endif