GitHub Repository: awilliam/linux-vfio
Path: blob/master/fs/ceph/mds_client.h
#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>

/*
 * Some lock dependencies:
 *
 * session->s_mutex
 *         mdsc->mutex
 *
 *         mdsc->snap_rwsem
 *
 *         inode->i_lock
 *                 mdsc->snap_flush_lock
 *                 mdsc->cap_delay_lock
 *
 */
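
/*
 * Illustrative nesting (annotation, not from the original header): a caller
 * that needs both the session and client mutexes takes them in the order
 * shown above, e.g.
 *
 *	mutex_lock(&session->s_mutex);
 *	mutex_lock(&mdsc->mutex);
 *	...
 *	mutex_unlock(&mdsc->mutex);
 *	mutex_unlock(&session->s_mutex);
 */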

struct ceph_fs_client;
struct ceph_cap;

/*
 * parsed info about a single inode. pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
        struct ceph_mds_reply_inode *in;
        struct ceph_dir_layout dir_layout;
        u32 symlink_len;
        char *symlink;
        u32 xattr_len;
        char *xattr_data;
};

/*
 * parsed info about an mds reply, including information about
 * either: 1) the target inode and/or its parent directory and dentry,
 * and directory contents (for readdir results), or
 * 2) the file range lock info (for fcntl F_GETLK results).
 */
struct ceph_mds_reply_info_parsed {
        struct ceph_mds_reply_head *head;

        /* trace */
        struct ceph_mds_reply_info_in diri, targeti;
        struct ceph_mds_reply_dirfrag *dirfrag;
        char *dname;
        u32 dname_len;
        struct ceph_mds_reply_lease *dlease;

        /* extra */
        union {
                /* for fcntl F_GETLK results */
                struct ceph_filelock *filelock_reply;

                /* for readdir results */
                struct {
                        struct ceph_mds_reply_dirfrag *dir_dir;
                        int dir_nr;
                        char **dir_dname;
                        u32 *dir_dname_len;
                        struct ceph_mds_reply_lease **dir_dlease;
                        struct ceph_mds_reply_info_in *dir_in;
                        u8 dir_complete, dir_end;
                };
        };

        /* encoded blob describing snapshot contexts for certain
           operations (e.g., open) */
        void *snapblob;
        int snapblob_len;
};


/*
 * cap releases are batched and sent to the MDS en masse.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -                      \
                                sizeof(struct ceph_mds_cap_release)) / \
                               sizeof(struct ceph_mds_cap_item))
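
/*
 * Illustrative arithmetic (annotation, assumed sizes): with a 4 KiB
 * PAGE_CACHE_SIZE, a small ceph_mds_cap_release header and a 24-byte
 * ceph_mds_cap_item, this works out to roughly 170 cap items per release
 * message, i.e. as many items as fit in one page after the header.
 */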


/*
 * state associated with each MDS<->client session
 */
enum {
        CEPH_MDS_SESSION_NEW = 1,
        CEPH_MDS_SESSION_OPENING = 2,
        CEPH_MDS_SESSION_OPEN = 3,
        CEPH_MDS_SESSION_HUNG = 4,
        CEPH_MDS_SESSION_CLOSING = 5,
        CEPH_MDS_SESSION_RESTARTING = 6,
        CEPH_MDS_SESSION_RECONNECTING = 7,
};
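
/*
 * Rough lifecycle (annotation; see mds_client.c for the authoritative
 * transitions): a session goes NEW -> OPENING -> OPEN; it may be marked
 * HUNG if renew requests go unanswered, moves to CLOSING when the client
 * shuts the session down (e.g. at unmount), and to RESTARTING/RECONNECTING
 * when an MDS fails over and the client replays its state to it.
 */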

struct ceph_mds_session {
        struct ceph_mds_client *s_mdsc;
        int s_mds;
        int s_state;
        unsigned long s_ttl;    /* time until mds kills us */
        u64 s_seq;              /* incoming msg seq # */
        struct mutex s_mutex;   /* serialize session messages */

        struct ceph_connection s_con;

        struct ceph_authorizer *s_authorizer;
        void *s_authorizer_buf, *s_authorizer_reply_buf;
        size_t s_authorizer_buf_len, s_authorizer_reply_buf_len;

        /* protected by s_cap_lock */
        spinlock_t s_cap_lock;
        u32 s_cap_gen;            /* inc each time we get mds stale msg */
        unsigned long s_cap_ttl;  /* when session caps expire */
        struct list_head s_caps;  /* all caps issued by this session */
        int s_nr_caps, s_trim_caps;
        int s_num_cap_releases;
        struct list_head s_cap_releases;      /* waiting cap_release messages */
        struct list_head s_cap_releases_done; /* ready to send */
        struct ceph_cap *s_cap_iterator;

        /* protected by mutex */
        struct list_head s_cap_flushing;  /* inodes w/ flushing caps */
        struct list_head s_cap_snaps_flushing;
        unsigned long s_renew_requested;  /* last time we sent a renew req */
        u64 s_renew_seq;

        atomic_t s_ref;
        struct list_head s_waiting;  /* waiting requests */
        struct list_head s_unsafe;   /* unsafe requests */
};

/*
 * modes of choosing which MDS to send a request to
 */
enum {
        USE_ANY_MDS,
        USE_RANDOM_MDS,
        USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
                                             struct ceph_mds_request *req);
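
/*
 * Illustrative callback (annotation; my_request_done is a hypothetical
 * name, not part of this header):
 *
 *	static void my_request_done(struct ceph_mds_client *mdsc,
 *				    struct ceph_mds_request *req)
 *	{
 *		// runs when the request completes; by then the reply has
 *		// been parsed into req->r_reply_info and req->r_err holds
 *		// any error
 *	}
 *
 * The callback is attached via req->r_callback before the request is
 * submitted.
 */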

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
        u64 r_tid;                   /* transaction id */
        struct rb_node r_node;
        struct ceph_mds_client *r_mdsc;

        int r_op;                    /* mds op code */

        /* operation on what? */
        struct inode *r_inode;              /* arg1 */
        struct dentry *r_dentry;            /* arg1 */
        struct dentry *r_old_dentry;        /* arg2: rename from or link from */
        char *r_path1, *r_path2;
        struct ceph_vino r_ino1, r_ino2;

        struct inode *r_locked_dir;         /* dir (if any) i_mutex locked by vfs */
        struct inode *r_target_inode;       /* resulting inode */

        struct mutex r_fill_mutex;

        union ceph_mds_request_args r_args;
        int r_fmode;        /* file mode, if expecting cap */
        uid_t r_uid;
        gid_t r_gid;

        /* for choosing which mds to send this request to */
        int r_direct_mode;
        u32 r_direct_hash;      /* choose dir frag based on this dentry hash */
        bool r_direct_is_hash;  /* true if r_direct_hash is valid */

        /* data payload is used for xattr ops */
        struct page **r_pages;
        int r_num_pages;
        int r_data_len;

        /* what caps shall we drop? */
        int r_inode_drop, r_inode_unless;
        int r_dentry_drop, r_dentry_unless;
        int r_old_dentry_drop, r_old_dentry_unless;
        struct inode *r_old_inode;
        int r_old_inode_drop, r_old_inode_unless;

        struct ceph_msg *r_request;  /* original request */
        int r_request_release_offset;
        struct ceph_msg *r_reply;
        struct ceph_mds_reply_info_parsed r_reply_info;
        int r_err;
        bool r_aborted;

        unsigned long r_timeout;  /* optional. jiffies */
        unsigned long r_started;  /* start time to measure timeout against */
        unsigned long r_request_started; /* start time for mds request only,
                                            used to measure lease durations */

        /* link unsafe requests to parent directory, for fsync */
        struct inode *r_unsafe_dir;
        struct list_head r_unsafe_dir_item;

        struct ceph_mds_session *r_session;

        int r_attempts;      /* resend attempts */
        int r_num_fwd;       /* number of forward attempts */
        int r_resend_mds;    /* mds to resend to next, if any */
        u32 r_sent_on_mseq;  /* cap mseq request was sent at */

        struct kref r_kref;
        struct list_head r_wait;
        struct completion r_completion;
        struct completion r_safe_completion;
        ceph_mds_request_callback_t r_callback;
        struct list_head r_unsafe_item;  /* per-session unsafe list item */
        bool r_got_unsafe, r_got_safe, r_got_result;

        bool r_did_prepopulate;
        u32 r_readdir_offset;

        struct ceph_cap_reservation r_caps_reservation;
        int r_num_caps;
};

/*
 * mds client state
 */
struct ceph_mds_client {
        struct ceph_fs_client *fsc;
        struct mutex mutex;          /* all nested structures */

        struct ceph_mdsmap *mdsmap;
        struct completion safe_umount_waiters;
        wait_queue_head_t session_close_wq;
        struct list_head waiting_for_map;

        struct ceph_mds_session **sessions;  /* NULL for mds if no session */
        int max_sessions;                    /* len of s_mds_sessions */
        int stopping;                        /* true if shutting down */

        /*
         * snap_rwsem will cover cap linkage into snaprealms, and
         * realm snap contexts. (later, we can do per-realm snap
         * contexts locks..) the empty list contains realms with no
         * references (implying they contain no inodes with caps) that
         * should be destroyed.
         */
        struct rw_semaphore snap_rwsem;
        struct rb_root snap_realms;
        struct list_head snap_empty;
        spinlock_t snap_empty_lock;  /* protect snap_empty */

        u64 last_tid;                      /* most recent mds request */
        struct rb_root request_tree;       /* pending mds requests */
        struct delayed_work delayed_work;  /* delayed work */
        unsigned long last_renew_caps;     /* last time we renewed our caps */
        struct list_head cap_delay_list;   /* caps with delayed release */
        spinlock_t cap_delay_lock;         /* protects cap_delay_list */
        struct list_head snap_flush_list;  /* cap_snaps ready to flush */
        spinlock_t snap_flush_lock;

        u64 cap_flush_seq;
        struct list_head cap_dirty;            /* inodes with dirty caps */
        struct list_head cap_dirty_migrating;  /* ...that are migrating... */
        int num_cap_flushing;                  /* # caps we are flushing */
        spinlock_t cap_dirty_lock;             /* protects above items */
        wait_queue_head_t cap_flushing_wq;

        /*
         * Cap reservations
         *
         * Maintain a global pool of preallocated struct ceph_caps, referenced
         * by struct ceph_caps_reservations. This ensures that we preallocate
         * memory needed to successfully process an MDS response. (If an MDS
         * sends us cap information and we fail to process it, we will have
         * problems due to the client and MDS being out of sync.)
         *
         * Reservations are 'owned' by a ceph_cap_reservation context.
         */
        spinlock_t caps_list_lock;
        struct list_head caps_list;  /* unused (reserved or unreserved) */
        int caps_total_count;        /* total caps allocated */
        int caps_use_count;          /* in use */
        int caps_reserve_count;      /* unused, reserved */
        int caps_avail_count;        /* unused, unreserved */
        int caps_min_count;          /* keep at least this many (unreserved) */
        spinlock_t dentry_lru_lock;
        struct list_head dentry_lru;
        int num_dentry;
};
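
/*
 * Annotation (not from the original header): the cap reservation counters
 * above are updated under caps_list_lock, and caps_total_count is expected
 * to equal caps_use_count + caps_reserve_count + caps_avail_count; the
 * reservation bookkeeping itself lives in fs/ceph/caps.c.
 */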

extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
        atomic_inc(&s->s_ref);
        return s;
}

extern void ceph_put_mds_session(struct ceph_mds_session *s);
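
/*
 * Usage sketch (annotation): callers that stash a session pointer take a
 * reference and drop it when done, e.g.
 *
 *	session = ceph_get_mds_session(mdsc->sessions[mds]);
 *	...
 *	ceph_put_mds_session(session);
 *
 * (mdsc->mutex is assumed held while indexing sessions[].)
 * ceph_get_mds_session() bumps s_ref; ceph_put_mds_session() tears the
 * session down once the last reference is dropped.
 */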

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
                             struct ceph_msg *msg, int mds);

extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
                                    struct inode *inode,
                                    struct dentry *dn, int mask);

extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);

extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
                                     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                                struct inode *dir,
                                struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
        kref_get(&req->r_kref);
}
extern void ceph_mdsc_release_request(struct kref *kref);
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
        kref_put(&req->r_kref, ceph_mdsc_release_request);
}
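
/*
 * Illustrative request lifecycle (annotation; modeled on callers elsewhere
 * in fs/ceph, with CEPH_MDS_OP_GETATTR as an assumed example op and req,
 * mdsc, inode, err as caller context):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_AUTH_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	igrab(inode);
 *	req->r_num_caps = 1;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 *
 * ceph_mdsc_do_request() submits the request and waits for the reply (or
 * for r_timeout, if set); the final put drops the kref taken at creation.
 */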

extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session);
extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
                                  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                                     struct inode *inode,
                                     struct dentry *dentry, char action,
                                     u32 seq);

extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
                                 struct ceph_msg *msg);

extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
                                                   struct ceph_mds_session *session);

#endif