GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/fs/nfsclient/nfs_clstate.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
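
/*
 * Illustrative sketch of the owner mapping described above (a summary,
 * not additional driver logic): both OpenOwner and LockOwner names are
 * fixed-size byte strings derived from the Posix pid, so code in this
 * file typically does, roughly:
 *
 *	u_int8_t own[NFSV4CL_LOCKNAMELEN];
 *
 *	nfscl_filllockowner(p->td_proc, own, F_POSIX);
 *
 * Two threads of the same process fill in the same owner name, which
 * is what serializes their Open/Close/Lock Ops on that owner.
 */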

#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */

static int nfscl_getopen(struct nfsclownerhead *, struct nfsclopenhash *,
    u_int8_t *, int, u_int8_t *, u_int8_t *, u_int32_t,
    struct nfscllockowner **, struct nfsclopen **);
static bool nfscl_checkown(struct nfsclowner *, struct nfsclopen *, uint8_t *,
    uint8_t *, struct nfscllockowner **, struct nfsclopen **,
    struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_unlinkopen(struct nfsclopen *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
    NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *,
    struct nfscldeleghead *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **, struct nfscllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *,
    bool);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int,
    vnode_t *);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};
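
/*
 * Note on the tables above (a summary, not additional logic): each
 * zero-terminated array lists the error values that may be replied for
 * a given callback procedure; nfscl_errmap(), prototyped above and
 * defined later in this file, selects the table via nfscl_cberrmap[]
 * indexed by the callback operation.
 */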

#define	NETFAMILY(clp) \
	(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit, bool firstref)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL) {
		nop = malloc(sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
		nop->nfso_hash.le_prev = NULL;
	}
	ret = nfscl_getcl(vp->v_mount, cred, p, false, firstref, &clp);
	if (ret != 0) {
		free(nowp, M_NFSCLOWNER);
		if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	/* For NFSv4.1/4.2 and this option, use a single open_owner. */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
		nfscl_filllockowner(NULL, own, F_POSIX);
	else
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    cred, newonep);

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock. The close operations still use an
	 * exclusive lock for this case.
	 */
	if (lockit != 0) {
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
			/*
			 * Get a shared lock on the OpenOwner, but first
			 * wait for any pending exclusive lock, so that the
			 * exclusive locker gets priority.
			 */
			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
			nfsv4_getref(&owp->nfsow_rwlock, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} else
			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	}
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	return (0);
}
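
/*
 * A minimal caller sketch for nfscl_open(), assuming the usual pattern
 * in nfs_clrpcops.c (simplified here): reserve the owner/open, perform
 * the over-the-wire Open only when told the mode must be added, then
 * release the openowner:
 *
 *	error = nfscl_open(vp, nfhp, fhlen, amode, 1, cred, p, &owp, &op,
 *	    &newone, &ret, 1, true);
 *	if (error == 0 && ret == NFSCLOPEN_DOOPEN)
 *		... do the Open RPC against the server ...
 *	nfscl_openrelease(nmp, op, error, newone);
 */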

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
			} else {
				LIST_INSERT_HEAD(NFSCLOPENHASH(clp, fhp, fhlen),
				    nop, nfso_hash);
				nfsstatsv1.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}

/*
 * Called to find/add a delegation to a client.
 */
int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg *dp)
{
	struct nfscldeleg *tdp;
	struct nfsmount *nmp;

	KASSERT(mp != NULL, ("nfscl_deleg: mp NULL"));
	nmp = VFSTONFS(mp);

	/*
	 * Since a delegation might be added to the mount,
	 * set NFSMNTP_DELEGISSUED now. If a delegation already
	 * exists, setting this flag is harmless.
	 */
	NFSLOCKMNT(nmp);
	nmp->nm_privflag |= NFSMNTP_DELEGISSUED;
	NFSUNLOCKMNT(nmp);

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		clp->nfsc_delegcnt++;
	} else {
		/*
		 * A delegation already exists. If the new one is a Write
		 * delegation and the old one a Read delegation, return the
		 * Read delegation. Otherwise, return the new delegation.
		 */
		if (dp != NULL) {
			if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0 &&
			    (tdp->nfsdl_flags & NFSCLDL_READ) != 0) {
				TAILQ_REMOVE(&clp->nfsc_deleg, tdp, nfsdl_list);
				LIST_REMOVE(tdp, nfsdl_hash);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp,
				    fhlen), dp, nfsdl_hash);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			} else {
				tdp = dp;	/* Return this one. */
			}
		} else {
			tdp = NULL;
		}
	}
	NFSUNLOCKCLSTATE();
	if (tdp != NULL) {
		nfscl_trydelegreturn(tdp, cred, nmp, p);
		free(tdp, M_NFSCLDELEG);
	}
	return (0);
}
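
/*
 * Worked example for the replacement case above: if the client already
 * holds a Read delegation for the file and the server now issues a
 * Write delegation, the Read delegation is unlinked and handed to
 * nfscl_trydelegreturn() to be returned, while the new Write delegation
 * takes its place on the lists. In every other "already exists" case,
 * the newly issued delegation is the one returned to the server.
 */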

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclopen *op = NULL, *top;
	struct nfsclopenhash *oph;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	struct nfscred ncr;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], lockown[NFSV4CL_LOCKNAMELEN];
	int error;
	bool done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vp->v_type != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);

	/*
	 * For "oneopenown" mounts, first check for a cached open in the
	 * NFS vnode, that can be used as a stateid. This can only be
	 * done if no delegations have been issued to the mount and no
	 * byte range file locking has been done for the file.
	 */
	if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp) && fords == 0) {
		NFSLOCKMNT(nmp);
		NFSLOCKNODE(np);
		if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0 &&
		    (np->n_flag & NMIGHTBELOCKED) == 0 &&
		    np->n_openstateid != NULL) {
			stateidp->seqid = 0;
			stateidp->other[0] =
			    np->n_openstateid->nfso_stateid.other[0];
			stateidp->other[1] =
			    np->n_openstateid->nfso_stateid.other[1];
			stateidp->other[2] =
			    np->n_openstateid->nfso_stateid.other[2];
			NFSUNLOCKNODE(np);
			NFSUNLOCKMNT(nmp);
			return (0);
		}
		NFSUNLOCKNODE(np);
		NFSUNLOCKMNT(nmp);
	}

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				if (NFSHASNFSV4N(nmp))
					stateidp->seqid = 0;
				else
					stateidp->seqid =
					    dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, lockown, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(NULL, clp->nfsc_openhash, nfhp, fhlen,
		    own, lockown, mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			if (NFSHASNFSV4N(nmp))
				stateidp->seqid = 0;
			else
				stateidp->seqid = lp->nfsl_stateid.seqid;
			stateidp->other[0] =
			    lp->nfsl_stateid.other[0];
			stateidp->other[1] =
			    lp->nfsl_stateid.other[1];
			stateidp->other[2] =
			    lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		top = NULL;
		done = false;
		oph = NFSCLOPENHASH(clp, nfhp, fhlen);
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
				if (top == NULL && (op->nfso_mode &
				    NFSV4OPEN_ACCESSWRITE) != 0 &&
				    (mode & NFSV4OPEN_ACCESSREAD) != 0)
					top = op;
				if ((mode & op->nfso_mode) == mode) {
					/* LRU order the hash list. */
					LIST_REMOVE(op, nfso_hash);
					LIST_INSERT_HEAD(oph, op, nfso_hash);
					done = true;
					break;
				}
			}
		}
		if (!done) {
			NFSCL_DEBUG(2, "openmode top=%p\n", top);
			if (top == NULL || NFSHASOPENMODE(nmp)) {
				NFSUNLOCKCLSTATE();
				return (ENOENT);
			} else
				op = top;
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			memcpy(&ncr, &op->nfso_cred, sizeof(ncr));
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	if (NFSHASNFSV4N(nmp))
		stateidp->seqid = 0;
	else
		stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	if (p == NULL)
		newnfs_copycred(&ncr, cred);
	return (0);
}
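
/*
 * Summary of the stateid preference order implemented above:
 * 1 - cached open stateid in the nfsnode ("oneopenown" fast path)
 * 2 - delegation stateid, when a usable delegation is held
 * 3 - lock stateid, when the process owns byte range lock state
 * 4 - open stateid, from a matching (or any usable) Open
 * When *lckpp is returned non-NULL, the caller holds a use count on the
 * delegation's rwlock and must drop it (via nfscl_lockderef()) once the
 * I/O RPC has completed.
 */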

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, struct nfsclopenhash *ohashp,
    u_int8_t *nfhp, int fhlen, u_int8_t *openown, u_int8_t *lockown,
    u_int32_t mode, struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfsclopenhash *oph;
	bool keep_looping;

	KASSERT(ohp == NULL || ohashp == NULL, ("nfscl_getopen: "
	    "only one of ohp and ohashp can be set"));
	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = true;
	/* Search the client list */
	if (ohashp == NULL) {
		/* Search the local opens on the delegation. */
		LIST_FOREACH(owp, ohp, nfsow_list) {
			/* and look for the correct open */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
				    && (op->nfso_mode & mode) == mode)
					keep_looping = nfscl_checkown(owp, op, openown,
					    lockown, lpp, &rop, &rop2);
				if (!keep_looping)
					break;
			}
			if (!keep_looping)
				break;
		}
	} else {
		/* Search for matching opens on the hash list. */
		oph = &ohashp[NFSCLOPENHASHFUNC(nfhp, fhlen)];
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode)
				keep_looping = nfscl_checkown(op->nfso_own, op,
				    openown, lockown, lpp, &rop, &rop2);
			if (!keep_looping) {
				/* LRU order the hash list. */
				LIST_REMOVE(op, nfso_hash);
				LIST_INSERT_HEAD(oph, op, nfso_hash);
				break;
			}
		}
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/* Check for an owner match. */
static bool
nfscl_checkown(struct nfsclowner *owp, struct nfsclopen *op, uint8_t *openown,
    uint8_t *lockown, struct nfscllockowner **lpp, struct nfsclopen **ropp,
    struct nfsclopen **ropp2)
{
	struct nfscllockowner *lp;
	bool keep_looping;

	keep_looping = true;
	if (lpp != NULL) {
		/* Now look for a matching lockowner. */
		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, lockown,
			    NFSV4CL_LOCKNAMELEN)) {
				*lpp = lp;
				*ropp = op;
				return (false);
			}
		}
	}
	if (*ropp == NULL && !NFSBCMP(owp->nfsow_owner, openown,
	    NFSV4CL_LOCKNAMELEN)) {
		*ropp = op;
		if (lpp == NULL)
			keep_looping = false;
	}
	if (*ropp2 == NULL)
		*ropp2 = op;
	return (keep_looping);
}
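
/*
 * Worked example for the matching rules above: with three opens of the
 * same file held by openowners A, B and C, where only an open under B
 * carries a lockowner equal to "lockown", the search returns B's open
 * plus that lockowner (variant #1). With no lockowner match, an open
 * whose owner equals "openown" wins (variant #2); failing that, the
 * first open seen is returned via rop2 (variant #3).
 */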

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
void
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (unlocked == 0) {
		if (NFSHASONEOPENOWN(nmp))
			nfsv4_relref(&owp->nfsow_rwlock);
		else
			nfscl_lockunlock(&owp->nfsow_rwlock);
	}
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
void
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
    int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
	else
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0, true);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    bool tryminvers, bool firstref, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		newclp = malloc(
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLOPENHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_openhash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_delegcnt = 0;
		clp->nfsc_deleghighwater = NFSCLDELEGHIGHWATER;
		clp->nfsc_layoutcnt = 0;
		clp->nfsc_layouthighwater = NFSCLLAYOUTHIGHWATER;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
	} else {
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0) {
		/*
		 * Call nfsv4_lock() with "iwantlock == 0" on the firstref so
		 * that it will wait for a pending exclusive lock request.
		 * This gives the exclusive lock request priority over this
		 * shared lock request.
		 * An exclusive lock on nfsc_lock is used mainly for server
		 * crash recoveries and delegation recalls.
		 */
		if (firstref)
			nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR,
			    mp);
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	}
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			} else if (error == NFSERR_MINORVERMISMATCH &&
			    tryminvers) {
				if (nmp->nm_minorvers > 0)
					nmp->nm_minorvers--;
				else
					tryminvers = false;
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0) ||
		    (error == NFSERR_MINORVERMISMATCH && tryminvers));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}
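
/*
 * A minimal usage sketch for nfscl_getcl(), assuming a caller that only
 * needs a shared reference on the clientid:
 *
 *	error = nfscl_getcl(vp->v_mount, cred, p, false, true, &clp);
 *	if (error == 0) {
 *		... use clp ...
 *		nfscl_clientrelease(clp);
 *	}
 *
 * Most helpers in this file instead drop the reference themselves via
 * nfscl_clrelease(), while still holding the NFSCLSTATE mutex.
 */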

/*
 * Get a reference to a clientid and return it, if valid.
 */
struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Called when wanting to lock a byte region.
 */
int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlp = malloc(
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	otherlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vp->v_mount, cred, p, false, true,
			    &clp);
	}
	if (error) {
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, openown, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, NULL, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(NULL, clp->nfsc_openhash,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(NULL, clp->nfsc_openhash,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		free(nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (otherlop)
		free(otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
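
/*
 * Byte range bookkeeping above, as a worked example: a request for
 * off = 100, len = 50 is stored end-exclusive as
 * [nfslo_first = 100, nfslo_end = 150). A len of NFS64BITSSET means
 * "through the end of file" and leaves nfslo_end as NFS64BITSSET;
 * otherwise a zero length or 64bit arithmetic wrap makes
 * nfslo_end <= nfslo_first and the request fails with NFSERR_INVAL.
 */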

/*
 * Called to unlock a byte range, for LockU.
 */
int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		other_lop = malloc(
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lp->nfsl_inprog == NULL &&
				    !NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					fnd = 1;
					break;
				}
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (other_lop)
		free(other_lop, M_NFSCLLOCK);
	return (0);
}
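
/*
 * Caller flow sketch for nfscl_relbytelock(), assuming the advisory
 * lock path in nfs_clrpcops.c (simplified): it is called repeatedly
 * with an incrementing callcnt until no more lockowners are claimed,
 * doing the over-the-wire LockU only when *dorpcp was set:
 *
 *	do {
 *		error = nfscl_relbytelock(vp, off, len, cred, p, callcnt++,
 *		    clp, id, flags, &lp, &dorpc);
 *		if (lp != NULL && dorpc)
 *			... do the LockU RPC for lp ...
 *	} while (lp != NULL);
 *	nfscl_releasealllocks(clp, vp, p, id, flags);
 *
 * nfsl_inprog marks the owners already handled in this pass.
 */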

/*
 * Release all lockowners marked in progress for this process and file.
 */
void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lp->nfsl_inprog == p &&
				    !NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					lp->nfsl_inprog = NULL;
					nfscl_lockunlock(&lp->nfsl_rwlock);
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vp->v_mount, cred, p, false, true, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		/* No need to flush if it is a write delegation. */
		if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
			return (0);
		}
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (!NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN))
					break;
			}
			if (lp != NULL) {
				LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
					if (lop->nfslo_first >= end)
						break;
					if (lop->nfslo_end <= off)
						continue;
					if (lop->nfslo_type == F_WRLCK) {
						nfscl_clrelease(clp);
						NFSUNLOCKCLSTATE();
						return (1);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}
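
/*
 * Overlap test used above, as a worked example: lock ranges are
 * end-exclusive, so a held lock [first, end) overlaps the query range
 * [off, end) unless lock.nfslo_first >= query end or
 * lock.nfslo_end <= query off. For instance, a write lock covering
 * [0, 100) overlaps a query of off = 50, len = 10 ([50, 60)), so the
 * function returns 1 and the caller flushes before the LockU.
 */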

/*
 * Release a byte range lock owner structure.
 */
void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Unlink the open structure.
 */
static void
nfscl_unlinkopen(struct nfsclopen *op)
{

	LIST_REMOVE(op, nfso_list);
	if (op->nfso_hash.le_prev != NULL)
		LIST_REMOVE(op, nfso_hash);
}

/*
 * Free up an open structure and any associated byte range lock structures.
 */
void
nfscl_freeopen(struct nfsclopen *op, int local, bool unlink)
{

	if (unlink)
		nfscl_unlinkopen(op);
	nfscl_freealllocks(&op->nfso_lock, local);
	free(op, M_NFSCLOPEN);
	if (local)
		nfsstatsv1.cllocalopens--;
	else
		nfsstatsv1.clopens--;
}

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
{
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
			panic("nfscllckw");
		nfscl_freelockowner(lp, local);
	}
}

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {
			mustdelete = 1;
			break;
		}
	}

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		dp = NULL;
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
		if (error) {
			mustdelete = 1;
			if (dp != NULL) {
				free(dp, M_NFSCLDELEG);
				dp = NULL;
			}
		}
		if (dp != NULL)
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, dp);
	}

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
	if (mustdelete)
		nfscl_freeopen(op, 0, true);
	return (mustdelete);
}

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{
	int owned;

	/*
	 * Make sure the NFSCLSTATE mutex is held, to avoid races with
	 * calls in nfscl_renewthread() that do not hold a reference
	 * count on the nfsclclient and just the mutex.
	 * The mutex will not be held for calls done with the exclusive
	 * nfsclclient lock held, in particular, nfscl_hasexpired()
	 * and nfscl_recalldeleg() might do this.
	 */
	owned = mtx_owned(NFSCLSTATEMUTEXPTR);
	if (owned == 0)
		NFSLOCKCLSTATE();
	LIST_REMOVE(owp, nfsow_list);
	if (owned == 0)
		NFSUNLOCKCLSTATE();
	free(owp, M_NFSCLOWNER);
	if (local)
		nfsstatsv1.cllocalopenowners--;
	else
		nfsstatsv1.clopenowners--;
}

/*
 * Free up a byte range lock owner structure.
 */
void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
	struct nfscllock *lop, *nlop;
	int owned;

	/*
	 * Make sure the NFSCLSTATE mutex is held, to avoid races with
	 * calls in nfscl_renewthread() that do not hold a reference
	 * count on the nfsclclient and just the mutex.
	 * The mutex will not be held for calls done with the exclusive
	 * nfsclclient lock held, in particular, nfscl_hasexpired()
	 * and nfscl_recalldeleg() might do this.
	 */
	owned = mtx_owned(NFSCLSTATEMUTEXPTR);
	if (owned == 0)
		NFSLOCKCLSTATE();
	LIST_REMOVE(lp, nfsl_list);
	if (owned == 0)
		NFSUNLOCKCLSTATE();
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	}
	free(lp, M_NFSCLLOCKOWNER);
	if (local)
		nfsstatsv1.cllocallockowners--;
	else
		nfsstatsv1.cllockowners--;
}

/*
 * Free up a byte range lock structure.
 */
void
nfscl_freelock(struct nfscllock *lop, int local)
{

	LIST_REMOVE(lop, nfslo_list);
	free(lop, M_NFSCLLOCK);
	if (local)
		nfsstatsv1.cllocallocks--;
	else
		nfsstatsv1.cllocks--;
}

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1, true);
		}
		nfscl_freeopenowner(owp, 1);
	}
	nfscl_freealllocks(&dp->nfsdl_lock, 1);
}

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp, bool freeit)
{

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	dp->nfsdl_clp->nfsc_delegcnt--;
	if (freeit)
		free(dp, M_NFSCLDELEG);
	nfsstatsv1.cldelegates--;
}

/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;

	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);

	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
		nfscl_freedevinfo(dip);

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0, true);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
	    ndp = TAILQ_NEXT(dp, nfsdl_list);
	    owp = LIST_FIRST(&dp->nfsdl_owner);
	    while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
		    if (LIST_NEXT(op, nfso_list) != NULL)
			panic("nfsclexp");
		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
			    NFSV4CL_LOCKNAMELEN))
				break;
		    }
		    if (towp != NULL) {
			/* Merge opens in */
			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
			    if (top->nfso_fhlen == op->nfso_fhlen &&
				!NFSBCMP(top->nfso_fh, op->nfso_fh,
				op->nfso_fhlen)) {
				top->nfso_mode |= op->nfso_mode;
				top->nfso_opencnt += op->nfso_opencnt;
				break;
			    }
			}
			if (top == NULL) {
			    /* Just add the open to the owner list */
			    LIST_REMOVE(op, nfso_list);
			    op->nfso_own = towp;
			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
			    LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
				op->nfso_fhlen), op, nfso_hash);
			    nfsstatsv1.cllocalopens--;
			    nfsstatsv1.clopens++;
			}
		    } else {
			/* Just add the openowner to the client list */
			LIST_REMOVE(owp, nfsow_list);
			owp->nfsow_clp = clp;
			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
			LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
			    op->nfso_fhlen), op, nfso_hash);
			nfsstatsv1.cllocalopenowners--;
			nfsstatsv1.clopenowners++;
			nfsstatsv1.cllocalopens--;
			nfsstatsv1.clopens++;
		    }
		}
		owp = nowp;
	    }
	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
		printed = 1;
		printf("nfsv4 expired locks lost\n");
	    }
	    nfscl_cleandeleg(dp);
	    nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
	    dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
		panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}
1880
1881
/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
				break;
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
			break;
		}
		owp = nowp;
	}
}

/*
 * Find open/lock owners for processes that have exited.
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;
	uint8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * All the pidhash locks must be acquired, since they are sx locks
	 * and must be acquired before the mutexes. The pid(s) that will
	 * be used aren't known yet, so all the locks need to be acquired.
	 * Fortunately, this function is only performed once/sec.
	 */
	pidhash_slockall();
	NFSLOCKCLSTATE();
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
			}
		}
		if (nfscl_procdoesntexist(owp->nfsow_owner)) {
			memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
			nfscl_cleanup_common(clp, own);
		}
	}

	/*
	 * For the single open_owner case, these lock owners need to be
	 * checked separately to see if their processes still exist.
	 * This is because nfscl_procdoesntexist() never returns true for
	 * the single open_owner, so the loop above never calls
	 * nfscl_cleanup_common() for it.
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (nfscl_procdoesntexist(lp->nfsl_owner)) {
				memcpy(own, lp->nfsl_owner,
				    NFSV4CL_LOCKNAMELEN);
				nfscl_cleanup_common(clp, own);
			}
		}
	}
	NFSUNLOCKCLSTATE();
	pidhash_sunlockall();
}

/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
static void
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
{
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;
	int fnd_it;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)
		return;

	fnd_it = 0;
	mylfhp = NULL;
	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
		    lfhp->nfslfh_len))
			mylfhp = lfhp;
		LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
			if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
			    NFSV4CL_LOCKNAMELEN))
				fnd_it = 1;
	}
	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
		return;

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		    M_NOWAIT);
		if (mylfhp == NULL)
			return;
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	}
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
}

static int fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
void
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p, struct nfscldeleghead *dhp)
{
	struct nfsclclient *clp;
	struct ucred *cred;
	int igotlock;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
	NFSLOCKCLSTATE();
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		fake_global++;
		NFSUNLOCKCLSTATE();
		NFSLOCKCLSTATE();
	}

	clp = nmp->nm_clp;
	if (clp != NULL) {
		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
			panic("nfscl umount");

		/*
		 * First, handshake with the nfscl renew thread, to terminate
		 * it.
		 */
		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
			    "nfsclumnt", hz);

		/*
		 * Now, get the exclusive lock on the client state, so
		 * that no uses of the state are still in progress.
		 */
		do {
			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} while (!igotlock);
		NFSUNLOCKCLSTATE();

		/*
		 * Free up all the state. It will expire on the server, but
		 * maybe we should do a SetClientId/SetClientIdConfirm so
		 * the server throws it away?
		 */
		LIST_REMOVE(clp, nfsc_list);
		nfscl_delegreturnall(clp, p, dhp);
		cred = newnfs_getcred();
		if (NFSHASNFSV4N(nmp)) {
			nfsrpc_destroysession(nmp, NULL, cred, p);
			nfsrpc_destroyclient(nmp, clp, cred, p);
		} else
			nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
		nfscl_cleanclient(clp);
		nmp->nm_clp = NULL;
		NFSFREECRED(cred);
		free(clp, M_NFSCLCLIENT);
	} else
		NFSUNLOCKCLSTATE();
}

/*
 * This function is called when a server replies with NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
 * doing Opens and Locks with reclaim. If these fail, it deletes the
 * corresponding state.
 */
static void
nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred,
    NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;
	struct nfscllock *lop, *nlop;
	struct nfscldeleg *dp, *ndp, *tdp;
	struct nfsmount *nmp;
	struct ucred *tcred;
	struct nfsclopenhead extra_open;
	struct nfscldeleghead extra_deleg;
	struct nfsreq *rep;
	u_int64_t len;
	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
	int i, igotlock = 0, error, trycnt, firstlock;
	struct nfscllayout *lyp, *nlyp;
	bool recovered_one;

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl recover");

	/*
	 * For now, just get rid of all layouts. There may be a need
	 * to do LayoutCommit Ops with reclaim == true later.
	 */
	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);
	TAILQ_INIT(&clp->nfsc_layout);
	for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_layouthash[i]);
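
	/*
	 * Get a fresh clientid (and, for NFSv4.1/4.2, a session),
	 * retrying a few times for the stale-clientid and bad-session
	 * errors before giving up.
	 */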
	trycnt = 5;
	tcred = NULL;
	do {
		error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		NFSLOCKCLSTATE();
		clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
		    NFSCLFLAGS_RECVRINPROG);
		wakeup(&clp->nfsc_flags);
		nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return;
	}
	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;

	/*
	 * Mark requests already queued on the server, so that they don't
	 * initiate another recovery cycle. Any requests already in the
	 * queue that handle state information will have the old stale
	 * clientid/stateid and will get a NFSERR_STALESTATEID,
	 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
	 * This will be translated to NFSERR_STALEDONTRECOVER when
	 * R_DONTRECOVER is set.
	 */
	NFSLOCKREQ();
	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
		if (rep->r_nmp == nmp)
			rep->r_flags |= R_DONTRECOVER;
	}
	NFSUNLOCKREQ();

	/*
	 * If nfsrpc_setclient() returns *retokp == true,
	 * no more recovery is needed.
	 */
	if (*retokp)
		goto out;

	/*
	 * Now, mark all delegations "need reclaim".
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
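
	/*
	 * Delegations are reclaimed in two passes: first via the extant
	 * opens that match them and then, further below, via opens
	 * cobbled together for any delegations still marked
	 * NFSCLDL_NEEDRECLAIM.
	 */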
	TAILQ_INIT(&extra_deleg);
	LIST_INIT(&extra_open);
	/*
	 * Now traverse the state lists, doing Open and Lock Reclaims.
	 */
	tcred = newnfs_getcred();
	recovered_one = false;
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		owp->nfsow_seqid = 0;
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL) {
			nop = LIST_NEXT(op, nfso_list);
			if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
				/* Search for a delegation to reclaim with the open */
				TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
					if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
						continue;
					if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
						mode = NFSV4OPEN_ACCESSWRITE;
						delegtype = NFSV4OPEN_DELEGATEWRITE;
					} else {
						mode = NFSV4OPEN_ACCESSREAD;
						delegtype = NFSV4OPEN_DELEGATEREAD;
					}
					if ((op->nfso_mode & mode) == mode &&
					    op->nfso_fhlen == dp->nfsdl_fhlen &&
					    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
						break;
				}
				ndp = dp;
				if (dp == NULL)
					delegtype = NFSV4OPEN_DELEGATENONE;
				newnfs_copycred(&op->nfso_cred, tcred);
				error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
				    op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
				    op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
				    tcred, p);
				if (!error) {
					recovered_one = true;
					/* Handle any replied delegation */
					if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
					    || NFSMNT_RDONLY(nmp->nm_mountp))) {
						if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
							mode = NFSV4OPEN_ACCESSWRITE;
						else
							mode = NFSV4OPEN_ACCESSREAD;
						TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
							if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
								continue;
							if ((op->nfso_mode & mode) == mode &&
							    op->nfso_fhlen == dp->nfsdl_fhlen &&
							    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
							    op->nfso_fhlen)) {
								dp->nfsdl_stateid = ndp->nfsdl_stateid;
								dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
								dp->nfsdl_ace = ndp->nfsdl_ace;
								dp->nfsdl_change = ndp->nfsdl_change;
								dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
								if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
									dp->nfsdl_flags |= NFSCLDL_RECALL;
								free(ndp, M_NFSCLDELEG);
								ndp = NULL;
								break;
							}
						}
					}
					if (ndp != NULL)
						TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);

					/* and reclaim all byte range locks */
					lp = LIST_FIRST(&op->nfso_lock);
					while (lp != NULL) {
						nlp = LIST_NEXT(lp, nfsl_list);
						lp->nfsl_seqid = 0;
						firstlock = 1;
						lop = LIST_FIRST(&lp->nfsl_lock);
						while (lop != NULL) {
							nlop = LIST_NEXT(lop, nfslo_list);
							if (lop->nfslo_end == NFS64BITSSET)
								len = NFS64BITSSET;
							else
								len = lop->nfslo_end - lop->nfslo_first;
							error = nfscl_trylock(nmp, NULL,
							    op->nfso_fh, op->nfso_fhlen, lp,
							    firstlock, 1, lop->nfslo_first, len,
							    lop->nfslo_type, tcred, p);
							if (error != 0)
								nfscl_freelock(lop, 0);
							else
								firstlock = 0;
							lop = nlop;
						}
						/* If no locks, but a lockowner, just delete it. */
						if (LIST_EMPTY(&lp->nfsl_lock))
							nfscl_freelockowner(lp, 0);
						lp = nlp;
					}
				} else if (error == NFSERR_NOGRACE && !recovered_one &&
				    NFSHASNFSV4N(nmp)) {
					/*
					 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
					 * actually end up here, since the client will do
					 * a recovery for NFSERR_BADSESSION, but will get
					 * an NFSERR_NOGRACE reply for the first "reclaim"
					 * attempt.
					 * So, call nfscl_expireclient() to recover the
					 * opens as best we can and then do a reclaim
					 * complete and return.
					 */
					nfsrpc_reclaimcomplete(nmp, cred, p);
					nfscl_expireclient(clp, nmp, tcred, p);
					goto out;
				}
			}
			if (error != 0 && error != NFSERR_BADSESSION)
				nfscl_freeopen(op, 0, true);
			op = nop;
		}
		owp = nowp;
	}

	/*
	 * Now, try and get any delegations not yet reclaimed by cobbling
	 * together an appropriate open.
	 */
	nowp = NULL;
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
			if (nowp == NULL) {
				nowp = malloc(
				    sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
				/*
				 * Name must be as long as the largest possible
				 * NFSV4CL_LOCKNAMELEN. 12 for now.
				 */
NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2351
NFSV4CL_LOCKNAMELEN);
2352
LIST_INIT(&nowp->nfsow_open);
2353
nowp->nfsow_clp = clp;
2354
nowp->nfsow_seqid = 0;
2355
nowp->nfsow_defunct = 0;
2356
nfscl_lockinit(&nowp->nfsow_rwlock);
2357
}
2358
nop = NULL;
2359
if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2360
nop = malloc(sizeof (struct nfsclopen) +
2361
dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2362
nop->nfso_own = nowp;
2363
if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2364
nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2365
delegtype = NFSV4OPEN_DELEGATEWRITE;
2366
} else {
2367
nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2368
delegtype = NFSV4OPEN_DELEGATEREAD;
2369
}
2370
nop->nfso_opencnt = 0;
2371
nop->nfso_posixlock = 1;
2372
nop->nfso_fhlen = dp->nfsdl_fhlen;
2373
NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2374
LIST_INIT(&nop->nfso_lock);
2375
nop->nfso_stateid.seqid = 0;
2376
nop->nfso_stateid.other[0] = 0;
2377
nop->nfso_stateid.other[1] = 0;
2378
nop->nfso_stateid.other[2] = 0;
2379
newnfs_copycred(&dp->nfsdl_cred, tcred);
2380
newnfs_copyincred(tcred, &nop->nfso_cred);
2381
tdp = NULL;
2382
error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2383
nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2384
nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2385
delegtype, tcred, p);
2386
if (tdp != NULL) {
2387
if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2388
mode = NFSV4OPEN_ACCESSWRITE;
2389
else
2390
mode = NFSV4OPEN_ACCESSREAD;
2391
if ((nop->nfso_mode & mode) == mode &&
2392
nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2393
!NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2394
nop->nfso_fhlen)) {
2395
dp->nfsdl_stateid = tdp->nfsdl_stateid;
2396
dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2397
dp->nfsdl_ace = tdp->nfsdl_ace;
2398
dp->nfsdl_change = tdp->nfsdl_change;
2399
dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2400
if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2401
dp->nfsdl_flags |= NFSCLDL_RECALL;
2402
free(tdp, M_NFSCLDELEG);
2403
} else {
2404
TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2405
}
2406
}
2407
}
2408
if (error) {
2409
if (nop != NULL)
2410
free(nop, M_NFSCLOPEN);
2411
if (error == NFSERR_NOGRACE && !recovered_one &&
2412
NFSHASNFSV4N(nmp)) {
2413
/*
2414
* For NFSv4.1/4.2, the NFSERR_EXPIRED case will
2415
* actually end up here, since the client will do
2416
* a recovery for NFSERR_BADSESSION, but will get
2417
* an NFSERR_NOGRACE reply for the first "reclaim"
2418
* attempt.
2419
* So, call nfscl_expireclient() to recover the
2420
* opens as best we can and then do a reclaim
2421
* complete and return.
2422
*/
2423
nfsrpc_reclaimcomplete(nmp, cred, p);
2424
nfscl_expireclient(clp, nmp, tcred, p);
2425
free(nowp, M_NFSCLOWNER);
2426
goto out;
2427
}
2428
/*
2429
* Couldn't reclaim it, so throw the state
2430
* away. Ouch!!
2431
*/
2432
nfscl_cleandeleg(dp);
2433
nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
2434
} else {
2435
recovered_one = true;
2436
LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2437
}
2438
}
2439
dp = ndp;
2440
}
2441
2442
/*
2443
* Now, get rid of extra Opens and Delegations.
2444
*/
2445
LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2446
do {
2447
newnfs_copycred(&op->nfso_cred, tcred);
2448
error = nfscl_tryclose(op, tcred, nmp, p, true);
2449
if (error == NFSERR_GRACE)
2450
(void) nfs_catnap(PZERO, error, "nfsexcls");
2451
} while (error == NFSERR_GRACE);
2452
LIST_REMOVE(op, nfso_list);
2453
free(op, M_NFSCLOPEN);
2454
}
2455
if (nowp != NULL)
2456
free(nowp, M_NFSCLOWNER);
2457
2458
TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2459
do {
2460
newnfs_copycred(&dp->nfsdl_cred, tcred);
2461
error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2462
if (error == NFSERR_GRACE)
2463
(void) nfs_catnap(PZERO, error, "nfsexdlg");
2464
} while (error == NFSERR_GRACE);
2465
TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2466
free(dp, M_NFSCLDELEG);
2467
}
2468
2469
/* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
2470
if (NFSHASNFSV4N(nmp))
2471
(void)nfsrpc_reclaimcomplete(nmp, cred, p);
2472
2473
out:
2474
NFSLOCKCLSTATE();
2475
clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2476
wakeup(&clp->nfsc_flags);
2477
nfsv4_unlock(&clp->nfsc_lock, 0);
2478
NFSUNLOCKCLSTATE();
2479
if (tcred != NULL)
2480
NFSFREECRED(tcred);
2481
}
2482
2483
/*
 * This function is called when a server replies with NFSERR_EXPIRED.
 * It deletes all state for the client and does a fresh SetClientId/confirm.
 * XXX Someday it should post a signal to the process(es) that hold the
 * state, so they know that lock state has been lost.
 */
int
nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
{
	struct nfsmount *nmp;
	struct ucred *cred;
	int igotlock = 0, error, trycnt;

	/*
	 * If the clientid has gone away or a new SetClientid has already
	 * been done, just return ok.
	 */
	if (clp == NULL || clidrev != clp->nfsc_clientidrev)
		return (0);

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
	 * that only one thread does the work.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (0);
	}
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl expired");
	cred = newnfs_getcred();
	trycnt = 5;
	do {
		error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		NFSLOCKCLSTATE();
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	} else {
		/*
		 * Expire the state for the client.
		 */
		nfscl_expireclient(clp, nmp, cred, p);
		NFSLOCKCLSTATE();
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	}
	clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
	wakeup(&clp->nfsc_flags);
	nfsv4_unlock(&clp->nfsc_lock, 0);
	NFSUNLOCKCLSTATE();
	NFSFREECRED(cred);
	return (error);
}

/*
 * This function inserts a lock in the list after insert_lop.
 */
static void
nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
    struct nfscllock *insert_lop, int local)
{
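
	/*
	 * insert_lop is the lock owner itself, cast to a lock pointer,
	 * when the new lock belongs at the head of the list; the cast
	 * comparison below detects that case.
	 */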
	if ((struct nfscllockowner *)insert_lop == lp)
		LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
	else
		LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
	if (local)
		nfsstatsv1.cllocallocks++;
	else
		nfsstatsv1.cllocks++;
}

/*
 * This function updates the locking for a lock owner and given file. It
 * maintains a list of lock ranges ordered on increasing file offset that
 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
 * It always adds new_lop to the list and sometimes uses the one pointed
 * at by other_lopp.
 * Returns 1 if the locks were modified, 0 otherwise.
 */
static int
nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
    struct nfscllock **other_lopp, int local)
{
	struct nfscllock *new_lop = *new_lopp;
	struct nfscllock *lop, *tlop, *ilop;
	struct nfscllock *other_lop;
	int unlock = 0, modified = 0;
	u_int64_t tmp;

	/*
	 * Work down the list until the lock is merged.
	 */
	if (new_lop->nfslo_type == F_UNLCK)
		unlock = 1;
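	/*
	 * ilop tracks the insertion point; it starts as the head
	 * sentinel (the lock owner cast to a lock pointer), which
	 * nfscl_insertlock() recognizes as "insert at the head".
	 */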
	ilop = (struct nfscllock *)lp;
	lop = LIST_FIRST(&lp->nfsl_lock);
	while (lop != NULL) {
		/*
		 * Only check locks for this file that aren't before the start
		 * of the new lock's range.
		 */
		if (lop->nfslo_end >= new_lop->nfslo_first) {
			if (new_lop->nfslo_end < lop->nfslo_first) {
				/*
				 * If the new lock ends before the start of the
				 * current lock's range, no merge, just insert
				 * the new lock.
				 */
				break;
			}
			if (new_lop->nfslo_type == lop->nfslo_type ||
			    (new_lop->nfslo_first <= lop->nfslo_first &&
			    new_lop->nfslo_end >= lop->nfslo_end)) {
				/*
				 * This lock can be absorbed by the new lock/unlock.
				 * This happens when it covers the entire range
				 * of the old lock or is contiguous
				 * with the old lock and is of the same type or an
				 * unlock.
				 */
				if (new_lop->nfslo_type != lop->nfslo_type ||
				    new_lop->nfslo_first != lop->nfslo_first ||
				    new_lop->nfslo_end != lop->nfslo_end)
					modified = 1;
				if (lop->nfslo_first < new_lop->nfslo_first)
					new_lop->nfslo_first = lop->nfslo_first;
				if (lop->nfslo_end > new_lop->nfslo_end)
					new_lop->nfslo_end = lop->nfslo_end;
				tlop = lop;
				lop = LIST_NEXT(lop, nfslo_list);
				nfscl_freelock(tlop, local);
				continue;
			}

			/*
			 * All these cases are for contiguous locks that are not the
			 * same type, so they can't be merged.
			 */
			if (new_lop->nfslo_first <= lop->nfslo_first) {
				/*
				 * This case is where the new lock overlaps with the
				 * first part of the old lock. Move the start of the
				 * old lock to just past the end of the new lock. The
				 * new lock will be inserted in front of the old, since
				 * ilop hasn't been updated. (We are done now.)
				 */
				if (lop->nfslo_first != new_lop->nfslo_end) {
					lop->nfslo_first = new_lop->nfslo_end;
					modified = 1;
				}
				break;
			}
			if (new_lop->nfslo_end >= lop->nfslo_end) {
				/*
				 * This case is where the new lock overlaps with the
				 * end of the old lock's range. Move the old lock's
				 * end to just before the new lock's first and insert
				 * the new lock after the old lock.
				 * Might not be done yet, since the new lock could
				 * overlap further locks with higher ranges.
				 */
				if (lop->nfslo_end != new_lop->nfslo_first) {
					lop->nfslo_end = new_lop->nfslo_first;
					modified = 1;
				}
				ilop = lop;
				lop = LIST_NEXT(lop, nfslo_list);
				continue;
			}
			/*
			 * The final case is where the new lock's range is in the
			 * middle of the current lock's and splits the current lock
			 * up. Use *other_lopp to handle the second part of the
			 * split old lock range. (We are done now.)
			 * For unlock, we use new_lop as other_lop and tmp, since
			 * other_lop and new_lop are the same for this case.
			 * We noted the unlock case above, so we don't need
			 * new_lop->nfslo_type any longer.
			 */
			tmp = new_lop->nfslo_first;
			if (unlock) {
				other_lop = new_lop;
				*new_lopp = NULL;
			} else {
				other_lop = *other_lopp;
				*other_lopp = NULL;
			}
			other_lop->nfslo_first = new_lop->nfslo_end;
			other_lop->nfslo_end = lop->nfslo_end;
			other_lop->nfslo_type = lop->nfslo_type;
			lop->nfslo_end = tmp;
			nfscl_insertlock(lp, other_lop, lop, local);
			ilop = lop;
			modified = 1;
			break;
		}
		ilop = lop;
		lop = LIST_NEXT(lop, nfslo_list);
		if (lop == NULL)
			break;
	}

	/*
	 * Insert the new lock in the list at the appropriate place.
	 */
	if (!unlock) {
		nfscl_insertlock(lp, new_lop, ilop, local);
		*new_lopp = NULL;
		modified = 1;
	}
	return (modified);
}

/*
 * This function must be run as a kernel thread.
 * It does Renew Ops and recovery, when required.
 */
void
nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleghead dh;
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;
	u_int32_t clidrev;
	int error, cbpathdown, islept, igotlock, ret, clearok;
	uint32_t recover_done_time = 0;
	time_t mytime;
	static time_t prevsec = 0;
	struct nfscllockownerfh *lfhp, *nlfhp;
	struct nfscllockownerfhhead lfh;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;
	struct nfscllayouthead rlh;
	struct nfsclrecalllayout *recallp;
	struct nfsclds *dsp;
	bool retok;
	struct mount *mp;
	vnode_t vp;

	cred = newnfs_getcred();
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
	mp = clp->nfsc_nmp->nm_mountp;
	NFSUNLOCKCLSTATE();
	for (;;) {
		newnfs_setroot(cred);
		cbpathdown = 0;
		if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
			/*
			 * Only allow one full recover within 1/2 of the lease
			 * duration (nfsc_renew).
			 * retok is value/result. If passed in set to true,
			 * it indicates only a CreateSession operation should
			 * be attempted.
			 * If it is returned true, it indicates that the
			 * recovery only required a CreateSession.
			 */
			retok = true;
			if (recover_done_time < NFSD_MONOSEC) {
				recover_done_time = NFSD_MONOSEC +
				    clp->nfsc_renew;
				retok = false;
			}
			NFSCL_DEBUG(1, "Doing recovery, only "
			    "createsession=%d\n", retok);
			nfscl_recover(clp, &retok, cred, p);
		}
		if (clp->nfsc_expire <= NFSD_MONOSEC &&
		    (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
			clidrev = clp->nfsc_clientidrev;
			error = nfsrpc_renew(clp, NULL, cred, p);
			if (error == NFSERR_CBPATHDOWN)
				cbpathdown = 1;
			else if (error == NFSERR_STALECLIENTID) {
				NFSLOCKCLSTATE();
				clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
				NFSUNLOCKCLSTATE();
			} else if (error == NFSERR_EXPIRED)
				(void) nfscl_hasexpired(clp, clidrev, p);
		}

checkdsrenew:
		if (NFSHASNFSV4N(clp->nfsc_nmp)) {
			/* Do renews for any DS sessions. */
			NFSLOCKMNT(clp->nfsc_nmp);
			/* Skip first entry, since the MDS is handled above. */
			dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
			if (dsp != NULL)
				dsp = TAILQ_NEXT(dsp, nfsclds_list);
			while (dsp != NULL) {
				if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
				    dsp->nfsclds_sess.nfsess_defunct == 0) {
					dsp->nfsclds_expire = NFSD_MONOSEC +
					    clp->nfsc_renew;
					NFSUNLOCKMNT(clp->nfsc_nmp);
					(void)nfsrpc_renew(clp, dsp, cred, p);
					goto checkdsrenew;
				}
				dsp = TAILQ_NEXT(dsp, nfsclds_list);
			}
			NFSUNLOCKMNT(clp->nfsc_nmp);
		}

		TAILQ_INIT(&dh);
		NFSLOCKCLSTATE();
		if (cbpathdown)
			/* It's a Total Recall! */
			nfscl_totalrecall(clp);

		/*
		 * Now, handle defunct owners.
		 */
		LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
			if (LIST_EMPTY(&owp->nfsow_open)) {
				if (owp->nfsow_defunct != 0)
					nfscl_freeopenowner(owp, 0);
			}
		}

		/*
		 * Do the recall on any delegations. To avoid trouble, always
		 * come back up here after having slept.
		 */
		igotlock = 0;
tryagain:
		dp = TAILQ_FIRST(&clp->nfsc_deleg);
		while (dp != NULL) {
			ndp = TAILQ_NEXT(dp, nfsdl_list);
			if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
					if (igotlock) {
						nfsv4_unlock(&clp->nfsc_lock, 0);
						igotlock = 0;
					}
					dp->nfsdl_rwlock.nfslock_lock |=
					    NFSV4LOCK_WANTED;
					msleep(&dp->nfsdl_rwlock,
					    NFSCLSTATEMUTEXPTR, PVFS, "nfscld",
					    5 * hz);
					if (NFSCL_FORCEDISM(mp))
						goto terminate;
					goto tryagain;
				}
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, mp);
					if (igotlock == 0 && NFSCL_FORCEDISM(mp))
						goto terminate;
					if (islept)
						goto tryagain;
				}
				NFSUNLOCKCLSTATE();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
				    NULL, cred, p, 1, &vp);
				if (!ret) {
					nfscl_cleandeleg(dp);
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					LIST_REMOVE(dp, nfsdl_hash);
					TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
					clp->nfsc_delegcnt--;
					nfsstatsv1.cldelegates--;
				}
				NFSLOCKCLSTATE();
				/*
				 * The nfsc_lock must be released before doing
				 * vrele(), since it might call nfs_inactive().
				 * For the unlikely case where the vnode failed
				 * to be acquired by nfscl_recalldeleg(), a
				 * VOP_RECLAIM() should be in progress and it
				 * will return the delegation.
				 */
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				if (vp != NULL) {
					NFSUNLOCKCLSTATE();
					vrele(vp);
					NFSLOCKCLSTATE();
				}
				goto tryagain;
			}
			dp = ndp;
		}

		/*
		 * Clear out old delegations, if we are above the high water
		 * mark. Only clear out ones with no state related to them.
		 * The tailq list is in LRU order.
		 */
		dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
		while (clp->nfsc_delegcnt > clp->nfsc_deleghighwater &&
		    dp != NULL) {
			ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
			if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
			    dp->nfsdl_rwlock.nfslock_lock == 0 &&
			    dp->nfsdl_timestamp < NFSD_MONOSEC &&
			    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
			    NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
				clearok = 1;
				LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
					op = LIST_FIRST(&owp->nfsow_open);
					if (op != NULL) {
						clearok = 0;
						break;
					}
				}
				if (clearok) {
					LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
						if (!LIST_EMPTY(&lp->nfsl_lock)) {
							clearok = 0;
							break;
						}
					}
				}
				if (clearok) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
					LIST_REMOVE(dp, nfsdl_hash);
					TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
					clp->nfsc_delegcnt--;
					nfsstatsv1.cldelegates--;
				}
			}
			dp = ndp;
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);

		/*
		 * Do the recall on any layouts. To avoid trouble, always
		 * come back up here after having slept.
		 */
		TAILQ_INIT(&rlh);
tryagain2:
		TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
			if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
				    (lyp->nfsly_lock.nfslock_lock &
				    NFSV4LOCK_LOCK) != 0) {
					lyp->nfsly_lock.nfslock_lock |=
					    NFSV4LOCK_WANTED;
					msleep(&lyp->nfsly_lock.nfslock_lock,
					    NFSCLSTATEMUTEXPTR, PVFS, "nfslyp",
					    5 * hz);
					if (NFSCL_FORCEDISM(mp))
						goto terminate;
					goto tryagain2;
				}
				/* Move the layout to the recall list. */
				TAILQ_REMOVE(&clp->nfsc_layout, lyp,
				    nfsly_list);
				LIST_REMOVE(lyp, nfsly_hash);
				TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);

				/* Handle any layout commits. */
				if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
				    (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
					lyp->nfsly_flags &= ~NFSLY_WRITTEN;
					NFSUNLOCKCLSTATE();
					NFSCL_DEBUG(3, "do layoutcommit\n");
					nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
					    cred, p);
					NFSLOCKCLSTATE();
					goto tryagain2;
				}
			}
		}

		/* Now, look for stale layouts. */
		lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
		while (lyp != NULL) {
			nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
			if ((lyp->nfsly_timestamp < NFSD_MONOSEC ||
			    clp->nfsc_layoutcnt > clp->nfsc_layouthighwater) &&
			    (lyp->nfsly_flags & (NFSLY_RECALL |
			    NFSLY_RETONCLOSE)) == 0 &&
			    lyp->nfsly_lock.nfslock_usecnt == 0 &&
			    lyp->nfsly_lock.nfslock_lock == 0) {
				NFSCL_DEBUG(4, "ret stale lay=%d\n",
				    clp->nfsc_layoutcnt);
				recallp = malloc(sizeof(*recallp),
				    M_NFSLAYRECALL, M_NOWAIT);
				if (recallp == NULL)
					break;
				(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
				    lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
				    lyp->nfsly_stateid.seqid, 0, 0, NULL,
				    recallp);
			}
			lyp = nlyp;
		}

		/*
		 * Free up any unreferenced device info structures.
		 */
		LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
			if (dip->nfsdi_layoutrefs == 0 &&
			    dip->nfsdi_refcnt == 0) {
				NFSCL_DEBUG(4, "freeing devinfo\n");
				LIST_REMOVE(dip, nfsdi_list);
				nfscl_freedevinfo(dip);
			}
		}
		NFSUNLOCKCLSTATE();

		/* Do layout return(s), as required. */
		TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
			TAILQ_REMOVE(&rlh, lyp, nfsly_list);
			NFSCL_DEBUG(4, "ret layout\n");
			nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
			if ((lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
				NFSLOCKCLSTATE();
				lyp->nfsly_flags |= NFSLY_RETURNED;
				wakeup(lyp);
				NFSUNLOCKCLSTATE();
			} else
				nfscl_freelayout(lyp);
		}

		/*
		 * Delegreturn any delegations cleaned out or recalled.
		 */
		TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
			TAILQ_REMOVE(&dh, dp, nfsdl_list);
			free(dp, M_NFSCLDELEG);
		}

		SLIST_INIT(&lfh);
		/*
		 * Call nfscl_cleanupkext() once per second to check for
		 * open/lock owners where the process has exited.
		 */
		mytime = NFSD_MONOSEC;
		if (prevsec != mytime) {
			prevsec = mytime;
			nfscl_cleanupkext(clp, &lfh);
		}

		/*
		 * Do a ReleaseLockOwner for all lock owners where the
		 * associated process no longer exists, as found by
		 * nfscl_cleanupkext().
		 */
		newnfs_setroot(cred);
		SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
			LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
			    nlp) {
				(void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
				    lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
				    p);
				nfscl_freelockowner(lp, 0);
			}
			free(lfhp, M_TEMP);
		}
		SLIST_INIT(&lfh);

		NFSLOCKCLSTATE();
		if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
			    hz);
terminate:
		if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
			clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
			NFSUNLOCKCLSTATE();
			NFSFREECRED(cred);
			wakeup((caddr_t)clp);
			return;
		}
		NFSUNLOCKCLSTATE();
	}
}

/*
 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
 */
void
nfscl_initiate_recovery(struct nfsclclient *clp)
{

	if (clp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
	NFSUNLOCKCLSTATE();
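	/* Wake up the renew thread, which performs the actual recovery. */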
	wakeup((caddr_t)clp);
}

/*
 * Dump out the state stuff for debugging.
 */
void
nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
    int lockowner, int locks)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;

	clp = nmp->nm_clp;
	if (clp == NULL) {
		printf("nfscl dumpstate NULL clp\n");
		return;
	}
	NFSLOCKCLSTATE();
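	/* First, dump the state hanging off delegations, then the regular opens. */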
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			if (openowner && !LIST_EMPTY(&owp->nfsow_open))
				printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
				    owp->nfsow_owner[0], owp->nfsow_owner[1],
				    owp->nfsow_owner[2], owp->nfsow_owner[3],
				    owp->nfsow_seqid);
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (opens)
					printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
					    op->nfso_stateid.other[0], op->nfso_stateid.other[1],
					    op->nfso_stateid.other[2], op->nfso_opencnt,
					    op->nfso_fh[12]);
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lockowner)
						printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
						    lp->nfsl_owner[0], lp->nfsl_owner[1],
						    lp->nfsl_owner[2], lp->nfsl_owner[3],
						    lp->nfsl_seqid,
						    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
						    lp->nfsl_stateid.other[2]);
					LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
						if (locks)
#ifdef __FreeBSD__
							printf("lck typ=%d fst=%ju end=%ju\n",
							    lop->nfslo_type, (intmax_t)lop->nfslo_first,
							    (intmax_t)lop->nfslo_end);
#else
							printf("lck typ=%d fst=%qd end=%qd\n",
							    lop->nfslo_type, lop->nfslo_first,
							    lop->nfslo_end);
#endif
					}
				}
			}
		}
	}
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		if (openowner && !LIST_EMPTY(&owp->nfsow_open))
			printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
			    owp->nfsow_owner[0], owp->nfsow_owner[1],
			    owp->nfsow_owner[2], owp->nfsow_owner[3],
			    owp->nfsow_seqid);
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (opens)
				printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
				    op->nfso_stateid.other[0], op->nfso_stateid.other[1],
				    op->nfso_stateid.other[2], op->nfso_opencnt,
				    op->nfso_fh[12]);
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lockowner)
					printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
					    lp->nfsl_owner[0], lp->nfsl_owner[1],
					    lp->nfsl_owner[2], lp->nfsl_owner[3],
					    lp->nfsl_seqid,
					    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
					    lp->nfsl_stateid.other[2]);
				LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
					if (locks)
#ifdef __FreeBSD__
						printf("lck typ=%d fst=%ju end=%ju\n",
						    lop->nfslo_type, (intmax_t)lop->nfslo_first,
						    (intmax_t)lop->nfslo_end);
#else
						printf("lck typ=%d fst=%qd end=%qd\n",
						    lop->nfslo_type, lop->nfslo_first,
						    lop->nfslo_end);
#endif
				}
			}
		}
	}
	NFSUNLOCKCLSTATE();
}

/*
 * Check for duplicate open owners and opens.
 * (Only used as a diagnostic aid.)
 */
void
nfscl_dupopen(vnode_t vp, int dupopens)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *owp2;
	struct nfsclopen *op, *op2;
	struct nfsfh *nfhp;

	clp = VFSTONFS(vp->v_mount)->nm_clp;
	if (clp == NULL) {
		printf("nfscl dupopen NULL clp\n");
		return;
	}
	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();

	/*
	 * First, search for duplicate owners.
	 * These should never happen!
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
			if (owp != owp2 &&
			    !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
			    NFSV4CL_LOCKNAMELEN)) {
				NFSUNLOCKCLSTATE();
				printf("DUP OWNER\n");
				nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
				return;
			}
		}
	}

	/*
	 * Now, search for duplicate stateids.
	 * These shouldn't happen, either.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
				LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
					if (op != op2 &&
					    (op->nfso_stateid.other[0] != 0 ||
					    op->nfso_stateid.other[1] != 0 ||
					    op->nfso_stateid.other[2] != 0) &&
					    op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
					    op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
					    op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
						NFSUNLOCKCLSTATE();
						printf("DUP STATEID\n");
						nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
						return;
					}
				}
			}
		}
	}

	/*
	 * Now search for duplicate opens.
	 * Duplicate opens for the same owner
	 * should never occur. Other duplicates are
	 * possible and are checked for if "dupopens"
	 * is true.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
			if (nfhp->nfh_len == op2->nfso_fhlen &&
			    !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
				LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
					LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
						if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
						    !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
						    (!NFSBCMP(op->nfso_own->nfsow_owner,
						    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
						    dupopens)) {
							if (!NFSBCMP(op->nfso_own->nfsow_owner,
							    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
								NFSUNLOCKCLSTATE();
								printf("BADDUP OPEN\n");
							} else {
								NFSUNLOCKCLSTATE();
								printf("DUP OPEN\n");
							}
							nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0,
							    0);
							return;
						}
					}
				}
			}
		}
	}
	NFSUNLOCKCLSTATE();
}

/*
 * During close, find an open that needs to be dereferenced and
 * dereference it. If there are no more opens for this file,
 * log a message to that effect.
 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
 * on the file's vnode.
 * This is the safe way, since it is difficult to identify
 * which open the close is for and I/O can be performed after the
 * close(2) system call when a file is mmap'd.
 * If it returns 0 for success, there will be a referenced
 * clp returned via clpp.
 */
int
nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	int error, notdecr;

	error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nfhp = VTONFS(vp)->n_fhp;
	notdecr = 1;
	NFSLOCKCLSTATE();
	/*
	 * First, look for one under a delegation that was locally issued
	 * and just decrement the opencnt for it. Since all my Opens against
	 * the server are DENY_NONE, I don't see a problem with hanging
	 * onto them. (It is much easier to use one of the extant Opens
	 * that I already have on the server when a Delegation is recalled
	 * than to do fresh Opens.) Someday, I might need to rethink this.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				/*
				 * Since a delegation is for a file, there
				 * should never be more than one open for
				 * each openowner.
				 */
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfscdeleg opens");
				if (notdecr && op->nfso_opencnt > 0) {
					notdecr = 0;
					op->nfso_opencnt--;
					break;
				}
			}
		}
	}

	/* Now process the opens against the server. */
	LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
	    nfso_hash) {
		if (op->nfso_fhlen == nfhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
		    nfhp->nfh_len)) {
			/* Found an open, decrement cnt if possible */
			if (notdecr && op->nfso_opencnt > 0) {
				notdecr = 0;
				op->nfso_opencnt--;
			}
			/*
			 * There are more opens, so just return.
			 */
			if (op->nfso_opencnt > 0) {
				NFSUNLOCKCLSTATE();
				return (0);
			}
		}
	}
	NFSUNLOCKCLSTATE();
	if (notdecr)
		printf("nfscl: never fnd open\n");
	return (0);
}

int
nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfsmount *nmp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfsclopenhead delayed;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	struct nfsclrecalllayout *recallp;
	struct nfscllayout *lyp;
	int error;

	error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nmp = VFSTONFS(vp->v_mount);
	nfhp = VTONFS(vp)->n_fhp;
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	NFSLOCKCLSTATE();
	/*
	 * First get rid of the local Open structures, which should be no
	 * longer in use.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				KASSERT((op->nfso_opencnt == 0),
				    ("nfscl: bad open cnt on deleg"));
				nfscl_freeopen(op, 1, true);
			}
			nfscl_freeopenowner(owp, 1);
		}
	}

	/* Return any layouts marked return on close. */
	nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp,
	    &lyp);

	/* Now process the opens against the server. */
	LIST_INIT(&delayed);
lookformore:
	LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
	    nfso_hash) {
		if (op->nfso_fhlen == nfhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
		    nfhp->nfh_len)) {
			/* Found an open, close it. */
#ifdef DIAGNOSTIC
			KASSERT((op->nfso_opencnt == 0),
			    ("nfscl: bad open cnt on server (%d)",
			    op->nfso_opencnt));
#endif
			NFSUNLOCKCLSTATE();
			if (NFSHASNFSV4N(nmp))
				error = nfsrpc_doclose(nmp, op, p, false, true);
			else
				error = nfsrpc_doclose(nmp, op, p, true, true);
			NFSLOCKCLSTATE();
			if (error == NFSERR_DELAY) {
				nfscl_unlinkopen(op);
				op->nfso_own = NULL;
				LIST_INSERT_HEAD(&delayed, op, nfso_list);
			}
			goto lookformore;
		}
	}
	nfscl_clrelease(clp);

	/* Now, wait for any layout that is returned upon close. */
	if (lyp != NULL) {
		while ((lyp->nfsly_flags & NFSLY_RETURNED) == 0) {
			if (NFSCL_FORCEDISM(nmp->nm_mountp)) {
				lyp = NULL;
				break;
			}
			msleep(lyp, NFSCLSTATEMUTEXPTR, PZERO, "nfslroc", hz);
		}
		if (lyp != NULL)
			nfscl_freelayout(lyp);
	}

	NFSUNLOCKCLSTATE();
	/*
	 * recallp has been set NULL by nfscl_retoncloselayout() if it was
	 * used by the function, but calling free() with a NULL pointer is ok.
	 */
	free(recallp, M_NFSLAYRECALL);

	/* Now, loop retrying the delayed closes. */
	LIST_FOREACH_SAFE(op, &delayed, nfso_list, nop) {
		nfsrpc_doclose(nmp, op, p, true, false);
		LIST_REMOVE(op, nfso_list);
		nfscl_freeopen(op, 0, false);
	}
	return (0);
}

/*
 * Return all delegations on this client.
 * (Must be called with client sleep lock.)
 */
static void
nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p,
    struct nfscldeleghead *dhp)
{
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;

	cred = newnfs_getcred();
	TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
		nfscl_cleandeleg(dp);
		(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
		if (dhp != NULL) {
			nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
			TAILQ_INSERT_HEAD(dhp, dp, nfsdl_list);
		} else
			nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
	}
	NFSFREECRED(cred);
}

/*
 * Return any delegation for this vp.
 */
void
nfscl_delegreturnvp(struct vnode *vp, bool retdeleg, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct ucred *cred;
	struct nfsnode *np;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
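	/* Avoid the work when no delegation has ever been issued on this mount. */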
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return;
	}
	NFSUNLOCKMNT(nmp);
	np = VTONFS(vp);
	cred = newnfs_getcred();
	dp = NULL;
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp != NULL)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0) {
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
		NFSUNLOCKCLSTATE();
		if (retdeleg) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
		}
		free(dp, M_NFSCLDELEG);
	} else
		NFSUNLOCKCLSTATE();
	NFSFREECRED(cred);
}

/*
 * Do a callback RPC.
 */
void
nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
{
	int clist, gotseq_ok, i, j, k, op, rcalls;
	u_int32_t *tl;
	struct nfsclclient *clp;
	struct nfscldeleg *dp = NULL;
	int numops, taglen = -1, error = 0, trunc __unused;
	u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
	u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
	vnode_t vp = NULL;
	struct nfsnode *np;
	struct vattr va;
	struct nfsfh *nfhp;
	mount_t mp;
	nfsattrbit_t attrbits, rattrbits;
	nfsv4stateid_t stateid;
	uint32_t seqid, slotid = 0, highslot, cachethis __unused;
	uint8_t sessionid[NFSX_V4SESSIONID];
	struct mbuf *rep;
	struct nfscllayout *lyp;
	uint64_t filesid[2], len, off;
	int changed, gotone, laytype, recalltype;
	uint32_t iomode;
	struct nfsclrecalllayout *recallp = NULL;
	struct nfsclsession *tsep;

	gotseq_ok = 0;
	nfsrvd_rephead(nd);
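	/* Parse the callback compound header: tag, minor version and numops. */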
	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
	taglen = fxdr_unsigned(int, *tl);
	if (taglen < 0 || taglen > NFSV4_OPAQUELIMIT) {
		error = EBADRPC;
		taglen = -1;
		goto nfsmout;
	}
	if (taglen <= NFSV4_SMALLSTR)
		tagstr = tag;
	else
		tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
	error = nfsrv_mtostr(nd, tagstr, taglen);
	if (error) {
		if (taglen > NFSV4_SMALLSTR)
			free(tagstr, M_TEMP);
		taglen = -1;
		goto nfsmout;
	}
	(void) nfsm_strtom(nd, tag, taglen);
	if (taglen > NFSV4_SMALLSTR) {
		free(tagstr, M_TEMP);
	}
	NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
	NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	minorvers = fxdr_unsigned(u_int32_t, *tl++);
	if (minorvers != NFSV4_MINORVERSION &&
	    minorvers != NFSV41_MINORVERSION &&
	    minorvers != NFSV42_MINORVERSION)
		nd->nd_repstat = NFSERR_MINORVERMISMATCH;
	cbident = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_repstat)
		numops = 0;
	else
		numops = fxdr_unsigned(int, *tl);
	/*
	 * Loop around doing the sub ops.
	 */
	for (i = 0; i < numops; i++) {
		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
		*repp++ = *tl;
		op = fxdr_unsigned(int, *tl);
		nd->nd_procnum = op;
		if (i == 0 && op != NFSV4OP_CBSEQUENCE && minorvers !=
		    NFSV4_MINORVERSION) {
			nd->nd_repstat = NFSERR_OPNOTINSESS;
			*repp = nfscl_errmap(nd, minorvers);
			retops++;
			break;
		}
		if (op < NFSV4OP_CBGETATTR ||
		    (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
		    (op > NFSV4OP_CBNOTIFYDEVID &&
		    minorvers == NFSV41_MINORVERSION) ||
		    (op > NFSV4OP_CBOFFLOAD &&
		    minorvers == NFSV42_MINORVERSION)) {
			nd->nd_repstat = NFSERR_OPILLEGAL;
			*repp = nfscl_errmap(nd, minorvers);
			retops++;
			break;
		}
		if (op < NFSV42_CBNOPS)
			nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
		switch (op) {
		case NFSV4OP_CBGETATTR:
			NFSCL_DEBUG(4, "cbgetattr\n");
			mp = NULL;
			vp = NULL;
			error = nfsm_getfh(nd, &nfhp);
			if (!error)
				error = nfsrv_getattrbits(nd, &attrbits,
				    NULL, NULL);
			if (!error) {
				mp = nfscl_getmnt(minorvers, sessionid, cbident,
				    &clp);
				if (mp == NULL)
					error = NFSERR_SERVERFAULT;
			}
			if (!error) {
				error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
				    nfhp->nfh_len, p, &np);
				if (!error)
					vp = NFSTOV(np);
			}
			if (!error) {
				NFSZERO_ATTRBIT(&rattrbits);
				NFSLOCKCLSTATE();
				dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
				    nfhp->nfh_len);
				if (dp != NULL) {
					if (NFSISSET_ATTRBIT(&attrbits,
					    NFSATTRBIT_SIZE)) {
						if (vp != NULL)
							va.va_size = np->n_size;
						else
							va.va_size =
							    dp->nfsdl_size;
						NFSSETBIT_ATTRBIT(&rattrbits,
						    NFSATTRBIT_SIZE);
					}
					if (NFSISSET_ATTRBIT(&attrbits,
					    NFSATTRBIT_CHANGE)) {
						va.va_filerev =
						    dp->nfsdl_change;
						if (vp == NULL ||
						    (np->n_flag & NDELEGMOD))
							va.va_filerev++;
						NFSSETBIT_ATTRBIT(&rattrbits,
						    NFSATTRBIT_CHANGE);
					}
				} else
					error = NFSERR_SERVERFAULT;
				NFSUNLOCKCLSTATE();
			}
			if (vp != NULL)
				vrele(vp);
			if (mp != NULL)
				vfs_unbusy(mp);
			if (nfhp != NULL)
				free(nfhp, M_NFSFH);
			if (!error)
				(void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
				    NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
				    (uint64_t)0, NULL, false, false, false, 0,
				    NULL, false);
			break;
		case NFSV4OP_CBRECALL:
			NFSCL_DEBUG(4, "cbrecall\n");
			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
			    NFSX_UNSIGNED);
			stateid.seqid = *tl++;
			NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
			    NFSX_STATEIDOTHER);
			tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
			trunc = fxdr_unsigned(int, *tl);
			error = nfsm_getfh(nd, &nfhp);
			if (!error) {
				NFSLOCKCLSTATE();
				if (minorvers == NFSV4_MINORVERSION)
					clp = nfscl_getclnt(cbident);
				else
					clp = nfscl_getclntsess(sessionid);
				if (clp != NULL)
					nfscl_startdelegrecall(clp, nfhp);
				else
					error = NFSERR_SERVERFAULT;
				NFSUNLOCKCLSTATE();
			}
			if (nfhp != NULL)
				free(nfhp, M_NFSFH);
			break;
		case NFSV4OP_CBLAYOUTRECALL:
			NFSCL_DEBUG(4, "cblayrec\n");
			nfhp = NULL;
			NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
			laytype = fxdr_unsigned(int, *tl++);
			iomode = fxdr_unsigned(uint32_t, *tl++);
			if (newnfs_true == *tl++)
				changed = 1;
			else
				changed = 0;
			recalltype = fxdr_unsigned(int, *tl);
			NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
			    laytype, iomode, changed, recalltype);
			recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
			    M_WAITOK);
			if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
			    laytype != NFSLAYOUT_FLEXFILE)
				error = NFSERR_NOMATCHLAYOUT;
			else if (recalltype == NFSLAYOUTRETURN_FILE) {
				error = nfsm_getfh(nd, &nfhp);
				NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
				if (error != 0)
					goto nfsmout;
				NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
				    NFSX_STATEID);
				off = fxdr_hyper(tl); tl += 2;
				len = fxdr_hyper(tl); tl += 2;
				stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
				NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
				if (minorvers == NFSV4_MINORVERSION)
					error = NFSERR_NOTSUPP;
				NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
				    (uintmax_t)off, (uintmax_t)len,
				    stateid.seqid, error);
				if (error == 0) {
					NFSLOCKCLSTATE();
					clp = nfscl_getclntsess(sessionid);
					NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
					if (clp != NULL) {
						lyp = nfscl_findlayout(clp,
						    nfhp->nfh_fh,
						    nfhp->nfh_len);
						NFSCL_DEBUG(4, "cblyp=%p\n",
						    lyp);
						if (lyp != NULL &&
						    (lyp->nfsly_flags &
						    (NFSLY_FILES |
						    NFSLY_FLEXFILE)) != 0 &&
						    !NFSBCMP(stateid.other,
						    lyp->nfsly_stateid.other,
						    NFSX_STATEIDOTHER)) {
							error =
							    nfscl_layoutrecall(
							    recalltype,
							    lyp, iomode, off,
							    len, stateid.seqid,
							    0, 0, NULL,
							    recallp);
							if (error == 0 &&
							    stateid.seqid >
							    lyp->nfsly_stateid.seqid)
								lyp->nfsly_stateid.seqid =
								    stateid.seqid;
							recallp = NULL;
							wakeup(clp);
							NFSCL_DEBUG(4,
							    "aft layrcal=%d "
							    "layseqid=%d\n",
							    error,
							    lyp->nfsly_stateid.seqid);
						} else
							error =
							    NFSERR_NOMATCHLAYOUT;
					} else
						error = NFSERR_NOMATCHLAYOUT;
					NFSUNLOCKCLSTATE();
				}
				free(nfhp, M_NFSFH);
			} else if (recalltype == NFSLAYOUTRETURN_FSID) {
				NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
				filesid[0] = fxdr_hyper(tl); tl += 2;
				filesid[1] = fxdr_hyper(tl); tl += 2;
				gotone = 0;
				NFSLOCKCLSTATE();
				clp = nfscl_getclntsess(sessionid);
				if (clp != NULL) {
					TAILQ_FOREACH(lyp, &clp->nfsc_layout,
					    nfsly_list) {
						if (lyp->nfsly_filesid[0] ==
						    filesid[0] &&
						    lyp->nfsly_filesid[1] ==
						    filesid[1]) {
							error =
3825
nfscl_layoutrecall(
3826
recalltype,
3827
lyp, iomode, 0,
3828
UINT64_MAX,
3829
lyp->nfsly_stateid.seqid,
3830
0, 0, NULL,
3831
recallp);
3832
recallp = NULL;
3833
gotone = 1;
3834
}
3835
}
3836
if (gotone != 0)
3837
wakeup(clp);
3838
else
3839
error = NFSERR_NOMATCHLAYOUT;
3840
} else
3841
error = NFSERR_NOMATCHLAYOUT;
3842
NFSUNLOCKCLSTATE();
3843
} else if (recalltype == NFSLAYOUTRETURN_ALL) {
3844
gotone = 0;
3845
NFSLOCKCLSTATE();
3846
clp = nfscl_getclntsess(sessionid);
3847
if (clp != NULL) {
3848
TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3849
nfsly_list) {
3850
error = nfscl_layoutrecall(
3851
recalltype, lyp, iomode, 0,
3852
UINT64_MAX,
3853
lyp->nfsly_stateid.seqid,
3854
0, 0, NULL, recallp);
3855
recallp = NULL;
3856
gotone = 1;
3857
}
3858
if (gotone != 0)
3859
wakeup(clp);
3860
else
3861
error = NFSERR_NOMATCHLAYOUT;
3862
} else
3863
error = NFSERR_NOMATCHLAYOUT;
3864
NFSUNLOCKCLSTATE();
3865
} else
3866
error = NFSERR_NOMATCHLAYOUT;
3867
if (recallp != NULL) {
3868
free(recallp, M_NFSLAYRECALL);
3869
recallp = NULL;
3870
}
3871
break;
3872
case NFSV4OP_CBSEQUENCE:
3873
if (i != 0) {
3874
error = NFSERR_SEQUENCEPOS;
3875
break;
3876
}
3877
NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3878
5 * NFSX_UNSIGNED);
3879
bcopy(tl, sessionid, NFSX_V4SESSIONID);
3880
tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3881
seqid = fxdr_unsigned(uint32_t, *tl++);
3882
slotid = fxdr_unsigned(uint32_t, *tl++);
3883
highslot = fxdr_unsigned(uint32_t, *tl++);
3884
cachethis = *tl++;
3885
/* Throw away the referring call stuff. */
3886
clist = fxdr_unsigned(int, *tl);
3887
for (j = 0; j < clist; j++) {
3888
NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3889
NFSX_UNSIGNED);
3890
tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3891
rcalls = fxdr_unsigned(int, *tl);
3892
for (k = 0; k < rcalls; k++) {
3893
NFSM_DISSECT(tl, uint32_t *,
3894
2 * NFSX_UNSIGNED);
3895
}
3896
}
3897
NFSLOCKCLSTATE();
3898
clp = nfscl_getclntsess(sessionid);
3899
if (clp == NULL)
3900
error = NFSERR_SERVERFAULT;
3901
if (error == 0) {
3902
tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3903
error = nfsv4_seqsession(seqid, slotid,
3904
highslot, tsep->nfsess_cbslots, &rep,
3905
tsep->nfsess_backslots);
3906
}
3907
NFSUNLOCKCLSTATE();
3908
if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3909
gotseq_ok = 1;
3910
if (rep != NULL) {
3911
/*
3912
* Handle a reply for a retried
3913
* callback. The reply will be
3914
* re-inserted in the session cache
3915
* by the nfsv4_seqsess_cacherep() call
3916
* after out:
3917
*/
3918
KASSERT(error == NFSERR_REPLYFROMCACHE,
3919
("cbsequence: non-NULL rep"));
3920
NFSCL_DEBUG(4, "Got cbretry\n");
3921
m_freem(nd->nd_mreq);
3922
nd->nd_mreq = rep;
3923
rep = NULL;
3924
goto out;
3925
}
3926
NFSM_BUILD(tl, uint32_t *,
3927
NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3928
bcopy(sessionid, tl, NFSX_V4SESSIONID);
3929
tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3930
*tl++ = txdr_unsigned(seqid);
3931
*tl++ = txdr_unsigned(slotid);
3932
*tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3933
*tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3934
}
3935
break;
3936
case NFSV4OP_CBRECALLSLOT:
3937
NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
3938
highslot = fxdr_unsigned(uint32_t, *tl);
3939
NFSLOCKCLSTATE();
3940
clp = nfscl_getclntsess(sessionid);
3941
if (clp == NULL)
3942
error = NFSERR_SERVERFAULT;
3943
if (error == 0) {
3944
tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3945
mtx_lock(&tsep->nfsess_mtx);
3946
if ((highslot + 1) < tsep->nfsess_foreslots) {
3947
tsep->nfsess_foreslots = (highslot + 1);
3948
nfs_resetslots(tsep);
3949
}
3950
mtx_unlock(&tsep->nfsess_mtx);
3951
}
3952
NFSUNLOCKCLSTATE();
3953
break;
3954
case NFSV4OP_CBRECALLANY:
3955
NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
3956
i = fxdr_unsigned(int, *tl++);
3957
j = fxdr_unsigned(int, *tl);
3958
if (i < 0 || j != 1)
3959
error = NFSERR_BADXDR;
3960
if (error == 0) {
3961
NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
3962
j = fxdr_unsigned(int, *tl);
3963
if (i < 100)
3964
i = 100;
3965
else if (i > 100000)
3966
i = 100000;
3967
NFSLOCKCLSTATE();
3968
clp = nfscl_getclntsess(sessionid);
3969
if (clp == NULL)
3970
error = NFSERR_SERVERFAULT;
3971
if (((j & NFSRCA4_RDATA_DLG) != 0 ||
3972
(j & NFSRCA4_WDATA_DLG) != 0) &&
3973
error == 0 && i <
3974
clp->nfsc_deleghighwater)
3975
clp->nfsc_deleghighwater = i;
3976
if (error == 0 &&
3977
((!NFSHASFLEXFILE(clp->nfsc_nmp) &&
3978
(j & NFSRCA4_FILE_LAYOUT) != 0 &&
3979
i < clp->nfsc_layouthighwater) ||
3980
(NFSHASFLEXFILE(clp->nfsc_nmp) &&
3981
(j & (NFSRCA4_FF_LAYOUT_READ |
3982
NFSRCA4_FF_LAYOUT_RW)) != 0 &&
3983
i < clp->nfsc_layouthighwater)))
3984
clp->nfsc_layouthighwater = i;
3985
NFSUNLOCKCLSTATE();
3986
}
3987
break;
3988
case NFSV4OP_CBNOTIFY:
3989
case NFSV4OP_CBRECALLOBJAVAIL:
3990
case NFSV4OP_CBNOTIFYLOCK:
3991
			/*
			 * These callbacks are not necessarily optional,
			 * so I think it is better to reply NFS_OK than
			 * NFSERR_NOTSUPP.
			 * All provide information for which the FreeBSD client
			 * does not currently have a use.
			 * It is not clear that any of these can be generated
			 * by an NFSv4.1/4.2 server for this client.
			 */
			error = 0;
			NFSCL_DEBUG(1, "unsupp callback %d\n", op);
			break;
		case NFSV4OP_CBPUSHDELEG:
			error = NFSERR_REJECTDELEG;
			NFSCL_DEBUG(1, "unsupp callback %d\n", op);
			break;
		default:
			if (i == 0 && minorvers != NFSV4_MINORVERSION)
				error = NFSERR_OPNOTINSESS;
			else {
				NFSCL_DEBUG(1, "unsupp callback %d\n", op);
				error = NFSERR_NOTSUPP;
			}
			break;
		}
		if (error) {
			if (error == EBADRPC || error == NFSERR_BADXDR) {
				nd->nd_repstat = NFSERR_BADXDR;
			} else {
				nd->nd_repstat = error;
			}
			error = 0;
		}
		retops++;
		if (nd->nd_repstat) {
			*repp = nfscl_errmap(nd, minorvers);
			break;
		} else
			*repp = 0;	/* NFS4_OK */
	}
nfsmout:
	if (recallp != NULL)
		free(recallp, M_NFSLAYRECALL);
	if (error) {
		if (error == EBADRPC || error == NFSERR_BADXDR)
			nd->nd_repstat = NFSERR_BADXDR;
		else
			printf("nfsv4 comperr1=%d\n", error);
	}
	if (taglen == -1) {
		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		*tl++ = 0;
		*tl = 0;
	} else {
		*retopsp = txdr_unsigned(retops);
	}
	*nd->nd_errp = nfscl_errmap(nd, minorvers);
out:
	if (gotseq_ok != 0) {
		rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
		NFSLOCKCLSTATE();
		clp = nfscl_getclntsess(sessionid);
		if (clp != NULL) {
			tsep = nfsmnt_mdssession(clp->nfsc_nmp);
			nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
			    NFSERR_OK, &rep);
			NFSUNLOCKCLSTATE();
		} else {
			NFSUNLOCKCLSTATE();
			m_freem(rep);
		}
	}
}

/*
 * Generate the next cbident value. Basically just increment a static value
 * and then check that it isn't already in the list, if it has wrapped around.
 */
static u_int32_t
nfscl_nextcbident(void)
{
	struct nfsclclient *clp;
	int matched;
	static u_int32_t nextcbident = 0;
	static int haswrapped = 0;

	nextcbident++;
	if (nextcbident == 0)
		haswrapped = 1;
	if (haswrapped) {
		/*
		 * Search the clientid list for one already using this cbident.
		 */
		do {
			matched = 0;
			NFSLOCKCLSTATE();
			LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
				if (clp->nfsc_cbident == nextcbident) {
					matched = 1;
					break;
				}
			}
			NFSUNLOCKCLSTATE();
			if (matched == 1)
				nextcbident++;
		} while (matched);
	}
	return (nextcbident);
}
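
/*
 * Illustrative userland sketch (not part of this file) of the
 * allocation pattern used by nfscl_nextcbident() above: bump a
 * counter and, only once it has wrapped back to 0, re-scan the set
 * of identifiers in use until an unused value is found. The
 * example_* names are hypothetical.
 */
#if 0
#include <stdint.h>

static uint32_t
example_nextident(const uint32_t *inuse, int n)
{
	static uint32_t next = 0;
	static int wrapped = 0;
	int i, matched;

	next++;
	if (next == 0)
		wrapped = 1;
	if (wrapped) {
		do {
			/* Linear scan; restart after every collision. */
			matched = 0;
			for (i = 0; i < n; i++) {
				if (inuse[i] == next) {
					matched = 1;
					next++;
					break;
				}
			}
		} while (matched);
	}
	return (next);
}
#endif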

/*
 * Get the mount point related to a given cbident or session and busy it.
 */
static mount_t
nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
    struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	mount_t mp;
	int error;
	struct nfsclsession *tsep;

	*clpp = NULL;
	NFSLOCKCLSTATE();
	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
		tsep = nfsmnt_mdssession(clp->nfsc_nmp);
		if (minorvers == NFSV4_MINORVERSION) {
			if (clp->nfsc_cbident == cbident)
				break;
		} else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
		    NFSX_V4SESSIONID))
			break;
	}
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (NULL);
	}
	mp = clp->nfsc_nmp->nm_mountp;
	vfs_ref(mp);
	NFSUNLOCKCLSTATE();
	error = vfs_busy(mp, 0);
	vfs_rel(mp);
	if (error != 0)
		return (NULL);
	*clpp = clp;
	return (mp);
}
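
/*
 * A minimal sketch of the reference/busy dance used above, assuming
 * the stock FreeBSD vfs_ref()/vfs_busy()/vfs_rel() KPIs: the
 * temporary reference keeps the mount from being freed while the
 * state mutex is dropped, vfs_busy() then takes the busy count the
 * callback needs, and the temporary reference is dropped either way.
 * The example_ wrapper name is hypothetical.
 */
#if 0
static struct mount *
example_busymnt(struct mount *mp)
{
	int error;

	vfs_ref(mp);		/* pin mp while no lock is held */
	error = vfs_busy(mp, 0);
	vfs_rel(mp);		/* the busy count, if any, now holds mp */
	return (error != 0 ? NULL : mp);
}
#endif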

/*
 * Get the clientid pointer related to a given cbident.
 */
static struct nfsclclient *
nfscl_getclnt(u_int32_t cbident)
{
	struct nfsclclient *clp;

	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
		if (clp->nfsc_cbident == cbident)
			break;
	return (clp);
}

/*
 * Get the clientid pointer related to a given sessionid.
 */
static struct nfsclclient *
nfscl_getclntsess(uint8_t *sessionid)
{
	struct nfsclclient *clp;
	struct nfsclsession *tsep;

	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
		tsep = nfsmnt_mdssession(clp->nfsc_nmp);
		if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
		    NFSX_V4SESSIONID))
			break;
	}
	return (clp);
}

/*
 * Search for a lock conflict locally on the client. A conflict occurs if
 * - the owners differ, the byte ranges overlap and at least one of them
 *   is a write lock or this is an unlock.
 */
static int
nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
    struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
    struct nfscllock **lopp)
{
	struct nfsclopen *op;
	int ret;

	if (dp != NULL) {
		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
		if (ret)
			return (ret);
	}
	LIST_FOREACH(op, NFSCLOPENHASH(clp, fhp, fhlen), nfso_hash) {
		if (op->nfso_fhlen == fhlen &&
		    !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
			ret = nfscl_checkconflict(&op->nfso_lock, nlop,
			    own, lopp);
			if (ret)
				return (ret);
		}
	}
	return (0);
}

static int
nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
    u_int8_t *own, struct nfscllock **lopp)
{
	struct nfscllockowner *lp;
	struct nfscllock *lop;

	LIST_FOREACH(lp, lhp, nfsl_list) {
		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= nlop->nfslo_end)
					break;
				if (lop->nfslo_end <= nlop->nfslo_first)
					continue;
				if (lop->nfslo_type == F_WRLCK ||
				    nlop->nfslo_type == F_WRLCK ||
				    nlop->nfslo_type == F_UNLCK) {
					if (lopp != NULL)
						*lopp = lop;
					return (NFSERR_DENIED);
				}
			}
		}
	}
	return (0);
}
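
/*
 * A sketch of the conflict test applied above, assuming the same
 * half-open [first, end) byte-range convention: two locks conflict
 * when their owners differ, their ranges overlap and at least one
 * side is a write lock (an unlock probe is treated like a write so
 * that any overlapping lock is reported). The example_* names are
 * hypothetical.
 */
#if 0
#include <stdint.h>

struct example_lock {
	uint64_t first;		/* first byte covered */
	uint64_t end;		/* first byte beyond the range */
	int	 iswrite;
};

static int
example_conflicts(const struct example_lock *a, const struct example_lock *b,
    int sameowner)
{

	if (sameowner)
		return (0);
	/* Half-open ranges overlap iff each starts before the other ends. */
	if (a->first >= b->end || b->first >= a->end)
		return (0);
	return (a->iswrite || b->iswrite);
}
#endif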

/*
 * Check for a local conflicting lock.
 */
int
nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
    u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
{
	struct nfscllock *lop, nlck;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error;

	nlck.nfslo_type = fl->l_type;
	nlck.nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlck.nfslo_end = NFS64BITSSET;
	} else {
		nlck.nfslo_end = off + len;
		if (nlck.nfslo_end <= nlck.nfslo_first)
			return (NFSERR_INVAL);
	}
	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
	    &nlck, own, dp, &lop);
	if (error != 0) {
		fl->l_whence = SEEK_SET;
		fl->l_start = lop->nfslo_first;
		if (lop->nfslo_end == NFS64BITSSET)
			fl->l_len = 0;
		else
			fl->l_len = lop->nfslo_end - lop->nfslo_first;
		fl->l_pid = (pid_t)0;
		fl->l_type = lop->nfslo_type;
		error = -1;			/* no RPC required */
	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
	    fl->l_type == F_RDLCK)) {
		/*
		 * The delegation ensures that there isn't a conflicting
		 * lock on the server, so return -1 to indicate an RPC
		 * isn't required.
		 */
		fl->l_type = F_UNLCK;
		error = -1;
	}
	NFSUNLOCKCLSTATE();
	return (error);
}
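
/*
 * Sketch of the off/len to [first, end) conversion performed at the
 * start of nfscl_lockt() above, where a length equal to NFS64BITSSET
 * (all bits set) means "to end of file". Userland stand-in with
 * hypothetical example_* names.
 */
#if 0
#include <stdint.h>

#define	EXAMPLE_EOF	UINT64_MAX	/* stand-in for NFS64BITSSET */

/* Returns 0 on success, -1 when the range would be empty or wrap. */
static int
example_mkrange(uint64_t off, uint64_t len, uint64_t *firstp, uint64_t *endp)
{

	*firstp = off;
	if (len == EXAMPLE_EOF) {
		*endp = EXAMPLE_EOF;
		return (0);
	}
	*endp = off + len;
	if (*endp <= *firstp)
		return (-1);	/* mirrors the NFSERR_INVAL check */
	return (0);
}
#endif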

/*
 * Handle Recall of a delegation.
 * The clp must be exclusive locked when this is called.
 */
static int
nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
    int called_from_renewthread, vnode_t *vpp)
{
	struct nfsclowner *owp, *lowp, *nowp;
	struct nfsclopen *op, *lop;
	struct nfscllockowner *lp;
	struct nfscllock *lckp;
	struct nfsnode *np;
	int error = 0, ret;

	if (vp == NULL) {
		KASSERT(vpp != NULL, ("nfscl_recalldeleg: vpp NULL"));
		*vpp = NULL;
		/*
		 * First, get a vnode for the file. This is needed to do RPCs.
		 */
		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
		    dp->nfsdl_fhlen, p, &np);
		if (ret) {
			/*
			 * File isn't open, so nothing to move over to the
			 * server.
			 */
			return (0);
		}
		vp = NFSTOV(np);
		*vpp = vp;
	} else {
		np = VTONFS(vp);
	}
	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;

	/*
	 * Ok, if it's a write delegation, flush data to the server, so
	 * that close/open consistency is retained.
	 */
	ret = 0;
	NFSLOCKNODE(np);
	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
		np->n_flag |= NDELEGRECALL;
		NFSUNLOCKNODE(np);
		ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
		NFSLOCKNODE(np);
		np->n_flag &= ~NDELEGRECALL;
	}
	NFSINVALATTRCACHE(np);
	NFSUNLOCKNODE(np);
	if (ret == EIO && called_from_renewthread != 0) {
		/*
		 * If the flush failed with EIO for the renew thread,
		 * return now, so that the dirty buffer will be flushed
		 * later.
		 */
		return (ret);
	}

	/*
	 * Now, for each openowner with opens issued locally, move them
	 * over to state against the server.
	 */
	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
		lop = LIST_FIRST(&lowp->nfsow_open);
		if (lop != NULL) {
			if (LIST_NEXT(lop, nfso_list) != NULL)
				panic("nfsdlg mult opens");
			/*
			 * Look for the same openowner against the server.
			 */
			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
				if (!NFSBCMP(lowp->nfsow_owner,
				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
					newnfs_copycred(&dp->nfsdl_cred, cred);
					ret = nfscl_moveopen(vp, clp, nmp, lop,
					    owp, dp, cred, p);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER ||
					    ret == NFSERR_BADSESSION)
						return (ret);
					if (ret) {
						nfscl_freeopen(lop, 1, true);
						if (!error)
							error = ret;
					}
					break;
				}
			}

			/*
			 * If no openowner found, create one and get an open
			 * for it.
			 */
			if (owp == NULL) {
				nowp = malloc(
				    sizeof (struct nfsclowner), M_NFSCLOWNER,
				    M_WAITOK);
				nfscl_newopen(clp, NULL, &owp, &nowp, &op,
				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
				    dp->nfsdl_fhlen, NULL, NULL);
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_moveopen(vp, clp, nmp, lop,
				    owp, dp, cred, p);
				if (ret) {
					nfscl_freeopenowner(owp, 0);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER ||
					    ret == NFSERR_BADSESSION)
						return (ret);
					if (ret) {
						nfscl_freeopen(lop, 1, true);
						if (!error)
							error = ret;
					}
				}
			}
		}
	}

	/*
	 * Now, get byte range locks for any locks done locally.
	 */
	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
			if (ret == NFSERR_STALESTATEID ||
			    ret == NFSERR_STALEDONTRECOVER ||
			    ret == NFSERR_STALECLIENTID ||
			    ret == NFSERR_BADSESSION)
				return (ret);
			if (ret && !error)
				error = ret;
		}
	}
	return (error);
}

/*
 * Move a locally issued open over to an owner on the state list.
 * SIDE EFFECT: If it needs to sleep (do an RPC), it unlocks clstate and
 * returns with it unlocked.
 */
static int
nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclopen *op, *nop;
	struct nfscldeleg *ndp;
	struct nfsnode *np;
	int error = 0, newone;

	/*
	 * First, look for an appropriate open. If found, just increment the
	 * opencnt in it.
	 */
	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
		    op->nfso_fhlen == lop->nfso_fhlen &&
		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
			op->nfso_opencnt += lop->nfso_opencnt;
			nfscl_freeopen(lop, 1, true);
			return (0);
		}
	}

	/* No appropriate open, so we have to do one against the server. */
	np = VTONFS(vp);
	nop = malloc(sizeof (struct nfsclopen) +
	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	nop->nfso_hash.le_prev = NULL;
	newone = 0;
	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
	    lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
	ndp = dp;
	if (NFSHASNFSV4N(nmp))
		error = nfscl_tryopen(nmp, vp, lop->nfso_fh, lop->nfso_fhlen,
		    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
		    NULL, 0, &ndp, 0, 0, cred, p);
	else
		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
		    np->n_v4->n4_fhlen, lop->nfso_fh, lop->nfso_fhlen,
		    lop->nfso_mode, op, NFS4NODENAME(np->n_v4),
		    np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
	if (error) {
		if (newone)
			nfscl_freeopen(op, 0, true);
	} else {
		op->nfso_mode |= lop->nfso_mode;
		op->nfso_opencnt += lop->nfso_opencnt;
		nfscl_freeopen(lop, 1, true);
	}
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (ndp != NULL) {
		/*
		 * What should I do with the returned delegation, since the
		 * delegation is being recalled? For now, just printf and
		 * throw it away.
		 */
		printf("Moveopen returned deleg\n");
		free(ndp, M_NFSCLDELEG);
	}
	return (error);
}

/*
 * Recall all delegations on this client.
 */
static void
nfscl_totalrecall(struct nfsclclient *clp)
{
	struct nfscldeleg *dp;

	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
			dp->nfsdl_flags |= NFSCLDL_RECALL;
	}
}

/*
 * Relock byte ranges. Called for delegation recall and state expiry.
 */
static int
nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
    NFSPROC_T *p)
{
	struct nfscllockowner *nlp;
	struct nfsfh *nfhp;
	struct nfsnode *np;
	u_int64_t off, len;
	int error, newone, donelocally;

	if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp)) {
		np = VTONFS(vp);
		NFSLOCKNODE(np);
		np->n_flag |= NMIGHTBELOCKED;
		NFSUNLOCKNODE(np);
	}

	off = lop->nfslo_first;
	len = lop->nfslo_end - lop->nfslo_first;
	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
	    clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
	    lp->nfsl_openowner, &nlp, &newone, &donelocally);
	if (error || donelocally)
		return (error);
	nfhp = VTONFS(vp)->n_fhp;
	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
	    nfhp->nfh_len, nlp, newone, 0, off,
	    len, lop->nfslo_type, cred, p);
	if (error)
		nfscl_freelockowner(nlp, 0);
	return (error);
}

/*
 * Called to re-open a file. Basically get a vnode for the file handle
 * and then call nfsrpc_openrpc() to do the rest.
 */
static int
nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
    u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsnode *np;
	vnode_t vp;
	int error;

	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
	if (error)
		return (error);
	vp = NFSTOV(np);
	if (NFSHASNFSV4N(nmp))
		error = nfscl_tryopen(nmp, vp, fhp, fhlen, fhp, fhlen, mode, op,
		    NULL, 0, dpp, 0, 0, cred, p);
	else if (np->n_v4 != NULL)
		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
		    cred, p);
	else
		error = EINVAL;
	vrele(vp);
	return (error);
}

/*
 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
 * fail.
 */
static int
nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
    u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
    int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
{
	int error;
	struct nfscldeleg *dp;

	dp = *ndpp;
	do {
		*ndpp = dp;	/* *ndpp needs to be set for retries. */
		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
		    0, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstryop");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			*ndpp = dp;	/* *ndpp needs to be set for retries. */
			error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
			    newfhlen, mode, op, name, namelen, ndpp, reclaim,
			    delegtype, cred, p, 1, 0);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstryop");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}
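
/*
 * The retry discipline shared by nfscl_tryopen() and the other
 * nfscl_try*() functions, reduced to a sketch: loop while the server
 * replies NFSERR_DELAY, napping between attempts, then make one more
 * pass with system credentials if the caller's credentials were
 * rejected. The example_* callback and error arguments are
 * hypothetical.
 */
#if 0
static int
example_tryop(int (*op)(void *arg, int syscred), void *arg,
    void (*nap)(void), int err_delay, int err_auth, int err_acces)
{
	int error;

	do {
		error = op(arg, 0);
		if (error == err_delay)
			nap();
	} while (error == err_delay);
	if (error == err_auth || error == err_acces) {
		/* Try again using system credentials. */
		do {
			error = op(arg, 1);
			if (error == err_delay)
				nap();
		} while (error == err_delay);
	}
	return (error);
}
#endif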

/*
 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
 * NFSERR_DELAY. Also, retry with system credentials, if the provided
 * credentials don't work.
 */
static int
nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
    int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
    u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;
	int error;

	do {
		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
		    reclaim, off, len, type, cred, p, 0);
		if (!error && nd->nd_repstat == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
			    "nfstrylck");
	} while (!error && nd->nd_repstat == NFSERR_DELAY);
	if (!error)
		error = nd->nd_repstat;
	if (error == EAUTH || error == EACCES) {
		/* Try again using root credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
			    newone, reclaim, off, len, type, cred, p, 1);
			if (!error && nd->nd_repstat == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
				    "nfstrylck");
		} while (!error && nd->nd_repstat == NFSERR_DELAY);
		if (!error)
			error = nd->nd_repstat;
	}
	return (error);
}

/*
 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
 * credentials fail.
 */
int
nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
    struct nfsmount *nmp, NFSPROC_T *p)
{
	int error;

	do {
		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstrydp");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstrydp");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}

/*
 * Try a close against the server. Just call nfsrpc_closerpc(),
 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
 * credentials fail.
 */
int
nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
    struct nfsmount *nmp, NFSPROC_T *p, bool loop_on_delayed)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;
	int error;

	do {
		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
		if (loop_on_delayed && error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstrycl");
	} while (loop_on_delayed && error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
			if (loop_on_delayed && error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstrycl");
		} while (loop_on_delayed && error == NFSERR_DELAY);
	}
	return (error);
}

/*
 * Decide if a delegation on a file permits close without flushing writes
 * to the server. This might be a big performance win in some environments.
 * (Not useful until the client does caching on local stable storage.)
 */
int
nfscl_mustflush(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp) || vp->v_type != VREG)
		return (1);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (1);
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (1);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags &
	    (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
	    NFSCLDL_WRITE &&
	    (dp->nfsdl_sizelimit >= np->n_size ||
	    !NFSHASSTRICT3530(nmp))) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();
	return (1);
}
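
/*
 * The core test in nfscl_mustflush() above, as a sketch: writes may
 * be left unflushed at close only when a write delegation is held
 * that is neither being recalled nor returned and, when the server
 * wants strict RFC 3530 behaviour, only while the file size stays
 * within the delegation's size limit. The EX_* flags and example_
 * name are hypothetical.
 */
#if 0
#include <stdint.h>

#define	EX_WRITE	0x01	/* write delegation held */
#define	EX_RECALL	0x02	/* delegation being recalled */
#define	EX_DELEGRET	0x04	/* delegation being returned */

static int
example_mustflush(uint32_t flags, uint64_t sizelimit, uint64_t filesize,
    int strict3530)
{

	if ((flags & (EX_WRITE | EX_RECALL | EX_DELEGRET)) != EX_WRITE)
		return (1);
	if (strict3530 && sizelimit < filesize)
		return (1);
	return (0);
}
#endif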

/*
 * See if a (write) delegation exists for this file.
 */
int
nfscl_nodeleg(vnode_t vp, int writedeleg)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp) || vp->v_type != VREG)
		return (1);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (1);
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (1);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
	    NFSCLDL_WRITE)) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();
	return (1);
}

/*
 * Look for an associated delegation that should be DelegReturned.
 */
int
nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct mount *mp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vp->v_mount);
	if (NFSHASPNFS(nmp))
		return (retcnt);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (retcnt);
	}
	NFSUNLOCKMNT(nmp);
	np = VTONFS(vp);
	mp = nmp->nm_mountp;
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on vp that has state, lock the client and
	 *   do a recall
	 * - return delegation with no state
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
				    "nfscld", hz);
				if (NFSCL_FORCEDISM(mp)) {
					dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
					NFSUNLOCKCLSTATE();
					return (0);
				}
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, mp);
					if (NFSCL_FORCEDISM(mp)) {
						dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
						if (igotlock)
							nfsv4_unlock(&clp->nfsc_lock, 0);
						NFSUNLOCKCLSTATE();
						return (0);
					}
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0, NULL);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*stp = dp->nfsdl_stateid;
			retcnt = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Look for associated delegation(s) that should be DelegReturned.
 */
int
nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
    nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct mount *mp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(fvp->v_mount);
	*gotfdp = 0;
	*gottdp = 0;
	if (NFSHASPNFS(nmp))
		return (retcnt);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (retcnt);
	}
	NFSUNLOCKMNT(nmp);
	mp = nmp->nm_mountp;
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on fvp that has state, lock the client and
	 *   do a recall
	 * - return delegation(s) with no state.
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		np = VTONFS(fvp);
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL && *gotfdp == 0) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
				    "nfscld", hz);
				if (NFSCL_FORCEDISM(mp)) {
					dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
					NFSUNLOCKCLSTATE();
					*gotfdp = 0;
					*gottdp = 0;
					return (0);
				}
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, mp);
					if (NFSCL_FORCEDISM(mp)) {
						dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
						if (igotlock)
							nfsv4_unlock(&clp->nfsc_lock, 0);
						NFSUNLOCKCLSTATE();
						*gotfdp = 0;
						*gottdp = 0;
						return (0);
					}
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0, NULL);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*fstp = dp->nfsdl_stateid;
			retcnt++;
			*gotfdp = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
		}
		if (igotlock) {
			nfsv4_unlock(&clp->nfsc_lock, 0);
			igotlock = 0;
		}
		if (tvp != NULL) {
			np = VTONFS(tvp);
			dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
			    np->n_fhp->nfh_len);
			if (dp != NULL && *gottdp == 0) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
					dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
					msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
					    "nfscld", hz);
					if (NFSCL_FORCEDISM(mp)) {
						NFSUNLOCKCLSTATE();
						*gotfdp = 0;
						*gottdp = 0;
						return (0);
					}
					continue;
				}
				LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
					if (!LIST_EMPTY(&owp->nfsow_open)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				*tstp = dp->nfsdl_stateid;
				retcnt++;
				*gottdp = 1;
				nfscl_cleandeleg(dp);
				nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
			}
		}
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Get a reference on the clientid associated with the mount point.
 * Return 1 if successful, 0 otherwise.
 */
int
nfscl_getref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;
	int ret;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, nmp->nm_mountp);
	ret = 1;
	if (NFSCL_FORCEDISM(nmp->nm_mountp))
		ret = 0;
	NFSUNLOCKCLSTATE();
	return (ret);
}

/*
 * Release a reference on a clientid acquired with the above call.
 */
void
nfscl_relref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Save the size attribute in the delegation, since the nfsnode
 * is going away.
 */
void
nfscl_reclaimnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		dp->nfsdl_size = np->n_size;
	NFSUNLOCKCLSTATE();
}

/*
 * Get the saved size attribute from the delegation, since this is a
 * newly allocated nfsnode.
 */
void
nfscl_newnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		np->n_size = dp->nfsdl_size;
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file, set the modtime
 * to the local clock time.
 */
void
nfscl_delegmodtime(struct vnode *vp, struct timespec *mtime)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return;
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
		if (mtime != NULL)
			dp->nfsdl_modtime = *mtime;
		else
			nanotime(&dp->nfsdl_modtime);
		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
	}
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file with a modtime set,
 * put that modtime in mtime.
 */
void
nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return;
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
		*mtime = dp->nfsdl_modtime;
	NFSUNLOCKCLSTATE();
}

static int
nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
{
	short *defaulterrp, *errp;

	if (!nd->nd_repstat)
		return (0);
	if (nd->nd_procnum == NFSPROC_NOOP)
		return (txdr_unsigned(nd->nd_repstat & 0xffff));
	if (nd->nd_repstat == EBADRPC)
		return (txdr_unsigned(NFSERR_BADXDR));
	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
	    nd->nd_repstat == NFSERR_OPILLEGAL)
		return (txdr_unsigned(nd->nd_repstat));
	if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
	    minorvers > NFSV4_MINORVERSION) {
		/* NFSv4.n error. */
		return (txdr_unsigned(nd->nd_repstat));
	}
	if (nd->nd_procnum < NFSV4OP_CBNOPS)
		errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
	else
		return (txdr_unsigned(nd->nd_repstat));
	while (*++errp)
		if (*errp == (short)nd->nd_repstat)
			return (txdr_unsigned(nd->nd_repstat));
	return (txdr_unsigned(*defaulterrp));
}
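
/*
 * Sketch of the table scan at the end of nfscl_errmap() above: each
 * per-operation list starts with the default error, is followed by
 * the errors that may be passed through verbatim, and is terminated
 * by 0. A status that is not in the list maps to the default (first)
 * entry. The example_ name is hypothetical.
 */
#if 0
static short
example_maperr(const short *errlist, short stat)
{
	const short *defaulterrp = errlist, *errp = errlist;

	while (*++errp != 0)
		if (*errp == stat)
			return (stat);
	return (*defaulterrp);
}
#endif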

/*
 * Called to find/add a layout to a client.
 * This function returns the layout with a refcnt (shared lock) upon
 * success (returns 0) or with no lock/refcnt on the layout when an
 * error is returned.
 * If a layout is passed in via lypp, it is locked (exclusively locked).
 */
int
nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    nfsv4stateid_t *stateidp, int layouttype, int retonclose,
    struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp, *tlyp;
	struct nfsclflayout *flp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	int layout_passed_in;

	mp = nmp->nm_mountp;
	layout_passed_in = 1;
	tlyp = NULL;
	lyp = *lypp;
	if (lyp == NULL) {
		layout_passed_in = 0;
		tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
		    M_WAITOK | M_ZERO);
	}

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (layout_passed_in != 0)
			nfsv4_unlock(&lyp->nfsly_lock, 0);
		NFSUNLOCKCLSTATE();
		if (tlyp != NULL)
			free(tlyp, M_NFSLAYOUT);
		return (EPERM);
	}
	if (lyp == NULL) {
		/*
		 * Although no lyp was passed in, another thread might have
		 * allocated one. If one is found, just increment its ref
		 * count and return it.
		 */
		lyp = nfscl_findlayout(clp, fhp, fhlen);
		if (lyp == NULL) {
			lyp = tlyp;
			tlyp = NULL;
			lyp->nfsly_stateid.seqid = stateidp->seqid;
			lyp->nfsly_stateid.other[0] = stateidp->other[0];
			lyp->nfsly_stateid.other[1] = stateidp->other[1];
			lyp->nfsly_stateid.other[2] = stateidp->other[2];
			lyp->nfsly_lastbyte = 0;
			LIST_INIT(&lyp->nfsly_flayread);
			LIST_INIT(&lyp->nfsly_flayrw);
			LIST_INIT(&lyp->nfsly_recall);
			lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
			lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
			lyp->nfsly_clp = clp;
			if (layouttype == NFSLAYOUT_FLEXFILE)
				lyp->nfsly_flags = NFSLY_FLEXFILE;
			else
				lyp->nfsly_flags = NFSLY_FILES;
			if (retonclose != 0)
				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
			lyp->nfsly_fhlen = fhlen;
			NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
			    nfsly_hash);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			clp->nfsc_layoutcnt++;
			nfsstatsv1.cllayouts++;
		} else {
			if (retonclose != 0)
				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
			if (stateidp->seqid > lyp->nfsly_stateid.seqid)
				lyp->nfsly_stateid.seqid = stateidp->seqid;
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
		}
		nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
		if (NFSCL_FORCEDISM(mp)) {
			NFSUNLOCKCLSTATE();
			if (tlyp != NULL)
				free(tlyp, M_NFSLAYOUT);
			return (EPERM);
		}
		*lypp = lyp;
	} else if (stateidp->seqid > lyp->nfsly_stateid.seqid)
		lyp->nfsly_stateid.seqid = stateidp->seqid;

	/* Merge the new list of File Layouts into the list. */
	flp = LIST_FIRST(fhlp);
	if (flp != NULL) {
		if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
			nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
		else
			nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
	}
	if (layout_passed_in != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 1);
	NFSUNLOCKCLSTATE();
	if (tlyp != NULL)
		free(tlyp, M_NFSLAYOUT);
	return (0);
}

/*
 * Search for a layout by MDS file handle.
 * If one is found, it is returned with a refcnt (shared lock) iff
 * *retflpp is returned non-NULL, and exclusively locked iff *retflpp
 * is returned NULL.
 */
struct nfscllayout *
nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
    uint64_t off, uint32_t rwaccess, struct nfsclflayout **retflpp,
    int *recalledp)
{
	struct nfscllayout *lyp;
	mount_t mp;
	int error, igotlock;

	mp = clp->nfsc_nmp->nm_mountp;
	*recalledp = 0;
	*retflpp = NULL;
	NFSLOCKCLSTATE();
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			error = nfscl_findlayoutforio(lyp, off, rwaccess,
			    retflpp);
			if (error == 0)
				nfsv4_getref(&lyp->nfsly_lock, NULL,
				    NFSCLSTATEMUTEXPTR, mp);
			else {
				do {
					igotlock = nfsv4_lock(&lyp->nfsly_lock,
					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
				*retflpp = NULL;
			}
			if (NFSCL_FORCEDISM(mp)) {
				lyp = NULL;
				*recalledp = 1;
			}
		} else {
			lyp = NULL;
			*recalledp = 1;
		}
	}
	NFSUNLOCKCLSTATE();
	return (lyp);
}

/*
 * Search for a layout by MDS file handle. If one is found, mark it to be
 * recalled, if it is already marked "return on close".
 */
static void
nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
    int fhlen, struct nfsclrecalllayout **recallpp, struct nfscllayout **lypp)
{
	struct nfscllayout *lyp;
	uint32_t iomode;

	*lypp = NULL;
	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) ||
	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
		return;
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			iomode = 0;
			if (!LIST_EMPTY(&lyp->nfsly_flayread))
				iomode |= NFSLAYOUTIOMODE_READ;
			if (!LIST_EMPTY(&lyp->nfsly_flayrw))
				iomode |= NFSLAYOUTIOMODE_RW;
			nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
			    0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
			    *recallpp);
			NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
			*recallpp = NULL;
		}

		/* Now, wake up renew thread to do LayoutReturn. */
		wakeup(clp);
		*lypp = lyp;
	}
}

/*
 * Mark the layout to be recalled and with an error.
 * Also, disable the dsp from further use.
 */
void
nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
    struct nfscllayout *lyp, struct nfsclds *dsp)
{
	struct nfsclrecalllayout *recallp;
	uint32_t iomode;

	printf("DS being disabled, error=%d\n", stat);
	/* Set up the return of the layout. */
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	iomode = 0;
	NFSLOCKCLSTATE();
	if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
		    dp->nfsdi_deviceid, recallp);
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
	} else {
		NFSUNLOCKCLSTATE();
		free(recallp, M_NFSLAYRECALL);
	}

	/* And shut the TCP connection down. */
	nfscl_cancelreqs(dsp);
}

/*
 * Cancel all RPCs for this "dsp" by closing the connection.
 * Also, mark the session as defunct.
 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
 * cannot be shut down.
 */
void
nfscl_cancelreqs(struct nfsclds *dsp)
{
	struct __rpc_client *cl;
	static int non_event;

	NFSLOCKDS(dsp);
	if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
	    dsp->nfsclds_sockp != NULL &&
	    dsp->nfsclds_sockp->nr_client != NULL) {
		dsp->nfsclds_flags |= NFSCLDS_CLOSED;
		cl = dsp->nfsclds_sockp->nr_client;
		dsp->nfsclds_sess.nfsess_defunct = 1;
		NFSUNLOCKDS(dsp);
		CLNT_CLOSE(cl);
		/*
		 * This 1sec sleep is done to reduce the number of reconnect
		 * attempts made on the DS while it has failed.
		 */
		tsleep(&non_event, PVFS, "ndscls", hz);
		return;
	}
	NFSUNLOCKDS(dsp);
}

/*
 * Dereference a layout.
 */
void
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
{

	NFSLOCKCLSTATE();
	if (exclocked != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 0);
	else
		nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	if (dip == NULL)
		dip = nfscl_finddevinfo(clp, deviceid);
	if (dip != NULL)
		dip->nfsdi_refcnt++;
	NFSUNLOCKCLSTATE();
	return (dip);
}

/*
 * Dereference a devinfo structure.
 */
static void
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
{

	dip->nfsdi_refcnt--;
	if (dip->nfsdi_refcnt == 0)
		wakeup(&dip->nfsdi_refcnt);
}

/*
 * Dereference a devinfo structure.
 */
void
nfscl_reldevinfo(struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	nfscl_reldevinfo_locked(dip);
	NFSUNLOCKCLSTATE();
}

/*
 * Find a layout for this file handle. Return NULL upon failure.
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscllayout *lyp;

	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
		if (lyp->nfsly_fhlen == fhlen &&
		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
			break;
	return (lyp);
}

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
{
	struct nfscldevinfo *dip;

	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
		    == 0)
			break;
	return (dip);
}

/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
static void
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
{
	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

	flp = LIST_FIRST(fhlp);
	prevflp = NULL;
	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
			prevflp = flp;
			flp = LIST_NEXT(flp, nfsfl_list);
		}
		if (prevflp == NULL)
			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
		else
			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
		prevflp = nflp;
	}
}
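
/*
 * A userland sketch of the merge done by nfscl_mergeflayouts() above:
 * walk the destination list once, inserting each new node (the new
 * list is already in increasing offset order) after the last node
 * with a smaller offset. The example_* names are hypothetical.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct example_node {
	uint64_t off;
	struct example_node *next;
};

/* Merge "newlist" (sorted by off) into "head" (sorted by off). */
static struct example_node *
example_merge(struct example_node *head, struct example_node *newlist)
{
	struct example_node *flp = head, *prevflp = NULL, *nflp, *tflp;

	for (nflp = newlist; nflp != NULL; nflp = tflp) {
		tflp = nflp->next;
		while (flp != NULL && flp->off < nflp->off) {
			prevflp = flp;
			flp = flp->next;
		}
		if (prevflp == NULL) {
			nflp->next = head;
			head = nflp;
		} else {
			nflp->next = prevflp->next;
			prevflp->next = nflp;
		}
		prevflp = nflp;
	}
	return (head);
}
#endif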

/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
 */
int
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
    struct nfsclflayout *flp)
{
	struct nfsclclient *clp;
	struct nfscldevinfo *tdip;
	uint8_t *dev;

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (ENODEV);
	}
	if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
		dev = flp->nfsfl_dev;
	else
		dev = flp->nfsfl_ffm[ind].dev;
	tdip = nfscl_finddevinfo(clp, dev);
	if (tdip != NULL) {
		tdip->nfsdi_layoutrefs++;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = tdip;
		else
			flp->nfsfl_ffm[ind].devp = tdip;
		nfscl_reldevinfo_locked(tdip);
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (0);
	}
	if (dip != NULL) {
		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
		dip->nfsdi_layoutrefs = 1;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = dip;
		else
			flp->nfsfl_ffm[ind].devp = dip;
	}
	NFSUNLOCKCLSTATE();
	if (dip == NULL)
		return (ENODEV);
	return (0);
}
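
/*
 * Illustrative caller pattern (a sketch, not in the original source):
 * a caller can first try the cheap path with dip == NULL; ENODEV then
 * means "no cached devinfo for this deviceid", so the caller fetches
 * and builds one and retries, letting this function either link it in
 * or free it if another thread raced one in first:
 *
 *	if (nfscl_adddevinfo(nmp, NULL, ind, flp) == ENODEV) {
 *		dip = ...fetch device info and build a devinfo...;
 *		error = nfscl_adddevinfo(nmp, dip, ind, flp);
 *	}
 *
 * Either way, dip is consumed and must not be used afterwards.
 */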

/*
 * Free up a layout structure and associated file layout structure(s).
 */
void
nfscl_freelayout(struct nfscllayout *layp)
{
	struct nfsclflayout *flp, *nflp;
	struct nfsclrecalllayout *rp, *nrp;

	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
		LIST_REMOVE(rp, nfsrecly_list);
		free(rp, M_NFSLAYRECALL);
	}
	layp->nfsly_clp->nfsc_layoutcnt--;
	nfsstatsv1.cllayouts--;
	free(layp, M_NFSLAYOUT);
}

/*
 * Free up a file layout structure.
 */
void
nfscl_freeflayout(struct nfsclflayout *flp)
{
	int i, j;

	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
		for (i = 0; i < flp->nfsfl_fhcnt; i++)
			free(flp->nfsfl_fh[i], M_NFSFH);
		if (flp->nfsfl_devp != NULL)
			flp->nfsfl_devp->nfsdi_layoutrefs--;
	}
	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
			if (flp->nfsfl_ffm[i].devp != NULL)
				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
		}
	free(flp, M_NFSFLAYOUT);
}

/*
 * Free up a file layout devinfo structure.
 */
void
nfscl_freedevinfo(struct nfscldevinfo *dip)
{

	free(dip, M_NFSDEVINFO);
}

/*
 * Mark any layouts that match as recalled.
 */
static int
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
    char *devid, struct nfsclrecalllayout *recallp)
{
	struct nfsclrecalllayout *rp, *orp;

	recallp->nfsrecly_recalltype = recalltype;
	recallp->nfsrecly_iomode = iomode;
	recallp->nfsrecly_stateseqid = stateseqid;
	recallp->nfsrecly_off = off;
	recallp->nfsrecly_len = len;
	recallp->nfsrecly_stat = stat;
	recallp->nfsrecly_op = op;
	if (devid != NULL)
		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
	/*
	 * Order the list as file returns first, followed by fsid and any
	 * returns, both in increasing stateseqid order.
	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
	 * (I'm not sure this is correct because I find RFC5661 confusing
	 * on this, but hopefully it will work ok.)
	 */
	orp = NULL;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		orp = rp;
		if ((recalltype == NFSLAYOUTRETURN_FILE &&
		     (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
		      nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
		    (recalltype != NFSLAYOUTRETURN_FILE &&
		     rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
		     nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
			break;
		}

		/*
		 * Put any error return on all the file returns that will
		 * precede this one.
		 */
		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
		    stat != 0 && rp->nfsrecly_stat == 0) {
			rp->nfsrecly_stat = stat;
			rp->nfsrecly_op = op;
			if (devid != NULL)
				NFSBCOPY(devid, rp->nfsrecly_devid,
				    NFSX_V4DEVICEID);
		}
	}
	if (rp == NULL) {
		if (orp == NULL)
			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
			    nfsrecly_list);
		else
			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
	}
	lyp->nfsly_flags |= NFSLY_RECALL;
	wakeup(lyp->nfsly_clp);
	return (0);
}
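
/*
 * Illustrative sketch of the resulting ordering (added commentary, not
 * in the original source): with pending recalls (FILE seq=3),
 * (FILE seq=7), (FSID seq=2) already queued, inserting (FILE seq=5)
 * places it between the two FILE entries, while inserting (FSID seq=9)
 * goes after the existing FSID entry, keeping all FILE returns ahead
 * of the fsid/all returns.
 */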

/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
static int
nfscl_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		/* seqid2 has wrapped around. */
		return (0);
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		/* seqid1 has wrapped around. */
		return (1);
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}
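
/*
 * Worked examples (added commentary, not in the original source):
 *	nfscl_seq(3, 7)          == 1: no wrap, 3 <= 7.
 *	nfscl_seq(7, 3)          == 0: no wrap, 7 comes after 3.
 *	nfscl_seq(0xfffffffe, 2) == 1: seqid2 is a post-wrap value, so
 *	    the numerically larger seqid1 is treated as the earlier one.
 *	nfscl_seq(2, 0xfffffffe) == 0: the mirror case; seqid1 is the
 *	    post-wrap (later) value.
 */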

/*
 * Do a layout return for each of the recalls.
 */
static void
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclrecalllayout *rp;
	nfsv4stateid_t stateid;
	int layouttype;

	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
	stateid.seqid = lyp->nfsly_stateid.seqid;
	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
		    lyp->nfsly_fhlen, 0, layouttype,
		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
		    rp->nfsrecly_off, rp->nfsrecly_len,
		    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
		    rp->nfsrecly_devid);
	}
}
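
/*
 * Note (added commentary): one LAYOUTRETURN RPC is issued per queued
 * recall entry, all against the same copy of the layout stateid taken
 * above, and the return value of nfsrpc_layoutreturn() is discarded,
 * so a failed return is not retried here.
 */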

/*
 * Do the layout commit for a file layout.
 */
static void
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclflayout *flp;
	uint64_t len;
	int error, layouttype;

	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
		if (layouttype == NFSLAYOUT_FLEXFILE &&
		    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
			NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
			/* If not supported, don't bother doing it. */
			NFSLOCKMNT(nmp);
			nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
			NFSUNLOCKMNT(nmp);
			break;
		} else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
			len = flp->nfsfl_end - flp->nfsfl_off;
			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
			    layouttype, cred, p);
			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
			if (error == NFSERR_NOTSUPP) {
				/* If not supported, don't bother doing it. */
				NFSLOCKMNT(nmp);
				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
				NFSUNLOCKMNT(nmp);
				break;
			}
		}
	}
}
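
/*
 * Note (added commentary): once NFSSTA_NOLAYOUTCOMMIT has been set
 * here, the NFSHASNOLAYOUTCOMMIT() check at the top of
 * nfscl_layoutcommit() below short-circuits to success, so LayoutCommit
 * RPCs stop as soon as the server (or a flex file layout's flags)
 * indicates they are not needed.
 */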

/*
 * Commit all layouts for a file (vnode).
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
tryagain:
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}
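
/*
 * Note (added commentary): the tryagain loop clears NFSLY_WRITTEN
 * before dropping the state lock to issue the commit RPCs; if another
 * writer marks the layout written again while the lock is dropped, the
 * loop repeats, so this function returns only once no uncommitted
 * writes remain on the layout.
 */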

/*
 * Check access against a delegation ace.
 * Return EINVAL for any case where the check cannot be completed.
 */
int
nfscl_delegacecheck(struct vnode *vp, accmode_t accmode, struct ucred *cred)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	struct acl *aclp;
	int error;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp) || !NFSHASNFSV4N(nmp) || vp->v_type != VREG)
		return (EINVAL);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (EINVAL);
	}
	NFSUNLOCKMNT(nmp);
	aclp = acl_alloc(M_WAITOK);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		acl_free(aclp);
		return (EINVAL);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_RECALL |
	    NFSCLDL_DELEGRET)) == 0) {
		memcpy(&aclp->acl_entry[0], &dp->nfsdl_ace,
		    sizeof(struct acl_entry));
		NFSUNLOCKCLSTATE();
		aclp->acl_cnt = 1;
		error = vaccess_acl_nfs4(vp->v_type, np->n_vattr.na_uid,
		    np->n_vattr.na_gid, aclp, accmode, cred);
		acl_free(aclp);
		if (error == 0 || error == EACCES)
			return (error);
	} else {
		NFSUNLOCKCLSTATE();
		acl_free(aclp);
	}
	return (EINVAL);
}
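
/*
 * Illustrative caller pattern (a sketch, not in the original source):
 * since EINVAL means "could not decide", only 0 and EACCES are
 * definitive answers; a caller would otherwise fall back to asking the
 * server, e.g.
 *
 *	error = nfscl_delegacecheck(vp, accmode, cred);
 *	if (error == EINVAL)
 *		error = ...do the over-the-wire access check...;
 */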

/*
 * Start the recall of a delegation. Called for CB_RECALL and REMOVE
 * when nlink == 0 after the REMOVE.
 */
void
nfscl_startdelegrecall(struct nfsclclient *clp, struct nfsfh *nfhp)
{
	struct nfscldeleg *dp;

	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) {
		dp->nfsdl_flags |= NFSCLDL_RECALL;
		wakeup((caddr_t)clp);
	}
}
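
/*
 * Note (added commentary): this only flags the delegation with
 * NFSCLDL_RECALL and wakes the thread sleeping on clp; the actual
 * DelegReturn is presumably performed later by the client state thread
 * when it notices the flag.
 */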