/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <[email protected]>
 * Developed with Red Inc: Alfred Perlstein <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NLM_NLM_H_
#define _NLM_NLM_H_

#ifdef _KERNEL

#ifdef _SYS_MALLOC_H_
MALLOC_DECLARE(M_NLM);
#endif

/*
 * This value is added to host system IDs when recording NFS client
 * locks in the local lock manager.
 */
#define NLM_SYSID_CLIENT	0x1000000

struct nlm_host;
struct vnode;

extern struct timeval nlm_zero_tv;
extern int nlm_nsm_state;

/*
 * Make a struct netobj.
 */
extern void nlm_make_netobj(struct netobj *dst, caddr_t srt,
    size_t srcsize, struct malloc_type *type);

/*
 * Copy a struct netobj.
 */
extern void nlm_copy_netobj(struct netobj *dst, struct netobj *src,
    struct malloc_type *type);

/*
 * Search for an existing NLM host that matches the given name
 * (typically the caller_name element of an nlm4_lock).  If none is
 * found, create a new host.  If 'addr' is non-NULL, record the remote
 * address of the host so that we can call it back for async
 * responses.  If 'vers' is greater than zero then record the NLM
 * program version to use to communicate with this client.  The host
 * reference count is incremented - the caller must call
 * nlm_host_release when it has finished using it.
 */
extern struct nlm_host *nlm_find_host_by_name(const char *name,
    const struct sockaddr *addr, rpcvers_t vers);

/*
 * Search for an existing NLM host that matches the given remote
 * address.  If none is found, create a new host with the requested
 * address and remember 'vers' as the NLM protocol version to use for
 * that host.  The host reference count is incremented - the caller
 * must call nlm_host_release when it has finished using it.
 */
extern struct nlm_host *nlm_find_host_by_addr(const struct sockaddr *addr,
    int vers);

/*
 * Register this NLM host with the local NSM so that we can be
 * notified if it reboots.
 */
extern void nlm_host_monitor(struct nlm_host *host, int state);

/*
 * Decrement the host reference count, freeing resources if the
 * reference count reaches zero.
 */
extern void nlm_host_release(struct nlm_host *host);

/*
 * Return an RPC client handle that can be used to talk to the NLM
 * running on the given host.
 */
extern CLIENT *nlm_host_get_rpc(struct nlm_host *host, bool_t isserver);

/*
 * Return the system ID for a host.
 */
extern int nlm_host_get_sysid(struct nlm_host *host);

/*
 * Return the remote NSM state value for a host.
 */
extern int nlm_host_get_state(struct nlm_host *host);

/*
 * When sending a blocking lock request, we need to track the request
 * in our waiting lock list.  We add an entry to the waiting list
 * before we send the lock RPC so that we can cope with a granted
 * message arriving at any time.  Call this function before sending the
 * lock rpc.  If the lock succeeds, call nlm_deregister_wait_lock with
 * the handle this function returns, otherwise nlm_wait_lock.  Both
 * will remove the entry from the waiting list.
 */
extern void *nlm_register_wait_lock(struct nlm4_lock *lock, struct vnode *vp);

/*
 * Deregister a blocking lock request.  Call this if the lock succeeded
 * without blocking.
 */
extern void nlm_deregister_wait_lock(void *handle);

/*
 * Wait for a granted callback for a blocked lock request, waiting at
 * most timo ticks.  If no granted message is received within the
 * timeout, return EWOULDBLOCK.  If a signal interrupted the wait,
 * return EINTR - the caller must arrange to send a cancellation to
 * the server.  In both cases, the request is removed from the waiting
 * list.
 */
extern int nlm_wait_lock(void *handle, int timo);

/*
 * Cancel any pending waits for this vnode - called on forcible unmounts.
 */
extern void nlm_cancel_wait(struct vnode *vp);

/*
 * Called when a host restarts.
 */
extern void nlm_sm_notify(nlm_sm_status *argp);

/*
 * Implementation for lock testing RPCs.  If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply.  Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_test(nlm4_testargs *argp, nlm4_testres *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for lock setting RPCs.  If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply.  Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result,
    struct svc_req *rqstp, bool_t monitor, CLIENT **rpcp);

/*
 * Implementation for cancelling a pending lock request.  If the
 * request was handled successfully and rpcp is non-NULL, *rpcp is set
 * to an RPC client handle which can be used to send an async rpc
 * reply.  Returns zero if the request was handled, or a suitable unix
 * error code otherwise.
 */
extern int nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for unlocking RPCs.  If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply.  Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for granted RPCs.  If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply.  Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_granted(nlm4_testargs *argp, nlm4_res *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for the granted result RPC.  The client may reject the
 * granted message, in which case we need to handle it appropriately.
 */
extern void nlm_do_granted_res(nlm4_res *argp, struct svc_req *rqstp);

/*
 * Free all locks associated with the hostname argp->name.
 */
extern void nlm_do_free_all(nlm4_notify *argp);

/*
 * Recover client lock state after a server reboot.
 */
extern void nlm_client_recovery(struct nlm_host *);

/*
 * Interface from NFS client code to the NLM.
 */
struct vop_advlock_args;
struct vop_reclaim_args;
extern int nlm_advlock(struct vop_advlock_args *ap);
extern int nlm_reclaim(struct vop_reclaim_args *ap);

/*
 * Acquire the next sysid for remote locks not handled by the NLM.
 */
extern uint32_t nlm_acquire_next_sysid(void);

#endif

#endif