summaryrefslogtreecommitdiffstats
path: root/usr.sbin/rpc.lockd
diff options
context:
space:
mode:
authorsturm <sturm@openbsd.org>2008-06-13 21:32:26 +0000
committersturm <sturm@openbsd.org>2008-06-13 21:32:26 +0000
commit5507b659db81f50043edbb78b30fafce8b70bea2 (patch)
tree45753ea9e3f49c887ab2f2024cff2057132f3429 /usr.sbin/rpc.lockd
parentAdd definitions for nlm version 4 (for use with NFSv3). (diff)
downloadwireguard-openbsd-5507b659db81f50043edbb78b30fafce8b70bea2.tar.xz
wireguard-openbsd-5507b659db81f50043edbb78b30fafce8b70bea2.zip
from NetBSD:
Implement file locking in lockd. All the stuff is done in userland, using fhopen() and flock(). This means that if you kill lockd, all locks will be released. Shared locks are not handled efficiently; they're serialised in lockd when they could be granted. Tested against debian etch (linux 2.6.18). Not fully functional yet in mixed NFSv2/v3 environments. Further development in-tree as discussed w/ deraadt
Diffstat (limited to 'usr.sbin/rpc.lockd')
-rw-r--r--usr.sbin/rpc.lockd/Makefile4
-rw-r--r--usr.sbin/rpc.lockd/lockd.c109
-rw-r--r--usr.sbin/rpc.lockd/lockd.h4
-rw-r--r--usr.sbin/rpc.lockd/lockd_lock.c792
-rw-r--r--usr.sbin/rpc.lockd/lockd_lock.h20
-rw-r--r--usr.sbin/rpc.lockd/procs.c932
-rw-r--r--usr.sbin/rpc.lockd/rpc.lockd.820
7 files changed, 1756 insertions, 125 deletions
diff --git a/usr.sbin/rpc.lockd/Makefile b/usr.sbin/rpc.lockd/Makefile
index d0cf75c3ec8..ff4948a7288 100644
--- a/usr.sbin/rpc.lockd/Makefile
+++ b/usr.sbin/rpc.lockd/Makefile
@@ -1,7 +1,7 @@
-# $OpenBSD: Makefile,v 1.4 1997/08/19 08:05:16 niklas Exp $
+# $OpenBSD: Makefile,v 1.5 2008/06/13 21:32:26 sturm Exp $
PROG = rpc.lockd
-SRCS = nlm_prot_svc.c lockd.c procs.c
+SRCS = nlm_prot_svc.c lockd.c lockd_lock.c procs.c
MAN = rpc.lockd.8
DPADD= ${LIBRPCSVC}
diff --git a/usr.sbin/rpc.lockd/lockd.c b/usr.sbin/rpc.lockd/lockd.c
index df64fa80528..effd860178e 100644
--- a/usr.sbin/rpc.lockd/lockd.c
+++ b/usr.sbin/rpc.lockd/lockd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: lockd.c,v 1.8 2008/05/17 23:31:52 sobrado Exp $ */
+/* $OpenBSD: lockd.c,v 1.9 2008/06/13 21:32:26 sturm Exp $ */
/*
* Copyright (c) 1995
@@ -44,38 +44,56 @@
#include <stdio.h>
#include <syslog.h>
#include <stdlib.h>
+#include <errno.h>
#include <string.h>
#include <netdb.h>
#include "lockd.h"
-extern void nlm_prog_1(struct svc_req *, SVCXPRT *);
-extern void nlm_prog_3(struct svc_req *, SVCXPRT *);
+int debug_level = 0; /* 0 = no debugging syslog() calls */
+int _rpcsvcdirty = 0;
+int grace_expired;
-int debug_level = 0; /* Zero means no debugging syslog() calls */
+void nlm_prog_1(struct svc_req *, SVCXPRT *);
+void nlm_prog_3(struct svc_req *, SVCXPRT *);
+void nlm_prog_4(struct svc_req *, SVCXPRT *);
-int _rpcsvcdirty;
+static void sigalarm_handler(int);
+static void usage(void);
int
main(int argc, char *argv[])
{
SVCXPRT *transp;
+ int ch;
+ struct sigaction sigchild, sigalarm;
+ int grace_period = 30;
- if (argc > 1) {
- if (strncmp(argv[1], "-d", 2)) {
- fprintf(stderr,
- "usage: rpc.lockd [-d [debug_level]]\n");
- exit(1);
+ while ((ch = getopt(argc, argv, "d:g:")) != (-1)) {
+ switch (ch) {
+ case 'd':
+ debug_level = atoi(optarg);
+ if (!debug_level) {
+ usage();
+ /* NOTREACHED */
+ }
+ break;
+ case 'g':
+ grace_period = atoi(optarg);
+ if (!grace_period) {
+ usage();
+ /* NOTREACHED */
+ }
+ break;
+ default:
+ case '?':
+ usage();
+ /* NOTREACHED */
}
- if (argc > 2)
- debug_level = atoi(argv[2]);
- else
- debug_level = atoi(argv[1] + 2);
- if (!debug_level)
- debug_level = 1;
}
(void) pmap_unset(NLM_PROG, NLM_VERS);
(void) pmap_unset(NLM_PROG, NLM_VERSX);
+ (void) pmap_unset(NLM_PROG, NLM_VERS4);
transp = svcudp_create(RPC_ANYSOCK);
if (transp == NULL) {
@@ -92,6 +110,11 @@ main(int argc, char *argv[])
fprintf(stderr, "unable to register (NLM_PROG, NLM_VERSX, udp).\n");
exit(1);
}
+ if (!svc_register(transp, NLM_PROG, NLM_VERS4,
+ (void (*) (struct svc_req *, SVCXPRT *)) nlm_prog_4, IPPROTO_UDP)) {
+ fprintf(stderr, "unable to register (NLM_PROG, NLM_VERS4, udp).\n");
+ exit(1);
+ }
transp = svctcp_create(RPC_ANYSOCK, 0, 0);
if (transp == NULL) {
fprintf(stderr, "cannot create tcp service.\n");
@@ -107,17 +130,61 @@ main(int argc, char *argv[])
fprintf(stderr, "unable to register (NLM_PROG, NLM_VERSX, tcp).\n");
exit(1);
}
-
- if (daemon(0, 0)) {
- perror("cannot fork");
+ if (!svc_register(transp, NLM_PROG, NLM_VERS4,
+ (void (*) (struct svc_req *, SVCXPRT *)) nlm_prog_4, IPPROTO_TCP)) {
+ fprintf(stderr, "unable to register (NLM_PROG, NLM_VERS4, tcp).\n");
exit(1);
}
+
+ /*
+ * Note that it is NOT sensible to run this program from inetd - the
+ * protocol assumes that it will run immediately at boot time.
+ */
+ if (daemon(0, 0) == -1) {
+ err(1, "cannot fork");
+ /* NOTREACHED */
+ }
+
openlog("rpc.lockd", 0, LOG_DAEMON);
if (debug_level)
syslog(LOG_INFO, "Starting, debug level %d", debug_level);
else
syslog(LOG_INFO, "Starting");
- svc_run();
- exit(1);
+ sigchild.sa_handler = sigchild_handler;
+ sigemptyset(&sigchild.sa_mask);
+ sigchild.sa_flags = SA_RESTART;
+ if (sigaction(SIGCHLD, &sigchild, NULL) != 0) {
+ syslog(LOG_WARNING, "sigaction(SIGCHLD) failed (%m)");
+ exit(1);
+ }
+ sigalarm.sa_handler = sigalarm_handler;
+ sigemptyset(&sigalarm.sa_mask);
+ sigalarm.sa_flags = SA_RESETHAND; /* should only happen once */
+ sigalarm.sa_flags |= SA_RESTART;
+ if (sigaction(SIGALRM, &sigalarm, NULL) != 0) {
+ syslog(LOG_WARNING, "sigaction(SIGALRM) failed (%m)");
+ exit(1);
+ }
+ grace_expired = 0;
+ if (alarm(10) == (unsigned int)-1) {
+ syslog(LOG_WARNING, "alarm failed (%m)");
+ exit(1);
+ }
+
+ svc_run(); /* Should never return */
+ return 1;
+}
+
+static void
+/*ARGSUSED*/
+sigalarm_handler(int s)
+{
+ grace_expired = 1;
+}
+
+static void
+usage()
+{
+ errx(1, "usage: rpc.lockd [-d <debuglevel>] [-g <grace period>]");
}
diff --git a/usr.sbin/rpc.lockd/lockd.h b/usr.sbin/rpc.lockd/lockd.h
index 154089c9d04..8a795da1bfb 100644
--- a/usr.sbin/rpc.lockd/lockd.h
+++ b/usr.sbin/rpc.lockd/lockd.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: lockd.h,v 1.3 1997/09/17 23:09:37 deraadt Exp $ */
+/* $OpenBSD: lockd.h,v 1.4 2008/06/13 21:32:26 sturm Exp $ */
/*
* Copyright (c) 1995
@@ -34,3 +34,5 @@
*/
extern int debug_level;
+extern int grace_expired;
+void sigchild_handler(int);
diff --git a/usr.sbin/rpc.lockd/lockd_lock.c b/usr.sbin/rpc.lockd/lockd_lock.c
new file mode 100644
index 00000000000..52d8c96d5b6
--- /dev/null
+++ b/usr.sbin/rpc.lockd/lockd_lock.c
@@ -0,0 +1,792 @@
+/* $NetBSD: lockd_lock.c,v 1.2 2000/06/09 14:00:53 fvdl Exp $ */
+
+/*
+ * Copyright (c) 2000 Manuel Bouyer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Manuel Bouyer.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <syslog.h>
+#include <errno.h>
+#include <string.h>
+#include <signal.h>
+#include <rpc/rpc.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <rpcsvc/sm_inter.h>
+#include <rpcsvc/nlm_prot.h>
+#include "lockd_lock.h"
+#include "lockd.h"
+
+/* A set of utilities for managing file locking */
+LIST_HEAD(lcklst_head, file_lock);
+struct lcklst_head lcklst_head = LIST_HEAD_INITIALIZER(lcklst_head);
+
+#define FHANDLE_SIZE_MAX 1024 /* arbitrary big enough value */
+typedef struct {
+ size_t fhsize;
+ char *fhdata;
+} nfs_fhandle_t;
+
+static int
+fhcmp(const nfs_fhandle_t *fh1, const nfs_fhandle_t *fh2)
+{
+
+ if (fh1->fhsize != fh2->fhsize) {
+ return 1;
+ }
+ return memcmp(fh1->fhdata, fh2->fhdata, fh1->fhsize);
+}
+
+static int
+fhconv(nfs_fhandle_t *fh, const netobj *rfh)
+{
+ size_t sz;
+
+ sz = rfh->n_len;
+ if (sz > FHANDLE_SIZE_MAX) {
+ syslog(LOG_DEBUG,
+ "received fhandle size %zd, max supported size %d",
+ sz, FHANDLE_SIZE_MAX);
+ errno = EINVAL;
+ return -1;
+ }
+ fh->fhdata = malloc(sz);
+ if (fh->fhdata == NULL) {
+ return -1;
+ }
+ fh->fhsize = sz;
+ memcpy(fh->fhdata, rfh->n_bytes, sz);
+ return 0;
+}
+
+static void
+fhfree(nfs_fhandle_t *fh)
+{
+
+ free(fh->fhdata);
+}
+
+/* struct describing a lock */
+struct file_lock {
+ LIST_ENTRY(file_lock) lcklst;
+ nfs_fhandle_t filehandle; /* NFS filehandle */
+ struct sockaddr_in *addr;
+ struct nlm4_holder client; /* lock holder */
+ netobj client_cookie; /* cookie sent by the client */
+ char client_name[128];
+ int nsm_status; /* status from the remote lock manager */
+ int status; /* lock status, see below */
+ int flags; /* lock flags, see lockd_lock.h */
+ pid_t locker; /* pid of the child process trying to get the lock */
+ int fd; /* file descriptor for this lock */
+};
+
+/* lock status */
+#define LKST_LOCKED 1 /* lock is locked */
+#define LKST_WAITING 2 /* file is already locked by another host */
+#define LKST_PROCESSING 3 /* child is trying to acquire the lock */
+#define LKST_DYING 4 /* must die when we get news from the child */
+
+static struct file_lock *lalloc(void);
+void lfree(struct file_lock *);
+enum nlm_stats do_lock(struct file_lock *, int);
+enum nlm_stats do_unlock(struct file_lock *);
+void send_granted(struct file_lock *, int);
+void siglock(void);
+void sigunlock(void);
+
+#define LL_FH 0x01
+#define LL_NAME 0x02
+#define LL_SVID 0x04
+
+static struct file_lock *lock_lookup(struct file_lock *, int);
+
+/*
+ * lock_lookup: lookup a matching lock.
+ * called with siglock held.
+ */
+static struct file_lock *
+lock_lookup(struct file_lock *newfl, int flags)
+{
+ struct file_lock *fl;
+
+ LIST_FOREACH(fl, &lcklst_head, lcklst) {
+ if ((flags & LL_SVID) != 0 &&
+ newfl->client.svid != fl->client.svid)
+ continue;
+ if ((flags & LL_NAME) != 0 &&
+ strcmp(newfl->client_name, fl->client_name) != 0)
+ continue;
+ if ((flags & LL_FH) != 0 &&
+ fhcmp(&newfl->filehandle, &fl->filehandle) != 0)
+ continue;
+ /* found */
+ break;
+ }
+
+ return fl;
+}
+
+/*
+ * testlock(): inform the caller if the requested lock would be granted or not
+ * returns NULL if lock would be granted, or pointer to the current nlm4_holder
+ * otherwise.
+ */
+
+struct nlm4_holder *
+/*ARGSUSED*/
+testlock(struct nlm4_lock *lock, int flags)
+{
+ struct file_lock *fl;
+ nfs_fhandle_t filehandle;
+
+ /* convert lock to a local filehandle */
+ if (fhconv(&filehandle, &lock->fh)) {
+ syslog(LOG_NOTICE, "fhconv failed (%m)");
+ return NULL; /* XXX */
+ }
+
+ siglock();
+ /* search through the list for lock holder */
+ LIST_FOREACH(fl, &lcklst_head, lcklst) {
+ if (fl->status != LKST_LOCKED)
+ continue;
+ if (fhcmp(&fl->filehandle, &filehandle) != 0)
+ continue;
+ /* got it ! */
+ syslog(LOG_DEBUG, "test for %s: found lock held by %s",
+ lock->caller_name, fl->client_name);
+ sigunlock();
+ fhfree(&filehandle);
+ return (&fl->client);
+ }
+ /* not found */
+ sigunlock();
+ fhfree(&filehandle);
+ syslog(LOG_DEBUG, "test for %s: no lock found", lock->caller_name);
+ return NULL;
+}
+
+/*
+ * getlock: try to acquire the lock.
+ * If file is already locked and we can sleep, put the lock in the list with
+ * status LKST_WAITING; it'll be processed later.
+ * Otherwise try to lock. If we're allowed to block, fork a child which
+ * will do the blocking lock.
+ */
+enum nlm_stats
+getlock(nlm4_lockargs * lckarg, struct svc_req *rqstp, int flags)
+{
+ struct file_lock *fl, *newfl;
+ enum nlm_stats retval;
+ struct sockaddr_in *addr;
+
+ if (grace_expired == 0 && lckarg->reclaim == 0)
+ return (flags & LOCK_V4) ?
+ nlm4_denied_grace_period : nlm_denied_grace_period;
+
+ /* allocate new file_lock for this request */
+ newfl = lalloc();
+ if (newfl == NULL) {
+ syslog(LOG_NOTICE, "malloc failed (%m)");
+ /* failed */
+ return (flags & LOCK_V4) ?
+ nlm4_denied_nolock : nlm_denied_nolocks;
+ }
+ if (fhconv(&newfl->filehandle, &lckarg->alock.fh)) {
+ syslog(LOG_NOTICE, "fhconv failed (%m)");
+ lfree(newfl);
+ /* failed */
+ return (flags & LOCK_V4) ?
+ nlm4_denied_nolock : nlm_denied_nolocks;
+ }
+ addr = svc_getcaller(rqstp->rq_xprt);
+ newfl->addr = malloc(addr->sin_len);
+ if (newfl->addr == NULL) {
+ syslog(LOG_NOTICE, "malloc failed (%m)");
+ lfree(newfl);
+ /* failed */
+ return (flags & LOCK_V4) ?
+ nlm4_denied_nolock : nlm_denied_nolocks;
+ }
+ memcpy(newfl->addr, addr, addr->sin_len);
+ newfl->client.exclusive = lckarg->exclusive;
+ newfl->client.svid = lckarg->alock.svid;
+ newfl->client.oh.n_bytes = malloc(lckarg->alock.oh.n_len);
+ if (newfl->client.oh.n_bytes == NULL) {
+ syslog(LOG_NOTICE, "malloc failed (%m)");
+ lfree(newfl);
+ return (flags & LOCK_V4) ?
+ nlm4_denied_nolock : nlm_denied_nolocks;
+ }
+ newfl->client.oh.n_len = lckarg->alock.oh.n_len;
+ memcpy(newfl->client.oh.n_bytes, lckarg->alock.oh.n_bytes,
+ lckarg->alock.oh.n_len);
+ newfl->client.l_offset = lckarg->alock.l_offset;
+ newfl->client.l_len = lckarg->alock.l_len;
+ newfl->client_cookie.n_len = lckarg->cookie.n_len;
+ newfl->client_cookie.n_bytes = malloc(lckarg->cookie.n_len);
+ if (newfl->client_cookie.n_bytes == NULL) {
+ syslog(LOG_NOTICE, "malloc failed (%m)");
+ lfree(newfl);
+ return (flags & LOCK_V4) ?
+ nlm4_denied_nolock : nlm_denied_nolocks;
+ }
+ memcpy(newfl->client_cookie.n_bytes, lckarg->cookie.n_bytes,
+ lckarg->cookie.n_len);
+ strlcpy(newfl->client_name, lckarg->alock.caller_name,
+ sizeof(newfl->client_name));
+ newfl->nsm_status = lckarg->state;
+ newfl->status = 0;
+ newfl->flags = flags;
+ siglock();
+ /* look for a lock rq from this host for this fh */
+ fl = lock_lookup(newfl, LL_FH|LL_NAME|LL_SVID);
+ if (fl) {
+ /* already locked by this host ??? */
+ sigunlock();
+ syslog(LOG_NOTICE, "duplicate lock from %s.%"
+ PRIu32,
+ newfl->client_name, newfl->client.svid);
+ lfree(newfl);
+ switch(fl->status) {
+ case LKST_LOCKED:
+ return (flags & LOCK_V4) ?
+ nlm4_granted : nlm_granted;
+ case LKST_WAITING:
+ case LKST_PROCESSING:
+ return (flags & LOCK_V4) ?
+ nlm4_blocked : nlm_blocked;
+ case LKST_DYING:
+ return (flags & LOCK_V4) ?
+ nlm4_denied : nlm_denied;
+ default:
+ syslog(LOG_NOTICE, "bad status %d",
+ fl->status);
+ return (flags & LOCK_V4) ?
+ nlm4_failed : nlm_denied;
+ }
+ /* NOTREACHED */
+ }
+ fl = lock_lookup(newfl, LL_FH);
+ if (fl) {
+ /*
+ * We already have a lock for this file.
+ * Put this one in waiting state if allowed to block
+ */
+ if (lckarg->block) {
+ syslog(LOG_DEBUG, "lock from %s.%" PRIu32 ": "
+ "already locked, waiting",
+ lckarg->alock.caller_name,
+ lckarg->alock.svid);
+ newfl->status = LKST_WAITING;
+ LIST_INSERT_HEAD(&lcklst_head, newfl, lcklst);
+ sigunlock();
+ return (flags & LOCK_V4) ?
+ nlm4_blocked : nlm_blocked;
+ } else {
+ sigunlock();
+ syslog(LOG_DEBUG, "lock from %s.%" PRIu32 ": "
+ "already locked, failed",
+ lckarg->alock.caller_name,
+ lckarg->alock.svid);
+ lfree(newfl);
+ return (flags & LOCK_V4) ?
+ nlm4_denied : nlm_denied;
+ }
+ /* NOTREACHED */
+ }
+
+ /* no entry for this file yet; add to list */
+ LIST_INSERT_HEAD(&lcklst_head, newfl, lcklst);
+ /* do the lock */
+ retval = do_lock(newfl, lckarg->block);
+ switch (retval) {
+ case nlm4_granted:
+ /* case nlm_granted: is the same as nlm4_granted */
+ case nlm4_blocked:
+ /* case nlm_blocked: is the same as nlm4_blocked */
+ break;
+ default:
+ lfree(newfl);
+ break;
+ }
+ sigunlock();
+ return retval;
+}
+
+/* unlock a filehandle */
+enum nlm_stats
+unlock(nlm4_lock *lck, int flags)
+{
+ struct file_lock *fl;
+ nfs_fhandle_t filehandle;
+ int err = (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
+
+ if (fhconv(&filehandle, &lck->fh)) {
+ syslog(LOG_NOTICE, "fhconv failed (%m)");
+ return (flags & LOCK_V4) ? nlm4_denied : nlm_denied;
+ }
+ siglock();
+ LIST_FOREACH(fl, &lcklst_head, lcklst) {
+ if (strcmp(fl->client_name, lck->caller_name) ||
+ fhcmp(&filehandle, &fl->filehandle) != 0 ||
+ fl->client.oh.n_len != lck->oh.n_len ||
+ memcmp(fl->client.oh.n_bytes, lck->oh.n_bytes,
+ fl->client.oh.n_len) != 0 ||
+ fl->client.svid != lck->svid)
+ continue;
+ /* Got it, unlock and remove from the queue */
+ syslog(LOG_DEBUG, "unlock from %s.%" PRIu32 ": found struct, "
+ "status %d", lck->caller_name, lck->svid, fl->status);
+ switch (fl->status) {
+ case LKST_LOCKED:
+ err = do_unlock(fl);
+ break;
+ case LKST_WAITING:
+ /* remove from the list */
+ LIST_REMOVE(fl, lcklst);
+ lfree(fl);
+ break;
+ case LKST_PROCESSING:
+ /*
+ * being handled by a child; will clean up
+ * when the child exits
+ */
+ fl->status = LKST_DYING;
+ break;
+ case LKST_DYING:
+ /* nothing to do */
+ break;
+ default:
+ syslog(LOG_NOTICE, "unknow status %d for %s",
+ fl->status, fl->client_name);
+ }
+ sigunlock();
+ fhfree(&filehandle);
+ return err;
+ }
+ sigunlock();
+ /* didn't find a matching entry; log anyway */
+ syslog(LOG_NOTICE, "no matching entry for %s",
+ lck->caller_name);
+ fhfree(&filehandle);
+ return (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
+}
+
+static struct file_lock *
+lalloc(void)
+{
+ return calloc(1, sizeof(struct file_lock));
+}
+
+void
+lfree(struct file_lock *fl)
+{
+ free(fl->addr);
+ free(fl->client.oh.n_bytes);
+ free(fl->client_cookie.n_bytes);
+ fhfree(&fl->filehandle);
+ free(fl);
+}
+
+void
+/*ARGSUSED*/
+sigchild_handler(int sig)
+{
+ int sstatus;
+ pid_t pid;
+ struct file_lock *fl;
+
+ for (;;) {
+ pid = wait4(-1, &sstatus, WNOHANG, NULL);
+ if (pid == -1) {
+ if (errno != ECHILD)
+ syslog(LOG_NOTICE, "wait failed (%m)");
+ else
+ syslog(LOG_DEBUG, "wait failed (%m)");
+ return;
+ }
+ if (pid == 0) {
+ /* no more child to handle yet */
+ return;
+ }
+ /*
+ * if we're here we have a child that exited
+ * Find the associated file_lock.
+ */
+ LIST_FOREACH(fl, &lcklst_head, lcklst) {
+ if (pid == fl->locker)
+ break;
+ }
+ if (fl == NULL) {
+ syslog(LOG_NOTICE, "unknown child %d", pid);
+ } else {
+ /* protect from pid reusing. */
+ fl->locker = 0;
+ if (!WIFEXITED(sstatus) || WEXITSTATUS(sstatus) != 0) {
+ syslog(LOG_NOTICE, "child %d failed", pid);
+ /*
+ * can't do much here; we can't reply
+ * anything but OK for blocked locks
+ * Eventually the client will time out
+ * and retry.
+ */
+ do_unlock(fl);
+ return;
+ }
+
+ /* check lock status */
+ syslog(LOG_DEBUG, "processing child %d, status %d",
+ pid, fl->status);
+ switch(fl->status) {
+ case LKST_PROCESSING:
+ fl->status = LKST_LOCKED;
+ send_granted(fl, (fl->flags & LOCK_V4) ?
+ nlm4_granted : nlm_granted);
+ break;
+ case LKST_DYING:
+ do_unlock(fl);
+ break;
+ default:
+ syslog(LOG_NOTICE, "bad lock status (%d) for"
+ " child %d", fl->status, pid);
+ }
+ }
+ }
+}
+
+/*
+ *
+ * try to acquire the lock described by fl. Eventually fork a child to do a
+ * blocking lock if allowed and required.
+ */
+
+enum nlm_stats
+do_lock(struct file_lock *fl, int block)
+{
+ int lflags, error;
+ struct stat st;
+
+ fl->fd = fhopen((fhandle_t *)fl->filehandle.fhdata, O_RDWR);
+ if (fl->fd < 0) {
+ switch (errno) {
+ case ESTALE:
+ error = nlm4_stale_fh;
+ break;
+ case EROFS:
+ error = nlm4_rofs;
+ break;
+ default:
+ error = nlm4_failed;
+ }
+ if ((fl->flags & LOCK_V4) == 0)
+ error = nlm_denied;
+ syslog(LOG_NOTICE, "fhopen failed (from %s) (%m)",
+ fl->client_name);
+ LIST_REMOVE(fl, lcklst);
+ return error;
+ }
+ if (fstat(fl->fd, &st) < 0) {
+ syslog(LOG_NOTICE, "fstat failed (from %s) (%m)",
+ fl->client_name);
+ }
+ syslog(LOG_DEBUG, "lock from %s.%" PRIu32 " for file%s%s: "
+ "dev %u ino %d (uid %d), flags %d",
+ fl->client_name, fl->client.svid,
+ fl->client.exclusive ? " (exclusive)":"", block ? " (block)":"",
+ st.st_dev, st.st_ino, st.st_uid, fl->flags);
+ lflags = LOCK_NB;
+ if (fl->client.exclusive == 0)
+ lflags |= LOCK_SH;
+ else
+ lflags |= LOCK_EX;
+ error = flock(fl->fd, lflags);
+ if (error != 0 && errno == EAGAIN && block) {
+ switch (fl->locker = fork()) {
+ case -1: /* fork failed */
+ syslog(LOG_NOTICE, "fork failed (%m)");
+ LIST_REMOVE(fl, lcklst);
+ close(fl->fd);
+ return (fl->flags & LOCK_V4) ?
+ nlm4_denied_nolock : nlm_denied_nolocks;
+ case 0:
+ /*
+ * Attempt a blocking lock. Will have to call
+ * NLM_GRANTED later.
+ */
+ setproctitle("%s.%" PRIu32,
+ fl->client_name, fl->client.svid);
+ lflags &= ~LOCK_NB;
+ if(flock(fl->fd, lflags) != 0) {
+ syslog(LOG_NOTICE, "flock failed (%m)");
+ _exit(1);
+ }
+ /* lock granted */
+ _exit(0);
+ /*NOTREACHED*/
+ default:
+ syslog(LOG_DEBUG, "lock request from %s.%" PRIu32 ": "
+ "forked %d",
+ fl->client_name, fl->client.svid, fl->locker);
+ fl->status = LKST_PROCESSING;
+ return (fl->flags & LOCK_V4) ?
+ nlm4_blocked : nlm_blocked;
+ }
+ }
+ /* non block case */
+ if (error != 0) {
+ switch (errno) {
+ case EAGAIN:
+ error = nlm4_denied;
+ break;
+ case ESTALE:
+ error = nlm4_stale_fh;
+ break;
+ case EROFS:
+ error = nlm4_rofs;
+ break;
+ default:
+ error = nlm4_failed;
+ }
+ if ((fl->flags & LOCK_V4) == 0)
+ error = nlm_denied;
+ if (errno != EAGAIN)
+ syslog(LOG_NOTICE, "flock for %s failed (%m)",
+ fl->client_name);
+ else syslog(LOG_DEBUG, "flock for %s failed (%m)",
+ fl->client_name);
+ LIST_REMOVE(fl, lcklst);
+ close(fl->fd);
+ return error;
+ }
+ fl->status = LKST_LOCKED;
+ return (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
+}
+
+void
+/*ARGSUSED*/
+send_granted(struct file_lock *fl, int opcode)
+{
+ CLIENT *cli;
+ static char dummy;
+ struct timeval timeo;
+ int success;
+ static struct nlm_res retval;
+ static struct nlm4_res retval4;
+
+ cli = get_client(fl->addr,
+ (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
+ if (cli == NULL) {
+ syslog(LOG_NOTICE, "failed to get CLIENT for %s.%" PRIu32,
+ fl->client_name, fl->client.svid);
+ /*
+ * We fail to notify remote that the lock has been granted.
+ * The client will timeout and retry, the lock will be
+ * granted at this time.
+ */
+ return;
+ }
+ timeo.tv_sec = 0;
+ timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
+
+ if (fl->flags & LOCK_V4) {
+ static nlm4_testargs result;
+ result.cookie = fl->client_cookie;
+ result.exclusive = fl->client.exclusive;
+ result.alock.caller_name = fl->client_name;
+ result.alock.fh.n_len = fl->filehandle.fhsize;
+ result.alock.fh.n_bytes = fl->filehandle.fhdata;
+ result.alock.oh = fl->client.oh;
+ result.alock.svid = fl->client.svid;
+ result.alock.l_offset = fl->client.l_offset;
+ result.alock.l_len = fl->client.l_len;
+ syslog(LOG_DEBUG, "sending v4 reply%s",
+ (fl->flags & LOCK_ASYNC) ? " (async)":"");
+ if (fl->flags & LOCK_ASYNC) {
+ success = clnt_call(cli, NLM4_GRANTED_MSG,
+ xdr_nlm4_testargs, &result, xdr_void, &dummy, timeo);
+ } else {
+ success = clnt_call(cli, NLM4_GRANTED,
+ xdr_nlm4_testargs, &result, xdr_nlm4_res,
+ &retval4, timeo);
+ }
+ } else {
+ static nlm_testargs result;
+
+ result.cookie = fl->client_cookie;
+ result.exclusive = fl->client.exclusive;
+ result.alock.caller_name = fl->client_name;
+ result.alock.fh.n_len = fl->filehandle.fhsize;
+ result.alock.fh.n_bytes = fl->filehandle.fhdata;
+ result.alock.oh = fl->client.oh;
+ result.alock.svid = fl->client.svid;
+ result.alock.l_offset =
+ (unsigned int)fl->client.l_offset;
+ result.alock.l_len =
+ (unsigned int)fl->client.l_len;
+ syslog(LOG_DEBUG, "sending v1 reply%s",
+ (fl->flags & LOCK_ASYNC) ? " (async)":"");
+ if (fl->flags & LOCK_ASYNC) {
+ success = clnt_call(cli, NLM_GRANTED_MSG,
+ xdr_nlm_testargs, &result, xdr_void, &dummy, timeo);
+ } else {
+ success = clnt_call(cli, NLM_GRANTED,
+ xdr_nlm_testargs, &result, xdr_nlm_res,
+ &retval, timeo);
+ }
+ }
+ if (debug_level > 2)
+ syslog(LOG_DEBUG, "clnt_call returns %d(%s) for granted",
+ success, clnt_sperrno(success));
+
+}
+
+enum nlm_stats
+do_unlock(struct file_lock *rfl)
+{
+ struct file_lock *fl;
+ int error;
+ int lockst;
+
+ /* unlock the file: closing is enough ! */
+ if (close(rfl->fd) == -1) {
+ if (errno == ESTALE)
+ error = nlm4_stale_fh;
+ else
+ error = nlm4_failed;
+ if ((rfl->flags & LOCK_V4) == 0)
+ error = nlm_denied;
+ syslog(LOG_NOTICE, "close failed (from %s) (%m)",
+ rfl->client_name);
+ } else {
+ error = (rfl->flags & LOCK_V4) ?
+ nlm4_granted : nlm_granted;
+ }
+ LIST_REMOVE(rfl, lcklst);
+
+ /* process the next LKST_WAITING lock request for this fh */
+ LIST_FOREACH(fl, &lcklst_head, lcklst) {
+ if (fl->status != LKST_WAITING ||
+ fhcmp(&rfl->filehandle, &fl->filehandle) != 0)
+ continue;
+
+ lockst = do_lock(fl, 1); /* If it's LKST_WAITING we can block */
+ switch (lockst) {
+ case nlm4_granted:
+ /* case nlm_granted: same as nlm4_granted */
+ send_granted(fl, (fl->flags & LOCK_V4) ?
+ nlm4_granted : nlm_granted);
+ break;
+ case nlm4_blocked:
+ /* case nlm_blocked: same as nlm4_blocked */
+ break;
+ default:
+ lfree(fl);
+ break;
+ }
+ break;
+ }
+ lfree(rfl);
+ return error;
+}
+
+void
+siglock(void)
+{
+ sigset_t block;
+
+ sigemptyset(&block);
+ sigaddset(&block, SIGCHLD);
+
+ if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
+ syslog(LOG_WARNING, "siglock failed (%m)");
+ }
+}
+
+void
+sigunlock(void)
+{
+ sigset_t block;
+
+ sigemptyset(&block);
+ sigaddset(&block, SIGCHLD);
+
+ if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
+ syslog(LOG_WARNING, "sigunlock failed (%m)");
+ }
+}
+
+void
+notify(const char *hostname, int state)
+{
+ struct file_lock *fl, *next_fl;
+ int err;
+ syslog(LOG_DEBUG, "notify from %s, new state %d", hostname, state);
+ /* search all lock for this host; if status changed, release the lock */
+ siglock();
+ for (fl = LIST_FIRST(&lcklst_head); fl != NULL; fl = next_fl) {
+ next_fl = LIST_NEXT(fl, lcklst);
+ if (strcmp(hostname, fl->client_name) == 0 &&
+ fl->nsm_status != state) {
+ syslog(LOG_DEBUG, "state %d, nsm_state %d, unlocking",
+ fl->status, fl->nsm_status);
+ switch(fl->status) {
+ case LKST_LOCKED:
+ err = do_unlock(fl);
+ if (err != nlm_granted)
+ syslog(LOG_DEBUG,
+ "notify: unlock failed for %s (%d)",
+ hostname, err);
+ break;
+ case LKST_WAITING:
+ LIST_REMOVE(fl, lcklst);
+ lfree(fl);
+ break;
+ case LKST_PROCESSING:
+ fl->status = LKST_DYING;
+ break;
+ case LKST_DYING:
+ break;
+ default:
+ syslog(LOG_NOTICE, "unknow status %d for %s",
+ fl->status, fl->client_name);
+ }
+ }
+ }
+ sigunlock();
+}
diff --git a/usr.sbin/rpc.lockd/lockd_lock.h b/usr.sbin/rpc.lockd/lockd_lock.h
new file mode 100644
index 00000000000..dee5ae78ede
--- /dev/null
+++ b/usr.sbin/rpc.lockd/lockd_lock.h
@@ -0,0 +1,20 @@
+/* $NetBSD: lockd_lock.h,v 1.2 2000/06/09 14:00:54 fvdl Exp $ */
+
+/* Headers and function declarations for file-locking utilities */
+
+struct nlm4_holder *testlock(struct nlm4_lock *, int);
+
+enum nlm_stats getlock(nlm4_lockargs *, struct svc_req *, int);
+enum nlm_stats unlock(nlm4_lock *, int);
+void notify(const char *, int);
+
+/* flags for testlock, getlock & unlock */
+#define LOCK_ASYNC 0x01 /* async version (getlock only) */
+#define LOCK_V4 0x02 /* v4 version */
+#define LOCK_MON 0x04 /* monitored lock (getlock only) */
+#define LOCK_CANCEL 0x08 /* cancel, not unlock request (unlock only) */
+
+/* callbacks from lock_proc.c */
+void transmit_result(int, nlm_res *, struct sockaddr_in *);
+void transmit4_result(int, nlm4_res *, struct sockaddr_in *);
+CLIENT *get_client(struct sockaddr_in *, u_long);
diff --git a/usr.sbin/rpc.lockd/procs.c b/usr.sbin/rpc.lockd/procs.c
index 2c9f7a9470f..018b705c71e 100644
--- a/usr.sbin/rpc.lockd/procs.c
+++ b/usr.sbin/rpc.lockd/procs.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: procs.c,v 1.11 2003/07/06 21:26:14 deraadt Exp $ */
+/* $OpenBSD: procs.c,v 1.12 2008/06/13 21:32:26 sturm Exp $ */
/*
* Copyright (c) 1995
@@ -48,10 +48,18 @@
#include <netdb.h>
#include "lockd.h"
+#include "lockd_lock.h"
#define CLIENT_CACHE_SIZE 64 /* No. of client sockets cached */
#define CLIENT_CACHE_LIFETIME 120 /* In seconds */
+/* log_from_addr ----------------------------------------------------------- */
+/*
+ * Purpose: Log name of function called and source address
+ * Returns: Nothing
+ * Notes: Extracts the source address from the transport handle
+ * passed in as part of the called procedure specification
+ */
static void
log_from_addr(char *fun_name, struct svc_req *req)
{
@@ -70,13 +78,42 @@ log_from_addr(char *fun_name, struct svc_req *req)
}
+/* get_client -------------------------------------------------------------- */
+/*
+ * Purpose: Get a CLIENT* for making RPC calls to lockd on given host
+ * Returns: CLIENT* pointer, from clnt_udp_create, or NULL if error
+ * Notes: Creating a CLIENT* is quite expensive, involving a
+ * conversation with the remote portmapper to get the
+ * port number. Since a given client is quite likely
+ * to make several locking requests in succession, it is
+ * desirable to cache the created CLIENT*.
+ *
+ * Since we are using UDP rather than TCP, there is no cost
+ * to the remote system in keeping these cached indefinitely.
+ * Unfortunately there is a snag: if the remote system
+ * reboots, the cached portmapper results will be invalid,
+ * and we will never detect this since all of the xxx_msg()
+ * calls return no result - we just fire off a udp packet
+ * and hope for the best.
+ *
+ * We solve this by discarding cached values after two
+ * minutes, regardless of whether they have been used
+ * in the meanwhile (since a bad one might have been used
+ * plenty of times, as the host keeps retrying the request
+ * and we keep sending the reply back to the wrong port).
+ *
+ * Given that the entries will always expire in the order
+ * that they were created, there is no point in a LRU
+ * algorithm for when the cache gets full - entries are
+ * always re-used in sequence.
+ */
static CLIENT *clnt_cache_ptr[CLIENT_CACHE_SIZE];
-static long clnt_cache_time[CLIENT_CACHE_SIZE]; /* time entry created */
+static long clnt_cache_time[CLIENT_CACHE_SIZE]; /* time entry created */
static struct in_addr clnt_cache_addr[CLIENT_CACHE_SIZE];
static int clnt_cache_next_to_use = 0;
-static CLIENT *
-get_client(struct sockaddr_in *host_addr)
+CLIENT *
+get_client(struct sockaddr_in *host_addr, u_long vers)
{
CLIENT *client;
int sock_no, i;
@@ -84,12 +121,14 @@ get_client(struct sockaddr_in *host_addr)
gettimeofday(&time_now, NULL);
- /* Search for the given client in the cache, zapping any expired */
- /* entries that we happen to notice in passing. */
+ /*
+ * Search for the given client in the cache, zapping any expired
+ * entries that we happen to notice in passing.
+ */
for (i = 0; i < CLIENT_CACHE_SIZE; i++) {
client = clnt_cache_ptr[i];
- if (client &&
- ((clnt_cache_time[i] + CLIENT_CACHE_LIFETIME) < time_now.tv_sec)) {
+ if (client && ((clnt_cache_time[i] + CLIENT_CACHE_LIFETIME)
+ < time_now.tv_sec)) {
/* Cache entry has expired. */
if (debug_level > 3)
syslog(LOG_DEBUG, "Expired CLIENT* in cache");
@@ -103,11 +142,11 @@ get_client(struct sockaddr_in *host_addr)
/* Found it! */
if (debug_level > 3)
syslog(LOG_DEBUG, "Found CLIENT* in cache");
- return (client);
+ return client;
}
}
- /* Not found in cache. Free the next entry if it is in use */
+ /* Not found in cache. Free the next entry if it is in use. */
if (clnt_cache_ptr[clnt_cache_next_to_use]) {
clnt_destroy(clnt_cache_ptr[clnt_cache_next_to_use]);
clnt_cache_ptr[clnt_cache_next_to_use] = NULL;
@@ -117,276 +156,981 @@ get_client(struct sockaddr_in *host_addr)
retry_time.tv_sec = 5;
retry_time.tv_usec = 0;
host_addr->sin_port = 0;
- client = clntudp_create(host_addr, NLM_PROG, NLM_VERS, retry_time, &sock_no);
+ client = clntudp_create(host_addr, NLM_PROG, vers, retry_time, &sock_no);
if (!client) {
syslog(LOG_ERR, "%s", clnt_spcreateerror("clntudp_create"));
syslog(LOG_ERR, "Unable to return result to %s",
inet_ntoa(host_addr->sin_addr));
- return (NULL);
+ return NULL;
}
+
+ /* Success - update the cache entry */
clnt_cache_ptr[clnt_cache_next_to_use] = client;
clnt_cache_addr[clnt_cache_next_to_use] = host_addr->sin_addr;
clnt_cache_time[clnt_cache_next_to_use] = time_now.tv_sec;
- if (++clnt_cache_next_to_use > CLIENT_CACHE_SIZE)
+ if (++clnt_cache_next_to_use >= CLIENT_CACHE_SIZE)
clnt_cache_next_to_use = 0;
+ /*
+ * Disable the default timeout, so we can specify our own in calls
+ * to clnt_call(). (Note that the timeout is a different concept
+ * from the retry period set in clnt_udp_create() above.)
+ */
retry_time.tv_sec = -1;
retry_time.tv_usec = -1;
- clnt_control(client, CLSET_TIMEOUT, &retry_time);
+ clnt_control(client, CLSET_TIMEOUT, (char *)(void *)&retry_time);
if (debug_level > 3)
syslog(LOG_DEBUG, "Created CLIENT* for %s",
inet_ntoa(host_addr->sin_addr));
- return (client);
+ return client;
}
-static void
-transmit_result(int opcode, nlm_res *result, struct svc_req *req)
+/* transmit_result --------------------------------------------------------- */
+/*
+ * Purpose: Transmit result for nlm_xxx_msg pseudo-RPCs
+ * Returns: Nothing - we have no idea if the datagram got there
+ * Notes: clnt_call() will always fail (with timeout) as we are
+ * calling it with timeout 0 as a hack to just issue a datagram
+ * without expecting a result
+ */
+void
+transmit_result(int opcode, nlm_res *result, struct sockaddr_in *addr)
{
static char dummy;
- struct sockaddr_in *addr;
CLIENT *cli;
- int success;
struct timeval timeo;
+ int success;
- addr = svc_getcaller(req->rq_xprt);
- if ((cli = get_client(addr))) {
- timeo.tv_sec = 0;
+ if ((cli = get_client(addr, NLM_VERS)) != NULL) {
+ timeo.tv_sec = 0; /* No timeout - not expecting response */
timeo.tv_usec = 0;
- success = clnt_call(cli, opcode, xdr_nlm_res, result, xdr_void,
- &dummy, timeo);
+ success = clnt_call(cli, opcode, xdr_nlm_res,
+ result, xdr_void, &dummy, timeo);
+
if (debug_level > 2)
- syslog(LOG_DEBUG, "clnt_call returns %d", success);
+ syslog(LOG_DEBUG, "clnt_call returns %d(%s)",
+ success, clnt_sperrno(success));
}
}
+/* transmit4_result --------------------------------------------------------- */
+/*
+ * Purpose: Transmit result for nlm4_xxx_msg pseudo-RPCs
+ * Returns: Nothing - we have no idea if the datagram got there
+ * Notes: clnt_call() will always fail (with timeout) as we are
+ * calling it with timeout 0 as a hack to just issue a datagram
+ * without expecting a result
+ */
+void
+transmit4_result(int opcode, nlm4_res *result, struct sockaddr_in *addr)
+{
+ static char dummy;
+ CLIENT *cli;
+ struct timeval timeo;
+ int success;
+
+ if ((cli = get_client(addr, NLM_VERS4)) != NULL) {
+ timeo.tv_sec = 0; /* No timeout - not expecting response */
+ timeo.tv_usec = 0;
+
+ success = clnt_call(cli, opcode, xdr_nlm4_res,
+ result, xdr_void, &dummy, timeo);
+
+ if (debug_level > 2)
+ syslog(LOG_DEBUG, "clnt_call returns %d(%s)",
+ success, clnt_sperrno(success));
+ }
+}
+
+/*
+ * converts a struct nlm_lock to struct nlm4_lock
+ */
+static void
+nlmtonlm4(struct nlm_lock *arg, struct nlm4_lock *arg4)
+{
+ memcpy(arg4, arg, sizeof(nlm_lock));
+ arg4->l_offset = arg->l_offset;
+ arg4->l_len = arg->l_len;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * Functions for Unix<->Unix locking (ie. monitored locking, with rpc.statd
+ * involved to ensure reclaim of locks after a crash of the "stateless"
+ * server.
+ *
+ * These all come in two flavours - nlm_xxx() and nlm_xxx_msg().
+ * The first are standard RPCs with argument and result.
+ * The nlm_xxx_msg() calls implement exactly the same functions, but
+ * use two pseudo-RPCs (one in each direction). These calls are NOT
+ * standard use of the RPC protocol in that they do not return a result
+ * at all (NB. this is quite different from returning a void result).
+ * The effect of this is to make the nlm_xxx_msg() calls simple unacknowledged
+ * datagrams, requiring higher-level code to perform retries.
+ *
+ * Despite the disadvantages of the nlm_xxx_msg() approach (some of which
+ * are documented in the comments to get_client() above), this is the
+ * interface used by all current commercial NFS implementations
+ * [Solaris, SCO, AIX etc.]. This is presumed to be because these allow
+ * implementations to continue using the standard RPC libraries, while
+ * avoiding the block-until-result nature of the library interface.
+ *
+ * No client implementations have been identified so far that make use
+ * of the true RPC version (early SunOS releases would be a likely candidate
+ * for testing).
+ */
+/* nlm_test ---------------------------------------------------------------- */
+/*
+ * Purpose: Test whether a specified lock would be granted if requested
+ * Returns: nlm_granted (or error code)
+ * Notes:
+ */
nlm_testres *
nlm_test_1_svc(nlm_testargs *arg, struct svc_req *rqstp)
{
- static nlm_testres res;
+ static nlm_testres result;
+ struct nlm4_lock arg4;
+ struct nlm4_holder *holder;
+ nlmtonlm4(&arg->alock, &arg4);
if (debug_level)
log_from_addr("nlm_test", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
- return (&res);
+
+ holder = testlock(&arg4, 0);
+ /*
+ * Copy the cookie from the argument into the result. Note that this
+ * is slightly hazardous, as the structure contains a pointer to a
+ * malloc()ed buffer that will get freed by the caller. However, the
+ * main function transmits the result before freeing the argument
+ * so it is in fact safe.
+ */
+ result.cookie = arg->cookie;
+ if (holder == NULL) {
+ result.stat.stat = nlm_granted;
+ } else {
+ result.stat.stat = nlm_denied;
+ memcpy(&result.stat.nlm_testrply_u.holder, holder,
+ sizeof(struct nlm_holder));
+ result.stat.nlm_testrply_u.holder.l_offset =
+ (unsigned int)holder->l_offset;
+ result.stat.nlm_testrply_u.holder.l_len =
+ (unsigned int)holder->l_len;
+ }
+ return &result;
}
void *
nlm_test_msg_1_svc(nlm_testargs *arg, struct svc_req *rqstp)
{
- nlm_testres res;
+ nlm_testres result;
static char dummy;
struct sockaddr_in *addr;
CLIENT *cli;
- int success;
+ int success;
struct timeval timeo;
+ struct nlm4_lock arg4;
+ struct nlm4_holder *holder;
+
+ nlmtonlm4(&arg->alock, &arg4);
if (debug_level)
log_from_addr("nlm_test_msg", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
+ holder = testlock(&arg4, 0);
+ result.cookie = arg->cookie;
+ if (holder == NULL) {
+ result.stat.stat = nlm_granted;
+ } else {
+ result.stat.stat = nlm_denied;
+ memcpy(&result.stat.nlm_testrply_u.holder, holder,
+ sizeof(struct nlm_holder));
+ result.stat.nlm_testrply_u.holder.l_offset =
+ (unsigned int)holder->l_offset;
+ result.stat.nlm_testrply_u.holder.l_len =
+ (unsigned int)holder->l_len;
+ }
+
+ /*
+ * nlm_test has different result type to the other operations, so
+ * can't use transmit_result() in this case
+ */
addr = svc_getcaller(rqstp->rq_xprt);
- if ((cli = get_client(addr))) {
- timeo.tv_sec = 0;
+ if ((cli = get_client(addr, NLM_VERS)) != NULL) {
+ timeo.tv_sec = 0; /* No timeout - not expecting response */
timeo.tv_usec = 0;
- success = clnt_call(cli, NLM_TEST_RES, xdr_nlm_testres, &res, xdr_void,
- &dummy, timeo);
+
+ success = clnt_call(cli, NLM_TEST_RES, xdr_nlm_testres,
+ &result, xdr_void, &dummy, timeo);
+
if (debug_level > 2)
syslog(LOG_DEBUG, "clnt_call returns %d", success);
}
- return (NULL);
+ return NULL;
}
+/* nlm_lock ---------------------------------------------------------------- */
+/*
+ * Purposes: Establish a lock
+ * Returns: granted, denied or blocked
+ * Notes: *** grace period support missing
+ */
nlm_res *
nlm_lock_1_svc(nlm_lockargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
+ struct nlm4_lockargs arg4;
+ nlmtonlm4(&arg->alock, &arg4.alock);
+ arg4.cookie = arg->cookie;
+ arg4.block = arg->block;
+ arg4.exclusive = arg->exclusive;
+ arg4.reclaim = arg->reclaim;
+ arg4.state = arg->state;
if (debug_level)
log_from_addr("nlm_lock", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
- return (&res);
+
+ /* copy cookie from arg to result. See comment in nlm_test_1() */
+ result.cookie = arg->cookie;
+
+ result.stat.stat = getlock(&arg4, rqstp, LOCK_MON);
+ return &result;
}
void *
nlm_lock_msg_1_svc(nlm_lockargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
+ struct nlm4_lockargs arg4;
+
+ nlmtonlm4(&arg->alock, &arg4.alock);
+ arg4.cookie = arg->cookie;
+ arg4.block = arg->block;
+ arg4.exclusive = arg->exclusive;
+ arg4.reclaim = arg->reclaim;
+ arg4.state = arg->state;
if (debug_level)
log_from_addr("nlm_lock_msg", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
- transmit_result(NLM_LOCK_RES, &res, rqstp);
- return (NULL);
+
+ result.cookie = arg->cookie;
+ result.stat.stat = getlock(&arg4, rqstp, LOCK_ASYNC | LOCK_MON);
+ transmit_result(NLM_LOCK_RES, &result, svc_getcaller(rqstp->rq_xprt));
+
+ return NULL;
}
+/* nlm_cancel -------------------------------------------------------------- */
+/*
+ * Purpose: Cancel a blocked lock request
+ * Returns: granted or denied
+ * Notes:
+ */
nlm_res *
nlm_cancel_1_svc(nlm_cancargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
+ struct nlm4_lock arg4;
+
+ nlmtonlm4(&arg->alock, &arg4);
if (debug_level)
log_from_addr("nlm_cancel", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_denied;
- return (&res);
+
+ /* copy cookie from arg to result. See comment in nlm_test_1() */
+ result.cookie = arg->cookie;
+
+ /*
+ * Since at present we never return 'nlm_blocked', there can never be
+ * a lock to cancel, so this call always fails.
+ */
+ result.stat.stat = unlock(&arg4, LOCK_CANCEL);
+ return &result;
}
void *
nlm_cancel_msg_1_svc(nlm_cancargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
+ struct nlm4_lock arg4;
+
+ nlmtonlm4(&arg->alock, &arg4);
if (debug_level)
log_from_addr("nlm_cancel_msg", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_denied;
- transmit_result(NLM_CANCEL_RES, &res, rqstp);
- return (NULL);
+
+ result.cookie = arg->cookie;
+ /*
+ * Since at present we never return 'nlm_blocked', there can never be
+ * a lock to cancel, so this call always fails.
+ */
+ result.stat.stat = unlock(&arg4, LOCK_CANCEL);
+ transmit_result(NLM_CANCEL_RES, &result, svc_getcaller(rqstp->rq_xprt));
+ return NULL;
}
+/* nlm_unlock -------------------------------------------------------------- */
+/*
+ * Purpose: Release an existing lock
+ * Returns: Always granted, unless during grace period
+ * Notes: "no such lock" error condition is ignored, as the
+ * protocol uses unreliable UDP datagrams, and may well
+ * re-try an unlock that has already succeeded.
+ */
nlm_res *
nlm_unlock_1_svc(nlm_unlockargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
+ struct nlm4_lock arg4;
+
+ nlmtonlm4(&arg->alock, &arg4);
if (debug_level)
log_from_addr("nlm_unlock", rqstp);
- res.stat.stat = nlm_granted;
- res.cookie = arg->cookie;
- return (&res);
+
+ result.stat.stat = unlock(&arg4, 0);
+ result.cookie = arg->cookie;
+
+ return &result;
}
void *
nlm_unlock_msg_1_svc(nlm_unlockargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
+ struct nlm4_lock arg4;
+
+ nlmtonlm4(&arg->alock, &arg4);
if (debug_level)
log_from_addr("nlm_unlock_msg", rqstp);
- res.stat.stat = nlm_granted;
- res.cookie = arg->cookie;
- transmit_result(NLM_UNLOCK_RES, &res, rqstp);
- return (NULL);
+
+ result.stat.stat = unlock(&arg4, 0);
+ result.cookie = arg->cookie;
+
+ transmit_result(NLM_UNLOCK_RES, &result, svc_getcaller(rqstp->rq_xprt));
+ return NULL;
}
+/* ------------------------------------------------------------------------- */
+/*
+ * Client-side pseudo-RPCs for results. Note that for the client there
+ * are only nlm_xxx_msg() versions of each call, since the 'real RPC'
+ * version returns the results in the RPC result, and so the client
+ * does not normally receive incoming RPCs.
+ *
+ * The exception to this is nlm_granted(), which is genuinely an RPC
+ * call from the server to the client - a 'call-back' in normal procedure
+ * call terms.
+ */
+
+/* nlm_granted ------------------------------------------------------------- */
+/*
+ * Purpose: Receive notification that formerly blocked lock now granted
+ * Returns: always success ('granted')
+ * Notes:
+ */
nlm_res *
nlm_granted_1_svc(nlm_testargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
if (debug_level)
log_from_addr("nlm_granted", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
- return (&res);
+
+ /* copy cookie from arg to result. See comment in nlm_test_1() */
+ result.cookie = arg->cookie;
+
+ result.stat.stat = nlm_granted;
+ return &result;
}
void *
nlm_granted_msg_1_svc(nlm_testargs *arg, struct svc_req *rqstp)
{
- nlm_res res;
+ static nlm_res result;
if (debug_level)
log_from_addr("nlm_granted_msg", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
- transmit_result(NLM_GRANTED_RES, &res, rqstp);
- return (NULL);
+
+ result.cookie = arg->cookie;
+ result.stat.stat = nlm_granted;
+ transmit_result(NLM_GRANTED_RES, &result, svc_getcaller(rqstp->rq_xprt));
+ return NULL;
}
+/* nlm_test_res ------------------------------------------------------------ */
+/*
+ * Purpose: Accept result from earlier nlm_test_msg() call
+ * Returns: Nothing
+ */
void *
+/*ARGSUSED*/
nlm_test_res_1_svc(nlm_testres *arg, struct svc_req *rqstp)
{
if (debug_level)
log_from_addr("nlm_test_res", rqstp);
- return (NULL);
+ return NULL;
}
+/* nlm_lock_res ------------------------------------------------------------ */
+/*
+ * Purpose: Accept result from earlier nlm_lock_msg() call
+ * Returns: Nothing
+ */
void *
+/*ARGSUSED*/
nlm_lock_res_1_svc(nlm_res *arg, struct svc_req *rqstp)
{
if (debug_level)
log_from_addr("nlm_lock_res", rqstp);
- return (NULL);
+ return NULL;
}
+/* nlm_cancel_res ---------------------------------------------------------- */
+/*
+ * Purpose: Accept result from earlier nlm_cancel_msg() call
+ * Returns: Nothing
+ */
void *
+/*ARGSUSED*/
nlm_cancel_res_1_svc(nlm_res *arg, struct svc_req *rqstp)
{
if (debug_level)
log_from_addr("nlm_cancel_res", rqstp);
- return (NULL);
+ return NULL;
}
+/* nlm_unlock_res ---------------------------------------------------------- */
+/*
+ * Purpose: Accept result from earlier nlm_unlock_msg() call
+ * Returns: Nothing
+ */
void *
+/*ARGSUSED*/
nlm_unlock_res_1_svc(nlm_res *arg, struct svc_req *rqstp)
{
if (debug_level)
log_from_addr("nlm_unlock_res", rqstp);
- return (NULL);
+ return NULL;
}
+/* nlm_granted_res --------------------------------------------------------- */
+/*
+ * Purpose: Accept result from earlier nlm_granted_msg() call
+ * Returns: Nothing
+ */
void *
+/*ARGSUSED*/
nlm_granted_res_1_svc(nlm_res *arg, struct svc_req *rqstp)
{
if (debug_level)
log_from_addr("nlm_granted_res", rqstp);
- return (NULL);
+ return NULL;
}
+/* ------------------------------------------------------------------------- */
+/*
+ * Calls for PCNFS locking (aka non-monitored locking, no involvement
+ * of rpc.statd).
+ *
+ * These are all genuine RPCs - no nlm_xxx_msg() nonsense here.
+ */
+
+/* nlm_share --------------------------------------------------------------- */
+/*
+ * Purpose: Establish a DOS-style lock
+ * Returns: success or failure
+ * Notes: Blocking locks are not supported - client is expected
+ * to retry if required.
+ */
nlm_shareres *
nlm_share_3_svc(nlm_shareargs *arg, struct svc_req *rqstp)
{
- static nlm_shareres res;
+ static nlm_shareres result;
if (debug_level)
log_from_addr("nlm_share", rqstp);
- res.cookie = arg->cookie;
- res.stat = nlm_granted;
- res.sequence = 1234356; /* X/Open says this field is ignored? */
- return (&res);
+
+ result.cookie = arg->cookie;
+ result.stat = nlm_granted;
+ result.sequence = 1234356; /* X/Open says this field is ignored? */
+ return &result;
}
+/* nlm_unshare ------------------------------------------------------------ */
+/*
+ * Purpose: Release a DOS-style lock
+ * Returns: nlm_granted, unless in grace period
+ * Notes:
+ */
nlm_shareres *
nlm_unshare_3_svc(nlm_shareargs *arg, struct svc_req *rqstp)
{
- static nlm_shareres res;
+ static nlm_shareres result;
if (debug_level)
log_from_addr("nlm_unshare", rqstp);
- res.cookie = arg->cookie;
- res.stat = nlm_granted;
- res.sequence = 1234356; /* X/Open says this field is ignored? */
- return (&res);
+
+ result.cookie = arg->cookie;
+ result.stat = nlm_granted;
+ result.sequence = 1234356; /* X/Open says this field is ignored? */
+ return &result;
}
+/* nlm_nm_lock ------------------------------------------------------------ */
+/*
+ * Purpose: non-monitored version of nlm_lock()
+ * Returns: as for nlm_lock()
+ * Notes: These locks are in the same style as the standard nlm_lock,
+ * but the rpc.statd should not be called to establish a
+ * monitor for the client machine, since that machine is
+ * declared not to be running a rpc.statd, and so would not
+ * respond to the statd protocol.
+ */
nlm_res *
nlm_nm_lock_3_svc(nlm_lockargs *arg, struct svc_req *rqstp)
{
- static nlm_res res;
+ static nlm_res result;
if (debug_level)
log_from_addr("nlm_nm_lock", rqstp);
- res.cookie = arg->cookie;
- res.stat.stat = nlm_granted;
- return (&res);
+
+ /* copy cookie from arg to result. See comment in nlm_test_1() */
+ result.cookie = arg->cookie;
+ result.stat.stat = nlm_granted;
+ return &result;
}
+/* nlm_free_all ------------------------------------------------------------ */
+/*
+ * Purpose: Release all locks held by a named client
+ * Returns: Nothing
+ * Notes: Potential denial of service security problem here - the
+ * locks to be released are specified by a host name, independent
+ * of the address from which the request has arrived.
+ * Should probably be rejected if the named host has been
+ * using monitored locks.
+ */
void *
+/*ARGSUSED*/
nlm_free_all_3_svc(nlm_notify *arg, struct svc_req *rqstp)
{
static char dummy;
if (debug_level)
log_from_addr("nlm_free_all", rqstp);
- return (&dummy);
+ return &dummy;
+}
+
+/* calls for nlm version 4 (NFSv3) */
+/* nlm_test ---------------------------------------------------------------- */
+/*
+ * Purpose: Test whether a specified lock would be granted if requested
+ * Returns: nlm_granted (or error code)
+ * Notes:
+ */
+nlm4_testres *
+nlm4_test_4_svc(nlm4_testargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_testres result;
+ struct nlm4_holder *holder;
+
+ if (debug_level)
+ log_from_addr("nlm4_test", rqstp);
+
+ holder = testlock(&arg->alock, LOCK_V4);
+
+ /*
+ * Copy the cookie from the argument into the result. Note that this
+ * is slightly hazardous, as the structure contains a pointer to a
+ * malloc()ed buffer that will get freed by the caller. However, the
+ * main function transmits the result before freeing the argument
+ * so it is in fact safe.
+ */
+ result.cookie = arg->cookie;
+ if (holder == NULL) {
+ result.stat.stat = nlm4_granted;
+ } else {
+ result.stat.stat = nlm4_denied;
+ memcpy(&result.stat.nlm4_testrply_u.holder, holder,
+ sizeof(struct nlm4_holder));
+ }
+ return &result;
+}
+
+void *
+nlm4_test_msg_4_svc(nlm4_testargs *arg, struct svc_req *rqstp)
+{
+ nlm4_testres result;
+ static char dummy;
+ struct sockaddr_in *addr;
+ CLIENT *cli;
+ int success;
+ struct timeval timeo;
+ struct nlm4_holder *holder;
+
+ if (debug_level)
+ log_from_addr("nlm4_test_msg", rqstp);
+
+ holder = testlock(&arg->alock, LOCK_V4);
+
+ result.cookie = arg->cookie;
+ if (holder == NULL) {
+ result.stat.stat = nlm4_granted;
+ } else {
+ result.stat.stat = nlm4_denied;
+ memcpy(&result.stat.nlm4_testrply_u.holder, holder,
+ sizeof(struct nlm4_holder));
+ }
+
+ /*
+ * nlm_test has different result type to the other operations, so
+ * can't use transmit4_result() in this case
+ */
+ addr = svc_getcaller(rqstp->rq_xprt);
+ if ((cli = get_client(addr, NLM_VERS4)) != NULL) {
+ timeo.tv_sec = 0; /* No timeout - not expecting response */
+ timeo.tv_usec = 0;
+
+ success = clnt_call(cli, NLM4_TEST_RES, xdr_nlm4_testres,
+ &result, xdr_void, &dummy, timeo);
+
+ if (debug_level > 2)
+ syslog(LOG_DEBUG, "clnt_call returns %d", success);
+ }
+ return NULL;
+}
+
+/* nlm_lock ---------------------------------------------------------------- */
+/*
+ * Purposes: Establish a lock
+ * Returns: granted, denied or blocked
+ * Notes: *** grace period support missing
+ */
+nlm4_res *
+nlm4_lock_4_svc(nlm4_lockargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_lock", rqstp);
+
+	/* copy cookie from arg to result. See comment in nlm4_test_4() */
+ result.cookie = arg->cookie;
+
+ result.stat.stat = getlock(arg, rqstp, LOCK_MON | LOCK_V4);
+ return &result;
+}
+
+void *
+nlm4_lock_msg_4_svc(nlm4_lockargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_lock_msg", rqstp);
+
+ result.cookie = arg->cookie;
+ result.stat.stat = getlock(arg, rqstp, LOCK_MON | LOCK_ASYNC | LOCK_V4);
+ transmit4_result(NLM4_LOCK_RES, &result, svc_getcaller(rqstp->rq_xprt));
+
+ return NULL;
+}
+
+/* nlm_cancel -------------------------------------------------------------- */
+/*
+ * Purpose: Cancel a blocked lock request
+ * Returns: granted or denied
+ * Notes:
+ */
+nlm4_res *
+nlm4_cancel_4_svc(nlm4_cancargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_cancel", rqstp);
+
+ /* copy cookie from arg to result. See comment in nlm_test_1() */
+ result.cookie = arg->cookie;
+
+ /*
+ * Since at present we never return 'nlm_blocked', there can never be
+ * a lock to cancel, so this call always fails.
+ */
+ result.stat.stat = unlock(&arg->alock, LOCK_CANCEL);
+ return &result;
+}
+
+void *
+nlm4_cancel_msg_4_svc(nlm4_cancargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_cancel_msg", rqstp);
+
+ result.cookie = arg->cookie;
+ /*
+ * Since at present we never return 'nlm_blocked', there can never be
+ * a lock to cancel, so this call always fails.
+ */
+ result.stat.stat = unlock(&arg->alock, LOCK_CANCEL | LOCK_V4);
+ transmit4_result(NLM4_CANCEL_RES, &result, svc_getcaller(rqstp->rq_xprt));
+
+ return NULL;
+}
+
+/* nlm_unlock -------------------------------------------------------------- */
+/*
+ * Purpose: Release an existing lock
+ * Returns: Always granted, unless during grace period
+ * Notes: "no such lock" error condition is ignored, as the
+ * protocol uses unreliable UDP datagrams, and may well
+ * re-try an unlock that has already succeeded.
+ */
+nlm4_res *
+nlm4_unlock_4_svc(nlm4_unlockargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_unlock", rqstp);
+
+ result.stat.stat = unlock(&arg->alock, LOCK_V4);
+ result.cookie = arg->cookie;
+
+ return &result;
+}
+
+void *
+nlm4_unlock_msg_4_svc(nlm4_unlockargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_unlock_msg", rqstp);
+
+ result.stat.stat = unlock(&arg->alock, LOCK_V4);
+ result.cookie = arg->cookie;
+
+ transmit4_result(NLM4_UNLOCK_RES, &result, svc_getcaller(rqstp->rq_xprt));
+ return NULL;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * Client-side pseudo-RPCs for results. Note that for the client there
+ * are only nlm_xxx_msg() versions of each call, since the 'real RPC'
+ * version returns the results in the RPC result, and so the client
+ * does not normally receive incoming RPCs.
+ *
+ * The exception to this is nlm_granted(), which is genuinely an RPC
+ * call from the server to the client - a 'call-back' in normal procedure
+ * call terms.
+ */
+
+/* nlm_granted ------------------------------------------------------------- */
+/*
+ * Purpose: Receive notification that formerly blocked lock now granted
+ * Returns: always success ('granted')
+ * Notes:
+ */
+nlm4_res *
+nlm4_granted_4_svc(nlm4_testargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_granted", rqstp);
+
+ /* copy cookie from arg to result. See comment in nlm_test_1() */
+ result.cookie = arg->cookie;
+
+ result.stat.stat = nlm4_granted;
+ return &result;
+}
+
+void *
+nlm4_granted_msg_4_svc(nlm4_testargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_granted_msg", rqstp);
+
+ result.cookie = arg->cookie;
+ result.stat.stat = nlm4_granted;
+ transmit4_result(NLM4_GRANTED_RES, &result, svc_getcaller(rqstp->rq_xprt));
+ return NULL;
+}
+
+/* nlm_test_res ------------------------------------------------------------ */
+/*
+ * Purpose: Accept result from earlier nlm_test_msg() call
+ * Returns: Nothing
+ */
+void *
+/*ARGSUSED*/
+nlm4_test_res_4_svc(nlm4_testres *arg, struct svc_req *rqstp)
+{
+ if (debug_level)
+ log_from_addr("nlm4_test_res", rqstp);
+ return NULL;
+}
+
+/* nlm_lock_res ------------------------------------------------------------ */
+/*
+ * Purpose: Accept result from earlier nlm_lock_msg() call
+ * Returns: Nothing
+ */
+void *
+/*ARGSUSED*/
+nlm4_lock_res_4_svc(nlm4_res *arg, struct svc_req *rqstp)
+{
+ if (debug_level)
+ log_from_addr("nlm4_lock_res", rqstp);
+
+ return NULL;
+}
+
+/* nlm_cancel_res ---------------------------------------------------------- */
+/*
+ * Purpose: Accept result from earlier nlm_cancel_msg() call
+ * Returns: Nothing
+ */
+void *
+/*ARGSUSED*/
+nlm4_cancel_res_4_svc(nlm4_res *arg, struct svc_req *rqstp)
+{
+ if (debug_level)
+ log_from_addr("nlm4_cancel_res", rqstp);
+ return NULL;
+}
+
+/* nlm_unlock_res ---------------------------------------------------------- */
+/*
+ * Purpose: Accept result from earlier nlm_unlock_msg() call
+ * Returns: Nothing
+ */
+void *
+/*ARGSUSED*/
+nlm4_unlock_res_4_svc(nlm4_res *arg, struct svc_req *rqstp)
+{
+ if (debug_level)
+ log_from_addr("nlm4_unlock_res", rqstp);
+ return NULL;
+}
+
+/* nlm_granted_res --------------------------------------------------------- */
+/*
+ * Purpose: Accept result from earlier nlm_granted_msg() call
+ * Returns: Nothing
+ */
+void *
+/*ARGSUSED*/
+nlm4_granted_res_4_svc(nlm4_res *arg, struct svc_req *rqstp)
+{
+ if (debug_level)
+ log_from_addr("nlm4_granted_res", rqstp);
+ return NULL;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * Calls for PCNFS locking (aka non-monitored locking, no involvement
+ * of rpc.statd).
+ *
+ * These are all genuine RPCs - no nlm_xxx_msg() nonsense here.
+ */
+
+/* nlm_share --------------------------------------------------------------- */
+/*
+ * Purpose: Establish a DOS-style lock
+ * Returns: success or failure
+ * Notes: Blocking locks are not supported - client is expected
+ * to retry if required.
+ */
+nlm4_shareres *
+nlm4_share_4_svc(nlm4_shareargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_shareres result;
+
+ if (debug_level)
+ log_from_addr("nlm4_share", rqstp);
+
+ result.cookie = arg->cookie;
+ result.stat = nlm4_granted;
+ result.sequence = 1234356; /* X/Open says this field is ignored? */
+ return &result;
+}
+
+/* nlm4_unshare ------------------------------------------------------------ */
+/*
+ * Purpose: Release a DOS-style lock
+ * Returns: nlm_granted, unless in grace period
+ * Notes:
+ */
+nlm4_shareres *
+nlm4_unshare_4_svc(nlm4_shareargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_shareres result;
+
+ if (debug_level)
+ log_from_addr("nlm_unshare", rqstp);
+
+ result.cookie = arg->cookie;
+ result.stat = nlm4_granted;
+ result.sequence = 1234356; /* X/Open says this field is ignored? */
+ return &result;
+}
+
+/* nlm4_nm_lock ------------------------------------------------------------ */
+/*
+ * Purpose: non-monitored version of nlm4_lock()
+ * Returns: as for nlm4_lock()
+ * Notes: These locks are in the same style as the standard nlm4_lock,
+ * but the rpc.statd should not be called to establish a
+ * monitor for the client machine, since that machine is
+ * declared not to be running a rpc.statd, and so would not
+ * respond to the statd protocol.
+ */
+nlm4_res *
+nlm4_nm_lock_4_svc(nlm4_lockargs *arg, struct svc_req *rqstp)
+{
+ static nlm4_res result;
+
+ if (debug_level)
+ log_from_addr("nlm4_nm_lock", rqstp);
+
+	/* copy cookie from arg to result. See comment in nlm4_test_4() */
+ result.cookie = arg->cookie;
+ result.stat.stat = nlm4_granted;
+ return &result;
+}
+
+/* nlm4_free_all ------------------------------------------------------------ */
+/*
+ * Purpose: Release all locks held by a named client
+ * Returns: Nothing
+ * Notes: Potential denial of service security problem here - the
+ * locks to be released are specified by a host name, independent
+ * of the address from which the request has arrived.
+ * Should probably be rejected if the named host has been
+ * using monitored locks.
+ */
+void *
+/*ARGSUSED*/
+nlm4_free_all_4_svc(nlm_notify *arg, struct svc_req *rqstp)
+{
+ static char dummy;
+
+ if (debug_level)
+ log_from_addr("nlm4_free_all", rqstp);
+ return &dummy;
}
diff --git a/usr.sbin/rpc.lockd/rpc.lockd.8 b/usr.sbin/rpc.lockd/rpc.lockd.8
index b750fb0f9c7..642c6c90cab 100644
--- a/usr.sbin/rpc.lockd/rpc.lockd.8
+++ b/usr.sbin/rpc.lockd/rpc.lockd.8
@@ -1,4 +1,4 @@
-.\" $OpenBSD: rpc.lockd.8,v 1.11 2007/05/31 19:20:28 jmc Exp $
+.\" $OpenBSD: rpc.lockd.8,v 1.12 2008/06/13 21:32:26 sturm Exp $
.\"
.\" Copyright (c) 1995 A.R.Gordon, andrew.gordon@net-tel.co.uk
.\" All rights reserved.
@@ -32,7 +32,7 @@
.\" SUCH DAMAGE.
.\"
.\"
-.Dd $Mdocdate: May 31 2007 $
+.Dd $Mdocdate: June 13 2008 $
.Dt RPC.LOCKD 8
.Os
.Sh NAME
@@ -41,6 +41,7 @@
.Sh SYNOPSIS
.Nm rpc.lockd
.Op Fl d Op Ar debug_level
+.Op Fl g Op Ar grace_period
.Sh DESCRIPTION
.Nm rpc.lockd
is a daemon which provides file- and record-locking services in an NFS
@@ -61,6 +62,15 @@ is not specified,
level 1 is assumed, giving one log line per protocol operation.
Higher debug levels can be specified, causing display of operation arguments
and internal operations of the daemon.
+.It Fl g
+The
+.Fl g
+option specifies the grace period, in seconds.
+During the grace period
+.Nm
+only accepts requests from hosts which are reinitialising locks which
+existed before the server restart.
+Default is 30 seconds.
.El
.Pp
Error conditions are logged to syslog, irrespective of the debug level,
@@ -97,8 +107,4 @@ but there is currently no means for an
.Ox
client to establish locks).
.Pp
-Versions 1, 2 and 3 of the protocol are supported.
-However, only versions
-2 (Unix systems) and 3 (PC-NFS clients) seem to be in common use - the version
-1 support has not been tested due to the lack of version 1 clients against
-which to test.
+The current implementation serialises lock requests that could be shared.