Mirror of https://github.com/freebsd/freebsd-src.git (synced 2024-11-30 10:52:50 +00:00)
lib/libc/rpc: switch the per-fd structs in clnt_{dg,vc}.c to RB Trees
This saves oodles of memory, especially when "ulimit -n" is large.  It also
prevents a buffer overflow if getrlimit should fail.  Also replace the per-fd
condvars with mutexes to simplify the code.

PR:		274968
MFC after:	2 weeks
Sponsored by:	Axcient
Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D42597
Commit: 24938f9311 (parent: a5c2f4e939)
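The core data-structure change is the same in both files: the per-fd state moves from an array indexed by file descriptor into a red-black tree from <sys/tree.h>, keyed on the fd and populated lazily. The sketch below is a minimal standalone illustration of that pattern, not the libc code itself: the names (struct fd_lock, fd_lock_tree, fd_lock_find, tree_lock) are made up for the example, and plain pthread calls stand in for the mutex_t wrappers that libc uses internally.

#include <sys/tree.h>
#include <pthread.h>
#include <stdlib.h>

struct fd_lock {
	RB_ENTRY(fd_lock) link;		/* red-black tree linkage */
	int fd;				/* key: the file descriptor */
	pthread_mutex_t mtx;		/* serializes all CLIENTs on this fd */
};

static inline int
cmp_fd_lock(struct fd_lock *a, struct fd_lock *b)
{
	/* total order on the fd key, as RB_GENERATE() requires */
	return ((a->fd > b->fd) - (a->fd < b->fd));
}

RB_HEAD(fd_lock_tree, fd_lock);
RB_PROTOTYPE(fd_lock_tree, fd_lock, link, cmp_fd_lock);
RB_GENERATE(fd_lock_tree, fd_lock, link, cmp_fd_lock);

static struct fd_lock_tree fd_lock_head = RB_INITIALIZER(&fd_lock_head);
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look up the node for fd, creating it on first use. */
static struct fd_lock *
fd_lock_find(int fd)
{
	struct fd_lock key, *elem;

	key.fd = fd;
	pthread_mutex_lock(&tree_lock);
	elem = RB_FIND(fd_lock_tree, &fd_lock_head, &key);
	if (elem == NULL) {
		elem = calloc(1, sizeof(*elem));
		if (elem != NULL) {
			elem->fd = fd;
			pthread_mutex_init(&elem->mtx, NULL);
			RB_INSERT(fd_lock_tree, &fd_lock_head, elem);
		}
	}
	pthread_mutex_unlock(&tree_lock);
	return (elem);
}

Because a node is created only the first time a descriptor is used for RPC, memory grows with the number of descriptors actually in use rather than with the "ulimit -n" value, and nothing is sized from getrlimit(), which is the failure path that could previously overflow the array.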
@@ -58,7 +58,6 @@

__BEGIN_DECLS
extern u_int __rpc_get_a_size(int);
extern int __rpc_dtbsize(void);
extern int _rpc_dtablesize(void);
extern struct netconfig * __rpcgettp(int);
extern int __rpc_get_default_domain(char **);
@@ -48,13 +48,17 @@ static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/tree.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <stdbool.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
@@ -89,26 +93,60 @@ static void clnt_dg_destroy(CLIENT *);
 * This machinery implements per-fd locks for MT-safety. It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it. Therefore, we allocate an array of flags and condition variables
 * (dg_fd) protected by the clnt_fd_lock mutex. dg_fd[fd].lock == 1 => a
 * call is active on some CLIENT handle created for that fd. The current
 * implementation holds locks across the entire RPC and reply, including
 * retransmissions. Yes, this is silly, and as soon as this code is
 * proven to work, this should be the first thing fixed. One step at a
 * time.
 * it. Therefore, we allocate an associative array of flags and condition
 * variables (dg_fd). The flags and the array are protected by the
 * clnt_fd_lock mutex. dg_fd[fd].lock == 1 => a call is active on some
 * CLIENT handle created for that fd. The current implementation holds
 * locks across the entire RPC and reply, including retransmissions. Yes,
 * this is silly, and as soon as this code is proven to work, this should
 * be the first thing fixed. One step at a time.
 */
static struct {
	int lock;
	cond_t cv;
} *dg_fd;
static void
release_fd_lock(int fd, sigset_t mask)
struct dg_fd {
	RB_ENTRY(dg_fd) dg_link;
	int fd;
	mutex_t mtx;
};
static inline int
cmp_dg_fd(struct dg_fd *a, struct dg_fd *b)
{
	mutex_lock(&clnt_fd_lock);
	dg_fd[fd].lock = 0;
	mutex_unlock(&clnt_fd_lock);
	if (a->fd > b->fd) {
		return (1);
	} else if (a->fd < b->fd) {
		return (-1);
	} else {
		return (0);
	}
}
RB_HEAD(dg_fd_list, dg_fd);
RB_PROTOTYPE(dg_fd_list, dg_fd, dg_link, cmp_dg_fd);
RB_GENERATE(dg_fd_list, dg_fd, dg_link, cmp_dg_fd);
struct dg_fd_list dg_fd_head = RB_INITIALIZER(&dg_fd_head);

/*
 * Find the lock structure for the given file descriptor, or initialize it if
 * it does not already exist. The clnt_fd_lock mutex must be held.
 */
static struct dg_fd *
dg_fd_find(int fd)
{
	struct dg_fd key, *elem;

	key.fd = fd;
	elem = RB_FIND(dg_fd_list, &dg_fd_head, &key);
	if (elem == NULL) {
		elem = calloc(1, sizeof(*elem));
		elem->fd = fd;
		mutex_init(&elem->mtx, NULL);
		RB_INSERT(dg_fd_list, &dg_fd_head, elem);
	}
	return (elem);
}

static void
release_fd_lock(struct dg_fd *elem, sigset_t mask)
{
	mutex_unlock(&elem->mtx);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_fd[fd].cv);
}

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";
@@ -171,35 +209,9 @@ clnt_dg_create(int fd, const struct netbuf *svcaddr, rpcprog_t program,
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd == NULL) {
		size_t allocsz;
		int i;
		int dtbsize = __rpc_dtbsize();

		allocsz = dtbsize * sizeof (dg_fd[0]);
		dg_fd = mem_alloc(allocsz);
		if (dg_fd == NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		}
		memset(dg_fd, '\0', allocsz);

		for (i = 0; i < dtbsize; i++)
			cond_init(&dg_fd[i].cv, 0, (void *) 0);
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
@@ -320,25 +332,21 @@ clnt_dg_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xargs, void *argsp,
	struct timespec ts;
	struct kevent kv;
	struct sockaddr *sa;
	struct dg_fd *elem;
	sigset_t mask;
	sigset_t newmask;
	socklen_t salen;
	ssize_t recvlen = 0;
	int kin_len, n, rpc_lock_value;
	int kin_len, n;
	u_int32_t xid;

	outlen = 0;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd[cu->cu_fd].lock)
		cond_wait(&dg_fd[cu->cu_fd].cv, &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd[cu->cu_fd].lock = rpc_lock_value;
	elem = dg_fd_find(cu->cu_fd);
	mutex_unlock(&clnt_fd_lock);
	mutex_lock(&elem->mtx);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
@@ -592,7 +600,7 @@ out:
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	cu->cu_kq = -1;
	release_fd_lock(cu->cu_fd, mask);
	release_fd_lock(elem, mask);
	return (cu->cu_error.re_status);
}

@@ -608,6 +616,7 @@ static bool_t
clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct dg_fd *elem;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
@@ -616,13 +625,12 @@ clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd[cu->cu_fd].lock)
		cond_wait(&dg_fd[cu->cu_fd].cv, &clnt_fd_lock);
	elem = dg_fd_find(cu->cu_fd);
	mutex_lock(&elem->mtx);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_fd[cu->cu_fd].cv);
	release_fd_lock(elem, mask);
	return (dummy);
}

@@ -637,41 +645,36 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct netbuf *addr;
	struct dg_fd *elem;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd[cu->cu_fd].lock)
		cond_wait(&dg_fd[cu->cu_fd].cv, &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd[cu->cu_fd].lock = rpc_lock_value;
	elem = dg_fd_find(cu->cu_fd);
	mutex_unlock(&clnt_fd_lock);
	mutex_lock(&elem->mtx);
	switch (request) {
	case CLSET_FD_CLOSE:
		cu->cu_closeit = TRUE;
		release_fd_lock(cu->cu_fd, mask);
		release_fd_lock(elem, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		cu->cu_closeit = FALSE;
		release_fd_lock(cu->cu_fd, mask);
		release_fd_lock(elem, mask);
		return (TRUE);
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		release_fd_lock(elem, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			release_fd_lock(elem, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)info;
@@ -685,7 +688,7 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			release_fd_lock(elem, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)info;
@@ -705,7 +708,7 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			release_fd_lock(elem, mask);
			return (FALSE);
		}
		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
@@ -768,10 +771,10 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
		cu->cu_connect = *(int *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		release_fd_lock(elem, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	release_fd_lock(elem, mask);
	return (TRUE);
}

@@ -779,6 +782,7 @@ static void
clnt_dg_destroy(CLIENT *cl)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct dg_fd *elem;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;
@@ -786,8 +790,8 @@ clnt_dg_destroy(CLIENT *cl)
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd[cu_fd].lock)
		cond_wait(&dg_fd[cu_fd].cv, &clnt_fd_lock);
	elem = dg_fd_find(cu_fd);
	mutex_lock(&elem->mtx);
	if (cu->cu_closeit)
		(void)_close(cu_fd);
	if (cu->cu_kq >= 0)
@@ -800,8 +804,7 @@ clnt_dg_destroy(CLIENT *cl)
	mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_fd[cu_fd].cv);
	release_fd_lock(elem, mask);
}

static struct clnt_ops *
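The machinery comment above spells out the locking protocol; each clnt_dg_* method now follows the same sequence: find (or create) the fd's node while holding clnt_fd_lock, drop that lock, then hold the per-fd mutex for the duration of the operation and release it through release_fd_lock(). A hedged sketch of that calling convention, reusing the illustrative fd_lock helpers from the earlier example and leaving out the signal-mask save/restore that the real functions perform:

/*
 * Sketch of the per-fd locking protocol after this change.  fd_lock_find()
 * and tree_lock are the illustrative helpers defined earlier; the real code
 * uses dg_fd_find()/vc_fd_find() with clnt_fd_lock and release_fd_lock().
 */
static void
with_fd_serialized(int fd, void (*body)(void *), void *arg)
{
	struct fd_lock *elem;

	pthread_mutex_lock(&tree_lock);		/* protects only the tree */
	elem = fd_lock_find(fd);		/* find-or-create node for fd */
	pthread_mutex_unlock(&tree_lock);
	if (elem == NULL)
		return;				/* allocation failed */

	pthread_mutex_lock(&elem->mtx);		/* one call at a time per fd */
	body(arg);				/* e.g. send the RPC, await reply */
	pthread_mutex_unlock(&elem->mtx);	/* release_fd_lock() analogue */
}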
@@ -60,6 +60,7 @@ static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/tree.h>
#include <sys/un.h>
#include <sys/uio.h>

@@ -68,7 +69,9 @@ static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -120,25 +123,60 @@ struct ct_data {
 * This machinery implements per-fd locks for MT-safety. It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it. Therefore, we allocate an array of flags and condition variables
 * (vc_fd) protected by the clnt_fd_lock mutex. vc_fd_lock[fd] == 1 => a
 * call is active on some CLIENT handle created for that fd. The current
 * implementation holds locks across the entire RPC and reply. Yes, this
 * is silly, and as soon as this code is proven to work, this should be
 * the first thing fixed. One step at a time.
 * it. Therefore, we allocate an associative array of flags and condition
 * variables (vc_fd). The flags and the array are protected by the
 * clnt_fd_lock mutex. vc_fd_lock[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd. The current implementation holds
 * locks across the entire RPC and reply. Yes, this is silly, and as soon
 * as this code is proven to work, this should be the first thing fixed.
 * One step at a time.
 */
static struct {
	int lock;
	cond_t cv;
} *vc_fd;
static void
release_fd_lock(int fd, sigset_t mask)
struct vc_fd {
	RB_ENTRY(vc_fd) vc_link;
	int fd;
	mutex_t mtx;
};
static inline int
cmp_vc_fd(struct vc_fd *a, struct vc_fd *b)
{
	mutex_lock(&clnt_fd_lock);
	vc_fd[fd].lock = 0;
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, (sigset_t *) NULL);
	cond_signal(&vc_fd[fd].cv);
	if (a->fd > b->fd) {
		return (1);
	} else if (a->fd < b->fd) {
		return (-1);
	} else {
		return (0);
	}
}
RB_HEAD(vc_fd_list, vc_fd);
RB_PROTOTYPE(vc_fd_list, vc_fd, vc_link, cmp_vc_fd);
RB_GENERATE(vc_fd_list, vc_fd, vc_link, cmp_vc_fd);
struct vc_fd_list vc_fd_head = RB_INITIALIZER(&vc_fd_head);

/*
 * Find the lock structure for the given file descriptor, or initialize it if
 * it does not already exist. The clnt_fd_lock mutex must be held.
 */
static struct vc_fd *
vc_fd_find(int fd)
{
	struct vc_fd key, *elem;

	key.fd = fd;
	elem = RB_FIND(vc_fd_list, &vc_fd_head, &key);
	if (elem == NULL) {
		elem = calloc(1, sizeof(*elem));
		elem->fd = fd;
		mutex_init(&elem->mtx, NULL);
		RB_INSERT(vc_fd_list, &vc_fd_head, elem);
	}
	return (elem);
}

static void
release_fd_lock(struct vc_fd *elem, sigset_t mask)
{
	mutex_unlock(&elem->mtx);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
}

static const char clnt_vc_errstr[] = "%s : %s";
@@ -172,8 +210,6 @@ clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
	struct timeval now;
	struct rpc_msg call_msg;
	static u_int32_t disrupt;
	sigset_t mask;
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;
@@ -191,26 +227,6 @@ clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
		goto err;
	}
	ct->ct_addr.buf = NULL;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (vc_fd == NULL) {
		size_t allocsz;
		int i;
		int dtbsize = __rpc_dtbsize();

		allocsz = dtbsize * sizeof (vc_fd[0]);
		vc_fd = mem_alloc(allocsz);
		if (vc_fd == NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
		memset(vc_fd, '\0', allocsz);

		for (i = 0; i < dtbsize; i++)
			cond_init(&vc_fd[i].cv, 0, (void *) 0);
	}

	/*
	 * XXX - fvdl connecting while holding a mutex?
@@ -221,19 +237,16 @@ clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
		if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto err;

@@ -308,12 +321,12 @@ clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	struct vc_fd *elem;
	u_int32_t x_id;
	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;	/* yuk */
	bool_t shipnow;
	int refreshes = 2;
	sigset_t mask, newmask;
	int rpc_lock_value;
	bool_t reply_stat;

	assert(cl != NULL);
@@ -321,14 +334,9 @@ clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd[ct->ct_fd].lock)
		cond_wait(&vc_fd[ct->ct_fd].cv, &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd[ct->ct_fd].lock = rpc_lock_value;
	elem = vc_fd_find(ct->ct_fd);
	mutex_unlock(&clnt_fd_lock);
	mutex_lock(&elem->mtx);
	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
		if (time_not_ok(&timeout) == FALSE)
@@ -352,7 +360,7 @@ call_again:
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			release_fd_lock(elem, mask);
			return (ct->ct_error.re_status);
		}
	} else {
@@ -363,23 +371,23 @@ call_again:
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			release_fd_lock(elem, mask);
			return (ct->ct_error.re_status);
		}
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return(ct->ct_error.re_status = RPC_TIMEDOUT);
	}

@@ -393,14 +401,14 @@ call_again:
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			release_fd_lock(elem, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			release_fd_lock(elem, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
@@ -441,7 +449,7 @@ call_again:
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
	}	/* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	release_fd_lock(elem, mask);
	return (ct->ct_error.re_status);
}

@@ -461,6 +469,7 @@ static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	struct ct_data *ct;
	struct vc_fd *elem;
	XDR *xdrs;
	bool_t dummy;
	sigset_t mask;
@@ -474,14 +483,13 @@ clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd[ct->ct_fd].lock)
		cond_wait(&vc_fd[ct->ct_fd].cv, &clnt_fd_lock);
	elem = vc_fd_find(ct->ct_fd);
	mutex_lock(&elem->mtx);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_fd[ct->ct_fd].cv);

	mutex_unlock(&clnt_fd_lock);
	release_fd_lock(elem, mask);
	return dummy;
}

@@ -509,10 +517,10 @@ static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct;
	struct vc_fd *elem;
	void *infop = info;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	assert(cl != NULL);

@@ -521,23 +529,18 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd[ct->ct_fd].lock)
		cond_wait(&vc_fd[ct->ct_fd].cv, &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd[ct->ct_fd].lock = rpc_lock_value;
	elem = vc_fd_find(ct->ct_fd);
	mutex_unlock(&clnt_fd_lock);
	mutex_lock(&elem->mtx);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (TRUE);
	default:
		break;
@@ -545,13 +548,13 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			release_fd_lock(elem, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
@@ -571,7 +574,7 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
		*(struct netbuf *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (FALSE);
	case CLGET_XID:
		/*
@@ -615,10 +618,10 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		release_fd_lock(elem, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	release_fd_lock(elem, mask);
	return (TRUE);
}

@@ -627,6 +630,7 @@ static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct vc_fd *elem;
	int ct_fd = ct->ct_fd;
	sigset_t mask;
	sigset_t newmask;
@@ -638,8 +642,8 @@ clnt_vc_destroy(CLIENT *cl)
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd[ct_fd].lock)
		cond_wait(&vc_fd[ct_fd].cv, &clnt_fd_lock);
	elem = vc_fd_find(ct_fd);
	mutex_lock(&elem->mtx);
	if (ct->ct_closeit && ct->ct_fd != -1) {
		(void)_close(ct->ct_fd);
	}
@@ -652,8 +656,7 @@ clnt_vc_destroy(CLIENT *cl)
	mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_fd[ct_fd].cv);
	release_fd_lock(elem, mask);
}

/*
@@ -59,7 +59,6 @@

__BEGIN_DECLS
extern u_int __rpc_get_a_size(int);
extern int __rpc_dtbsize(void);
extern struct netconfig * __rpcgettp(int);
extern int __rpc_get_default_domain(char **);
@@ -104,29 +104,6 @@ static char *strlocase(char *);
#endif
static int getnettype(const char *);

/*
 * Cache the result of getrlimit(), so we don't have to do an
 * expensive call every time.
 */
int
__rpc_dtbsize(void)
{
	static int tbsize;
	struct rlimit rl;

	if (tbsize) {
		return (tbsize);
	}
	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
		return (tbsize = (int)rl.rlim_max);
	}
	/*
	 * Something wrong. I'll try to save face by returning a
	 * pessimistic number.
	 */
	return (32);
}


/*
 * Find the appropriate buffer size
@@ -70,7 +70,6 @@
__BEGIN_DECLS
#ifndef _KERNEL
extern u_int __rpc_get_a_size(int);
extern int __rpc_dtbsize(void);
extern struct netconfig * __rpcgettp(int);
extern int __rpc_get_default_domain(char **);