Mirror of https://git.openafs.org/openafs.git (synced 2025-01-31 05:27:44 +00:00)
DEVEL15-rx-statistics-active-flag-20090110
Permit rx statistics gathering to be disabled by setting the new rx_stats_active variable to 0. This avoids grabbing the rx_stats_mutex throughout the execution of requests and permits greater concurrency throughout the library.

(cherry picked from commit 30cfed8e25dfcc6c423d1e121360f048d6724328)
This commit is contained in:
parent 449a759be2
commit 6b60f14257
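
The change applies one mechanical pattern throughout: every unconditional, lock-protected statistics update is guarded by a cheap test of the new flag. A minimal sketch of the idea in portable C with pthreads (hypothetical names, not the rx code itself):

    #include <pthread.h>

    /* Global switch: when 0, statistics updates (and their lock traffic)
     * are skipped.  The read is deliberately unlocked; the flag is only
     * expected to change between runs of requests, so a stale read merely
     * delays the effect by one update. */
    static volatile int stats_active = 1;

    static pthread_mutex_t stats_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long packets_sent;  /* example counter */

    static void
    count_packet_sent(void)
    {
        if (!stats_active)          /* fast path: no mutex acquired */
            return;
        pthread_mutex_lock(&stats_mutex);
        packets_sent++;
        pthread_mutex_unlock(&stats_mutex);
    }

With the flag clear, concurrent callers never serialize on stats_mutex, which is the concurrency win the commit message describes.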
@@ -237,6 +237,8 @@ EXPORTS
 	rx_GetMinUdpBufSize @242
 	rx_SetUdpBufSize @243
 	rx_getAllAddrMaskMtu @244
+	rx_stats_active @245 DATA
+	rx_StatsOnOff @246

; for performance testing
 	rx_TSFPQGlobSize @2001 DATA
src/rx/rx.c (157 changed lines)
@@ -867,7 +867,8 @@ rx_NewConnection(register afs_uint32 shost, u_short sport, u_short sservice,
     conn->refCount++;		/* no lock required since only this thread knows... */
     conn->next = rx_connHashTable[hashindex];
     rx_connHashTable[hashindex] = conn;
-    rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
     MUTEX_EXIT(&rx_connHashTable_lock);
     USERPRI;
     return conn;
@@ -909,18 +910,23 @@ rxi_CleanupConnection(struct rx_connection *conn)
         conn->peer->idleWhen = clock_Sec();
         if (conn->peer->refCount < 1) {
             conn->peer->refCount = 1;
-            MUTEX_ENTER(&rx_stats_mutex);
-            rxi_lowPeerRefCount++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            if (rx_stats_active) {
+                MUTEX_ENTER(&rx_stats_mutex);
+                rxi_lowPeerRefCount++;
+                MUTEX_EXIT(&rx_stats_mutex);
+            }
         }
     }
     conn->peer->refCount--;
     MUTEX_EXIT(&rx_peerHashTable_lock);

-    if (conn->type == RX_SERVER_CONNECTION)
-        rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
-    else
-        rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+    if (rx_stats_active)
+    {
+        if (conn->type == RX_SERVER_CONNECTION)
+            rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+        else
+            rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+    }
 #ifndef KERNEL
     if (conn->specific) {
         int i;
@@ -977,9 +983,11 @@ rxi_DestroyConnectionNoLock(register struct rx_connection *conn)
     if (conn->refCount > 0)
         conn->refCount--;
     else {
-        MUTEX_ENTER(&rx_stats_mutex);
-        rxi_lowConnRefCount++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rxi_lowConnRefCount++;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
     }

     if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
@@ -1515,9 +1523,11 @@ rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp)
             (*tservice->afterProc) (call, code);

         rx_EndCall(call, code);
-        MUTEX_ENTER(&rx_stats_mutex);
-        rxi_nCalls++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rxi_nCalls++;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
     }
 }

@@ -2216,7 +2226,8 @@ rxi_NewCall(register struct rx_connection *conn, register int channel)
     call = queue_First(&rx_freeCallQueue, rx_call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
     queue_Remove(call);
-    rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
     MUTEX_EXIT(&rx_freeCallQueue_lock);
     MUTEX_ENTER(&call->lock);
     CLEAR_CALL_QUEUE_LOCK(call);
@@ -2311,7 +2322,8 @@ rxi_FreeCall(register struct rx_call *call)
 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
     queue_Append(&rx_freeCallQueue, call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-    rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
     MUTEX_EXIT(&rx_freeCallQueue_lock);

     /* Destroy the connection if it was previously slated for
@@ -2347,7 +2359,8 @@ rxi_Alloc(register size_t size)
 {
     register char *p;

-    rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);

     p = (char *)
 #if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
@@ -2364,7 +2377,8 @@ p = (char *)
 void
 rxi_Free(void *addr, register size_t size)
 {
-    rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
     osi_Free(addr, size);
 }

@@ -2434,7 +2448,8 @@ rxi_FindPeer(register afs_uint32 host, register u_short port,
         pp->next = rx_peerHashTable[hashIndex];
         rx_peerHashTable[hashIndex] = pp;
         rxi_InitPeerParams(pp);
-        rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
     }
 }
 if (pp && create) {
@@ -2544,7 +2559,8 @@ rxi_FindConnection(osi_socket socket, register afs_int32 host,
         /* XXXX Connection timeout? */
         if (service->newConnProc)
             (*service->newConnProc) (conn);
-        rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
     }

     MUTEX_ENTER(&conn->conn_data_lock);
@@ -2742,7 +2758,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
          * then, since this is a client connection we're getting data for
          * it must be for the previous call.
          */
-        rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
         MUTEX_ENTER(&conn->conn_data_lock);
         conn->refCount--;
         MUTEX_EXIT(&conn->conn_data_lock);
@@ -2754,7 +2771,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,

     if (type == RX_SERVER_CONNECTION) {	/* We're the server */
         if (np->header.callNumber < currentCallNumber) {
-            rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
             if (call)
                 MUTEX_EXIT(&call->lock);
@@ -2789,7 +2807,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
             MUTEX_ENTER(&conn->conn_data_lock);
             conn->refCount--;
             MUTEX_EXIT(&conn->conn_data_lock);
-            rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
             return tp;
         }
         rxi_KeepAliveOn(call);
@@ -2852,7 +2871,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
             MUTEX_ENTER(&conn->conn_data_lock);
             conn->refCount--;
             MUTEX_EXIT(&conn->conn_data_lock);
-            rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
             return tp;
         }
         rxi_KeepAliveOn(call);
@@ -2863,7 +2883,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
     /* Ignore all incoming acknowledgements for calls in DALLY state */
     if (call && (call->state == RX_STATE_DALLY)
         && (np->header.type == RX_PACKET_TYPE_ACK)) {
-        rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
         if (call) {
             MUTEX_EXIT(&call->lock);
@@ -2878,7 +2899,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
     /* Ignore anything that's not relevant to the current call. If there
      * isn't a current call, then no packet is relevant. */
     if (!call || (np->header.callNumber != currentCallNumber)) {
-        rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
         if (call) {
             MUTEX_EXIT(&call->lock);
@@ -2943,7 +2965,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
             * XXX interact badly with the server-restart detection
             * XXX code in receiveackpacket. */
            if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
-               rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+               if (rx_stats_active)
+                   rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
                MUTEX_EXIT(&call->lock);
                MUTEX_ENTER(&conn->conn_data_lock);
                conn->refCount--;
@@ -3246,7 +3269,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
     int isFirst;
     struct rx_packet *tnp;
     struct clock when, now;
-    rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);

 #ifdef KERNEL
     /* If there are no packet buffers, drop this new packet, unless we can find
@@ -3256,7 +3280,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
         MUTEX_ENTER(&rx_freePktQ_lock);
         rxi_NeedMorePackets = TRUE;
         MUTEX_EXIT(&rx_freePktQ_lock);
-        rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
         call->rprev = np->header.serial;
         rxi_calltrace(RX_TRACE_DROP, call);
         dpf(("packet %x dropped on receipt - quota problems", np));
@@ -3321,7 +3346,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
         /* Check to make sure it is not a duplicate of one already queued */
         if (queue_IsNotEmpty(&call->rq)
             && queue_First(&call->rq, rx_packet)->header.seq == seq) {
-            rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
             dpf(("packet %x dropped on receipt - duplicate", np));
             rxevent_Cancel(call->delayedAckEvent, call,
                            RX_CALL_REFCOUNT_DELAY);
@@ -3409,7 +3435,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
         /* If the new packet's sequence number has been sent to the
          * application already, then this is a duplicate */
         if (seq < call->rnext) {
-            rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
             rxevent_Cancel(call->delayedAckEvent, call,
                            RX_CALL_REFCOUNT_DELAY);
             np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
@@ -3436,7 +3463,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
              0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
             /*Check for duplicate packet */
             if (seq == tp->header.seq) {
-                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                 rxevent_Cancel(call->delayedAckEvent, call,
                                RX_CALL_REFCOUNT_DELAY);
                 np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
@@ -3680,7 +3708,8 @@ rxi_ReceiveAckPacket(register struct rx_call *call, struct rx_packet *np,
     u_short maxMTU = 0;		/* Set if peer supports AFS 3.4a jumbo datagrams */
     int maxDgramPackets = 0;	/* Set if peer supports AFS 3.5 jumbo datagrams */

-    rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
     ap = (struct rx_ackPacket *)rx_DataOf(np);
     nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
     if (nbytes < 0)
@@ -4571,7 +4600,8 @@ rxi_ConnectionError(register struct rx_connection *conn,
         }
     }
     conn->error = error;
-    rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
     }
 }

@@ -5009,7 +5039,8 @@ rxi_SendAck(register struct rx_call *call,
             nbytes -= p->wirevec[i].iov_len;
         }
     }
-    rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
 #ifndef RX_ENABLE_TSFPQ
     if (!optionalPacket)
         rxi_FreePacket(p);
@@ -5033,7 +5064,8 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
     peer->nSent += len;
     if (resending)
         peer->reSends += len;
-    rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
     MUTEX_EXIT(&peer->peer_lock);

     if (list[len - 1]->header.flags & RX_LAST_PACKET) {
@@ -5068,7 +5100,8 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
          * packet until the congestion window reaches the ack rate. */
         if (list[i]->header.serial) {
             requestAck = 1;
-            rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
         } else {
             /* improved RTO calculation- not Karn */
             list[i]->firstSent = *now;
@@ -5083,7 +5116,8 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
     peer->nSent++;
     if (resending)
         peer->reSends++;
-    rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
     MUTEX_EXIT(&peer->peer_lock);

     /* Tag this packet as not being the last in this group,
@@ -5307,7 +5341,8 @@ rxi_Start(struct rxevent *event,
     }
     if (call->error) {
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
-        rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
 #endif
         return;
     }
@@ -5382,7 +5417,8 @@ rxi_Start(struct rxevent *event,
             if (p->flags & RX_PKTFLAG_ACKED) {
                 /* Since we may block, don't trust this */
                 usenow.sec = usenow.usec = 0;
-                rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
                 continue;	/* Ignore this packet if it has been acknowledged */
             }

@@ -5449,7 +5485,8 @@ rxi_Start(struct rxevent *event,
                  * the time to reset the call. This will also inform the using
                  * process that the call is in an error state.
                  */
-                rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
                 call->flags &= ~RX_CALL_TQ_BUSY;
                 if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
                     dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
@@ -5933,19 +5970,21 @@ rxi_ComputeRoundTripTime(register struct rx_packet *p,
         return;			/* somebody set the clock back, don't count this time. */
     }
     clock_Sub(rttp, sentp);
-    MUTEX_ENTER(&rx_stats_mutex);
-    if (clock_Lt(rttp, &rx_stats.minRtt))
-        rx_stats.minRtt = *rttp;
-    if (clock_Gt(rttp, &rx_stats.maxRtt)) {
-        if (rttp->sec > 60) {
-            MUTEX_EXIT(&rx_stats_mutex);
-            return;		/* somebody set the clock ahead */
-        }
-        rx_stats.maxRtt = *rttp;
-    }
-    clock_Add(&rx_stats.totalRtt, rttp);
-    rx_stats.nRttSamples++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    if (rx_stats_active) {
+        MUTEX_ENTER(&rx_stats_mutex);
+        if (clock_Lt(rttp, &rx_stats.minRtt))
+            rx_stats.minRtt = *rttp;
+        if (clock_Gt(rttp, &rx_stats.maxRtt)) {
+            if (rttp->sec > 60) {
+                MUTEX_EXIT(&rx_stats_mutex);
+                return;		/* somebody set the clock ahead */
+            }
+            rx_stats.maxRtt = *rttp;
+        }
+        clock_Add(&rx_stats.totalRtt, rttp);
+        rx_stats.nRttSamples++;
+        MUTEX_EXIT(&rx_stats_mutex);
+    }

 /* better rtt calculation courtesy of UMich crew (dave,larry,peter,?) */

@@ -6128,7 +6167,8 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
                     rxi_rpc_peer_stat_cnt -= num_funcs;
                 }
                 rxi_FreePeer(peer);
-                rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
                 if (peer == *peer_ptr) {
                     *peer_ptr = next;
                     prev = next;
@@ -6367,6 +6407,12 @@ rx_DebugOnOff(int on)
 {
     rxdebug_active = on;
 }
+
+void
+rx_StatsOnOff(int on)
+{
+    rx_stats_active = on;
+}
 #endif /* AFS_NT40_ENV */


@@ -6974,7 +7020,8 @@ shutdown_rx(void)
             }
             next = peer->next;
             rxi_FreePeer(peer);
-            rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
         }
     }
 }
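
The rx_MutexIncrement/rx_MutexDecrement helpers that the new guards bypass are lock-wrapped update macros. Their assumed shape (a sketch only; the real definitions live in the rx headers):

    /* Each helper bundles a mutex round trip with a single counter
     * update -- exactly the per-operation cost that the new
     * rx_stats_active test avoids when statistics are off. */
    #define rx_MutexIncrement(object, mutex) \
        do { \
            MUTEX_ENTER(&(mutex)); \
            (object)++; \
            MUTEX_EXIT(&(mutex)); \
        } while (0)

    #define rx_MutexDecrement(object, mutex) \
        do { \
            MUTEX_ENTER(&(mutex)); \
            (object)--; \
            MUTEX_EXIT(&(mutex)); \
        } while (0)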
src/rx/rx_globals.h
@@ -543,6 +543,8 @@ EXT afs_kmutex_t rx_connHashTable_lock;
 #define rxi_AllocConnection() (struct rx_connection *) rxi_Alloc(sizeof(struct rx_connection))
 #define rxi_FreeConnection(conn) (rxi_Free(conn, sizeof(struct rx_connection)))

+EXT afs_int32 rx_stats_active GLOBALSINIT(1);	/* boolean - rx statistics gathering */
+
 #ifdef RXDEBUG
 /* Some debugging stuff */
 EXT FILE *rx_debugFile;	/* Set by the user to a stdio file for debugging output */
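
EXT and GLOBALSINIT are the usual single-definition device for rx globals: one translation unit expands them into a definition with an initializer, every other unit sees an extern declaration, so rx_stats_active defaults to 1 (statistics on) exactly once. A simplified sketch of the pattern (assumed shape; the guard macro name below is hypothetical):

    #ifdef GLOBALSINIT_DEFINING_FILE      /* hypothetical guard name */
    #define EXT
    #define GLOBALSINIT(x) = x
    #else
    #define EXT extern
    #define GLOBALSINIT(x)
    #endif

    EXT int rx_stats_active GLOBALSINIT(1);   /* stats gathering on by default */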
src/rx/rx_kcommon.c
@@ -328,9 +328,11 @@ MyPacketProc(struct rx_packet **ahandle, int asize)
                                    RX_PACKET_CLASS_RECV_CBUF)) {
                 rxi_FreePacket(tp);
                 tp = NULL;
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.noPacketBuffersOnRead++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.noPacketBuffersOnRead++;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
             }
         }
     } else {
@@ -339,9 +341,11 @@ MyPacketProc(struct rx_packet **ahandle, int asize)
          * should do this at a higher layer and let other
          * end know we're losing.
          */
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.bogusPacketOnRead++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rx_stats.bogusPacketOnRead++;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
         /* I DON"T LIKE THIS PRINTF -- PRINTFS MAKE THINGS VERY VERY SLOOWWW */
         dpf(("rx: packet dropped: bad ulen=%d\n", asize));
         tp = NULL;
@@ -1176,10 +1180,12 @@ rxk_ReadPacket(osi_socket so, struct rx_packet *p, int *host, int *port)
         p->length = nbytes - RX_HEADER_SIZE;;
         if ((nbytes > tlen) || (p->length & 0x8000)) {	/* Bogus packet */
             if (nbytes <= 0) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.bogusPacketOnRead++;
-                rx_stats.bogusHost = from.sin_addr.s_addr;
-                MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.bogusPacketOnRead++;
+                    rx_stats.bogusHost = from.sin_addr.s_addr;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
                 dpf(("B: bogus packet from [%x,%d] nb=%d",
                      from.sin_addr.s_addr, from.sin_port, nbytes));
             }
@@ -1191,9 +1197,11 @@ rxk_ReadPacket(osi_socket so, struct rx_packet *p, int *host, int *port)
             *host = from.sin_addr.s_addr;
             *port = from.sin_port;
             if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.packetsRead[p->header.type - 1]++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.packetsRead[p->header.type - 1]++;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
             }

             /* Free any empty packet buffers at the end of this packet */
src/rx/rx_packet.c
@@ -316,22 +316,24 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)

     if (overq) {
         rxi_NeedMorePackets = TRUE;
-        switch (class) {
-        case RX_PACKET_CLASS_RECEIVE:
-            rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SEND:
-            rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_RECV_CBUF:
-            rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SEND_CBUF:
-            rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-            break;
-        }
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
+        }
     }

@@ -1125,28 +1127,31 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
         rxi_NeedMorePackets = TRUE;
-        switch (class) {
-        case RX_PACKET_CLASS_RECEIVE:
-            rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SEND:
-            rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_RECV_CBUF:
-            rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SEND_CBUF:
-            rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-            break;
-        }
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
+        }
         return (struct rx_packet *)0;
     }
 #endif /* KERNEL */

-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (queue_IsEmpty(&rx_ts_info->_FPQ)) {

 #ifdef KERNEL
@@ -1182,28 +1187,31 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
         rxi_NeedMorePackets = TRUE;
-        switch (class) {
-        case RX_PACKET_CLASS_RECEIVE:
-            rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SEND:
-            rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_RECV_CBUF:
-            rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-            break;
-        case RX_PACKET_CLASS_SEND_CBUF:
-            rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-            break;
-        }
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
+        }
         return (struct rx_packet *)0;
     }
 #endif /* KERNEL */

-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);

 #ifdef KERNEL
     if (queue_IsEmpty(&rx_freePacketQueue))
@@ -1239,7 +1247,8 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)

     RX_TS_INFO_GET(rx_ts_info);

-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
         MUTEX_ENTER(&rx_freePktQ_lock);

@@ -1459,12 +1468,15 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
         p->length = (nbytes - RX_HEADER_SIZE);
         if ((nbytes > tlen) || (p->length & 0x8000)) {	/* Bogus packet */
             if (nbytes < 0 && errno == EWOULDBLOCK) {
-                rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
             } else if (nbytes <= 0) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.bogusPacketOnRead++;
-                rx_stats.bogusHost = from.sin_addr.s_addr;
-                MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.bogusPacketOnRead++;
+                    rx_stats.bogusHost = from.sin_addr.s_addr;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
                 dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
                      ntohs(from.sin_port), nbytes));
             }
@@ -1494,7 +1506,8 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
         *port = from.sin_port;
         if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
             struct rx_peer *peer;
-            rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
             /*
              * Try to look up this peer structure. If it doesn't exist,
              * don't create a new one -
@@ -2038,6 +2051,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
             return ap;

         /* Since its all int32s convert to network order with a loop. */
+        if (rx_stats_active)
             MUTEX_ENTER(&rx_stats_mutex);
         s = (afs_int32 *) & rx_stats;
         for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
@@ -2045,6 +2059,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,

         tl = ap->length;
         ap->length = sizeof(rx_stats);
+        if (rx_stats_active)
             MUTEX_EXIT(&rx_stats_mutex);
         rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
         ap->length = tl;
@@ -2260,7 +2275,8 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
          osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
                      p->length + RX_HEADER_SIZE, istack)) != 0) {
         /* send failed, so let's hurry up the resend, eh? */
-        rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
         p->retryTime = p->timeSent;	/* resend it very soon */
         clock_Addmsec(&(p->retryTime),
                       10 + (((afs_uint32) p->backoff) << 8));
@@ -2300,7 +2316,8 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
     }
     dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 #endif
-    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
@@ -2445,7 +2462,8 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
          osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
                      istack)) != 0) {
         /* send failed, so let's hurry up the resend, eh? */
-        rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
         for (i = 0; i < len; i++) {
             p = list[i];
             p->retryTime = p->timeSent;	/* resend it very soon */
@@ -2482,7 +2500,8 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
     dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));

 #endif
-    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
src/rx/rx_prototypes.h
@@ -26,6 +26,7 @@ extern int rx_Init(u_int port);
 extern int rx_InitHost(u_int host, u_int port);
 #ifdef AFS_NT40_ENV
 extern void rx_DebugOnOff(int on);
+extern void rx_StatsOnOff(int on);
 #endif
 #ifndef KERNEL
 extern void rxi_StartServerProcs(int nExistingProcs);
src/rx/rx_user.c
@@ -195,9 +195,11 @@ rxi_GetHostUDPSocket(u_int ahost, u_short port)
         if (!greedy)
             (osi_Msg "%s*WARNING* Unable to increase buffering on socket\n",
              name);
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.socketGreedy = greedy;
-        MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rx_stats.socketGreedy = greedy;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
     }
 #endif /* AFS_DJGPP_ENV */

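
A client or server that wants the lock-free fast path simply clears the flag before generating traffic; Windows builds can call the newly exported rx_StatsOnOff() through the DLL instead. A minimal usage sketch (illustrative only; the port argument and error handling are placeholders):

    #include <rx/rx.h>
    #include <rx/rx_globals.h>

    int
    main(void)
    {
        rx_stats_active = 0;    /* disable stats before any rx traffic */
        if (rx_Init(0) != 0)    /* 0: let rx choose a port (illustrative) */
            return 1;
        /* ... issue rx calls; per-packet rx_stats_mutex traffic is skipped ... */
        return 0;
    }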