0);
CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
0);
- RWLOCK_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
+ MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
0);
- RWLOCK_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
+ MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
0);
MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
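Reviewer note: the hunk above converts the peer and connection hash-table locks from reader/writer locks back to plain mutexes. As a rough mental model (a sketch under the assumption of a pthreads build; the real MUTEX_* definitions live in the platform lock headers and include debugging hooks omitted here), the wrappers reduce to pthread_mutex_t operations:

/* Sketch only, not the actual OpenAFS macro definitions. */
#include <assert.h>
#include <pthread.h>

typedef pthread_mutex_t sketch_kmutex_t;

/* MUTEX_INIT(&m, name, type, rank): the extra arguments feed lock
 * naming/ranking in debug builds; a sketch can ignore them. */
static void
sketch_mutex_init(sketch_kmutex_t *m, const char *name, int type, int rank)
{
    int code;
    (void)name; (void)type; (void)rank;
    code = pthread_mutex_init(m, NULL);
    assert(code == 0);
    (void)code;
}

static void
sketch_mutex_enter(sketch_kmutex_t *m)
{
    int code = pthread_mutex_lock(m);
    assert(code == 0);
    (void)code;
}

static void
sketch_mutex_exit(sketch_kmutex_t *m)
{
    int code = pthread_mutex_unlock(m);
    assert(code == 0);
    (void)code;
}

Unlike a read/write lock, a mutex has a single acquire mode, which is why the RWLOCK_RDLOCK/RWLOCK_WRLOCK/RWLOCK_UPLOCK distinctions collapse to MUTEX_ENTER throughout the rest of this patch.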
#if defined(AFS_HPUX110_ENV)
rx_SetEpoch(tv.tv_sec); /* Start time of this package, rxkad
* will provide a randomer value. */
#endif
rx_MutexAdd(rxi_dataQuota, rx_extraQuota, rx_stats_mutex); /* + extra pkts caller asked to rsrv */
/* *Slightly* random start time for the cid. This is just to help
* out with the hashing function at the peer */
rx_nextCid = ((tv.tv_sec ^ tv.tv_usec) << RX_CIDSHIFT);
dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
NETPRI;
- RWLOCK_WRLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
/*
* allocate the connection and all of its clones.
rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
}
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
}
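Reviewer note: rx_NewConnection now brackets its hash-table update with MUTEX_ENTER/MUTEX_EXIT where it previously took a write lock. A minimal sketch of the bracketing pattern (illustrative types and names, not the real Rx structures):

#include <pthread.h>

struct sketch_conn {
    struct sketch_conn *next;
    unsigned int cid;
};

#define SKETCH_HASH_SIZE 257

static struct sketch_conn *sketch_conn_table[SKETCH_HASH_SIZE];
static pthread_mutex_t sketch_conn_table_lock = PTHREAD_MUTEX_INITIALIZER;

static void
sketch_insert_conn(struct sketch_conn *conn)
{
    unsigned int bucket = conn->cid % SKETCH_HASH_SIZE;

    pthread_mutex_lock(&sketch_conn_table_lock);
    conn->next = sketch_conn_table[bucket];     /* head insert, as in rx.c */
    sketch_conn_table[bucket] = conn;
    pthread_mutex_unlock(&sketch_conn_table_lock);
}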
* idle time to now. rxi_ReapConnections will reap it if it's still
* idle (refCount == 0) after rx_idlePeerTime (60 seconds) have passed.
*/
- RWLOCK_WRLOCK(&rx_peerHashTable_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
if (conn->peer->refCount < 2) {
conn->peer->idleWhen = clock_Sec();
if (conn->peer->refCount < 1) {
}
}
conn->peer->refCount--;
- RWLOCK_UNLOCK(&rx_peerHashTable_lock);
+ MUTEX_EXIT(&rx_peerHashTable_lock);
if (conn->type == RX_SERVER_CONNECTION)
rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
{
register struct rx_connection *tconn, *dtconn;
- RWLOCK_WRLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
/* destroy any clones that might exist */
if (!rx_IsClonedConn(conn)) {
/* conn should be at the head of the cleanup list */
if (conn == rx_connCleanup_list) {
rx_connCleanup_list = rx_connCleanup_list->next;
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
rxi_CleanupConnection(conn);
}
#ifdef RX_ENABLE_LOCKS
else {
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
}
#endif /* RX_ENABLE_LOCKS */
}
if (conn->refCount > 0)
conn->refCount--;
else {
- rx_MutexIncrement(rxi_lowConnRefCount, rx_stats_mutex);
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_lowConnRefCount++;
+ MUTEX_EXIT(&rx_stats_mutex);
}
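Reviewer note: here (and in the rx_nWaiting hunks below) the patch expands rx_MutexIncrement/rx_MutexDecrement helpers into explicit lock/modify/unlock sequences. A plausible reading of those helpers, offered as an assumption since the authoritative definitions are in the Rx headers, is a macro that brackets one counter update with the named mutex:

/* Hypothetical stand-ins for the helpers being expanded; the + lines
 * above spell out exactly this sequence inline. */
#define SKETCH_MUTEX_INCREMENT(counter, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (counter)++; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)

#define SKETCH_MUTEX_DECREMENT(counter, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (counter)--; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)

Writing the sequence out inline changes nothing semantically; it only trades the helper's brevity for grep-ability of the lock operations.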
if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
if (havecalls) {
/* Don't destroy the connection if there are any call
* structures still in use */
rx_MutexOr(conn->flags, RX_CONN_DESTROY_ME, conn->conn_data_lock);
USERPRI;
return;
}
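Reviewer note: the RX_CONN_DESTROY_ME hunk above defers connection destruction while call structures are still in use; the teardown of the last call is then responsible for retrying. A minimal sketch of that deferred-destroy pattern (illustrative structure and flag value):

#include <pthread.h>

#define SKETCH_CONN_DESTROY_ME 0x01

struct sketch_busy_conn {
    pthread_mutex_t data_lock;
    int flags;
    int active_calls;
};

/* Returns 1 if destruction can proceed now, 0 if it was deferred. */
static int
sketch_destroy_conn(struct sketch_busy_conn *conn)
{
    pthread_mutex_lock(&conn->data_lock);
    if (conn->active_calls > 0) {
        conn->flags |= SKETCH_CONN_DESTROY_ME;  /* last call retries */
        pthread_mutex_unlock(&conn->data_lock);
        return 0;
    }
    pthread_mutex_unlock(&conn->data_lock);
    /* ...real teardown would continue here... */
    return 1;
}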
#else
osi_rxSleep(conn);
#endif
rx_MutexDecrement(conn->makeCallWaiters, conn->conn_data_lock);
} else {
MUTEX_EXIT(&conn->conn_data_lock);
}
/* search for next free call on this connection or
}
rxi_DeleteCachedConnections();
if (rx_connHashTable) {
- RWLOCK_WRLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
for (conn_ptr = &rx_connHashTable[0], conn_end =
&rx_connHashTable[rx_hashTableSize]; conn_ptr < conn_end;
conn_ptr++) {
struct rx_connection *conn;
conn = rx_connCleanup_list;
rx_connCleanup_list = rx_connCleanup_list->next;
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
rxi_CleanupConnection(conn);
- RWLOCK_WRLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
}
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
#endif /* RX_ENABLE_LOCKS */
}
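Reviewer note: the cleanup loop above drops rx_connHashTable_lock around each rxi_CleanupConnection call and reacquires it before touching the list head again, so the heavyweight per-connection teardown never runs under the global table lock. Sketch of the drop/reacquire drain (illustrative names):

#include <pthread.h>
#include <stdlib.h>

struct sketch_dead_conn {
    struct sketch_dead_conn *next;
};

static struct sketch_dead_conn *sketch_cleanup_list;
static pthread_mutex_t sketch_table_lock = PTHREAD_MUTEX_INITIALIZER;

static void
sketch_cleanup_one(struct sketch_dead_conn *conn)
{
    free(conn);                 /* stands in for rxi_CleanupConnection */
}

static void
sketch_drain_cleanup_list(void)
{
    pthread_mutex_lock(&sketch_table_lock);
    while (sketch_cleanup_list) {
        struct sketch_dead_conn *conn = sketch_cleanup_list;
        sketch_cleanup_list = sketch_cleanup_list->next;

        /* Release the table lock across the teardown so cleanup may
         * take other locks without deadlock risk. */
        pthread_mutex_unlock(&sketch_table_lock);
        sketch_cleanup_one(conn);
        pthread_mutex_lock(&sketch_table_lock);
    }
    pthread_mutex_unlock(&sketch_table_lock);
}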
rxi_flushtrace();
struct rx_peer **peer_ptr, **peer_end;
int hashIndex;
- RWLOCK_RDLOCK(&rx_peerHashTable_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
if (port == 0) {
for (peer_ptr = &rx_peerHashTable[0], peer_end =
&rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
}
}
}
- RWLOCK_UNLOCK(&rx_peerHashTable_lock);
+ MUTEX_EXIT(&rx_peerHashTable_lock);
}
/* Find the peer process represented by the supplied (host,port)
register struct rx_peer *pp;
int hashIndex;
hashIndex = PEER_HASH(host, port);
- RWLOCK_RDLOCK(&rx_peerHashTable_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
if ((pp->host == host) && (pp->port == port))
break;
MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
queue_Init(&pp->congestionQueue);
queue_Init(&pp->rpcStats);
- RWLOCK_UPLOCK(&rx_peerHashTable_lock);
pp->next = rx_peerHashTable[hashIndex];
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
}
if (origPeer)
origPeer->refCount--;
- RWLOCK_UNLOCK(&rx_peerHashTable_lock);
+ MUTEX_EXIT(&rx_peerHashTable_lock);
return pp;
}
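Reviewer note: with an exclusive mutex held across both the lookup and the insert, rxi_FindPeer no longer needs the read-to-write upgrade, which is why the RWLOCK_UPLOCK line is deleted with no replacement. A self-contained sketch of the find-or-create shape (illustrative types; the real peer carries much more state):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct sketch_peer {
    struct sketch_peer *next;
    uint32_t host;
    uint16_t port;
    int refCount;
};

#define SKETCH_PEER_BUCKETS 257

static struct sketch_peer *sketch_peer_table[SKETCH_PEER_BUCKETS];
static pthread_mutex_t sketch_peer_lock = PTHREAD_MUTEX_INITIALIZER;

static struct sketch_peer *
sketch_find_peer(uint32_t host, uint16_t port)
{
    unsigned int bucket = (host ^ port) % SKETCH_PEER_BUCKETS;
    struct sketch_peer *pp;

    pthread_mutex_lock(&sketch_peer_lock);
    for (pp = sketch_peer_table[bucket]; pp; pp = pp->next)
        if (pp->host == host && pp->port == port)
            break;
    if (!pp && (pp = calloc(1, sizeof(*pp))) != NULL) {
        /* The mutex is exclusive for the whole lookup-then-insert, so
         * no reader-to-writer upgrade is needed here. */
        pp->host = host;
        pp->port = port;
        pp->next = sketch_peer_table[bucket];
        sketch_peer_table[bucket] = pp;
    }
    if (pp)
        pp->refCount++;
    pthread_mutex_unlock(&sketch_peer_lock);
    return pp;
}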
int hashindex, flag, i;
register struct rx_connection *conn;
hashindex = CONN_HASH(host, port, cid, epoch, type);
- RWLOCK_RDLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
rxLastConn ? (conn = rxLastConn, flag = 0) : (conn =
rx_connHashTable[hashindex],
flag = 1);
* like this, and there seems to be some CM bug that makes this
* happen from time to time -- in which case, the fileserver
* asserts. */
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
return (struct rx_connection *)0;
}
if (pp->host == host && pp->port == port)
if (!conn) {
struct rx_service *service;
if (type == RX_CLIENT_CONNECTION) {
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
return (struct rx_connection *)0;
}
service = rxi_FindService(socket, serviceId);
if (!service || (securityIndex >= service->nSecurityObjects)
|| (service->securityObjects[securityIndex] == 0)) {
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
return (struct rx_connection *)0;
}
conn = rxi_AllocConnection(); /* This bzero's the connection */
MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
+ conn->next = rx_connHashTable[hashindex];
+ rx_connHashTable[hashindex] = conn;
conn->peer = rxi_FindPeer(host, port, 0, 1);
conn->type = RX_SERVER_CONNECTION;
conn->lastSendTime = clock_Sec(); /* don't GC immediately */
conn->epoch = epoch;
+ conn->cid = cid & RX_CIDMASK;
/* conn->serial = conn->lastSerial = 0; */
/* conn->timeout = 0; */
conn->ackRate = RX_FAST_ACK_RATE;
conn->twind[i] = rx_initSendWindow;
conn->rwind[i] = rx_initReceiveWindow;
}
- RWLOCK_UPLOCK(&rx_connHashTable_lock);
- conn->next = rx_connHashTable[hashindex];
- rx_connHashTable[hashindex] = conn;
- conn->cid = cid & RX_CIDMASK;
/* Notify security object of the new connection */
RXS_NewConnection(conn->securityObject, conn);
/* XXXX Connection timeout? */
rx_MutexIncrement(conn->refCount, conn->conn_data_lock);
rxLastConn = conn; /* store this connection as the last conn used */
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
return conn;
}
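Reviewer note: the same upgrade removal lets rxi_FindConnection thread the new server connection into the hash bucket as soon as it is allocated (the two + lines after rxi_AllocConnection) instead of at the old RWLOCK_UPLOCK point; the table mutex is held for the whole section, so the partially initialized connection is never visible to other threads. The search itself starts from rxLastConn, a one-entry cache of the most recently used connection, before walking the bucket; a minimal sketch of that shortcut (illustrative names):

struct sketch_cached_conn {
    struct sketch_cached_conn *next;
    unsigned int cid;
};

static struct sketch_cached_conn *sketch_last_conn;   /* like rxLastConn */
static struct sketch_cached_conn *sketch_buckets[257];

/* Caller is assumed to hold the connection hash-table mutex. */
static struct sketch_cached_conn *
sketch_find_conn(unsigned int cid)
{
    struct sketch_cached_conn *conn;

    if (sketch_last_conn && sketch_last_conn->cid == cid)
        return sketch_last_conn;           /* one-entry cache hit */

    for (conn = sketch_buckets[cid % 257]; conn; conn = conn->next)
        if (conn->cid == cid)
            break;
    if (conn)
        sketch_last_conn = conn;           /* remember for next time */
    return conn;
}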
call->flags &= ~RX_CALL_WAIT_PROC;
if (queue_IsOnQueue(call)) {
queue_Remove(call);
- rx_MutexDecrement(rx_nWaiting, rx_stats_mutex);
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_nWaiting--;
+ MUTEX_EXIT(&rx_stats_mutex);
}
}
call->state = RX_STATE_ACTIVE;
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC) {
- rx_MutexDecrement(rx_nWaiting, rx_stats_mutex);
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_nWaiting--;
+ MUTEX_EXIT(&rx_stats_mutex);
}
}
MUTEX_EXIT(call->call_queue_lock);
{
struct rx_connection **conn_ptr, **conn_end;
int i, havecalls = 0;
- RWLOCK_WRLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
for (conn_ptr = &rx_connHashTable[0], conn_end =
&rx_connHashTable[rx_hashTableSize]; conn_ptr < conn_end;
conn_ptr++) {
struct rx_connection *conn;
conn = rx_connCleanup_list;
rx_connCleanup_list = rx_connCleanup_list->next;
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
rxi_CleanupConnection(conn);
- RWLOCK_WRLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
}
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
#endif /* RX_ENABLE_LOCKS */
}
struct rx_peer **peer_ptr, **peer_end;
int code;
MUTEX_ENTER(&rx_rpc_stats);
- RWLOCK_RDLOCK(&rx_peerHashTable_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
for (peer_ptr = &rx_peerHashTable[0], peer_end =
&rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
peer_ptr++) {
rxi_Free(rpc_stat, space);
rxi_rpc_peer_stat_cnt -= num_funcs;
}
+ rxi_FreePeer(peer);
rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
- RWLOCK_UPLOCK(&rx_peerHashTable_lock);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
} else
prev->next = next;
- rxi_FreePeer(peer);
} else {
if (code) {
MUTEX_EXIT(&peer->peer_lock);
}
}
}
- RWLOCK_UNLOCK(&rx_peerHashTable_lock);
+ MUTEX_EXIT(&rx_peerHashTable_lock);
MUTEX_EXIT(&rx_rpc_stats);
}
for (queue_Scan
(&peer->rpcStats, rpc_stat, nrpc_stat,
rx_interface_stat)) {
- int num_funcs;
+ unsigned int num_funcs;
if (!rpc_stat)
break;
queue_Remove(&rpc_stat->queue_header);
}
for (i = 0; i < rx_hashTableSize; i++) {
register struct rx_connection *tc, *ntc;
- RWLOCK_RDLOCK(&rx_connHashTable_lock);
+ MUTEX_ENTER(&rx_connHashTable_lock);
for (tc = rx_connHashTable[i]; tc; tc = ntc) {
ntc = tc->next;
for (j = 0; j < RX_MAXCALLS; j++) {
}
rxi_Free(tc, sizeof(*tc));
}
- RWLOCK_UNLOCK(&rx_connHashTable_lock);
+ MUTEX_EXIT(&rx_connHashTable_lock);
}
MUTEX_ENTER(&freeSQEList_lock);
MUTEX_EXIT(&freeSQEList_lock);
MUTEX_DESTROY(&freeSQEList_lock);
MUTEX_DESTROY(&rx_freeCallQueue_lock);
- RWLOCK_DESTROY(&rx_connHashTable_lock);
- RWLOCK_DESTROY(&rx_peerHashTable_lock);
+ MUTEX_DESTROY(&rx_connHashTable_lock);
+ MUTEX_DESTROY(&rx_peerHashTable_lock);
MUTEX_DESTROY(&rx_serverPool_lock);
osi_Free(rx_connHashTable,
rx_enable_stats = 0;
}
- RWLOCK_RDLOCK(&rx_peerHashTable_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
for (peer_ptr = &rx_peerHashTable[0], peer_end =
&rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
peer_ptr++) {
}
}
}
- RWLOCK_UNLOCK(&rx_peerHashTable_lock);
+ MUTEX_EXIT(&rx_peerHashTable_lock);
MUTEX_EXIT(&rx_rpc_stats);
}