tservice = rxi_AllocService();
NETPRI;
+
+#ifdef RX_ENABLE_LOCKS
+ MUTEX_INIT(&tservice->svc_data_lock, "svc data lock", MUTEX_DEFAULT, 0);
+#endif
+
for (i = 0; i < RX_MAX_SERVICES; i++) {
struct rx_service *service = rx_services[i];
if (service) {
service->connDeadTime = rx_connDeadTime;
service->executeRequestProc = serviceProc;
service->checkReach = 0;
+ service->nSpecific = 0;
+ service->specific = NULL;
rx_services[i] = service; /* not visible until now */
USERPRI;
return service;
}
void
-rxi_SetPeerMtu(afs_uint32 host, afs_uint32 port, int mtu)
+rxi_SetPeerMtu(struct rx_peer *peer, afs_uint32 host, afs_uint32 port, int mtu)
{
- struct rx_peer **peer_ptr, **peer_end;
- struct rx_peer *peer = NULL;
+ struct rx_peer **peer_ptr = NULL, **peer_end = NULL;
+ struct rx_peer *next = NULL;
 int hashIndex;
- MUTEX_ENTER(&rx_peerHashTable_lock);
- if (port == 0) {
- for (peer_ptr = &rx_peerHashTable[0], peer_end =
- &rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
- peer_ptr++) {
- struct rx_peer *next;
- for (peer = *peer_ptr; peer; peer = next) {
- next = peer->next;
- if (host == peer->host)
- break;
- }
- }
+ if (!peer) {
+ MUTEX_ENTER(&rx_peerHashTable_lock);
+ if (port == 0) {
+ peer_ptr = &rx_peerHashTable[0];
+ peer_end = &rx_peerHashTable[rx_hashTableSize];
+ next = NULL;
+ resume:
+ for ( ; peer_ptr < peer_end; peer_ptr++) {
+ if (!peer)
+ peer = *peer_ptr;
+ for ( ; peer; peer = next) {
+ next = peer->next;
+ if (host == peer->host)
+ break;
+ }
+ }
+ } else {
+ hashIndex = PEER_HASH(host, port);
+ for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
+ if ((peer->host == host) && (peer->port == port))
+ break;
+ }
+ }
} else {
- hashIndex = PEER_HASH(host, port);
- for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
- if ((peer->host == host) && (peer->port == port))
- break;
- }
+ MUTEX_ENTER(&rx_peerHashTable_lock);
}
if (peer) {
MUTEX_EXIT(&rx_peerHashTable_lock);
MUTEX_ENTER(&peer->peer_lock);
+ /* Clamp first: never take the MTU below the protocol minimum. */
+ mtu = MAX(mtu, RX_MIN_PACKET_SIZE);
peer->ifMTU=MIN(mtu, peer->ifMTU);
peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ /* If natMTU shrank, the negotiated peer MTU must not exceed it. */
+ peer->MTU = MIN(peer->MTU, peer->natMTU);
+ /* An interface MTU below OLD_MAX_PACKET_SIZE cannot carry jumbograms. */
+ if (peer->ifMTU < OLD_MAX_PACKET_SIZE)
+ peer->maxDgramPackets = 1;
+ /* Recorded max packet size no longer fits this MTU; force rediscovery. */
+ if (peer->maxPacketSize-RX_IPUDP_SIZE > peer->ifMTU)
+ peer->maxPacketSize = 0;
MUTEX_EXIT(&peer->peer_lock);
MUTEX_ENTER(&rx_peerHashTable_lock);
- peer->refCount++;
+ peer->refCount--;
+ if (host && !port) {
+ peer = next;
+ /* wildcard (port==0) scan: resume the hash-table walk at the next peer */
+ goto resume;
+ }
}
MUTEX_EXIT(&rx_peerHashTable_lock);
}
int newAckCount = 0;
u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
+ int pktsize = 0; /* Set if we need to update the peer mtu */
if (rx_stats_active)
rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
if (ap->reason == RX_ACK_PING_RESPONSE)
rxi_UpdatePeerReach(conn, call);
+ if (conn->lastPacketSizeSeq) {
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if (first >= conn->lastPacketSizeSeq) {
+ pktsize = conn->lastPacketSize;
+ conn->lastPacketSize = conn->lastPacketSizeSeq = 0;
+ }
+ MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_ENTER(&peer->peer_lock);
+ /* start somewhere */
+ if (!peer->maxPacketSize)
+ peer->maxPacketSize = np->length+RX_IPUDP_SIZE;
+
+ if (pktsize > peer->maxPacketSize) {
+ peer->maxPacketSize = pktsize;
+ if ((pktsize-RX_IPUDP_SIZE > peer->ifMTU)) {
+ peer->ifMTU=pktsize-RX_IPUDP_SIZE;
+ peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ }
+ }
+ MUTEX_EXIT(&peer->peer_lock);
+ }
+
#ifdef RXDEBUG
#ifdef AFS_NT40_ENV
if (rxdebug_active) {
}
call->MTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE;
} else if (call->MTU < peer->maxMTU) {
- call->MTU += peer->natMTU;
- call->MTU = MIN(call->MTU, peer->maxMTU);
+ /* don't upgrade if we can't handle it */
+ if ((call->nDgramPackets == 1) && (call->MTU >= peer->ifMTU))
+ call->MTU = peer->ifMTU;
+ else {
+ call->MTU += peer->natMTU;
+ call->MTU = MIN(call->MTU, peer->maxMTU);
+ }
}
call->nAcks = 0;
}
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* idle connections) */
- call->lastSendData = conn->lastSendTime = call->lastSendTime = clock_Sec();
+ conn->lastSendTime = call->lastSendTime = clock_Sec();
+ /* Let a set of retransmits trigger an idle timeout */
+ if (!resending)
+ call->lastSendData = call->lastSendTime;
}
/* When sending packets we need to follow these rules:
* processing), and for the connection (so that we can discover
* idle connections) */
conn->lastSendTime = call->lastSendTime = clock_Sec();
- /* Don't count keepalives here, so idleness can be tracked. */
- if ((p->header.type != RX_PACKET_TYPE_ACK) || (((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING))
+ /* Don't count keepalive ping/acks here, so idleness can be tracked. */
+ if ((p->header.type != RX_PACKET_TYPE_ACK) ||
+ ((((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING) &&
+ (((struct rx_ackPacket *)rx_DataOf(p))->reason !=
+ RX_ACK_PING_RESPONSE)))
call->lastSendData = call->lastSendTime;
}
struct rx_connection *conn = call->conn;
afs_uint32 now;
afs_uint32 deadTime;
+ int cerror = 0;
+ int newmtu = 0;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (call->flags & RX_CALL_TQ_BUSY) {
);
if (ire && ire->ire_max_frag > 0)
- rxi_SetPeerMtu(call->conn->peer->host, 0, ire->ire_max_frag);
+ rxi_SetPeerMtu(NULL, call->conn->peer->host, 0,
+ ire->ire_max_frag);
#if defined(GLOBAL_NETSTACKID)
netstack_rele(ns);
#endif
#endif
#endif /* ADAPT_PMTU */
- rxi_CallError(call, RX_CALL_DEAD);
- return -1;
+ cerror = RX_CALL_DEAD;
+ goto mtuout;
} else {
#ifdef RX_ENABLE_LOCKS
/* Cancel pending events */
return -1;
}
return 0;
+mtuout:
+ if (call->conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT) {
+ /* if we never succeeded, let the error pass out as-is */
+ if (call->conn->peer->maxPacketSize)
+ cerror = call->conn->msgsizeRetryErr;
+
+ /* if we thought we could send more, perhaps things got worse */
+ if (call->conn->peer->maxPacketSize > conn->lastPacketSize)
+ /* maxpacketsize will be cleared in rxi_SetPeerMtu */
+ newmtu = MAX(call->conn->peer->maxPacketSize-RX_IPUDP_SIZE,
+ conn->lastPacketSize-(128+RX_IPUDP_SIZE));
+ else
+ newmtu = conn->lastPacketSize-(128+RX_IPUDP_SIZE);
+
+ /* minimum capped in SetPeerMtu */
+ rxi_SetPeerMtu(call->conn->peer, 0, 0, newmtu);
+
+ /* clean up */
+ conn->lastPacketSize = 0;
+
+ /* needed so ResetCall doesn't clobber us. */
+ call->MTU = call->conn->peer->ifMTU;
+ }
+ rxi_CallError(call, cerror);
+ return -1;
}
void
MUTEX_EXIT(&conn->conn_data_lock);
}
+/*
+ * Attach caller-private data to a service under an integer key
+ * (presumably obtained from rx_KeyCreate — the shared
+ * rxi_keyCreate_destructor table is indexed by it; confirm against the
+ * connection-specific API).  Replacing an existing value runs the
+ * key's destructor on the old value first.  The table grows on demand;
+ * on allocation failure the update is dropped and the old table is
+ * left intact rather than dereferencing NULL or leaking it.
+ */
+void
+rx_SetServiceSpecific(struct rx_service *svc, int key, void *ptr)
+{
+    int i;
+    void **tab;
+    MUTEX_ENTER(&svc->svc_data_lock);
+    if (!svc->specific || key >= svc->nSpecific) {
+	/* Grow (or create) the table.  When svc->specific is NULL,
+	 * nSpecific is 0 (see service allocation), so realloc acts as
+	 * malloc and the fill loop below covers slots 0..key-1. */
+	tab = (void **)realloc(svc->specific, (key + 1) * sizeof(void *));
+	if (!tab) {
+	    /* Out of memory: keep the existing table and drop the update. */
+	    MUTEX_EXIT(&svc->svc_data_lock);
+	    return;
+	}
+	/* NULL-fill the newly exposed slots before publishing them. */
+	for (i = svc->nSpecific; i < key; i++)
+	    tab[i] = NULL;
+	svc->specific = tab;
+	svc->nSpecific = key + 1;
+	svc->specific[key] = ptr;
+    } else {
+	/* Replacing an existing value: destroy the old one first. */
+	if (svc->specific[key] && rxi_keyCreate_destructor[key])
+	    (*rxi_keyCreate_destructor[key]) (svc->specific[key]);
+	svc->specific[key] = ptr;
+    }
+    MUTEX_EXIT(&svc->svc_data_lock);
+}
+
void *
rx_GetSpecific(struct rx_connection *conn, int key)
{
return ptr;
}
+/*
+ * Fetch the service-private value stored under the given key, or NULL
+ * when no value has ever been set for that key on this service.
+ */
+void *
+rx_GetServiceSpecific(struct rx_service *svc, int key)
+{
+    void *result = NULL;
+    MUTEX_ENTER(&svc->svc_data_lock);
+    /* A key at or past nSpecific has never been set for this service. */
+    if (key < svc->nSpecific)
+	result = svc->specific[key];
+    MUTEX_EXIT(&svc->svc_data_lock);
+    return result;
+}
+
+
+
+
#endif /* !KERNEL */
/*