2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* rx_user.c contains routines specific to the user space UNIX implementation of rx */
12 /* rxi_syscall is currently not prototyped */
14 #include <afsconfig.h>
15 #include <afs/param.h>
18 # include <sys/types.h>
23 # include <WINNT/syscfg.h>
25 # include <sys/socket.h>
26 # include <sys/file.h>
28 # include <sys/stat.h>
29 # include <netinet/in.h>
30 # include <sys/time.h>
32 # include <sys/ioctl.h>
36 #if !defined(AFS_AIX_ENV) && !defined(AFS_NT40_ENV)
37 # include <sys/syscall.h>
39 #include <afs/afs_args.h>
40 #include <afs/afsutil.h>
42 #ifndef IPPORT_USERRESERVED
43 /* If in.h doesn't define this, define it anyway. Unfortunately, defining
44 this doesn't put the code into the kernel to restrict kernel assigned
45 port numbers to numbers below IPPORT_USERRESERVED... */
46 #define IPPORT_USERRESERVED 5000
49 #if defined(HAVE_LINUX_ERRQUEUE_H) && defined(ADAPT_PMTU)
50 #include <linux/types.h>
51 #include <linux/errqueue.h>
58 # include <sys/time.h>
61 #include "rx_atomic.h"
62 #include "rx_globals.h"
/*
 * NOTE(review): pthread/non-pthread selection for the interface-table locks.
 * Under AFS_PTHREAD_ENV the LOCK/UNLOCK macros wrap real mutex enter/exit;
 * the other branch (only partially visible -- the empty UNLOCK_IF_INIT at
 * the end appears to belong to an #else that is missing from this extract)
 * defines them as no-ops.  Lines are missing from this extraction; confirm
 * against the full source before editing.
 */
64 #ifdef AFS_PTHREAD_ENV
67 * The rx_if_init_mutex mutex protects the following global variables:
71 afs_kmutex_t rx_if_init_mutex;
72 #define LOCK_IF_INIT MUTEX_ENTER(&rx_if_init_mutex)
73 #define UNLOCK_IF_INIT MUTEX_EXIT(&rx_if_init_mutex)
76 * The rx_if_mutex mutex protects the following global variables:
82 afs_kmutex_t rx_if_mutex;
83 #define LOCK_IF MUTEX_ENTER(&rx_if_mutex)
84 #define UNLOCK_IF MUTEX_EXIT(&rx_if_mutex)
87 #define UNLOCK_IF_INIT
90 #endif /* AFS_PTHREAD_ENV */
/*
 * NOTE(review): rxi_GetHostUDPSocket -- creates and configures the UDP
 * socket Rx uses.  Visible steps: reserved-port sanity checks (non-NT),
 * socket(), bind() retry loop (up to MAX_RX_BINDS attempts), close-on-exec,
 * SO_SNDBUF/SO_RCVBUF sizing toward rx_UdpBufSize, Linux PMTU-discovery and
 * IP_RECVERR options, then rxi_Listen().  Many interior lines (error paths,
 * comment terminators, #else branches) are missing from this extraction --
 * do not edit code here without the complete source.  The comment opened on
 * the "*WARNING* port number" line is closed on a line not shown here.
 */
94 * Make a socket for receiving/sending IP packets. Set it into non-blocking
95 * and large buffering modes. If port isn't specified, the kernel will pick
96 * one. Returns the socket (>= 0) on success. Returns OSI_NULLSOCKET on
97 * failure. Port must be in network byte order.
100 rxi_GetHostUDPSocket(u_int ahost, u_short port)
103 osi_socket socketFd = OSI_NULLSOCKET;
104 struct sockaddr_in taddr;
105 char *name = "rxi_GetUDPSocket: ";
106 #ifdef AFS_LINUX22_ENV
107 #if defined(ADAPT_PMTU)
108 int pmtu=IP_PMTUDISC_WANT;
111 int pmtu=IP_PMTUDISC_DONT;
115 #if !defined(AFS_NT40_ENV)
116 if (ntohs(port) >= IPPORT_RESERVED && ntohs(port) < IPPORT_USERRESERVED) {
117 /* (osi_Msg "%s*WARNING* port number %d is not a reserved port number. Use port numbers above %d\n", name, port, IPPORT_USERRESERVED);
120 if (ntohs(port) > 0 && ntohs(port) < IPPORT_RESERVED && geteuid() != 0) {
122 "%sport number %d is a reserved port number which may only be used by root. Use port numbers above %d\n",
123 name, ntohs(port), IPPORT_USERRESERVED);
127 socketFd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
129 if (socketFd == OSI_NULLSOCKET) {
131 fprintf(stderr, "socket() failed with error %u\n", WSAGetLastError());
139 rxi_xmit_init(socketFd);
140 #endif /* AFS_NT40_ENV */
142 taddr.sin_addr.s_addr = ahost;
143 taddr.sin_family = AF_INET;
144 taddr.sin_port = (u_short) port;
145 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
146 taddr.sin_len = sizeof(struct sockaddr_in);
148 #define MAX_RX_BINDS 10
149 for (binds = 0; binds < MAX_RX_BINDS; binds++) {
152 code = bind(socketFd, (struct sockaddr *)&taddr, sizeof(taddr));
156 (osi_Msg "%sbind failed\n", name);
159 #if !defined(AFS_NT40_ENV)
161 * Set close-on-exec on rx socket
163 fcntl(socketFd, F_SETFD, 1);
166 /* Use one of three different ways of getting a socket buffer expanded to
174 len2 = rx_UdpBufSize;
176 /* find the size closest to rx_UdpBufSize that will be accepted */
177 while (!greedy && len2 > len1) {
180 (socketFd, SOL_SOCKET, SO_RCVBUF, (char *)&len2,
186 /* but do not let it get smaller than 32K */
196 (socketFd, SOL_SOCKET, SO_SNDBUF, (char *)&len1,
200 (socketFd, SOL_SOCKET, SO_RCVBUF, (char *)&len2,
203 (osi_Msg "%s*WARNING* Unable to increase buffering on socket\n",
206 rx_atomic_set(&rx_stats.socketGreedy, greedy);
209 #ifdef AFS_LINUX22_ENV
210 setsockopt(socketFd, SOL_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));
211 #if defined(ADAPT_PMTU)
212 setsockopt(socketFd, SOL_IP, IP_RECVERR, &recverr, sizeof(recverr));
215 if (rxi_Listen(socketFd) < 0) {
223 if (socketFd != OSI_NULLSOCKET)
224 closesocket(socketFd);
226 if (socketFd != OSI_NULLSOCKET)
230 return OSI_NULLSOCKET;
/*
 * NOTE(review): convenience wrapper -- binds the Rx UDP socket to
 * INADDR_ANY on the given port (port already in network byte order).
 * The return-type line and braces are missing from this extraction.
 */
234 rxi_GetUDPSocket(u_short port)
236 return rxi_GetHostUDPSocket(htonl(INADDR_ANY), port);
/*
 * NOTE(review): variadic fatal-error handler.  Only the prefix message is
 * visible; the varargs formatting and process-abort lines are missing from
 * this extraction.
 */
240 osi_Panic(char *msg, ...)
244 (osi_Msg "Fatal Rx error: ");
/*
 * NOTE(review): assertion-failure hook used by osi_Assert(); delegates to
 * osi_Panic with the failing expression, file and line.  The continuation
 * of the osi_Panic call is on a line not shown here.
 */
253 * osi_AssertFailU() -- used by the osi_Assert() macro.
257 osi_AssertFailU(const char *expr, const char *file, int line)
259 osi_Panic("assertion failed: %s, file: %s, line: %d\n", expr,
/*
 * NOTE(review): AIX 3.2 userspace allocator shims.  osi_Alloc returns the
 * address of the static memZero byte for 0-length requests so that NULL is
 * returned iff allocation actually failed; osi_Free is its counterpart.
 * Both bodies are mostly missing from this extraction.
 */
263 #if defined(AFS_AIX32_ENV) && !defined(KERNEL)
265 static const char memZero;
267 osi_Alloc(afs_int32 x)
270 * 0-length allocs may return NULL ptr from malloc, so we special-case
271 * things so that NULL returned iff an error occurred
274 return (void *)&memZero;
279 osi_Free(void *x, afs_int32 size)
286 #endif /* defined(AFS_AIX32_ENV) && !defined(KERNEL) */
/*
 * NOTE(review): per-host interface tables, indexed in parallel by interface
 * slot (up to ADDRSPERSITE entries).  rxi_NetAddrs is explicitly documented
 * as host byte order; the masks/MTUs/flags arrays are filled alongside it
 * by rx_GetIFInfo below.  Inited tracks initialization state (the code
 * later compares it against 2, so it is a counter rather than a boolean --
 * confirm against the full source).
 */
288 #define ADDRSPERSITE 16
291 static afs_uint32 rxi_NetAddrs[ADDRSPERSITE]; /* host order */
292 static int myNetMTUs[ADDRSPERSITE];
293 static int myNetMasks[ADDRSPERSITE];
294 static int myNetFlags[ADDRSPERSITE];
295 static u_int rxi_numNetAddrs;
296 static int Inited = 0;
/*
 * NOTE(review): Windows-only helper returning one local address in network
 * byte order.  The function header line is missing from this extraction
 * (presumably rxi_getaddr -- TODO confirm).  It deliberately prefers the
 * second table entry to skip the loopback adapter, which the original
 * author already flags as a hack.
 */
298 #if defined(AFS_NT40_ENV)
302 /* The IP address list can change so we must query for it */
305 /* we don't want to use the loopback adapter which is first */
306 /* this is a bad bad hack */
307 if (rxi_numNetAddrs > 1)
308 return htonl(rxi_NetAddrs[1]);
309 else if (rxi_numNetAddrs > 0)
310 return htonl(rxi_NetAddrs[0]);
/*
 * NOTE(review): copies up to maxSize local interface addresses into the
 * caller's buffer, converting from the host-order table to network byte
 * order.  The refresh call implied by the "query for it" comment and the
 * return statement are on lines missing from this extraction.
 */
316 ** return number of addresses
317 ** and the addresses themselves in the buffer
318 ** maxSize - max number of interfaces to return.
321 rx_getAllAddr(afs_uint32 * buffer, int maxSize)
323 int count = 0, offset = 0;
325 /* The IP address list can change so we must query for it */
328 for (count = 0; offset < rxi_numNetAddrs && maxSize > 0;
329 count++, offset++, maxSize--)
330 buffer[count] = htonl(rxi_NetAddrs[offset]);
/*
 * NOTE(review): like rx_getAllAddr but also returns the matching netmask
 * and MTU per interface, all converted to network byte order.  The loop
 * header's first line and the return statement are missing from this
 * extraction.
 */
335 /* this function returns the total number of interface addresses
336 * the buffer has to be passed in by the caller. It also returns
337 * the matching interface mask and mtu. All values are returned
338 * in network byte order.
341 rx_getAllAddrMaskMtu(afs_uint32 addrBuffer[], afs_uint32 maskBuffer[],
342 afs_uint32 mtuBuffer[], int maxSize)
344 int count = 0, offset = 0;
346 /* The IP address list can change so we must query for it */
350 offset < rxi_numNetAddrs && maxSize > 0;
351 count++, offset++, maxSize--) {
352 addrBuffer[count] = htonl(rxi_NetAddrs[offset]);
353 maskBuffer[count] = htonl(myNetMasks[offset]);
354 mtuBuffer[count] = htonl(myNetMTUs[offset]);
/*
 * NOTE(review): sizes the initial packet pool from the jumbo receive size:
 * ncbufs is the number of continuation buffers per max-size packet, and
 * (rx_initSendWindow - 1) packets' worth of buffers are allocated via
 * rxi_MorePackets.  The return-type line and closing brace are missing
 * from this extraction.
 */
361 extern int rxinit_status;
363 rxi_InitMorePackets(void) {
364 int npackets, ncbufs;
366 ncbufs = (rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE);
368 ncbufs = ncbufs / RX_CBUFFERSIZE;
369 npackets = rx_initSendWindow - 1;
370 rxi_MorePackets(npackets * (ncbufs + 1));
/*
 * NOTE(review): Windows variant of rx_GetIFInfo (function header is on a
 * line missing from this extraction).  It queries the interface list via
 * syscfg_GetIFInfo, then clamps rx_maxReceiveSize / rx_MyMaxSendSize to the
 * smallest usable per-interface MTU, and defers rxi_InitMorePackets until
 * rx_InitHost has initialized the mutexes (rxinit_status check).
 */
382 if (Inited < 2 && rxinit_status == 0) {
383 /* We couldn't initialize more packets earlier.
385 rxi_InitMorePackets();
395 rxi_numNetAddrs = ADDRSPERSITE;
396 (void)syscfg_GetIFInfo(&rxi_numNetAddrs, rxi_NetAddrs,
397 myNetMasks, myNetMTUs, myNetFlags);
399 for (i = 0; i < rxi_numNetAddrs; i++) {
400 rxsize = rxi_AdjustIfMTU(myNetMTUs[i] - RX_IPUDP_SIZE);
402 rxi_nRecvFrags * rxsize + (rxi_nRecvFrags - 1) * UDP_HDR_SIZE;
403 maxsize = rxi_AdjustMaxMTU(rxsize, maxsize);
404 if (rx_maxReceiveSize > maxsize) {
405 rx_maxReceiveSize = MIN(RX_MAX_PACKET_SIZE, maxsize);
407 MIN(rx_maxReceiveSize, rx_maxReceiveSizeUser);
409 if (rx_MyMaxSendSize > maxsize) {
410 rx_MyMaxSendSize = MIN(RX_MAX_PACKET_SIZE, maxsize);
416 * If rxinit_status is still set, rx_InitHost() has yet to be called
417 * and we therefore do not have any mutex locks initialized. As a
418 * result we cannot call rxi_MorePackets() without crashing.
423 rxi_InitMorePackets();
/*
 * NOTE(review): derives a classful (A/B/C) default netmask from an address
 * when the interface reports none.  The class-A branch and the returned
 * mask values are on lines missing from this extraction.
 */
428 fudge_netmask(afs_uint32 addr)
434 else if (IN_CLASSB(addr))
436 else if (IN_CLASSC(addr))
/*
 * NOTE(review): issues the AFS syscall (AFSCALL_CALL, op 28) while SIGSYS
 * is temporarily ignored, so hosts without the kernel module fail softly
 * instead of dying on the unsupported syscall.  The signal-restore and
 * return lines are missing from this extraction.
 */
446 #if !defined(AFS_AIX_ENV) && !defined(AFS_NT40_ENV) && !defined(AFS_LINUX20_ENV)
448 rxi_syscall(afs_uint32 a3, afs_uint32 a4, void *a5)
453 old = signal(SIGSYS, SIG_IGN);
455 #if defined(AFS_SGI_ENV)
456 rcode = afs_syscall(AFS_SYSCALL, 28, a3, a4, a5);
458 rcode = syscall(AFS_SYSCALL, 28 /* AFSCALL_CALL */ , a3, a4, a5);
459 #endif /* AFS_SGI_ENV */
465 #endif /* AFS_AIX_ENV */
/*
 * NOTE(review): Unix variant of rx_GetIFInfo (function header is on a line
 * missing from this extraction).  Visible flow: zero the interface tables,
 * open a scratch UDP socket, enumerate interfaces with SIOCGIFCONF, then
 * per interface fetch address (SIOCGIFADDR), flags (SIOCGIFFLAGS), MTU
 * (kernel syscall or SIOCGIFMTU, defaulting to OLD_MAX_PACKET_SIZE +
 * RX_IPUDP_SIZE) and netmask (kernel syscall, SIOCGIFNETMASK, or
 * fudge_netmask), skipping loopback and duplicate addresses; finally grow
 * rx_maxReceiveSize and preallocate packets.  Error-handling branches,
 * several loop/brace lines and the sa_len-vs-fixed-ifreq iteration split
 * are missing here -- do not modify without the full source.
 */
474 struct ifreq ifs[ADDRSPERSITE];
477 char buf[BUFSIZ], *cp, *cplim;
479 struct sockaddr_in *a;
490 memset(rxi_NetAddrs, 0, sizeof(rxi_NetAddrs));
491 memset(myNetFlags, 0, sizeof(myNetFlags));
492 memset(myNetMTUs, 0, sizeof(myNetMTUs));
493 memset(myNetMasks, 0, sizeof(myNetMasks));
495 s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
496 if (s == OSI_NULLSOCKET)
499 ifc.ifc_len = sizeof(buf);
503 ifc.ifc_len = sizeof(ifs);
504 ifc.ifc_buf = (caddr_t) & ifs[0];
505 memset(&ifs[0], 0, sizeof(ifs));
507 res = ioctl(s, SIOCGIFCONF, &ifc);
509 /* fputs(stderr, "ioctl error IFCONF\n"); */
516 #define size(p) MAX((p).sa_len, sizeof(p))
517 cplim = buf + ifc.ifc_len; /*skip over if's with big ifr_addr's */
518 for (cp = buf; cp < cplim;
519 cp += sizeof(ifr->ifr_name) + MAX(a->sin_len, sizeof(*a))) {
520 if (rxi_numNetAddrs >= ADDRSPERSITE)
523 ifr = (struct ifreq *)cp;
525 len = ifc.ifc_len / sizeof(struct ifreq);
526 if (len > ADDRSPERSITE)
529 for (i = 0; i < len; ++i) {
531 res = ioctl(s, SIOCGIFADDR, ifr);
534 /* fputs(stderr, "ioctl error IFADDR\n");
535 * perror(ifr->ifr_name); */
538 a = (struct sockaddr_in *)&ifr->ifr_addr;
539 if (a->sin_family != AF_INET)
541 rxi_NetAddrs[rxi_numNetAddrs] = ntohl(a->sin_addr.s_addr);
542 if (rx_IsLoopbackAddr(rxi_NetAddrs[rxi_numNetAddrs])) {
543 /* we don't really care about "localhost" */
546 for (j = 0; j < rxi_numNetAddrs; j++) {
547 if (rxi_NetAddrs[j] == rxi_NetAddrs[rxi_numNetAddrs])
550 if (j < rxi_numNetAddrs)
553 /* fprintf(stderr, "if %s addr=%x\n", ifr->ifr_name,
554 * rxi_NetAddrs[rxi_numNetAddrs]); */
557 res = ioctl(s, SIOCGIFFLAGS, ifr);
559 myNetFlags[rxi_numNetAddrs] = ifr->ifr_flags;
561 /* Handle aliased loopbacks as well. */
562 if (ifr->ifr_flags & IFF_LOOPBACK)
565 /* fprintf(stderr, "if %s flags=%x\n",
566 * ifr->ifr_name, ifr->ifr_flags); */
568 * fputs(stderr, "ioctl error IFFLAGS\n");
569 * perror(ifr->ifr_name); */
571 #endif /* SIOCGIFFLAGS */
573 #if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV)
574 /* this won't run on an AIX system w/o a cache manager */
575 rxi_syscallp = rxi_syscall;
578 /* If I refer to kernel extensions that aren't loaded on AIX, the
579 * program refuses to load and run, so I simply can't include the
580 * following code. Fortunately, AIX is the one operating system in
581 * which the subsequent ioctl works reliably. */
583 if ((*rxi_syscallp) (20 /*AFSOP_GETMTU */ ,
584 htonl(rxi_NetAddrs[rxi_numNetAddrs]),
585 &(myNetMTUs[rxi_numNetAddrs]))) {
586 /* fputs(stderr, "syscall error GETMTU\n");
587 * perror(ifr->ifr_name); */
588 myNetMTUs[rxi_numNetAddrs] = 0;
590 if ((*rxi_syscallp) (42 /*AFSOP_GETMASK */ ,
591 htonl(rxi_NetAddrs[rxi_numNetAddrs]),
592 &(myNetMasks[rxi_numNetAddrs]))) {
593 /* fputs(stderr, "syscall error GETMASK\n");
594 * perror(ifr->ifr_name); */
595 myNetMasks[rxi_numNetAddrs] = 0;
597 myNetMasks[rxi_numNetAddrs] =
598 ntohl(myNetMasks[rxi_numNetAddrs]);
599 /* fprintf(stderr, "if %s mask=0x%x\n",
600 * ifr->ifr_name, myNetMasks[rxi_numNetAddrs]); */
603 if (myNetMTUs[rxi_numNetAddrs] == 0) {
604 myNetMTUs[rxi_numNetAddrs] = OLD_MAX_PACKET_SIZE + RX_IPUDP_SIZE;
606 res = ioctl(s, SIOCGIFMTU, ifr);
607 if ((res == 0) && (ifr->ifr_metric > 128)) { /* sanity check */
608 myNetMTUs[rxi_numNetAddrs] = ifr->ifr_metric;
609 /* fprintf(stderr, "if %s mtu=%d\n",
610 * ifr->ifr_name, ifr->ifr_metric); */
612 /* fputs(stderr, "ioctl error IFMTU\n");
613 * perror(ifr->ifr_name); */
618 if (myNetMasks[rxi_numNetAddrs] == 0) {
619 myNetMasks[rxi_numNetAddrs] =
620 fudge_netmask(rxi_NetAddrs[rxi_numNetAddrs]);
621 #ifdef SIOCGIFNETMASK
622 res = ioctl(s, SIOCGIFNETMASK, ifr);
624 a = (struct sockaddr_in *)&ifr->ifr_addr;
625 myNetMasks[rxi_numNetAddrs] = ntohl(a->sin_addr.s_addr);
626 /* fprintf(stderr, "if %s subnetmask=0x%x\n",
627 * ifr->ifr_name, myNetMasks[rxi_numNetAddrs]); */
629 /* fputs(stderr, "ioctl error IFMASK\n");
630 * perror(ifr->ifr_name); */
635 if (!rx_IsLoopbackAddr(rxi_NetAddrs[rxi_numNetAddrs])) { /* ignore lo0 */
638 rxi_nRecvFrags * (myNetMTUs[rxi_numNetAddrs] - RX_IP_SIZE);
639 maxsize -= UDP_HDR_SIZE; /* only the first frag has a UDP hdr */
640 if (rx_maxReceiveSize < maxsize)
641 rx_maxReceiveSize = MIN(RX_MAX_PACKET_SIZE, maxsize);
648 /* have to allocate at least enough to allow a single packet to reach its
649 * maximum size, so ReadPacket will work. Allocate enough for a couple
650 * of packets to do so, for good measure */
652 int npackets, ncbufs;
654 rx_maxJumboRecvSize =
655 RX_HEADER_SIZE + rxi_nDgramPackets * RX_JUMBOBUFFERSIZE +
656 (rxi_nDgramPackets - 1) * RX_JUMBOHEADERSIZE;
657 rx_maxJumboRecvSize = MAX(rx_maxJumboRecvSize, rx_maxReceiveSize);
658 ncbufs = (rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE);
660 ncbufs = ncbufs / RX_CBUFFERSIZE;
661 npackets = rx_initSendWindow - 1;
662 rxi_MorePackets(npackets * (ncbufs + 1));
666 #endif /* AFS_NT40_ENV */
/*
 * NOTE(review): initializes MTU/datagram parameters on a fresh rx_peer.
 * With ADAPT_MTU it matches the peer address against local interface
 * subnets to pick an interface MTU (clamped to [RX_MIN_PACKET_SIZE,
 * rx_MyMaxSendSize]); non-local peers get RX_REMOTE_PACKET_SIZE.  With
 * ADAPT_PMTU + IP_MTU it additionally connect()s a throwaway UDP socket to
 * the peer and reads the kernel's path-MTU estimate.  Several branch and
 * closing-brace lines are missing from this extraction; the duplicated
 * MIN(rxi_nDgramPackets, ...) expression near the end most likely belongs
 * to an #if/#else pair whose directives are among the missing lines --
 * confirm before editing.
 */
668 /* Called from rxi_FindPeer, when initializing a clear rx_peer structure,
669 * to get interesting information.
670 * Curiously enough, the rx_peerHashTable_lock currently protects the
671 * Inited variable (and hence rx_GetIFInfo). When the fs suite uses
672 * pthreads, this issue will need to be revisited.
676 rxi_InitPeerParams(struct rx_peer *pp)
681 #if defined(ADAPT_PMTU) && defined(IP_MTU)
683 struct sockaddr_in addr;
692 * there's a race here since more than one thread could call
693 * rx_GetIFInfo. The race stops in rx_GetIFInfo.
701 /* try to second-guess IP, and identify which link is most likely to
702 * be used for traffic to/from this host. */
703 ppaddr = ntohl(pp->host);
707 pp->rateFlag = 2; /* start timing after two full packets */
708 /* I don't initialize these, because I presume they are bzero'd...
709 * pp->burstSize pp->burst pp->burstWait.sec pp->burstWait.usec
710 * pp->timeout.usec */
713 for (ix = 0; ix < rxi_numNetAddrs; ++ix) {
714 if ((rxi_NetAddrs[ix] & myNetMasks[ix]) == (ppaddr & myNetMasks[ix])) {
715 #ifdef IFF_POINTOPOINT
716 if (myNetFlags[ix] & IFF_POINTOPOINT)
718 #endif /* IFF_POINTOPOINT */
719 rxmtu = myNetMTUs[ix] - RX_IPUDP_SIZE;
720 if (rxmtu < RX_MIN_PACKET_SIZE)
721 rxmtu = RX_MIN_PACKET_SIZE;
722 if (pp->ifMTU < rxmtu)
723 pp->ifMTU = MIN(rx_MyMaxSendSize, rxmtu);
727 if (!pp->ifMTU) { /* not local */
729 pp->ifMTU = MIN(rx_MyMaxSendSize, RX_REMOTE_PACKET_SIZE);
731 #else /* ADAPT_MTU */
732 pp->rateFlag = 2; /* start timing after two full packets */
734 pp->ifMTU = MIN(rx_MyMaxSendSize, OLD_MAX_PACKET_SIZE);
735 #endif /* ADAPT_MTU */
736 #if defined(ADAPT_PMTU) && defined(IP_MTU)
737 sock=socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
738 if (sock != OSI_NULLSOCKET) {
739 addr.sin_family = AF_INET;
740 addr.sin_addr.s_addr = pp->host;
741 addr.sin_port = pp->port;
742 if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
744 socklen_t s = sizeof(mtu);
745 if (getsockopt(sock, SOL_IP, IP_MTU, &mtu, &s)== 0) {
746 pp->ifMTU = MIN(mtu - RX_IPUDP_SIZE, pp->ifMTU);
756 pp->ifMTU = rxi_AdjustIfMTU(pp->ifMTU);
757 pp->maxMTU = OLD_MAX_PACKET_SIZE; /* for compatibility with old guys */
758 pp->natMTU = MIN((int)pp->ifMTU, OLD_MAX_PACKET_SIZE);
759 pp->maxDgramPackets =
760 MIN(rxi_nDgramPackets,
761 rxi_AdjustDgramPackets(rxi_nSendFrags, pp->ifMTU));
763 MIN(rxi_nDgramPackets,
764 rxi_AdjustDgramPackets(rxi_nSendFrags, pp->ifMTU));
765 pp->maxDgramPackets = 1;
766 /* Initialize slow start parameters */
767 pp->MTU = MIN(pp->natMTU, pp->maxMTU);
769 pp->nDgramPackets = 1;
/*
 * NOTE(review): disables jumbogram support by capping the receive size at
 * the pre-jumbogram packet size and limiting send/receive to one fragment.
 * The function header line is missing from this extraction (presumably
 * rx_SetNoJumbo -- TODO confirm).
 */
773 /* Don't expose jumbogram internals. */
777 rx_maxReceiveSize = OLD_MAX_PACKET_SIZE;
778 rxi_nSendFrags = rxi_nRecvFrags = 1;
/*
 * NOTE(review): forces send and receive size limits to a single caller-
 * supplied MTU.  Per the original comment, rx_SetNoJumbo must be called
 * first because it clobbers rx_maxReceiveSize.
 */
781 /* Override max MTU. If rx_SetNoJumbo is called, it must be
782 called before calling rx_SetMaxMTU since SetNoJumbo clobbers rx_maxReceiveSize */
784 rx_SetMaxMTU(int mtu)
786 rx_MyMaxSendSize = rx_maxReceiveSizeUser = rx_maxReceiveSize = mtu;
/*
 * NOTE(review): drains the Linux socket error queue (recvmsg with
 * MSG_ERRQUEUE) and walks the control messages looking for IP_RECVERR
 * records; on EMSGSIZE with a plausible path MTU (>= 68, the IPv4 minimum)
 * it shrinks the matching peer's MTU via rxi_SetPeerMtu.  This function
 * continues past the end of the visible chunk -- the msghdr setup, loop
 * closure and return are not shown here.
 */
789 #if defined(ADAPT_PMTU)
791 rxi_HandleSocketError(int socket)
794 #if defined(HAVE_LINUX_ERRQUEUE_H)
796 struct cmsghdr *cmsg;
797 struct sock_extended_err *err;
798 struct sockaddr_in addr;
799 char controlmsgbuf[256];
802 msg.msg_name = &addr;
803 msg.msg_namelen = sizeof(addr);
806 msg.msg_control = controlmsgbuf;
807 msg.msg_controllen = 256;
809 code = recvmsg(socket, &msg, MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);
811 if (code < 0 || !(msg.msg_flags & MSG_ERRQUEUE))
814 for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
815 if ((char *)cmsg - controlmsgbuf > msg.msg_controllen - CMSG_SPACE(0) ||
816 (char *)cmsg - controlmsgbuf > msg.msg_controllen - CMSG_SPACE(cmsg->cmsg_len) ||
817 cmsg->cmsg_len == 0) {
821 if (cmsg->cmsg_level == SOL_IP && cmsg->cmsg_type == IP_RECVERR)
827 err =(struct sock_extended_err *) CMSG_DATA(cmsg);
829 if (err->ee_errno == EMSGSIZE && err->ee_info >= 68) {
830 rxi_SetPeerMtu(NULL, addr.sin_addr.s_addr, addr.sin_port,
831 err->ee_info - RX_IPUDP_SIZE);
833 /* other DEST_UNREACH's and TIME_EXCEEDED should be dealt with too */