2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* rx_user.c contains routines specific to the user space UNIX implementation of rx */
12 /* rxi_syscall is currently not prototyped */
14 #include <afsconfig.h>
15 #include <afs/param.h>
20 # include <WINNT/syscfg.h>
24 #if !defined(AFS_AIX_ENV) && !defined(AFS_NT40_ENV)
25 # include <sys/syscall.h>
27 #include <afs/afs_args.h>
28 #include <afs/afsutil.h>
30 #ifndef IPPORT_USERRESERVED
31 /* If in.h doesn't define this, define it anyway. Unfortunately, defining
32 this doesn't put the code into the kernel to restrict kernel assigned
33 port numbers to numbers below IPPORT_USERRESERVED... */
34 #define IPPORT_USERRESERVED 5000
37 #if defined(HAVE_LINUX_ERRQUEUE_H) && defined(ADAPT_PMTU)
38 #include <linux/types.h>
39 #include <linux/errqueue.h>
46 #include "rx_atomic.h"
47 #include "rx_globals.h"
49 #ifdef AFS_PTHREAD_ENV
52 * The rx_if_init_mutex mutex protects the following global variables:
56 afs_kmutex_t rx_if_init_mutex;
57 #define LOCK_IF_INIT MUTEX_ENTER(&rx_if_init_mutex)
58 #define UNLOCK_IF_INIT MUTEX_EXIT(&rx_if_init_mutex)
61 * The rx_if_mutex mutex protects the following global variables:
67 afs_kmutex_t rx_if_mutex;
68 #define LOCK_IF MUTEX_ENTER(&rx_if_mutex)
69 #define UNLOCK_IF MUTEX_EXIT(&rx_if_mutex)
72 #define UNLOCK_IF_INIT
75 #endif /* AFS_PTHREAD_ENV */
79  * Make a socket for receiving/sending IP packets.  Set it into non-blocking
80  * and large buffering modes.  If port isn't specified, the kernel will pick
81  * one.  Returns the socket (>= 0) on success.  Returns OSI_NULLSOCKET on
82  * failure.  Port must be in network byte order.
/*
 * NOTE(review): this listing is an elided excerpt -- the embedded numbers are
 * the original file line numbers and statements between them are not shown.
 * Visible flow: warn/refuse reserved port numbers (non-NT, non-root), create
 * the UDP socket, bind it (retrying up to MAX_RX_BINDS times), set
 * close-on-exec, grow the socket buffers toward rx_UdpBufSize, enable Linux
 * PMTU discovery + IP_RECVERR, then register the socket with rxi_Listen().
 */
85 rxi_GetHostUDPSocket(u_int ahost, u_short port)
88 osi_socket socketFd = OSI_NULLSOCKET;
89 struct sockaddr_in taddr;
90 char *name = "rxi_GetUDPSocket: ";
91 #ifdef AFS_LINUX22_ENV
92 #if defined(ADAPT_PMTU)
93 int pmtu=IP_PMTUDISC_WANT;
96 int pmtu=IP_PMTUDISC_DONT;
100 #if !defined(AFS_NT40_ENV)
101 if (ntohs(port) >= IPPORT_RESERVED && ntohs(port) < IPPORT_USERRESERVED) {
102 /* (osi_Msg "%s*WARNING* port number %d is not a reserved port number. Use port numbers above %d\n", name, port, IPPORT_USERRESERVED);
105 if (ntohs(port) > 0 && ntohs(port) < IPPORT_RESERVED && geteuid() != 0) {
107 "%sport number %d is a reserved port number which may only be used by root. Use port numbers above %d\n",
108 name, ntohs(port), IPPORT_USERRESERVED);
112 socketFd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
114 if (socketFd == OSI_NULLSOCKET) {
116 fprintf(stderr, "socket() failed with error %u\n", WSAGetLastError());
124 rxi_xmit_init(socketFd);
125 #endif /* AFS_NT40_ENV */
/* Fill in the bind address; ahost and port are already network byte order. */
127 taddr.sin_addr.s_addr = ahost;
128 taddr.sin_family = AF_INET;
129 taddr.sin_port = (u_short) port;
130 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
131 taddr.sin_len = sizeof(struct sockaddr_in);
133 #define MAX_RX_BINDS 10
/* Retry bind() a bounded number of times before giving up. */
134 for (binds = 0; binds < MAX_RX_BINDS; binds++) {
137 code = bind(socketFd, (struct sockaddr *)&taddr, sizeof(taddr));
141 (osi_Msg "%sbind failed\n", name);
144 #if !defined(AFS_NT40_ENV)
146 * Set close-on-exec on rx socket
/* FD_CLOEXEC: children spawned by the process must not inherit the socket. */
148 fcntl(socketFd, F_SETFD, 1);
151 /* Use one of three different ways of getting a socket buffer expanded to
159 len2 = rx_UdpBufSize;
161 /* find the size closest to rx_UdpBufSize that will be accepted */
162 while (!greedy && len2 > len1) {
165 (socketFd, SOL_SOCKET, SO_RCVBUF, (char *)&len2,
171 /* but do not let it get smaller than 32K */
181 (socketFd, SOL_SOCKET, SO_SNDBUF, (char *)&len1,
185 (socketFd, SOL_SOCKET, SO_RCVBUF, (char *)&len2,
188 (osi_Msg "%s*WARNING* Unable to increase buffering on socket\n",
191 rx_atomic_set(&rx_stats.socketGreedy, greedy);
194 #ifdef AFS_LINUX22_ENV
/* Linux: select the PMTU-discovery mode chosen above and request ICMP
 * errors via IP_RECVERR so rxi_HandleSocketError() can consume them. */
195 setsockopt(socketFd, SOL_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));
196 #if defined(ADAPT_PMTU)
197 setsockopt(socketFd, SOL_IP, IP_RECVERR, &recverr, sizeof(recverr));
200 if (rxi_Listen(socketFd) < 0) {
/* Error exit: close the socket if it was created, then report failure. */
208 if (socketFd != OSI_NULLSOCKET)
209 closesocket(socketFd);
211 if (socketFd != OSI_NULLSOCKET)
215 return OSI_NULLSOCKET;
/*
 * rxi_GetUDPSocket -- convenience wrapper: obtain an rx UDP socket bound to
 * INADDR_ANY on the given port (port in network byte order).
 */
219 rxi_GetUDPSocket(u_short port)
221 return rxi_GetHostUDPSocket(htonl(INADDR_ANY), port);
/*
 * osi_Panic -- report a fatal rx error.  Takes a printf-style format and
 * varargs; the visible line prints a "Fatal Rx error: " prefix.  The rest of
 * the body is elided here -- presumably it formats msg and aborts; confirm
 * against the full source.
 */
225 osi_Panic(char *msg, ...)
229 (osi_Msg "Fatal Rx error: ");
238 * osi_AssertFailU() -- used by the osi_Assert() macro.
/*
 * Reports a failed assertion (expression text, file, line) via osi_Panic();
 * the call's trailing arguments are on elided lines.
 */
242 osi_AssertFailU(const char *expr, const char *file, int line)
244 osi_Panic("assertion failed: %s, file: %s, line: %d\n", expr,
/*
 * AIX 3.2 user-space allocator shims.  osi_Alloc returns &memZero for
 * 0-length requests so NULL is returned iff a real error occurred;
 * osi_Free is the matching release routine.  Bodies are partially elided.
 */
248 #if defined(AFS_AIX32_ENV) && !defined(KERNEL)
/* Shared sentinel object handed out for zero-length allocations. */
250 static const char memZero;
252 osi_Alloc(afs_int32 x)
255 * 0-length allocs may return NULL ptr from malloc, so we special-case
256 * things so that NULL returned iff an error occurred
259 return (void *)&memZero;
264 osi_Free(void *x, afs_int32 size)
271 #endif /* defined(AFS_AIX32_ENV) && !defined(KERNEL) */
/*
 * Per-process cache of local interface information, filled in by
 * rx_GetIFInfo().  Parallel arrays indexed 0..rxi_numNetAddrs-1.
 */
273 #define ADDRSPERSITE 16
276 static afs_uint32 rxi_NetAddrs[ADDRSPERSITE]; /* host order */
/* Per-interface MTUs, netmasks and flags, parallel to rxi_NetAddrs. */
277 static int myNetMTUs[ADDRSPERSITE];
278 static int myNetMasks[ADDRSPERSITE];
279 static int myNetFlags[ADDRSPERSITE];
/* Number of valid entries in the arrays above. */
280 static u_int rxi_numNetAddrs;
/* Nonzero once interface info has been gathered (see rx_GetIFInfo). */
281 static int Inited = 0;
/*
 * NT-only helper (function header elided in this excerpt -- presumably
 * rxi_getaddr; confirm against the full source).  Returns a "default" local
 * address in network byte order, deliberately skipping the loopback adapter
 * that Windows lists first.
 */
283 #if defined(AFS_NT40_ENV)
287 /* The IP address list can change so we must query for it */
290 /* we don't want to use the loopback adapter which is first */
291 /* this is a bad bad hack */
292 if (rxi_numNetAddrs > 1)
293 return htonl(rxi_NetAddrs[1]);
294 else if (rxi_numNetAddrs > 0)
295 return htonl(rxi_NetAddrs[0]);
301 ** return number of addresses
302 ** and the addresses themselves in the buffer
303 ** maxSize - max number of interfaces to return.
/*
 * Copies up to maxSize cached local addresses into buffer (network byte
 * order) and -- per the comment above -- returns the count; the return
 * statement itself is on an elided line.
 */
306 rx_getAllAddr(afs_uint32 * buffer, int maxSize)
308 int count = 0, offset = 0;
310 /* The IP address list can change so we must query for it */
313 for (count = 0; offset < rxi_numNetAddrs && maxSize > 0;
314 count++, offset++, maxSize--)
315 buffer[count] = htonl(rxi_NetAddrs[offset]);
/*
 * rx_getAllAddrMaskMtu -- like rx_getAllAddr, but also fills the matching
 * netmask and MTU for each interface.  All three output arrays are filled
 * in network byte order, up to maxSize entries.
 */
320 /* this function returns the total number of interface addresses
321 * the buffer has to be passed in by the caller. It also returns
322 * the matching interface mask and mtu. All values are returned
323 * in network byte order.
326 rx_getAllAddrMaskMtu(afs_uint32 addrBuffer[], afs_uint32 maskBuffer[],
327 afs_uint32 mtuBuffer[], int maxSize)
329 int count = 0, offset = 0;
331 /* The IP address list can change so we must query for it */
335 offset < rxi_numNetAddrs && maxSize > 0;
336 count++, offset++, maxSize--) {
337 addrBuffer[count] = htonl(rxi_NetAddrs[offset]);
338 maskBuffer[count] = htonl(myNetMasks[offset]);
339 mtuBuffer[count] = htonl(myNetMTUs[offset]);
/*
 * rxi_InitMorePackets -- grow the packet pool enough for the configured
 * jumbo receive size: computes how many continuation buffers (cbufs) a
 * maximum-size packet needs and allocates (initial send window - 1) such
 * packets via rxi_MorePackets().
 */
346 extern int rxinit_status;
348 rxi_InitMorePackets(void) {
349 int npackets, ncbufs;
/* Continuation buffers needed beyond the first buffer of a jumbo packet. */
351 ncbufs = (rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE);
353 ncbufs = ncbufs / RX_CBUFFERSIZE;
354 npackets = rx_initSendWindow - 1;
355 rxi_MorePackets(npackets * (ncbufs + 1));
/*
 * NT interface-discovery body (function header elided in this excerpt --
 * presumably rx_GetIFInfo; confirm against the full source).  Queries
 * address/mask/MTU/flag info via syscfg_GetIFInfo(), then clamps the global
 * rx_maxReceiveSize / rx_MyMaxSendSize to what the discovered MTUs allow.
 * Packet-pool growth is deferred until rx_InitHost() has initialized the
 * mutexes (see the rxinit_status comment below).
 */
367 if (Inited < 2 && rxinit_status == 0) {
368 /* We couldn't initialize more packets earlier.
370 rxi_InitMorePackets();
380 rxi_numNetAddrs = ADDRSPERSITE;
381 (void)syscfg_GetIFInfo(&rxi_numNetAddrs, rxi_NetAddrs,
382 myNetMasks, myNetMTUs, myNetFlags);
384 for (i = 0; i < rxi_numNetAddrs; i++) {
385 rxsize = rxi_AdjustIfMTU(myNetMTUs[i] - RX_IPUDP_SIZE);
387 rxi_nRecvFrags * rxsize + (rxi_nRecvFrags - 1) * UDP_HDR_SIZE;
388 maxsize = rxi_AdjustMaxMTU(rxsize, maxsize);
389 if (rx_maxReceiveSize > maxsize) {
390 rx_maxReceiveSize = MIN(RX_MAX_PACKET_SIZE, maxsize);
392 MIN(rx_maxReceiveSize, rx_maxReceiveSizeUser);
394 if (rx_MyMaxSendSize > maxsize) {
395 rx_MyMaxSendSize = MIN(RX_MAX_PACKET_SIZE, maxsize);
401 * If rxinit_status is still set, rx_InitHost() has yet to be called
402 * and we therefore do not have any mutex locks initialized. As a
403 * result we cannot call rxi_MorePackets() without crashing.
408 rxi_InitMorePackets();
/*
 * fudge_netmask -- derive a default netmask from an address's classful
 * range (class A/B/C) when the real interface mask could not be obtained.
 * (Return statements are on elided lines.)
 */
413 fudge_netmask(afs_uint32 addr)
419 else if (IN_CLASSB(addr))
421 else if (IN_CLASSC(addr))
/*
 * rxi_syscall -- invoke the AFS syscall entry (AFSCALL_CALL, operation 28)
 * with three arguments, ignoring SIGSYS around the call so a missing kernel
 * module does not kill the process (signal restore is on an elided line).
 * Compiled out on AIX, NT and Linux.
 */
431 #if !defined(AFS_AIX_ENV) && !defined(AFS_NT40_ENV) && !defined(AFS_LINUX20_ENV)
433 rxi_syscall(afs_uint32 a3, afs_uint32 a4, void *a5)
438 old = signal(SIGSYS, SIG_IGN);
440 #if defined(AFS_SGI_ENV)
441 rcode = afs_syscall(AFS_SYSCALL, 28, a3, a4, a5);
443 rcode = syscall(AFS_SYSCALL, 28 /* AFSCALL_CALL */ , a3, a4, a5);
444 #endif /* AFS_SGI_ENV */
450 #endif /* AFS_AIX_ENV */
/*
 * Unix interface-discovery body (function header elided in this excerpt --
 * presumably rx_GetIFInfo; confirm against the full source).  Enumerates
 * local interfaces with SIOCGIFCONF, then per interface: fetches the
 * address (SIOCGIFADDR), skips loopback and duplicates, records flags
 * (SIOCGIFFLAGS), and obtains MTU and netmask -- preferring the AFS kernel
 * syscall, then SIOCGIFMTU/SIOCGIFNETMASK ioctls, then classful defaults.
 * Finally it widens rx_maxReceiveSize / rx_maxJumboRecvSize and grows the
 * packet pool.  Heavily elided; intervening statements are not shown.
 */
459 struct ifreq ifs[ADDRSPERSITE];
462 char buf[BUFSIZ], *cp, *cplim;
464 struct sockaddr_in *a;
/* Reset the cached tables before re-querying the kernel. */
475 memset(rxi_NetAddrs, 0, sizeof(rxi_NetAddrs));
476 memset(myNetFlags, 0, sizeof(myNetFlags));
477 memset(myNetMTUs, 0, sizeof(myNetMTUs));
478 memset(myNetMasks, 0, sizeof(myNetMasks));
/* Throwaway UDP socket used only to issue the SIOCGIF* ioctls. */
480 s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
481 if (s == OSI_NULLSOCKET)
484 ifc.ifc_len = sizeof(buf);
488 ifc.ifc_len = sizeof(ifs);
489 ifc.ifc_buf = (caddr_t) & ifs[0];
490 memset(&ifs[0], 0, sizeof(ifs));
492 res = ioctl(s, SIOCGIFCONF, &ifc);
494 /* fputs(stderr, "ioctl error IFCONF\n"); */
501 #define size(p) MAX((p).sa_len, sizeof(p))
502 cplim = buf + ifc.ifc_len; /*skip over if's with big ifr_addr's */
503 for (cp = buf; cp < cplim;
504 cp += sizeof(ifr->ifr_name) + MAX(a->sin_len, sizeof(*a))) {
505 if (rxi_numNetAddrs >= ADDRSPERSITE)
508 ifr = (struct ifreq *)cp;
/* Alternate (fixed-size struct ifreq array) walk of the ifconf results. */
510 len = ifc.ifc_len / sizeof(struct ifreq);
511 if (len > ADDRSPERSITE)
514 for (i = 0; i < len; ++i) {
516 res = ioctl(s, SIOCGIFADDR, ifr);
519 /* fputs(stderr, "ioctl error IFADDR\n");
520 * perror(ifr->ifr_name); */
523 a = (struct sockaddr_in *)&ifr->ifr_addr;
524 if (a->sin_family != AF_INET)
526 rxi_NetAddrs[rxi_numNetAddrs] = ntohl(a->sin_addr.s_addr);
527 if (rx_IsLoopbackAddr(rxi_NetAddrs[rxi_numNetAddrs])) {
528 /* we don't really care about "localhost" */
/* Skip addresses we have already recorded (aliases/duplicates). */
531 for (j = 0; j < rxi_numNetAddrs; j++) {
532 if (rxi_NetAddrs[j] == rxi_NetAddrs[rxi_numNetAddrs])
535 if (j < rxi_numNetAddrs)
538 /* fprintf(stderr, "if %s addr=%x\n", ifr->ifr_name,
539 * rxi_NetAddrs[rxi_numNetAddrs]); */
/* Record interface flags; loopback aliases are skipped as well. */
542 res = ioctl(s, SIOCGIFFLAGS, ifr);
544 myNetFlags[rxi_numNetAddrs] = ifr->ifr_flags;
546 /* Handle aliased loopbacks as well. */
547 if (ifr->ifr_flags & IFF_LOOPBACK)
550 /* fprintf(stderr, "if %s flags=%x\n",
551 * ifr->ifr_name, ifr->ifr_flags); */
553 * fputs(stderr, "ioctl error IFFLAGS\n");
554 * perror(ifr->ifr_name); */
556 #endif /* SIOCGIFFLAGS */
558 #if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV)
559 /* this won't run on an AIX system w/o a cache manager */
560 rxi_syscallp = rxi_syscall;
563 /* If I refer to kernel extensions that aren't loaded on AIX, the
564 * program refuses to load and run, so I simply can't include the
565 * following code. Fortunately, AIX is the one operating system in
566 * which the subsequent ioctl works reliably. */
/* Preferred source of MTU/mask: the AFS kernel module, via syscall. */
568 if ((*rxi_syscallp) (20 /*AFSOP_GETMTU */ ,
569 htonl(rxi_NetAddrs[rxi_numNetAddrs]),
570 &(myNetMTUs[rxi_numNetAddrs]))) {
571 /* fputs(stderr, "syscall error GETMTU\n");
572 * perror(ifr->ifr_name); */
573 myNetMTUs[rxi_numNetAddrs] = 0;
575 if ((*rxi_syscallp) (42 /*AFSOP_GETMASK */ ,
576 htonl(rxi_NetAddrs[rxi_numNetAddrs]),
577 &(myNetMasks[rxi_numNetAddrs]))) {
578 /* fputs(stderr, "syscall error GETMASK\n");
579 * perror(ifr->ifr_name); */
580 myNetMasks[rxi_numNetAddrs] = 0;
582 myNetMasks[rxi_numNetAddrs] =
583 ntohl(myNetMasks[rxi_numNetAddrs]);
584 /* fprintf(stderr, "if %s mask=0x%x\n",
585 * ifr->ifr_name, myNetMasks[rxi_numNetAddrs]); */
/* MTU fallback: default to OLD_MAX_PACKET_SIZE, then try SIOCGIFMTU. */
588 if (myNetMTUs[rxi_numNetAddrs] == 0) {
589 myNetMTUs[rxi_numNetAddrs] = OLD_MAX_PACKET_SIZE + RX_IPUDP_SIZE;
591 res = ioctl(s, SIOCGIFMTU, ifr);
592 if ((res == 0) && (ifr->ifr_metric > 128)) { /* sanity check */
593 myNetMTUs[rxi_numNetAddrs] = ifr->ifr_metric;
594 /* fprintf(stderr, "if %s mtu=%d\n",
595 * ifr->ifr_name, ifr->ifr_metric); */
597 /* fputs(stderr, "ioctl error IFMTU\n");
598 * perror(ifr->ifr_name); */
/* Mask fallback: classful guess, then try SIOCGIFNETMASK if available. */
603 if (myNetMasks[rxi_numNetAddrs] == 0) {
604 myNetMasks[rxi_numNetAddrs] =
605 fudge_netmask(rxi_NetAddrs[rxi_numNetAddrs]);
606 #ifdef SIOCGIFNETMASK
607 res = ioctl(s, SIOCGIFNETMASK, ifr);
609 a = (struct sockaddr_in *)&ifr->ifr_addr;
610 myNetMasks[rxi_numNetAddrs] = ntohl(a->sin_addr.s_addr);
611 /* fprintf(stderr, "if %s subnetmask=0x%x\n",
612 * perror(ifr->ifr_name); */
614 /* fputs(stderr, "ioctl error IFMASK\n");
615 * perror(ifr->ifr_name); */
/* Widen the global receive limit for any non-loopback interface found. */
620 if (!rx_IsLoopbackAddr(rxi_NetAddrs[rxi_numNetAddrs])) { /* ignore lo0 */
623 rxi_nRecvFrags * (myNetMTUs[rxi_numNetAddrs] - RX_IP_SIZE);
624 maxsize -= UDP_HDR_SIZE; /* only the first frag has a UDP hdr */
625 if (rx_maxReceiveSize < maxsize)
626 rx_maxReceiveSize = MIN(RX_MAX_PACKET_SIZE, maxsize);
633 /* have to allocate at least enough to allow a single packet to reach its
634 * maximum size, so ReadPacket will work. Allocate enough for a couple
635 * of packets to do so, for good measure */
637 int npackets, ncbufs;
639 rx_maxJumboRecvSize =
640 RX_HEADER_SIZE + rxi_nDgramPackets * RX_JUMBOBUFFERSIZE +
641 (rxi_nDgramPackets - 1) * RX_JUMBOHEADERSIZE;
642 rx_maxJumboRecvSize = MAX(rx_maxJumboRecvSize, rx_maxReceiveSize);
643 ncbufs = (rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE);
645 ncbufs = ncbufs / RX_CBUFFERSIZE;
646 npackets = rx_initSendWindow - 1;
647 rxi_MorePackets(npackets * (ncbufs + 1));
651 #endif /* AFS_NT40_ENV */
/*
 * rxi_InitPeerParams -- seed a freshly-created rx_peer's timeout, rate and
 * MTU/datagram parameters.  Visible flow: match the peer's address against
 * the cached local interface tables to decide local vs. remote MTU and
 * timeout, optionally refine ifMTU from the kernel's cached path MTU
 * (connect + getsockopt IP_MTU on Linux), then derive natMTU, maxMTU and
 * the slow-start datagram parameters.  Elided excerpt; statements between
 * the embedded original line numbers are not shown.
 */
653 /* Called from rxi_FindPeer, when initializing a clear rx_peer structure,
654 * to get interesting information.
655 * Curiously enough, the rx_peerHashTable_lock currently protects the
656 * Inited variable (and hence rx_GetIFInfo). When the fs suite uses
657 * pthreads, this issue will need to be revisited.
661 rxi_InitPeerParams(struct rx_peer *pp)
666 #if defined(ADAPT_PMTU) && defined(IP_MTU)
668 struct sockaddr_in addr;
675 * there's a race here since more than one thread could call
676 * rx_GetIFInfo. The race stops in rx_GetIFInfo.
684 /* try to second-guess IP, and identify which link is most likely to
685 * be used for traffic to/from this host. */
686 ppaddr = ntohl(pp->host);
689 rx_rto_setPeerTimeoutSecs(pp, 2);
690 pp->rateFlag = 2; /* start timing after two full packets */
691 /* I don't initialize these, because I presume they are bzero'd...
692 * pp->burstSize pp->burst pp->burstWait.sec pp->burstWait.usec
696 for (ix = 0; ix < rxi_numNetAddrs; ++ix) {
697 if ((rxi_NetAddrs[ix] & myNetMasks[ix]) == (ppaddr & myNetMasks[ix])) {
698 #ifdef IFF_POINTOPOINT
699 if (myNetFlags[ix] & IFF_POINTOPOINT)
700 rx_rto_setPeerTimeoutSecs(pp, 4);
701 #endif /* IFF_POINTOPOINT */
/* Peer is on a local subnet: size ifMTU from that interface's MTU. */
703 rxmtu = myNetMTUs[ix] - RX_IPUDP_SIZE;
704 if (rxmtu < RX_MIN_PACKET_SIZE)
705 rxmtu = RX_MIN_PACKET_SIZE;
706 if (pp->ifMTU < rxmtu)
707 pp->ifMTU = MIN(rx_MyMaxSendSize, rxmtu);
711 if (!pp->ifMTU) { /* not local */
712 rx_rto_setPeerTimeoutSecs(pp, 3);
713 pp->ifMTU = MIN(rx_MyMaxSendSize, RX_REMOTE_PACKET_SIZE);
715 #else /* ADAPT_MTU */
716 pp->rateFlag = 2; /* start timing after two full packets */
717 rx_rto_setPeerTimeoutSecs(pp, 2);
718 pp->ifMTU = MIN(rx_MyMaxSendSize, OLD_MAX_PACKET_SIZE);
719 #endif /* ADAPT_MTU */
/* If available, ask the kernel for its cached path MTU to this peer:
 * connect() a throwaway UDP socket and read IP_MTU from it. */
720 #if defined(ADAPT_PMTU) && defined(IP_MTU)
721 sock=socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
722 if (sock != OSI_NULLSOCKET) {
723 addr.sin_family = AF_INET;
724 addr.sin_addr.s_addr = pp->host;
725 addr.sin_port = pp->port;
726 if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
728 socklen_t s = sizeof(mtu);
729 if (getsockopt(sock, SOL_IP, IP_MTU, &mtu, &s)== 0) {
730 pp->ifMTU = MIN(mtu - RX_IPUDP_SIZE, pp->ifMTU);
/* Derive the remaining MTU/datagram parameters from ifMTU. */
740 pp->ifMTU = rxi_AdjustIfMTU(pp->ifMTU);
741 pp->maxMTU = OLD_MAX_PACKET_SIZE; /* for compatibility with old guys */
742 pp->natMTU = MIN((int)pp->ifMTU, OLD_MAX_PACKET_SIZE);
743 pp->maxDgramPackets =
744 MIN(rxi_nDgramPackets,
745 rxi_AdjustDgramPackets(rxi_nSendFrags, pp->ifMTU));
747 MIN(rxi_nDgramPackets,
748 rxi_AdjustDgramPackets(rxi_nSendFrags, pp->ifMTU));
749 pp->maxDgramPackets = 1;
750 /* Initialize slow start parameters */
751 pp->MTU = MIN(pp->natMTU, pp->maxMTU);
753 pp->nDgramPackets = 1;
/*
 * Disable jumbogram support (function header elided in this excerpt --
 * presumably rx_SetNoJumbo; confirm against the full source): clamp the
 * receive size to the old single-packet maximum and use one fragment per
 * datagram in each direction.
 */
757 /* Don't expose jumbogram internals. */
761 rx_maxReceiveSize = OLD_MAX_PACKET_SIZE;
762 rxi_nSendFrags = rxi_nRecvFrags = 1;
/*
 * rx_SetMaxMTU -- force the process-wide send and receive size limits to a
 * caller-supplied MTU.  Ordering constraint with rx_SetNoJumbo is noted in
 * the original comment below.
 */
765 /* Override max MTU. If rx_SetNoJumbo is called, it must be
766 called before calling rx_SetMaxMTU since SetNoJumbo clobbers rx_maxReceiveSize */
768 rx_SetMaxMTU(int mtu)
770 rx_MyMaxSendSize = rx_maxReceiveSizeUser = rx_maxReceiveSize = mtu;
/*
 * rxi_HandleSocketError -- drain one message from the socket's Linux error
 * queue (MSG_ERRQUEUE) and, for an EMSGSIZE extended error carrying a sane
 * path MTU (>= 68, the IPv4 minimum), feed the new MTU to rxi_SetPeerMtu()
 * for the peer identified by the returned address.  Bounds-checks each
 * control message before use.  Block is truncated in this excerpt; the
 * function continues past the last line shown.
 */
773 #if defined(ADAPT_PMTU)
775 rxi_HandleSocketError(int socket)
778 #if defined(HAVE_LINUX_ERRQUEUE_H)
780 struct cmsghdr *cmsg;
781 struct sock_extended_err *err;
782 struct sockaddr_in addr;
783 char controlmsgbuf[256];
/* Receive the queued error; the peer's address lands in msg_name. */
786 msg.msg_name = &addr;
787 msg.msg_namelen = sizeof(addr);
790 msg.msg_control = controlmsgbuf;
791 msg.msg_controllen = 256;
793 code = recvmsg(socket, &msg, MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);
795 if (code < 0 || !(msg.msg_flags & MSG_ERRQUEUE))
/* Walk the control messages, rejecting any that would run past the
 * buffer or claim a zero length, looking for SOL_IP/IP_RECVERR. */
798 for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
799 if ((char *)cmsg - controlmsgbuf > msg.msg_controllen - CMSG_SPACE(0) ||
800 (char *)cmsg - controlmsgbuf > msg.msg_controllen - CMSG_SPACE(cmsg->cmsg_len) ||
801 cmsg->cmsg_len == 0) {
805 if (cmsg->cmsg_level == SOL_IP && cmsg->cmsg_type == IP_RECVERR)
811 err =(struct sock_extended_err *) CMSG_DATA(cmsg);
/* EMSGSIZE => a router reported a smaller path MTU in ee_info. */
813 if (err->ee_errno == EMSGSIZE && err->ee_info >= 68) {
814 rxi_SetPeerMtu(NULL, addr.sin_addr.s_addr, addr.sin_port,
815 err->ee_info - RX_IPUDP_SIZE);
817 /* other DEST_UNREACH's and TIME_EXCEEDED should be dealt with too */