2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
16 #include <sys/malloc.h>
17 #include "rx/rx_kcommon.h"
19 #ifdef RXK_LISTENER_ENV
/*
 * osi_NetReceive -- read one datagram from asocket via soreceive(),
 * scattering it into the caller's iovecs; returns the sender address
 * in *addr and rewrites *alength to the byte count actually received.
 * NOTE(review): several lines of this function are elided in this
 * excerpt (gaps in the original line numbering); the comments below
 * describe only what the visible statements establish.
 */
20 int osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
21 int nvecs, int *alength)
25 struct iovec iov[RX_MAXIOVECS];
26 struct sockaddr *sa = NULL;
/* Record whether we held the AFS global lock on entry -- presumably
 * dropped around the blocking receive and reacquired after; the
 * lock-juggling lines are elided here. TODO confirm. */
29 int haveGlock = ISAFS_GLOCK();
30 /*AFS_STATCNT(osi_NetReceive);*/
/* Refuse oversized iovec arrays rather than overrun the local iov[]. */
32 if (nvecs > RX_MAXIOVECS)
33 osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);
/* Copy the caller's iovecs into the local array (loop body elided). */
35 for (i = 0 ; i < nvecs ; i++)
/* Build the uio: capacity is the caller's requested length; the data
 * buffers live in kernel space. */
41 u.uio_resid = *alength;
42 u.uio_segflg = UIO_SYSSPACE;
/* Receive one datagram; soreceive allocates *sa on success. */
52 code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
59 Debugger("afs NetReceive busted");
/* uio_resid is the unread remainder, so this yields bytes received. */
66 *alength -= u.uio_resid;
/* Hand the source address back to the caller when it is IPv4. */
68 if (sa->sa_family == AF_INET) {
70 *addr = *(struct sockaddr_in *) sa;
72 printf("Unknown socket family %d in NetReceive\n", sa->sa_family);
78 extern int rxk_ListenerPid;
/*
 * osi_StopListener -- shut down the rx listener kernel process.
 * The visible code only looks the listener up by pid; the signal or
 * teardown that presumably follows is elided from this excerpt --
 * TODO confirm against the full source.
 */
79 void osi_StopListener(void)
84 p = pfind(rxk_ListenerPid);
/*
 * osi_NetSend (RXK_LISTENER_ENV variant) -- send alength bytes
 * described by dvec (nvecs entries) to *addr over asocket using
 * sosend().  The error code lands in `code`; the return statement is
 * elided from this excerpt.
 * NOTE(review): many lines are missing here; the comments cover only
 * the visible statements.
 */
90 osi_NetSend(osi_socket asocket, struct sockaddr_in *addr,
91 struct iovec *dvec, int nvecs, afs_int32 alength, int istack)
93 register afs_int32 code;
95 struct iovec iov[RX_MAXIOVECS];
97 int haveGlock = ISAFS_GLOCK();
99 AFS_STATCNT(osi_NetSend);
/* Guard the fixed-size local iov[] array. */
100 if (nvecs > RX_MAXIOVECS)
101 osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);
/* Copy the caller's iovecs into iov[] (loop body elided). */
103 for (i = 0 ; i < nvecs ; i++)
/* Describe the whole send in one uio: a kernel-space write of
 * alength bytes. */
107 u.uio_iovcnt = nvecs;
109 u.uio_resid = alength;
110 u.uio_segflg = UIO_SYSSPACE;
111 u.uio_rw = UIO_WRITE;
112 #ifdef AFS_FBSD50_ENV
/* BSD sockaddr_in carries an explicit length byte. */
118 addr->sin_len = sizeof(struct sockaddr_in);
/* FreeBSD 5.x sosend() takes a struct thread; 4.x takes a proc. */
125 #ifdef AFS_FBSD50_ENV
126 code = sosend(asocket, (struct sockaddr *) addr, &u, NULL, NULL, 0, curthread);
128 code = sosend(asocket, (struct sockaddr *) addr, &u, NULL, NULL, 0, curproc);
133 Debugger("afs NetSend busted");
143 /* This code *almost* works :( */
144 static struct protosw parent_proto; /* udp proto switch */
145 static void rxk_input (struct mbuf *am, int iphlen);
146 static void rxk_fasttimo (void);
/*
 * rxk_init (the function's signature line is elided in this excerpt)
 * -- splice rx into the kernel's UDP protocol switch: save the stock
 * protosw entry in parent_proto, then point pr_input/pr_fasttimo at
 * our handlers so rx sees UDP traffic first.
 */
148 /* start intercepting basic calls */
150 register struct protosw *tpro, *last;
151 if (rxk_initDone) return 0;
/* Scan the inet domain's protocol table for the UDP entry. */
153 last = inetdomain.dom_protoswNPROTOSW;
154 for (tpro = inetdomain.dom_protosw; tpro < last; tpro++)
155 if (tpro->pr_protocol == IPPROTO_UDP) {
156 #if 0 /* not exported */
157 /* force UDP checksumming on for AFS */
/* Keep a copy of the original UDP protosw so our hooks can chain
 * back to it for non-rx traffic. */
161 memcpy(&parent_proto, tpro, sizeof(parent_proto));
162 tpro->pr_input = rxk_input;
163 tpro->pr_fasttimo = rxk_fasttimo;
165 * don't bother with pr_drain and pr_ctlinput
166 * until we have something to do
/* Reached only if no UDP entry exists in the table. */
171 osi_Panic("inet:no udp");
/*
 * rxk_input -- pr_input hook installed in place of UDP's.  If the
 * datagram's destination port is one of rxk_ports[], verify the UDP
 * checksum, copy the payload into an rx packet and deliver it to rx;
 * otherwise fall through to the saved UDP pr_input (parent_proto).
 * NOTE(review): many lines are elided in this excerpt; the comments
 * below describe only the visible statements.
 */
175 static void rxk_input (struct mbuf *am, int iphlen)
178 register unsigned short *tsp;
181 register struct ip *ti;
182 struct udpiphdr *tvu;
186 struct sockaddr_in taddr;
189 int data_len, comp_sum;
194 /* make sure we have base ip and udp headers in first mbuf */
195 if (iphlen > sizeof (struct ip)) {
196 ip_stripoptions(am, NULL);
197 iphlen = sizeof (struct ip);
200 if (am->m_len < sizeof(struct udpiphdr)) {
201 am = m_pullup(am, sizeof(struct udpiphdr));
208 ti = mtod(am, struct ip *);
209 /* skip basic ip hdr */
210 tu = (struct udphdr *)(((char *)ti) + sizeof(struct ip));
212 /* now read the port out */
/* Compare the destination port against the table of rx-claimed
 * ports; index i also selects the matching entry in rxk_portRocks[]. */
216 for(tsp=rxk_ports, i=0; i<MAXRXPORTS;i++) {
217 if (*tsp++ == port) {
218 /* checksum the packet */
220 * Make mbuf data length reflect UDP length.
221 * If not enough data to reflect UDP length, drop.
223 tvu = (struct udpiphdr *)ti;
224 tlen = ntohs((u_short)tvu->ui_ulen);
225 if ((int)ti->ip_len != tlen) {
226 if (tlen > (int)ti->ip_len) {
/* Trim trailing padding so chain length matches the UDP length. */
231 m_adj(am, tlen - (int)ti->ip_len);
233 /* deliver packet to rx */
234 taddr.sin_family = AF_INET; /* compute source address */
235 taddr.sin_port = tu->uh_sport;
236 taddr.sin_addr.s_addr = ti->ip_src.s_addr;
237 taddr.sin_len = sizeof(taddr);
238 tvu = (struct udpiphdr *) ti; /* virtual udp structure, for cksum */
239 /* handle the checksum. Note that this code damages the actual ip
240 header (replacing it with the virtual one, which is the same size),
241 so we must ensure we get everything out we need, first */
/* uh_sum == 0 means the sender did not checksum the datagram. */
242 if ( tu->uh_sum != 0) {
243 /* if the checksum is there, always check it. It's crazy not
244 * to, unless you can really be sure that your
245 * underlying network (and interfaces and drivers and
246 * DMA hardware, etc!) is error-free. First, fill
247 * in entire virtual ip header. */
248 memset(tvu->ui_i.ih_x1, 0, 9);
249 tvu->ui_len = tvu->ui_ulen;
250 tlen = ntohs((unsigned short)(tvu->ui_ulen));
/* A correct UDP pseudo-header checksum sums to 0 over the region. */
251 if (in_cksum(am, sizeof(struct ip) + tlen)) {
252 /* checksum, including cksum field, doesn't come out 0, so
253 this packet is bad */
261 * 28 is IP (20) + UDP (8) header. ulen includes
262 * udp header, and we *don't* tell RX about udp
263 * header either. So, we remove those 8 as well.
265 data_len = ntohs(tu->uh_ulen);
/* Ask rx for a packet buffer, then copy the payload (skipping the
 * 28-byte IP+UDP header) out of the mbuf chain into it; the chain is
 * freed via m_freem by the copy routine. */
268 if (!(*rxk_GetPacketProc)(&phandle, data_len)) {
269 if (rx_mb_to_packet(am, m_freem, 28, data_len, phandle)) {
270 /* XXX should just increment counter here.. */
271 printf("rx: truncated UDP packet\n");
272 rxi_FreePacket(phandle);
/* Successful copy: hand the packet to rx along with the source
 * address and the per-port "rock" registered for this slot. */
275 (*rxk_PacketArrivalProc)(phandle, &taddr,
276 rxk_portRocks[i], data_len);
285 /* if we get here, try to deliver packet to udp */
286 if (tproc = parent_proto.pr_input) (*tproc)(am,iphlen);
293 * UDP fast timer to raise events for all but Solaris and NCR.
294 * Called about 5 times per second (at unknown priority?). Must go to
295 * splnet or obtain global lock before touching anything significant.
297 static void rxk_fasttimo (void)
302 /* do rx fasttimo processing here */
303 rxevent_RaiseEvents(&temp);
/* Chain to UDP's original fast-timeout handler, if it had one. */
304 if (tproc = parent_proto.pr_fasttimo) (*tproc)();
307 /* rx_NetSend - send asize bytes at adata from asocket to host at addr.
309 * Now, why do we allocate a new buffer when we could theoretically use the one
310 * pointed to by adata? Because PRU_SEND returns after queueing the message,
311 * not after sending it. If the sender changes the data after queueing it,
312 * we'd see the already-queued data change. One attempt to fix this without
313 * adding a copy would be to have this function wait until the datagram is
314 * sent; however this doesn't work well. In particular, if a host is down, and
315 * an ARP fails to that host, this packet will be queued until the ARP request
316 * comes back, which could be hours later. We can't block in this routine that
317 * long, since it prevents RPC timeouts from happening.
319 /* XXX In the brave new world, steal the data bufs out of the rx_packet iovec,
320 * and just queue those. XXX
323 /* set lock on sockbuf sb; can't call sblock since we're at interrupt level
/*
 * trysblock (name/return lines partly elided in this excerpt) --
 * non-blocking attempt to take SB_LOCK on a socket buffer: returns
 * -1 if already locked, otherwise sets SB_LOCK (the success-path
 * return is elided here -- TODO confirm it returns 0).
 */
326 register struct sockbuf *sb; {
327 AFS_STATCNT(trysblock);
328 if (sb->sb_flags & SB_LOCK){
329 return -1; /* can't lock socket */
331 sb->sb_flags |= SB_LOCK;
335 /* We only have to do all the mbuf management ourselves if we can be called at
336 interrupt time. in RXK_LISTENER_ENV, we can just call sosend() */
/*
 * osi_NetSend (interrupt-level variant) -- copy the caller's iovecs
 * into a freshly allocated mbuf chain and queue it on asocket via
 * pru_send.  The copy is mandatory because pru_send returns after
 * queueing, not after sending (see the rx_NetSend block comment
 * earlier in this file).
 * NOTE(review): a large number of lines are elided in this excerpt;
 * the comments cover only the visible statements.
 */
338 osi_NetSend(osi_socket asocket, struct sockaddr_in *addr,
339 struct iovec *dvec, int nvec, afs_int32 asize, int istack)
341 register struct mbuf *tm, *um;
342 register afs_int32 code;
344 struct mbuf *top = 0;
345 register struct mbuf *m, **mp;
356 AFS_STATCNT(osi_NetSend);
357 /* Actually, the Ultrix way is as good as any for us, so we don't bother with
358 * special mbufs any more. Used to think we could get away with not copying
359 * the data to the interface, but there's no way to tell the caller not to
360 * reuse the buffers after sending, so we lost out on that trick anyway */
/* Bail out rather than sleep if the send buffer is already locked --
 * we may be at interrupt level and cannot block in sblock(). */
362 if (trysblock(&asocket->so_snd)) {
/* Start with the first caller iovec. */
368 tdata = dvec[i].iov_base;
369 tl = dvec[i].iov_len;
/* First mbuf of the chain must be a packet header mbuf. */
373 MGETHDR(m, M_DONTWAIT, MT_DATA);
375 sbunlock(&asocket->so_snd);
381 m->m_pkthdr.rcvif = NULL;
/* Subsequent mbufs are plain data mbufs. */
383 MGET(m, M_DONTWAIT, MT_DATA);
385 /* can't get an mbuf, give up */
386 if (top) m_freem(top); /* free mbuf list we're building */
387 sbunlock(&asocket->so_snd);
392 * WARNING: the `4 * MLEN' is somewhat dubious. It is better than
393 * `NBPG', which may have no relation to `CLBYTES'. Also, `CLBYTES'
394 * may be so large that we never use clusters, resulting in far
395 * too many mbufs being used. It is often better to briefly use
396 * a cluster, even if we are only using a portion of it. Since
397 * we are on the xmit side, it shouldn't end up sitting on a queue
398 * for a potentially unbounded time (except perhaps if we are talking
/* Large sends: try to upgrade this mbuf to a cluster for capacity. */
401 if (asize >= 4 * MLEN) { /* try to get cluster mbuf */
402 /* different algorithms for getting cluster mbuf */
403 MCLGET(m, M_DONTWAIT);
404 if ((m->m_flags & M_EXT) == 0)
408 /* now compute usable size */
409 len = MIN(mlen, asize);
410 /* Should I look at MAPPED_MBUFS??? */
413 len = MIN(mlen, asize);
/* Account for this mbuf's payload in the packet-header total. */
417 top->m_pkthdr.len += len;
418 tpa = mtod(m, caddr_t);
/* Copy as much of the current iovec as fits into this mbuf. */
421 memcpy(tpa, tdata, rlen);
431 /* shouldn't come here! */
432 asize = 0; /* so we make progress toward completion */
/* Current iovec exhausted: advance to the next one. */
435 tdata = dvec[i].iov_base;
436 tl = dvec[i].iov_len;
448 /* setup mbuf corresponding to destination address */
449 um = m_get(M_DONTWAIT, MT_SONAME);
451 if (top) m_freem(top); /* free mbuf chain */
452 sbunlock(&asocket->so_snd);
456 memcpy(mtod(um, caddr_t), addr, sizeof(*addr));
457 addr->sin_len = um->m_len = sizeof(*addr);
458 /* note that udp_usrreq frees funny mbuf. We hold onto data, but mbuf
459 * around it is gone. */
460 /* haveGlock = ISAFS_GLOCK();
464 /* SOCKET_LOCK(asocket); */
465 /* code = (*asocket->so_proto->pr_usrreq)(asocket, PRU_SEND, tm, um, 0); */
467 if (before) Debugger("afs NetSend before");
/* Queue the chain for transmission; pru_send consumes the mbufs. */
469 code = (*asocket->so_proto->pr_usrreqs->pru_send)(asocket, 0, tm,
470 (struct sockaddr *) addr,
472 /* SOCKET_UNLOCK(asocket); */
476 sbunlock(&asocket->so_snd);
481 Debugger("afs NetSend busted");
490 #endif /* AFS_FBSD40_ENV */