2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
17 #include <sys/malloc.h>
18 #include "rx/rx_kcommon.h"
20 #ifdef RXK_LISTENER_ENV
22 osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr,
23 struct iovec *dvec, int nvecs, int *alength)
/* Receive one datagram from asocket via soreceive().  On entry *alength is
 * the total capacity described by dvec[0..nvecs-1]; on exit it is the number
 * of bytes actually received.  The sender's address is copied into *addr
 * when it is AF_INET.  (Source view is sampled: uio setup and error-path
 * lines are elided here.) */
27 struct iovec iov[RX_MAXIOVECS];
28 struct sockaddr *sa = NULL;
/* remember whether we hold the AFS global lock so it can be dropped/retaken
 * around the (potentially sleeping) socket call */
31 int haveGlock = ISAFS_GLOCK();
32 /*AFS_STATCNT(osi_NetReceive); */
34 if (nvecs > RX_MAXIOVECS)
35 osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);
/* copy the caller's iovecs into the local array used to build the uio */
37 for (i = 0; i < nvecs; i++)
43 u.uio_resid = *alength;
/* buffers are kernel-space addresses */
44 u.uio_segflg = UIO_SYSSPACE;
54 code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
/* unexpected soreceive failure: drop into the kernel debugger */
61 Debugger("afs NetReceive busted");
/* uio_resid is the space soreceive did NOT fill; the difference is the
 * number of bytes that arrived */
68 *alength -= u.uio_resid;
/* NOTE(review): sa is dereferenced without a NULL check here -- presumably
 * soreceive always supplies a source address on success; confirm. */
70 if (sa->sa_family == AF_INET) {
72 *addr = *(struct sockaddr_in *)sa;
74 printf("Unknown socket family %d in NetReceive\n", sa->sa_family);
/* pid of the kernel rx listener process, set elsewhere in the module */
80 extern int rxk_ListenerPid;
82 osi_StopListener(void)
/* Shut down the rx listener thread.  Looks the listener process up by pid;
 * the elided lines presumably signal it and close the socket -- the comment
 * below explains the locking constraints for doing so. */
87 * Have to drop global lock to safely do this.
88 * soclose() is currently protected by Giant,
89 * but pfind and psignal are MPSAFE.
93 p = pfind(rxk_ListenerPid);
103 osi_NetSend(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
104 int nvecs, afs_int32 alength, int istack)
/* Send alength bytes described by dvec[0..nvecs-1] to *addr over asocket
 * using sosend().  This is the RXK_LISTENER_ENV variant: the send happens
 * in process context, so no interrupt-level mbuf management is required
 * (contrast with the hand-built mbuf version later in this file). */
106 register afs_int32 code;
108 struct iovec iov[RX_MAXIOVECS];
110 int haveGlock = ISAFS_GLOCK();
112 AFS_STATCNT(osi_NetSend);
113 if (nvecs > RX_MAXIOVECS)
114 osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);
/* build a kernel-space write uio over the caller's iovecs */
116 for (i = 0; i < nvecs; i++)
120 u.uio_iovcnt = nvecs;
122 u.uio_resid = alength;
123 u.uio_segflg = UIO_SYSSPACE;
124 u.uio_rw = UIO_WRITE;
125 #ifdef AFS_FBSD50_ENV
/* BSD sockaddrs carry an explicit length byte; fill it in before sending */
131 addr->sin_len = sizeof(struct sockaddr_in);
/* FreeBSD 5.x sosend() takes a struct thread *; 4.x takes curproc */
138 #ifdef AFS_FBSD50_ENV
140 sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0,
144 sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0, curproc);
/* unexpected sosend failure: drop into the kernel debugger */
149 Debugger("afs NetSend busted");
159 /* This code *almost* works :( */
/* saved copy of the native UDP protosw entry, so intercepted handlers can
 * chain to the original input/fasttimo routines */
160 static struct protosw parent_proto;	/* udp proto switch */
161 static void rxk_input(struct mbuf *am, int iphlen);
162 static void rxk_fasttimo(void);
164 /* start intercepting basic calls */
/* Walk the inet domain's protocol-switch table, find the UDP entry, save it
 * in parent_proto, and splice in rxk_input/rxk_fasttimo so rx sees incoming
 * UDP traffic before the native stack does. */
167 register struct protosw *tpro, *last;
171 last = inetdomain.dom_protoswNPROTOSW;
172 for (tpro = inetdomain.dom_protosw; tpro < last; tpro++)
173 if (tpro->pr_protocol == IPPROTO_UDP) {
174 #if 0				/* not exported */
175 /* force UDP checksumming on for AFS */
/* keep the original entry so packets not destined for rx can be forwarded */
179 memcpy(&parent_proto, tpro, sizeof(parent_proto));
180 tpro->pr_input = rxk_input;
181 tpro->pr_fasttimo = rxk_fasttimo;
183 * don't bother with pr_drain and pr_ctlinput
184 * until we have something to do
/* no UDP protosw entry found: rx cannot function, so panic */
189 osi_Panic("inet:no udp");
194 rxk_input(struct mbuf *am, int iphlen)
/* Replacement UDP pr_input handler.  If the datagram's destination port is
 * one rx has registered (in rxk_ports[]), validate its length, verify the
 * UDP checksum, and deliver it to rx via rxk_GetPacketProc /
 * rxk_PacketArrivalProc.  Otherwise fall through and hand the packet to the
 * saved native UDP input routine (parent_proto.pr_input).
 * (Source view is sampled: some declarations, braces and drop paths are
 * elided.) */
197 register unsigned short *tsp;
200 register struct ip *ti;
201 struct udpiphdr *tvu;
205 struct sockaddr_in taddr;
208 int data_len, comp_sum;
213 /* make sure we have base ip and udp headers in first mbuf */
/* strip IP options so the header is exactly sizeof(struct ip) */
214 if (iphlen > sizeof(struct ip)) {
215 ip_stripoptions(am, NULL);
216 iphlen = sizeof(struct ip);
/* pull the combined IP+UDP header into the first mbuf so it can be
 * addressed contiguously */
219 if (am->m_len < sizeof(struct udpiphdr)) {
220 am = m_pullup(am, sizeof(struct udpiphdr));
227 ti = mtod(am, struct ip *);
228 /* skip basic ip hdr */
229 tu = (struct udphdr *)(((char *)ti) + sizeof(struct ip));
231 /* now read the port out */
/* linear scan of the small registered-port table */
235 for (tsp = rxk_ports, i = 0; i < MAXRXPORTS; i++) {
236 if (*tsp++ == port) {
237 /* checksum the packet */
239 * Make mbuf data length reflect UDP length.
240 * If not enough data to reflect UDP length, drop.
242 tvu = (struct udpiphdr *)ti;
243 tlen = ntohs((u_short) tvu->ui_ulen);
/* reconcile the IP payload length with the UDP header's length field;
 * trim excess data, drop (in elided code) if too short */
244 if ((int)ti->ip_len != tlen) {
245 if (tlen > (int)ti->ip_len) {
250 m_adj(am, tlen - (int)ti->ip_len);
252 /* deliver packet to rx */
253 taddr.sin_family = AF_INET;	/* compute source address */
254 taddr.sin_port = tu->uh_sport;
255 taddr.sin_addr.s_addr = ti->ip_src.s_addr;
256 taddr.sin_len = sizeof(taddr);
257 tvu = (struct udpiphdr *)ti;	/* virtual udp structure, for cksum */
258 /* handle the checksum. Note that this code damages the actual ip
259 * header (replacing it with the virtual one, which is the same size),
260 * so we must ensure we get everything out we need, first */
/* uh_sum == 0 means the sender did not checksum the datagram */
261 if (tu->uh_sum != 0) {
262 /* if the checksum is there, always check it. It's crazy not
263 * to, unless you can really be sure that your
264 * underlying network (and interfaces and drivers and
265 * DMA hardware, etc!) is error-free. First, fill
266 * in entire virtual ip header. */
/* zero the pseudo-header padding bytes before summing */
267 memset(tvu->ui_i.ih_x1, 0, 9);
268 tvu->ui_len = tvu->ui_ulen;
269 tlen = ntohs((unsigned short)(tvu->ui_ulen));
/* a correct datagram sums to 0 including the checksum field itself */
270 if (in_cksum(am, sizeof(struct ip) + tlen)) {
271 /* checksum, including cksum field, doesn't come out 0, so
272 * this packet is bad */
280 * 28 is IP (20) + UDP (8) header. ulen includes
281 * udp header, and we *don't* tell RX about udp
282 * header either. So, we remove those 8 as well.
284 data_len = ntohs(tu->uh_ulen);
/* obtain an rx packet buffer; on success copy the mbuf payload into it,
 * skipping the 28 header bytes described above */
286 if (!(*rxk_GetPacketProc) (&phandle, data_len)) {
287 if (rx_mb_to_packet(am, m_freem, 28, data_len, phandle)) {
288 /* XXX should just increment counter here.. */
289 printf("rx: truncated UDP packet\n");
290 rxi_FreePacket(phandle);
/* hand the completed packet to rx along with the source address and the
 * per-port "rock" registered for this port */
292 (*rxk_PacketArrivalProc) (phandle, &taddr,
293 rxk_portRocks[i], data_len);
302 /* if we get here, try to deliver packet to udp */
/* intentional assignment-in-condition: chain to the native UDP input if one
 * was saved by the interception setup */
303 if (tproc = parent_proto.pr_input)
304 (*tproc) (am, iphlen);
311 * UDP fast timer to raise events for all but Solaris and NCR.
312 * Called about 5 times per second (at unknown priority?). Must go to
313 * splnet or obtain global lock before touching anything significant.
/* Intercepted UDP fasttimo handler: raise any rx events that have come due,
 * then chain to the native UDP fasttimo saved in parent_proto. */
321 /* do rx fasttimo processing here */
322 rxevent_RaiseEvents(&temp);
/* intentional assignment-in-condition, matching rxk_input's chaining style */
323 if (tproc = parent_proto.pr_fasttimo)
327 /* rx_NetSend - send asize bytes at adata from asocket to host at addr.
329 * Now, why do we allocate a new buffer when we could theoretically use the one
330 * pointed to by adata? Because PRU_SEND returns after queueing the message,
331 * not after sending it. If the sender changes the data after queueing it,
332 * we'd see the already-queued data change. One attempt to fix this without
333 * adding a copy would be to have this function wait until the datagram is
334 * sent; however this doesn't work well. In particular, if a host is down, and
335 * an ARP fails to that host, this packet will be queued until the ARP request
336 * comes back, which could be hours later. We can't block in this routine that
337 * long, since it prevents RPC timeouts from happening.
339 /* XXX In the brave new world, steal the data bufs out of the rx_packet iovec,
340 * and just queue those. XXX
343 /* set lock on sockbuf sb; can't call sblock since we're at interrupt level
347 register struct sockbuf *sb;
/* Non-blocking attempt to lock a socket send buffer: fail with -1 if the
 * buffer is already locked, otherwise set SB_LOCK ourselves (the success
 * return is in elided lines).  NOTE(review): the test-and-set is not atomic
 * -- it presumably relies on the caller's interrupt/spl protection level to
 * exclude concurrent access; confirm. */
349 AFS_STATCNT(trysblock);
350 if (sb->sb_flags & SB_LOCK) {
351 return -1;		/* can't lock socket */
353 sb->sb_flags |= SB_LOCK;
357 /* We only have to do all the mbuf management ourselves if we can be called at
358 interrupt time. in RXK_LISTENER_ENV, we can just call sosend() */
360 osi_NetSend(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
361 int nvec, afs_int32 asize, int istack)
/* Interrupt-level-safe variant of osi_NetSend: copies asize bytes from the
 * caller's iovecs into a freshly built mbuf chain and queues it on asocket
 * via pru_send.  Data is copied (not referenced) because the caller may
 * reuse its buffers as soon as this returns -- see the block comment above
 * this function.  (Source view is sampled: loop heads, mlen computation and
 * several error-path lines are elided.) */
363 register struct mbuf *tm, *um;
364 register afs_int32 code;
366 struct mbuf *top = 0;
367 register struct mbuf *m, **mp;
375 static int before = 0;
378 AFS_STATCNT(osi_NetSend);
379 /* Actually, the Ultrix way is as good as any for us, so we don't bother with
380 * special mbufs any more. Used to think we could get away with not copying
381 * the data to the interface, but there's no way to tell the caller not to
382 * reuse the buffers after sending, so we lost out on that trick anyway */
/* lock the socket send buffer without sleeping; bail if already locked */
384 if (trysblock(&asocket->so_snd)) {
/* walk the caller's iovecs, copying their data into mbufs */
390 tdata = dvec[i].iov_base;
391 tl = dvec[i].iov_len;
/* first mbuf of the chain is a packet header mbuf */
395 MGETHDR(m, M_DONTWAIT, MT_DATA);
397 sbunlock(&asocket->so_snd);
403 m->m_pkthdr.rcvif = NULL;
/* subsequent mbufs are plain data mbufs */
405 MGET(m, M_DONTWAIT, MT_DATA);
407 /* can't get an mbuf, give up */
409 m_freem(top);	/* free mbuf list we're building */
410 sbunlock(&asocket->so_snd);
415 * WARNING: the `4 * MLEN' is somewhat dubious. It is better than
416 * `NBPG', which may have no relation to `CLBYTES'. Also, `CLBYTES'
417 * may be so large that we never use clusters, resulting in far
418 * too many mbufs being used. It is often better to briefly use
419 * a cluster, even if we are only using a portion of it. Since
420 * we are on the xmit side, it shouldn't end up sitting on a queue
421 * for a potentially unbounded time (except perhaps if we are talking
424 if (asize >= 4 * MLEN) {	/* try to get cluster mbuf */
425 /* different algorithms for getting cluster mbuf */
426 MCLGET(m, M_DONTWAIT);
/* M_EXT not set means the cluster allocation failed; fall back to the
 * plain mbuf's own storage (handling in elided lines) */
427 if ((m->m_flags & M_EXT) == 0)
431 /* now compute usable size */
432 len = MIN(mlen, asize);
433 /* Should I look at MAPPED_MBUFS??? */
436 len = MIN(mlen, asize);
/* keep the packet-header length in sync as the chain grows */
440 top->m_pkthdr.len += len;
441 tpa = mtod(m, caddr_t);
444 memcpy(tpa, tdata, rlen);
454 /* shouldn't come here! */
455 asize = 0;		/* so we make progress toward completion */
/* advance to the next source iovec */
458 tdata = dvec[i].iov_base;
459 tl = dvec[i].iov_len;
471 /* setup mbuf corresponding to destination address */
472 um = m_get(M_DONTWAIT, MT_SONAME);
475 m_freem(top);	/* free mbuf chain */
476 sbunlock(&asocket->so_snd);
480 memcpy(mtod(um, caddr_t), addr, sizeof(*addr));
/* fix up the sockaddr length byte in both the caller's addr and the mbuf */
481 addr->sin_len = um->m_len = sizeof(*addr);
482 /* note that udp_usrreq frees funny mbuf. We hold onto data, but mbuf
483 * around it is gone. */
484 /* haveGlock = ISAFS_GLOCK();
488 /* SOCKET_LOCK(asocket); */
489 /* code = (*asocket->so_proto->pr_usrreq)(asocket, PRU_SEND, tm, um, 0); */
492 Debugger("afs NetSend before");
/* queue the chain on the socket via the protocol's pru_send entry point;
 * this returns after queueing, not after transmission (see comment above) */
495 (*asocket->so_proto->pr_usrreqs->pru_send) (asocket, 0, tm,
498 /* SOCKET_UNLOCK(asocket); */
502 sbunlock(&asocket->so_snd);
/* unexpected send failure: drop into the kernel debugger */
507 Debugger("afs NetSend busted");
516 #endif /* AFS_FBSD40_ENV */