2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
17 #include <sys/malloc.h>
18 #include "rx/rx_kcommon.h"
20 #ifdef RXK_LISTENER_ENV
/*
 * osi_NetReceive: receive one datagram from asocket into the caller's iovec
 * array via soreceive(), storing the sender in *addr and the count of bytes
 * actually received in *alength.
 * NOTE(review): this chunk is a lossy extraction -- the return-type line,
 * braces, the uio/iov initialization loop body and the error/GLOCK handling
 * are missing from view, so comments describe only the visible lines.
 */
22 osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr,
23 struct iovec *dvec, int nvecs, int *alength)
27 struct iovec iov[RX_MAXIOVECS];
28 struct sockaddr *sa = NULL; /* filled in by soreceive() with the sender */
31 int haveGlock = ISAFS_GLOCK(); /* remember GLOCK state -- presumably dropped around the blocking receive; confirm in full source */
32 /*AFS_STATCNT(osi_NetReceive); */
34 if (nvecs > RX_MAXIOVECS)
35 osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);
37 for (i = 0; i < nvecs; i++)
43 u.uio_resid = *alength; /* maximum number of bytes we will accept */
44 u.uio_segflg = UIO_SYSSPACE; /* buffers live in kernel space */
54 code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
61 Debugger("afs NetReceive busted"); /* drop into the kernel debugger on an unexpected failure */
68 *alength -= u.uio_resid; /* residual -> bytes actually received */
70 if (sa->sa_family == AF_INET) {
72 *addr = *(struct sockaddr_in *)sa; /* hand the sender's address back to the caller */
74 printf("Unknown socket family %d in NetReceive\n", sa->sa_family);
80 extern int rxk_ListenerPid; /* pid of the rx listener process, set elsewhere */
/*
 * osi_StopListener: look up the listener process by pid -- presumably in
 * order to signal it to terminate; the signalling code is missing from this
 * extraction, so confirm against the full source.
 */
82 osi_StopListener(void)
87 p = pfind(rxk_ListenerPid);
/*
 * osi_NetSend (RXK_LISTENER_ENV variant): send alength bytes, gathered from
 * the dvec iovec array, to *addr over asocket via sosend().
 * NOTE(review): lossy extraction -- the return type, braces, the iov copy
 * loop body, uio_iov assignment, the #else/#endif arms of the FBSD50
 * conditionals and the error/GLOCK handling are missing from view.
 */
93 osi_NetSend(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
94 int nvecs, afs_int32 alength, int istack)
96 register afs_int32 code;
98 struct iovec iov[RX_MAXIOVECS];
100 int haveGlock = ISAFS_GLOCK(); /* remember GLOCK state for restore after the send */
102 AFS_STATCNT(osi_NetSend);
103 if (nvecs > RX_MAXIOVECS)
104 osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);
106 for (i = 0; i < nvecs; i++)
110 u.uio_iovcnt = nvecs;
112 u.uio_resid = alength; /* total bytes to transmit */
113 u.uio_segflg = UIO_SYSSPACE; /* buffers live in kernel space */
114 u.uio_rw = UIO_WRITE;
115 #ifdef AFS_FBSD50_ENV
121 addr->sin_len = sizeof(struct sockaddr_in); /* BSD sockaddr requires sin_len to be set */
128 #ifdef AFS_FBSD50_ENV
130 sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0,
134 sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0, curproc); /* pre-5.0 sosend takes a struct proc, not a thread */
139 Debugger("afs NetSend busted"); /* drop into the kernel debugger on an unexpected failure */
149 /* This code *almost* works :( */
150 static struct protosw parent_proto; /* udp proto switch */
151 static void rxk_input(struct mbuf *am, int iphlen);
152 static void rxk_fasttimo(void);
/*
 * Interception setup (function name not visible in this extraction --
 * presumably rxk_init): walk the inet protocol-switch table, save UDP's
 * original protosw in parent_proto, and splice in rx's own input and
 * fast-timeout handlers so rx sees UDP traffic before the stack does.
 */
154 /* start intercepting basic calls */
157 register struct protosw *tpro, *last;
161 last = inetdomain.dom_protoswNPROTOSW; /* one past the end of the protosw table */
162 for (tpro = inetdomain.dom_protosw; tpro < last; tpro++)
163 if (tpro->pr_protocol == IPPROTO_UDP) {
164 #if 0 /* not exported */
165 /* force UDP checksumming on for AFS */
169 memcpy(&parent_proto, tpro, sizeof(parent_proto)); /* keep the original UDP protosw so we can chain to it */
170 tpro->pr_input = rxk_input;
171 tpro->pr_fasttimo = rxk_fasttimo;
173 * don't bother with pr_drain and pr_ctlinput
174 * until we have something to do
179 osi_Panic("inet:no udp"); /* UDP missing from the protosw table -- cannot continue */
/*
 * rxk_input: replacement pr_input handler for UDP.  Examines each incoming
 * UDP packet; packets addressed to one of rx's registered ports
 * (rxk_ports[]) are checksummed, converted to an rx packet and delivered to
 * rx; everything else is passed through to the saved native UDP input
 * routine (parent_proto.pr_input).
 * NOTE(review): lossy extraction -- braces, several declarations (tu, port,
 * phandle, tlen), the drop/cleanup paths and loop closings are missing from
 * view; comments describe only the visible lines.
 */
184 rxk_input(struct mbuf *am, int iphlen)
187 register unsigned short *tsp;
190 register struct ip *ti;
191 struct udpiphdr *tvu;
195 struct sockaddr_in taddr;
198 int data_len, comp_sum;
203 /* make sure we have base ip and udp headers in first mbuf */
204 if (iphlen > sizeof(struct ip)) {
205 ip_stripoptions(am, NULL); /* discard IP options so the header is a plain struct ip */
206 iphlen = sizeof(struct ip);
209 if (am->m_len < sizeof(struct udpiphdr)) {
210 am = m_pullup(am, sizeof(struct udpiphdr)); /* contiguous ip+udp header needed below */
217 ti = mtod(am, struct ip *);
218 /* skip basic ip hdr */
219 tu = (struct udphdr *)(((char *)ti) + sizeof(struct ip));
221 /* now read the port out */
225 for (tsp = rxk_ports, i = 0; i < MAXRXPORTS; i++) {
226 if (*tsp++ == port) {
227 /* checksum the packet */
229 * Make mbuf data length reflect UDP length.
230 * If not enough data to reflect UDP length, drop.
232 tvu = (struct udpiphdr *)ti;
233 tlen = ntohs((u_short) tvu->ui_ulen);
234 if ((int)ti->ip_len != tlen) {
235 if (tlen > (int)ti->ip_len) {
240 m_adj(am, tlen - (int)ti->ip_len); /* trim mbuf chain to the UDP-declared length */
242 /* deliver packet to rx */
243 taddr.sin_family = AF_INET; /* compute source address */
244 taddr.sin_port = tu->uh_sport;
245 taddr.sin_addr.s_addr = ti->ip_src.s_addr;
246 taddr.sin_len = sizeof(taddr);
247 tvu = (struct udpiphdr *)ti; /* virtual udp structure, for cksum */
248 /* handle the checksum. Note that this code damages the actual ip
249 * header (replacing it with the virtual one, which is the same size),
250 * so we must ensure we get everything out we need, first */
251 if (tu->uh_sum != 0) {
252 /* if the checksum is there, always check it. It's crazy not
253 * to, unless you can really be sure that your
254 * underlying network (and interfaces and drivers and
255 * DMA hardware, etc!) is error-free. First, fill
256 * in entire virtual ip header. */
257 memset(tvu->ui_i.ih_x1, 0, 9); /* zero the pseudo-header pad bytes before checksumming */
258 tvu->ui_len = tvu->ui_ulen;
259 tlen = ntohs((unsigned short)(tvu->ui_ulen));
260 if (in_cksum(am, sizeof(struct ip) + tlen)) {
261 /* checksum, including cksum field, doesn't come out 0, so
262 * this packet is bad */
270 * 28 is IP (20) + UDP (8) header. ulen includes
271 * udp header, and we *don't* tell RX about udp
272 * header either. So, we remove those 8 as well.
274 data_len = ntohs(tu->uh_ulen);
277 if (!(*rxk_GetPacketProc) (&phandle, data_len)) {
278 if (rx_mb_to_packet(am, m_freem, 28, data_len, phandle)) {
279 /* XXX should just increment counter here.. */
280 printf("rx: truncated UDP packet\n");
281 rxi_FreePacket(phandle); /* conversion failed; release the rx packet we allocated */
283 (*rxk_PacketArrivalProc) (phandle, &taddr,
284 rxk_portRocks[i], data_len);
294 /* if we get here, try to deliver packet to udp */
295 if (tproc = parent_proto.pr_input) /* chain to the native UDP input we saved at init */
296 (*tproc) (am, iphlen);
/*
 * rxk_fasttimo (signature not visible in this extraction): rx's replacement
 * pr_fasttimo hook -- raises pending rx events, then chains to UDP's
 * original fast-timeout routine saved in parent_proto.
 */
303 * UDP fast timer to raise events for all but Solaris and NCR.
304 * Called about 5 times per second (at unknown priority?). Must go to
305 * splnet or obtain global lock before touching anything significant.
313 /* do rx fasttimo processing here */
314 rxevent_RaiseEvents(&temp);
315 if (tproc = parent_proto.pr_fasttimo) /* chain to the native UDP fasttimo, if any */
319 /* rx_NetSend - send asize bytes at adata from asocket to host at addr.
321 * Now, why do we allocate a new buffer when we could theoretically use the one
322 * pointed to by adata? Because PRU_SEND returns after queueing the message,
323 * not after sending it. If the sender changes the data after queueing it,
324 * we'd see the already-queued data change. One attempt to fix this without
325 * adding a copy would be to have this function wait until the datagram is
326 * sent; however this doesn't work well. In particular, if a host is down, and
327 * an ARP fails to that host, this packet will be queued until the ARP request
328 * comes back, which could be hours later. We can't block in this routine that
329 * long, since it prevents RPC timeouts from happening.
331 /* XXX In the brave new world, steal the data bufs out of the rx_packet iovec,
332 * and just queue those. XXX
/*
 * trysblock: non-blocking attempt to lock a socket buffer.  Returns -1 if
 * the buffer is already locked, otherwise sets SB_LOCK (and, presumably,
 * returns 0 -- the success return is missing from this extraction).
 * Used instead of sblock() because the caller may be at interrupt level
 * and must not sleep.
 */
335 /* set lock on sockbuf sb; can't call sblock since we're at interrupt level
339 register struct sockbuf *sb;
341 AFS_STATCNT(trysblock);
342 if (sb->sb_flags & SB_LOCK) {
343 return -1; /* can't lock socket */
345 sb->sb_flags |= SB_LOCK;
349 /* We only have to do all the mbuf management ourselves if we can be called at
350 interrupt time. in RXK_LISTENER_ENV, we can just call sosend() */
/*
 * osi_NetSend (non-listener variant): send asize bytes, gathered from the
 * dvec iovec array, to *addr over asocket.  Because it can run at interrupt
 * level it builds an mbuf chain by hand (copying the caller's data so the
 * caller may reuse its buffers immediately) and hands the chain to the
 * protocol's pru_send entry directly, rather than calling sosend().
 * NOTE(review): lossy extraction -- braces, the copy-loop structure, several
 * declarations (i, tdata, tl, tpa, mlen, len, rlen), error paths and the
 * final return are missing from view; comments describe only visible lines.
 */
352 osi_NetSend(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
353 int nvec, afs_int32 asize, int istack)
355 register struct mbuf *tm, *um;
356 register afs_int32 code;
358 struct mbuf *top = 0; /* head of the mbuf chain being built */
359 register struct mbuf *m, **mp;
367 static int before = 0;
370 AFS_STATCNT(osi_NetSend);
371 /* Actually, the Ultrix way is as good as any for us, so we don't bother with
372 * special mbufs any more. Used to think we could get away with not copying
373 * the data to the interface, but there's no way to tell the caller not to
374 * reuse the buffers after sending, so we lost out on that trick anyway */
376 if (trysblock(&asocket->so_snd)) { /* non-blocking sockbuf lock; bail if already held */
382 tdata = dvec[i].iov_base;
383 tl = dvec[i].iov_len;
387 MGETHDR(m, M_DONTWAIT, MT_DATA); /* first mbuf of the chain is a packet header */
389 sbunlock(&asocket->so_snd); /* allocation failed: release the sockbuf lock before giving up */
395 m->m_pkthdr.rcvif = NULL;
397 MGET(m, M_DONTWAIT, MT_DATA);
399 /* can't get an mbuf, give up */
401 m_freem(top); /* free mbuf list we're building */
402 sbunlock(&asocket->so_snd);
407 * WARNING: the `4 * MLEN' is somewhat dubious. It is better than
408 * `NBPG', which may have no relation to `CLBYTES'. Also, `CLBYTES'
409 * may be so large that we never use clusters, resulting in far
410 * too many mbufs being used. It is often better to briefly use
411 * a cluster, even if we are only using a portion of it. Since
412 * we are on the xmit side, it shouldn't end up sitting on a queue
413 * for a potentially unbounded time (except perhaps if we are talking
416 if (asize >= 4 * MLEN) { /* try to get cluster mbuf */
417 /* different algorithms for getting cluster mbuf */
418 MCLGET(m, M_DONTWAIT);
419 if ((m->m_flags & M_EXT) == 0) /* cluster attach failed; fall back to plain mbuf storage */
423 /* now compute usable size */
424 len = MIN(mlen, asize);
425 /* Should I look at MAPPED_MBUFS??? */
428 len = MIN(mlen, asize);
432 top->m_pkthdr.len += len; /* keep the packet-header total length in sync */
433 tpa = mtod(m, caddr_t);
436 memcpy(tpa, tdata, rlen); /* copy caller data so the caller may reuse its buffer immediately */
446 /* shouldn't come here! */
447 asize = 0; /* so we make progress toward completion */
450 tdata = dvec[i].iov_base;
451 tl = dvec[i].iov_len;
463 /* setup mbuf corresponding to destination address */
464 um = m_get(M_DONTWAIT, MT_SONAME);
467 m_freem(top); /* free mbuf chain */
468 sbunlock(&asocket->so_snd);
472 memcpy(mtod(um, caddr_t), addr, sizeof(*addr));
473 addr->sin_len = um->m_len = sizeof(*addr); /* BSD sockaddr requires sin_len; mbuf length must match */
474 /* note that udp_usrreq frees funny mbuf. We hold onto data, but mbuf
475 * around it is gone. */
476 /* haveGlock = ISAFS_GLOCK();
480 /* SOCKET_LOCK(asocket); */
481 /* code = (*asocket->so_proto->pr_usrreq)(asocket, PRU_SEND, tm, um, 0); */
484 Debugger("afs NetSend before");
487 (*asocket->so_proto->pr_usrreqs->pru_send) (asocket, 0, tm,
490 /* SOCKET_UNLOCK(asocket); */
494 sbunlock(&asocket->so_snd); /* release the lock taken by trysblock() above */
499 Debugger("afs NetSend busted");
508 #endif /* AFS_FBSD40_ENV */