}
#endif /* AFS_SUN5_ENV */
+#ifdef AFS_SOCKPROXY_ENV
+/**
+ * Deallocate packets.
+ *
+ * Frees each packet's payload and then the packet array itself. The caller's
+ * pointer is cleared first so the list cannot be freed twice.
+ *
+ * @param[inout] a_pktlist list of packets to be freed (*a_pktlist is set to
+ *		 NULL; calling with *a_pktlist == NULL is a no-op)
+ * @param[in] a_npkts number of packets
+ */
+static void
+sockproxy_pkts_free(struct afs_pkt_hdr **a_pktlist, size_t a_npkts)
+{
+    int pkt_i;
+    struct afs_pkt_hdr *pktlist = *a_pktlist;
+
+    if (pktlist == NULL) {
+	return;
+    }
+    /* clear the caller's pointer before freeing to guard against reuse */
+    *a_pktlist = NULL;
+
+    for (pkt_i = 0; pkt_i < a_npkts; pkt_i++) {
+	struct afs_pkt_hdr *pkt = &pktlist[pkt_i];
+	afs_osi_Free(pkt->payload, pkt->size);
+	pkt->payload = NULL;
+    }
+    afs_osi_Free(pktlist, a_npkts * sizeof(pktlist[0]));
+}
+
+/**
+ * Copy packets from user-space.
+ *
+ * Copies the array of packet headers from user-space, then copies each
+ * packet's payload into a freshly allocated kernel buffer.
+ *
+ * @param[in] a_uaddr address of list of packets in user-space
+ * @param[out] a_pktlist packets copied from user-space (on success the caller
+ *		 owns the list and must release it with sockproxy_pkts_free)
+ * @param[in] a_npkts number of packets to be copied
+ *
+ * @return errno error codes.
+ */
+static int
+sockproxy_pkts_copyin(user_addr_t a_uaddr, struct afs_pkt_hdr **a_pktlist,
+		      size_t a_npkts)
+{
+    int code, pkt_i;
+    struct afs_pkt_hdr *pktlist_u = NULL;
+    struct afs_pkt_hdr *pktlist_k = NULL;
+
+    *a_pktlist = NULL;
+    if (a_npkts == 0) {
+	return 0;
+    }
+    if (a_npkts > AFS_SOCKPROXY_PKT_MAX) {
+	return E2BIG;
+    }
+
+    /* temporary kernel copy of the user-space header array */
+    pktlist_u = afs_osi_Alloc(a_npkts * sizeof(pktlist_u[0]));
+    if (pktlist_u == NULL) {
+	code = ENOMEM;
+	goto done;
+    }
+
+    pktlist_k = afs_osi_Alloc(a_npkts * sizeof(pktlist_k[0]));
+    if (pktlist_k == NULL) {
+	code = ENOMEM;
+	goto done;
+    }
+    memset(pktlist_k, 0, a_npkts * sizeof(pktlist_k[0]));
+
+    AFS_COPYIN(a_uaddr, pktlist_u, a_npkts * sizeof(pktlist_u[0]), code);
+    if (code != 0) {
+	goto done;
+    }
+
+    for (pkt_i = 0; pkt_i < a_npkts; pkt_i++) {
+	struct afs_pkt_hdr *pkt_k = &pktlist_k[pkt_i];
+	struct afs_pkt_hdr *pkt_u = &pktlist_u[pkt_i];
+
+	if (pkt_u->size > AFS_SOCKPROXY_PAYLOAD_MAX) {
+	    code = E2BIG;
+	    goto done;
+	}
+	/*
+	 * Notice that pkt_u->payload points to a user-space address. When
+	 * copying the data from pkt_u to pkt_k, make sure that pkt_k->payload
+	 * points to a valid address in kernel-space.
+	 */
+	*pkt_k = *pkt_u;
+	pkt_k->payload = afs_osi_Alloc(pkt_k->size);
+	if (pkt_k->payload == NULL) {
+	    code = ENOMEM;
+	    goto done;
+	}
+
+	AFS_COPYIN((user_addr_t)pkt_u->payload, pkt_k->payload, pkt_k->size,
+		   code);
+	if (code != 0) {
+	    goto done;
+	}
+    }
+
+    /* transfer ownership of the fully-populated list to the caller */
+    *a_pktlist = pktlist_k;
+    pktlist_k = NULL;
+    code = 0;
+
+ done:
+    sockproxy_pkts_free(&pktlist_k, a_npkts);
+    if (pktlist_u != NULL) {
+	/* guard the free: the ENOMEM path above can reach here with NULL */
+	afs_osi_Free(pktlist_u, a_npkts * sizeof(pktlist_u[0]));
+    }
+
+    return code;
+}
+
+/**
+ * Copy packets to user-space.
+ *
+ * Reads the user-space header array first (to learn the destination payload
+ * pointers and buffer sizes), copies each payload out, then writes the
+ * updated header array back.
+ *
+ * @param[in] a_uaddr dst address of list of packets in user-space
+ * @param[in] a_pktlist packets to be copied to user-space
+ * @param[in] a_npkts number of packets to be copied
+ *
+ * @return 0 on success; non-zero otherwise.
+ */
+static int
+sockproxy_pkts_copyout(user_addr_t a_uaddr, struct afs_pkt_hdr *a_pktlist,
+		       size_t a_npkts)
+{
+    int code, pkt_i;
+    struct afs_pkt_hdr *pktlist_u = NULL;
+    struct afs_pkt_hdr *pktlist_k = a_pktlist;
+
+    if (a_npkts == 0) {
+	return 0;
+    }
+    if (a_npkts > AFS_SOCKPROXY_PKT_MAX) {
+	return E2BIG;
+    }
+    if (a_pktlist == NULL) {
+	return EINVAL;
+    }
+
+    pktlist_u = afs_osi_Alloc(a_npkts * sizeof(pktlist_u[0]));
+    if (pktlist_u == NULL) {
+	code = ENOMEM;
+	goto done;
+    }
+
+    /* fetch the user's headers so we know where the payload buffers live */
+    AFS_COPYIN(a_uaddr, pktlist_u, a_npkts * sizeof(pktlist_u[0]), code);
+    if (code != 0) {
+	goto done;
+    }
+
+    for (pkt_i = 0; pkt_i < a_npkts; pkt_i++) {
+	struct afs_pkt_hdr *pkt_k = &pktlist_k[pkt_i];
+	struct afs_pkt_hdr *pkt_u = &pktlist_u[pkt_i];
+	void *payload_uaddr;
+
+	if (pkt_k->size > pkt_u->size) {
+	    /* user buffer is too small for this packet */
+	    code = ENOSPC;
+	    goto done;
+	}
+
+	/* Copy pkt_k -> pkt_u, but preserve pkt_u->payload */
+	payload_uaddr = pkt_u->payload;
+	*pkt_u = *pkt_k;
+	pkt_u->payload = payload_uaddr;
+
+	AFS_COPYOUT(pkt_k->payload, (user_addr_t)pkt_u->payload, pkt_k->size,
+		    code);
+	if (code != 0) {
+	    goto done;
+	}
+    }
+
+    /* write the updated headers (sizes, addrs, ports) back to user-space */
+    AFS_COPYOUT(pktlist_u, a_uaddr, a_npkts * sizeof(pktlist_u[0]), code);
+    if (code != 0) {
+	goto done;
+    }
+
+ done:
+    if (pktlist_u != NULL) {
+	/* guard the free: the ENOMEM path above can reach here with NULL */
+	afs_osi_Free(pktlist_u, a_npkts * sizeof(pktlist_u[0]));
+    }
+    return code;
+}
+
+/**
+ * Receive / send packets from / to user-space.
+ *
+ * Kernel backend of the AFSOP_SOCKPROXY_HANDLER syscall: copies the afsd
+ * process' response in, hands it to the rx layer via rxk_SockProxyReply()
+ * (which blocks until a new request is available), then copies the new
+ * request (and any packets to send) back out to the afsd process.
+ *
+ * @param[in] a_parm_uspc afs_uspc_param struct
+ * @param[in] a_parm_pkts packets to be received / sent
+ *
+ * @return 0 on success; non-zero otherwise.
+ */
+static int
+sockproxy_handler(user_addr_t a_parm_uspc, user_addr_t a_parm_pkts)
+{
+    int code;
+    size_t orig_npkts, npkts;
+    struct afs_uspc_param uspc;
+
+    struct afs_pkt_hdr *pkts_recv;
+    struct afs_pkt_hdr *pkts_send;
+
+    /* drop GLOCK for the duration; re-acquired at 'done' */
+    AFS_GUNLOCK();
+
+    orig_npkts = 0;
+    memset(&uspc, 0, sizeof(uspc));
+
+    pkts_recv = NULL;
+    pkts_send = NULL;
+
+    /* get response from user-space */
+    AFS_COPYIN(a_parm_uspc, &uspc, sizeof(uspc), code);
+    if (code != 0) {
+	afs_warn("afs: AFSOP_SOCKPROXY_HANDLER can't read uspc\n");
+	goto done;
+    }
+
+    /* remember the caller's packet-buffer capacity for the copyout below */
+    npkts = uspc.req.usp.npkts;
+    orig_npkts = npkts;
+
+    if (uspc.reqtype == AFS_USPC_SOCKPROXY_RECV && npkts > 0) {
+	/* copyin packets in from user-space */
+	code = sockproxy_pkts_copyin(a_parm_pkts, &pkts_recv, npkts);
+	if (code) {
+	    afs_warn("afs: AFSOP_SOCKPROXY_HANDLER can't read pkts\n");
+	    goto done;
+	}
+    }
+
+    /*
+     * send response from user-space (if any) to the rx layer and wait for a
+     * new request.
+     */
+    code = rxk_SockProxyReply(&uspc, pkts_recv, &pkts_send);
+    if (code) {
+	afs_warn("afs: AFSOP_SOCKPROXY_HANDLER rxk_SockProxyReply failed\n");
+	goto done;
+    }
+
+    /* send request to user-space process */
+    AFS_COPYOUT(&uspc, a_parm_uspc, sizeof(uspc), code);
+    if (code) {
+	afs_warn("afs: AFSOP_SOCKPROXY_HANDLER can't write uspc\n");
+	goto done;
+    }
+
+    npkts = uspc.req.usp.npkts;
+    if (uspc.reqtype == AFS_USPC_SOCKPROXY_SEND && npkts > 0) {
+	/* check if process allocated enough memory to receive the packets */
+	if (npkts > orig_npkts) {
+	    code = ENOSPC;
+	    goto done;
+	}
+
+	/* copyout packets to user-space */
+	code = sockproxy_pkts_copyout(a_parm_pkts, pkts_send, npkts);
+	if (code != 0) {
+	    afs_warn("afs: AFSOP_SOCKPROXY_HANDLER can't write pkts\n");
+	    goto done;
+	}
+    }
+ done:
+    /* pkts_send is owned by the SockProxyRequest caller; do not free it */
+    sockproxy_pkts_free(&pkts_recv, orig_npkts);
+    AFS_GLOCK();
+
+    return code;
+}
+#endif /* AFS_SOCKPROXY_ENV */
+
#ifdef AFS_DARWIN100_ENV
# define AFSKPTR(X) k ## X
int
afs_volume_ttl = parm2;
code = 0;
}
+#ifdef AFS_SOCKPROXY_ENV
+ } else if (parm == AFSOP_SOCKPROXY_HANDLER) {
+ code = sockproxy_handler(AFSKPTR(parm2), AFSKPTR(parm3));
+#endif
} else {
code = EINVAL;
}
#undef VIRTUE
#undef VICE
+#ifdef AFS_SOCKPROXY_ENV
+# include <sys/types.h>
+# include <sys/socket.h>
+#endif
#define CACHEINFOFILE "cacheinfo"
#define DCACHEFILE "CacheItems"
}
#endif
+#ifdef AFS_SOCKPROXY_ENV
+
+# define AFS_SOCKPROXY_RECV_IDX 0
+# define AFS_SOCKPROXY_INIT_IDX 1
+
+/*
+ * Must be less than or equal to the limits supported by libafs:
+ * AFS_SOCKPROXY_PKT_MAX and AFS_SOCKPROXY_PAYLOAD_MAX.
+ */
+# define AFS_SOCKPROXY_PKT_ALLOC 32
+# define AFS_SOCKPROXY_PAYLOAD_ALLOC 2832
+
+/**
+ * Create socket, set send and receive buffer size, and bind a name to it.
+ *
+ * @param[in] a_addr address to be assigned to the socket (passed through
+ *		 unmodified from the kernel; presumably network byte order)
+ * @param[in] a_port port to be assigned to the socket (same)
+ * @param[out] a_sock socket
+ *
+ * @return 0 on success; errno otherwise.
+ */
+static int
+SockProxyStart(afs_int32 a_addr, afs_int32 a_port, int *a_sock)
+{
+    int code;
+    int attempt_i;
+    int blen, bsize;
+    int sock;
+    struct sockaddr_in ip4;
+
+    *a_sock = -1;
+
+    /* create an endpoint for communication */
+    sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+    if (sock < 0) {
+	code = errno;
+	goto done;
+    }
+
+    /* set options on socket */
+    blen = 50000;
+    bsize = sizeof(blen);
+    for (attempt_i = 0; attempt_i < 2; attempt_i++) {
+	code = setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &blen, bsize);
+	code |= setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &blen, bsize);
+	if (code == 0) {
+	    break;
+	}
+	/* setsockopt didn't succeed. try a smaller size. */
+	blen = 32766;
+    }
+    if (code != 0) {
+	code = EINVAL;
+	goto done;
+    }
+
+    /*
+     * assign addr to the socket. zero the sockaddr first so sin_zero (and
+     * sin_len on Darwin) don't carry stack garbage into bind().
+     */
+    memset(&ip4, 0, sizeof(ip4));
+    ip4.sin_family = AF_INET;
+    ip4.sin_port = a_port;
+    ip4.sin_addr.s_addr = a_addr;
+
+    code = bind(sock, (struct sockaddr *)&ip4, sizeof(ip4));
+    if (code != 0) {
+	code = errno;
+	goto done;
+    }
+
+    /* success; disarm the cleanup close below */
+    *a_sock = sock;
+    sock = -1;
+
+ done:
+    if (sock >= 0) {
+	close(sock);
+    }
+    return code;
+}
+
+/**
+ * Create and initialize socket with address received from kernel-space.
+ *
+ * Loops on the AFSOP_SOCKPROXY_HANDLER syscall until the kernel sends an
+ * AFS_USPC_SOCKPROXY_START request and the socket is created successfully.
+ * Exits the process if the kernel responds with AFS_USPC_SHUTDOWN.
+ *
+ * @param[in] a_idx index of the process responsible for this task
+ * @param[out] a_sock socket
+ */
+static void
+SockProxyStartProc(int a_idx, int *a_sock)
+{
+    int code, initialized;
+    struct afs_uspc_param uspc;
+
+    initialized = 0;
+
+    memset(&uspc, 0, sizeof(uspc));
+    uspc.req.usp.idx = a_idx;
+
+    while (!initialized) {
+	uspc.reqtype = AFS_USPC_SOCKPROXY_START;
+
+	/* uspc carries our previous retval in and the new request out */
+	code = afsd_syscall(AFSOP_SOCKPROXY_HANDLER, &uspc, NULL);
+	if (code) {
+	    /* Error; try again. */
+	    uspc.retval = -1;
+	    sleep(10);
+	    continue;
+	}
+
+	switch (uspc.reqtype) {
+	case AFS_USPC_SHUTDOWN:
+	    exit(0);
+	    break;
+
+	case AFS_USPC_SOCKPROXY_START:
+	    uspc.retval =
+		SockProxyStart(uspc.req.usp.addr, uspc.req.usp.port, a_sock);
+	    if (uspc.retval == 0) {
+		/* We've setup the socket successfully. */
+		initialized = 1;
+	    }
+	    break;
+
+	default:
+	    /* Error; try again. We must handle AFS_USPC_SOCKPROXY_START before
+	     * doing anything else. */
+	    uspc.retval = -1;
+	    sleep(10);
+	}
+    }
+    /* Startup is done. */
+}
+
+/**
+ * Send list of packets received from kernel-space.
+ *
+ * Stops at the first sendmsg() failure and reports how many packets went out.
+ *
+ * @param[in] a_sock socket
+ * @param[in] a_pktlist list of packets
+ * @param[in] a_npkts number of packets
+ *
+ * @return number of packets successfully sent; -1 if couldn't send any packet.
+ */
+static int
+SockProxySend(int a_sock, struct afs_pkt_hdr *a_pktlist, int a_npkts)
+{
+    int code;
+    int pkt_i, n_sent;
+
+    n_sent = 0;
+
+    for (pkt_i = 0; pkt_i < a_npkts; pkt_i++) {
+	struct afs_pkt_hdr *pkt = &a_pktlist[pkt_i];
+	struct sockaddr_in addr;
+	struct iovec iov;
+	struct msghdr msg;
+
+	memset(&iov, 0, sizeof(iov));
+	memset(&msg, 0, sizeof(msg));
+	memset(&addr, 0, sizeof(addr));
+
+	/*
+	 * Fill in the destination. The memset above leaves sin_family as
+	 * AF_UNSPEC, which sendmsg() may reject; set it explicitly.
+	 * pkt->addr/port are passed through as provided by the kernel.
+	 */
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = pkt->addr;
+	addr.sin_port = pkt->port;
+
+	iov.iov_base = pkt->payload;
+	iov.iov_len = pkt->size;
+
+	msg.msg_name = &addr;
+	msg.msg_namelen = sizeof(addr);
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	code = sendmsg(a_sock, &msg, 0);
+	if (code < 0) {
+	    /* stop at the first failure; report what we did send */
+	    break;
+	}
+	n_sent++;
+    }
+
+    if (n_sent > 0) {
+	return n_sent;
+    }
+    /* NOTE(review): a_npkts == 0 also lands here and yields -1 */
+    return -1;
+}
+
+/**
+ * Alloc maximum number of packets, each with the largest payload possible.
+ *
+ * Aborts via assertion on allocation failure.
+ *
+ * @param[out] a_pktlist allocated packets
+ * @param[out] a_npkts number of packets allocated
+ */
+static void
+pkts_alloc(struct afs_pkt_hdr **a_pktlist, int *a_npkts)
+{
+    struct afs_pkt_hdr *list;
+    int count = AFS_SOCKPROXY_PKT_ALLOC;
+    int i;
+
+    list = calloc(count, sizeof(list[0]));
+    opr_Assert(list != NULL);
+
+    for (i = 0; i < count; i++) {
+	list[i].size = AFS_SOCKPROXY_PAYLOAD_ALLOC;
+	list[i].payload = calloc(1, list[i].size);
+	opr_Assert(list[i].payload != NULL);
+    }
+
+    *a_pktlist = list;
+    *a_npkts = count;
+}
+
+/**
+ * Reset pkt->size for all packets.
+ *
+ * Restores each packet's size to the full payload capacity, undoing any
+ * smaller size recorded by an earlier receive.
+ *
+ * @param[in] a_pktlist list of packets
+ * @param[in] a_npkts number of packets
+ */
+static void
+pkts_resetsize(struct afs_pkt_hdr *a_pktlist, int a_npkts)
+{
+    struct afs_pkt_hdr *pkt;
+
+    for (pkt = a_pktlist; pkt < &a_pktlist[a_npkts]; pkt++) {
+	pkt->size = AFS_SOCKPROXY_PAYLOAD_ALLOC;
+    }
+}
+
+/**
+ * Serve packet sending requests from kernel-space.
+ *
+ * Never returns: loops on the AFSOP_SOCKPROXY_HANDLER syscall, sending the
+ * packets the kernel hands us and reporting the result back on the next
+ * iteration. Exits the process on AFS_USPC_SHUTDOWN.
+ *
+ * @param[in] a_sock socket
+ * @param[in] a_idx index of the process responsible for this task
+ * @param[in] a_recvpid pid of process responsible for receiving packets
+ *		 (used to simplify shutdown procedures)
+ */
+static void
+SockProxySendProc(int a_sock, int a_idx, int a_recvpid)
+{
+    int code, n_pkts;
+    struct afs_pkt_hdr *pktlist;
+    struct afs_uspc_param uspc;
+
+    n_pkts = 0;
+    pktlist = NULL;
+    memset(&uspc, 0, sizeof(uspc));
+
+    pkts_alloc(&pktlist, &n_pkts);
+    uspc.reqtype = AFS_USPC_SOCKPROXY_SEND;
+    uspc.req.usp.idx = a_idx;
+
+    while (1) {
+	/* advertise our full buffer capacity; the kernel overwrites npkts
+	 * (and the packet sizes) with each request */
+	uspc.req.usp.npkts = n_pkts;
+	pkts_resetsize(pktlist, n_pkts);
+
+	code = afsd_syscall(AFSOP_SOCKPROXY_HANDLER, &uspc, pktlist);
+	if (code) {
+	    uspc.retval = -1;
+	    sleep(10);
+	    continue;
+	}
+
+	switch (uspc.reqtype) {
+	case AFS_USPC_SHUTDOWN:
+	    if (a_recvpid != 0) {
+		/*
+		 * We're shutting down, so kill the SockProxyReceiveProc
+		 * process. We don't wait for that process to get its own
+		 * AFS_USPC_SHUTDOWN response, since it may be stuck in
+		 * recvmsg() waiting for packets from the net.
+		 */
+		kill(a_recvpid, SIGKILL);
+	    }
+	    exit(0);
+	    break;
+
+	case AFS_USPC_SOCKPROXY_SEND:
+	    uspc.retval = SockProxySend(a_sock, pktlist, uspc.req.usp.npkts);
+	    break;
+
+	default:
+	    /* Some other operation? */
+	    uspc.retval = -1;
+	    sleep(10);
+	}
+    }
+    /* never reached */
+}
+
+/**
+ * Receive list of packets from the socket received as an argument.
+ *
+ * The first recvmsg() blocks; once at least one packet has arrived, the
+ * remaining slots are filled non-blocking (MSG_DONTWAIT) to drain whatever
+ * is already queued without stalling.
+ *
+ * @param[in] a_sock socket
+ * @param[out] a_pkts packets received
+ * @param[in] a_maxpkts maximum number of packets we can receive
+ *
+ * @return number of packets received.
+ */
+static int
+SockProxyRecv(int a_sock, struct afs_pkt_hdr *a_pkts, int a_maxpkts)
+{
+    int pkt_i, n_recv;
+    int flags;
+
+    struct iovec iov;
+    struct msghdr msg;
+    struct sockaddr_in from;
+
+    n_recv = 0;
+    flags = 0;
+
+    for (pkt_i = 0; pkt_i < a_maxpkts; pkt_i++) {
+	struct afs_pkt_hdr *pkt = &a_pkts[pkt_i];
+	ssize_t nbytes;
+
+	/* offer the full buffer; shrunk to the actual size below */
+	pkt->size = AFS_SOCKPROXY_PAYLOAD_ALLOC;
+
+	memset(&iov, 0, sizeof(iov));
+	iov.iov_base = pkt->payload;
+	iov.iov_len = pkt->size;
+
+	memset(&from, 0, sizeof(from));
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_name = &from;
+	msg.msg_namelen = sizeof(from);
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	nbytes = recvmsg(a_sock, &msg, flags);
+	if (nbytes < 0) {
+	    /* error, or EWOULDBLOCK once the queue is drained */
+	    break;
+	}
+
+	/* record the actual payload length and the sender's address */
+	pkt->size = nbytes;
+	pkt->addr = from.sin_addr.s_addr;
+	pkt->port = from.sin_port;
+
+	n_recv++;
+	flags = MSG_DONTWAIT;
+    }
+    return n_recv;
+}
+
+/**
+ * Receive packets addressed to kernel-space and deliver those packets to it.
+ *
+ * Never returns: loops forever, receiving packets from the net and handing
+ * them to the kernel via the AFSOP_SOCKPROXY_HANDLER syscall. Exits the
+ * process on AFS_USPC_SHUTDOWN (or when killed by a sender proc during
+ * shutdown).
+ *
+ * @param[in] a_sock socket
+ * @param[in] a_idx index of the process responsible for this task
+ */
+static void
+SockProxyReceiveProc(int a_sock, int a_idx)
+{
+    int code, n_pkts;
+    struct afs_pkt_hdr *pktlist;
+    struct afs_uspc_param uspc;
+
+    n_pkts = 0;
+    pktlist = NULL;
+    pkts_alloc(&pktlist, &n_pkts);
+
+    memset(&uspc, 0, sizeof(uspc));
+    uspc.req.usp.idx = a_idx;
+    uspc.req.usp.npkts = 0;
+
+    while (1) {
+	uspc.reqtype = AFS_USPC_SOCKPROXY_RECV;
+
+	/* deliver the packets gathered on the previous iteration (npkts may
+	 * be 0 on the first pass) and return to wait for more */
+	code = afsd_syscall(AFSOP_SOCKPROXY_HANDLER, &uspc, pktlist);
+	if (code) {
+	    uspc.retval = -1;
+	    sleep(10);
+	    continue;
+	}
+
+	switch (uspc.reqtype) {
+	case AFS_USPC_SHUTDOWN:
+	    exit(0);
+	    break;
+
+	case AFS_USPC_SOCKPROXY_RECV:
+	    uspc.req.usp.npkts = SockProxyRecv(a_sock, pktlist, n_pkts);
+	    uspc.retval = 0;
+	    break;
+
+	default:
+	    /* Some other operation? */
+	    uspc.retval = -1;
+	    uspc.req.usp.npkts = 0;
+	    sleep(10);
+	}
+    }
+    /* never reached */
+}
+
+/**
+ * Start processes responsible for sending and receiving packets for libafs.
+ *
+ * Daemonizes, waits for the kernel's AFS_USPC_SOCKPROXY_START request to set
+ * up the shared UDP socket, then forks one receiver process and
+ * (AFS_SOCKPROXY_NPROCS - 2) extra sender processes; this process itself
+ * becomes the remaining sender.
+ *
+ * @param[in] a_rock not used
+ */
+static void *
+sockproxy_thread(void *a_rock)
+{
+    int idx;
+    int sock;
+    int recvpid;
+
+    sock = -1;
+    /*
+     * Since the socket proxy handler runs as a user process,
+     * need to drop the controlling TTY, etc.
+     */
+    if (afsd_daemon(0, 0) == -1) {
+	printf("Error starting socket proxy handler: %s\n", strerror(errno));
+	exit(1);
+    }
+
+    /*
+     * Before we do anything else, we must first wait for a
+     * AFS_USPC_SOCKPROXY_START request to setup our udp socket.
+     */
+    SockProxyStartProc(AFS_SOCKPROXY_INIT_IDX, &sock);
+
+    /* Now fork our AFS_USPC_SOCKPROXY_RECV process. */
+    recvpid = fork();
+    opr_Assert(recvpid >= 0);
+    if (recvpid == 0) {
+	/* child: inherits 'sock'; never returns */
+	SockProxyReceiveProc(sock, AFS_SOCKPROXY_RECV_IDX);
+	exit(1);
+    }
+
+    /* Now fork our AFS_USPC_SOCKPROXY_SEND processes. */
+    for (idx = 0; idx < AFS_SOCKPROXY_NPROCS; idx++) {
+	pid_t child;
+
+	if (idx == AFS_SOCKPROXY_RECV_IDX) {
+	    /* Receiver already forked. */
+	    continue;
+	}
+	if (idx == AFS_SOCKPROXY_INIT_IDX) {
+	    /* We'll start the handler for this index in this process, below. */
+	    continue;
+	}
+
+	child = fork();
+	opr_Assert(child >= 0);
+	if (child == 0) {
+	    SockProxySendProc(sock, idx, 0);
+	    exit(1);
+	}
+    }
+    /* this process handles the INIT index and owns killing the receiver
+     * (recvpid) at shutdown; SockProxySendProc never returns */
+    SockProxySendProc(sock, AFS_SOCKPROXY_INIT_IDX, recvpid);
+
+    return NULL;
+}
+#endif /* AFS_SOCKPROXY_ENV */
+
static void *
afsdb_thread(void *rock)
{
fork_rx_syscall(rn, AFSOP_RXEVENT_DAEMON);
#endif
+#ifdef AFS_SOCKPROXY_ENV
+ if (afsd_verbose)
+ printf("%s: Forking socket proxy handlers.\n", rn);
+ afsd_fork(0, sockproxy_thread, NULL);
+#endif
+
if (enable_afsdb) {
if (afsd_verbose)
printf("%s: Forking AFSDB lookup handler.\n", rn);
params[0] = CAST_SYSCALL_PARAM((va_arg(ap, void *)));
break;
case AFSOP_ADDCELLALIAS:
+#ifdef AFS_SOCKPROXY_ENV
+ case AFSOP_SOCKPROXY_HANDLER:
+#endif
params[0] = CAST_SYSCALL_PARAM((va_arg(ap, void *)));
params[1] = CAST_SYSCALL_PARAM((va_arg(ap, void *)));
break;
#include <afsconfig.h>
#include "afs/param.h"
+#ifdef AFS_SOCKPROXY_ENV
+# include <afs/afs_args.h>
+#endif
#include "rx/rx_kcommon.h"
#include "rx/rx_atomic.h"
#endif
#ifdef RXK_UPCALL_ENV
+
+struct afs_pkt_hdr;
+static void rx_upcall_common(socket_t so, struct afs_pkt_hdr *pkt);
+
+# ifdef AFS_SOCKPROXY_ENV
+
+/*
+ * Here is the kernel portion of our implementation of a userspace "proxy" for
+ * socket operations (aka "sockproxy"). This is required on macOS after 10.15,
+ * because DARWIN no longer provides an in-kernel socket API, so we need to get
+ * userspace to handle our networking. Here's how it generally works:
+ *
+ * During startup, afsd forks a process that invokes the
+ * AFSOP_SOCKPROXY_HANDLER syscall, which takes a struct afs_uspc_param, just
+ * like with AFSOP_BKG_HANDLER. Each sockproxy afsd process has an "index"
+ * associated with it, which ties the process to the corresponding entry in the
+ * rx_sockproxy_ch.proc array. We have a few afsd sockproxy processes:
+ *
+ * - The receiver process, which calls recvmsg() to receive packets from the
+ * net. This always uses index 0.
+ *
+ * - The sender processes, which call sendmsg() to send packets to the net.
+ * These run on indices 1 through (AFS_SOCKPROXY_NPROCS - 1).
+ *
+ * During startup, we only have one afsd sockproxy process, which waits for a
+ * AFS_USPC_SOCKPROXY_START message. When afsd gets that message, it creates
+ * the socket for sending and receiving packets from the net, and then fork()s
+ * to create the other sockproxy procs, which then send and receive packets.
+ *
+ * When we need to send a packet, our osi_NetSend goes through
+ * SockProxyRequest, which finds an idle afsd sockproxy process (or waits for
+ * one to exist), and populates the relevant arguments, wakes up the afsd
+ * process, and waits for a response. The afsd process sends the packet, then
+ * responds with an error code, going through rxk_SockProxyReply which looks up
+ * the corresponding request and wakes up the SockProxyRequest caller.
+ *
+ * When receiving packets, there is no request/reply mechanism. Instead, the
+ * afsd process just receives whatever packets it can get, and submits them via
+ * AFS_USPC_SOCKPROXY_RECV. The packets are processed via rx_upcall_sockproxy,
+ * similar to the rx_upcall mechanism that existed for previous versions of
+ * DARWIN.
+ *
+ * When the client has started shutting down, all calls to
+ * AFSOP_SOCKPROXY_HANDLER will respond with AFS_USPC_SHUTDOWN, which tells the
+ * afsd process to exit. We wait for all non-receiver sockproxy procs to get
+ * the AFS_USPC_SHUTDOWN message before continuing with the shutdown process.
+ *
+ * For the receiver process, we can't just wait for it to get the
+ * AFS_USPC_SHUTDOWN message, since it may be blocked in recvmsg(). Instead, we
+ * rely on one of the other afsd sockproxy procs to kill the receiver process
+ * with a signal. If the receiver process is inside an afs syscall, it'll get
+ * an AFS_USPC_SHUTDOWN response anyway, but if it's in userspace, it'll just get
+ * killed by the signal. We know that someone must be handling killing the
+ * receiver process, since we wait for all of the other sockproxy procs to be
+ * notified of the shutdown.
+ */
+
+/* Per-afsd-process state; all fields protected by rx_sockproxy_channel.lock */
+struct rx_sockproxy_proc {
+    struct opr_queue entry;	/* chain of processes available for use */
+    int inited;			/* process successfully initialized */
+    int op;			/* operation to be performed (AFS_USPC_SOCKPROXY_*) */
+    char pending;		/* waiting for a reply from userspace */
+    char complete;		/* response received from userspace */
+    int ret;			/* value returned by op executed on userspace */
+    afs_uint32 addr;		/* ipv4 address for socket */
+    afs_uint32 port;		/* port for socket */
+    struct afs_pkt_hdr *pkts;	/* packets to be sent (owned by the
+				 * SockProxyRequest caller) */
+    int npkts;			/* number of packets to be sent */
+    afs_kcondvar_t cv_op;	/* request / reply received (lock: rx_sockproxy_channel.lock) */
+};
+
+struct rx_sockproxy_channel {
+    int shutdown;		/* set once AFS_USPC_SHUTDOWN is dispatched */
+    /*
+     * processes running on userspace, each with a specific role:
+     * proc[0]: recvmsg.
+     * proc[1]: socket, setsockopt, bind, and sendmsg.
+     * ...
+     * proc[AFS_SOCKPROXY_NPROCS-2]: sendmsg.
+     * proc[AFS_SOCKPROXY_NPROCS-1]: sendmsg.
+     */
+    struct rx_sockproxy_proc proc[AFS_SOCKPROXY_NPROCS];
+    struct opr_queue procq;	/* procs idle and available for requests */
+    afs_kcondvar_t cv_procq;	/* signalled when procq gains an entry */
+    afs_kmutex_t lock;		/* protects this struct and its procs */
+};
+/* communication channel between SockProxyRequest and rxk_SockProxyReply */
+static struct rx_sockproxy_channel rx_sockproxy_ch;
+
+/* osi_socket returned by rxk_NewSocketHost on success */
+static osi_socket *SockProxySocket;
+
+/* number of afsd sockproxy processes running (not counting the receive
+ * process). Protected by GLOCK. */
+static int afs_sockproxy_procs;
+
+/**
+ * Return process that provides the given operation.
+ *
+ * Blocks until an idle proc appears on procq (or shutdown begins). The
+ * returned proc is removed from procq; SockProxyRequest re-queues it after
+ * the reply arrives.
+ *
+ * @param[in] a_op operation
+ *
+ * @return process on success; NULL otherwise.
+ *
+ * @pre rx_sockproxy_ch.lock held
+ */
+static struct rx_sockproxy_proc *
+SockProxyGetProc(int a_op)
+{
+    struct rx_sockproxy_proc *proc = NULL;
+    struct rx_sockproxy_channel *ch = &rx_sockproxy_ch;
+
+    switch (a_op) {
+    case AFS_USPC_SHUTDOWN:
+    case AFS_USPC_SOCKPROXY_START:
+    case AFS_USPC_SOCKPROXY_SEND:
+	/* These are normal operations to process a request for. */
+	break;
+    default:
+	/*
+	 * Any other request shouldn't be going through the SockProxyRequest
+	 * framework, so something weird is going on.
+	 */
+	printf("afs: SockProxyGetProc internal error: op %d not found.\n", a_op);
+	return NULL;
+    }
+
+    while (!ch->shutdown && opr_queue_IsEmpty(&ch->procq)) {
+	/* no process available; CV_WAIT drops and re-acquires ch->lock */
+	CV_WAIT(&ch->cv_procq, &ch->lock);
+    }
+    if (ch->shutdown) {
+	return NULL;
+    }
+    proc = opr_queue_First(&ch->procq, struct rx_sockproxy_proc, entry);
+    opr_queue_Remove(&proc->entry);
+
+    return proc;
+}
+
+/**
+ * Delegate given operation to user-space process.
+ *
+ * @param[in] a_op operation
+ * @param[in] a_sin address assigned to socket (required for
+ *		 AFS_USPC_SOCKPROXY_START)
+ * @param[in] a_pkts packets to be sent by process (required for
+ *		 AFS_USPC_SOCKPROXY_SEND; ownership stays with the caller)
+ * @param[in] a_npkts number of pkts
+ *
+ * @return value returned by the requested operation; -1 on error.
+ */
+static int
+SockProxyRequest(int a_op, struct sockaddr_in *a_sin,
+		 struct afs_pkt_hdr *a_pkts, int a_npkts)
+{
+    int ret = -1;
+    struct rx_sockproxy_channel *ch = &rx_sockproxy_ch;
+    struct rx_sockproxy_proc *proc;
+
+    /*
+     * Validate the op-specific arguments before dequeueing a proc. If we
+     * bailed out after SockProxyGetProc(), the proc would have been removed
+     * from procq and never re-appended, permanently leaking that proc slot.
+     */
+    if (a_op == AFS_USPC_SOCKPROXY_START && a_sin == NULL) {
+	printf("SockProxyRequest: _SOCKPROXY_START given NULL sin\n");
+	return -1;
+    }
+    if (a_op == AFS_USPC_SOCKPROXY_SEND && a_pkts == NULL) {
+	printf("SockProxyRequest: _SOCKPROXY_SEND given NULL pkts\n");
+	return -1;
+    }
+
+    MUTEX_ENTER(&ch->lock);
+
+    proc = SockProxyGetProc(a_op);
+    if (proc == NULL) {
+	/* proc not found or shutting down */
+	goto done;
+    }
+
+    if (a_op == AFS_USPC_SOCKPROXY_START) {
+	proc->addr = a_sin->sin_addr.s_addr;
+	proc->port = a_sin->sin_port;
+    }
+    if (a_op == AFS_USPC_SOCKPROXY_SEND) {
+	proc->pkts = a_pkts;
+	proc->npkts = a_npkts;
+    }
+
+    proc->op = a_op;
+    proc->pending = 1;
+    proc->complete = 0;
+
+    /* wake the afsd proc blocked in rxk_SockProxyReply */
+    CV_BROADCAST(&proc->cv_op);
+    /* if shutting down, there is no need to wait for the response from the
+     * userspace process since it will exit and never return. */
+    if (proc->op == AFS_USPC_SHUTDOWN) {
+	struct rx_sockproxy_proc *p;
+
+	while (!opr_queue_IsEmpty(&ch->procq)) {
+	    /* wake up procs waiting for a request so they can shutdown */
+	    p = opr_queue_First(&ch->procq, struct rx_sockproxy_proc, entry);
+	    opr_queue_Remove(&p->entry);
+
+	    p->op = AFS_USPC_SHUTDOWN;
+	    CV_BROADCAST(&p->cv_op);
+	}
+	ch->shutdown = 1;
+	/* wake up other threads waiting for a proc so they can return */
+	CV_BROADCAST(&ch->cv_procq);
+
+	ret = 0;
+	goto done;
+    }
+
+    /* wait for response from userspace process */
+    while (!proc->complete) {
+	CV_WAIT(&proc->cv_op, &ch->lock);
+    }
+    ret = proc->ret;
+
+    /* add proc to the queue of procs available for use */
+    opr_queue_Append(&ch->procq, &proc->entry);
+    CV_BROADCAST(&ch->cv_procq);
+ done:
+    MUTEX_EXIT(&ch->lock);
+    return ret;
+}
+
+/**
+ * Send packets to the given address.
+ *
+ * Flattens the iovec chain into one contiguous payload and delegates the
+ * actual sendmsg() to a userspace afsd proc via SockProxyRequest.
+ *
+ * @param[in] so not used
+ * @param[in] addr destination address
+ * @param[in] dvec vector holding data to be sent
+ * @param[in] nvecs number of dvec entries
+ * @param[in] alength packet size
+ * @param[in] istack not used
+ *
+ * @return 0 on success.
+ */
+int
+osi_NetSend(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec,
+	    int nvecs, afs_int32 alength, int istack)
+{
+    int iov_i, code;
+    int haveGlock;
+
+    struct afs_pkt_hdr pkt;
+    int npkts, nbytes, n_sent;
+    char *payloadp;
+
+    npkts = 1;	/* for now, send one packet at a time */
+    nbytes = 0;
+
+    AFS_STATCNT(osi_NetSend);
+
+    memset(&pkt, 0, sizeof(pkt));
+    haveGlock = ISAFS_GLOCK();
+
+    if (nvecs > RX_MAXIOVECS) {
+	osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);
+    }
+    if (alength > AFS_SOCKPROXY_PAYLOAD_MAX) {
+	osi_Panic("osi_NetSend: %d: Payload is too big.\n", alength);
+    }
+    /* refuse to send once shutdown has progressed past the listener stage */
+    if ((afs_termState == AFSOP_STOP_RXK_LISTENER) ||
+	(afs_termState == AFSOP_STOP_COMPLETE)) {
+	return -1;
+    }
+
+    if (haveGlock) {
+	AFS_GUNLOCK();
+    }
+
+    pkt.addr = addr->sin_addr.s_addr;
+    pkt.port = addr->sin_port;
+    pkt.size = alength;
+    pkt.payload = rxi_Alloc(alength);
+
+    /* flatten the iovecs into the single contiguous payload buffer */
+    payloadp = pkt.payload;
+    for (iov_i = 0; iov_i < nvecs; iov_i++) {
+	nbytes += dvec[iov_i].iov_len;
+	osi_Assert(nbytes <= alength);
+
+	memcpy(payloadp, dvec[iov_i].iov_base, dvec[iov_i].iov_len);
+	payloadp += dvec[iov_i].iov_len;
+    }
+
+    /* returns the number of packets sent */
+    n_sent = SockProxyRequest(AFS_USPC_SOCKPROXY_SEND, NULL, &pkt, npkts);
+    if (n_sent > 0) {
+	/* success */
+	code = 0;
+    } else {
+	code = EIO;
+    }
+
+    rxi_Free(pkt.payload, alength);
+    if (haveGlock) {
+	AFS_GLOCK();
+    }
+    return code;
+}
+
+/**
+ * Check if index is valid for a given op.
+ *
+ * @param[in] a_op operation
+ * @param[in] a_idx index
+ *
+ * @return 1 if valid; 0 otherwise.
+ */
+static int
+IsSockProxyIndexValid(int a_op, int a_idx)
+{
+    int valid = 0;
+
+    switch (a_op) {
+    case AFS_USPC_SOCKPROXY_RECV:
+	/* index 0 is reserved for the receiver */
+	valid = (a_idx == 0);
+	break;
+
+    case AFS_USPC_SHUTDOWN:
+    case AFS_USPC_SOCKPROXY_START:
+    case AFS_USPC_SOCKPROXY_SEND:
+	/* non-receiver procs can use any index besides 0 */
+	valid = (a_idx > 0 && a_idx < AFS_SOCKPROXY_NPROCS);
+	break;
+
+    default:
+	/* unknown op: never valid */
+	break;
+    }
+    return valid;
+}
+
+/**
+ * Receive packet from user-space.
+ *
+ * Hands a packet delivered by the afsd receiver proc to the common rx
+ * upcall path.
+ *
+ * @param[in] a_pkt packet to be received
+ */
+static void
+rx_upcall_sockproxy(struct afs_pkt_hdr *a_pkt)
+{
+    /*
+     * Although SockProxySocket is not used as an endpoint for communication,
+     * rxi_FindService uses this information to find the correct service
+     * structure.
+     */
+    rx_upcall_common(SockProxySocket, a_pkt);
+}
+
+/**
+ * Receive response from user-space process.
+ *
+ * Called from the AFSOP_SOCKPROXY_HANDLER syscall path: consumes the afsd
+ * process' reply to the previous request (if one was pending), then blocks
+ * until a new request is posted for this proc (or shutdown), filling uspc
+ * with the new request for the afsd process to execute.
+ *
+ * @param[in] uspc control information exchanged between rx and procs
+ * @param[in] pkts_recv packets to be received
+ * @param[out] pkts_send packets to be sent (do not free; the memory is
+ *		 owned by the SockProxyRequest caller)
+ *
+ * @return 0 on success; -1 otherwise.
+ */
+int
+rxk_SockProxyReply(struct afs_uspc_param *uspc,
+		   struct afs_pkt_hdr *pkts_recv,
+		   struct afs_pkt_hdr **pkts_send)
+{
+    struct rx_sockproxy_channel *ch = &rx_sockproxy_ch;
+    struct rx_sockproxy_proc *proc;
+    int procidx, shutdown;
+
+    procidx = uspc->req.usp.idx;
+    shutdown = 0;
+
+    if (!IsSockProxyIndexValid(uspc->reqtype, procidx)) {
+	printf("rxk_SockProxyReply: bad proc %d idx %d\n",
+	       uspc->reqtype, procidx);
+	return -1;
+    }
+
+    MUTEX_ENTER(&ch->lock);
+    proc = &ch->proc[procidx];
+
+    /* process successfully initialized */
+    if (proc->inited == 0) {
+	proc->inited = 1;
+	if (uspc->reqtype != AFS_USPC_SOCKPROXY_RECV) {
+	    /*
+	     * Don't count the _RECV process in our count of
+	     * afs_sockproxy_procs. The _RECV process will be killed by one of
+	     * the other sockproxy procs during shutdown, to make sure it
+	     * doesn't get stuck waiting for packets.
+	     */
+	    /* drop ch->lock while taking GLOCK to respect lock ordering */
+	    MUTEX_EXIT(&ch->lock);
+	    AFS_GLOCK();
+	    afs_sockproxy_procs++;
+	    AFS_GUNLOCK();
+	    MUTEX_ENTER(&ch->lock);
+
+	    /*
+	     * Add proc to the queue of procs waiting for a request. Skip
+	     * AFS_USPC_SOCKPROXY_RECV since there is no request/reply
+	     * mechanism for this type of request.
+	     */
+	    opr_queue_Append(&ch->procq, &proc->entry);
+	    CV_BROADCAST(&ch->cv_procq);
+	}
+    }
+
+    /* response received from userspace process */
+    if (proc->pending) {
+	proc->op = -1;
+	proc->pending = 0;
+	proc->complete = 1;
+	proc->ret = uspc->retval;
+
+	/* wake the SockProxyRequest caller waiting on this proc */
+	CV_BROADCAST(&proc->cv_op);
+    }
+
+    if (ch->shutdown) {
+	shutdown = 1;
+	goto done;
+    }
+
+    if (uspc->reqtype == AFS_USPC_SOCKPROXY_RECV) {
+	int pkt_i;
+
+	/* deliver received packets outside ch->lock; no reply needed */
+	MUTEX_EXIT(&ch->lock);
+	for (pkt_i = 0; pkt_i < uspc->req.usp.npkts; pkt_i++) {
+	    rx_upcall_sockproxy(&pkts_recv[pkt_i]);
+	}
+	return 0;
+    }
+
+    /* block until SockProxyRequest posts a new request for this proc */
+    while (!proc->pending && !ch->shutdown) {
+	/* wait for requests */
+	CV_WAIT(&proc->cv_op, &ch->lock);
+    }
+    if (!IsSockProxyIndexValid(proc->op, procidx)) {
+	printf("rxk_SockProxyReply: bad proc %d idx %d\n", proc->op, procidx);
+	MUTEX_EXIT(&ch->lock);
+	return -1;
+    }
+    /* request received */
+    uspc->reqtype = proc->op;
+
+    if (ch->shutdown) {
+	shutdown = 1;
+	goto done;
+    }
+
+    if (uspc->reqtype == AFS_USPC_SOCKPROXY_START) {
+	uspc->req.usp.addr = proc->addr;
+	uspc->req.usp.port = proc->port;
+    }
+    if (uspc->reqtype == AFS_USPC_SOCKPROXY_SEND) {
+	*pkts_send = proc->pkts;
+	uspc->req.usp.npkts = proc->npkts;
+    }
+
+ done:
+    MUTEX_EXIT(&ch->lock);
+    if (shutdown) {
+	AFS_GLOCK();
+	if (uspc->reqtype != AFS_USPC_SOCKPROXY_RECV &&
+	    afs_sockproxy_procs > 0) {
+	    /*
+	     * wait for all non-recv sockproxy procs before continuing the
+	     * shutdown process.
+	     */
+	    afs_sockproxy_procs--;
+	    if (afs_sockproxy_procs == 0) {
+		afs_termState = AFSOP_STOP_NETIF;
+		afs_osi_Wakeup(&afs_termState);
+	    }
+	}
+	AFS_GUNLOCK();
+	/* tell the afsd process to exit */
+	uspc->reqtype = AFS_USPC_SHUTDOWN;
+    }
+    return 0;
+}
+
+/**
+ * Shutdown socket proxy.
+ *
+ * Dispatches AFS_USPC_SHUTDOWN to the afsd sockproxy procs, waits until
+ * rxk_SockProxyReply advances afs_termState past AFSOP_STOP_SOCKPROXY,
+ * then releases the placeholder socket.
+ */
+void
+osi_StopNetIfPoller(void)
+{
+    AFS_GUNLOCK();
+    SockProxyRequest(AFS_USPC_SHUTDOWN, NULL, NULL, 0);
+    AFS_GLOCK();
+
+    /* rxk_SockProxyReply sets afs_termState to AFSOP_STOP_NETIF and wakes
+     * us once the last non-recv sockproxy proc has seen the shutdown */
+    while (afs_termState == AFSOP_STOP_SOCKPROXY) {
+	afs_osi_Sleep(&afs_termState);
+    }
+    if (SockProxySocket != NULL) {
+	rxi_Free(SockProxySocket, sizeof(*SockProxySocket));
+	SockProxySocket = NULL;
+    }
+
+    if (afs_termState == AFSOP_STOP_NETIF) {
+	afs_termState = AFSOP_STOP_COMPLETE;
+	osi_rxWakeup(&afs_termState);
+    }
+}
+
+/**
+ * Open and bind RX socket.
+ *
+ * Delegates the actual socket creation to userspace via SockProxyRequest
+ * (AFS_USPC_SOCKPROXY_START); the returned osi_socket is only a placeholder.
+ *
+ * @param[in] ahost ip address
+ * @param[in] aport port number
+ *
+ * @return non-NULL on success; NULL otherwise.
+ */
+osi_socket *
+rxk_NewSocketHost(afs_uint32 ahost, short aport)
+{
+    int code;
+    struct sockaddr_in addr;
+
+    AFS_STATCNT(osi_NewSocket);
+
+    /* NOTE(review): this double-init check is not lock-protected; it
+     * presumably relies on single-threaded initialization — confirm */
+    if (SockProxySocket != NULL) {
+	/* Just make sure we don't init twice. */
+	return SockProxySocket;
+    }
+
+    memset(&addr, 0, sizeof(addr));
+    addr.sin_family = AF_INET;
+    addr.sin_port = aport;
+    addr.sin_addr.s_addr = ahost;
+
+    AFS_GUNLOCK();
+
+    code = SockProxyRequest(AFS_USPC_SOCKPROXY_START, &addr, NULL, 0);
+    if (code != 0) {
+	afs_warn("rxk_NewSocketHost: Couldn't initialize socket (%d).\n", code);
+	AFS_GLOCK();
+	return NULL;
+    }
+
+    AFS_GLOCK();
+
+    /*
+     * success. notice that the rxk_NewSocketHost interface forces us to return
+     * an osi_socket address on success. however, if AFS_SOCKPROXY_ENV is
+     * defined, the socket returned by this function is not used. since the
+     * caller is expecting an osi_socket, return one to represent success.
+     */
+    if (SockProxySocket == NULL) {
+	SockProxySocket = rxi_Alloc(sizeof(*SockProxySocket));
+    } else {
+	/* should not happen */
+	afs_warn("rxk_NewSocketHost: SockProxySocket already initialized "
+		 "(this should not happen, but continuing regardless).\n");
+    }
+    return SockProxySocket;
+}
+
+/**
+ * Open and bind RX socket to all local interfaces.
+ *
+ * Thin wrapper around rxk_NewSocketHost() with a wildcard (0) address.
+ *
+ * @param[in] aport port number
+ *
+ * @return non-NULL on success; NULL otherwise.
+ */
+osi_socket *
+rxk_NewSocket(short aport)
+{
+    return rxk_NewSocketHost(0, aport);
+}
+
+/**
+ * Init communication channel used by SockProxyRequest and rxk_SockProxyReply.
+ *
+ * Must run before any afsd sockproxy process invokes the handler syscall.
+ */
+void
+rxk_SockProxySetup(void)
+{
+    int proc_i;
+
+    opr_queue_Init(&rx_sockproxy_ch.procq);
+    CV_INIT(&rx_sockproxy_ch.cv_procq, "rx_sockproxy_cv_procq", CV_DEFAULT, 0);
+    MUTEX_INIT(&rx_sockproxy_ch.lock, "rx_sockproxy_mutex", MUTEX_DEFAULT, 0);
+
+    for (proc_i = 0; proc_i < AFS_SOCKPROXY_NPROCS; proc_i++) {
+	struct rx_sockproxy_proc *proc = &rx_sockproxy_ch.proc[proc_i];
+
+	/* -1 marks "no operation posted" */
+	proc->op = -1;
+	CV_INIT(&proc->cv_op, "rx_sockproxy_cv_op", CV_DEFAULT, 0);
+    }
+}
+
+/**
+ * Destroy communication channel.
+ *
+ * Tears down the condvars and mutex and zeroes the channel; presumably only
+ * called once the sockproxy procs are no longer using it.
+ */
+void
+rxk_SockProxyFinish(void)
+{
+    int proc_i;
+
+    for (proc_i = 0; proc_i < AFS_SOCKPROXY_NPROCS; proc_i++) {
+	struct rx_sockproxy_proc *proc = &rx_sockproxy_ch.proc[proc_i];
+
+	CV_DESTROY(&proc->cv_op);
+    }
+    MUTEX_DESTROY(&rx_sockproxy_ch.lock);
+    CV_DESTROY(&rx_sockproxy_ch.cv_procq);
+
+    /* reset the channel state in case it is ever set up again */
+    memset(&rx_sockproxy_ch, 0, sizeof(rx_sockproxy_ch));
+}
+
+# else /* AFS_SOCKPROXY_ENV */
+
+/* in listener env, the listener shutdown does this. we have no listener */
+void
+osi_StopNetIfPoller(void)
+{
+    soclose(rx_socket);
+    /* advance the shutdown state machine once the socket is closed */
+    if (afs_termState == AFSOP_STOP_NETIF) {
+	afs_termState = AFSOP_STOP_COMPLETE;
+	osi_rxWakeup(&afs_termState);
+    }
+}
+
void
rx_upcall(socket_t so, void *arg, __unused int waitflag)
{
- mbuf_t m;
+ rx_upcall_common(so, NULL);
+}
+
+# endif /* AFS_SOCKPROXY_ENV */
+
+/**
+ * Common body shared by the sockproxy and non-sockproxy rx_upcall paths.
+ *
+ * @param[in] so socket the data arrived on
+ * @param[in] pkt packet already received by the user-space proxy
+ *     (sockproxy builds only; must be non-NULL there, NULL and unused
+ *     otherwise)
+ *
+ * NOTE(review): sa, ss, p, nbytes, resid, noffset and savelen are
+ * declared in context lines elided from this hunk.
+ */
+static void
+rx_upcall_common(socket_t so, struct afs_pkt_hdr *pkt)
+{
int error = 0;
int i, flags = 0;
struct msghdr msg;
msg.msg_namelen = sizeof(struct sockaddr_storage);
sa =(struct sockaddr *) &ss;
- do {
- m = NULL;
+# ifdef AFS_SOCKPROXY_ENV
+ {
+ char *payload;
+ size_t sz;
+
+ osi_Assert(pkt != NULL);
+
+ /* Source address/port come straight from the proxied packet header;
+ * presumably already in network byte order -- TODO confirm. */
+ sa->sa_family = AF_INET;
+ ((struct sockaddr_in*)sa)->sin_addr.s_addr = pkt->addr;
+ ((struct sockaddr_in*)sa)->sin_port = pkt->port;
+
+ payload = pkt->payload;
+ nbytes = pkt->size;
+ resid = nbytes;
+ noffset = 0;
+
+ /* Scatter the contiguous payload across the rx packet's iovecs;
+ * any residual beyond the total iovec capacity is not copied. */
+ for (i = 0; i < p->niovecs && resid > 0; i++) {
+ sz = MIN(resid, p->wirevec[i].iov_len);
+ memcpy(p->wirevec[i].iov_base, payload, sz);
+ resid -= sz;
+ noffset += sz;
+ payload += sz;
+ }
+ }
+# else
+ {
+ /* No proxy: read the datagram from the socket as an mbuf chain. */
+ mbuf_t m = NULL;
error = sock_receivembuf(so, &msg, &m, MSG_DONTWAIT, &nbytes);
if (!error) {
size_t sz, offset = 0;
noffset += sz;
}
}
- } while (0);
- mbuf_freem(m);
+ mbuf_freem(m);
+ }
+# endif /* AFS_SOCKPROXY_ENV */
/* restore the vec to its correct state */
p->wirevec[p->niovecs - 1].iov_len = savelen;
return;
}
-/* in listener env, the listener shutdown does this. we have no listener */
-void
-osi_StopNetIfPoller(void)
-{
- soclose(rx_socket);
- if (afs_termState == AFSOP_STOP_NETIF) {
- afs_termState = AFSOP_STOP_COMPLETE;
- osi_rxWakeup(&afs_termState);
- }
-}
#elif defined(RXK_LISTENER_ENV)
int
osi_NetReceive(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec,
#error need upcall or listener
#endif
+#ifndef AFS_SOCKPROXY_ENV
+/*
+ * Send alength bytes (gathered from dvec) to addr over the raw socket.
+ * Not built in the sockproxy env, where sends go through the user-space
+ * proxy instead. Drops the AFS global lock around the send and
+ * reacquires it afterwards.
+ */
int
osi_NetSend(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec,
int nvecs, afs_int32 alength, int istack)
int i;
struct iovec iov[RX_MAXIOVECS];
int haveGlock = ISAFS_GLOCK();
-#ifdef AFS_DARWIN80_ENV
+# ifdef AFS_DARWIN80_ENV
socket_t asocket = (socket_t)so;
struct msghdr msg;
size_t slen;
-#else
+# else
struct socket *asocket = (struct socket *)so;
struct uio u;
memset(&u, 0, sizeof(u));
-#endif
+# endif
memset(&iov, 0, sizeof(iov));
AFS_STATCNT(osi_NetSend);
if (haveGlock)
AFS_GUNLOCK();
-#if defined(KERNEL_FUNNEL)
+# if defined(KERNEL_FUNNEL)
thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
-#endif
-#ifdef AFS_DARWIN80_ENV
+# endif
+# ifdef AFS_DARWIN80_ENV
memset(&msg, 0, sizeof(struct msghdr));
msg.msg_name = addr;
msg.msg_namelen = ((struct sockaddr *)addr)->sa_len;
msg.msg_iov = &iov[0];
msg.msg_iovlen = nvecs;
code = sock_send(asocket, &msg, 0, &slen);
-#else
+# else
u.uio_iov = &iov[0];
u.uio_iovcnt = nvecs;
u.uio_offset = 0;
u.uio_rw = UIO_WRITE;
u.uio_procp = NULL;
code = sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0);
-#endif
+# endif
-#if defined(KERNEL_FUNNEL)
+# if defined(KERNEL_FUNNEL)
thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
-#endif
+# endif
if (haveGlock)
AFS_GLOCK();
return code;
}
+#endif /* !AFS_SOCKPROXY_ENV */