2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * xdr_rx.c. XDR using RX.
14 #include <afsconfig.h>
15 #include <afs/param.h>
19 # include "afs/sysincludes.h"
23 # ifdef AFS_LINUX20_ENV
24 # include "h/socket.h"
26 # include "rpc/types.h"
28 # ifdef AFS_LINUX22_ENV
30 # define quad_t __quad_t
31 # define u_quad_t __u_quad_t
34 # include "netinet/in.h"
35 # endif /* !UKERNEL */
37 # include <sys/types.h>
40 # include <netinet/in.h>
47 /* Static prototypes */
48 #ifdef AFS_XDR_64BITOPS
49 static bool_t xdrrx_getint64(XDR *axdrs, long *lp);
50 static bool_t xdrrx_putint64(XDR *axdrs, long *lp);
51 #endif /* AFS_XDR_64BITOPS */
/* Core 32-bit word and byte-string ops wired into the xdrrx_ops vector. */
53 static bool_t xdrrx_getint32(XDR *axdrs, afs_int32 * lp);
54 static bool_t xdrrx_putint32(XDR *axdrs, afs_int32 * lp);
/* NOTE(review): the continuation lines carrying the final `u_int len`
 * parameter of the next two prototypes are not visible in this view. */
55 static bool_t xdrrx_getbytes(XDR *axdrs, caddr_t addr,
57 static bool_t xdrrx_putbytes(XDR *axdrs, caddr_t addr,
59 static afs_int32 *xdrrx_inline(XDR *axdrs, u_int len);
63  * Ops vector for stdio type XDR
/* NOTE(review): this initializer is only partially visible here. It has two
 * alternative layouts -- positional entries for compilers without designated
 * (labeled) initializer support, and .x_* designated entries otherwise --
 * plus extra Solaris-kernel slots; the #else/#endif plumbing and the closing
 * "};" fall outside this view. Unused slots (get/set position, destroy) are
 * deliberately NULL. */
65 static struct xdr_ops xdrrx_ops = {
66 #ifndef HAVE_STRUCT_LABEL_SUPPORT
67 #ifdef AFS_XDR_64BITOPS
68     xdrrx_getint64,     /* deserialize an afs_int64 */
69     xdrrx_putint64,     /* serialize an afs_int64 */
71     /* Windows does not support labeled assignments */
72 #if !(defined(KERNEL) && defined(AFS_SUN57_ENV))
73     xdrrx_getint32,     /* deserialize an afs_int32 */
74     xdrrx_putint32,     /* serialize an afs_int32 */
76     xdrrx_getbytes,     /* deserialize counted bytes */
77     xdrrx_putbytes,     /* serialize counted bytes */
78     NULL,               /* get offset in the stream: not supported. */
79     NULL,               /* set offset in the stream: not supported. */
80     xdrrx_inline,       /* prime stream for inline macros */
81     NULL,               /* destroy stream */
82 #if (defined(KERNEL) && defined(AFS_SUN57_ENV))
83     NULL,               /* control - not implemented */
84     xdrrx_getint32,     /* not supported */
85     xdrrx_putint32,     /* serialize an afs_int32 */
/* Designated-initializer variant (HAVE_STRUCT_LABEL_SUPPORT). */
88 #ifdef AFS_XDR_64BITOPS
89     .x_getint64 = xdrrx_getint64,
90     .x_putint64 = xdrrx_putint64,
91 #endif /* AFS_XDR_64BITOPS */
92     .x_getint32 = xdrrx_getint32,       /* deserialize an afs_int32 */
93     .x_putint32 = xdrrx_putint32,       /* serialize an afs_int32 */
94     .x_getbytes = xdrrx_getbytes,       /* deserialize counted bytes */
95     .x_putbytes = xdrrx_putbytes,       /* serialize counted bytes */
96     .x_getpostn = NULL, /* get offset in the stream: not supported. */
97     .x_setpostn = NULL, /* set offset in the stream: not supported. */
98     .x_inline = xdrrx_inline,   /* prime stream for inline macros */
99     .x_destroy = NULL,  /* destroy stream */
100 #if defined(KERNEL) && defined(AFS_SUN57_ENV)
107  * Initialize an rx xdr handle, for a given rx call. op must be XDR_ENCODE or XDR_DECODE.
108  * Call must have been returned by rx_MakeCall or rx_GetCall.
/* NOTE(review): the continuation of this signature (the xdr_op parameter),
 * the opening brace, the x_op assignment and the closing brace are not
 * visible in this view.  What is visible: the handle is pointed at the
 * RX-backed ops vector, and the rx_call is stashed in x_private so each op
 * can recover it. */
111 xdrrx_create(XDR * xdrs, struct rx_call *call,
115     xdrs->x_ops = &xdrrx_ops;           /* all I/O routed through xdrrx_* */
116     xdrs->x_private = (caddr_t) call;   /* per-stream state: the rx call */
119 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* AIX 3.2 kernel only: amount of stack each xdrrx op pins before calling
 * into rx (see the "Hack of hacks" comments below for the rationale). */
120 #define STACK_TO_PIN    2*PAGESIZE      /* 8K */
/* Diagnostic counter: incremented (elsewhere) when pin() fails -- presumably
 * read post-mortem; verify against callers.  NOTE(review): non-static and
 * unsynchronized, grounded only by its definition here. */
121 int rx_pin_failed = 0;
124 #ifdef AFS_XDR_64BITOPS
/* Deserialize into a long via the rx call stored in x_private: reads one
 * 32-bit network-order word with rx_Read32.  NOTE(review): only 32 bits
 * cross the wire even though lp is a long; the declaration of the local
 * `i`, the ntohl assignment and the TRUE/FALSE returns are not visible in
 * this view. */
126 xdrrx_getint64(XDR *axdrs, long *lp)
128     XDR * xdrs = (XDR *)axdrs;
129     struct rx_call *call = ((struct rx_call *)(xdrs)->x_private);
132     if (rx_Read32(call, &i) == sizeof(i)) {
/* Serialize a long: *lp is narrowed to a 32-bit network-order word (htonl)
 * and written with rx_Write32; `code` holds the success boolean.
 * NOTE(review): high 32 bits of *lp are discarded on LP64 -- the return
 * statement is not visible in this view. */
140 xdrrx_putint64(XDR *axdrs, long *lp)
142     XDR * xdrs = (XDR *)axdrs;
143     afs_int32 code, i = htonl(*lp);
144     struct rx_call *call = ((struct rx_call *)(xdrs)->x_private);
146     code = (rx_Write32(call, &i) == sizeof(i));
149 #endif /* AFS_XDR_64BITOPS */
/* Deserialize an afs_int32: read one network-order word from the rx call in
 * x_private and (in lines outside this view) ntohl it into *lp.  On the AIX
 * 3.2 kernel the local stack is pinned around the rx call -- see below. */
152 xdrrx_getint32(XDR *axdrs, afs_int32 * lp)
155     XDR * xdrs = (XDR *)axdrs;
156     struct rx_call *call = ((struct rx_call *)(xdrs)->x_private);
157 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Pin the STACK_TO_PIN bytes *below* the local `l` (stack grows down). */
158     char *saddr = (char *)&l;
159     saddr -= STACK_TO_PIN;
161      * Hack of hacks: Aix3.2 only guarantees that the next 2K of stack is pinned. Under
162      * splnet (disables interrupts), which is set throughout rx, we can't swap in stack
163      * pages if we need to, so we panic. Since sometimes, under splnet, we'll use more than
164      * 2K stack we could try to bring the next few stack pages in here before we call the rx
165      * layer. Of course this doesn't guarantee that those stack pages won't be swapped
166      * out between here and calling splnet. So we now pin (and unpin) them instead to
167      * guarantee that they remain there.
169     if (pin(saddr, STACK_TO_PIN)) {
170         /* XXX There's little we can do but continue XXX */
/* NOTE(review): declaration of `l`, the *lp = ntohl(l) store and the
 * TRUE/FALSE returns are not visible in this view. */
175     if (rx_Read32(call, &l) == sizeof(l)) {
177 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Success path: release the pinned pages before returning. */
179         unpin(saddr, STACK_TO_PIN);
183 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Failure path: release the pinned pages as well. */
185     unpin(saddr, STACK_TO_PIN);
/* Serialize an afs_int32: htonl *lp and write the word to the rx call in
 * x_private; `code` holds the success boolean (return not visible here).
 * On the AIX 3.2 kernel the local stack is pinned around the rx call. */
191 xdrrx_putint32(XDR *axdrs, afs_int32 * lp)
193     afs_int32 code, l = htonl(*lp);
194     XDR * xdrs = (XDR *)axdrs;
195     struct rx_call *call = ((struct rx_call *)(xdrs)->x_private);
196 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Pin the STACK_TO_PIN bytes *below* the local `code` (stack grows down). */
197     char *saddr = (char *)&code;
198     saddr -= STACK_TO_PIN;
200      * Hack of hacks: Aix3.2 only guarantees that the next 2K of stack is pinned. Under
201      * splnet (disables interrupts), which is set throughout rx, we can't swap in stack
202      * pages if we need to, so we panic. Since sometimes, under splnet, we'll use more than
203      * 2K stack we could try to bring the next few stack pages in here before we call the rx
204      * layer. Of course this doesn't guarantee that those stack pages won't be swapped
205      * out between here and calling splnet. So we now pin (and unpin) them instead to
206      * guarantee that they remain there.
208     if (pin(saddr, STACK_TO_PIN)) {
213     code = (rx_Write32(call, &l) == sizeof(l));
214 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Release the pinned pages before returning `code`. */
216         unpin(saddr, STACK_TO_PIN);
/* Deserialize `len` counted bytes from the rx call in x_private into addr;
 * succeeds only if rx_Read delivers exactly `len` bytes (short reads fail).
 * On the AIX 3.2 kernel the local stack is pinned around the rx call.
 * NOTE(review): declaration of `code` and the return are not visible here. */
222 xdrrx_getbytes(XDR *axdrs, caddr_t addr, u_int len)
225     XDR * xdrs = (XDR *)axdrs;
226     struct rx_call *call = ((struct rx_call *)(xdrs)->x_private);
227 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Pin the STACK_TO_PIN bytes *below* the local `code` (stack grows down). */
228     char *saddr = (char *)&code;
229     saddr -= STACK_TO_PIN;
231      * Hack of hacks: Aix3.2 only guarantees that the next 2K of stack is pinned. Under
232      * splnet (disables interrupts), which is set throughout rx, we can't swap in stack
233      * pages if we need to, so we panic. Since sometimes, under splnet, we'll use more than
234      * 2K stack we could try to bring the next few stack pages in here before we call the rx
235      * layer. Of course this doesn't guarantee that those stack pages won't be swapped
236      * out between here and calling splnet. So we now pin (and unpin) them instead to
237      * guarantee that they remain there.
239     if (pin(saddr, STACK_TO_PIN)) {
240         /* XXX There's little we can do but continue XXX */
245     code = (rx_Read(call, addr, len) == len);
246 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Release the pinned pages before returning `code`. */
248         unpin(saddr, STACK_TO_PIN);
/* Serialize `len` counted bytes from addr onto the rx call in x_private;
 * succeeds only if rx_Write accepts exactly `len` bytes.  On the AIX 3.2
 * kernel the local stack is pinned around the rx call.
 * NOTE(review): declaration of `code` and the return are not visible here. */
254 xdrrx_putbytes(XDR *axdrs, caddr_t addr, u_int len)
257     XDR * xdrs = (XDR *)axdrs;
258     struct rx_call *call = ((struct rx_call *)(xdrs)->x_private);
259 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Pin the STACK_TO_PIN bytes *below* the local `code` (stack grows down). */
260     char *saddr = (char *)&code;
261     saddr -= STACK_TO_PIN;
263      * Hack of hacks: Aix3.2 only guarantees that the next 2K of stack is pinned. Under
264      * splnet (disables interrupts), which is set throughout rx, we can't swap in stack
265      * pages if we need to, so we panic. Since sometimes, under splnet, we'll use more than
266      * 2K stack we could try to bring the next few stack pages in here before we call the rx
267      * layer. Of course this doesn't guarantee that those stack pages won't be swapped
268      * out between here and calling splnet. So we now pin (and unpin) them instead to
269      * guarantee that they remain there.
271     if (pin(saddr, STACK_TO_PIN)) {
272         /* XXX There's little we can do but continue XXX */
277     code = (rx_Write(call, addr, len) == len);
278 #if defined(KERNEL) && defined(AFS_AIX32_ENV)
/* Release the pinned pages before returning `code`. */
280         unpin(saddr, STACK_TO_PIN);
/* Dead code: compiled out via `#ifdef undef`.  Kept as documentation that
 * stream positioning is intentionally unsupported (the ops vector installs
 * NULL for x_getpostn/x_setpostn, so these can never be reached). */
285 #ifdef undef              /* not used */
287 xdrrx_getpos(XDR * xdrs)
289     /* Not supported. What error code should we return? (It doesn't matter: it will never be called, anyway!) */
294 xdrrx_setpos(XDR * xdrs, u_int pos)
302 xdrrx_inline(XDR *axdrs, u_int len)
304 /* I don't know what this routine is supposed to do, but the stdio module returns null, so we will, too */