#endif
#endif /* KERNEL */
-#include "rx_queue.h"
+#include <opr/queue.h>
+
#include "rx_clock.h"
#include "rx_event.h"
#include "rx_misc.h"
/* Connection management */
-extern void rx_SetConnectionEpoch(struct rx_connection *conn, int epoch);
-extern int rx_GetConnectionEpoch(struct rx_connection *conn);
-extern void rx_SetConnectionId(struct rx_connection *conn, int id);
-extern int rx_GetConnectionId(struct rx_connection *conn);
+extern afs_uint32 rx_GetConnectionEpoch(struct rx_connection *conn);
+extern afs_uint32 rx_GetConnectionId(struct rx_connection *conn);
extern void *rx_GetSecurityData(struct rx_connection *conn);
extern void rx_SetSecurityData(struct rx_connection *conn, void *data);
extern int rx_IsUsingPktCksum(struct rx_connection *conn);
-extern void rx_SetSecurityHeaderSize(struct rx_connection *conn, int size);
-extern int rx_GetSecurityHeaderSize(struct rx_connection *conn);
-extern void rx_SetSecurityMaxTrailerSize(struct rx_connection *conn, int size);
-extern int rx_GetSecurityMaxTrailerSize(struct rx_connection *conn);
+extern void rx_SetSecurityHeaderSize(struct rx_connection *conn, afs_uint32 size);
+extern afs_uint32 rx_GetSecurityHeaderSize(struct rx_connection *conn);
+extern void rx_SetSecurityMaxTrailerSize(struct rx_connection *conn, afs_uint32 size);
+extern afs_uint32 rx_GetSecurityMaxTrailerSize(struct rx_connection *conn);
extern void rx_SetMsgsizeRetryErr(struct rx_connection *conn, int err);
extern int rx_IsServerConn(struct rx_connection *conn);
extern int rx_IsClientConn(struct rx_connection *conn);
unsigned int totalFunc,
int isServer);
+/* Peer management */
+extern afs_uint32 rx_HostOf(struct rx_peer *peer);
+extern u_short rx_PortOf(struct rx_peer *peer);
/* Packets */
#define RX_CALL_PEER_BUSY 0x20000 /* the last packet we received on this call was a
* BUSY packet; i.e. the channel for this call is busy */
#define RX_CALL_ACKALL_SENT 0x40000 /* ACKALL has been sent on the call */
-
+#define RX_CALL_FLUSH 0x80000 /* Transmit queue should be flushed to peer */
#endif
#define RX_WAIT 1
#define RX_DONTWAIT 0
-#define rx_HostOf(peer) ((peer)->host)
-#define rx_PortOf(peer) ((peer)->port)
#define rx_GetLocalStatus(call, status) ((call)->localStatus)
/* Define procedure to set service dead time */
#define rx_SetIdleDeadTime(service,time) ((service)->idleDeadTime = (time))
-/*
- * Define error to return in server connections when failing to answer.
- * (server only) For example, AFS viced sends VNOSERVICE.
- */
-#define rx_SetServerIdleDeadErr(service,err) ((service)->idleDeadErr = (err))
-
/* Define procedures for getting and setting before and after execute-request procs */
#define rx_SetAfterProc(service,proc) ((service)->afterProc = (proc))
#define rx_SetBeforeProc(service,proc) ((service)->beforeProc = (proc))
u_short connDeadTime; /* Seconds until a client of this service will be declared dead, if it is not responding */
u_short idleDeadTime; /* Time a server will wait for I/O to start up again */
u_char checkReach; /* Check for asymmetric clients? */
- afs_int32 idleDeadErr;
int nSpecific; /* number entries in specific data */
void **specific; /* pointer to connection specific data */
#ifdef RX_ENABLE_LOCKS
#endif /* KDUMP_RX_LOCK */
-/* A server puts itself on an idle queue for a service using an
- * instance of the following structure. When a call arrives, the call
- * structure pointer is placed in "newcall", the routine to execute to
- * service the request is placed in executeRequestProc, and the
- * process is woken up. The queue entry's address is used for the
- * sleep/wakeup. If socketp is non-null, then this thread is willing
- * to become a listener thread. A thread sets *socketp to -1 before
- * sleeping. If *socketp is not -1 when the thread awakes, it is now
- * the listener thread for *socketp. When socketp is non-null, tno
- * contains the server's threadID, which is used to make decitions in GetCall.
- */
-#ifdef KDUMP_RX_LOCK
-struct rx_serverQueueEntry_rx_lock {
-#else
-struct rx_serverQueueEntry {
-#endif
- struct rx_queue queueItemHeader;
-#ifdef KDUMP_RX_LOCK
- struct rx_call_rx_lock *newcall;
-#else
- struct rx_call *newcall;
-#endif
-#ifdef RX_ENABLE_LOCKS
- afs_kmutex_t lock;
- afs_kcondvar_t cv;
-#endif
- int tno;
- osi_socket *socketp;
-};
-
-
-/* A peer refers to a peer process, specified by a (host,port) pair. There may be more than one peer on a given host. */
-#ifdef KDUMP_RX_LOCK
-struct rx_peer_rx_lock {
- struct rx_peer_rx_lock *next; /* Next in hash conflict or free list */
-#else
-struct rx_peer {
- struct rx_peer *next; /* Next in hash conflict or free list */
-#endif
-#ifdef RX_ENABLE_LOCKS
- afs_kmutex_t peer_lock; /* Lock peer */
-#endif /* RX_ENABLE_LOCKS */
- afs_uint32 host; /* Remote IP address, in net byte order */
- u_short port; /* Remote UDP port, in net byte order */
-
- /* interface mtu probably used for this host - includes RX Header */
- u_short ifMTU; /* doesn't include IP header */
-
- /* For garbage collection */
- afs_uint32 idleWhen; /* When the refcountwent to zero */
- afs_int32 refCount; /* Reference count for this structure (rx_peerHashTable_lock) */
-
- /* Congestion control parameters */
- u_char burstSize; /* Reinitialization size for the burst parameter */
- u_char burst; /* Number of packets that can be transmitted right now, without pausing */
- struct clock burstWait; /* Delay until new burst is allowed */
- struct rx_queue congestionQueue; /* Calls that are waiting for non-zero burst value */
- int rtt; /* Smoothed round trip time, measured in milliseconds/8 */
- int rtt_dev; /* Smoothed rtt mean difference, in milliseconds/4 */
- int nSent; /* Total number of distinct data packets sent, not including retransmissions */
- int reSends; /* Total number of retransmissions for this peer, since this structure was created */
-
-/* Skew: if a packet is received N packets later than expected (based
- * on packet serial numbers), then we define it to have a skew of N.
- * The maximum skew values allow us to decide when a packet hasn't
- * been received yet because it is out-of-order, as opposed to when it
- * is likely to have been dropped. */
- afs_uint32 inPacketSkew; /* Maximum skew on incoming packets */
- afs_uint32 outPacketSkew; /* Peer-reported max skew on our sent packets */
-
- /* the "natural" MTU, excluding IP,UDP headers, is negotiated by the endpoints */
- u_short natMTU;
- u_short maxMTU;
- /* negotiated maximum number of packets to send in a single datagram. */
- u_short maxDgramPackets;
- /* local maximum number of packets to send in a single datagram. */
- u_short ifDgramPackets;
- /*
- * MTU, cwind, and nDgramPackets are used to initialize
- * slow start parameters for new calls. These values are set whenever a
- * call sends a retransmission and at the end of each call.
- * congestSeq is incremented each time the congestion parameters are
- * changed by a call recovering from a dropped packet. A call used
- * MAX when updating congestion parameters if it started with the
- * current congestion sequence number, otherwise it uses MIN.
- */
- u_short MTU; /* MTU for AFS 3.4a jumboGrams */
- u_short cwind; /* congestion window */
- u_short nDgramPackets; /* number packets per AFS 3.5 jumbogram */
- u_short congestSeq; /* Changed when a call retransmits */
- afs_hyper_t bytesSent; /* Number of bytes sent to this peer */
- afs_hyper_t bytesReceived; /* Number of bytes received from this peer */
- struct rx_queue rpcStats; /* rpc statistic list */
- int lastReachTime; /* Last time we verified reachability */
- afs_int32 maxPacketSize; /* peer packetsize hint */
-};
-
#ifndef KDUMP_RX_LOCK
/* Flag bits for connection structure */
#define RX_CONN_MAKECALL_WAITING 1 /* rx_NewCall is waiting for a channel */
#define RX_CHECKREACH_TTL 60 /* Re-check reachability this often */
/*
+ * rx_GetNetworkError 'origin' constants. These define the meaning of the
+ * 'type' and 'code' values returned by rx_GetNetworkError.
+ */
+
+/* Used for ICMP errors; the type and code are the ICMP type and code,
+ * respectively */
+#define RX_NETWORK_ERROR_ORIGIN_ICMP (0)
+
+/*
* RX error codes. RX uses error codes from -1 to -64 and -100.
* Rxgen uses other error codes < -64 (see src/rxgen/rpc_errors.h);
* user programs are expected to return positive error codes
/* EMSGSIZE returned from network. Packet too big, must fragment */
#define RX_MSGSIZE (-8)
-/*
- * Idle dead timeout error. This error is never sent on the wire.
- * rxi_SendCallAbort() translates RX_CALL_IDLE to RX_CALL_TIMEOUT.
- */
-#define RX_CALL_IDLE (-9)
+/* The value -9 was previously used for RX_CALL_IDLE but is now free for
+ * reuse. */
/*
* Busy call channel error. This error is never sent on the wire.
#define RX_RESTARTING (-100)
typedef enum {
- RX_SECIDX_NULL = 0,
- RX_SECIDX_KAD = 2,
- RX_SECIDX_GK = 4,
- RX_SECIDX_K5 = 5,
+    RX_SECIDX_NULL = 0, /**< rxnull, no security. */
+    RX_SECIDX_VAB = 1, /**< vice tokens with bcrypt. Unused. */
+    RX_SECIDX_KAD = 2, /**< kerberos/DES. */
+    RX_SECIDX_KAE = 3, /**< rxkad, but always encrypt. */
+    RX_SECIDX_GK = 4, /**< rxgk, RFC 3961 crypto. */
+    RX_SECIDX_K5 = 5, /**< kerberos 5 tickets as tokens. */
} rx_securityIndex;
+/*
+ * We use an enum for the symbol definitions but have no need for a typedef
+ * because the enum is at least as wide as 'int' and these have to fit into
+ * a field of type 'char'. Direct assignment will do the right thing if the
+ * enum value fits into that type.
+ */
+enum {
+ RX_SECTYPE_UNK = 0,
+ RX_SECTYPE_NULL = 1,
+ RX_SECTYPE_VAB = 2,
+ RX_SECTYPE_KAD = 3,
+};
struct rx_securityObjectStats {
- char type; /* 0:unk 1:null,2:vab 3:kad */
+ char type; /* An RX_SECTYPE_* value */
char level;
char sparec[10]; /* force correct alignment */
afs_int32 flags; /* 1=>unalloc, 2=>auth, 4=>expired */
afs_uint32 interfaceId;
afs_uint32 func_total;
afs_uint32 func_index;
- afs_hyper_t invocations;
- afs_hyper_t bytes_sent;
- afs_hyper_t bytes_rcvd;
+ afs_uint64 invocations;
+ afs_uint64 bytes_sent;
+ afs_uint64 bytes_rcvd;
struct clock queue_time_sum;
struct clock queue_time_sum_sqr;
struct clock queue_time_min;
#define RX_STATS_RETRIEVAL_FIRST_EDITION 1 /* first implementation */
typedef struct rx_interface_stat {
- struct rx_queue queue_header;
- struct rx_queue all_peers;
+ struct opr_queue entry;
+ struct opr_queue entryPeers;
rx_function_entry_v1_t stats[1]; /* make sure this is aligned correctly */
} rx_interface_stat_t, *rx_interface_stat_p;
#include "rx_prototypes.h"
#endif
+static_inline afs_uint32
+RPCOpStat_Peer(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->remote_peer;
+}
+
+static_inline afs_uint32
+RPCOpStat_Port(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->remote_port;
+}
+
+static_inline afs_uint32
+RPCOpStat_IsServer(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->remote_is_server;
+}
+
+static_inline afs_uint32
+RPCOpStat_InterfaceId(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->interfaceId;
+}
+
+static_inline afs_uint32
+RPCOpStat_NumFuncs(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->func_total;
+}
+
+static_inline afs_uint32
+RPCOpStat_CurFunc(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->func_index;
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSum(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->queue_time_sum);
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSumSqr(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->queue_time_sum_sqr);
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSumMin(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->queue_time_min);
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSumMax(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->queue_time_max);
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSum(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->execution_time_sum);
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSumSqr(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->execution_time_sum_sqr);
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSumMin(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->execution_time_min);
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSumMax(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return &(rpcop_stat->execution_time_max);
+}
+
+static_inline afs_uint64
+RPCOpStat_NumCalls(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->invocations;
+}
+
+static_inline afs_uint64
+RPCOpStat_BytesSent(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->bytes_sent;
+}
+
+static_inline afs_uint64
+RPCOpStat_BytesRcvd(void *blob) {
+ rx_function_entry_v1_p rpcop_stat = (rx_function_entry_v1_p)blob;
+ return rpcop_stat->bytes_rcvd;
+}
#endif /* !KDUMP_RX_LOCK */