#endif
#endif /* KERNEL */
-#include "rx_queue.h"
+#include <opr/queue.h>
+
#include "rx_clock.h"
#include "rx_event.h"
#include "rx_misc.h"
/* Connection management */
-extern void rx_SetConnectionEpoch(struct rx_connection *conn, int epoch);
-extern int rx_GetConnectionEpoch(struct rx_connection *conn);
-extern void rx_SetConnectionId(struct rx_connection *conn, int id);
-extern int rx_GetConnectionId(struct rx_connection *conn);
+extern afs_uint32 rx_GetConnectionEpoch(struct rx_connection *conn);
+extern afs_uint32 rx_GetConnectionId(struct rx_connection *conn);
extern void *rx_GetSecurityData(struct rx_connection *conn);
extern void rx_SetSecurityData(struct rx_connection *conn, void *data);
extern int rx_IsUsingPktCksum(struct rx_connection *conn);
-extern void rx_SetSecurityHeaderSize(struct rx_connection *conn, int size);
-extern int rx_GetSecurityHeaderSize(struct rx_connection *conn);
-extern void rx_SetSecurityMaxTrailerSize(struct rx_connection *conn, int size);
-extern int rx_GetSecurityMaxTrailerSize(struct rx_connection *conn);
+extern void rx_SetSecurityHeaderSize(struct rx_connection *conn, afs_uint32 size);
+extern afs_uint32 rx_GetSecurityHeaderSize(struct rx_connection *conn);
+extern void rx_SetSecurityMaxTrailerSize(struct rx_connection *conn, afs_uint32 size);
+extern afs_uint32 rx_GetSecurityMaxTrailerSize(struct rx_connection *conn);
extern void rx_SetMsgsizeRetryErr(struct rx_connection *conn, int err);
extern int rx_IsServerConn(struct rx_connection *conn);
extern int rx_IsClientConn(struct rx_connection *conn);
#define RX_CALL_PEER_BUSY 0x20000 /* the last packet we received on this call was a
* BUSY packet; i.e. the channel for this call is busy */
#define RX_CALL_ACKALL_SENT 0x40000 /* ACKALL has been sent on the call */
-
+#define RX_CALL_FLUSH 0x80000 /* Transmit queue should be flushed to peer */
#endif
#endif /* KDUMP_RX_LOCK */
-/* A server puts itself on an idle queue for a service using an
- * instance of the following structure. When a call arrives, the call
- * structure pointer is placed in "newcall", the routine to execute to
- * service the request is placed in executeRequestProc, and the
- * process is woken up. The queue entry's address is used for the
- * sleep/wakeup. If socketp is non-null, then this thread is willing
- * to become a listener thread. A thread sets *socketp to -1 before
- * sleeping. If *socketp is not -1 when the thread awakes, it is now
- * the listener thread for *socketp. When socketp is non-null, tno
- * contains the server's threadID, which is used to make decitions in GetCall.
- */
-#ifdef KDUMP_RX_LOCK
-struct rx_serverQueueEntry_rx_lock {
-#else
-struct rx_serverQueueEntry {
-#endif
- struct rx_queue queueItemHeader;
-#ifdef KDUMP_RX_LOCK
- struct rx_call_rx_lock *newcall;
-#else
- struct rx_call *newcall;
-#endif
-#ifdef RX_ENABLE_LOCKS
- afs_kmutex_t lock;
- afs_kcondvar_t cv;
-#endif
- int tno;
- osi_socket *socketp;
-};
-
#ifndef KDUMP_RX_LOCK
/* Flag bits for connection structure */
#define RX_CONN_MAKECALL_WAITING 1 /* rx_NewCall is waiting for a channel */
#define RX_CHECKREACH_TTL 60 /* Re-check reachability this often */
/*
+ * rx_GetNetworkError 'origin' constants. These define the meaning of the
+ * 'type' and 'code' values returned by rx_GetNetworkError.
+ */
+
+/* Used for ICMP errors; the type and code are the ICMP type and code,
+ * respectively */
+#define RX_NETWORK_ERROR_ORIGIN_ICMP (0)
+
+/*
* RX error codes. RX uses error codes from -1 to -64 and -100.
* Rxgen uses other error codes < -64 (see src/rxgen/rpc_errors.h);
* user programs are expected to return positive error codes
#define RX_RESTARTING (-100)
typedef enum {
- RX_SECIDX_NULL = 0,
- RX_SECIDX_KAD = 2,
- RX_SECIDX_GK = 4,
- RX_SECIDX_K5 = 5,
+ RX_SECIDX_NULL = 0, /** rxnull, no security. */
+ RX_SECIDX_VAB = 1, /** vice tokens with bcrypt. Unused. */
+ RX_SECIDX_KAD = 2, /** kerberos/DES. */
+ RX_SECIDX_KAE = 3, /** rxkad, but always encrypt. */
+ RX_SECIDX_GK = 4, /** rxgk, RFC 3961 crypto. */
+ RX_SECIDX_K5 = 5, /** kerberos 5 tickets as tokens. */
} rx_securityIndex;
struct rx_securityObjectStats {
afs_uint32 interfaceId;
afs_uint32 func_total;
afs_uint32 func_index;
- afs_hyper_t invocations;
- afs_hyper_t bytes_sent;
- afs_hyper_t bytes_rcvd;
+ afs_uint64 invocations;
+ afs_uint64 bytes_sent;
+ afs_uint64 bytes_rcvd;
struct clock queue_time_sum;
struct clock queue_time_sum_sqr;
struct clock queue_time_min;
#define RX_STATS_RETRIEVAL_FIRST_EDITION 1 /* first implementation */
typedef struct rx_interface_stat {
- struct rx_queue queue_header;
- struct rx_queue all_peers;
+ struct opr_queue entry;
+ struct opr_queue entryPeers;
rx_function_entry_v1_t stats[1]; /* make sure this is aligned correctly */
} rx_interface_stat_t, *rx_interface_stat_p;
#include "rx_prototypes.h"
#endif
+static_inline afs_uint32
+RPCOpStat_Peer(void *blob) {
+    /* View the opaque blob as a v1 op-stats entry; return its remote_peer. */
+    return ((rx_function_entry_v1_p)blob)->remote_peer;
+}
+
+static_inline afs_uint32
+RPCOpStat_Port(void *blob) {
+    /* View the opaque blob as a v1 op-stats entry; return its remote_port. */
+    return ((rx_function_entry_v1_p)blob)->remote_port;
+}
+
+static_inline afs_uint32
+RPCOpStat_IsServer(void *blob) {
+    /* Return the remote_is_server flag field of the v1 op-stats entry. */
+    return ((rx_function_entry_v1_p)blob)->remote_is_server;
+}
+
+static_inline afs_uint32
+RPCOpStat_InterfaceId(void *blob) {
+    /* Return the interfaceId field of the v1 op-stats entry. */
+    return ((rx_function_entry_v1_p)blob)->interfaceId;
+}
+
+static_inline afs_uint32
+RPCOpStat_NumFuncs(void *blob) {
+    /* Return func_total: the number of functions covered by this entry. */
+    return ((rx_function_entry_v1_p)blob)->func_total;
+}
+
+static_inline afs_uint32
+RPCOpStat_CurFunc(void *blob) {
+    /* Return func_index: the index of this function within the interface. */
+    return ((rx_function_entry_v1_p)blob)->func_index;
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSum(void *blob) {
+    /* Address of the queue_time_sum clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->queue_time_sum;
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSumSqr(void *blob) {
+    /* Address of the queue_time_sum_sqr clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->queue_time_sum_sqr;
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSumMin(void *blob) {
+    /* Address of the queue_time_min clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->queue_time_min;
+}
+
+static_inline struct clock *
+RPCOpStat_QTimeSumMax(void *blob) {
+    /* Address of the queue_time_max clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->queue_time_max;
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSum(void *blob) {
+    /* Address of the execution_time_sum clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->execution_time_sum;
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSumSqr(void *blob) {
+    /* Address of the execution_time_sum_sqr clock in the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->execution_time_sum_sqr;
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSumMin(void *blob) {
+    /* Address of the execution_time_min clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->execution_time_min;
+}
+
+static_inline struct clock *
+RPCOpStat_ExecTimeSumMax(void *blob) {
+    /* Address of the execution_time_max clock inside the v1 op-stats entry. */
+    return &((rx_function_entry_v1_p)blob)->execution_time_max;
+}
+
+static_inline afs_uint64
+RPCOpStat_NumCalls(void *blob) {
+    /* Return invocations: how many times this op was called. */
+    return ((rx_function_entry_v1_p)blob)->invocations;
+}
+
+static_inline afs_uint64
+RPCOpStat_BytesSent(void *blob) {
+    /* Return the bytes_sent counter of the v1 op-stats entry. */
+    return ((rx_function_entry_v1_p)blob)->bytes_sent;
+}
+
+static_inline afs_uint64
+RPCOpStat_BytesRcvd(void *blob) {
+    /* Return the bytes_rcvd counter of the v1 op-stats entry. */
+    return ((rx_function_entry_v1_p)blob)->bytes_rcvd;
+}
#endif /* !KDUMP_RX_LOCK */