/* rxk_NewSocket
* open and bind RX socket
*/
/* Hunk: rxk_NewSocketHost -- two changes:
 *  1. Return type "struct osi_socket *" -> "osi_socket *"; osi_socket is
 *     presumably now a typedef rather than a struct tag -- TODO confirm
 *     against the osi_socket declaration in the osi headers.
 *  2. The setsockopt() call gains a (char *) cast on &pmtu (matching the
 *     kernel proto_ops setsockopt prototype's char-pointer argument) and
 *     is re-wrapped for line length; behavior is otherwise unchanged.
 * NOTE(review): this hunk elides unchanged context lines (e.g. between the
 * local declarations and TO_USER_SPACE()), so the text below is not a
 * complete function body -- do not apply it as standalone C.
 */
-struct osi_socket *
+osi_socket *
rxk_NewSocketHost(afs_uint32 ahost, short aport)
{
struct socket *sockp;
}
TO_USER_SPACE();
/* Cast added so &pmtu matches the (char *) sockopt argument type. */
- sockp->ops->setsockopt(sockp, SOL_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));
+ sockp->ops->setsockopt(sockp, SOL_IP, IP_MTU_DISCOVER, (char *)&pmtu,
+ sizeof(pmtu));
TO_KERNEL_SPACE();
/* Return cast updated to match the new osi_socket * return type. */
- return (struct osi_socket *)sockp;
+ return (osi_socket *)sockp;
}
/* Hunk: rxk_NewSocket -- same return-type change as rxk_NewSocketHost
 * ("struct osi_socket *" -> "osi_socket *").  The body, which simply
 * delegates to rxk_NewSocketHost() bound to INADDR_ANY, is unchanged. */
-struct osi_socket *
+osi_socket *
rxk_NewSocket(short aport)
{
return rxk_NewSocketHost(htonl(INADDR_ANY), aport);
* non-zero = failure
*/
/* Hunk: osi_NetSend -- three changes visible in this hunk:
 *  1. The iovec parameter is renamed "iov" -> "iovec" (name-only change in
 *     the definition; callers are unaffected).
 *  2. The local tmpvec[] staging copy is removed: the caller's iovec array
 *     is now handed directly to the msghdr.  NOTE(review): this also drops
 *     the RX_MAXWVECS bounds panic and the iovcnt <= 2 short-circuit that
 *     forced a single iovec of length `size` -- presumably callers are now
 *     guaranteed to pass well-formed iovecs of the right total size;
 *     verify against the rx packet-send paths.
 *  3. On send failure, new Linux 2.6 power-management support: if the task
 *     is being frozen for suspend, enter the refrigerator so suspend can
 *     proceed.  The "am I being frozen" test varies by kernel version
 *     (PF_FREEZE flag, task->todo, or TIF_FREEZE via thread_info), as does
 *     refrigerator()'s signature -- hence the nested #ifdef ladder.
 * NOTE(review): the hunk elides unchanged context (e.g. the sendmsg call
 * between TO_KERNEL_SPACE() and the code < 0 test); not a complete body.
 */
int
-osi_NetSend(osi_socket sop, struct sockaddr_in *to, struct iovec *iov,
+osi_NetSend(osi_socket sop, struct sockaddr_in *to, struct iovec *iovec,
int iovcnt, afs_int32 size, int istack)
{
KERNEL_SPACE_DECL;
struct msghdr msg;
int code;
/* Removed: local staging copy of the iovec array plus its sanity checks. */
- struct iovec tmpvec[RX_MAXWVECS + 2];
-
- if (iovcnt > RX_MAXWVECS + 2) {
- osi_Panic("Too many (%d) iovecs passed to osi_NetSend\n", iovcnt);
- }
- if (iovcnt <= 2) { /* avoid pointless uiomove */
- tmpvec[0].iov_base = iov[0].iov_base;
- tmpvec[0].iov_len = size;
- msg.msg_iovlen = 1;
- } else {
- memcpy(tmpvec, iov, iovcnt * sizeof(struct iovec));
- msg.msg_iovlen = iovcnt;
- }
- msg.msg_iov = tmpvec;
/* Replacement: pass the caller's iovec array straight through. */
+ msg.msg_iovlen = iovcnt;
+ msg.msg_iov = iovec;
msg.msg_name = to;
msg.msg_namelen = sizeof(*to);
msg.msg_control = NULL;
TO_KERNEL_SPACE();
if (code < 0) {
/* New: cooperate with the suspend freezer on 2.6 kernels.  The whole
 * #ifdef ladder reduces to: "if this task is marked for freezing, call
 * refrigerator()".  NOTE(review): the if-condition and the refrigerator
 * call are split across preprocessor blocks with no braces -- fragile
 * but apparently intentional; the set_current_state() afterwards restores
 * the sleep state refrigerator() may have clobbered -- TODO confirm. */
+#ifdef AFS_LINUX26_ENV
+#ifdef CONFIG_PM
+ if (
+#ifdef PF_FREEZE
+ current->flags & PF_FREEZE
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
+ !current->todo
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
+ test_ti_thread_flag(current->thread_info, TIF_FREEZE)
+#else
+ test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
+#endif
+#endif
+#endif
+ )
+#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
+ refrigerator(PF_FREEZE);
+#else
+ refrigerator();
+#endif
+ set_current_state(TASK_INTERRUPTIBLE);
+#endif
+#endif
+
/* Clear the error before using the socket again.
 * Oh joy, Linux has hidden header files as well. It appears we can
 * simply call again and have it clear itself via sock_error().
return code;
}
-
/* New: tasklist_lock is not exported by every kernel build.  Declaring it
 * with __attribute__((weak)) lets osi_StopListener() test the symbol's
 * presence at run time ("if (&tasklist_lock)") and fall back to RCU when
 * the kernel did not export it. */
+#ifdef EXPORTED_TASKLIST_LOCK
+extern rwlock_t tasklist_lock __attribute__((weak));
+#endif
/* Hunk: osi_StopListener -- restructured shutdown of the rx listener task.
 * Old shape: take tasklist_lock (2.4+), look up the pid once, then loop
 * signalling the cached task pointer.  New shape: loop entirely -- each
 * iteration (a) takes tasklist_lock if the weak symbol is present,
 * otherwise rcu_read_lock() on >= 2.6.16, (b) re-looks-up the listener
 * pid, (c) SIGKILLs it if still alive, (d) releases the lock the same
 * way, and (e) breaks once the task is gone, else sleeps and retries.
 * Re-looking-up each pass avoids signalling a stale task_struct.
 * NOTE(review): the bare "else" lines pair the rcu path with the
 * "if (&tasklist_lock)" weak-symbol test across #ifdef boundaries --
 * correct only when both EXPORTED_TASKLIST_LOCK and the 2.6.16 guard
 * expand consistently; fragile, verify all four config combinations.
 * NOTE(review): the function continues past the visible region
 * (sock_release(rx_socket) is not its last statement here).
 */
void
osi_StopListener(void)
{
struct task_struct *listener;
extern int rxk_ListenerPid;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- read_lock(&tasklist_lock);
+ while (rxk_ListenerPid) {
+#ifdef EXPORTED_TASKLIST_LOCK
+ if (&tasklist_lock)
+ read_lock(&tasklist_lock);
#endif
- listener = find_task_by_pid(rxk_ListenerPid);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- read_unlock(&tasklist_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+#ifdef EXPORTED_TASKLIST_LOCK
+ else
#endif
- while (rxk_ListenerPid) {
- struct task_struct *p;
-
- flush_signals(listener);
- force_sig(SIGKILL, listener);
+ rcu_read_lock();
+#endif
/* Lookup is now inside the loop, under the lock, so the task pointer
 * is never used after the task could have exited. */
+ listener = find_task_by_pid(rxk_ListenerPid);
+ if (listener) {
+ flush_signals(listener);
+ force_sig(SIGKILL, listener);
+ }
+#ifdef EXPORTED_TASKLIST_LOCK
+ if (&tasklist_lock)
+ read_unlock(&tasklist_lock);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+#ifdef EXPORTED_TASKLIST_LOCK
+ else
#endif
+ rcu_read_unlock();
+#endif
/* Listener already gone: nothing left to wait for. */
+ if (!listener)
+ break;
afs_osi_Sleep(&rxk_ListenerPid);
}
sock_release(rx_socket);