if test "x$ac_cv_linux_fs_struct_inode_has_i_security" = "xyes"; then
AC_DEFINE(STRUCT_INODE_HAS_I_SECURITY, 1, [define if you struct inode has i_security])
fi
+ if test "x$ac_cv_linux_fs_struct_inode_has_i_mutex" = "xyes"; then
+ AC_DEFINE(STRUCT_INODE_HAS_I_MUTEX, 1, [define if your struct inode has i_mutex])
+ fi
if test "x$ac_cv_linux_fs_struct_inode_has_i_sb_list" = "xyes"; then
AC_DEFINE(STRUCT_INODE_HAS_I_SB_LIST, 1, [define if you struct inode has i_sb_list])
fi
#define afs_linux_page_address(page) (afs_linux_page_offset + PAGE_SIZE * (page - mem_map))
#if defined(__KERNEL__)
-#include "../h/sched.h"
-#include "linux/wait.h"
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+extern struct mutex afs_global_lock;
+#else
extern struct semaphore afs_global_lock;
+#define mutex_lock(lock) down(lock)
+#define mutex_unlock(lock) up(lock)
+#endif
extern int afs_global_owner;
#define AFS_GLOCK() \
do { \
- down(&afs_global_lock); \
+ mutex_lock(&afs_global_lock); \
if (afs_global_owner) \
osi_Panic("afs_global_lock already held by pid %d", \
afs_global_owner); \
if (!ISAFS_GLOCK()) \
osi_Panic("afs global lock not held at %s:%d", __FILE__, __LINE__); \
afs_global_owner = 0; \
- up(&afs_global_lock); \
+ mutex_unlock(&afs_global_lock); \
} while (0)
-
-
#else
#define AFS_GLOCK()
#define AFS_GUNLOCK()
static long get_page_offset(void);
#endif
-#if defined(AFS_LINUX24_ENV)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+DEFINE_MUTEX(afs_global_lock);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
DECLARE_MUTEX(afs_global_lock);
#else
struct semaphore afs_global_lock = MUTEX;
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/semaphore.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+#include <linux/mutex.h>
+#endif
#include <linux/errno.h>
#ifdef COMPLETION_H_EXISTS
#include <linux/completion.h>
void
afs_mutex_init(afs_kmutex_t * l)
{
-#if defined(AFS_LINUX24_ENV)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ mutex_init(&l->mutex);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
init_MUTEX(&l->sem);
#else
l->sem = MUTEX;
void
afs_mutex_enter(afs_kmutex_t * l)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ mutex_lock(&l->mutex);
+#else
down(&l->sem);
+#endif
if (l->owner)
osi_Panic("mutex_enter: 0x%x held by %d", l, l->owner);
l->owner = current->pid;
int
afs_mutex_tryenter(afs_kmutex_t * l)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ if (mutex_trylock(&l->mutex) == 0)
+#else
if (down_trylock(&l->sem))
+#endif
return 0;
l->owner = current->pid;
return 1;
if (l->owner != current->pid)
osi_Panic("mutex_exit: 0x%x held by %d", l, l->owner);
l->owner = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ mutex_unlock(&l->mutex);
+#else
up(&l->sem);
+#endif
}
/* CV_WAIT and CV_TIMEDWAIT sleep until the specified event occurs, or, in the
struct coda_inode_info {
};
#endif
-#include "linux/wait.h"
-#include "linux/sched.h"
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+#include <linux/mutex.h>
+#else
+#include <asm/semaphore.h>
+#endif
typedef struct afs_kmutex {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ struct mutex mutex;
+#else
struct semaphore sem;
+#endif
int owner;
} afs_kmutex_t;