/* Kernel compatibility routines
 *
 * This file contains definitions to provide compatibility between different
 * versions of the Linux kernel. It is an ifdef maze, but the idea is that
 * by concentrating the horror here, the rest of the tree may remain a
9 #ifndef AFS_LINUX_OSI_COMPAT_H
10 #define AFS_LINUX_OSI_COMPAT_H
/* Pull in the freezer interface where the kernel provides it as its own
 * header (split out of sched.h in 2.6.20-era kernels). */
#if defined(HAVE_LINUX_FREEZER_H)
# include <linux/freezer.h>
#endif

#if defined(LINUX_KEYRING_SUPPORT)
# include <linux/rwsem.h>
# include <linux/key.h>
# if defined(HAVE_LINUX_KEY_TYPE_H)
#  include <linux/key-type.h>
# endif
# ifndef KEY_ALLOC_IN_QUOTA
/* Before these flags were added in Linux commit v2.6.18-rc1~816,
 * key_alloc just took a boolean not_in_quota */
#  define KEY_ALLOC_IN_QUOTA 0
#  define KEY_ALLOC_NOT_IN_QUOTA 1
# endif
#endif
/* Older kernels spelled the automount dentry flag DMANAGED_AUTOMOUNT. */
#if defined(STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT) && !defined(DCACHE_NEED_AUTOMOUNT)
# define DCACHE_NEED_AUTOMOUNT DMANAGED_AUTOMOUNT
#endif

/* Some vendor kernels renamed struct path to struct vfs_path; hide the
 * difference behind a single typedef. */
#ifdef HAVE_LINUX_STRUCT_VFS_PATH
typedef struct vfs_path afs_linux_path_t;
#else
typedef struct path afs_linux_path_t;
#endif

/* d_alias moved into the d_u union on newer kernels. */
#if defined(STRUCT_DENTRY_HAS_D_U_D_ALIAS)
# define d_alias d_u.d_alias
#endif

/* struct file lost its f_dentry member in favour of f_path.dentry. */
#if defined(STRUCT_FILE_HAS_F_PATH)
# if !defined(f_dentry)
#  define f_dentry f_path.dentry
# endif
#endif
50 #ifndef HAVE_LINUX_DO_SYNC_READ
52 do_sync_read(struct file *fp, char *buf, size_t count, loff_t *offp) {
53 return generic_file_read(fp, buf, count, offp);
57 do_sync_write(struct file *fp, char *buf, size_t count, loff_t *offp) {
58 return generic_file_write(fp, buf, count, offp);
61 #endif /* DO_SYNC_READ */
64 afs_posix_lock_file(struct file *fp, struct file_lock *flp) {
65 #ifdef POSIX_LOCK_FILE_WAIT_ARG
66 return posix_lock_file(fp, flp, NULL);
68 flp->fl_flags &=~ FL_SLEEP;
69 return posix_lock_file(fp, flp);
/* Test for a conflicting POSIX lock. On success *flp describes the
 * conflicting lock; if there is none, flp->fl_type is left as F_UNLCK by
 * the modern in-place API. Three historical calling conventions exist. */
static inline void
afs_posix_test_lock(struct file *fp, struct file_lock *flp) {
#if defined(POSIX_TEST_LOCK_CONFLICT_ARG)
    /* Variant taking an explicit out-parameter for the conflict. */
    struct file_lock conflict;

    if (posix_test_lock(fp, flp, &conflict)) {
	locks_copy_lock(flp, &conflict);
	flp->fl_type = F_UNLCK;
    }
#elif defined(POSIX_TEST_LOCK_RETURNS_CONFLICT)
    /* Variant returning a pointer to the conflicting lock (or NULL). */
    struct file_lock *conflict;

    conflict = posix_test_lock(fp, flp);
    if (conflict) {
	locks_copy_lock(flp, conflict);
	flp->fl_type = F_UNLCK;
    }
#else
    /* Modern variant: result is written back into *flp directly. */
    posix_test_lock(fp, flp);
#endif
}
/* Helpers for the "silly rename" dentry flag. On kernels without
 * DCACHE_NFSFS_RENAMED these collapse to no-ops, but they must keep the
 * same signature (taking the dentry) in both configurations so call
 * sites compile either way — the original stubs wrongly took no
 * arguments, and the query stub was missing entirely. */
#ifdef DCACHE_NFSFS_RENAMED
static inline void
afs_linux_clear_nfsfs_renamed(struct dentry *dp) {
    spin_lock(&dp->d_lock);
    dp->d_flags &= ~DCACHE_NFSFS_RENAMED;
    spin_unlock(&dp->d_lock);
}

static inline void
afs_linux_set_nfsfs_renamed(struct dentry *dp) {
    spin_lock(&dp->d_lock);
    dp->d_flags |= DCACHE_NFSFS_RENAMED;
    spin_unlock(&dp->d_lock);
}

static inline int
afs_linux_nfsfs_renamed(struct dentry *dp) {
    return dp->d_flags & DCACHE_NFSFS_RENAMED;
}

#else
static inline void afs_linux_clear_nfsfs_renamed(struct dentry *dp) { return; }
static inline void afs_linux_set_nfsfs_renamed(struct dentry *dp) { return; }
static inline int afs_linux_nfsfs_renamed(struct dentry *dp) { return 0; }
#endif
118 #ifndef HAVE_LINUX_HLIST_UNHASHED
120 hlist_unhashed(const struct hlist_node *h) {
121 return (!h->pprev == NULL);
/* AOP_WRITEPAGE_ACTIVATE used to be plain WRITEPAGE_ACTIVATE. */
#if defined(WRITEPAGE_ACTIVATE)
# define AOP_WRITEPAGE_ACTIVATE WRITEPAGE_ACTIVATE
#endif

/* Kernels that grew the write_begin address-space op before exporting
 * grab_cache_page_write_begin() still had __grab_cache_page(); wrap it.
 * The flags argument is accepted but unused by the old helper. */
#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN) && !defined(HAVE_LINUX_GRAB_CACHE_PAGE_WRITE_BEGIN)
static inline struct page *
grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index,
			    unsigned int flags) {
    return __grab_cache_page(mapping, index);
}
#endif
/* kmem_cache_t was typedef'd away on newer kernels. */
#if defined(HAVE_KMEM_CACHE_T)
# define afs_kmem_cache_t kmem_cache_t
#else
# define afs_kmem_cache_t struct kmem_cache
#endif

/* The slab constructor callback signature has changed repeatedly; present
 * one init_once_func whose shape matches whatever this kernel expects,
 * forwarding to the AFS-provided init_once(). */
extern void init_once(void *);
#if defined(HAVE_KMEM_CACHE_T)
static inline void
init_once_func(void * foo, kmem_cache_t * cachep, unsigned long flags) {
#if defined(SLAB_CTOR_VERIFY)
    /* Only construct when the slab layer asks for construction proper. */
    if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	SLAB_CTOR_CONSTRUCTOR)
#endif
	init_once(foo);
}
#elif defined(KMEM_CACHE_INIT)
static inline void
init_once_func(struct kmem_cache * cachep, void * foo) {
    init_once(foo);
}
#elif !defined(KMEM_CACHE_CTOR_TAKES_VOID)
static inline void
init_once_func(void * foo, struct kmem_cache * cachep, unsigned long flags) {
#if defined(SLAB_CTOR_VERIFY)
    if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	SLAB_CTOR_CONSTRUCTOR)
#endif
	init_once(foo);
}
#else
/* Modern kernels: constructor takes just the object pointer. */
static inline void
init_once_func(void * foo) {
    init_once(foo);
}
#endif
/* SLAB_RECLAIM_ACCOUNT is advisory; absent support, it is harmless as 0. */
#ifndef SLAB_RECLAIM_ACCOUNT
# define SLAB_RECLAIM_ACCOUNT 0
#endif

/* Allocation-type flag for slab allocations: SLAB_KERNEL on very old
 * kernels, GFP_KERNEL otherwise. */
#if defined(SLAB_KERNEL)
# define KALLOC_TYPE SLAB_KERNEL
#else
# define KALLOC_TYPE GFP_KERNEL
#endif
184 #ifdef LINUX_KEYRING_SUPPORT
185 static inline struct key *
186 afs_linux_key_alloc(struct key_type *type, const char *desc, afs_kuid_t uid,
187 afs_kgid_t gid, key_perm_t perm, unsigned long flags)
189 # if defined(KEY_ALLOC_NEEDS_STRUCT_TASK)
190 return key_alloc(type, desc, uid, gid, current, perm, flags);
191 # elif defined(KEY_ALLOC_NEEDS_CRED)
192 return key_alloc(type, desc, uid, gid, current_cred(), perm, flags);
194 return key_alloc(type, desc, uid, gid, perm, flags);
198 # if defined(STRUCT_TASK_STRUCT_HAS_CRED)
199 static inline struct key *
200 afs_session_keyring(afs_ucred_t *cred)
202 # if defined(STRUCT_CRED_HAS_SESSION_KEYRING)
203 return cred->session_keyring;
205 return cred->tgcred->session_keyring;
209 static inline struct key*
210 afs_linux_search_keyring(afs_ucred_t *cred, struct key_type *type)
214 if (afs_session_keyring(cred)) {
215 key_ref = keyring_search(
216 make_key_ref(afs_session_keyring(cred), 1),
219 return ERR_CAST(key_ref);
221 return key_ref_to_ptr(key_ref);
224 return ERR_PTR(-ENOKEY);
227 static inline struct key*
228 afs_linux_search_keyring(afs_ucred_t *cred, struct key_type *type)
230 return request_key(type, "_pag", NULL);
232 # endif /* STRUCT_TASK_STRUCT_HAS_CRED */
234 static_inline struct key *
235 afs_set_session_keyring(struct key *keyring)
238 #if defined(STRUCT_CRED_HAS_SESSION_KEYRING)
239 struct cred *new_creds;
240 old = current_session_keyring();
241 new_creds = prepare_creds();
242 rcu_assign_pointer(new_creds->session_keyring, keyring);
243 commit_creds(new_creds);
245 spin_lock_irq(¤t->sighand->siglock);
246 old = task_session_keyring(current);
248 task_session_keyring(current) = keyring;
249 spin_unlock_irq(¤t->sighand->siglock);
253 #endif /* LINUX_KEYRING_SUPPORT */
255 #ifdef STRUCT_TASK_STRUCT_HAS_CRED
257 afs_linux_cred_is_current(afs_ucred_t *cred)
259 return (cred == current_cred());
263 afs_linux_cred_is_current(afs_ucred_t *cred)
269 #ifndef HAVE_LINUX_PAGE_OFFSET
271 page_offset(struct page *pp)
273 return (((loff_t) pp->index) << PAGE_CACHE_SHIFT);
277 #ifndef HAVE_LINUX_ZERO_USER_SEGMENTS
279 zero_user_segments(struct page *pp, unsigned int from1, unsigned int to1,
280 unsigned int from2, unsigned int to2)
282 void *base = kmap_atomic(pp, KM_USER0);
285 memset(base + from1, 0, to1 - from1);
288 memset(base + from2, 0, to2 - from2);
290 flush_dcache_page(pp);
291 kunmap_atomic(base, KM_USER0);
295 zero_user_segment(struct page *pp, unsigned int from1, unsigned int to1)
297 zero_user_segments(pp, from1, to1, 0, 0);
301 #ifndef HAVE_LINUX_KERNEL_SETSOCKOPT
302 /* Available from 2.6.19 */
305 kernel_setsockopt(struct socket *sockp, int level, int name, char *val,
307 mm_segment_t old_fs = get_fs();
311 ret = sockp->ops->setsockopt(sockp, level, name, val, len);
318 kernel_getsockopt(struct socket *sockp, int level, int name, char *val,
320 mm_segment_t old_fs = get_fs();
324 ret = sockp->ops->getsockopt(sockp, level, name, val, len);
331 #ifdef HAVE_TRY_TO_FREEZE
333 afs_try_to_freeze(void) {
334 # ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
335 return try_to_freeze(PF_FREEZE);
337 return try_to_freeze();
342 afs_try_to_freeze(void) {
344 if (current->flags & PF_FREEZE) {
345 refrigerator(PF_FREEZE);
353 /* The commit which changed refrigerator so that it takes no arguments
354 * also added freezing(), so if LINUX_REFRIGERATOR_TAKES_PF_FREEZE is
355 * true, the kernel doesn't have a freezing() function.
357 #ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
359 freezing(struct task_struct *p)
362 return p->flags & PF_FREEZE;
/* PageChecked was once named PageFsMisc; map the modern names onto the
 * old page flag where needed. */
#if !defined(HAVE_LINUX_PAGECHECKED)
# if defined(HAVE_LINUX_PAGEFSMISC)
#  include <linux/page-flags.h>

#  define PageChecked(p)		PageFsMisc((p))
#  define SetPageChecked(p)		SetPageFsMisc((p))
#  define ClearPageChecked(p)		ClearPageFsMisc((p))

# endif
#endif
380 #if !defined(NEW_EXPORT_OPS)
381 extern struct export_operations export_op_default;
384 static inline struct dentry *
385 afs_get_dentry_from_fh(struct super_block *afs_cacheSBp, afs_dcache_id_t *ainode,
386 int cache_fh_len, int cache_fh_type,
387 int (*afs_fh_acceptable)(void *, struct dentry *)) {
388 #if defined(NEW_EXPORT_OPS)
389 return afs_cacheSBp->s_export_op->fh_to_dentry(afs_cacheSBp, &ainode->ufs.fh,
390 cache_fh_len, cache_fh_type);
392 if (afs_cacheSBp->s_export_op && afs_cacheSBp->s_export_op->decode_fh)
393 return afs_cacheSBp->s_export_op->decode_fh(afs_cacheSBp, ainode->ufs.raw,
394 cache_fh_len, cache_fh_type, afs_fh_acceptable, NULL);
396 return export_op_default.decode_fh(afs_cacheSBp, ainode->ufs.raw,
397 cache_fh_len, cache_fh_type, afs_fh_acceptable, NULL);
402 afs_get_fh_from_dentry(struct dentry *dp, afs_ufs_dcache_id_t *ainode, int *max_lenp) {
403 if (dp->d_sb->s_export_op->encode_fh)
404 #if defined(EXPORT_OP_ENCODE_FH_TAKES_INODES)
405 return dp->d_sb->s_export_op->encode_fh(dp->d_inode, &ainode->raw[0], max_lenp, NULL);
407 return dp->d_sb->s_export_op->encode_fh(dp, &ainode->raw[0], max_lenp, 0);
409 #if defined(NEW_EXPORT_OPS)
410 /* If fs doesn't provide an encode_fh method, assume the default INO32 type */
411 *max_lenp = sizeof(struct fid)/4;
412 ainode->fh.i32.ino = dp->d_inode->i_ino;
413 ainode->fh.i32.gen = dp->d_inode->i_generation;
414 return FILEID_INO32_GEN;
416 /* or call the default encoding function for the old API */
417 return export_op_default.encode_fh(dp, &ainode->raw[0], max_lenp, 0);
422 afs_init_sb_export_ops(struct super_block *sb) {
423 #if !defined(NEW_EXPORT_OPS)
425 * decode_fh will call this function. If not defined for this FS, make
426 * sure it points to the default
428 if (!sb->s_export_op->find_exported_dentry) {
429 /* Some kernels (at least 2.6.9) do not prototype find_exported_dentry,
430 * even though it is exported, so prototype it ourselves. Newer
431 * kernels do prototype it, but as long as our protoype matches the
432 * real one (the signature never changed before NEW_EXPORT_OPS came
433 * into play), there should be no problems. */
434 extern struct dentry * find_exported_dentry(struct super_block *sb, void *obj, void *parent,
435 int (*acceptable)(void *context, struct dentry *de),
437 sb->s_export_op->find_exported_dentry = find_exported_dentry;
443 afs_linux_lock_inode(struct inode *ip) {
444 #ifdef STRUCT_INODE_HAS_I_MUTEX
445 mutex_lock(&ip->i_mutex);
452 afs_linux_unlock_inode(struct inode *ip) {
453 #ifdef STRUCT_INODE_HAS_I_MUTEX
454 mutex_unlock(&ip->i_mutex);
461 afs_inode_setattr(struct osi_file *afile, struct iattr *newattrs) {
464 struct inode *inode = OSIFILE_INODE(afile);
465 #if !defined(HAVE_LINUX_INODE_SETATTR)
466 code = inode->i_op->setattr(afile->filp->f_dentry, newattrs);
467 #elif defined(INODE_SETATTR_NOT_VOID)
468 if (inode->i_op && inode->i_op->setattr)
469 code = inode->i_op->setattr(afile->filp->f_dentry, newattrs);
471 code = inode_setattr(inode, newattrs);
473 inode_setattr(inode, newattrs);
478 #if defined(HAVE_LINUX_PATH_LOOKUP)
480 afs_kern_path(char *aname, int flags, struct nameidata *nd) {
481 return path_lookup(aname, flags, nd);
485 afs_kern_path(char *aname, int flags, afs_linux_path_t *path) {
486 return kern_path(aname, flags, path);
491 #if defined(HAVE_LINUX_PATH_LOOKUP)
492 afs_get_dentry_ref(struct nameidata *nd, struct vfsmount **mnt, struct dentry **dpp) {
494 afs_get_dentry_ref(afs_linux_path_t *path, struct vfsmount **mnt, struct dentry **dpp) {
496 #if defined(HAVE_LINUX_PATH_LOOKUP)
497 # if defined(STRUCT_NAMEIDATA_HAS_PATH)
498 *dpp = dget(nd->path.dentry);
500 *mnt = mntget(nd->path.mnt);
503 *dpp = dget(nd->dentry);
505 *mnt = mntget(nd->mnt);
509 *dpp = dget(path->dentry);
511 *mnt = mntget(path->mnt);
/* wait_event_freezable appeared with 2.6.24 */

/* These implement the original AFS wait behaviour, with respect to the
 * refrigerator, rather than the behaviour of the current wait_event_freezable
 * implementation.
 */
#ifndef wait_event_freezable
# define wait_event_freezable(waitqueue, condition)				\
({										\
    int _ret;									\
    do {									\
	_ret = wait_event_interruptible(waitqueue,				\
					(condition) || freezing(current));	\
	if (_ret && !freezing(current))						\
	    break;								\
	else if (!(condition))							\
	    _ret = -EINTR;							\
    } while (afs_try_to_freeze());						\
    _ret;									\
})

# define wait_event_freezable_timeout(waitqueue, condition, timeout)		\
({										\
    long _ret;									\
    do {									\
	_ret = wait_event_interruptible_timeout(waitqueue,			\
						(condition ||			\
						 freezing(current)),		\
						timeout);			\
    } while (afs_try_to_freeze());						\
    _ret;									\
})
#endif
/* Open a file from a dentry/vfsmount pair under the given credentials.
 * Newer kernels take a struct path (and take their own references via
 * path_get); older ones consume references to the dentry and mount. */
#if defined(STRUCT_TASK_STRUCT_HAS_CRED)
static inline struct file *
afs_dentry_open(struct dentry *dp, struct vfsmount *mnt, int flags, const struct cred *creds) {
#if defined(DENTRY_OPEN_TAKES_PATH)
    afs_linux_path_t path;
    struct file *filp;

    path.mnt = mnt;
    path.dentry = dp;
    /* note that dentry_open will path_get for us */
    filp = dentry_open(&path, flags, creds);
    return filp;
#else
    return dentry_open(dget(dp), mntget(mnt), flags, creds);
#endif
}
#endif
/* Truncate an inode to len bytes. vmtruncate() disappeared; on newer
 * kernels validate the new size first, then apply it with
 * truncate_setsize(). Returns 0 or a negative errno. */
static inline int
afs_truncate(struct inode *inode, int len)
{
    int code;
#if defined(STRUCT_INODE_OPERATIONS_HAS_TRUNCATE)
    code = vmtruncate(inode, len);
#else
    code = inode_newsize_ok(inode, len);
    if (!code)
	truncate_setsize(inode, len);
#endif
    return code;
}
582 static inline struct proc_dir_entry *
583 afs_proc_create(char *name, umode_t mode, struct proc_dir_entry *parent, struct file_operations *fops) {
584 #if defined(HAVE_LINUX_PROC_CREATE)
585 return proc_create(name, mode, parent, fops);
587 struct proc_dir_entry *entry;
588 entry = create_proc_entry(name, mode, parent);
590 entry->proc_fops = fops;
596 afs_dentry_count(struct dentry *dp)
598 #if defined(HAVE_LINUX_D_COUNT)
600 #elif defined(D_COUNT_INT)
603 return atomic_read(&dp->d_count);
/* Prune child dentries when the dentry is referenced by someone besides
 * us. Where d_count is not atomic, it must be sampled under d_lock. */
static inline void
afs_maybe_shrink_dcache(struct dentry *dp)
{
#if defined(HAVE_LINUX_D_COUNT) || defined(D_COUNT_INT)
    spin_lock(&dp->d_lock);
    if (afs_dentry_count(dp) > 1) {
	spin_unlock(&dp->d_lock);
	shrink_dcache_parent(dp);
    } else
	spin_unlock(&dp->d_lock);
#else
    if (afs_dentry_count(dp) > 1)
	shrink_dcache_parent(dp);
#endif
}
/* Invalidate a dentry, normalizing d_invalidate()'s return type: newer
 * kernels made it void, in which case report success. */
static inline int
afs_d_invalidate(struct dentry *dp)
{
#if defined(D_INVALIDATE_IS_VOID)
    d_invalidate(dp);
    return 0;
#else
    return d_invalidate(dp);
#endif
}
634 #endif /* AFS_LINUX_OSI_COMPAT_H */