1 /* Kernel compatibility routines
3 * This file contains definitions to provide compatibility between different
4 * versions of the Linux kernel. It is an ifdef maze, but the idea is that
5 * by concentrating the horror here, the rest of the tree may remain a
9 #ifndef AFS_LINUX_OSI_COMPAT_H
10 #define AFS_LINUX_OSI_COMPAT_H
12 #if defined(HAVE_LINUX_FREEZER_H)
13 # include <linux/freezer.h>
16 #if defined(HAVE_LINUX_FILELOCK_H)
17 # include <linux/filelock.h>
20 #if defined(LINUX_KEYRING_SUPPORT)
21 # include <linux/rwsem.h>
22 # include <linux/key.h>
23 # if defined(HAVE_LINUX_KEY_TYPE_H)
24 # include <linux/key-type.h>
26 # ifndef KEY_ALLOC_IN_QUOTA
27 /* Before these flags were added in Linux commit v2.6.18-rc1~816,
28 * key_alloc just took a boolean not_in_quota */
29 # define KEY_ALLOC_IN_QUOTA 0
30 # define KEY_ALLOC_NOT_IN_QUOTA 1
/* Name-compatibility shims: map newer kernel identifiers onto older ones
 * (or vice versa) so the rest of the tree can use one spelling.
 * NOTE(review): this extract appears to be missing the matching #else /
 * #endif lines for several conditionals below — verify against the full
 * file before relying on preprocessor balance. */
34 #if !defined(HAVE_LINUX_KTHREAD_COMPLETE_AND_EXIT)
35 # define kthread_complete_and_exit complete_and_exit
38 #if defined(STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT) && !defined(DCACHE_NEED_AUTOMOUNT)
39 # define DCACHE_NEED_AUTOMOUNT DMANAGED_AUTOMOUNT
/* afs_linux_path_t abstracts over struct vfs_path vs. struct path. */
42 #ifdef HAVE_LINUX_STRUCT_VFS_PATH
43 typedef struct vfs_path afs_linux_path_t;
45 typedef struct path afs_linux_path_t;
/* d_alias / d_child moved into the d_u union on some kernels. */
48 #if defined(STRUCT_DENTRY_HAS_D_U_D_ALIAS)
49 # define d_alias d_u.d_alias
52 #if defined(STRUCT_DENTRY_HAS_D_U_D_CHILD)
53 # define d_child d_u.d_child
/* f_dentry was folded into f_path.dentry. */
56 #if defined(STRUCT_FILE_HAS_F_PATH)
57 # if !defined(f_dentry)
58 # define f_dentry f_path.dentry
62 #ifndef HAVE_LINUX_FILE_DENTRY
63 #define file_dentry(file) ((file)->f_dentry)
/* flock_lock_file_wait was renamed locks_lock_file_wait. */
66 #if defined(HAVE_LINUX_LOCKS_LOCK_FILE_WAIT)
67 # define flock_lock_file_wait locks_lock_file_wait
/* Fallbacks for kernels lacking do_sync_read/do_sync_write: delegate to
 * the generic file read/write helpers. */
70 #if !defined(HAVE_LINUX_DO_SYNC_READ) && !defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
72 do_sync_read(struct file *fp, char *buf, size_t count, loff_t *offp) {
73 return generic_file_read(fp, buf, count, offp);
77 do_sync_write(struct file *fp, char *buf, size_t count, loff_t *offp) {
78 return generic_file_write(fp, buf, count, offp);
81 #endif /* DO_SYNC_READ */
/* posix_lock_file grew a third (conflict) argument on some kernels;
 * without it, clear FL_SLEEP so the call cannot block. */
84 afs_posix_lock_file(struct file *fp, struct file_lock *flp) {
85 #ifdef POSIX_LOCK_FILE_WAIT_ARG
86 return posix_lock_file(fp, flp, NULL);
88 flp->fl_flags &=~ FL_SLEEP;
89 return posix_lock_file(fp, flp);
/* posix_test_lock's conflict-reporting convention varied by kernel:
 * out-parameter, returned pointer, or in-place update of flp. */
94 afs_posix_test_lock(struct file *fp, struct file_lock *flp) {
95 #if defined(POSIX_TEST_LOCK_CONFLICT_ARG)
96 struct file_lock conflict;
97 if (posix_test_lock(fp, flp, &conflict)) {
98 locks_copy_lock(flp, &conflict);
99 flp->fl_type = F_UNLCK;
101 #elif defined(POSIX_TEST_LOCK_RETURNS_CONFLICT)
102 struct file_lock *conflict;
103 conflict = posix_test_lock(fp, flp);
105 locks_copy_lock(flp, conflict);
106 flp->fl_type = F_UNLCK;
109 posix_test_lock(fp, flp);
/* Helpers for the DCACHE_NFSFS_RENAMED dentry flag (sillyrename marker).
 * All flag updates are done under the dentry's d_lock. On kernels without
 * the flag, the helpers degrade to no-ops below. */
113 #ifdef DCACHE_NFSFS_RENAMED
115 afs_linux_clear_nfsfs_renamed(struct dentry *dp) {
116 spin_lock(&dp->d_lock);
117 dp->d_flags &= ~DCACHE_NFSFS_RENAMED;
118 spin_unlock(&dp->d_lock);
122 afs_linux_set_nfsfs_renamed(struct dentry *dp) {
123 spin_lock(&dp->d_lock);
124 dp->d_flags |= DCACHE_NFSFS_RENAMED;
125 spin_unlock(&dp->d_lock);
129 afs_linux_nfsfs_renamed(struct dentry *dp) {
130 return dp->d_flags & DCACHE_NFSFS_RENAMED;
/* No-op stubs when DCACHE_NFSFS_RENAMED is not defined. */
134 static inline void afs_linux_clear_nfsfs_renamed(void) { return; }
135 static inline void afs_linux_set_nfsfs_renamed(void) { return; }
138 #ifndef HAVE_LINUX_HLIST_UNHASHED
/* hlist_unhashed - nonzero iff node 'h' is not on any hash list.
 * An unhashed node has a NULL 'pprev' back-pointer.
 * Fixed: the previous expression, (!h->pprev == NULL), compared the
 * logical negation against NULL/0, which inverted the result (it returned
 * true for a *hashed* node). */
140 hlist_unhashed(const struct hlist_node *h) {
141 return !h->pprev;
/* Writeback return-code rename: WRITEPAGE_ACTIVATE became
 * AOP_WRITEPAGE_ACTIVATE. */
145 #if defined(WRITEPAGE_ACTIVATE)
146 #define AOP_WRITEPAGE_ACTIVATE WRITEPAGE_ACTIVATE
/* Provide grab_cache_page_write_begin for kernels with write_begin but
 * without either flavor of the helper; the 'flags' argument is ignored. */
149 #if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN) && \
150 !defined(HAVE_LINUX_GRAB_CACHE_PAGE_WRITE_BEGIN_WITHFLAGS) && \
151 !defined(HAVE_LINUX_GRAB_CACHE_PAGE_WRITE_BEGIN_NOFLAGS)
152 static inline struct page *
153 grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index,
154 unsigned int flags) {
155 return __grab_cache_page(mapping, index);
/* kmem_cache_t typedef vs. struct kmem_cache spelling. */
159 #if defined(HAVE_KMEM_CACHE_T)
160 #define afs_kmem_cache_t kmem_cache_t
162 #define afs_kmem_cache_t struct kmem_cache
165 extern void init_once(void *);
/* Slab constructor callback: its signature changed repeatedly; each
 * variant of init_once_func adapts one historical prototype. */
166 #if defined(HAVE_KMEM_CACHE_T)
168 init_once_func(void * foo, kmem_cache_t * cachep, unsigned long flags) {
169 #if defined(SLAB_CTOR_VERIFY)
170 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
171 SLAB_CTOR_CONSTRUCTOR)
175 #elif defined(KMEM_CACHE_INIT)
177 init_once_func(struct kmem_cache * cachep, void * foo) {
180 #elif !defined(KMEM_CACHE_CTOR_TAKES_VOID)
182 init_once_func(void * foo, struct kmem_cache * cachep, unsigned long flags) {
183 #if defined(SLAB_CTOR_VERIFY)
184 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
185 SLAB_CTOR_CONSTRUCTOR)
191 init_once_func(void * foo) {
/* SLAB_RECLAIM_ACCOUNT / allocation-flag spelling fallbacks. */
196 #ifndef SLAB_RECLAIM_ACCOUNT
197 #define SLAB_RECLAIM_ACCOUNT 0
200 #if defined(SLAB_KERNEL)
201 #define KALLOC_TYPE SLAB_KERNEL
203 #define KALLOC_TYPE GFP_KERNEL
/* Keyring compatibility: key_alloc's argument list changed several times
 * (task pointer -> cred pointer -> extra restriction argument). */
206 #ifdef LINUX_KEYRING_SUPPORT
207 static inline struct key *
208 afs_linux_key_alloc(struct key_type *type, const char *desc, afs_kuid_t uid,
209 afs_kgid_t gid, key_perm_t perm, unsigned long flags)
211 # if defined(KEY_ALLOC_BYPASS_RESTRICTION)
212 return key_alloc(type, desc, uid, gid, current_cred(), perm, flags, NULL);
213 # elif defined(KEY_ALLOC_NEEDS_STRUCT_TASK)
214 return key_alloc(type, desc, uid, gid, current, perm, flags);
215 # elif defined(KEY_ALLOC_NEEDS_CRED)
216 return key_alloc(type, desc, uid, gid, current_cred(), perm, flags);
218 return key_alloc(type, desc, uid, gid, perm, flags);
/* Fetch the session keyring from a cred; location differs by kernel. */
222 # if defined(STRUCT_TASK_STRUCT_HAS_CRED)
223 static inline struct key *
224 afs_session_keyring(afs_ucred_t *cred)
226 # if defined(STRUCT_CRED_HAS_SESSION_KEYRING)
227 return cred->session_keyring;
229 return cred->tgcred->session_keyring;
/* Search the session keyring for a key of 'type'; keyring_search grew a
 * 'recurse' argument on newer kernels. Returns ERR_PTR on failure. */
233 static inline struct key*
234 afs_linux_search_keyring(afs_ucred_t *cred, struct key_type *type)
238 if (afs_session_keyring(cred)) {
239 # if defined(KEYRING_SEARCH_TAKES_RECURSE)
240 key_ref = keyring_search(
241 make_key_ref(afs_session_keyring(cred), 1),
244 key_ref = keyring_search(
245 make_key_ref(afs_session_keyring(cred), 1),
249 return ERR_CAST(key_ref);
251 return key_ref_to_ptr(key_ref);
254 return ERR_PTR(-ENOKEY);
/* Pre-cred kernels: fall back to request_key on the "_pag" description. */
257 static inline struct key*
258 afs_linux_search_keyring(afs_ucred_t *cred, struct key_type *type)
260 return request_key(type, "_pag", NULL);
262 # endif /* STRUCT_TASK_STRUCT_HAS_CRED */
264 static_inline struct key *
265 afs_set_session_keyring(struct key *keyring)
/* Install 'keyring' as the current task's session keyring and return the
 * previous one. On cred-based kernels this goes through
 * prepare_creds/commit_creds; otherwise the task's keyring slot is
 * swapped under sighand->siglock.
 * Fixed: "&current" had been mangled into the mojibake "¤t" (an HTML
 * "&curren;" entity artifact) in the spin_lock_irq/spin_unlock_irq calls.
 * NOTE(review): the declaration of 'old' and the surrounding braces /
 * #else / #endif / return lines are not visible in this extract. */
268 #if defined(STRUCT_CRED_HAS_SESSION_KEYRING)
269 struct cred *new_creds;
270 old = current_session_keyring();
271 new_creds = prepare_creds();
272 rcu_assign_pointer(new_creds->session_keyring, keyring);
273 commit_creds(new_creds);
275 spin_lock_irq(&current->sighand->siglock);
276 old = task_session_keyring(current);
278 task_session_keyring(current) = keyring;
279 spin_unlock_irq(&current->sighand->siglock);
283 #endif /* LINUX_KEYRING_SUPPORT */
/* Whether 'cred' refers to the calling task's credentials. */
285 #ifdef STRUCT_TASK_STRUCT_HAS_CRED
287 afs_linux_cred_is_current(afs_ucred_t *cred)
289 return (cred == current_cred());
293 afs_linux_cred_is_current(afs_ucred_t *cred)
/* Byte offset of a page within its file, for kernels without page_offset. */
299 #ifndef HAVE_LINUX_PAGE_OFFSET
301 page_offset(struct page *pp)
303 return (((loff_t) pp->index) << PAGE_SHIFT);
/* Zero two [from,to) ranges of a page via an atomic kmap, then flush the
 * dcache, for kernels lacking zero_user_segments. */
307 #ifndef HAVE_LINUX_ZERO_USER_SEGMENTS
309 zero_user_segments(struct page *pp, unsigned int from1, unsigned int to1,
310 unsigned int from2, unsigned int to2)
312 void *base = kmap_atomic(pp, KM_USER0);
315 memset(base + from1, 0, to1 - from1);
318 memset(base + from2, 0, to2 - from2);
320 flush_dcache_page(pp);
321 kunmap_atomic(base, KM_USER0);
/* Single-range variant implemented via the two-range helper above. */
325 zero_user_segment(struct page *pp, unsigned int from1, unsigned int to1)
327 zero_user_segments(pp, from1, to1, 0, 0);
331 #if defined(HAVE_LINUX_IP_SOCK_SET)
333 /* ip_sock_set_* introduced in linux 5.8 */
335 afs_linux_sock_set_mtu_discover(struct socket *sockp, int pmtu)
337 ip_sock_set_mtu_discover(sockp->sk, pmtu);
340 afs_linux_sock_set_recverr(struct socket *sockp)
342 ip_sock_set_recverr(sockp->sk);
345 # if !defined(HAVE_LINUX_KERNEL_SETSOCKOPT)
346 /* Available from 2.6.19 */
/* Minimal kernel_setsockopt emulation using set_fs + ops->setsockopt. */
348 kernel_setsockopt(struct socket *sockp, int level, int name, char *val,
350 mm_segment_t old_fs = get_fs();
354 ret = sockp->ops->setsockopt(sockp, level, name, val, len);
359 # endif /* !HAVE_LINUX_KERNEL_SETSOCKOPT */
/* Pre-5.8 path: the same knobs set through kernel_setsockopt. */
362 afs_linux_sock_set_mtu_discover(struct socket *sockp, int pmtu)
364 kernel_setsockopt(sockp, SOL_IP, IP_MTU_DISCOVER, (char *)&pmtu,
368 afs_linux_sock_set_recverr(struct socket *sockp)
371 kernel_setsockopt(sockp, SOL_IP, IP_RECVERR, (char *)&recverr,
374 #endif /* !HAVE_LINUX_IP_SOCK_SET */
/* Freezer compatibility: wrap try_to_freeze (with or without its old
 * PF_FREEZE argument) or fall back to calling refrigerator directly. */
376 #ifdef HAVE_TRY_TO_FREEZE
378 afs_try_to_freeze(void) {
379 # ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
380 return try_to_freeze(PF_FREEZE);
382 return try_to_freeze();
387 afs_try_to_freeze(void) {
389 if (current->flags & PF_FREEZE) {
390 refrigerator(PF_FREEZE);
398 /* The commit which changed refrigerator so that it takes no arguments
399 * also added freezing(), so if LINUX_REFRIGERATOR_TAKES_PF_FREEZE is
400 * true, the kernel doesn't have a freezing() function.
402 #ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
404 freezing(struct task_struct *p)
407 return p->flags & PF_FREEZE;
/* Map PageChecked onto PageFsMisc where only the latter exists. */
414 #if !defined(HAVE_LINUX_PAGECHECKED)
415 # if defined(HAVE_LINUX_PAGEFSMISC)
416 # include <linux/page-flags.h>
418 # define PageChecked(p) PageFsMisc((p))
419 # define SetPageChecked(p) SetPageFsMisc((p))
420 # define ClearPageChecked(p) ClearPageFsMisc((p))
/* Export-operations compatibility: decode/encode cache file handles with
 * either the old decode_fh/encode_fh API or the NEW_EXPORT_OPS
 * fh_to_dentry API. */
425 #if !defined(NEW_EXPORT_OPS)
426 extern struct export_operations export_op_default;
/* Resolve a cache file handle to a dentry on the cache filesystem. */
429 static inline struct dentry *
430 afs_get_dentry_from_fh(struct super_block *afs_cacheSBp, afs_dcache_id_t *ainode,
431 int cache_fh_len, int cache_fh_type,
432 int (*afs_fh_acceptable)(void *, struct dentry *)) {
433 #if defined(NEW_EXPORT_OPS)
434 return afs_cacheSBp->s_export_op->fh_to_dentry(afs_cacheSBp, &ainode->ufs.fh,
435 cache_fh_len, cache_fh_type);
437 if (afs_cacheSBp->s_export_op && afs_cacheSBp->s_export_op->decode_fh)
438 return afs_cacheSBp->s_export_op->decode_fh(afs_cacheSBp, ainode->ufs.raw,
439 cache_fh_len, cache_fh_type, afs_fh_acceptable, NULL);
441 return export_op_default.decode_fh(afs_cacheSBp, ainode->ufs.raw,
442 cache_fh_len, cache_fh_type, afs_fh_acceptable, NULL);
/* Encode a dentry into a cache file handle; falls back to an INO32-style
 * handle (new API) or the default encode_fh (old API). */
447 afs_get_fh_from_dentry(struct dentry *dp, afs_ufs_dcache_id_t *ainode, int *max_lenp) {
448 if (dp->d_sb->s_export_op->encode_fh)
449 #if defined(EXPORT_OP_ENCODE_FH_TAKES_INODES)
450 return dp->d_sb->s_export_op->encode_fh(dp->d_inode, &ainode->raw[0], max_lenp, NULL);
452 return dp->d_sb->s_export_op->encode_fh(dp, &ainode->raw[0], max_lenp, 0);
454 #if defined(NEW_EXPORT_OPS)
455 /* If fs doesn't provide an encode_fh method, assume the default INO32 type */
456 *max_lenp = sizeof(struct fid)/4;
457 ainode->fh.i32.ino = dp->d_inode->i_ino;
458 ainode->fh.i32.gen = dp->d_inode->i_generation;
459 return FILEID_INO32_GEN;
461 /* or call the default encoding function for the old API */
462 return export_op_default.encode_fh(dp, &ainode->raw[0], max_lenp, 0);
/* Ensure the cache superblock's export ops have a usable
 * find_exported_dentry on pre-NEW_EXPORT_OPS kernels. */
467 afs_init_sb_export_ops(struct super_block *sb) {
468 #if !defined(NEW_EXPORT_OPS)
470 * decode_fh will call this function. If not defined for this FS, make
471 * sure it points to the default
473 if (!sb->s_export_op->find_exported_dentry) {
474 /* Some kernels (at least 2.6.9) do not prototype find_exported_dentry,
475 * even though it is exported, so prototype it ourselves. Newer
476 * kernels do prototype it, but as long as our prototype matches the
477 * real one (the signature never changed before NEW_EXPORT_OPS came
478 * into play), there should be no problems. */
479 extern struct dentry * find_exported_dentry(struct super_block *sb, void *obj, void *parent,
480 int (*acceptable)(void *context, struct dentry *de),
482 sb->s_export_op->find_exported_dentry = find_exported_dentry;
/* Lock/unlock an inode via inode_lock(), i_mutex, or (presumably, in the
 * lines missing from this extract) the old i_sem — TODO confirm. */
488 afs_linux_lock_inode(struct inode *ip) {
489 #if defined(HAVE_LINUX_INODE_LOCK)
491 #elif defined(STRUCT_INODE_HAS_I_MUTEX)
492 mutex_lock(&ip->i_mutex);
499 afs_linux_unlock_inode(struct inode *ip) {
500 #if defined(HAVE_LINUX_INODE_LOCK)
502 #elif defined(STRUCT_INODE_HAS_I_MUTEX)
503 mutex_unlock(&ip->i_mutex);
509 /* Use these to lock or unlock an inode for processing
510 * its dentry aliases en masse.
512 #if defined(HAVE_DCACHE_LOCK)
513 #define afs_d_alias_lock(ip) spin_lock(&dcache_lock)
514 #define afs_d_alias_unlock(ip) spin_unlock(&dcache_lock)
516 #define afs_d_alias_lock(ip) spin_lock(&(ip)->i_lock)
517 #define afs_d_alias_unlock(ip) spin_unlock(&(ip)->i_lock)
521 /* Use this instead of dget for dentry operations
522 * that occur under a higher lock (e.g. alias processing).
523 * Requires that the higher lock (e.g. dcache_lock or
524 * inode->i_lock) is already held.
527 afs_linux_dget(struct dentry *dp) {
528 #if defined(HAVE_DCACHE_LOCK)
/* Apply 'newattrs' to the cache file's inode, accommodating the setattr
 * signature changes (mnt_idmap / user namespace / dentry-only) and the
 * old inode_setattr helper. */
537 afs_inode_setattr(struct osi_file *afile, struct iattr *newattrs) {
540 struct inode *inode = OSIFILE_INODE(afile);
541 #if defined(IOP_TAKES_MNT_IDMAP)
542 code = inode->i_op->setattr(afs_mnt_idmap, afile->filp->f_dentry, newattrs);
543 #elif defined(IOP_TAKES_USER_NAMESPACE)
544 code = inode->i_op->setattr(afs_ns, afile->filp->f_dentry, newattrs);
545 #elif !defined(HAVE_LINUX_INODE_SETATTR)
546 code = inode->i_op->setattr(afile->filp->f_dentry, newattrs);
547 #elif defined(INODE_SETATTR_NOT_VOID)
548 if (inode->i_op && inode->i_op->setattr)
549 code = inode->i_op->setattr(afile->filp->f_dentry, newattrs);
551 code = inode_setattr(inode, newattrs);
553 inode_setattr(inode, newattrs);
/* Path lookup: older kernels use path_lookup + struct nameidata, newer
 * ones kern_path + struct path; both entry points share the name
 * afs_kern_path. */
558 #if defined(HAVE_LINUX_PATH_LOOKUP)
560 afs_kern_path(char *aname, int flags, struct nameidata *nd) {
561 return path_lookup(aname, flags, nd);
565 afs_kern_path(char *aname, int flags, afs_linux_path_t *path) {
566 return kern_path(aname, flags, path);
/* Take references on the dentry (and mount, where available) found by
 * afs_kern_path, from whichever structure the kernel uses. */
571 #if defined(HAVE_LINUX_PATH_LOOKUP)
572 afs_get_dentry_ref(struct nameidata *nd, struct vfsmount **mnt, struct dentry **dpp) {
574 afs_get_dentry_ref(afs_linux_path_t *path, struct vfsmount **mnt, struct dentry **dpp) {
576 #if defined(HAVE_LINUX_PATH_LOOKUP)
577 # if defined(STRUCT_NAMEIDATA_HAS_PATH)
578 *dpp = dget(nd->path.dentry);
580 *mnt = mntget(nd->path.mnt);
583 *dpp = dget(nd->dentry);
585 *mnt = mntget(nd->mnt);
589 *dpp = dget(path->dentry);
591 *mnt = mntget(path->mnt);
596 /* wait_event_freezable appeared with 2.6.24 */
598 /* These implement the original AFS wait behaviour, with respect to the
599 * refrigerator, rather than the behaviour of the current wait_event_freezable
/* Interruptible wait that re-checks the condition around freezer trips. */
603 #ifndef wait_event_freezable
604 # define wait_event_freezable(waitqueue, condition) \
608 _ret = wait_event_interruptible(waitqueue, \
609 (condition) || freezing(current)); \
610 if (_ret && !freezing(current)) \
612 else if (!(condition)) \
614 } while (afs_try_to_freeze()); \
618 # define wait_event_freezable_timeout(waitqueue, condition, timeout) \
622 _ret = wait_event_interruptible_timeout(waitqueue, \
624 freezing(current)), \
626 } while (afs_try_to_freeze()); \
/* dentry_open compatibility: new API takes a struct path (and path_gets
 * internally); old API consumes dget/mntget references. */
631 #if defined(STRUCT_TASK_STRUCT_HAS_CRED)
632 static inline struct file *
633 afs_dentry_open(struct dentry *dp, struct vfsmount *mnt, int flags, const struct cred *creds) {
634 #if defined(DENTRY_OPEN_TAKES_PATH)
635 afs_linux_path_t path;
639 /* note that dentry_open will path_get for us */
640 filp = dentry_open(&path, flags, creds);
643 return dentry_open(dget(dp), mntget(mnt), flags, creds);
/* Truncate an inode: vmtruncate on old kernels, otherwise
 * inode_newsize_ok followed by truncate_setsize. */
649 afs_truncate(struct inode *inode, int len)
652 #if defined(STRUCT_INODE_OPERATIONS_HAS_TRUNCATE)
653 code = vmtruncate(inode, len);
655 code = inode_newsize_ok(inode, len);
657 truncate_setsize(inode, len);
/* Create a /proc entry via proc_create (proc_ops or file_operations) or
 * the legacy create_proc_entry + proc_fops assignment. */
662 static inline struct proc_dir_entry *
663 #if defined(HAVE_LINUX_STRUCT_PROC_OPS)
664 afs_proc_create(char *name, umode_t mode, struct proc_dir_entry *parent, struct proc_ops *ops) {
666 afs_proc_create(char *name, umode_t mode, struct proc_dir_entry *parent, struct file_operations *ops) {
668 #if defined(HAVE_LINUX_PROC_CREATE)
669 return proc_create(name, mode, parent, ops);
671 struct proc_dir_entry *entry;
672 entry = create_proc_entry(name, mode, parent);
674 entry->proc_fops = ops;
/* Read a dentry's reference count across the d_count representation
 * changes (d_count()/int/atomic_t). */
680 afs_dentry_count(struct dentry *dp)
682 #if defined(HAVE_LINUX_D_COUNT)
684 #elif defined(D_COUNT_INT)
687 return atomic_read(&dp->d_count);
/* Prune child dentries when 'dp' has other users; newer kernels require
 * reading the count under d_lock. */
692 afs_maybe_shrink_dcache(struct dentry *dp)
694 #if defined(HAVE_LINUX_D_COUNT) || defined(D_COUNT_INT)
695 spin_lock(&dp->d_lock);
696 if (afs_dentry_count(dp) > 1) {
697 spin_unlock(&dp->d_lock);
698 shrink_dcache_parent(dp);
700 spin_unlock(&dp->d_lock);
702 if (afs_dentry_count(dp) > 1)
703 shrink_dcache_parent(dp);
/* d_invalidate returned an int before becoming void. */
708 afs_d_invalidate(struct dentry *dp)
710 #if defined(D_INVALIDATE_IS_VOID)
714 return d_invalidate(dp);
/* Decide whether in-kernel file I/O below needs the set_fs dance:
 * only the kernel_read/kernel_write path avoids it. */
718 #if defined(HAVE_LINUX___VFS_WRITE)
719 # define AFS_FILE_NEEDS_SET_FS 1
720 #elif defined(HAVE_LINUX_KERNEL_WRITE)
721 /* #undef AFS_FILE_NEEDS_SET_FS */
723 # define AFS_FILE_NEEDS_SET_FS 1
/* Read from an open cache file via __vfs_read, kernel_read (either
 * argument order), or the raw f_op->read method. */
727 afs_file_read(struct file *filp, char __user *buf, size_t len, loff_t *pos)
729 #if defined(HAVE_LINUX___VFS_WRITE)
730 return __vfs_read(filp, buf, len, pos);
731 #elif defined(HAVE_LINUX_KERNEL_WRITE)
732 # if defined(KERNEL_READ_OFFSET_IS_LAST)
733 return kernel_read(filp, buf, len, pos);
735 return kernel_read(filp, *pos, buf, len);
738 return filp->f_op->read(filp, buf, len, pos);
/* Write counterpart of afs_file_read, with the same three variants. */
743 afs_file_write(struct file *filp, char __user *buf, size_t len, loff_t *pos)
745 #if defined(HAVE_LINUX___VFS_WRITE)
746 return __vfs_write(filp, buf, len, pos);
747 #elif defined(HAVE_LINUX_KERNEL_WRITE)
748 # if defined(KERNEL_READ_OFFSET_IS_LAST)
749 return kernel_write(filp, buf, len, pos);
751 return kernel_write(filp, buf, len, *pos);
754 return filp->f_op->write(filp, buf, len, pos);
/* d_path compatibility: struct path argument vs. dentry+vfsmount pair. */
759 afs_d_path(struct dentry *dp, struct vfsmount *mnt, char *buf, int buflen)
761 #ifdef D_PATH_TAKES_STRUCT_PATH
762 afs_linux_path_t p = { .mnt = mnt, .dentry = dp };
763 return d_path(&p, buf, buflen);
765 return d_path(dp, mnt, buf, buflen);
/* Validate an attribute change via setattr_prepare (with idmap/userns
 * where required) or the older inode_change_ok. */
770 afs_setattr_prepare(struct dentry *dp, struct iattr *newattrs)
772 #if defined(IOP_TAKES_MNT_IDMAP)
773 return setattr_prepare(afs_mnt_idmap, dp, newattrs);
774 #elif defined(IOP_TAKES_USER_NAMESPACE)
775 return setattr_prepare(afs_ns, dp, newattrs);
776 #elif defined(HAVE_LINUX_SETATTR_PREPARE)
777 return setattr_prepare(dp, newattrs);
779 return inode_change_ok(dp->d_inode, newattrs);
784 * afs_d_alias_foreach - Iterate over dentry aliases of an inode. Use like this:
786 * afs_d_alias_lock(inode);
787 * afs_d_alias_foreach(dentry, inode, node) {
788 * spin_lock(&dentry->d_lock);
789 * dentry->d_foo = bar;
790 * spin_unlock(&dentry->d_lock);
792 * afs_d_alias_unlock(inode);
794 * 'node' is a struct hlist_node, and is only used when D_ALIAS_IS_HLIST &&
795 * !HLIST_ITERATOR_NO_NODE.
797 * afs_d_alias_foreach_reverse is the same, but traverses the list in the
798 * reverse direction when possible (non-D_ALIAS_IS_HLIST). For
799 * D_ALIAS_IS_HLIST, Linux doesn't provide macros for going in reverse (struct
800 * hlist_head doesn't point directly to the end of the list), so just traverse
803 * @pre afs_d_alias_lock(inode) held
/* i_dentry is an hlist on newer kernels; choose the iterator form that
 * matches the kernel's hlist_for_each_entry signature. */
805 #if defined(D_ALIAS_IS_HLIST)
807 # if defined(HLIST_ITERATOR_NO_NODE)
808 # define afs_d_alias_foreach(dentry, inode, node) \
809 hlist_for_each_entry((dentry), &((inode)->i_dentry), d_alias)
811 # define afs_d_alias_foreach(dentry, inode, node) \
812 hlist_for_each_entry((dentry), (node), &((inode)->i_dentry), d_alias)
813 # endif /* HLIST_ITERATOR_NO_NODE */
814 # define afs_d_alias_foreach_reverse afs_d_alias_foreach
816 #else /* D_ALIAS_IS_HLIST */
/* list_head i_dentry: forward and true reverse traversal available. */
818 # define afs_d_alias_foreach(dentry, inode, node) \
819 list_for_each_entry((dentry), &((inode)->i_dentry), d_alias)
820 # define afs_d_alias_foreach_reverse(dentry, inode, node) \
821 list_for_each_entry_reverse((dentry), &((inode)->i_dentry), d_alias)
823 #endif /* D_ALIAS_IS_HLIST */
825 #endif /* AFS_LINUX_OSI_COMPAT_H */