2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * Linux support routines.
14 #include <afsconfig.h>
15 #include "afs/param.h"
19 #include "afs/sysincludes.h"
20 #include "afsincludes.h"
21 #include "afs/afs_stats.h"
22 #if defined(AFS_LINUX24_ENV)
23 #include "h/smp_lock.h"
/* Debug aid: writing through this NULL pointer deliberately triggers a
 * kernel oops, so developers can capture a stack trace on demand. */
26 char *crash_addr = 0; /* Induce an oops by writing here. */
28 /* Lookup name and return vnode for same. */
/* Resolve the path 'aname' to a dentry, stored through *dpp on success.
 * seg selects user-space vs. kernel-space path copying; followlink
 * controls whether a trailing symlink is followed.  dirvpp appears
 * unused in the visible code — presumably a legacy parameter; confirm
 * against callers.  Returns 0 on success, non-zero otherwise (exact
 * error propagation not fully visible in this extract). */
29 int osi_lookupname(char *aname, uio_seg_t seg, int followlink,
30 vnode_t **dirvpp, struct dentry **dpp)
32 #if defined(AFS_LINUX24_ENV)
35 struct dentry *dp = NULL;
40 #if defined(AFS_LINUX24_ENV)
/* Linux 2.4 path: user-space names go through user_path_walk*(),
 * kernel-space names through path_init()/path_walk(). */
41 if (seg == AFS_UIOUSER) {
43 user_path_walk(aname, &nd) : user_path_walk_link(aname, &nd);
46 if (path_init(aname, followlink ? LOOKUP_FOLLOW : 0, &nd))
47 code = path_walk(aname, &nd);
/* Only hand back the dentry if it is backed by an inode (i.e. the
 * path resolved to an existing object, not a negative dentry). */
51 if (nd.dentry->d_inode) {
/* dget() takes an extra reference; the caller owns it and must dput(). */
52 *dpp = dget(nd.dentry);
/* Pre-2.4 path: namei()/lnamei() or lookup_dentry() do the walk. */
59 if (seg == AFS_UIOUSER) {
60 dp = followlink ? namei(aname) : lnamei(aname);
63 dp = lookup_dentry(aname, NULL, followlink ? 1 : 0);
66 if (dp && !IS_ERR(dp)) {
79 /* Initialize cache device info and fragment size for disk cache partition. */
/* Looks up the cache directory 'aname' and records its inode number,
 * device, superblock pointer, and a fragment-size mask derived from the
 * filesystem block size into the module-level cache globals.
 * Returns ENOENT if the lookup fails; success return not visible here. */
80 int osi_InitCacheInfo(char *aname)
84 extern ino_t cacheInode;
85 extern struct osi_dev cacheDev;
86 extern afs_int32 afs_fsfragsize;
87 extern struct super_block *afs_cacheSBp;
/* Kernel-space name, follow symlinks; no parent dentry wanted. */
89 code = osi_lookupname(aname, AFS_UIOSYS, 1, NULL, &dp);
90 if (code) return ENOENT;
92 cacheInode = dp->d_inode->i_ino;
93 cacheDev.dev = dp->d_inode->i_dev;
/* Stored as blocksize-1, i.e. a mask usable for rounding to fragments. */
94 afs_fsfragsize = dp->d_inode->i_sb->s_blocksize - 1;
95 afs_cacheSBp = dp->d_inode->i_sb;
/* Convenience wrappers that invoke the file's read/write file_operations
 * at its current f_pos.  NOTE: F is evaluated multiple times, so callers
 * must not pass an expression with side effects. */
103 #define FOP_READ(F, B, C) (F)->f_op->read(F, B, (size_t)(C), &(F)->f_pos)
104 #define FOP_WRITE(F, B, C) (F)->f_op->write(F, B, (size_t)(C), &(F)->f_pos)
107 * Seek, then read or write to an open inode. addrp points to data in
/* rw is UIO_READ or UIO_WRITE; on success *resid receives the number of
 * bytes NOT transferred.  RLIMIT_FSIZE is temporarily lifted so cache
 * writes are never truncated by the current task's file-size limit. */
110 int osi_rdwr(int rw, struct osi_file *file, caddr_t addrp, size_t asize,
115 struct file *filp = &file->file;
116 off_t offset = file->offset;
117 unsigned long savelim;
119 /* Seek to the desired position. Return -1 on error. */
120 if (filp->f_op->llseek) {
121 if (filp->f_op->llseek(filp, (loff_t)offset, 0) != offset)
/* No llseek op: position the file by setting f_pos directly. */
125 filp->f_pos = offset;
/* Save and lift the task's file-size limit for the duration of the I/O. */
127 savelim = current->rlim[RLIMIT_FSIZE].rlim_cur;
128 current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
130 /* Read/Write the data. */
133 code = FOP_READ(filp, addrp, asize);
134 else if (rw == UIO_WRITE)
135 code = FOP_WRITE(filp, addrp, asize);
136 else /* all is well? */
/* Restore the saved limit before returning. */
140 current->rlim[RLIMIT_FSIZE].rlim_cur = savelim;
/* code holds bytes transferred here; leftover count goes to the caller. */
143 *resid = asize - code;
150 /* This variant is called from AFS read/write routines and takes a uio
151 * struct and, if successful, returns 0.
/* Transfers each iovec in turn through the file's read/write ops,
 * advancing the uio bookkeeping (base/len/resid/offset) by the number of
 * bytes moved.  Like osi_rdwr, RLIMIT_FSIZE is lifted around the I/O.
 * The AFS_UIOSYS branches at lines 166/196 presumably switch the
 * user/kernel address-space segment (set_fs-style) — the calls
 * themselves are not visible in this extract; confirm before relying. */
153 int osi_file_uio_rdwr(struct osi_file *osifile, uio_t *uiop, int rw)
155 struct file *filp = &osifile->file;
156 struct inode *ip = FILE_INODE(&osifile->file);
161 unsigned long savelim;
163 savelim = current->rlim[RLIMIT_FSIZE].rlim_cur;
164 current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
166 if (uiop->uio_seg == AFS_UIOSYS)
/* Start the file position where the uio says the transfer begins. */
169 filp->f_pos = uiop->uio_offset;
/* Loop until error, all requested bytes moved, or iovecs exhausted. */
170 while (code == 0 && uiop->uio_resid > 0 && uiop->uio_iovcnt > 0) {
172 count = iov->iov_len;
180 code = FOP_READ(filp, iov->iov_base, count);
182 code = FOP_WRITE(filp, iov->iov_base, count);
/* code > 0 here is the byte count for this iovec; advance everything. */
189 iov->iov_base += code;
190 iov->iov_len -= code;
191 uiop->uio_resid -= code;
192 uiop->uio_offset += code;
196 if (uiop->uio_seg == AFS_UIOSYS)
199 current->rlim[RLIMIT_FSIZE].rlim_cur = savelim;
205 * Setup a uio struct.
/* Initializes a single-iovec uio describing 'count' bytes at 'buf',
 * starting at file offset 'pos', with the given transfer flag. */
207 void setup_uio(uio_t *uiop, struct iovec *iovecp, char *buf,
208 afs_offs_t pos, int count, uio_flag_t flag,
211 iovecp->iov_base = buf;
212 iovecp->iov_len = count;
213 uiop->uio_iov = iovecp;
214 uiop->uio_iovcnt = 1;
215 uiop->uio_offset = pos;
217 uiop->uio_resid = count;
218 uiop->uio_flag = flag;
223 * UIO_READ : dp -> uio
224 * UIO_WRITE : uio -> dp
/* Moves up to 'length' bytes between the flat buffer 'dp' and the uio's
 * iovecs, using memcpy for kernel-space uios and AFS_COPYIN/COPYOUT for
 * user-space ones.  Advances iovec and uio counters by each chunk moved.
 * Bad rw/seg values are reported via printf (visible branches). */
226 int uiomove(char *dp, int length, uio_flag_t rw, uio_t *uiop)
232 while (length > 0 && uiop->uio_resid > 0 && uiop->uio_iovcnt > 0) {
234 count = iov->iov_len;
245 switch(uiop->uio_seg) {
/* Kernel-space uio: plain memcpy in the direction given by rw. */
249 memcpy(iov->iov_base, dp, count); break;
251 memcpy(dp, iov->iov_base, count); break;
253 printf("uiomove: Bad rw = %d\n", rw);
/* User-space uio: checked copies that can set 'code' on fault. */
260 AFS_COPYOUT(dp, iov->iov_base, count, code); break;
262 AFS_COPYIN(iov->iov_base, dp, count, code); break;
264 printf("uiomove: Bad rw = %d\n", rw);
269 printf("uiomove: Bad seg = %d\n", uiop->uio_seg);
/* Book-keeping: consume 'count' bytes from the current iovec. */
275 iov->iov_base += count;
276 iov->iov_len -= count;
277 uiop->uio_offset += count;
278 uiop->uio_resid -= count;
/* Sets the system clock via the (externally resolved) settimeofday
 * syscall pointer.  On 64-bit kernels the osi_timeval_t fields are
 * copied into a native struct timeval first — presumably because the
 * field widths differ there; 32-bit kernels pass tvp straight through.
 * The syscall's return value is deliberately ignored. */
283 void afs_osi_SetTime(osi_timeval_t *tvp)
285 extern int (*sys_settimeofdayp)(struct timeval *tv, struct timezone *tz);
286 #ifdef AFS_LINUX_64BIT_KERNEL
288 AFS_STATCNT(osi_SetTime);
289 tv.tv_sec = tvp->tv_sec;
290 tv.tv_usec = tvp->tv_usec;
291 (void) (*sys_settimeofdayp)(&tv, NULL);
295 AFS_STATCNT(osi_SetTime);
298 (void) (*sys_settimeofdayp)(tvp, NULL);
303 /* Free all the pages on any of the vnodes in the vlru. Must be done before
304 * freeing all memory.
/* Walks every hash chain of the vcache table and truncates/invalidates
 * the page cache of each vcache's inode, choosing the API appropriate to
 * the running kernel version.  On 2.4 it re-checks nrpages afterwards and
 * logs inodes whose pages could not all be dropped. */
306 void osi_linux_free_inode_pages(void)
311 extern struct vcache *afs_vhashT[VCSIZE];
313 for (i=0; i<VCSIZE; i++) {
314 for(tvc = afs_vhashT[i]; tvc; tvc=tvc->hnext) {
316 #if defined(AFS_LINUX24_ENV)
/* Only bother when the inode actually has cached pages. */
317 if (ip->i_data.nrpages) {
321 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
322 truncate_inode_pages(&ip->i_data, 0);
323 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,15)
324 truncate_inode_pages(ip, 0);
326 invalidate_inode_pages(ip);
328 #if defined(AFS_LINUX24_ENV)
/* Pages may still be pinned (e.g. in flight); report, don't loop. */
329 if (ip->i_data.nrpages) {
333 printf("Failed to invalidate all pages on inode 0x%x\n",
/* Final teardown hook for an AFS inode: complains if anyone still holds
 * a reference, runs AFS inactive processing on the vcache under its
 * write lock, and zeroes i_count/i_nlink so Linux's iput machinery will
 * not try to reuse the inode. */
341 void osi_clear_inode(struct inode *ip)
343 cred_t *credp = crref();
344 struct vcache *vcp = ITOAFS(ip);
346 #if defined(AFS_LINUX24_ENV)
/* A count > 1 here means another holder exists — log it (see printf). */
347 if (atomic_read(&ip->i_count) > 1)
351 printf("afs_put_inode: ino %d (0x%x) has count %d\n", ip->i_ino, ip, ip->i_count);
353 afs_InactiveVCache(vcp, credp);
354 ObtainWriteLock(&vcp->lock, 504);
355 #if defined(AFS_LINUX24_ENV)
356 atomic_set(&ip->i_count, 0);
360 ip->i_nlink = 0; /* iput checks this after calling this routine. */
361 ReleaseWriteLock(&vcp->lock);
365 /* iput an inode. Since we still have a separate inode pool, we don't want
366 * to call iput on AFS inodes, since they would then end up on Linux's
/* Drops one reference on 'ip'.  Panics on an already-zero or
 * implausibly large (high bits set) refcount.  For inodes belonging to
 * the AFS superblock the count is decremented by hand instead of via
 * the kernel's iput(), keeping AFS inodes off Linux's inode lists. */
369 void osi_iput(struct inode *ip)
371 extern struct vfs *afs_globalVFS;
374 #if defined(AFS_LINUX24_ENV)
/* Sanity check: zero count means a double-put; 0xffff0000 bits suggest
 * a corrupted counter.  Either way, panic with the details. */
375 if (atomic_read(&ip->i_count) == 0 || atomic_read(&ip->i_count) & 0xffff0000) {
377 if (ip->i_count == 0 || ip->i_count & 0xffff0000) {
379 osi_Panic("IPUT Bad refCount %d on inode 0x%x\n",
380 #if defined(AFS_LINUX24_ENV)
381 atomic_read(&ip->i_count), ip);
/* AFS-owned inode: bypass iput() and manage the count directly. */
386 if (afs_globalVFS && afs_globalVFS == ip->i_sb ) {
387 #if defined(AFS_LINUX24_ENV)
388 atomic_dec(&ip->i_count);
389 if (!atomic_read(&ip->i_count))
401 /* check_bad_parent() : Checks if this dentry's vcache is a root vcache
402 * that has its mvid (parent dir's fid) pointer set to the wrong directory
403 * due to being mounted in multiple points at once. If so, check_bad_parent()
404 * calls afs_lookup() to correct the vcache's mvid, as well as the volume's
405 * dotdotfid and mtpoint fid members.
407 * dp - dentry to be checked.
411 * This dentry's vcache's mvid will be set to the correct parent directory's
413 * This root vnode's volume will have its dotdotfid and mtpoint fids set
414 * to the correct parent and mountpoint fids.
417 void check_bad_parent(struct dentry *dp)
420 struct vcache *vcp = ITOAFS(dp->d_inode), *avc = NULL;
421 struct vcache *pvc = ITOAFS(dp->d_parent->d_inode);
/* Volume mismatch between this vcache's recorded parent and the dentry's
 * actual parent means the mvid points at the wrong mount point. */
423 if (vcp->mvid->Fid.Volume != pvc->fid.Fid.Volume) { /* bad parent */
427 /* force a lookup, so vcp->mvid is fixed up */
428 afs_lookup(pvc, dp->d_name.name, &avc, credp);
/* The lookup should hand back the very vcache we started from; anything
 * else indicates an inconsistency worth tracing. */
429 if (!avc || vcp != avc) { /* bad, very bad.. */
430 afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
431 "afs_linux_revalidate : bad pointer returned from afs_lookup origvc newvc dentry",
432 ICL_TYPE_POINTER, vcp,
433 ICL_TYPE_POINTER, avc,
434 ICL_TYPE_POINTER, dp);
439 } /* if bad parent */
/* The kernel task running the Rx listener; registered by
 * osi_linux_rxkreg() and used by osi_linux_unmask() to restore its
 * signal state. */
444 struct task_struct *rxk_ListenerTask;
/* Block all signals for the current task: fill its blocked-signal set
 * under sigmask_lock and recompute its pending-signal state.
 * NOTE(review): the original text of this function was HTML-entity
 * mangled — "&current" had decoded as "&curren;" + "t", printing as
 * "¤t".  Restored the intended "&current" references below. */
446 void osi_linux_mask(void)
448 spin_lock_irq(&current->sigmask_lock);
449 sigfillset(&current->blocked);
450 recalc_sigpending(current);
451 spin_unlock_irq(&current->sigmask_lock);
/* Undo osi_linux_mask() for the registered Rx listener task: clear its
 * blocked-signal set, flush any signals queued while masked, and
 * recompute pending state — all under the task's sigmask_lock.
 * Assumes rxk_ListenerTask was set via osi_linux_rxkreg(). */
454 void osi_linux_unmask(void)
456 spin_lock_irq(&rxk_ListenerTask->sigmask_lock);
457 sigemptyset(&rxk_ListenerTask->blocked);
458 flush_signals(rxk_ListenerTask);
459 recalc_sigpending(rxk_ListenerTask);
460 spin_unlock_irq(&rxk_ListenerTask->sigmask_lock);
/* Record the calling task as the Rx listener, for later use by
 * osi_linux_unmask(). */
463 void osi_linux_rxkreg(void)
465 rxk_ListenerTask = current;