AFS_GUNLOCK();
if (!afile) {
osi_Panic("osi_UFSOpen: Failed to allocate %d bytes for osi_file.\n",
- sizeof(struct osi_file));
+ (int)sizeof(struct osi_file));
}
memset(afile, 0, sizeof(struct osi_file));
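A pattern this patch applies repeatedly (see also the osi_AllocLargeSpace and osi_AllocSmallSpace hunks below): sizeof yields a size_t, which is wider than int on LP64 platforms, so passing it straight to a %d conversion in osi_Panic's printf-style format draws a format-mismatch warning. The sizes involved are small, so narrowing to int is safe. A minimal userspace sketch of the same issue, with plain printf standing in for osi_Panic and a stub struct:

    #include <stdio.h>

    struct osi_file { int fd; };    /* stub; the real struct is larger */

    int main(void)
    {
        /* size_t is not int on 64-bit targets: cast for %d, or use %zu. */
        printf("need %d bytes\n", (int)sizeof(struct osi_file));
        printf("need %zu bytes\n", sizeof(struct osi_file));  /* C99 form */
        return 0;
    }

osi_Panic formats must work in every supported kernel, where %zu support varies, which presumably made the explicit cast the portable choice.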
#if defined(HAVE_IGET)
int change_parent)
{
struct group_info *group_info;
+#ifndef AFS_LINUX26_ONEGROUP_ENV
gid_t g0, g1;
+#endif
struct group_info *tmp;
int i;
#ifdef AFS_LINUX26_ONEGROUP_ENV
#undef TO_USER_SPACE
#undef TO_KERNEL_SPACE
#endif
-#define KERNEL_SPACE_DECL mm_segment_t _fs_space_decl
+#define KERNEL_SPACE_DECL mm_segment_t _fs_space_decl={0}
#define TO_USER_SPACE() { _fs_space_decl = get_fs(); set_fs(get_ds()); }
#define TO_KERNEL_SPACE() set_fs(_fs_space_decl)
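The ={0} added to KERNEL_SPACE_DECL exists only to quiet "may be used uninitialized" warnings on paths where TO_KERNEL_SPACE() is reachable without a preceding TO_USER_SPACE(). A braced {0} zero-initializes mm_segment_t whether it is a struct or a scalar, so the macro stays portable across architectures. A small sketch, with seg_t as a hypothetical stand-in for mm_segment_t:

    /* seg_t stands in for mm_segment_t, whose definition varies by
     * architecture; = {0} is a valid zero initializer for either form. */
    typedef struct { unsigned long seg; } seg_t;

    void example(int need_user_copy)
    {
        seg_t saved = {0};     /* quiets "may be used uninitialized" */
        if (need_user_copy)
            saved.seg = 1;     /* stands in for saved = get_fs()     */
        (void)saved;           /* stands in for set_fs(saved)        */
    }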
#if defined(EXPORTED_INIT_MM)
#ifdef AFS_LINUX24_ENV
#include <linux/module.h> /* early to avoid printf->printk mapping */
+#ifdef AFS_LINUX26_ENV
+#include <scsi/scsi.h> /* for scsi_command_size */
+#endif
#ifndef OSI_PROBE_STANDALONE
#include "afs/sysincludes.h"
#include "afsincludes.h"
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/mm.h>
-#ifdef AFS_LINUX26_ENV
-#include <scsi/scsi.h> /* for scsi_command_size */
-#endif
#if defined(AFS_PPC64_LINUX26_ENV)
#include <asm/abs_addr.h>
("$Header$");
#include <linux/module.h> /* early to avoid printf->printk mapping */
+#ifdef HAVE_KERNEL_LINUX_SEQ_FILE_H
+#include <linux/seq_file.h>
+#endif
#include "afs/sysincludes.h"
#include "afsincludes.h"
#include "afs/nfsclient.h"
#include <linux/sched.h>
#include <linux/kernel.h>
-#ifdef HAVE_KERNEL_LINUX_SEQ_FILE_H
-#include <linux/seq_file.h>
-#endif
-
struct proc_dir_entry *openafs_procfs;
#ifdef HAVE_KERNEL_LINUX_SEQ_FILE_H
#endif
/* Java VMs ask for l_len=(long)-1 regardless of OS/CPU; bottom 32 bits
* sometimes get masked off by OS */
- if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7ffffffffffffffe))
+ if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7ffffffffffffffeLL))
af->l_len = 0;
/* next line makes byte range locks always succeed,
* even when they should block */
int flagIndex = 0; /* First file with bulk fetch flag set */
int inlinebulk = 0; /* Did we use InlineBulk RPC or not? */
XSTATS_DECLS;
+ dotdot.Cell = 0;
+ dotdot.Fid.Unique = 0;
+ dotdot.Fid.Vnode = 0;
#ifdef AFS_DARWIN80_ENV
panic("bulkstatus doesn't work on AFS_DARWIN80_ENV. don't call it");
#endif
if (!code) {
- struct VenusFid *oldmvid = NULL;
+ char *oldmvid = NULL;
if (tvc->mvid)
- oldmvid = tvc->mvid;
+ oldmvid = (char *)tvc->mvid;
tvc->mvid = (struct VenusFid *)unlname;
if (oldmvid)
osi_FreeSmallSpace(oldmvid);
{
int code;
DECLARE_COMPLETION(c);
#if defined(AFS_LINUX26_ENV)
+#if defined(INIT_WORK_HAS_DATA)
struct work_struct tq;
+#endif
#else
struct tq_struct tq;
#endif
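INIT_WORK_HAS_DATA is an OpenAFS configure probe separating the older workqueue interface, where INIT_WORK carried an opaque data pointer, from kernels 2.6.20 onward, where the handler receives its own work_struct and must recover its context with container_of; the tq local is therefore only declared when the older interface is in use. A runnable userspace sketch of that recovery pattern (all names here are illustrative, not OpenAFS code):

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace analogue of the kernel's container_of. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { void (*fn)(struct work *); };

    struct my_ctx {
        struct work work;   /* embedded, as in post-2.6.20 work_struct use */
        int value;
    };

    static void my_handler(struct work *w)
    {
        struct my_ctx *ctx = container_of(w, struct my_ctx, work);
        printf("value=%d\n", ctx->value);
    }

    int main(void)
    {
        struct my_ctx ctx = { { my_handler }, 42 };
        ctx.work.fn(&ctx.work);  /* stands in for the workqueue calling us */
        return 0;
    }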
afs_rootFid.Cell = localcell;
if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
&& afs_globalVp) {
- struct vcache *tvc = afs_globalVp;
/* If we had a root fid before and it changed location we reset
* the afs_globalVp so that it will be reevaluated.
* Just decrement the reference count. This only occurs during
afs_uint32 maxVictimPtr; /* where it is */
int discard;
int curbucket;
+#if defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
int vfslocked;
+#endif
#if defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
vfslocked = VFS_LOCK_GIANT(afs_globalVFS);
int
afs_InitCellInfo(char *afile)
{
- ino_t inode;
+ ino_t inode = 0;
int code;
#if defined(LINUX_USE_FH)
struct fid fh;
AFS_STATCNT(osi_AllocLargeSpace);
if (size > AFS_LRALLOCSIZ)
- osi_Panic("osi_AllocLargeSpace: size=%d\n", size);
+ osi_Panic("osi_AllocLargeSpace: size=%d\n", (int)size);
afs_stats_cmperf.LargeBlocksActive++;
if (!freePacketList) {
char *p;
AFS_STATCNT(osi_AllocSmallSpace);
if (size > AFS_SMALLOCSIZ)
- osi_Panic("osi_AllocSmallS: size=%d\n", size);
+ osi_Panic("osi_AllocSmallS: size=%d\n", (int)size);
if (!freeSmallList) {
afs_stats_cmperf.SmallBlocksAlloced++;
return rv;
}
#elif defined(AFS_LINUX22_ENV)
-const struct AFS_UCRED *
+struct AFS_UCRED *
afs_osi_proc2cred(AFS_PROC * pr)
{
struct AFS_UCRED *rv = NULL;
void
afs_get_groups_from_pag(afs_uint32 pag, gid_t * g0p, gid_t * g1p)
{
+#ifndef AFS_LINUX26_ONEGROUP_ENV
unsigned short g0, g1;
+#endif
AFS_STATCNT(afs_get_groups_from_pag);
afs_int32
-PagInCred(const struct AFS_UCRED *cred)
+PagInCred(struct AFS_UCRED *cred)
{
afs_int32 pag;
+#if !defined(AFS_LINUX26_ONEGROUP_ENV)
gid_t g0, g1;
+#endif
#if defined(AFS_SUN510_ENV)
const gid_t *gids;
int ngroups;
void
osi_FlushPages(register struct vcache *avc, struct AFS_UCRED *credp)
{
+#ifdef AFS_FBSD70_ENV
int vfslocked;
+#endif
afs_hyper_t origDV;
#if defined(AFS_CACHE_BYPASS)
/* The optimization to check DV under read lock below is identical a
register afs_int32 i;
register struct unixuser *tu;
register char *cp;
- afs_int32 iterator;
+ afs_int32 iterator = 0;
int newStyle;
AFS_STATCNT(PGetTokens);
extern int afs_osi_suser(void *credp);
extern void afs_osi_TraverseProcTable(void);
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_PROC)
-extern const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC * pr);
+extern struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC * pr);
#endif
/* afs_osi_alloc.c */
#endif
#endif
extern void afs_get_groups_from_pag(afs_uint32 pag, gid_t * g0p, gid_t * g1p);
-extern afs_int32 PagInCred(const struct AFS_UCRED *cred);
+extern afs_int32 PagInCred(struct AFS_UCRED *cred);
/* afs_osi_uio.c */
extern int afsio_copy(struct uio *ainuio, struct uio *aoutuio,
multi_Rx(rxconns,nconns)
{
tv.tv_sec = tv.tv_usec = 0;
- multi_RXAFS_GetTime(&tv.tv_sec, &tv.tv_usec);
+ multi_RXAFS_GetTime((afs_uint32 *)&tv.tv_sec, (afs_uint32 *)&tv.tv_usec);
tc = conns[multi_i];
sa = tc->srvr;
if (conntimer[multi_i] == 1)
#endif /* !KERNEL */
#define XSTATS_DECLS struct afs_stats_opTimingData *opP = NULL; \
- osi_timeval_t opStartTime, opStopTime, elapsedTime
+ osi_timeval_t opStartTime = { 0, 0}, opStopTime, elapsedTime
#define XSTATS_START_TIME(arg) \
opP = &(afs_stats_cmfullperf.rpc.fsRPCTimes[arg]); \
afs_GCPAGs_perproc_func(AFS_PROC * pproc)
{
afs_int32 pag, hash, uid;
- const struct AFS_UCRED *pcred;
+ struct AFS_UCRED *pcred;
afs_GCPAGs_perproc_count++;
# endif
{
int i;
- char *panicstr;
i = 0;
for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
void
afs_vcacheInit(int astatSize)
{
+#if (!defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)) || defined(AFS_SGI_ENV)
register struct vcache *tvp;
+#endif
int i;
#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
if (!afs_maxvcount) {
#define SIGN 0x80000000
#define hadd32(a,i) \
- (((((a).low ^ (int)(i)) & SIGN) \
+ ((void)((((a).low ^ (int)(i)) & SIGN) \
? (((((a).low + (int)(i)) & SIGN) == 0) && (a).high++) \
: (((a).low & (int)(i) & SIGN) && (a).high++)), \
(a).low += (int)(i))
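hadd32 adds a 32-bit increment into a split 64-bit counter, detecting carry with sign bits alone: when the operands' top bits differ, a carry out of bit 31 happened exactly when the sum's top bit comes back clear; when they agree, a carry happened exactly when both were set. The new (void) cast just tells the compiler the &&-chains are evaluated for their (a).high++ side effects, silencing "value computed is not used". A standalone check of the carry rule, with struct hyper as a stand-in for afs_hyper_t:

    #include <stdio.h>
    #include <stdint.h>

    #define SIGN 0x80000000u

    struct hyper { uint32_t high, low; };

    static void hadd32(struct hyper *a, uint32_t i)
    {
        uint32_t sum = a->low + i;
        /* Carry out of bit 31: sign bits differ and the sum's cleared,
         * or both operands had their sign bits set. */
        if (((a->low ^ i) & SIGN) ? !(sum & SIGN) : (a->low & i & SIGN))
            a->high++;
        a->low = sum;
    }

    int main(void)
    {
        struct hyper h = { 0, 0xfffffffeu };
        hadd32(&h, 3);   /* low word wraps, carrying into the high word */
        printf("high=%u low=%u\n", h.high, h.low);   /* prints high=1 low=1 */
        return 0;
    }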
down(&l->sem);
#endif
if (l->owner)
- osi_Panic("mutex_enter: 0x%x held by %d", l, l->owner);
+ osi_Panic("mutex_enter: 0x%lx held by %d", (unsigned long)l, l->owner);
l->owner = current->pid;
}
afs_mutex_exit(afs_kmutex_t * l)
{
if (l->owner != current->pid)
- osi_Panic("mutex_exit: 0x%x held by %d", l, l->owner);
+ osi_Panic("mutex_exit: 0x%lx held by %d", (unsigned long)l, l->owner);
l->owner = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
mutex_unlock(&l->mutex);
#else
struct wait_queue wait = { current, NULL };
#endif
-
+ sigemptyset(&saved_set);
seq = cv->seq;
set_current_state(TASK_INTERRUPTIBLE);
void
osi_StopListener(void)
{
- struct task_struct *listener;
- extern int rxk_ListenerPid;
extern struct task_struct *rxk_ListenerTask;
while (rxk_ListenerTask) {
int newPackets = 0;
int didHardAck = 0;
int haveLast = 0;
- afs_uint32 seq, serial, flags;
+ afs_uint32 seq;
+ afs_uint32 serial=0, flags=0;
int isFirst;
struct rx_packet *tnp;
struct clock when, now;
if (!data || !tlen)
break;
tlen = MIN(len, tlen);
- fc_cbc_encrypt(data, data, tlen, schedule, xor, DECRYPT);
+ fc_cbc_encrypt(data, data, tlen, *schedule, xor, DECRYPT);
len -= tlen;
}
/* Do this if packet checksums are ever enabled (below), but
if (!data || !tlen)
break;
tlen = MIN(len, tlen);
- fc_cbc_encrypt(data, data, tlen, schedule, xor, ENCRYPT);
+ fc_cbc_encrypt(data, data, tlen, *schedule, xor, ENCRYPT);
len -= tlen;
}
return 0;
/* IN int encrypt; * 0 ==> decrypt, else encrypt */
afs_int32
fc_ecb_encrypt(void * clear, void * cipher,
- fc_KeySchedule schedule, int encrypt)
+ const fc_KeySchedule schedule, int encrypt)
{
afs_uint32 L, R;
volatile afs_uint32 S, P;
*/
afs_int32
fc_cbc_encrypt(void *input, void *output, afs_int32 length,
- fc_KeySchedule key, afs_uint32 * xor, int encrypt)
+ const fc_KeySchedule key, afs_uint32 * xor, int encrypt)
{
afs_uint32 i, j;
afs_uint32 t_input[2];
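Most of the rxkad churn in this patch traces back to one C subtlety: fc_KeySchedule is an array typedef, so a parameter declared const fc_KeySchedule schedule is adjusted by the compiler to const afs_int32 *. A caller holding fc_KeySchedule *schedule must pass *schedule, which decays to the matching element pointer; passing schedule itself would hand over an incompatible pointer-to-array, which is exactly the warning these hunks remove. A minimal illustration of the adjustment and decay (key_sched and the length 16 are illustrative):

    /* key_sched mirrors the fc_KeySchedule array typedef. */
    typedef int key_sched[16];

    static int first_round(const key_sched s)   /* adjusted: const int *s */
    {
        return s[0];
    }

    int main(void)
    {
        key_sched ks = { 42 };
        key_sched *p = &ks;      /* int (*)[16]: pointer to the whole array */

        (void)first_round(ks);   /* array decays to int *         : OK     */
        (void)first_round(*p);   /* *p is the array, then decays  : OK     */
        /* first_round(p);          int (*)[16] vs const int *: warning    */
        return first_round(*p) == 42 ? 0 : 1;
    }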
rxkad_SetupEndpoint(aconnp, &tendpoint);
memcpy((void *)xor, aivec, 2 * sizeof(afs_int32));
- fc_cbc_encrypt(&tendpoint, &tendpoint, sizeof(tendpoint), aschedule, xor,
+ fc_cbc_encrypt(&tendpoint, &tendpoint, sizeof(tendpoint), *aschedule, xor,
ENCRYPT);
memcpy(aresult,
((char *)&tendpoint) + sizeof(tendpoint) - ENCRYPTIONBLOCKSIZE,
word[0] ^= aivec[0];
word[1] ^= aivec[1];
/* encrypts word as if it were a character string */
- fc_ecb_encrypt(word, word, aschedule, ENCRYPT);
+ fc_ecb_encrypt(word, word, *aschedule, ENCRYPT);
t = ntohl(word[1]);
t = (t >> 16) & 0xffff;
if (t == 0)
return RXKADINCONSISTENCY;
rxkad_SetLevel(aconn, tcp->level); /* set header and trailer sizes */
rxkad_AllocCID(aobj, aconn); /* CHANGES cid AND epoch!!!! */
- rxkad_DeriveXORInfo(aconn, tcp->keysched, tcp->ivec, tccp->preSeq);
+ rxkad_DeriveXORInfo(aconn, (fc_KeySchedule *)tcp->keysched, (char *)tcp->ivec, (char *)tccp->preSeq);
INC_RXKAD_STATS(connections[rxkad_LevelIndex(tcp->level)]);
}
{
struct rx_connection *tconn;
rxkad_level level;
- fc_KeySchedule *schedule;
+ const fc_KeySchedule *schedule;
fc_InitializationVector *ivec;
int len;
int nlen = 0;
INC_RXKAD_STATS(checkPackets[rxkad_StatIndex(rxkad_server, level)]);
sconn->stats.packetsReceived++;
sconn->stats.bytesReceived += len;
- schedule = (fc_KeySchedule *) sconn->keysched;
+ schedule = (const fc_KeySchedule *) sconn->keysched;
ivec = (fc_InitializationVector *) sconn->ivec;
} else {
INC_RXKAD_STATS(expired);
cconn->stats.packetsReceived++;
cconn->stats.bytesReceived += len;
preSeq = cconn->preSeq;
- schedule = (fc_KeySchedule *) tcp->keysched;
+ schedule = (const fc_KeySchedule *) tcp->keysched;
ivec = (fc_InitializationVector *) tcp->ivec;
}
if (checkCksum) {
- code = ComputeSum(apacket, schedule, preSeq);
+ code = ComputeSum(apacket, (fc_KeySchedule *)schedule, preSeq);
if (code != rx_GetPacketCksum(apacket))
return RXKADSEALEDINCON;
}
return 0; /* shouldn't happen */
case rxkad_auth:
rx_Pullup(apacket, 8); /* the following encrypts 8 bytes only */
- fc_ecb_encrypt(rx_DataOf(apacket), rx_DataOf(apacket), schedule,
+ fc_ecb_encrypt(rx_DataOf(apacket), rx_DataOf(apacket), *schedule,
DECRYPT);
break;
case rxkad_crypt:
- code = rxkad_DecryptPacket(tconn, schedule, ivec, len, apacket);
+ code = rxkad_DecryptPacket(tconn, schedule, (const fc_InitializationVector *)ivec, len, apacket);
if (code)
return code;
break;
nlen - (len + rx_GetSecurityHeaderSize(tconn)));
}
rx_Pullup(apacket, 8); /* the following encrypts 8 bytes only */
- fc_ecb_encrypt(rx_DataOf(apacket), rx_DataOf(apacket), schedule,
+ fc_ecb_encrypt(rx_DataOf(apacket), rx_DataOf(apacket), *schedule,
ENCRYPT);
break;
case rxkad_crypt:
rxi_RoundUpPacket(apacket,
nlen - (len + rx_GetSecurityHeaderSize(tconn)));
}
- code = rxkad_EncryptPacket(tconn, schedule, ivec, nlen, apacket);
+ code = rxkad_EncryptPacket(tconn, (const fc_KeySchedule *)schedule, (const fc_InitializationVector *)ivec, nlen, apacket);
if (code)
return code;
break;
extern int fc_keysched(struct ktc_encryptionKey *key,
fc_KeySchedule schedule);
extern afs_int32 fc_ecb_encrypt(void * clear, void * cipher,
- fc_KeySchedule schedule, int encrypt);
+ const fc_KeySchedule schedule, int encrypt);
extern afs_int32 fc_cbc_encrypt(void *input, void *output, afs_int32 length,
- fc_KeySchedule key, afs_uint32 * iv,
+ const fc_KeySchedule key, afs_uint32 * iv,
int encrypt);
/* rxkad_client.c */