/* Saved client configuration, reported back for GetCacheConfig requests. */
cm_initparams_v1 cm_initParams;

/* Primary @sys name and the list of all configured @sys names.
 * cm_sysName aliases cm_sysNameList[0]; cm_sysNameCount is how many
 * entries of cm_sysNameList are valid (a count, hence unsigned). */
char *cm_sysName = 0;
unsigned int cm_sysNameCount = 0;
char *cm_sysNameList[MAXNUMSYSNAMES];

/* Event-log tracing flags, read from the "TraceOption" registry value. */
DWORD TraceOption = 0;
int afsd_InitCM(char **reasonP)
{
- osi_uid_t debugID;
- long cacheBlocks;
- long cacheSize;
- long logChunkSize;
- long stats;
- long traceBufSize;
+ osi_uid_t debugID;
+ long cacheBlocks;
+ long cacheSize;
+ long logChunkSize;
+ long stats;
+ long traceBufSize;
long maxcpus;
- long ltt, ltto;
+ long ltt, ltto;
long rx_mtu, rx_nojumbo;
long virtualCache;
- char rootCellName[256];
- struct rx_service *serverp;
- static struct rx_securityClass *nullServerSecurityClassp;
- struct hostent *thp;
- char *msgBuf;
- char buf[200];
- HKEY parmKey;
- DWORD dummyLen;
+ char rootCellName[256];
+ struct rx_service *serverp;
+ static struct rx_securityClass *nullServerSecurityClassp;
+ struct hostent *thp;
+ char *msgBuf;
+ char buf[200];
+ HKEY parmKey;
+ DWORD dummyLen;
DWORD regType;
- long code;
- /*int freelanceEnabled;*/
- WSADATA WSAjunk;
+ long code;
+ /*int freelanceEnabled;*/
+ WSADATA WSAjunk;
lana_number_t lanaNum;
int i;
- WSAStartup(0x0101, &WSAjunk);
+ WSAStartup(0x0101, &WSAjunk);
afsd_initUpperCaseTable();
- /* setup osidebug server at RPC slot 1000 */
- osi_LongToUID(1000, &debugID);
- code = osi_InitDebug(&debugID);
- afsi_log("osi_InitDebug code %d", code);
+ /* setup osidebug server at RPC slot 1000 */
+ osi_LongToUID(1000, &debugID);
+ code = osi_InitDebug(&debugID);
+ afsi_log("osi_InitDebug code %d", code);
// osi_LockTypeSetDefault("stat"); /* comment this out for speed *
- if (code != 0) {
- *reasonP = "unknown error";
- return -1;
- }
+ if (code != 0) {
+ *reasonP = "unknown error";
+ return -1;
+ }
- /* who are we ? */
- gethostname(cm_HostName, sizeof(cm_HostName));
- afsi_log("gethostname %s", cm_HostName);
- thp = gethostbyname(cm_HostName);
- memcpy(&cm_HostAddr, thp->h_addr_list[0], 4);
-
- /* seed random number generator */
- srand(ntohl(cm_HostAddr));
-
- /* Look up configuration parameters in Registry */
- code = RegOpenKeyEx(HKEY_LOCAL_MACHINE, AFSConfigKeyName,
- 0, KEY_QUERY_VALUE, &parmKey);
- if (code != ERROR_SUCCESS) {
- FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM
- | FORMAT_MESSAGE_ALLOCATE_BUFFER,
- NULL, code, 0, (LPTSTR)&msgBuf, 0, NULL);
- StringCbPrintfA(buf, sizeof(buf),
- "Failure in configuration while opening Registry: %s",
- msgBuf);
- osi_panic(buf, __FILE__, __LINE__);
- }
+ /* who are we ? */
+ gethostname(cm_HostName, sizeof(cm_HostName));
+ afsi_log("gethostname %s", cm_HostName);
+ thp = gethostbyname(cm_HostName);
+ memcpy(&cm_HostAddr, thp->h_addr_list[0], 4);
+
+ /* seed random number generator */
+ srand(ntohl(cm_HostAddr));
+
+ /* Look up configuration parameters in Registry */
+ code = RegOpenKeyEx(HKEY_LOCAL_MACHINE, AFSConfigKeyName,
+ 0, KEY_QUERY_VALUE, &parmKey);
+ if (code != ERROR_SUCCESS) {
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM
+ | FORMAT_MESSAGE_ALLOCATE_BUFFER,
+ NULL, code, 0, (LPTSTR)&msgBuf, 0, NULL);
+ StringCbPrintfA(buf, sizeof(buf),
+ "Failure in configuration while opening Registry: %s",
+ msgBuf);
+ osi_panic(buf, __FILE__, __LINE__);
+ }
dummyLen = sizeof(maxcpus);
- code = RegQueryValueEx(parmKey, "MaxCPUs", NULL, NULL,
- (BYTE *) &maxcpus, &dummyLen);
- if (code == ERROR_SUCCESS) {
+ code = RegQueryValueEx(parmKey, "MaxCPUs", NULL, NULL,
+ (BYTE *) &maxcpus, &dummyLen);
+ if (code == ERROR_SUCCESS) {
HANDLE hProcess;
DWORD_PTR processAffinityMask, systemAffinityMask;
}
}
- dummyLen = sizeof(TraceOption);
- code = RegQueryValueEx(parmKey, "TraceOption", NULL, NULL,
- (BYTE *) &TraceOption, &dummyLen);
+ dummyLen = sizeof(TraceOption);
+ code = RegQueryValueEx(parmKey, "TraceOption", NULL, NULL,
+ (BYTE *) &TraceOption, &dummyLen);
afsi_log("Event Log Tracing = %lX", TraceOption);
- dummyLen = sizeof(traceBufSize);
- code = RegQueryValueEx(parmKey, "TraceBufferSize", NULL, NULL,
- (BYTE *) &traceBufSize, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Trace Buffer size %d", traceBufSize);
- else {
- traceBufSize = CM_CONFIGDEFAULT_TRACEBUFSIZE;
- afsi_log("Default trace buffer size %d", traceBufSize);
- }
+ dummyLen = sizeof(traceBufSize);
+ code = RegQueryValueEx(parmKey, "TraceBufferSize", NULL, NULL,
+ (BYTE *) &traceBufSize, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Trace Buffer size %d", traceBufSize);
+ else {
+ traceBufSize = CM_CONFIGDEFAULT_TRACEBUFSIZE;
+ afsi_log("Default trace buffer size %d", traceBufSize);
+ }
- /* setup and enable debug log */
- afsd_logp = osi_LogCreate("afsd", traceBufSize);
- afsi_log("osi_LogCreate log addr %x", (int)afsd_logp);
+ /* setup and enable debug log */
+ afsd_logp = osi_LogCreate("afsd", traceBufSize);
+ afsi_log("osi_LogCreate log addr %x", (int)afsd_logp);
osi_LogEnable(afsd_logp);
- logReady = 1;
+ logReady = 1;
osi_Log0(afsd_logp, "Log init");
- dummyLen = sizeof(cacheSize);
- code = RegQueryValueEx(parmKey, "CacheSize", NULL, NULL,
- (BYTE *) &cacheSize, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Cache size %d", cacheSize);
- else {
- cacheSize = CM_CONFIGDEFAULT_CACHESIZE;
- afsi_log("Default cache size %d", cacheSize);
- }
+ dummyLen = sizeof(cacheSize);
+ code = RegQueryValueEx(parmKey, "CacheSize", NULL, NULL,
+ (BYTE *) &cacheSize, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Cache size %d", cacheSize);
+ else {
+ cacheSize = CM_CONFIGDEFAULT_CACHESIZE;
+ afsi_log("Default cache size %d", cacheSize);
+ }
- dummyLen = sizeof(logChunkSize);
- code = RegQueryValueEx(parmKey, "ChunkSize", NULL, NULL,
- (BYTE *) &logChunkSize, &dummyLen);
- if (code == ERROR_SUCCESS) {
- if (logChunkSize < 12 || logChunkSize > 30) {
- afsi_log("Invalid chunk size %d, using default",
- logChunkSize);
- logChunkSize = CM_CONFIGDEFAULT_CHUNKSIZE;
- }
- afsi_log("Chunk size %d", logChunkSize);
- } else {
- logChunkSize = CM_CONFIGDEFAULT_CHUNKSIZE;
- afsi_log("Default chunk size %d", logChunkSize);
- }
- cm_logChunkSize = logChunkSize;
- cm_chunkSize = 1 << logChunkSize;
-
- dummyLen = sizeof(numBkgD);
- code = RegQueryValueEx(parmKey, "Daemons", NULL, NULL,
- (BYTE *) &numBkgD, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("%d background daemons", numBkgD);
- else {
- numBkgD = CM_CONFIGDEFAULT_DAEMONS;
- afsi_log("Defaulting to %d background daemons", numBkgD);
- }
+ dummyLen = sizeof(logChunkSize);
+ code = RegQueryValueEx(parmKey, "ChunkSize", NULL, NULL,
+ (BYTE *) &logChunkSize, &dummyLen);
+ if (code == ERROR_SUCCESS) {
+ if (logChunkSize < 12 || logChunkSize > 30) {
+ afsi_log("Invalid chunk size %d, using default",
+ logChunkSize);
+ logChunkSize = CM_CONFIGDEFAULT_CHUNKSIZE;
+ }
+ afsi_log("Chunk size %d", logChunkSize);
+ } else {
+ logChunkSize = CM_CONFIGDEFAULT_CHUNKSIZE;
+ afsi_log("Default chunk size %d", logChunkSize);
+ }
+ cm_logChunkSize = logChunkSize;
+ cm_chunkSize = 1 << logChunkSize;
+
+ dummyLen = sizeof(numBkgD);
+ code = RegQueryValueEx(parmKey, "Daemons", NULL, NULL,
+ (BYTE *) &numBkgD, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("%d background daemons", numBkgD);
+ else {
+ numBkgD = CM_CONFIGDEFAULT_DAEMONS;
+ afsi_log("Defaulting to %d background daemons", numBkgD);
+ }
- dummyLen = sizeof(numSvThreads);
- code = RegQueryValueEx(parmKey, "ServerThreads", NULL, NULL,
- (BYTE *) &numSvThreads, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("%d server threads", numSvThreads);
- else {
- numSvThreads = CM_CONFIGDEFAULT_SVTHREADS;
- afsi_log("Defaulting to %d server threads", numSvThreads);
- }
+ dummyLen = sizeof(numSvThreads);
+ code = RegQueryValueEx(parmKey, "ServerThreads", NULL, NULL,
+ (BYTE *) &numSvThreads, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("%d server threads", numSvThreads);
+ else {
+ numSvThreads = CM_CONFIGDEFAULT_SVTHREADS;
+ afsi_log("Defaulting to %d server threads", numSvThreads);
+ }
- dummyLen = sizeof(stats);
- code = RegQueryValueEx(parmKey, "Stats", NULL, NULL,
- (BYTE *) &stats, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Status cache size %d", stats);
- else {
- stats = CM_CONFIGDEFAULT_STATS;
- afsi_log("Default status cache size %d", stats);
- }
+ dummyLen = sizeof(stats);
+ code = RegQueryValueEx(parmKey, "Stats", NULL, NULL,
+ (BYTE *) &stats, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Status cache size %d", stats);
+ else {
+ stats = CM_CONFIGDEFAULT_STATS;
+ afsi_log("Default status cache size %d", stats);
+ }
- dummyLen = sizeof(ltt);
- code = RegQueryValueEx(parmKey, "LogoffTokenTransfer", NULL, NULL,
- (BYTE *) <t, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Logoff token transfer %s", (ltt ? "on" : "off"));
- else {
- ltt = 1;
- afsi_log("Logoff token transfer on by default");
- }
+ dummyLen = sizeof(ltt);
+ code = RegQueryValueEx(parmKey, "LogoffTokenTransfer", NULL, NULL,
+ (BYTE *) <t, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Logoff token transfer %s", (ltt ? "on" : "off"));
+ else {
+ ltt = 1;
+ afsi_log("Logoff token transfer on by default");
+ }
smb_LogoffTokenTransfer = ltt;
afsi_log("Logoff token transfer is currently ignored");
- if (ltt) {
- dummyLen = sizeof(ltto);
- code = RegQueryValueEx(parmKey, "LogoffTokenTransferTimeout",
- NULL, NULL, (BYTE *) <to, &dummyLen);
- if (code == ERROR_SUCCESS)
+ if (ltt) {
+ dummyLen = sizeof(ltto);
+ code = RegQueryValueEx(parmKey, "LogoffTokenTransferTimeout",
+ NULL, NULL, (BYTE *) <to, &dummyLen);
+ if (code == ERROR_SUCCESS)
afsi_log("Logoff token tranfer timeout %d seconds", ltto);
- else {
- ltto = 10;
- afsi_log("Default logoff token transfer timeout 10 seconds");
- }
- } else {
+ else {
+ ltto = 10;
+ afsi_log("Default logoff token transfer timeout 10 seconds");
+ }
+ } else {
ltto = 0;
- }
+ }
smb_LogoffTransferTimeout = ltto;
afsi_log("Default logoff token is currently ignored");
- dummyLen = sizeof(cm_rootVolumeName);
- code = RegQueryValueEx(parmKey, "RootVolume", NULL, NULL,
- cm_rootVolumeName, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Root volume %s", cm_rootVolumeName);
- else {
- StringCbCopyA(cm_rootVolumeName, sizeof(cm_rootVolumeName), "root.afs");
- afsi_log("Default root volume name root.afs");
- }
+ dummyLen = sizeof(cm_rootVolumeName);
+ code = RegQueryValueEx(parmKey, "RootVolume", NULL, NULL,
+ cm_rootVolumeName, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Root volume %s", cm_rootVolumeName);
+ else {
+ StringCbCopyA(cm_rootVolumeName, sizeof(cm_rootVolumeName), "root.afs");
+ afsi_log("Default root volume name root.afs");
+ }
- cm_mountRootLen = sizeof(cm_mountRoot);
- code = RegQueryValueEx(parmKey, "MountRoot", NULL, NULL,
- cm_mountRoot, &cm_mountRootLen);
- if (code == ERROR_SUCCESS) {
- afsi_log("Mount root %s", cm_mountRoot);
- cm_mountRootLen = strlen(cm_mountRoot);
- } else {
- StringCbCopyA(cm_mountRoot, sizeof(cm_mountRoot), "/afs");
- cm_mountRootLen = 4;
- /* Don't log */
- }
+ cm_mountRootLen = sizeof(cm_mountRoot);
+ code = RegQueryValueEx(parmKey, "MountRoot", NULL, NULL,
+ cm_mountRoot, &cm_mountRootLen);
+ if (code == ERROR_SUCCESS) {
+ afsi_log("Mount root %s", cm_mountRoot);
+ cm_mountRootLen = strlen(cm_mountRoot);
+ } else {
+ StringCbCopyA(cm_mountRoot, sizeof(cm_mountRoot), "/afs");
+ cm_mountRootLen = 4;
+ /* Don't log */
+ }
- dummyLen = sizeof(buf);
- code = RegQueryValueEx(parmKey, "CachePath", NULL, ®Type,
- buf, &dummyLen);
+ dummyLen = sizeof(buf);
+ code = RegQueryValueEx(parmKey, "CachePath", NULL, ®Type,
+ buf, &dummyLen);
if (code == ERROR_SUCCESS && buf[0]) {
if (regType == REG_EXPAND_SZ) {
dummyLen = ExpandEnvironmentStrings(buf, cm_CachePath, sizeof(cm_CachePath));
} else {
StringCbCopyA(cm_CachePath, sizeof(cm_CachePath), buf);
}
- afsi_log("Cache path %s", cm_CachePath);
+ afsi_log("Cache path %s", cm_CachePath);
} else {
- GetWindowsDirectory(cm_CachePath, sizeof(cm_CachePath));
- cm_CachePath[2] = 0; /* get drive letter only */
- StringCbCatA(cm_CachePath, sizeof(cm_CachePath), "\\AFSCache");
- afsi_log("Default cache path %s", cm_CachePath);
- }
+ GetWindowsDirectory(cm_CachePath, sizeof(cm_CachePath));
+ cm_CachePath[2] = 0; /* get drive letter only */
+ StringCbCatA(cm_CachePath, sizeof(cm_CachePath), "\\AFSCache");
+ afsi_log("Default cache path %s", cm_CachePath);
+ }
dummyLen = sizeof(virtualCache);
code = RegQueryValueEx(parmKey, "NonPersistentCaching", NULL, NULL,
- &virtualCache, &dummyLen);
+ &virtualCache, &dummyLen);
if (code == ERROR_SUCCESS && virtualCache) {
buf_cacheType = CM_BUF_CACHETYPE_VIRTUAL;
} else {
}
afsi_log("Cache type is %s", ((buf_cacheType == CM_BUF_CACHETYPE_FILE)?"FILE":"VIRTUAL"));
- dummyLen = sizeof(traceOnPanic);
- code = RegQueryValueEx(parmKey, "TrapOnPanic", NULL, NULL,
- (BYTE *) &traceOnPanic, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Set to %s on panic",
- traceOnPanic ? "trap" : "not trap");
- else {
- traceOnPanic = 0;
- /* Don't log */
- }
+ dummyLen = sizeof(traceOnPanic);
+ code = RegQueryValueEx(parmKey, "TrapOnPanic", NULL, NULL,
+ (BYTE *) &traceOnPanic, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Set to %s on panic",
+ traceOnPanic ? "trap" : "not trap");
+ else {
+ traceOnPanic = 0;
+ /* Don't log */
+ }
- dummyLen = sizeof(reportSessionStartups);
- code = RegQueryValueEx(parmKey, "ReportSessionStartups", NULL, NULL,
- (BYTE *) &reportSessionStartups, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Session startups %s be recorded in the Event Log",
- reportSessionStartups ? "will" : "will not");
- else {
- reportSessionStartups = 0;
- /* Don't log */
- }
+ dummyLen = sizeof(reportSessionStartups);
+ code = RegQueryValueEx(parmKey, "ReportSessionStartups", NULL, NULL,
+ (BYTE *) &reportSessionStartups, &dummyLen);
+ if (code == ERROR_SUCCESS)
+ afsi_log("Session startups %s be recorded in the Event Log",
+ reportSessionStartups ? "will" : "will not");
+ else {
+ reportSessionStartups = 0;
+ /* Don't log */
+ }
for ( i=0; i < MAXNUMSYSNAMES; i++ ) {
cm_sysNameList[i] = osi_Alloc(MAXSYSNAME);
}
cm_sysName = cm_sysNameList[0];
- dummyLen = MAXSYSNAME;
+ dummyLen = MAXSYSNAME;
code = RegQueryValueEx(parmKey, "SysName", NULL, NULL, cm_sysName, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("Sys name %s", cm_sysName);
- else {
- StringCbCopyA(cm_sysName, MAXSYSNAME, "i386_nt40");
- afsi_log("Default sys name %s", cm_sysName);
- }
+ if (code == ERROR_SUCCESS)
+ afsi_log("Sys name %s", cm_sysName);
+ else {
+ StringCbCopyA(cm_sysName, MAXSYSNAME, "i386_nt40");
+ afsi_log("Default sys name %s", cm_sysName);
+ }
cm_sysNameCount = 1;
- dummyLen = sizeof(cryptall);
- code = RegQueryValueEx(parmKey, "SecurityLevel", NULL, NULL,
- (BYTE *) &cryptall, &dummyLen);
- if (code == ERROR_SUCCESS)
- afsi_log("SecurityLevel is %s", cryptall?"crypt":"clear");
- else {
- cryptall = rxkad_clear;
- afsi_log("Default SecurityLevel is clear");
- }
+ dummyLen = sizeof(cryptall);
+ code = RegQueryValueEx(parmKey, "SecurityLevel", NULL, NULL,
+ (BYTE *) &cryptall, &dummyLen);
+ if (code == ERROR_SUCCESS) {
+ afsi_log("SecurityLevel is %s", cryptall?"crypt":"clear");
+ } else {
+ cryptall = 0;
+ afsi_log("Default SecurityLevel is clear");
+ }
#ifdef AFS_AFSDB_ENV
- dummyLen = sizeof(cm_dnsEnabled);
- code = RegQueryValueEx(parmKey, "UseDNS", NULL, NULL,
- (BYTE *) &cm_dnsEnabled, &dummyLen);
- if (code == ERROR_SUCCESS) {
- afsi_log("DNS %s be used to find AFS cell servers",
- cm_dnsEnabled ? "will" : "will not");
- }
- else {
- cm_dnsEnabled = 1; /* default on */
- afsi_log("Default to use DNS to find AFS cell servers");
- }
+ dummyLen = sizeof(cm_dnsEnabled);
+ code = RegQueryValueEx(parmKey, "UseDNS", NULL, NULL,
+ (BYTE *) &cm_dnsEnabled, &dummyLen);
+ if (code == ERROR_SUCCESS) {
+ afsi_log("DNS %s be used to find AFS cell servers",
+ cm_dnsEnabled ? "will" : "will not");
+ }
+ else {
+ cm_dnsEnabled = 1; /* default on */
+ afsi_log("Default to use DNS to find AFS cell servers");
+ }
#else /* AFS_AFSDB_ENV */
- afsi_log("AFS not built with DNS support to find AFS cell servers");
+ afsi_log("AFS not built with DNS support to find AFS cell servers");
#endif /* AFS_AFSDB_ENV */
#ifdef AFS_FREELANCE_CLIENT
- dummyLen = sizeof(cm_freelanceEnabled);
- code = RegQueryValueEx(parmKey, "FreelanceClient", NULL, NULL,
- (BYTE *) &cm_freelanceEnabled, &dummyLen);
- if (code == ERROR_SUCCESS) {
- afsi_log("Freelance client feature %s activated",
- cm_freelanceEnabled ? "is" : "is not");
- }
- else {
- cm_freelanceEnabled = 0; /* default off */
- }
+ dummyLen = sizeof(cm_freelanceEnabled);
+ code = RegQueryValueEx(parmKey, "FreelanceClient", NULL, NULL,
+ (BYTE *) &cm_freelanceEnabled, &dummyLen);
+ if (code == ERROR_SUCCESS) {
+ afsi_log("Freelance client feature %s activated",
+ cm_freelanceEnabled ? "is" : "is not");
+ }
+ else {
+ cm_freelanceEnabled = 0; /* default off */
+ }
#endif /* AFS_FREELANCE_CLIENT */
#ifdef COMMENT
}
afsi_log("Maximum number of VCs per server is %d", smb_maxVCPerServer);
- dummyLen = sizeof(smb_authType);
- code = RegQueryValueEx(parmKey, "SMBAuthType", NULL, NULL,
- (BYTE *) &smb_authType, &dummyLen);
+ dummyLen = sizeof(smb_authType);
+ code = RegQueryValueEx(parmKey, "SMBAuthType", NULL, NULL,
+ (BYTE *) &smb_authType, &dummyLen);
- if (code != ERROR_SUCCESS ||
- (smb_authType != SMB_AUTH_EXTENDED && smb_authType != SMB_AUTH_NTLM && smb_authType != SMB_AUTH_NONE)) {
- smb_authType = SMB_AUTH_EXTENDED; /* default is to use extended authentication */
- }
- afsi_log("SMB authentication type is %s", ((smb_authType == SMB_AUTH_NONE)?"NONE":((smb_authType == SMB_AUTH_EXTENDED)?"EXTENDED":"NTLM")));
+ if (code != ERROR_SUCCESS ||
+ (smb_authType != SMB_AUTH_EXTENDED && smb_authType != SMB_AUTH_NTLM && smb_authType != SMB_AUTH_NONE)) {
+ smb_authType = SMB_AUTH_EXTENDED; /* default is to use extended authentication */
+ }
+ afsi_log("SMB authentication type is %s", ((smb_authType == SMB_AUTH_NONE)?"NONE":((smb_authType == SMB_AUTH_EXTENDED)?"EXTENDED":"NTLM")));
dummyLen = sizeof(rx_nojumbo);
code = RegQueryValueEx(parmKey, "RxNoJumbo", NULL, NULL,
if (code != ERROR_SUCCESS) {
rx_nojumbo = 0;
}
- if(rx_nojumbo)
+ if (rx_nojumbo)
afsi_log("RX Jumbograms are disabled");
dummyLen = sizeof(rx_mtu);
if (code != ERROR_SUCCESS || !rx_mtu) {
rx_mtu = -1;
}
- if(rx_mtu != -1)
+ if (rx_mtu != -1)
afsi_log("RX maximum MTU is %d", rx_mtu);
dummyLen = sizeof(ConnDeadtimeout);
(BYTE *) &HardDeadtimeout, &dummyLen);
afsi_log("HardDeadTimeout is %d", HardDeadtimeout);
- RegCloseKey (parmKey);
+ RegCloseKey (parmKey);
/* Call lanahelper to get Netbios name, lan adapter number and gateway flag */
if(SUCCEEDED(code = lana_GetUncServerNameEx(cm_NetbiosName, &lanaNum, &isGateway, LANA_NETBIOS_NAME_FULL))) {
LANadapter = (lanaNum == LANA_INVALID)? -1: lanaNum;
- if(LANadapter != -1)
+ if (LANadapter != -1)
afsi_log("LAN adapter number %d", LANadapter);
else
afsi_log("LAN adapter number not determined");
- if(isGateway)
+ if (isGateway)
afsi_log("Set for gateway service");
afsi_log("Using >%s< as SMB server name", cm_NetbiosName);
osi_panic(buf, __FILE__, __LINE__);
}
- /* setup early variables */
- /* These both used to be configurable. */
- smb_UseV3 = 1;
+ /* setup early variables */
+ /* These both used to be configurable. */
+ smb_UseV3 = 1;
buf_bufferSize = CM_CONFIGDEFAULT_BLOCKSIZE;
- /* turn from 1024 byte units into memory blocks */
+ /* turn from 1024 byte units into memory blocks */
cacheBlocks = (cacheSize * 1024) / buf_bufferSize;
- /* get network related info */
- cm_noIPAddr = CM_MAXINTERFACE_ADDR;
- code = syscfg_GetIFInfo(&cm_noIPAddr,
- cm_IPAddr, cm_SubnetMask,
- cm_NetMtu, cm_NetFlags);
-
- if ( (cm_noIPAddr <= 0) || (code <= 0 ) )
- afsi_log("syscfg_GetIFInfo error code %d", code);
- else
- afsi_log("First Network address %x SubnetMask %x",
- cm_IPAddr[0], cm_SubnetMask[0]);
-
- /*
- * Save client configuration for GetCacheConfig requests
- */
- cm_initParams.nChunkFiles = 0;
- cm_initParams.nStatCaches = stats;
- cm_initParams.nDataCaches = 0;
- cm_initParams.nVolumeCaches = 0;
- cm_initParams.firstChunkSize = cm_chunkSize;
- cm_initParams.otherChunkSize = cm_chunkSize;
- cm_initParams.cacheSize = cacheSize;
- cm_initParams.setTime = 0;
- cm_initParams.memCache = 0;
+ /* get network related info */
+ cm_noIPAddr = CM_MAXINTERFACE_ADDR;
+ code = syscfg_GetIFInfo(&cm_noIPAddr,
+ cm_IPAddr, cm_SubnetMask,
+ cm_NetMtu, cm_NetFlags);
+
+ if ( (cm_noIPAddr <= 0) || (code <= 0 ) )
+ afsi_log("syscfg_GetIFInfo error code %d", code);
+ else
+ afsi_log("First Network address %x SubnetMask %x",
+ cm_IPAddr[0], cm_SubnetMask[0]);
+
+ /*
+ * Save client configuration for GetCacheConfig requests
+ */
+ cm_initParams.nChunkFiles = 0;
+ cm_initParams.nStatCaches = stats;
+ cm_initParams.nDataCaches = 0;
+ cm_initParams.nVolumeCaches = 0;
+ cm_initParams.firstChunkSize = cm_chunkSize;
+ cm_initParams.otherChunkSize = cm_chunkSize;
+ cm_initParams.cacheSize = cacheSize;
+ cm_initParams.setTime = 0;
+ cm_initParams.memCache = 0;
/* Set RX parameters before initializing RX */
if ( rx_nojumbo ) {
/* Ensure the AFS Netbios Name is registered to allow loopback access */
configureBackConnectionHostNames();
- /* initialize RX, and tell it to listen to port 7001, which is used for
+ /* initialize RX, and tell it to listen to port 7001, which is used for
* callback RPC messages.
*/
- code = rx_Init(htons(7001));
- afsi_log("rx_Init code %x", code);
- if (code != 0) {
- *reasonP = "afsd: failed to init rx client on port 7001";
- return -1;
- }
+ code = rx_Init(htons(7001));
+ afsi_log("rx_Init code %x", code);
+ if (code != 0) {
+ *reasonP = "afsd: failed to init rx client on port 7001";
+ return -1;
+ }
- /* Initialize the RPC server for session keys */
- RpcInit();
+ /* Initialize the RPC server for session keys */
+ RpcInit();
- /* create an unauthenticated service #1 for callbacks */
- nullServerSecurityClassp = rxnull_NewServerSecurityObject();
+ /* create an unauthenticated service #1 for callbacks */
+ nullServerSecurityClassp = rxnull_NewServerSecurityObject();
serverp = rx_NewService(0, 1, "AFS", &nullServerSecurityClassp, 1,
- RXAFSCB_ExecuteRequest);
- afsi_log("rx_NewService addr %x", (int)serverp);
- if (serverp == NULL) {
- *reasonP = "unknown error";
- return -1;
- }
+ RXAFSCB_ExecuteRequest);
+ afsi_log("rx_NewService addr %x", (int)serverp);
+ if (serverp == NULL) {
+ *reasonP = "unknown error";
+ return -1;
+ }
- nullServerSecurityClassp = rxnull_NewServerSecurityObject();
+ nullServerSecurityClassp = rxnull_NewServerSecurityObject();
serverp = rx_NewService(0, RX_STATS_SERVICE_ID, "rpcstats",
- &nullServerSecurityClassp, 1, RXSTATS_ExecuteRequest);
- afsi_log("rx_NewService addr %x", (int)serverp);
- if (serverp == NULL) {
- *reasonP = "unknown error";
- return -1;
- }
+ &nullServerSecurityClassp, 1, RXSTATS_ExecuteRequest);
+ afsi_log("rx_NewService addr %x", (int)serverp);
+ if (serverp == NULL) {
+ *reasonP = "unknown error";
+ return -1;
+ }
/* start server threads, *not* donating this one to the pool */
rx_StartServer(0);
- afsi_log("rx_StartServer");
+ afsi_log("rx_StartServer");
- /* init user daemon, and other packages */
- cm_InitUser();
+ /* init user daemon, and other packages */
+ cm_InitUser();
- cm_InitACLCache(2*stats);
+ cm_InitACLCache(2*stats);
- cm_InitConn();
+ cm_InitConn();
cm_InitCell();
cm_InitSCache(stats);
code = cm_InitDCache(0, cacheBlocks);
- afsi_log("cm_InitDCache code %x", code);
- if (code != 0) {
- *reasonP = "error initializing cache";
- return -1;
- }
+ afsi_log("cm_InitDCache code %x", code);
+ if (code != 0) {
+ *reasonP = "error initializing cache";
+ return -1;
+ }
#ifdef AFS_AFSDB_ENV
#if !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0500)
- if (cm_InitDNS(cm_dnsEnabled) == -1)
- cm_dnsEnabled = 0; /* init failed, so deactivate */
- afsi_log("cm_InitDNS %d", cm_dnsEnabled);
+ if (cm_InitDNS(cm_dnsEnabled) == -1)
+ cm_dnsEnabled = 0; /* init failed, so deactivate */
+ afsi_log("cm_InitDNS %d", cm_dnsEnabled);
#endif
#endif
- code = cm_GetRootCellName(rootCellName);
- afsi_log("cm_GetRootCellName code %d, cm_freelanceEnabled= %d, rcn= %s",
- code, cm_freelanceEnabled, (code ? "<none>" : rootCellName));
- if (code != 0 && !cm_freelanceEnabled)
+ code = cm_GetRootCellName(rootCellName);
+ afsi_log("cm_GetRootCellName code %d, cm_freelanceEnabled= %d, rcn= %s",
+ code, cm_freelanceEnabled, (code ? "<none>" : rootCellName));
+ if (code != 0 && !cm_freelanceEnabled)
{
- *reasonP = "can't find root cell name in afsd.ini";
- return -1;
- }
+ *reasonP = "can't find root cell name in afsd.ini";
+ return -1;
+ }
else if (cm_freelanceEnabled)
cm_rootCellp = NULL;
*reasonP = "can't find root cell in afsdcell.ini";
return -1;
}
- }
-
+ }
#ifdef AFS_FREELANCE_CLIENT
- if (cm_freelanceEnabled)
- cm_InitFreelance();
+ if (cm_freelanceEnabled)
+ cm_InitFreelance();
#endif
- return 0;
+ return 0;
}
int afsd_InitDaemons(char **reasonP)
/* hold a reference to an already held buffer */
/* Bumps bp->refCount under buf_globalLock; caller must already hold a
 * reference so the buffer cannot be recycled out from under us.
 * (Unresolved diff markers in this block have been resolved by taking
 * the post-image; old and new copies were identical in content.) */
void buf_Hold(cm_buf_t *bp)
{
    lock_ObtainWrite(&buf_globalLock);
    bp->refCount++;
    lock_ReleaseWrite(&buf_globalLock);
}
/* incremental sync daemon. Writes 1/10th of all the buffers every 5000 ms */
void buf_IncrSyncer(long parm)
{
- cm_buf_t *bp; /* buffer we're hacking on; held */
- long i; /* counter */
- long nAtOnce; /* how many to do at once */
- cm_req_t req;
-
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_allp;
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- nAtOnce = buf_nbuffers / 10;
- while (1) {
+ cm_buf_t *bp; /* buffer we're hacking on; held */
+ long i; /* counter */
+ long nAtOnce; /* how many to do at once */
+ cm_req_t req;
+
+ lock_ObtainWrite(&buf_globalLock);
+ bp = buf_allp;
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ nAtOnce = buf_nbuffers / 10;
+ while (1) {
#ifndef DJGPP
- i = SleepEx(5000, 1);
- if (i != 0) continue;
+ i = SleepEx(5000, 1);
+ if (i != 0) continue;
#else
- thrd_Sleep(5000);
+ thrd_Sleep(5000);
#endif /* DJGPP */
- /* now go through our percentage of the buffers */
- for(i=0; i<nAtOnce; i++) {
- /* don't want its identity changing while we're
- * messing with it, so must do all of this with
- * bp held.
- */
-
- /* start cleaning the buffer; don't touch log pages since
- * the log code counts on knowing exactly who is writing
- * a log page at any given instant.
- */
- cm_InitReq(&req);
- req.flags |= CM_REQ_NORETRY;
- buf_CleanAsync(bp, &req);
-
- /* now advance to the next buffer; the allp chain never changes,
- * and so can be followed even when holding no locks.
- */
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- bp = bp->allp;
- if (!bp) bp = buf_allp;
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- } /* for loop over a bunch of buffers */
- } /* whole daemon's while loop */
+ /* now go through our percentage of the buffers */
+ for(i=0; i<nAtOnce; i++) {
+ /* don't want its identity changing while we're
+ * messing with it, so must do all of this with
+ * bp held.
+ */
+
+ /* start cleaning the buffer; don't touch log pages since
+ * the log code counts on knowing exactly who is writing
+ * a log page at any given instant.
+ */
+ cm_InitReq(&req);
+ req.flags |= CM_REQ_NORETRY;
+ buf_CleanAsync(bp, &req);
+
+ /* now advance to the next buffer; the allp chain never changes,
+ * and so can be followed even when holding no locks.
+ */
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ bp = bp->allp;
+ if (!bp) bp = buf_allp;
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ } /* for loop over a bunch of buffers */
+ } /* whole daemon's while loop */
}
#ifndef DJGPP
*/
PSECURITY_ATTRIBUTES CreateCacheFileSA()
{
- PSECURITY_ATTRIBUTES psa;
- PSECURITY_DESCRIPTOR psd;
- SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
- PSID AdminSID;
- DWORD AdminSIDlength;
- PACL AdminOnlyACL;
- DWORD ACLlength;
-
- /* Get Administrator SID */
- AllocateAndInitializeSid(&authority, 2,
- SECURITY_BUILTIN_DOMAIN_RID,
- DOMAIN_ALIAS_RID_ADMINS,
- 0, 0, 0, 0, 0, 0,
- &AdminSID);
-
- /* Create Administrator-only ACL */
- AdminSIDlength = GetLengthSid(AdminSID);
- ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
- + AdminSIDlength - sizeof(DWORD);
- AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
- InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
- AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
- STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
- AdminSID);
-
- /* Create security descriptor */
- psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
- InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
- SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);
-
- /* Create security attributes structure */
- psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
- psa->nLength = sizeof(SECURITY_ATTRIBUTES);
- psa->lpSecurityDescriptor = psd;
- psa->bInheritHandle = TRUE;
-
- return psa;
-}
+ PSECURITY_ATTRIBUTES psa;
+ PSECURITY_DESCRIPTOR psd;
+ SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
+ PSID AdminSID;
+ DWORD AdminSIDlength;
+ PACL AdminOnlyACL;
+ DWORD ACLlength;
+
+ /* Get Administrator SID */
+ AllocateAndInitializeSid(&authority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID,
+ DOMAIN_ALIAS_RID_ADMINS,
+ 0, 0, 0, 0, 0, 0,
+ &AdminSID);
+
+ /* Create Administrator-only ACL */
+ AdminSIDlength = GetLengthSid(AdminSID);
+ ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
+ + AdminSIDlength - sizeof(DWORD);
+ AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
+ InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
+ AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
+ STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
+ AdminSID);
+
+ /* Create security descriptor */
+ psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
+ InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
+ SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);
+
+ /* Create security attributes structure */
+ psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
+ psa->nLength = sizeof(SECURITY_ATTRIBUTES);
+ psa->lpSecurityDescriptor = psd;
+ psa->bInheritHandle = TRUE;
+
+ return psa;
+}
#endif /* !DJGPP */
#ifndef DJGPP
/* Free a security attribute structure created by CreateCacheFileSA() */
/* Free a security attribute structure created by CreateCacheFileSA():
 * releases the DACL recovered from the descriptor, then the descriptor,
 * then the SECURITY_ATTRIBUTES itself, all via GlobalFree.
 * (Unresolved diff markers in this block have been resolved by taking
 * the post-image; old and new copies were identical in content.) */
VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa)
{
    BOOL b1, b2;
    PACL pAcl;

    GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2);
    GlobalFree(pAcl);
    GlobalFree(psa->lpSecurityDescriptor);
    GlobalFree(psa);
}
#endif /* !DJGPP */
/* initialize the buffer package; called with no locks
*/
long buf_Init(cm_buf_ops_t *opsp)
{
- static osi_once_t once;
- cm_buf_t *bp;
- long sectorSize;
- thread_t phandle;
+ static osi_once_t once;
+ cm_buf_t *bp;
+ long sectorSize;
+ thread_t phandle;
#ifndef DJGPP
- HANDLE hf, hm;
- PSECURITY_ATTRIBUTES psa;
+ HANDLE hf, hm;
+ PSECURITY_ATTRIBUTES psa;
#endif /* !DJGPP */
- long i;
- unsigned long pid;
- char *data;
- long cs;
+ long i;
+ unsigned long pid;
+ char *data;
+ long cs;
#ifndef DJGPP
- /* Get system info; all we really want is the allocation granularity */
- GetSystemInfo(&sysInfo);
+ /* Get system info; all we really want is the allocation granularity */
+ GetSystemInfo(&sysInfo);
#endif /* !DJGPP */
- /* Have to be able to reserve a whole chunk */
- if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
- return CM_ERROR_TOOFEWBUFS;
+ /* Have to be able to reserve a whole chunk */
+ if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
+ return CM_ERROR_TOOFEWBUFS;
- /* recall for callouts */
- cm_buf_opsp = opsp;
+ /* recall for callouts */
+ cm_buf_opsp = opsp;
- if (osi_Once(&once)) {
- /* initialize global locks */
- lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
+ if (osi_Once(&once)) {
+ /* initialize global locks */
+ lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
#ifndef DJGPP
- /*
- * Cache file mapping constrained by
- * system allocation granularity;
- * round up, assuming granularity is a power of two
- */
- cs = buf_nbuffers * buf_bufferSize;
- cs = (cs + (sysInfo.dwAllocationGranularity - 1))
- & ~(sysInfo.dwAllocationGranularity - 1);
- if (cs != buf_nbuffers * buf_bufferSize) {
- buf_nbuffers = cs / buf_bufferSize;
- afsi_log("Cache size rounded up to %d buffers",
- buf_nbuffers);
- }
+ /*
+ * Cache file mapping constrained by
+ * system allocation granularity;
+ * round up, assuming granularity is a power of two
+ */
+ cs = buf_nbuffers * buf_bufferSize;
+ cs = (cs + (sysInfo.dwAllocationGranularity - 1))
+ & ~(sysInfo.dwAllocationGranularity - 1);
+ if (cs != buf_nbuffers * buf_bufferSize) {
+ buf_nbuffers = cs / buf_bufferSize;
+ afsi_log("Cache size rounded up to %d buffers",
+ buf_nbuffers);
+ }
#endif /* !DJGPP */
- /* remember this for those who want to reset it */
- buf_nOrigBuffers = buf_nbuffers;
+ /* remember this for those who want to reset it */
+ buf_nOrigBuffers = buf_nbuffers;
- /* lower hash size to a prime number */
- buf_hashSize = osi_PrimeLessThan(buf_hashSize);
+ /* lower hash size to a prime number */
+ buf_hashSize = osi_PrimeLessThan(buf_hashSize);
- /* create hash table */
- buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
- memset((void *)buf_hashTablepp, 0,
- buf_hashSize * sizeof(cm_buf_t *));
+ /* create hash table */
+ buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
+ memset((void *)buf_hashTablepp, 0,
+ buf_hashSize * sizeof(cm_buf_t *));
- /* another hash table */
- buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
- memset((void *)buf_fileHashTablepp, 0,
- buf_hashSize * sizeof(cm_buf_t *));
+ /* another hash table */
+ buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
+ memset((void *)buf_fileHashTablepp, 0,
+ buf_hashSize * sizeof(cm_buf_t *));
- /* min value for which this works */
- sectorSize = 1;
+ /* min value for which this works */
+ sectorSize = 1;
#ifndef DJGPP
- if(buf_cacheType == CM_BUF_CACHETYPE_FILE) {
- /* Reserve buffer space by mapping cache file */
- psa = CreateCacheFileSA();
- hf = CreateFile(cm_CachePath,
- GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE,
- psa,
- OPEN_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
- if (hf == INVALID_HANDLE_VALUE) {
- afsi_log("create file error %d", GetLastError());
- return CM_ERROR_INVAL;
- }
- FreeCacheFileSA(psa);
+ if (buf_cacheType == CM_BUF_CACHETYPE_FILE) {
+ /* Reserve buffer space by mapping cache file */
+ psa = CreateCacheFileSA();
+ hf = CreateFile(cm_CachePath,
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ psa,
+ OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+ if (hf == INVALID_HANDLE_VALUE) {
+ afsi_log("Error creating cache file \"%s\" error %d",
+ cm_CachePath, GetLastError());
+ return CM_ERROR_INVAL;
+ }
+ FreeCacheFileSA(psa);
} else { /* buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL */
hf = INVALID_HANDLE_VALUE;
}
- CacheHandle = hf;
- hm = CreateFileMapping(hf,
- NULL,
- PAGE_READWRITE,
- 0, buf_nbuffers * buf_bufferSize,
- NULL);
- if (hm == NULL) {
- if (GetLastError() == ERROR_DISK_FULL) {
- afsi_log("Error creating cache file mapping: disk full");
- return CM_ERROR_TOOMANYBUFS;
- }
- return CM_ERROR_INVAL;
- }
- data = MapViewOfFile(hm,
- FILE_MAP_ALL_ACCESS,
- 0, 0,
- buf_nbuffers * buf_bufferSize);
- if (data == NULL) {
- if(hf != INVALID_HANDLE_VALUE) CloseHandle(hf);
- CloseHandle(hm);
- return CM_ERROR_INVAL;
- }
- CloseHandle(hm);
-#else
- /* djgpp doesn't support memory mapped files */
- data = malloc(buf_nbuffers * buf_bufferSize);
+ CacheHandle = hf;
+ hm = CreateFileMapping(hf,
+ NULL,
+ PAGE_READWRITE,
+ 0, buf_nbuffers * buf_bufferSize,
+ NULL);
+ if (hm == NULL) {
+ DWORD err = GetLastError();
+ /* save the error first: CloseHandle may clobber last-error.
+ * Close hf so the cache file handle is not leaked on failure
+ * (the MapViewOfFile failure path below already does this). */
+ if (hf != INVALID_HANDLE_VALUE)
+ CloseHandle(hf);
+ if (err == ERROR_DISK_FULL) {
+ afsi_log("Error creating cache file \"%s\" mapping: disk full",
+ cm_CachePath);
+ return CM_ERROR_TOOMANYBUFS;
+ }
+ return CM_ERROR_INVAL;
+ }
+ data = MapViewOfFile(hm,
+ FILE_MAP_ALL_ACCESS,
+ 0, 0,
+ buf_nbuffers * buf_bufferSize);
+ if (data == NULL) {
+ if (hf != INVALID_HANDLE_VALUE)
+ CloseHandle(hf);
+ CloseHandle(hm);
+ return CM_ERROR_INVAL;
+ }
+ CloseHandle(hm);
+#else
+ /* djgpp doesn't support memory mapped files */
+ data = malloc(buf_nbuffers * buf_bufferSize);
#endif /* !DJGPP */
- /* create buffer headers and put in free list */
- bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
- buf_allp = NULL;
- for(i=0; i<buf_nbuffers; i++) {
- /* allocate and zero some storage */
- memset(bp, 0, sizeof(cm_buf_t));
+ /* create buffer headers and put in free list */
+ bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
+ buf_allp = NULL;
+ for(i=0; i<buf_nbuffers; i++) {
+ /* allocate and zero some storage */
+ memset(bp, 0, sizeof(cm_buf_t));
- /* thread on list of all buffers */
- bp->allp = buf_allp;
- buf_allp = bp;
-
- osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
- bp->flags |= CM_BUF_INLRU;
- lock_InitializeMutex(&bp->mx, "Buffer mutex");
+ /* thread on list of all buffers */
+ bp->allp = buf_allp;
+ buf_allp = bp;
- /* grab appropriate number of bytes from aligned zone */
- bp->datap = data;
+ osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
+ bp->flags |= CM_BUF_INLRU;
+ lock_InitializeMutex(&bp->mx, "Buffer mutex");
- /* setup last buffer pointer */
- if (i == 0)
- buf_freeListEndp = bp;
+ /* grab appropriate number of bytes from aligned zone */
+ bp->datap = data;
- /* next */
- bp++;
- data += buf_bufferSize;
- }
-
- /* none reserved at first */
- buf_reservedBufs = 0;
-
- /* just for safety's sake */
- buf_maxReservedBufs = buf_nbuffers - 3;
-
- /* init the buffer trace log */
- buf_logp = osi_LogCreate("buffer", 10);
+ /* setup last buffer pointer */
+ if (i == 0)
+ buf_freeListEndp = bp;
- osi_EndOnce(&once);
-
- /* and create the incr-syncer */
- phandle = thrd_Create(0, 0,
- (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
- "buf_IncrSyncer");
+ /* next */
+ bp++;
+ data += buf_bufferSize;
+ }
+
+ /* none reserved at first */
+ buf_reservedBufs = 0;
+
+ /* just for safety's sake */
+ buf_maxReservedBufs = buf_nbuffers - 3;
+
+ /* init the buffer trace log */
+ buf_logp = osi_LogCreate("buffer", 10);
- osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
+ osi_EndOnce(&once);
+
+ /* and create the incr-syncer */
+ phandle = thrd_Create(0, 0,
+ (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
+ "buf_IncrSyncer");
+
+ osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
#ifndef DJGPP
- CloseHandle(phandle);
+ CloseHandle(phandle);
#endif /* !DJGPP */
- }
+ }
- return 0;
+ return 0;
}
/* add nbuffers to the buffer pool, if possible.
*/
long buf_AddBuffers(long nbuffers)
{
- cm_buf_t *bp;
- int i;
- char *data;
+ cm_buf_t *bp;
+ int i;
+ char *data;
#ifndef DJGPP
- HANDLE hm;
- long cs;
+ HANDLE hm;
+ long cs;
afsi_log("%d buffers being added to the existing cache of size %d",
nbuffers, buf_nbuffers);
return CM_ERROR_INVAL;
}
- /*
- * Cache file mapping constrained by
- * system allocation granularity;
- * round up, assuming granularity is a power of two;
- * assume existing cache size is already rounded
- */
- cs = nbuffers * buf_bufferSize;
- cs = (cs + (sysInfo.dwAllocationGranularity - 1))
- & ~(sysInfo.dwAllocationGranularity - 1);
- if (cs != nbuffers * buf_bufferSize) {
- nbuffers = cs / buf_bufferSize;
- }
-
- /* Reserve additional buffer space by remapping cache file */
- hm = CreateFileMapping(CacheHandle,
- NULL,
- PAGE_READWRITE,
- 0, (buf_nbuffers + nbuffers) * buf_bufferSize,
- NULL);
- if (hm == NULL) {
- if (GetLastError() == ERROR_DISK_FULL)
- return CM_ERROR_TOOMANYBUFS;
- else
- return CM_ERROR_INVAL;
- }
- data = MapViewOfFile(hm,
- FILE_MAP_ALL_ACCESS,
- 0, buf_nbuffers * buf_bufferSize,
- nbuffers * buf_bufferSize);
- if (data == NULL) {
- CloseHandle(hm);
- return CM_ERROR_INVAL;
- }
- CloseHandle(hm);
+ /*
+ * Cache file mapping constrained by
+ * system allocation granularity;
+ * round up, assuming granularity is a power of two;
+ * assume existing cache size is already rounded
+ */
+ cs = nbuffers * buf_bufferSize;
+ cs = (cs + (sysInfo.dwAllocationGranularity - 1))
+ & ~(sysInfo.dwAllocationGranularity - 1);
+ if (cs != nbuffers * buf_bufferSize) {
+ nbuffers = cs / buf_bufferSize;
+ }
+
+ /* Reserve additional buffer space by remapping cache file */
+ hm = CreateFileMapping(CacheHandle,
+ NULL,
+ PAGE_READWRITE,
+ 0, (buf_nbuffers + nbuffers) * buf_bufferSize,
+ NULL);
+ if (hm == NULL) {
+ if (GetLastError() == ERROR_DISK_FULL)
+ return CM_ERROR_TOOMANYBUFS;
+ else
+ return CM_ERROR_INVAL;
+ }
+ data = MapViewOfFile(hm,
+ FILE_MAP_ALL_ACCESS,
+ 0, buf_nbuffers * buf_bufferSize,
+ nbuffers * buf_bufferSize);
+ if (data == NULL) {
+ CloseHandle(hm);
+ return CM_ERROR_INVAL;
+ }
+ CloseHandle(hm);
#else
- data = malloc(buf_nbuffers * buf_bufferSize);
+ data = malloc(nbuffers * buf_bufferSize);
#endif /* DJGPP */
- /* Create buffer headers and put in free list */
- bp = malloc(nbuffers * sizeof(*bp));
+ /* Create buffer headers and put in free list */
+ bp = malloc(nbuffers * sizeof(*bp));
- for(i=0; i<nbuffers; i++) {
- memset(bp, 0, sizeof(*bp));
+ for(i=0; i<nbuffers; i++) {
+ memset(bp, 0, sizeof(*bp));
- lock_InitializeMutex(&bp->mx, "cm_buf_t");
+ lock_InitializeMutex(&bp->mx, "cm_buf_t");
- /* grab appropriate number of bytes from aligned zone */
- bp->datap = data;
+ /* grab appropriate number of bytes from aligned zone */
+ bp->datap = data;
- bp->flags |= CM_BUF_INLRU;
-
- lock_ObtainWrite(&buf_globalLock);
- /* note that buf_allp chain is covered by buf_globalLock now */
- bp->allp = buf_allp;
- buf_allp = bp;
- osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
- if (!buf_freeListEndp) buf_freeListEndp = bp;
- buf_nbuffers++;
- lock_ReleaseWrite(&buf_globalLock);
+ bp->flags |= CM_BUF_INLRU;
+
+ lock_ObtainWrite(&buf_globalLock);
+ /* note that buf_allp chain is covered by buf_globalLock now */
+ bp->allp = buf_allp;
+ buf_allp = bp;
+ osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
+ if (!buf_freeListEndp) buf_freeListEndp = bp;
+ buf_nbuffers++;
+ lock_ReleaseWrite(&buf_globalLock);
- bp++;
- data += buf_bufferSize;
+ bp++;
+ data += buf_bufferSize;
- } /* for loop over all buffers */
+ } /* for loop over all buffers */
- return 0;
-}
+ return 0;
+}
/* interface to set the number of buffers to an exact figure.
* Called with no locks held.
return CM_ERROR_INVAL;
if (nbuffers == buf_nbuffers)
return 0;
- else if (nbuffers > buf_nbuffers)
- return buf_AddBuffers(nbuffers - buf_nbuffers);
+ else if (nbuffers > buf_nbuffers)
+ return buf_AddBuffers(nbuffers - buf_nbuffers);
else
return CM_ERROR_INVAL;
}
/* release a buffer. Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ lock_ReleaseWrite(&buf_globalLock);
}
/* wait for reading or writing to clear; called with write-locked
*/
void buf_WaitIO(cm_buf_t *bp)
{
- while (1) {
- /* if no IO is happening, we're done */
- if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
- break;
+ while (1) {
+ /* if no IO is happening, we're done */
+ if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
+ break;
/* otherwise I/O is happening, but some other thread is waiting for
* the I/O already. Wait for that guy to figure out what happened,
* the I/O to complete. Do so.
*/
if (bp->flags & CM_BUF_WAITING) {
- bp->flags &= ~CM_BUF_WAITING;
+ bp->flags &= ~CM_BUF_WAITING;
osi_Wakeup((long) bp);
}
osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
/* code to drop reference count while holding buf_globalLock */
void buf_LockedRelease(cm_buf_t *bp)
{
- /* ensure that we're in the LRU queue if our ref count is 0 */
- osi_assert(bp->refCount > 0);
- if (--bp->refCount == 0) {
- if (!(bp->flags & CM_BUF_INLRU)) {
- osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
-
- /* watch for transition from empty to one element */
- if (!buf_freeListEndp)
- buf_freeListEndp = buf_freeListp;
- bp->flags |= CM_BUF_INLRU;
- }
+ /* ensure that we're in the LRU queue if our ref count is 0 */
+ osi_assert(bp->refCount > 0);
+ if (--bp->refCount == 0) {
+ if (!(bp->flags & CM_BUF_INLRU)) {
+ osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
+
+ /* watch for transition from empty to one element */
+ if (!buf_freeListEndp)
+ buf_freeListEndp = buf_freeListp;
+ bp->flags |= CM_BUF_INLRU;
}
-}
+ }
+}
/* find a buffer, if any, for a particular file ID and offset. Assumes
* that buf_globalLock is write locked when called.
*/
cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp)
{
- long i;
- cm_buf_t *bp;
-
- i = BUF_HASH(&scp->fid, offsetp);
- for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
- if (cm_FidCmp(&scp->fid, &bp->fid) == 0
- && offsetp->LowPart == bp->offset.LowPart
- && offsetp->HighPart == bp->offset.HighPart) {
- bp->refCount++;
- break;
- }
+ long i;
+ cm_buf_t *bp;
+
+ i = BUF_HASH(&scp->fid, offsetp);
+ for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
+ if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+ && offsetp->LowPart == bp->offset.LowPart
+ && offsetp->HighPart == bp->offset.HighPart) {
+ bp->refCount++;
+ break;
}
+ }
- /* return whatever we found, if anything */
- return bp;
+ /* return whatever we found, if anything */
+ return bp;
}
/* find a buffer with offset *offsetp for vnode *scp. Called
*/
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
- cm_buf_t *bp;
+ cm_buf_t *bp;
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_LockedFind(scp, offsetp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ObtainWrite(&buf_globalLock);
+ bp = buf_LockedFind(scp, offsetp);
+ lock_ReleaseWrite(&buf_globalLock);
- return bp;
-}
+ return bp;
+}
/* start cleaning I/O on this buffer. Buffer must be write locked, and is returned
* write-locked.
*/
void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
- long code;
+ long code;
- code = 0;
- while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
- lock_ReleaseMutex(&bp->mx);
+ code = 0;
+ while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
+ lock_ReleaseMutex(&bp->mx);
- code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
- buf_bufferSize, 0, bp->userp,
- reqp);
+ code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
+ buf_bufferSize, 0, bp->userp,
+ reqp);
- lock_ObtainMutex(&bp->mx);
- if (code) break;
+ lock_ObtainMutex(&bp->mx);
+ if (code)
+ break;
#ifdef DISKCACHE95
- /* Disk cache support */
- /* write buffer to disk cache (synchronous for now) */
- diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
+ /* Disk cache support */
+ /* write buffer to disk cache (synchronous for now) */
+ diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
#endif /* DISKCACHE95 */
- };
+ }
- /* do logging after call to GetLastError, or else */
- osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);
+ /* do logging after call to GetLastError, or else */
+ osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);
- /* if someone was waiting for the I/O that just completed or failed,
- * wake them up.
- */
- if (bp->flags & CM_BUF_WAITING) {
- /* turn off flags and wakeup users */
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
- }
+ /* if someone was waiting for the I/O that just completed or failed,
+ * wake them up.
+ */
+ if (bp->flags & CM_BUF_WAITING) {
+ /* turn off flags and wakeup users */
+ bp->flags &= ~CM_BUF_WAITING;
+ osi_Wakeup((long) bp);
+ }
}
/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
*/
void buf_Recycle(cm_buf_t *bp)
{
- int i;
- cm_buf_t **lbpp;
- cm_buf_t *tbp;
- cm_buf_t *prevBp, *nextBp;
-
- /* if we get here, we know that the buffer still has a 0 ref count,
- * and that it is clean and has no currently pending I/O. This is
- * the dude to return.
- * Remember that as long as the ref count is 0, we know that we won't
- * have any lock conflicts, so we can grab the buffer lock out of
- * order in the locking hierarchy.
- */
+ int i;
+ cm_buf_t **lbpp;
+ cm_buf_t *tbp;
+ cm_buf_t *prevBp, *nextBp;
+
+ /* if we get here, we know that the buffer still has a 0 ref count,
+ * and that it is clean and has no currently pending I/O. This is
+ * the dude to return.
+ * Remember that as long as the ref count is 0, we know that we won't
+ * have any lock conflicts, so we can grab the buffer lock out of
+ * order in the locking hierarchy.
+ */
osi_Log2( buf_logp, "buf_Recycle recycles 0x%x, off 0x%x",
- bp, bp->offset.LowPart);
+ bp, bp->offset.LowPart);
- osi_assert(bp->refCount == 0);
- osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
- lock_AssertWrite(&buf_globalLock);
+ osi_assert(bp->refCount == 0);
+ osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
+ lock_AssertWrite(&buf_globalLock);
- if (bp->flags & CM_BUF_INHASH) {
- /* Remove from hash */
+ if (bp->flags & CM_BUF_INHASH) {
+ /* Remove from hash */
- i = BUF_HASH(&bp->fid, &bp->offset);
- lbpp = &(buf_hashTablepp[i]);
- for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
- if (tbp == bp) break;
- }
+ i = BUF_HASH(&bp->fid, &bp->offset);
+ lbpp = &(buf_hashTablepp[i]);
+ for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
+ if (tbp == bp) break;
+ }
- /* we better find it */
- osi_assertx(tbp != NULL, "buf_GetNewLocked: hash table screwup");
+ /* we better find it */
+ osi_assertx(tbp != NULL, "buf_GetNewLocked: hash table screwup");
- *lbpp = bp->hashp; /* hash out */
+ *lbpp = bp->hashp; /* hash out */
- /* Remove from file hash */
+ /* Remove from file hash */
- i = BUF_FILEHASH(&bp->fid);
- prevBp = bp->fileHashBackp;
- nextBp = bp->fileHashp;
- if (prevBp)
- prevBp->fileHashp = nextBp;
- else
- buf_fileHashTablepp[i] = nextBp;
- if (nextBp)
- nextBp->fileHashBackp = prevBp;
+ i = BUF_FILEHASH(&bp->fid);
+ prevBp = bp->fileHashBackp;
+ nextBp = bp->fileHashp;
+ if (prevBp)
+ prevBp->fileHashp = nextBp;
+ else
+ buf_fileHashTablepp[i] = nextBp;
+ if (nextBp)
+ nextBp->fileHashBackp = prevBp;
- bp->flags &= ~CM_BUF_INHASH;
- }
-
- /* bump the soft reference counter now, to invalidate softRefs; no
- * wakeup is required since people don't sleep waiting for this
- * counter to change.
- */
- bp->idCounter++;
+ bp->flags &= ~CM_BUF_INHASH;
+ }
- /* make the fid unrecognizable */
- memset(&bp->fid, 0, sizeof(bp->fid));
-}
+ /* bump the soft reference counter now, to invalidate softRefs; no
+ * wakeup is required since people don't sleep waiting for this
+ * counter to change.
+ */
+ bp->idCounter++;
+
+ /* make the fid unrecognizable */
+ memset(&bp->fid, 0, sizeof(bp->fid));
+}
/* recycle a buffer, removing it from the free list, hashing in its new identity
* and returning it write-locked so that no one can use it. Called without
*/
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
- cm_buf_t *bp; /* buffer we're dealing with */
- cm_buf_t *nextBp; /* next buffer in file hash chain */
- long i; /* temp */
- cm_req_t req;
-
- cm_InitReq(&req); /* just in case */
-
- while(1) {
-retry:
- lock_ObtainWrite(&buf_globalLock);
- /* check to see if we lost the race */
- if (scp) {
- if (bp = buf_LockedFind(scp, offsetp)) {
- bp->refCount--;
- lock_ReleaseWrite(&buf_globalLock);
- return CM_BUF_EXISTS;
- }
- }
-
- /* for debugging, assert free list isn't empty, although we
- * really should try waiting for a running tranasction to finish
- * instead of this; or better, we should have a transaction
- * throttler prevent us from entering this situation.
- */
- osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");
+ cm_buf_t *bp; /* buffer we're dealing with */
+ cm_buf_t *nextBp; /* next buffer in file hash chain */
+ long i; /* temp */
+ cm_req_t req;
- /* look at all buffers in free list, some of which may temp.
- * have high refcounts and which then should be skipped,
- * starting cleaning I/O for those which are dirty. If we find
- * a clean buffer, we rehash it, lock it and return it.
- */
- for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
- /* check to see if it really has zero ref count. This
- * code can bump refcounts, at least, so it may not be
- * zero.
- */
- if (bp->refCount > 0) continue;
-
- /* we don't have to lock buffer itself, since the ref
- * count is 0 and we know it will stay zero as long as
- * we hold the global lock.
- */
-
- /* don't recycle someone in our own chunk */
- if (!cm_FidCmp(&bp->fid, &scp->fid)
- && (bp->offset.LowPart & (-cm_chunkSize))
- == (offsetp->LowPart & (-cm_chunkSize)))
- continue;
-
- /* if this page is being filled (!) or cleaned, see if
- * the I/O has completed. If not, skip it, otherwise
- * do the final processing for the I/O.
- */
- if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
- /* probably shouldn't do this much work while
- * holding the big lock? Watch for contention
- * here.
- */
- continue;
- }
-
- if (bp->flags & CM_BUF_DIRTY) {
- /* if the buffer is dirty, start cleaning it and
- * move on to the next buffer. We do this with
- * just the lock required to minimize contention
- * on the big lock.
- */
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
-
- /* grab required lock and clean; this only
- * starts the I/O. By the time we're back,
- * it'll still be marked dirty, but it will also
- * have the WRITING flag set, so we won't get
- * back here.
- */
- buf_CleanAsync(bp, &req);
-
- /* now put it back and go around again */
- buf_Release(bp);
- goto retry;
- }
-
- /* if we get here, we know that the buffer still has a 0
- * ref count, and that it is clean and has no currently
- * pending I/O. This is the dude to return.
- * Remember that as long as the ref count is 0, we know
- * that we won't have any lock conflicts, so we can grab
- * the buffer lock out of order in the locking hierarchy.
- */
- buf_Recycle(bp);
-
- /* clean up junk flags */
- bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
- bp->dataVersion = -1; /* unknown so far */
-
- /* now hash in as our new buffer, and give it the
- * appropriate label, if requested.
- */
- if (scp) {
- bp->flags |= CM_BUF_INHASH;
- bp->fid = scp->fid;
- bp->offset = *offsetp;
- i = BUF_HASH(&scp->fid, offsetp);
- bp->hashp = buf_hashTablepp[i];
- buf_hashTablepp[i] = bp;
- i = BUF_FILEHASH(&scp->fid);
- nextBp = buf_fileHashTablepp[i];
- bp->fileHashp = nextBp;
- bp->fileHashBackp = NULL;
- if (nextBp)
- nextBp->fileHashBackp = bp;
- buf_fileHashTablepp[i] = bp;
- }
+ cm_InitReq(&req); /* just in case */
+
+ while(1) {
+ retry:
+ lock_ObtainWrite(&buf_globalLock);
+ /* check to see if we lost the race */
+ if (scp) {
+ if ((bp = buf_LockedFind(scp, offsetp)) != NULL) {
+ bp->refCount--;
+ lock_ReleaseWrite(&buf_globalLock);
+ return CM_BUF_EXISTS;
+ }
+ }
+
+ /* for debugging, assert free list isn't empty, although we
+ * really should try waiting for a running transaction to finish
+ * instead of this; or better, we should have a transaction
+ * throttler prevent us from entering this situation.
+ */
+ osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");
+
+ /* look at all buffers in free list, some of which may temp.
+ * have high refcounts and which then should be skipped,
+ * starting cleaning I/O for those which are dirty. If we find
+ * a clean buffer, we rehash it, lock it and return it.
+ */
+ for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+ /* check to see if it really has zero ref count. This
+ * code can bump refcounts, at least, so it may not be
+ * zero.
+ */
+ if (bp->refCount > 0)
+ continue;
- /* prepare to return it. Start by giving it a good
- * refcount */
- bp->refCount = 1;
+ /* we don't have to lock buffer itself, since the ref
+ * count is 0 and we know it will stay zero as long as
+ * we hold the global lock.
+ */
+
+ /* don't recycle someone in our own chunk */
+ if (!cm_FidCmp(&bp->fid, &scp->fid)
+ && (bp->offset.LowPart & (-cm_chunkSize))
+ == (offsetp->LowPart & (-cm_chunkSize)))
+ continue;
+
+ /* if this page is being filled (!) or cleaned, see if
+ * the I/O has completed. If not, skip it, otherwise
+ * do the final processing for the I/O.
+ */
+ if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
+ /* probably shouldn't do this much work while
+ * holding the big lock? Watch for contention
+ * here.
+ */
+ continue;
+ }
- /* and since it has a non-zero ref count, we should move
- * it from the lru queue. It better be still there,
- * since we've held the global (big) lock since we found
- * it there.
- */
- osi_assertx(bp->flags & CM_BUF_INLRU,
- "buf_GetNewLocked: LRU screwup");
- if (buf_freeListEndp == bp) {
- /* we're the last guy in this queue, so maintain it */
- buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
- }
- osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
- bp->flags &= ~CM_BUF_INLRU;
+ if (bp->flags & CM_BUF_DIRTY) {
+ /* if the buffer is dirty, start cleaning it and
+ * move on to the next buffer. We do this with
+ * just the lock required to minimize contention
+ * on the big lock.
+ */
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+
+ /* grab required lock and clean; this only
+ * starts the I/O. By the time we're back,
+ * it'll still be marked dirty, but it will also
+ * have the WRITING flag set, so we won't get
+ * back here.
+ */
+ buf_CleanAsync(bp, &req);
+
+ /* now put it back and go around again */
+ buf_Release(bp);
+ goto retry;
+ }
+
+ /* if we get here, we know that the buffer still has a 0
+ * ref count, and that it is clean and has no currently
+ * pending I/O. This is the dude to return.
+ * Remember that as long as the ref count is 0, we know
+ * that we won't have any lock conflicts, so we can grab
+ * the buffer lock out of order in the locking hierarchy.
+ */
+ buf_Recycle(bp);
+
+ /* clean up junk flags */
+ bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
+ bp->dataVersion = -1; /* unknown so far */
+
+ /* now hash in as our new buffer, and give it the
+ * appropriate label, if requested.
+ */
+ if (scp) {
+ bp->flags |= CM_BUF_INHASH;
+ bp->fid = scp->fid;
+ bp->offset = *offsetp;
+ i = BUF_HASH(&scp->fid, offsetp);
+ bp->hashp = buf_hashTablepp[i];
+ buf_hashTablepp[i] = bp;
+ i = BUF_FILEHASH(&scp->fid);
+ nextBp = buf_fileHashTablepp[i];
+ bp->fileHashp = nextBp;
+ bp->fileHashBackp = NULL;
+ if (nextBp)
+ nextBp->fileHashBackp = bp;
+ buf_fileHashTablepp[i] = bp;
+ }
+
+ /* prepare to return it. Start by giving it a good
+ * refcount */
+ bp->refCount = 1;
- /* finally, grab the mutex so that people don't use it
- * before the caller fills it with data. Again, no one
- * should have been able to get to this dude to lock it.
- */
- osi_assertx(lock_TryMutex(&bp->mx),
- "buf_GetNewLocked: TryMutex failed");
-
- lock_ReleaseWrite(&buf_globalLock);
- *bufpp = bp;
- return 0;
- } /* for all buffers in lru queue */
- lock_ReleaseWrite(&buf_globalLock);
- } /* while loop over everything */
- /* not reached */
+ /* and since it has a non-zero ref count, we should move
+ * it from the lru queue. It better be still there,
+ * since we've held the global (big) lock since we found
+ * it there.
+ */
+ osi_assertx(bp->flags & CM_BUF_INLRU,
+ "buf_GetNewLocked: LRU screwup");
+ if (buf_freeListEndp == bp) {
+ /* we're the last guy in this queue, so maintain it */
+ buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
+ }
+ osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
+ bp->flags &= ~CM_BUF_INLRU;
+
+ /* finally, grab the mutex so that people don't use it
+ * before the caller fills it with data. Again, no one
+ * should have been able to get to this dude to lock it.
+ */
+ osi_assertx(lock_TryMutex(&bp->mx),
+ "buf_GetNewLocked: TryMutex failed");
+
+ lock_ReleaseWrite(&buf_globalLock);
+ *bufpp = bp;
+ return 0;
+ } /* for all buffers in lru queue */
+ lock_ReleaseWrite(&buf_globalLock);
+ } /* while loop over everything */
+ /* not reached */
} /* the proc */
/* get a page, returning it held but unlocked. Doesn't fill in the page
*/
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
- cm_buf_t *bp;
- long code;
- osi_hyper_t pageOffset;
- int created;
-
- created = 0;
- pageOffset.HighPart = offsetp->HighPart;
- pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
- while (1) {
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_LockedFind(scp, &pageOffset);
- lock_ReleaseWrite(&buf_globalLock);
- if (bp) {
- /* lock it and break out */
- lock_ObtainMutex(&bp->mx);
- break;
- }
-
- /* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ cm_buf_t *bp;
+ long code;
+ osi_hyper_t pageOffset;
+ int created;
+
+ created = 0;
+ pageOffset.HighPart = offsetp->HighPart;
+ pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
+ while (1) {
+ lock_ObtainWrite(&buf_globalLock);
+ bp = buf_LockedFind(scp, &pageOffset);
+ lock_ReleaseWrite(&buf_globalLock);
+ if (bp) {
+ /* lock it and break out */
+ lock_ObtainMutex(&bp->mx);
+ break;
+ }
- /* check if the buffer was created in a race condition branch.
- * If so, go around so we can hold a reference to it.
- */
- if (code == CM_BUF_EXISTS) continue;
-
- /* something else went wrong */
- if (code != 0) return code;
-
- /* otherwise, we have a locked buffer that we just created */
- created = 1;
- break;
- } /* big while loop */
-
- /* wait for reads */
- if (bp->flags & CM_BUF_READING)
- buf_WaitIO(bp);
+ /* otherwise, we have to create a page */
+ code = buf_GetNewLocked(scp, &pageOffset, &bp);
- /* once it has been read once, we can unlock it and return it, still
- * with its refcount held.
+ /* check if the buffer was created in a race condition branch.
+ * If so, go around so we can hold a reference to it.
*/
- lock_ReleaseMutex(&bp->mx);
- *bufpp = bp;
- osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
- bp, (long) scp, offsetp->LowPart);
- return 0;
+ if (code == CM_BUF_EXISTS)
+ continue;
+
+ /* something else went wrong */
+ if (code != 0)
+ return code;
+
+ /* otherwise, we have a locked buffer that we just created */
+ created = 1;
+ break;
+ } /* big while loop */
+
+ /* wait for reads */
+ if (bp->flags & CM_BUF_READING)
+ buf_WaitIO(bp);
+
+ /* once it has been read once, we can unlock it and return it, still
+ * with its refcount held.
+ */
+ lock_ReleaseMutex(&bp->mx);
+ *bufpp = bp;
+ osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
+ bp, (long) scp, offsetp->LowPart);
+ return 0;
}
/* get a page, returning it held but unlocked. Make sure it is complete */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
- cm_buf_t *bp;
- long code;
- osi_hyper_t pageOffset;
- unsigned long tcount;
- int created;
+ cm_buf_t *bp;
+ long code;
+ osi_hyper_t pageOffset;
+ unsigned long tcount;
+ int created;
#ifdef DISKCACHE95
- cm_diskcache_t *dcp;
+ cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */
- created = 0;
- pageOffset.HighPart = offsetp->HighPart;
- pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
- while (1) {
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_LockedFind(scp, &pageOffset);
- lock_ReleaseWrite(&buf_globalLock);
- if (bp) {
- /* lock it and break out */
- lock_ObtainMutex(&bp->mx);
- break;
+ created = 0;
+ pageOffset.HighPart = offsetp->HighPart;
+ pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
+ while (1) {
+ lock_ObtainWrite(&buf_globalLock);
+ bp = buf_LockedFind(scp, &pageOffset);
+ lock_ReleaseWrite(&buf_globalLock);
+ if (bp) {
+ /* lock it and break out */
+ lock_ObtainMutex(&bp->mx);
#ifdef DISKCACHE95
- /* touch disk chunk to update LRU info */
- diskcache_Touch(bp->dcp);
+ /* touch disk chunk to update LRU info */
+ diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
+ break;
- }
-
- /* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ }
- /* check if the buffer was created in a race condition branch.
- * If so, go around so we can hold a reference to it.
- */
- if (code == CM_BUF_EXISTS) continue;
-
- /* something else went wrong */
- if (code != 0) return code;
-
- /* otherwise, we have a locked buffer that we just created */
- created = 1;
- break;
- } /* big while loop */
-
- /* if we get here, we have a locked buffer that may have just been
- * created, in which case it needs to be filled with data.
+ /* otherwise, we have to create a page */
+ code = buf_GetNewLocked(scp, &pageOffset, &bp);
+
+ /* check if the buffer was created in a race condition branch.
+ * If so, go around so we can hold a reference to it.
*/
- if (created) {
- /* load the page; freshly created pages should be idle */
- osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));
+ if (code == CM_BUF_EXISTS)
+ continue;
+
+ /* something else went wrong */
+ if (code != 0)
+ return code;
+
+ /* otherwise, we have a locked buffer that we just created */
+ created = 1;
+ break;
+ } /* big while loop */
+
+ /* if we get here, we have a locked buffer that may have just been
+ * created, in which case it needs to be filled with data.
+ */
+ if (created) {
+ /* load the page; freshly created pages should be idle */
+ osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));
- /* setup offset, event */
+ /* setup offset, event */
#ifndef DJGPP /* doesn't seem to be used */
- bp->over.Offset = bp->offset.LowPart;
- bp->over.OffsetHigh = bp->offset.HighPart;
+ bp->over.Offset = bp->offset.LowPart;
+ bp->over.OffsetHigh = bp->offset.HighPart;
#endif /* !DJGPP */
- /* start the I/O; may drop lock */
- bp->flags |= CM_BUF_READING;
- code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);
+ /* start the I/O; may drop lock */
+ bp->flags |= CM_BUF_READING;
+ code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);
#ifdef DISKCACHE95
- code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp);
- bp->dcp = dcp; /* pointer to disk cache struct. */
+ code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp);
+ bp->dcp = dcp; /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */
- if (code != 0) {
- /* failure or queued */
+ if (code != 0) {
+ /* failure or queued */
#ifndef DJGPP /* cm_bufRead always returns 0 */
- if (code != ERROR_IO_PENDING) {
+ if (code != ERROR_IO_PENDING) {
#endif
- bp->error = code;
- bp->flags |= CM_BUF_ERROR;
- bp->flags &= ~CM_BUF_READING;
- if (bp->flags & CM_BUF_WAITING) {
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
- }
- lock_ReleaseMutex(&bp->mx);
- buf_Release(bp);
- return code;
+ bp->error = code;
+ bp->flags |= CM_BUF_ERROR;
+ bp->flags &= ~CM_BUF_READING;
+ if (bp->flags & CM_BUF_WAITING) {
+ bp->flags &= ~CM_BUF_WAITING;
+ osi_Wakeup((long) bp);
+ }
+ lock_ReleaseMutex(&bp->mx);
+ buf_Release(bp);
+ return code;
#ifndef DJGPP
- }
+ }
#endif
- } else {
- /* otherwise, I/O completed instantly and we're done, except
- * for padding the xfr out with 0s and checking for EOF
- */
- if (tcount < (unsigned long) buf_bufferSize) {
- memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
- if (tcount == 0)
- bp->flags |= CM_BUF_EOF;
- }
- bp->flags &= ~CM_BUF_READING;
- if (bp->flags & CM_BUF_WAITING) {
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
- }
- }
-
- } /* if created */
-
- /* wait for reads, either that which we started above, or that someone
- * else started. We don't care if we return a buffer being cleaned.
- */
- if (bp->flags & CM_BUF_READING)
- buf_WaitIO(bp);
+ } else {
+ /* otherwise, I/O completed instantly and we're done, except
+ * for padding the xfr out with 0s and checking for EOF
+ */
+ if (tcount < (unsigned long) buf_bufferSize) {
+ memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
+ if (tcount == 0)
+ bp->flags |= CM_BUF_EOF;
+ }
+ bp->flags &= ~CM_BUF_READING;
+ if (bp->flags & CM_BUF_WAITING) {
+ bp->flags &= ~CM_BUF_WAITING;
+ osi_Wakeup((long) bp);
+ }
+ }
- /* once it has been read once, we can unlock it and return it, still
- * with its refcount held.
- */
- lock_ReleaseMutex(&bp->mx);
- *bufpp = bp;
+ } /* if created */
- /* now remove from queue; will be put in at the head (farthest from
- * being recycled) when we're done in buf_Release.
- */
- lock_ObtainWrite(&buf_globalLock);
- if (bp->flags & CM_BUF_INLRU) {
- if (buf_freeListEndp == bp)
- buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
- osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
- bp->flags &= ~CM_BUF_INLRU;
- }
- lock_ReleaseWrite(&buf_globalLock);
+ /* wait for reads, either that which we started above, or that someone
+ * else started. We don't care if we return a buffer being cleaned.
+ */
+ if (bp->flags & CM_BUF_READING)
+ buf_WaitIO(bp);
- osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
- bp, (long) scp, offsetp->LowPart);
- return 0;
+ /* once it has been read once, we can unlock it and return it, still
+ * with its refcount held.
+ */
+ lock_ReleaseMutex(&bp->mx);
+ *bufpp = bp;
+
+ /* now remove from queue; will be put in at the head (farthest from
+ * being recycled) when we're done in buf_Release.
+ */
+ lock_ObtainWrite(&buf_globalLock);
+ if (bp->flags & CM_BUF_INLRU) {
+ if (buf_freeListEndp == bp)
+ buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
+ osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
+ bp->flags &= ~CM_BUF_INLRU;
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+
+ osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
+ bp, (long) scp, offsetp->LowPart);
+ return 0;
}
/* count # of elements in the free list;
*/
long buf_CountFreeList(void)
{
- long count;
- cm_buf_t *bufp;
-
- count = 0;
- lock_ObtainRead(&buf_globalLock);
- for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
- /* if the buffer doesn't have an identity, or if the buffer
- * has been invalidate (by having its DV stomped upon), then
- * count it as free, since it isn't really being utilized.
- */
- if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
- count++;
- }
- lock_ReleaseRead(&buf_globalLock);
- return count;
+ long count;
+ cm_buf_t *bufp;
+
+ count = 0;
+ lock_ObtainRead(&buf_globalLock);
+ for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
+ /* if the buffer doesn't have an identity, or if the buffer
+ * has been invalidated (by having its DV stomped upon), then
+ * count it as free, since it isn't really being utilized.
+ */
+ if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
+ count++;
+ }
+ lock_ReleaseRead(&buf_globalLock);
+ return count;
}
/* clean a buffer synchronously */
void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
- lock_ObtainMutex(&bp->mx);
- buf_LockedCleanAsync(bp, reqp);
- lock_ReleaseMutex(&bp->mx);
-}
+ lock_ObtainMutex(&bp->mx);
+ buf_LockedCleanAsync(bp, reqp);
+ lock_ReleaseMutex(&bp->mx);
+}
/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_buf_t *bp)
{
- lock_ObtainMutex(&bp->mx);
- if (bp->flags & CM_BUF_WRITING) {
- buf_WaitIO(bp);
- }
- lock_ReleaseMutex(&bp->mx);
-}
+ lock_ObtainMutex(&bp->mx);
+ if (bp->flags & CM_BUF_WRITING) {
+ buf_WaitIO(bp);
+ }
+ lock_ReleaseMutex(&bp->mx);
+}
/* set the dirty flag on a buffer, and set associated write-ahead log,
* if there is one. Allow one to be added to a buffer, but not changed.
*/
void buf_SetDirty(cm_buf_t *bp)
{
- osi_assert(bp->refCount > 0);
+ osi_assert(bp->refCount > 0);
- osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);
+ osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);
- /* set dirty bit */
- bp->flags |= CM_BUF_DIRTY;
+ /* set dirty bit */
+ bp->flags |= CM_BUF_DIRTY;
- /* and turn off EOF flag, since it has associated data now */
- bp->flags &= ~CM_BUF_EOF;
+ /* and turn off EOF flag, since it has associated data now */
+ bp->flags &= ~CM_BUF_EOF;
}
/* clean all buffers, reset log pointers and invalidate all buffers.
*/
long buf_CleanAndReset(void)
{
- long i;
- cm_buf_t *bp;
- cm_req_t req;
-
- lock_ObtainWrite(&buf_globalLock);
- for(i=0; i<buf_hashSize; i++) {
- for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
-
- /* now no locks are held; clean buffer and go on */
- cm_InitReq(&req);
- buf_CleanAsync(bp, &req);
- buf_CleanWait(bp);
-
- /* relock and release buffer */
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- } /* over one bucket */
- } /* for loop over all hash buckets */
-
- /* release locks */
- lock_ReleaseWrite(&buf_globalLock);
+ long i;
+ cm_buf_t *bp;
+ cm_req_t req;
- /* and we're done */
- return 0;
-}
+ lock_ObtainWrite(&buf_globalLock);
+ for(i=0; i<buf_hashSize; i++) {
+ for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+
+ /* now no locks are held; clean buffer and go on */
+ cm_InitReq(&req);
+ buf_CleanAsync(bp, &req);
+ buf_CleanWait(bp);
+
+ /* relock and release buffer */
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ } /* over one bucket */
+ } /* for loop over all hash buckets */
+
+ /* release locks */
+ lock_ReleaseWrite(&buf_globalLock);
+
+ /* and we're done */
+ return 0;
+}
/* called without global lock being held, reserves buffers for callers
* that need more than one held (not locked) at once.
*/
void buf_ReserveBuffers(long nbuffers)
{
- lock_ObtainWrite(&buf_globalLock);
- while (1) {
- if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
- buf_reserveWaiting = 1;
- osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
- osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
- lock_ObtainWrite(&buf_globalLock);
- }
- else {
- buf_reservedBufs += nbuffers;
- break;
- }
+ lock_ObtainWrite(&buf_globalLock);
+ while (1) {
+ if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
+ buf_reserveWaiting = 1;
+ osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
+ osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
+ lock_ObtainWrite(&buf_globalLock);
+ }
+ else {
+ buf_reservedBufs += nbuffers;
+ break;
}
- lock_ReleaseWrite(&buf_globalLock);
+ }
+ lock_ReleaseWrite(&buf_globalLock);
}
int buf_TryReserveBuffers(long nbuffers)
{
- int code;
-
- lock_ObtainWrite(&buf_globalLock);
- if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
- code = 0;
- }
- else {
- buf_reservedBufs += nbuffers;
- code = 1;
- }
- lock_ReleaseWrite(&buf_globalLock);
- return code;
-}
+ int code;
+
+ lock_ObtainWrite(&buf_globalLock);
+ if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
+ code = 0;
+ }
+ else {
+ buf_reservedBufs += nbuffers;
+ code = 1;
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+ return code;
+}
/* called without global lock held, releases reservation held by
* buf_ReserveBuffers.
*/
void buf_UnreserveBuffers(long nbuffers)
{
- lock_ObtainWrite(&buf_globalLock);
- buf_reservedBufs -= nbuffers;
- if (buf_reserveWaiting) {
- buf_reserveWaiting = 0;
- osi_Wakeup((long) &buf_reservedBufs);
- }
- lock_ReleaseWrite(&buf_globalLock);
-}
+ lock_ObtainWrite(&buf_globalLock);
+ buf_reservedBufs -= nbuffers;
+ if (buf_reserveWaiting) {
+ buf_reserveWaiting = 0;
+ osi_Wakeup((long) &buf_reservedBufs);
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+}
/* truncate the buffers past sizep, zeroing out the page, if we don't
* end on a page boundary.
* Requires cm_bufCreateLock to be write locked.
*/
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
- osi_hyper_t *sizep)
+ osi_hyper_t *sizep)
{
- cm_buf_t *bufp;
- cm_buf_t *nbufp; /* next buffer, if didRelease */
- osi_hyper_t bufEnd;
- long code;
- long bufferPos;
- int didRelease;
- long i;
-
- /* assert that cm_bufCreateLock is held in write mode */
- lock_AssertWrite(&scp->bufCreateLock);
-
- i = BUF_FILEHASH(&scp->fid);
-
- lock_ObtainWrite(&buf_globalLock);
- bufp = buf_fileHashTablepp[i];
- if (bufp == NULL) {
- lock_ReleaseWrite(&buf_globalLock);
- return 0;
- }
-
- bufp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- for(; bufp; bufp = nbufp) {
- didRelease = 0;
- lock_ObtainMutex(&bufp->mx);
-
- bufEnd.HighPart = 0;
- bufEnd.LowPart = buf_bufferSize;
- bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);
-
- if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
- LargeIntegerLessThan(*sizep, bufEnd)) {
- buf_WaitIO(bufp);
- }
- lock_ObtainMutex(&scp->mx);
+ cm_buf_t *bufp;
+ cm_buf_t *nbufp; /* next buffer, if didRelease */
+ osi_hyper_t bufEnd;
+ long code;
+ long bufferPos;
+ int didRelease;
+ long i;
+
+ /* assert that cm_bufCreateLock is held in write mode */
+ lock_AssertWrite(&scp->bufCreateLock);
+
+ i = BUF_FILEHASH(&scp->fid);
+
+ lock_ObtainWrite(&buf_globalLock);
+ bufp = buf_fileHashTablepp[i];
+ if (bufp == NULL) {
+ lock_ReleaseWrite(&buf_globalLock);
+ return 0;
+ }
+
+ bufp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ for(; bufp; bufp = nbufp) {
+ didRelease = 0;
+ lock_ObtainMutex(&bufp->mx);
+
+ bufEnd.HighPart = 0;
+ bufEnd.LowPart = buf_bufferSize;
+ bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);
+
+ if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
+ LargeIntegerLessThan(*sizep, bufEnd)) {
+ buf_WaitIO(bufp);
+ }
+ lock_ObtainMutex(&scp->mx);
- /* make sure we have a callback (so we have the right value for
- * the length), and wait for it to be safe to do a truncate.
- */
- code = cm_SyncOp(scp, bufp, userp, reqp, 0,
- CM_SCACHESYNC_NEEDCALLBACK
- | CM_SCACHESYNC_GETSTATUS
- | CM_SCACHESYNC_SETSIZE
- | CM_SCACHESYNC_BUFLOCKED);
- /* if we succeeded in our locking, and this applies to the right
- * file, and the truncate request overlaps the buffer either
- * totally or partially, then do something.
+ /* make sure we have a callback (so we have the right value for
+ * the length), and wait for it to be safe to do a truncate.
+ */
+ code = cm_SyncOp(scp, bufp, userp, reqp, 0,
+ CM_SCACHESYNC_NEEDCALLBACK
+ | CM_SCACHESYNC_GETSTATUS
+ | CM_SCACHESYNC_SETSIZE
+ | CM_SCACHESYNC_BUFLOCKED);
+ /* if we succeeded in our locking, and this applies to the right
+ * file, and the truncate request overlaps the buffer either
+ * totally or partially, then do something.
+ */
+ if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
+ && LargeIntegerLessThan(*sizep, bufEnd)) {
+
+ lock_ObtainWrite(&buf_globalLock);
+
+ /* destroy the buffer, turning off its dirty bit, if
+ * we're truncating the whole buffer. Otherwise, set
+ * the dirty bit, and clear out the tail of the buffer
+ * if we just overlap some.
+ */
+ if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
+ /* truncating the entire page */
+ bufp->flags &= ~CM_BUF_DIRTY;
+ bufp->dataVersion = -1; /* known bad */
+ bufp->dirtyCounter++;
+ }
+ else {
+ /* don't set dirty, since dirty implies
+ * currently up-to-date. Don't need to do this,
+ * since we'll update the length anyway.
+ *
+ * Zero out remainder of the page, in case we
+ * seek and write past EOF, and make this data
+ * visible again.
*/
- if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
- && LargeIntegerLessThan(*sizep, bufEnd)) {
-
- lock_ObtainWrite(&buf_globalLock);
-
- /* destroy the buffer, turning off its dirty bit, if
- * we're truncating the whole buffer. Otherwise, set
- * the dirty bit, and clear out the tail of the buffer
- * if we just overlap some.
- */
- if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
- /* truncating the entire page */
- bufp->flags &= ~CM_BUF_DIRTY;
- bufp->dataVersion = -1; /* known bad */
- bufp->dirtyCounter++;
- }
- else {
- /* don't set dirty, since dirty implies
- * currently up-to-date. Don't need to do this,
- * since we'll update the length anyway.
- *
- * Zero out remainder of the page, in case we
- * seek and write past EOF, and make this data
- * visible again.
- */
- bufferPos = sizep->LowPart & (buf_bufferSize - 1);
- osi_assert(bufferPos != 0);
- memset(bufp->datap + bufferPos, 0,
- buf_bufferSize - bufferPos);
- }
-
- lock_ReleaseWrite(&buf_globalLock);
- }
+ bufferPos = sizep->LowPart & (buf_bufferSize - 1);
+ osi_assert(bufferPos != 0);
+ memset(bufp->datap + bufferPos, 0,
+ buf_bufferSize - bufferPos);
+ }
+
+ lock_ReleaseWrite(&buf_globalLock);
+ }
- lock_ReleaseMutex(&scp->mx);
- lock_ReleaseMutex(&bufp->mx);
- if (!didRelease) {
- lock_ObtainWrite(&buf_globalLock);
- nbufp = bufp->fileHashp;
- if (nbufp) nbufp->refCount++;
- buf_LockedRelease(bufp);
- lock_ReleaseWrite(&buf_globalLock);
- }
-
- /* bail out early if we fail */
- if (code) {
- /* at this point, nbufp is held; bufp has already been
- * released.
- */
- if (nbufp) buf_Release(nbufp);
- return code;
- }
- }
-
- /* success */
- return 0;
+ lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&bufp->mx);
+ if (!didRelease) {
+ lock_ObtainWrite(&buf_globalLock);
+ nbufp = bufp->fileHashp;
+ if (nbufp) nbufp->refCount++;
+ buf_LockedRelease(bufp);
+ lock_ReleaseWrite(&buf_globalLock);
+ }
+
+ /* bail out early if we fail */
+ if (code) {
+ /* at this point, nbufp is held; bufp has already been
+ * released.
+ */
+ if (nbufp)
+ buf_Release(nbufp);
+ return code;
+ }
+ }
+
+ /* success */
+ return 0;
}
long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
- long code;
- cm_buf_t *bp; /* buffer we're hacking on */
- cm_buf_t *nbp;
- int didRelease;
- long i;
-
- i = BUF_FILEHASH(&scp->fid);
-
- code = 0;
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_fileHashTablepp[i];
- if (bp) bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- for(; bp; bp = nbp) {
- didRelease = 0; /* haven't released this buffer yet */
-
- /* clean buffer synchronously */
- if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- lock_ObtainMutex(&bp->mx);
-
- /* start cleaning the buffer, and wait for it to finish */
- buf_LockedCleanAsync(bp, reqp);
- buf_WaitIO(bp);
- lock_ReleaseMutex(&bp->mx);
-
- code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
- if (code) goto skip;
-
- lock_ObtainWrite(&buf_globalLock);
- /* actually, we only know that buffer is clean if ref
- * count is 1, since we don't have buffer itself locked.
- */
- if (!(bp->flags & CM_BUF_DIRTY)) {
- if (bp->refCount == 1) { /* bp is held above */
- buf_LockedRelease(bp);
- nbp = bp->fileHashp;
- if (nbp) nbp->refCount++;
- didRelease = 1;
- buf_Recycle(bp);
- }
- }
- lock_ReleaseWrite(&buf_globalLock);
-
- (*cm_buf_opsp->Unstabilizep)(scp, userp);
- }
-
-skip:
- if (!didRelease) {
- lock_ObtainWrite(&buf_globalLock);
- if (nbp = bp->fileHashp) nbp->refCount++;
- buf_LockedRelease(bp);
- lock_ReleaseWrite(&buf_globalLock);
- }
- } /* for loop over a bunch of buffers */
-
- /* done */
- return code;
-}
+ long code;
+ cm_buf_t *bp; /* buffer we're hacking on */
+ cm_buf_t *nbp;
+ int didRelease;
+ long i;
+
+ i = BUF_FILEHASH(&scp->fid);
+
+ code = 0;
+ lock_ObtainWrite(&buf_globalLock);
+ bp = buf_fileHashTablepp[i];
+ if (bp) bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ for(; bp; bp = nbp) {
+ didRelease = 0; /* haven't released this buffer yet */
+
+ /* clean buffer synchronously */
+ if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+ lock_ObtainMutex(&bp->mx);
+
+ /* start cleaning the buffer, and wait for it to finish */
+ buf_LockedCleanAsync(bp, reqp);
+ buf_WaitIO(bp);
+ lock_ReleaseMutex(&bp->mx);
+
+ code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+ if (code) goto skip;
+
+ lock_ObtainWrite(&buf_globalLock);
+ /* actually, we only know that the buffer is clean if ref
+ * count is 1, since we don't have the buffer itself locked.
+ */
+ if (!(bp->flags & CM_BUF_DIRTY)) {
+ if (bp->refCount == 1) { /* bp is held above */
+ buf_LockedRelease(bp);
+ nbp = bp->fileHashp;
+ if (nbp) nbp->refCount++;
+ didRelease = 1;
+ buf_Recycle(bp);
+ }
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+
+ (*cm_buf_opsp->Unstabilizep)(scp, userp);
+ }
+
+ skip:
+ if (!didRelease) {
+ lock_ObtainWrite(&buf_globalLock);
+ if (nbp = bp->fileHashp) nbp->refCount++;
+ buf_LockedRelease(bp);
+ lock_ReleaseWrite(&buf_globalLock);
+ }
+ } /* for loop over a bunch of buffers */
+
+ /* done */
+ return code;
+}
long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
- long code;
- cm_buf_t *bp; /* buffer we're hacking on */
+ long code;
+ cm_buf_t *bp; /* buffer we're hacking on */
cm_buf_t *nbp; /* next one */
- long i;
+ long i;
- i = BUF_FILEHASH(&scp->fid);
+ i = BUF_FILEHASH(&scp->fid);
- code = 0;
- lock_ObtainWrite(&buf_globalLock);
+ code = 0;
+ lock_ObtainWrite(&buf_globalLock);
bp = buf_fileHashTablepp[i];
if (bp) bp->refCount++;
lock_ReleaseWrite(&buf_globalLock);
- for(; bp; bp = nbp) {
- /* clean buffer synchronously */
- if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- if (userp) {
+ for(; bp; bp = nbp) {
+ /* clean buffer synchronously */
+ if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+ if (userp) {
cm_HoldUser(userp);
- lock_ObtainMutex(&bp->mx);
- if (bp->userp)
+ lock_ObtainMutex(&bp->mx);
+ if (bp->userp)
cm_ReleaseUser(bp->userp);
bp->userp = userp;
- lock_ReleaseMutex(&bp->mx);
- }
- buf_CleanAsync(bp, reqp);
+ lock_ReleaseMutex(&bp->mx);
+ }
+ buf_CleanAsync(bp, reqp);
buf_CleanWait(bp);
lock_ObtainMutex(&bp->mx);
- if (bp->flags & CM_BUF_ERROR) {
- if (code == 0 || code == -1) code = bp->error;
- if (code == 0) code = -1;
+ if (bp->flags & CM_BUF_ERROR) {
+ if (code == 0 || code == -1)
+ code = bp->error;
+ if (code == 0)
+ code = -1;
}
lock_ReleaseMutex(&bp->mx);
- }
+ }
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
nbp = bp->fileHashp;
if (nbp) nbp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- } /* for loop over a bunch of buffers */
-
+ lock_ReleaseWrite(&buf_globalLock);
+ } /* for loop over a bunch of buffers */
+
/* done */
- return code;
+ return code;
}
/* dump the contents of the buf_hashTablepp. */
char output[1024];
int i;
- if (buf_hashTablepp == NULL)
- return -1;
+ if (buf_hashTablepp == NULL)
+ return -1;
lock_ObtainRead(&buf_globalLock);
*/
void cm_RecordRacingRevoke(cm_fid_t *fidp, long cancelFlags)
{
- cm_racingRevokes_t *rp;
+ cm_racingRevokes_t *rp;
- lock_ObtainWrite(&cm_callbackLock);
+ lock_ObtainWrite(&cm_callbackLock);
osi_Log3(afsd_logp, "RecordRacingRevoke Volume %d Flags %lX activeCalls %d",
fidp->volume, cancelFlags, cm_activeCallbackGrantingCalls);
- if (cm_activeCallbackGrantingCalls > 0) {
- rp = malloc(sizeof(*rp));
- memset(rp, 0, sizeof(*rp));
- osi_QAdd((osi_queue_t **) &cm_racingRevokesp, &rp->q);
- rp->flags |= (cancelFlags & CM_RACINGFLAG_ALL);
- if (fidp) rp->fid = *fidp;
- rp->callbackCount = ++cm_callbackCount;
- }
- lock_ReleaseWrite(&cm_callbackLock);
+ if (cm_activeCallbackGrantingCalls > 0) {
+ rp = malloc(sizeof(*rp));
+ memset(rp, 0, sizeof(*rp));
+ osi_QAdd((osi_queue_t **) &cm_racingRevokesp, &rp->q);
+ rp->flags |= (cancelFlags & CM_RACINGFLAG_ALL);
+ if (fidp) rp->fid = *fidp;
+ rp->callbackCount = ++cm_callbackCount;
+ }
+ lock_ReleaseWrite(&cm_callbackLock);
}
/*
*/
void cm_RevokeCallback(struct rx_call *callp, AFSFid *fidp)
{
- cm_fid_t tfid;
- cm_scache_t *scp;
- long hash;
+ cm_fid_t tfid;
+ cm_scache_t *scp;
+ long hash;
- /* don't bother setting cell, since we won't be checking it (to aid
- * in working with multi-homed servers: we don't know the cell if we
- * don't recognize the IP address).
- */
- tfid.cell = 0;
- tfid.volume = fidp->Volume;
- tfid.vnode = fidp->Vnode;
- tfid.unique = fidp->Unique;
- hash = CM_SCACHE_HASH(&tfid);
+ /* don't bother setting cell, since we won't be checking it (to aid
+ * in working with multi-homed servers: we don't know the cell if we
+ * don't recognize the IP address).
+ */
+ tfid.cell = 0;
+ tfid.volume = fidp->Volume;
+ tfid.vnode = fidp->Vnode;
+ tfid.unique = fidp->Unique;
+ hash = CM_SCACHE_HASH(&tfid);
osi_Log3(afsd_logp, "RevokeCallback vol %d vn %d un %d",
- fidp->Volume, fidp->Vnode, fidp->Unique);
+ fidp->Volume, fidp->Vnode, fidp->Unique);
- /* do this first, so that if we're executing a callback granting call
- * at this moment, we kill it before it can be merged in. Otherwise,
- * it could complete while we're doing the scan below, and get missed
- * by both the scan and by this code.
- */
- cm_RecordRacingRevoke(&tfid, 0);
+ /* do this first, so that if we're executing a callback granting call
+ * at this moment, we kill it before it can be merged in. Otherwise,
+ * it could complete while we're doing the scan below, and get missed
+ * by both the scan and by this code.
+ */
+ cm_RecordRacingRevoke(&tfid, 0);
- lock_ObtainWrite(&cm_scacheLock);
- /* do all in the hash bucket, since we don't know how many we'll find with
- * varying cells.
- */
- for(scp = cm_hashTablep[hash]; scp; scp=scp->nextp) {
- if (scp->fid.volume == tfid.volume &&
- scp->fid.vnode == tfid.vnode &&
- scp->fid.unique == tfid.unique) {
- scp->refCount++;
- lock_ReleaseWrite(&cm_scacheLock);
+ lock_ObtainWrite(&cm_scacheLock);
+ /* do all in the hash bucket, since we don't know how many we'll find with
+ * varying cells.
+ */
+ for (scp = cm_hashTablep[hash]; scp; scp=scp->nextp) {
+ if (scp->fid.volume == tfid.volume &&
+ scp->fid.vnode == tfid.vnode &&
+ scp->fid.unique == tfid.unique) {
+ scp->refCount++;
+ lock_ReleaseWrite(&cm_scacheLock);
osi_Log1(afsd_logp, "Discarding SCache scp %x", scp);
- lock_ObtainMutex(&scp->mx);
- cm_DiscardSCache(scp);
- lock_ReleaseMutex(&scp->mx);
- cm_CallbackNotifyChange(scp);
- lock_ObtainWrite(&cm_scacheLock);
- scp->refCount--;
- }
+ lock_ObtainMutex(&scp->mx);
+ cm_DiscardSCache(scp);
+ lock_ReleaseMutex(&scp->mx);
+ cm_CallbackNotifyChange(scp);
+ lock_ObtainWrite(&cm_scacheLock);
+ scp->refCount--;
}
- lock_ReleaseWrite(&cm_scacheLock);
+ }
+ lock_ReleaseWrite(&cm_scacheLock);
}
/* called to revoke a volume callback, which is typically issued when a volume
*/
void cm_RevokeVolumeCallback(struct rx_call *callp, AFSFid *fidp)
{
- long hash;
- cm_scache_t *scp;
- cm_fid_t tfid;
+ long hash;
+ cm_scache_t *scp;
+ cm_fid_t tfid;
osi_Log1(afsd_logp, "RevokeVolumeCallback %d", fidp->Volume);
- /* do this first, so that if we're executing a callback granting call
- * at this moment, we kill it before it can be merged in. Otherwise,
- * it could complete while we're doing the scan below, and get missed
- * by both the scan and by this code.
- */
- tfid.cell = tfid.vnode = tfid.unique = 0;
- tfid.volume = fidp->Volume;
- cm_RecordRacingRevoke(&tfid, CM_RACINGFLAG_CANCELVOL);
-
-
- lock_ObtainWrite(&cm_scacheLock);
- for(hash = 0; hash < cm_hashTableSize; hash++) {
- for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
- if (scp->fid.volume == fidp->Volume) {
- scp->refCount++;
- lock_ReleaseWrite(&cm_scacheLock);
- lock_ObtainMutex(&scp->mx);
+ /* do this first, so that if we're executing a callback granting call
+ * at this moment, we kill it before it can be merged in. Otherwise,
+ * it could complete while we're doing the scan below, and get missed
+ * by both the scan and by this code.
+ */
+ tfid.cell = tfid.vnode = tfid.unique = 0;
+ tfid.volume = fidp->Volume;
+ cm_RecordRacingRevoke(&tfid, CM_RACINGFLAG_CANCELVOL);
+
+
+ lock_ObtainWrite(&cm_scacheLock);
+ for (hash = 0; hash < cm_hashTableSize; hash++) {
+ for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
+ if (scp->fid.volume == fidp->Volume) {
+ scp->refCount++;
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ObtainMutex(&scp->mx);
osi_Log1(afsd_logp, "Discarding SCache scp %x", scp);
- cm_DiscardSCache(scp);
- lock_ReleaseMutex(&scp->mx);
- cm_CallbackNotifyChange(scp);
- lock_ObtainWrite(&cm_scacheLock);
- scp->refCount--;
- }
- } /* search one hash bucket */
- } /* search all hash buckets */
-
- lock_ReleaseWrite(&cm_scacheLock);
+ cm_DiscardSCache(scp);
+ lock_ReleaseMutex(&scp->mx);
+ cm_CallbackNotifyChange(scp);
+ lock_ObtainWrite(&cm_scacheLock);
+ scp->refCount--;
+ }
+ } /* search one hash bucket */
+ } /* search all hash buckets */
+
+ lock_ReleaseWrite(&cm_scacheLock);
}
/* handle incoming RPC callback breaking message.
*/
SRXAFSCB_CallBack(struct rx_call *callp, AFSCBFids *fidsArrayp, AFSCBs *cbsArrayp)
{
- int i;
- AFSFid *tfidp;
+ int i;
+ AFSFid *tfidp;
osi_Log0(afsd_logp, "SRXAFSCB_CallBack");
- for(i=0; i < (long) fidsArrayp->AFSCBFids_len; i++) {
- tfidp = &fidsArrayp->AFSCBFids_val[i];
+ for (i=0; i < (long) fidsArrayp->AFSCBFids_len; i++) {
+ tfidp = &fidsArrayp->AFSCBFids_val[i];
if (tfidp->Volume == 0)
continue; /* means don't do anything */
- else if (tfidp->Vnode == 0)
- cm_RevokeVolumeCallback(callp, tfidp);
+ else if (tfidp->Vnode == 0)
+ cm_RevokeVolumeCallback(callp, tfidp);
else
cm_RevokeCallback(callp, tfidp);
- }
+ }
- return 0;
+ return 0;
}
/* called with no locks by RPC system when a server indicates that it has never
* are "rare," hopefully this won't be a problem.
*/
lock_ObtainWrite(&cm_scacheLock);
- for(hash = 0; hash < cm_hashTableSize; hash++) {
- for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
- scp->refCount++;
- lock_ReleaseWrite(&cm_scacheLock);
- lock_ObtainMutex(&scp->mx);
- discarded = 0;
- if (scp->cbServerp != NULL) {
- /* we have a callback, now decide if we should clear it */
- if (scp->cbServerp == tsp || tsp == NULL) {
+ for (hash = 0; hash < cm_hashTableSize; hash++) {
+ for (scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
+ scp->refCount++;
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ObtainMutex(&scp->mx);
+ discarded = 0;
+ if (scp->cbServerp != NULL) {
+ /* we have a callback, now decide if we should clear it */
+ if (scp->cbServerp == tsp || tsp == NULL) {
osi_Log1(afsd_logp, "Discarding SCache scp %x", scp);
- cm_DiscardSCache(scp);
- discarded = 1;
- }
- }
- lock_ReleaseMutex(&scp->mx);
- if (discarded)
- cm_CallbackNotifyChange(scp);
- lock_ObtainWrite(&cm_scacheLock);
- scp->refCount--;
- } /* search one hash bucket */
- } /* search all hash buckets */
+ cm_DiscardSCache(scp);
+ discarded = 1;
+ }
+ }
+ lock_ReleaseMutex(&scp->mx);
+ if (discarded)
+ cm_CallbackNotifyChange(scp);
+ lock_ObtainWrite(&cm_scacheLock);
+ scp->refCount--;
+ } /* search one hash bucket */
+ } /* search all hash buckets */
lock_ReleaseWrite(&cm_scacheLock);
/* we're done with the server structure */
- if (tsp) cm_PutServer(tsp);
+ if (tsp)
+ cm_PutServer(tsp);
}
return 0;
SRXAFSCB_Probe(struct rx_call *callp)
{
osi_Log0(afsd_logp, "SRXAFSCB_Probe - not implemented");
- return 0;
+ return 0;
}
/* debug interface: not implemented */
/* debug interface: not implemented */
SRXAFSCB_GetLock(struct rx_call *callp, long index, AFSDBLock *lockp)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_GetLock - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_GetCE(struct rx_call *callp, long index, AFSDBCacheEntry *cep)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_GetCE - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_XStatsVersion(struct rx_call *callp, long *vp)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_XStatsVersion - not implemented");
- *vp = -1;
- return RXGEN_OPCODE;
+ *vp = -1;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_GetXStats(struct rx_call *callp, long cvn, long coln, long *srvp, long *timep,
AFSCB_CollData *datap)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_GetXStats - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_InitCallBackState2(struct rx_call *callp, struct interfaceAddr* addr)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_InitCallBackState2 - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_WhoAreYou(struct rx_call *callp, struct interfaceAddr* addr)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_WhoAreYou - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_InitCallBackState3(struct rx_call *callp, afsUUID* serverUuid)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_InitCallBackState3 - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/* debug interface: not implemented */
SRXAFSCB_ProbeUuid(struct rx_call *callp, afsUUID* clientUuid)
{
- /* XXXX */
+ /* XXXX */
osi_Log0(afsd_logp, "SRXAFSCB_ProbeUuid - not implemented");
- return RXGEN_OPCODE;
+ return RXGEN_OPCODE;
}
/*------------------------------------------------------------------------
/* called by afsd without any locks to initialize this module */
void cm_InitCallback(void)
{
- lock_InitializeRWLock(&cm_callbackLock, "cm_callbackLock");
- cm_activeCallbackGrantingCalls = 0;
+ lock_InitializeRWLock(&cm_callbackLock, "cm_callbackLock");
+ cm_activeCallbackGrantingCalls = 0;
}
/* called with locked scp; tells us whether we've got a callback.
// to be called because cm_GetCallback has some initialization work to do.
// If cm_fakeDirCallback is 2, then it means that the fake directory is in
// good shape and we simply return true, provided no change is detected.
- int fdc, fgc;
+ int fdc, fgc;
if (cm_freelanceEnabled &&
scp->fid.cell==AFS_FAKE_ROOT_CELL_ID && scp->fid.volume==AFS_FAKE_ROOT_VOL_ID) {
*/
void cm_StartCallbackGrantingCall(cm_scache_t *scp, cm_callbackRequest_t *cbrp)
{
- lock_ObtainWrite(&cm_callbackLock);
- cbrp->callbackCount = cm_callbackCount;
- cm_activeCallbackGrantingCalls++;
- cbrp->startTime = osi_Time();
- cbrp->serverp = NULL;
- lock_ReleaseWrite(&cm_callbackLock);
+ lock_ObtainWrite(&cm_callbackLock);
+ cbrp->callbackCount = cm_callbackCount;
+ cm_activeCallbackGrantingCalls++;
+ cbrp->startTime = osi_Time();
+ cbrp->serverp = NULL;
+ lock_ReleaseWrite(&cm_callbackLock);
}
/* Called at the end of a callback-granting call, to remove the callback
* this locking hierarchy.
*/
void cm_EndCallbackGrantingCall(cm_scache_t *scp, cm_callbackRequest_t *cbrp,
- AFSCallBack *cbp, long flags)
+ AFSCallBack *cbp, long flags)
{
- cm_racingRevokes_t *revp; /* where we are */
- cm_racingRevokes_t *nrevp; /* where we'll be next */
- int freeFlag;
+ cm_racingRevokes_t *revp; /* where we are */
+ cm_racingRevokes_t *nrevp; /* where we'll be next */
+ int freeFlag;
cm_server_t * serverp = 0;
- lock_ObtainWrite(&cm_callbackLock);
- if (flags & CM_CALLBACK_MAINTAINCOUNT) {
- osi_assert(cm_activeCallbackGrantingCalls > 0);
- }
- else {
- osi_assert(cm_activeCallbackGrantingCalls-- > 0);
- }
+ lock_ObtainWrite(&cm_callbackLock);
+ if (flags & CM_CALLBACK_MAINTAINCOUNT) {
+ osi_assert(cm_activeCallbackGrantingCalls > 0);
+ }
+ else {
+ osi_assert(cm_activeCallbackGrantingCalls-- > 0);
+ }
if (cm_activeCallbackGrantingCalls == 0)
freeFlag = 1;
else
freeFlag = 0;
- /* record the callback; we'll clear it below if we really lose it */
+ /* record the callback; we'll clear it below if we really lose it */
if (cbrp) {
if (scp) {
if (scp->cbServerp != cbrp->serverp) {
serverp = scp->cbServerp;
}
- scp->cbServerp = cbrp->serverp;
- scp->cbExpires = cbrp->startTime + cbp->ExpirationTime;
+ scp->cbServerp = cbrp->serverp;
+ scp->cbExpires = cbrp->startTime + cbp->ExpirationTime;
} else {
serverp = cbrp->serverp;
}
cbrp->serverp = NULL;
- }
+ }
- /* a callback was actually revoked during our granting call, so
- * run down the list of revoked fids, looking for ours.
- * If activeCallbackGrantingCalls is zero, free the elements, too.
- *
- * May need to go through entire list just to do the freeing.
- */
- for(revp = cm_racingRevokesp; revp; revp = nrevp) {
- nrevp = (cm_racingRevokes_t *) osi_QNext(&revp->q);
- /* if this callback came in later than when we started the
- * callback-granting call, and if this fid is the right fid,
- * then clear the callback.
- */
+ /* a callback was actually revoked during our granting call, so
+ * run down the list of revoked fids, looking for ours.
+ * If activeCallbackGrantingCalls is zero, free the elements, too.
+ *
+ * May need to go through entire list just to do the freeing.
+ */
+ for (revp = cm_racingRevokesp; revp; revp = nrevp) {
+ nrevp = (cm_racingRevokes_t *) osi_QNext(&revp->q);
+ /* if this callback came in later than when we started the
+ * callback-granting call, and if this fid is the right fid,
+ * then clear the callback.
+ */
if (scp && cbrp && cbrp->callbackCount != cm_callbackCount
- && revp->callbackCount > cbrp->callbackCount
+ && revp->callbackCount > cbrp->callbackCount
&& (( scp->fid.volume == revp->fid.volume &&
- scp->fid.vnode == revp->fid.vnode &&
- scp->fid.unique == revp->fid.unique)
- ||
- ((revp->flags & CM_RACINGFLAG_CANCELVOL) &&
- scp->fid.volume == revp->fid.volume)
- ||
- (revp->flags & CM_RACINGFLAG_CANCELALL))) {
- /* this one matches */
- osi_Log4(afsd_logp,
- "Racing revoke scp %x old cbc %d rev cbc %d cur cbc %d",
- scp,
- cbrp->callbackCount, revp->callbackCount,
- cm_callbackCount);
- cm_DiscardSCache(scp);
- /*
- * Since we don't have a callback to preserve, it's
- * OK to drop the lock and re-obtain it.
- */
- lock_ReleaseMutex(&scp->mx);
- cm_CallbackNotifyChange(scp);
- lock_ObtainMutex(&scp->mx);
- }
- if (freeFlag) free(revp);
+ scp->fid.vnode == revp->fid.vnode &&
+ scp->fid.unique == revp->fid.unique)
+ ||
+ ((revp->flags & CM_RACINGFLAG_CANCELVOL) &&
+ scp->fid.volume == revp->fid.volume)
+ ||
+ (revp->flags & CM_RACINGFLAG_CANCELALL))) {
+ /* this one matches */
+ osi_Log4(afsd_logp,
+ "Racing revoke scp %x old cbc %d rev cbc %d cur cbc %d",
+ scp,
+ cbrp->callbackCount, revp->callbackCount,
+ cm_callbackCount);
+ cm_DiscardSCache(scp);
+ /*
+ * Since we don't have a callback to preserve, it's
+ * OK to drop the lock and re-obtain it.
+ */
+ lock_ReleaseMutex(&scp->mx);
+ cm_CallbackNotifyChange(scp);
+ lock_ObtainMutex(&scp->mx);
}
+ if (freeFlag) free(revp);
+ }
- /* if we freed the list, zap the pointer to it */
- if (freeFlag) cm_racingRevokesp = NULL;
+ /* if we freed the list, zap the pointer to it */
+ if (freeFlag) cm_racingRevokesp = NULL;
- lock_ReleaseWrite(&cm_callbackLock);
+ lock_ReleaseWrite(&cm_callbackLock);
if ( serverp ) {
lock_ObtainWrite(&cm_serverLock);
* called with locked scp; returns with same.
*/
long cm_GetCallback(cm_scache_t *scp, struct cm_user *userp,
- struct cm_req *reqp, long flags)
+ struct cm_req *reqp, long flags)
{
- long code;
+ long code;
cm_conn_t *connp;
AFSFetchStatus afsStatus;
AFSVolSync volSync;
osi_Log2(afsd_logp, "GetCallback scp %x flags %lX", scp, flags);
#ifdef AFS_FREELANCE_CLIENT
- // The case where a callback is needed on /afs is handled
- // specially. We need to fetch the status by calling
- // cm_MergeStatus and mark that cm_fakeDirCallback is 2
- if (cm_freelanceEnabled) {
+ // The case where a callback is needed on /afs is handled
+ // specially. We need to fetch the status by calling
+ // cm_MergeStatus and mark that cm_fakeDirCallback is 2
+ if (cm_freelanceEnabled) {
if (scp->fid.cell==AFS_FAKE_ROOT_CELL_ID &&
scp->fid.volume==AFS_FAKE_ROOT_VOL_ID &&
scp->fid.unique==0x1 &&
}
#endif /* AFS_FREELANCE_CLIENT */
- mustCall = (flags & 1);
- cm_AFSFidFromFid(&tfid, &scp->fid);
- while (1) {
- if (!mustCall && cm_HaveCallback(scp)) return 0;
+ mustCall = (flags & 1);
+ cm_AFSFidFromFid(&tfid, &scp->fid);
+ while (1) {
+ if (!mustCall && cm_HaveCallback(scp)) return 0;
/* turn off mustCall, since it has now forced us past the check above */
mustCall = 0;
/* otherwise, we have to make an RPC to get the status */
- sflags = CM_SCACHESYNC_FETCHSTATUS | CM_SCACHESYNC_GETCALLBACK;
+ sflags = CM_SCACHESYNC_FETCHSTATUS | CM_SCACHESYNC_GETCALLBACK;
cm_SyncOp(scp, NULL, NULL, NULL, 0, sflags);
cm_StartCallbackGrantingCall(scp, &cbr);
sfid = scp->fid;
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&scp->mx);
- /* now make the RPC */
- osi_Log1(afsd_logp, "CALL FetchStatus vp %x", (long) scp);
+ /* now make the RPC */
+ osi_Log1(afsd_logp, "CALL FetchStatus vp %x", (long) scp);
do {
- code = cm_Conn(&sfid, userp, reqp, &connp);
+ code = cm_Conn(&sfid, userp, reqp, &connp);
if (code) continue;
-
+
+ lock_ObtainMutex(&connp->mx);
code = RXAFS_FetchStatus(connp->callp, &tfid,
&afsStatus, &callback, &volSync);
-
- } while (cm_Analyze(connp, userp, reqp, &sfid, &volSync, NULL,
+ lock_ReleaseMutex(&connp->mx);
+ } while (cm_Analyze(connp, userp, reqp, &sfid, &volSync, NULL,
&cbr, code));
code = cm_MapRPCError(code, reqp);
- osi_Log0(afsd_logp, "CALL FetchStatus DONE");
+ osi_Log0(afsd_logp, "CALL FetchStatus DONE");
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
cm_SyncOpDone(scp, NULL, sflags);
- if (code == 0) {
+ if (code == 0) {
cm_EndCallbackGrantingCall(scp, &cbr, &callback, 0);
cm_MergeStatus(scp, &afsStatus, &volSync, userp, 0);
- }
+ }
else
cm_EndCallbackGrantingCall(NULL, &cbr, NULL, 0);
now = osi_Time();
lock_ObtainWrite(&cm_scacheLock);
- for(i=0; i<cm_hashTableSize; i++) {
- for(scp = cm_hashTablep[i]; scp; scp=scp->nextp) {
+ for (i=0; i<cm_hashTableSize; i++) {
+ for (scp = cm_hashTablep[i]; scp; scp=scp->nextp) {
scp->refCount++;
lock_ReleaseWrite(&cm_scacheLock);
if (scp->cbExpires > 0 && (scp->cbServerp == NULL || now > scp->cbExpires)) {
/* called with a held server to GC all bad connections hanging off of the server */
void cm_GCConnections(cm_server_t *serverp)
{
- cm_conn_t *tcp;
+ cm_conn_t *tcp;
cm_conn_t **lcpp;
cm_user_t *userp;
- lock_ObtainWrite(&cm_connLock);
- lcpp = &serverp->connsp;
- for(tcp = *lcpp; tcp; tcp = *lcpp) {
- userp = tcp->userp;
- if (userp && tcp->refCount == 0 && (userp->vcRefs == 0)) {
- /* do the deletion of this guy */
+ lock_ObtainWrite(&cm_connLock);
+ lcpp = &serverp->connsp;
+ for (tcp = *lcpp; tcp; tcp = *lcpp) {
+ userp = tcp->userp;
+ if (userp && tcp->refCount == 0 && (userp->vcRefs == 0)) {
+ /* do the deletion of this guy */
cm_PutServer(tcp->serverp);
cm_ReleaseUser(userp);
*lcpp = tcp->nextp;
- rx_DestroyConnection(tcp->callp);
+ rx_DestroyConnection(tcp->callp);
lock_FinalizeMutex(&tcp->mx);
free(tcp);
}
else {
- /* just advance to the next */
+ /* just advance to the next */
lcpp = &tcp->nextp;
}
}
- lock_ReleaseWrite(&cm_connLock);
+ lock_ReleaseWrite(&cm_connLock);
}
static void cm_NewRXConnection(cm_conn_t *tcp, cm_ucell_t *ucellp,
int serviceID;
int secIndex;
struct rx_securityClass *secObjp;
- afs_int32 level;
+ afs_int32 level;
- if (serverp->type == CM_SERVER_VLDB) {
- port = htons(7003);
+ if (serverp->type == CM_SERVER_VLDB) {
+ port = htons(7003);
serviceID = 52;
}
else {
- osi_assert(serverp->type == CM_SERVER_FILE);
+ osi_assert(serverp->type == CM_SERVER_FILE);
port = htons(7000);
serviceID = 1;
}
- if (ucellp->flags & CM_UCELLFLAG_RXKAD) {
- secIndex = 2;
- if (cryptall) {
- level = rxkad_crypt;
- tcp->cryptlevel = rxkad_crypt;
- } else {
- level = rxkad_clear;
- }
+ if (ucellp->flags & CM_UCELLFLAG_RXKAD) {
+ secIndex = 2;
+ if (cryptall) {
+ level = tcp->cryptlevel = rxkad_crypt;
+ } else {
+ level = tcp->cryptlevel = rxkad_clear;
+ }
secObjp = rxkad_NewClientSecurityObject(level,
&ucellp->sessionKey, ucellp->kvno,
ucellp->ticketLen, ucellp->ticketp);
secIndex = 0;
secObjp = rxnull_NewClientSecurityObject();
}
- osi_assert(secObjp != NULL);
+ osi_assert(secObjp != NULL);
tcp->callp = rx_NewConnection(serverp->addr.sin_addr.s_addr,
port,
serviceID,
secObjp,
secIndex);
- rx_SetConnDeadTime(tcp->callp, ConnDeadtimeout);
- rx_SetConnHardDeadTime(tcp->callp, HardDeadtimeout);
- tcp->ucgen = ucellp->gen;
+ rx_SetConnDeadTime(tcp->callp, ConnDeadtimeout);
+ rx_SetConnHardDeadTime(tcp->callp, HardDeadtimeout);
+ tcp->ucgen = ucellp->gen;
if (secObjp)
rxs_Release(secObjp); /* Decrement the initial refCount */
}
long cm_ConnByServer(cm_server_t *serverp, cm_user_t *userp, cm_conn_t **connpp)
{
- cm_conn_t *tcp;
+ cm_conn_t *tcp;
cm_ucell_t *ucellp;
- lock_ObtainMutex(&userp->mx);
- lock_ObtainWrite(&cm_connLock);
- for(tcp = serverp->connsp; tcp; tcp=tcp->nextp) {
+ lock_ObtainMutex(&userp->mx);
+ lock_ObtainWrite(&cm_connLock);
+ for (tcp = serverp->connsp; tcp; tcp=tcp->nextp) {
if (tcp->userp == userp)
break;
}
- /* find ucell structure */
+ /* find ucell structure */
ucellp = cm_GetUCell(userp, serverp->cellp);
- if (!tcp) {
+ if (!tcp) {
cm_GetServer(serverp);
- tcp = malloc(sizeof(*tcp));
+ tcp = malloc(sizeof(*tcp));
memset(tcp, 0, sizeof(*tcp));
tcp->nextp = serverp->connsp;
serverp->connsp = tcp;
cm_HoldUser(userp);
tcp->userp = userp;
lock_InitializeMutex(&tcp->mx, "cm_conn_t mutex");
+ lock_ObtainMutex(&tcp->mx);
tcp->serverp = serverp;
- tcp->cryptlevel = rxkad_clear;
- cm_NewRXConnection(tcp, ucellp, serverp);
- tcp->refCount = 1;
- }
- else {
- if ((tcp->ucgen < ucellp->gen) || (tcp->cryptlevel != cryptall))
- {
- rx_DestroyConnection(tcp->callp);
- cm_NewRXConnection(tcp, ucellp, serverp);
- }
+ tcp->cryptlevel = rxkad_clear;
+ cm_NewRXConnection(tcp, ucellp, serverp);
+ tcp->refCount = 1;
+ lock_ReleaseMutex(&tcp->mx);
+ } else {
+ if ((tcp->ucgen < ucellp->gen) ||
+ (tcp->cryptlevel != (cryptall ? rxkad_crypt : rxkad_clear)))
+ {
+ lock_ObtainMutex(&tcp->mx);
+ rx_DestroyConnection(tcp->callp);
+ cm_NewRXConnection(tcp, ucellp, serverp);
+ lock_ReleaseMutex(&tcp->mx);
+ }
tcp->refCount++;
- }
- lock_ReleaseWrite(&cm_connLock);
+ }
+ lock_ReleaseWrite(&cm_connLock);
lock_ReleaseMutex(&userp->mx);
- /* return this pointer to our caller */
+ /* return this pointer to our caller */
osi_Log1(afsd_logp, "cm_ConnByServer returning conn 0x%x", (long) tcp);
- *connpp = tcp;
+ *connpp = tcp;
return 0;
}
* or when holding or releasing a vnode pointer.
*/
long cm_BufWrite(void *vfidp, osi_hyper_t *offsetp, long length, long flags,
- cm_user_t *userp, cm_req_t *reqp)
+ cm_user_t *userp, cm_req_t *reqp)
{
- /* store the data back from this buffer; the buffer is locked and held,
- * but the vnode involved isn't locked, yet. It is held by its
- * reference from the buffer, which won't change until the buffer is
- * released by our caller. Thus, we don't have to worry about holding
- * bufp->scp.
- */
- long code;
- cm_fid_t *fidp = vfidp;
+ /* store the data back from this buffer; the buffer is locked and held,
+ * but the vnode involved isn't locked, yet. It is held by its
+ * reference from the buffer, which won't change until the buffer is
+ * released by our caller. Thus, we don't have to worry about holding
+ * bufp->scp.
+ */
+ long code;
+ cm_fid_t *fidp = vfidp;
cm_scache_t *scp;
long nbytes;
long temp;
* drops lots of locks, and may indeed return a properly initialized
* buffer, although more likely it will just return a new, empty, buffer.
*/
- scp = cm_FindSCache(fidp);
- if (scp == NULL)
- return CM_ERROR_NOSUCHFILE; /* shouldn't happen */
+ scp = cm_FindSCache(fidp);
+ if (scp == NULL)
+ return CM_ERROR_NOSUCHFILE; /* shouldn't happen */
- cm_AFSFidFromFid(&tfid, fidp);
+ cm_AFSFidFromFid(&tfid, fidp);
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
if (code) {
- osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseSCache(scp);
+ osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseSCache(scp);
return code;
}
- if (biod.length == 0) {
- osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseBIOD(&biod, 1); /* should be a NOOP */
- cm_ReleaseSCache(scp);
+ if (biod.length == 0) {
+ osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseBIOD(&biod, 1); /* should be a NOOP */
+ cm_ReleaseSCache(scp);
return 0;
- }
+ }
- /* Serialize StoreData RPC's; for rationale see cm_scache.c */
- (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
+ /* Serialize StoreData RPC's; for rationale see cm_scache.c */
+ (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
- /* prepare the output status for the store */
- scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
+ /* prepare the output status for the store */
+ scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
cm_StatusFromAttr(&inStatus, scp, NULL);
truncPos = scp->length.LowPart;
if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
truncPos = scp->truncPos.LowPart;
scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
- /* compute how many bytes to write from this buffer */
+ /* compute how many bytes to write from this buffer */
thyper = LargeIntegerSubtract(scp->length, biod.offset);
if (LargeIntegerLessThanZero(thyper)) {
- /* entire buffer is past EOF */
- nbytes = 0;
+ /* entire buffer is past EOF */
+ nbytes = 0;
}
else {
- /* otherwise write out part of buffer before EOF, but not
+ /* otherwise write out part of buffer before EOF, but not
* more than bufferSize bytes.
*/
- nbytes = thyper.LowPart;
+ nbytes = thyper.LowPart;
if (nbytes > biod.length)
nbytes = biod.length;
}
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&scp->mx);
/* now we're ready to do the store operation */
do {
- code = cm_Conn(&scp->fid, userp, reqp, &connp);
+ code = cm_Conn(&scp->fid, userp, reqp, &connp);
if (code)
continue;
- callp = rx_NewCall(connp->callp);
+ callp = rx_NewCall(connp->callp);
- osi_Log3(afsd_logp, "CALL StoreData vp %x, off 0x%x, size 0x%x",
+ osi_Log3(afsd_logp, "CALL StoreData vp %x, off 0x%x, size 0x%x",
(long) scp, biod.offset.LowPart, nbytes);
+ lock_ObtainMutex(&connp->mx);
code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
biod.offset.LowPart, nbytes, truncPos);
- if (code == 0) {
+ if (code == 0) {
/* write the data from the the list of buffers */
qdp = NULL;
- while(nbytes > 0) {
- if (qdp == NULL)
- qdp = biod.bufListEndp;
- else
- qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
- osi_assert(qdp != NULL);
+ while(nbytes > 0) {
+ if (qdp == NULL)
+ qdp = biod.bufListEndp;
+ else
+ qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
+ osi_assert(qdp != NULL);
bufp = osi_GetQData(qdp);
bufferp = bufp->datap;
wbytes = nbytes;
if (temp != wbytes) {
osi_Log2(afsd_logp, "rx_Write failed %d != %d",temp,wbytes);
code = -1;
- break;
- } else {
+ break;
+ } else {
osi_Log1(afsd_logp, "rx_Write succeeded %d",temp);
- }
+ }
nbytes -= wbytes;
} /* while more bytes to write */
- } /* if RPC started successfully */
+ } /* if RPC started successfully */
else {
osi_Log1(afsd_logp, "StartRXAFS_StoreData failed (%lX)",code);
}
- if (code == 0) {
- code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
+ if (code == 0) {
+ code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
if (code)
osi_Log1(afsd_logp, "EndRXAFS_StoreData failed (%lX)",code);
}
code = rx_EndCall(callp, code);
+ lock_ReleaseMutex(&connp->mx);
+
osi_Log0(afsd_logp, "CALL StoreData DONE");
- } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
+ } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
code = cm_MapRPCError(code, reqp);
/* now, clean up our state */
lock_ObtainMutex(&scp->mx);
- cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+ cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
- if (code == 0) {
- /* now, here's something a little tricky: in AFS 3, a dirty
- * length can't be directly stored, instead, a dirty chunk is
- * stored that sets the file's size (by writing and by using
- * the truncate-first option in the store call).
- *
+ if (code == 0) {
+ /* now, here's something a little tricky: in AFS 3, a dirty
+ * length can't be directly stored, instead, a dirty chunk is
+ * stored that sets the file's size (by writing and by using
+ * the truncate-first option in the store call).
+ *
* At this point, we've just finished a store, and so the trunc
- * pos field is clean. If the file's size at the server is at
- * least as big as we think it should be, then we turn off the
- * length dirty bit, since all the other dirty buffers must
- * precede this one in the file.
+ * pos field is clean. If the file's size at the server is at
+ * least as big as we think it should be, then we turn off the
+ * length dirty bit, since all the other dirty buffers must
+ * precede this one in the file.
*
* The file's desired size shouldn't be smaller than what's
- * stored at the server now, since we just did the trunc pos
- * store.
+ * stored at the server now, since we just did the trunc pos
+ * store.
*
* We have to turn off the length dirty bit as soon as we can,
- * so that we see updates made by other machines.
+ * so that we see updates made by other machines.
*/
- if (outStatus.Length >= scp->length.LowPart)
+ if (outStatus.Length >= scp->length.LowPart)
scp->mask &= ~CM_SCACHEMASK_LENGTH;
- cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
- } else {
- if (code == CM_ERROR_SPACE)
- scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
- else if (code == CM_ERROR_QUOTA)
- scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
- }
+ cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
+ } else {
+ if (code == CM_ERROR_SPACE)
+ scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
+ else if (code == CM_ERROR_QUOTA)
+ scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
+ }
lock_ReleaseMutex(&scp->mx);
cm_ReleaseBIOD(&biod, 1);
- cm_ReleaseSCache(scp);
+ cm_ReleaseSCache(scp);
return code;
}
AFSStoreStatus inStatus;
AFSVolSync volSync;
AFSFid tfid;
- long code;
- long truncPos;
- cm_conn_t *connp;
+ long code;
+ long truncPos;
+ cm_conn_t *connp;
struct rx_call *callp;
- /* Serialize StoreData RPC's; for rationale see cm_scache.c */
- (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
+ /* Serialize StoreData RPC's; for rationale see cm_scache.c */
+ (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
CM_SCACHESYNC_STOREDATA_EXCL);
- /* prepare the output status for the store */
- inStatus.Mask = AFS_SETMODTIME;
- inStatus.ClientModTime = scp->clientModTime;
- scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;
+ /* prepare the output status for the store */
+ inStatus.Mask = AFS_SETMODTIME;
+ inStatus.ClientModTime = scp->clientModTime;
+ scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;
- /* calculate truncation position */
+ /* calculate truncation position */
truncPos = scp->length.LowPart;
if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
&& scp->truncPos.LowPart < (unsigned long) truncPos)
truncPos = scp->truncPos.LowPart;
- scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
-
- lock_ReleaseMutex(&scp->mx);
+ scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
- cm_AFSFidFromFid(&tfid, &scp->fid);
+ lock_ReleaseMutex(&scp->mx);
+
+ cm_AFSFidFromFid(&tfid, &scp->fid);
/* now we're ready to do the store operation */
do {
- code = cm_Conn(&scp->fid, userp, reqp, &connp);
+ code = cm_Conn(&scp->fid, userp, reqp, &connp);
if (code)
continue;
- callp = rx_NewCall(connp->callp);
+ callp = rx_NewCall(connp->callp);
+ lock_ObtainMutex(&connp->mx);
code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
0, 0, truncPos);
- if (code == 0)
- code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
+ if (code == 0)
+ code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
code = rx_EndCall(callp, code);
- } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
+
+ lock_ReleaseMutex(&connp->mx);
+
+ } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
code = cm_MapRPCError(code, reqp);
/* now, clean up our state */
lock_ObtainMutex(&scp->mx);
- cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+ cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
- if (code == 0) {
- /*
- * For explanation of handling of CM_SCACHEMASK_LENGTH,
- * see cm_BufWrite().
- */
- if (outStatus.Length >= scp->length.LowPart)
+ if (code == 0) {
+ /*
+ * For explanation of handling of CM_SCACHEMASK_LENGTH,
+ * see cm_BufWrite().
+ */
+ if (outStatus.Length >= scp->length.LowPart)
scp->mask &= ~CM_SCACHEMASK_LENGTH;
- cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
- }
+ cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
+ }
- return code;
+ return code;
}
long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
{
- *bytesReadp = buf_bufferSize;
+ *bytesReadp = buf_bufferSize;
- /* now return a code that means that I/O is done */
+ /* now return a code that means that I/O is done */
return 0;
}
*/
long cm_BufStabilize(void *parmp, cm_user_t *userp, cm_req_t *reqp)
{
- cm_scache_t *scp;
+ cm_scache_t *scp;
long code;
scp = parmp;
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
code = cm_SyncOp(scp, NULL, userp, reqp, 0,
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
- if (code) {
- lock_ReleaseMutex(&scp->mx);
+ if (code) {
+ lock_ReleaseMutex(&scp->mx);
return code;
- }
+ }
return 0;
}
/* undoes the work that cm_BufStabilize does: releases lock so things can change again */
long cm_BufUnstabilize(void *parmp, cm_user_t *userp)
{
- cm_scache_t *scp;
+ cm_scache_t *scp;
scp = parmp;
lock_ReleaseMutex(&scp->mx);
- /* always succeeds */
+ /* always succeeds */
return 0;
}
cm_buf_ops_t cm_bufOps = {
- cm_BufWrite,
+ cm_BufWrite,
cm_BufRead,
cm_BufStabilize,
cm_BufUnstabilize
int cm_InitDCache(long chunkSize, long nbuffers)
{
- lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
- if (nbuffers)
+ lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
+ if (nbuffers)
buf_nbuffers = nbuffers;
- return buf_Init(&cm_bufOps);
+ return buf_Init(&cm_bufOps);
}
/* check to see if we have an up-to-date buffer. The buffer must have
*/
int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
{
- int code;
- if (!cm_HaveCallback(scp))
- return 0;
- if ((bufp->cmFlags
- & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
- == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
- return 1;
- if (bufp->dataVersion == scp->dataVersion)
- return 1;
- if (!isBufLocked) {
- code = lock_TryMutex(&bufp->mx);
+ int code;
+ if (!cm_HaveCallback(scp))
+ return 0;
+ if ((bufp->cmFlags
+ & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
+ == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
+ return 1;
+ if (bufp->dataVersion == scp->dataVersion)
+ return 1;
+ if (!isBufLocked) {
+ code = lock_TryMutex(&bufp->mx);
if (code == 0) {
- /* don't have the lock, and can't lock it, then
+ /* don't have the lock, and can't lock it, then
* return failure.
*/
return 0;
}
}
- /* remember dirty flag for later */
- code = bufp->flags & CM_BUF_DIRTY;
+ /* remember dirty flag for later */
+ code = bufp->flags & CM_BUF_DIRTY;
- /* release lock if we obtained it here */
- if (!isBufLocked)
+ /* release lock if we obtained it here */
+ if (!isBufLocked)
lock_ReleaseMutex(&bufp->mx);
- /* if buffer was dirty, buffer is acceptable for use */
- if (code)
- return 1;
- else
- return 0;
+ /* if buffer was dirty, buffer is acceptable for use */
+ if (code)
+ return 1;
+ else
+ return 0;
}
/* used when deciding whether to do a prefetch or not */
long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, long length,
- cm_user_t *up, cm_req_t *reqp, osi_hyper_t *realBasep)
+ cm_user_t *up, cm_req_t *reqp, osi_hyper_t *realBasep)
{
- osi_hyper_t toffset;
+ osi_hyper_t toffset;
osi_hyper_t tbase;
long code;
cm_buf_t *bp;
/* now scan all buffers in the range, looking for any that look like
* they need work.
*/
- tbase = *startBasep;
- stop = 0;
- lock_ObtainMutex(&scp->mx);
+ tbase = *startBasep;
+ stop = 0;
+ lock_ObtainMutex(&scp->mx);
while(length > 0) {
- /* get callback so we can do a meaningful dataVersion comparison */
+ /* get callback so we can do a meaningful dataVersion comparison */
code = cm_SyncOp(scp, NULL, up, reqp, 0,
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
- if (code) {
- scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
- lock_ReleaseMutex(&scp->mx);
+ if (code) {
+ scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
+ lock_ReleaseMutex(&scp->mx);
return code;
}
if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
- /* we're past the end of file */
+ /* we're past the end of file */
break;
}
- bp = buf_Find(scp, &tbase);
- /* We cheat slightly by not locking the bp mutex. */
+ bp = buf_Find(scp, &tbase);
+ /* We cheat slightly by not locking the bp mutex. */
if (bp) {
if ((bp->cmFlags
- & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
+ & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
&& bp->dataVersion != scp->dataVersion)
stop = 1;
buf_Release(bp);
- }
+ }
else
stop = 1;
- /* if this buffer is essentially guaranteed to require a fetch,
+ /* if this buffer is essentially guaranteed to require a fetch,
* break out here and return this position.
*/
if (stop)
* particular buffer in the range that definitely needs to be fetched.
*/
if (stop == 0) {
- /* return non-zero code since realBasep won't be valid */
- scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
- code = -1;
- }
+ /* return non-zero code since realBasep won't be valid */
+ scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
+ code = -1;
+ }
else {
- /* successfully found a page that will need fetching */
- *realBasep = tbase;
+ /* successfully found a page that will need fetching */
+ *realBasep = tbase;
code = 0;
}
lock_ReleaseMutex(&scp->mx);
}
void cm_BkgStore(cm_scache_t *scp, long p1, long p2, long p3, long p4,
- cm_user_t *userp)
+ cm_user_t *userp)
{
- osi_hyper_t toffset;
+ osi_hyper_t toffset;
long length;
- cm_req_t req;
+ cm_req_t req;
- cm_InitReq(&req);
- req.flags |= CM_REQ_NORETRY;
+ cm_InitReq(&req);
+ req.flags |= CM_REQ_NORETRY;
toffset.LowPart = p1;
toffset.HighPart = p2;
length = p3;
- osi_Log2(afsd_logp, "Starting BKG store vp 0x%x, base 0x%x", scp, p1);
+ osi_Log2(afsd_logp, "Starting BKG store vp 0x%x, base 0x%x", scp, p1);
- cm_BufWrite(&scp->fid, &toffset, length, /* flags */ 0, userp, &req);
+ cm_BufWrite(&scp->fid, &toffset, length, /* flags */ 0, userp, &req);
- lock_ObtainMutex(&scp->mx);
- cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
+ lock_ObtainMutex(&scp->mx);
+ cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
lock_ReleaseMutex(&scp->mx);
}
void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base)
{
- osi_hyper_t thyper;
-
- if (code == 0) {
- thyper.LowPart = cm_chunkSize;
- thyper.HighPart = 0;
- thyper = LargeIntegerAdd(*base, thyper);
- thyper.LowPart &= (-cm_chunkSize);
- if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
+ osi_hyper_t thyper;
+
+ if (code == 0) {
+ thyper.LowPart = cm_chunkSize;
+ thyper.HighPart = 0;
+ thyper = LargeIntegerAdd(*base, thyper);
+ thyper.LowPart &= (-cm_chunkSize);
+ if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
scp->prefetch.base = *base;
- if (LargeIntegerGreaterThan(thyper, scp->prefetch.end))
+ if (LargeIntegerGreaterThan(thyper, scp->prefetch.end))
scp->prefetch.end = thyper;
- }
- scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
+ }
+ scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
}
/* do the prefetch */
void cm_BkgPrefetch(cm_scache_t *scp, long p1, long p2, long p3, long p4,
- cm_user_t *userp)
+ cm_user_t *userp)
{
- long length;
+ long length;
osi_hyper_t base;
long code;
cm_buf_t *bp;
- int cpff = 0; /* cleared prefetch flag */
- cm_req_t req;
+ int cpff = 0; /* cleared prefetch flag */
+ cm_req_t req;
- cm_InitReq(&req);
- req.flags |= CM_REQ_NORETRY;
+ cm_InitReq(&req);
+ req.flags |= CM_REQ_NORETRY;
- base.LowPart = p1;
+ base.LowPart = p1;
base.HighPart = p2;
length = p3;
- osi_Log2(afsd_logp, "Starting BKG prefetch vp 0x%x, base 0x%x", scp, p1);
+ osi_Log2(afsd_logp, "Starting BKG prefetch vp 0x%x, base 0x%x", scp, p1);
code = buf_Get(scp, &base, &bp);
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
if (code || (bp->cmFlags & CM_BUF_CMFETCHING)) {
- scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
- lock_ReleaseMutex(&scp->mx);
- return;
- }
+ scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
+ lock_ReleaseMutex(&scp->mx);
+ return;
+ }
code = cm_GetBuffer(scp, bp, &cpff, userp, &req);
- if (!cpff)
+ if (!cpff)
cm_ClearPrefetchFlag(code, scp, &base);
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&scp->mx);
buf_Release(bp);
return;
}
* do a prefetch.
*/
void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp,
- cm_user_t *userp, cm_req_t *reqp)
+ cm_user_t *userp, cm_req_t *reqp)
{
- long code;
+ long code;
osi_hyper_t realBase;
osi_hyper_t readBase;
readBase = *offsetp;
- /* round up to chunk boundary */
- readBase.LowPart += (cm_chunkSize-1);
- readBase.LowPart &= (-cm_chunkSize);
+ /* round up to chunk boundary */
+ readBase.LowPart += (cm_chunkSize-1);
+ readBase.LowPart &= (-cm_chunkSize);
- lock_ObtainMutex(&scp->mx);
- if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
+ lock_ObtainMutex(&scp->mx);
+ if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
|| LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&scp->mx);
return;
- }
- scp->flags |= CM_SCACHEFLAG_PREFETCHING;
+ }
+ scp->flags |= CM_SCACHEFLAG_PREFETCHING;
- /* start the scan at the latter of the end of this read or
+ /* start the scan at the latter of the end of this read or
* the end of the last fetched region.
*/
- if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
+ if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
readBase = scp->prefetch.end;
lock_ReleaseMutex(&scp->mx);
code = cm_CheckFetchRange(scp, &readBase, cm_chunkSize, userp, reqp,
- &realBase);
- if (code)
+ &realBase);
+ if (code)
return; /* can't find something to prefetch */
osi_Log2(afsd_logp, "BKG Prefetch request vp 0x%x, base 0x%x",
* is being written out.
*/
long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
- cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
+ cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
cm_buf_t *bufp;
osi_queueData_t *qdp;
long code;
long flags; /* flags to cm_SyncOp */
- /* clear things out */
- biop->scp = scp; /* don't hold */
+ /* clear things out */
+ biop->scp = scp; /* don't hold */
biop->offset = *inOffsetp;
biop->length = 0;
biop->bufListp = NULL;
biop->bufListEndp = NULL;
- biop->reserved = 0;
+ biop->reserved = 0;
- /* reserve a chunk's worth of buffers */
- lock_ReleaseMutex(&scp->mx);
- buf_ReserveBuffers(cm_chunkSize / buf_bufferSize);
- lock_ObtainMutex(&scp->mx);
+ /* reserve a chunk's worth of buffers */
+ lock_ReleaseMutex(&scp->mx);
+ buf_ReserveBuffers(cm_chunkSize / buf_bufferSize);
+ lock_ObtainMutex(&scp->mx);
bufp = NULL;
- for(temp = 0; temp < inSize; temp += buf_bufferSize, bufp = NULL) {
- thyper.HighPart = 0;
- thyper.LowPart = temp;
+ for (temp = 0; temp < inSize; temp += buf_bufferSize, bufp = NULL) {
+ thyper.HighPart = 0;
+ thyper.LowPart = temp;
tbase = LargeIntegerAdd(*inOffsetp, thyper);
bufp = buf_Find(scp, &tbase);
if (bufp) {
- /* get buffer mutex and scp mutex safely */
- lock_ReleaseMutex(&scp->mx);
- lock_ObtainMutex(&bufp->mx);
- lock_ObtainMutex(&scp->mx);
-
- flags = CM_SCACHESYNC_NEEDCALLBACK
- | CM_SCACHESYNC_GETSTATUS
- | CM_SCACHESYNC_STOREDATA
- | CM_SCACHESYNC_BUFLOCKED;
- code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
+ /* get buffer mutex and scp mutex safely */
+ lock_ReleaseMutex(&scp->mx);
+ lock_ObtainMutex(&bufp->mx);
+ lock_ObtainMutex(&scp->mx);
+
+ flags = CM_SCACHESYNC_NEEDCALLBACK
+ | CM_SCACHESYNC_GETSTATUS
+ | CM_SCACHESYNC_STOREDATA
+ | CM_SCACHESYNC_BUFLOCKED;
+ code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
if (code) {
- lock_ReleaseMutex(&bufp->mx);
+ lock_ReleaseMutex(&bufp->mx);
buf_Release(bufp);
- buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
+ buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
return code;
- }
+ }
- /* if the buffer is dirty, we're done */
+ /* if the buffer is dirty, we're done */
if (bufp->flags & CM_BUF_DIRTY) {
osi_assertx(!(bufp->flags & CM_BUF_WRITING),
"WRITING w/o CMSTORING in SetupStoreBIOD");
- bufp->flags |= CM_BUF_WRITING;
- break;
+ bufp->flags |= CM_BUF_WRITING;
+ break;
}
- /* this buffer is clean, so there's no reason to process it */
- cm_SyncOpDone(scp, bufp, flags);
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
- }
+ /* this buffer is clean, so there's no reason to process it */
+ cm_SyncOpDone(scp, bufp, flags);
+ lock_ReleaseMutex(&bufp->mx);
+ buf_Release(bufp);
+ }
}
- biop->reserved = 1;
+ biop->reserved = 1;
/* if we get here, if bufp is null, we didn't find any dirty buffers
- * that weren't already being stored back, so we just quit now.
+ * that weren't already being stored back, so we just quit now.
*/
- if (!bufp) {
- return 0;
- }
+ if (!bufp) {
+ return 0;
+ }
- /* don't need buffer mutex any more */
- lock_ReleaseMutex(&bufp->mx);
+ /* don't need buffer mutex any more */
+ lock_ReleaseMutex(&bufp->mx);
- /* put this element in the list */
+ /* put this element in the list */
qdp = osi_QDAlloc();
osi_SetQData(qdp, bufp);
- /* don't have to hold bufp, since held by buf_Find above */
+ /* don't have to hold bufp, since held by buf_Find above */
osi_QAddH((osi_queue_t **) &biop->bufListp,
(osi_queue_t **) &biop->bufListEndp,
&qdp->q);
firstModOffset = bufp->offset;
biop->offset = firstModOffset;
- /* compute the window surrounding *inOffsetp of size cm_chunkSize */
- scanStart = *inOffsetp;
+ /* compute the window surrounding *inOffsetp of size cm_chunkSize */
+ scanStart = *inOffsetp;
scanStart.LowPart &= (-cm_chunkSize);
- thyper.LowPart = cm_chunkSize;
+ thyper.LowPart = cm_chunkSize;
thyper.HighPart = 0;
- scanEnd = LargeIntegerAdd(scanStart, thyper);
+ scanEnd = LargeIntegerAdd(scanStart, thyper);
- flags = CM_SCACHESYNC_NEEDCALLBACK
- | CM_SCACHESYNC_GETSTATUS
+ flags = CM_SCACHESYNC_NEEDCALLBACK
+ | CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_STOREDATA
| CM_SCACHESYNC_BUFLOCKED
| CM_SCACHESYNC_NOWAIT;
- /* start by looking backwards until scanStart */
- thyper.HighPart = 0; /* hyper version of buf_bufferSize */
+ /* start by looking backwards until scanStart */
+ thyper.HighPart = 0; /* hyper version of buf_bufferSize */
thyper.LowPart = buf_bufferSize;
- tbase = LargeIntegerSubtract(firstModOffset, thyper);
+ tbase = LargeIntegerSubtract(firstModOffset, thyper);
while(LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
/* see if we can find the buffer */
- bufp = buf_Find(scp, &tbase);
+ bufp = buf_Find(scp, &tbase);
if (!bufp)
break;
- /* try to lock it, and quit if we can't (simplifies locking) */
+ /* try to lock it, and quit if we can't (simplifies locking) */
code = lock_TryMutex(&bufp->mx);
if (code == 0) {
- buf_Release(bufp);
+ buf_Release(bufp);
break;
}
code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
if (code) {
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
+ lock_ReleaseMutex(&bufp->mx);
+ buf_Release(bufp);
break;
}
- if (!(bufp->flags & CM_BUF_DIRTY)) {
- /* buffer is clean, so we shouldn't add it */
- cm_SyncOpDone(scp, bufp, flags);
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
+ if (!(bufp->flags & CM_BUF_DIRTY)) {
+ /* buffer is clean, so we shouldn't add it */
+ cm_SyncOpDone(scp, bufp, flags);
+ lock_ReleaseMutex(&bufp->mx);
+ buf_Release(bufp);
break;
}
- /* don't need buffer mutex any more */
- lock_ReleaseMutex(&bufp->mx);
+ /* don't need buffer mutex any more */
+ lock_ReleaseMutex(&bufp->mx);
/* we have a dirty buffer ready for storing. Add it to the tail
* of the list, since it immediately precedes all of the disk
* addresses we've already collected.
*/
- qdp = osi_QDAlloc();
+ qdp = osi_QDAlloc();
osi_SetQData(qdp, bufp);
/* no buf_hold necessary, since we have it held from buf_Find */
osi_QAddT((osi_queue_t **) &biop->bufListp,
(osi_queue_t **) &biop->bufListEndp,
&qdp->q);
- /* update biod info describing the transfer */
+ /* update biod info describing the transfer */
biop->offset = LargeIntegerSubtract(biop->offset, thyper);
biop->length += buf_bufferSize;
tbase = LargeIntegerSubtract(tbase, thyper);
} /* while loop looking for pages preceding the one we found */
- /* now, find later dirty, contiguous pages, and add them to the list */
- thyper.HighPart = 0; /* hyper version of buf_bufferSize */
+ /* now, find later dirty, contiguous pages, and add them to the list */
+ thyper.HighPart = 0; /* hyper version of buf_bufferSize */
thyper.LowPart = buf_bufferSize;
- tbase = LargeIntegerAdd(firstModOffset, thyper);
+ tbase = LargeIntegerAdd(firstModOffset, thyper);
while(LargeIntegerLessThan(tbase, scanEnd)) {
- /* see if we can find the buffer */
- bufp = buf_Find(scp, &tbase);
+ /* see if we can find the buffer */
+ bufp = buf_Find(scp, &tbase);
if (!bufp)
break;
- /* try to lock it, and quit if we can't (simplifies locking) */
+ /* try to lock it, and quit if we can't (simplifies locking) */
code = lock_TryMutex(&bufp->mx);
if (code == 0) {
- buf_Release(bufp);
+ buf_Release(bufp);
break;
}
code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
if (code) {
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
+ lock_ReleaseMutex(&bufp->mx);
+ buf_Release(bufp);
break;
}
- if (!(bufp->flags & CM_BUF_DIRTY)) {
- /* buffer is clean, so we shouldn't add it */
- cm_SyncOpDone(scp, bufp, flags);
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
+ if (!(bufp->flags & CM_BUF_DIRTY)) {
+ /* buffer is clean, so we shouldn't add it */
+ cm_SyncOpDone(scp, bufp, flags);
+ lock_ReleaseMutex(&bufp->mx);
+ buf_Release(bufp);
break;
}
- /* don't need buffer mutex any more */
- lock_ReleaseMutex(&bufp->mx);
+ /* don't need buffer mutex any more */
+ lock_ReleaseMutex(&bufp->mx);
/* we have a dirty buffer ready for storing. Add it to the head
* of the list, since it immediately follows all of the disk
* addresses we've already collected.
*/
- qdp = osi_QDAlloc();
+ qdp = osi_QDAlloc();
osi_SetQData(qdp, bufp);
/* no buf_hold necessary, since we have it held from buf_Find */
osi_QAddH((osi_queue_t **) &biop->bufListp,
(osi_queue_t **) &biop->bufListEndp,
&qdp->q);
- /* update biod info describing the transfer */
+ /* update biod info describing the transfer */
biop->length += buf_bufferSize;
/* update loop pointer */
long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
cm_bulkIO_t *biop, cm_user_t *up, cm_req_t *reqp)
{
- long code;
+ long code;
cm_buf_t *tbp;
osi_hyper_t toffset; /* a long long temp variable */
osi_hyper_t pageBase; /* base offset we're looking at */
osi_hyper_t fileSize; /* the # of bytes in the file */
osi_queueData_t *heldBufListp; /* we hold all buffers in this list */
osi_queueData_t *heldBufListEndp; /* first one */
- int reserving;
+ int reserving;
biop->scp = scp;
biop->offset = *offsetp;
- /* null out the list of buffers */
+ /* null out the list of buffers */
biop->bufListp = biop->bufListEndp = NULL;
- biop->reserved = 0;
+ biop->reserved = 0;
- /* first lookup the file's length, so we know when to stop */
+ /* first lookup the file's length, so we know when to stop */
code = cm_SyncOp(scp, NULL, up, reqp, 0,
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
if (code)
return code;
- /* copy out size, since it may change */
+ /* copy out size, since it may change */
fileSize = scp->serverLength;
lock_ReleaseMutex(&scp->mx);
- pageBase = *offsetp;
+ pageBase = *offsetp;
collected = pageBase.LowPart & (cm_chunkSize - 1);
heldBufListp = NULL;
heldBufListEndp = NULL;
- /*
- * Obtaining buffers can cause dirty buffers to be recycled, which
- * can cause a storeback, so cannot be done while we have buffers
- * reserved.
- *
- * To get around this, we get buffers twice. Before reserving buffers,
- * we obtain and release each one individually. After reserving
- * buffers, we try to obtain them again, but only by lookup, not by
- * recycling. If a buffer has gone away while we were waiting for
- * the others, we just use whatever buffers we already have.
- *
- * On entry to this function, we are already holding a buffer, so we
- * can't wait for reservation. So we call buf_TryReserveBuffers()
- * instead. Not only that, we can't really even call buf_Get(), for
- * the same reason. We can't avoid that, though. To avoid deadlock
- * we allow only one thread to be executing the buf_Get()-buf_Release()
- * sequence at a time.
- */
-
- /* first hold all buffers, since we can't hold any locks in buf_Get */
+ /*
+ * Obtaining buffers can cause dirty buffers to be recycled, which
+ * can cause a storeback, so cannot be done while we have buffers
+ * reserved.
+ *
+ * To get around this, we get buffers twice. Before reserving buffers,
+ * we obtain and release each one individually. After reserving
+ * buffers, we try to obtain them again, but only by lookup, not by
+ * recycling. If a buffer has gone away while we were waiting for
+ * the others, we just use whatever buffers we already have.
+ *
+ * On entry to this function, we are already holding a buffer, so we
+ * can't wait for reservation. So we call buf_TryReserveBuffers()
+ * instead. Not only that, we can't really even call buf_Get(), for
+ * the same reason. We can't avoid that, though. To avoid deadlock
+ * we allow only one thread to be executing the buf_Get()-buf_Release()
+ * sequence at a time.
+ */
+
+ /* first hold all buffers, since we can't hold any locks in buf_Get */
while (1) {
- /* stop at chunk boundary */
- if (collected >= cm_chunkSize) break;
+ /* stop at chunk boundary */
+ if (collected >= cm_chunkSize) break;
/* see if the next page would be past EOF */
if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize)) break;
- lock_ObtainMutex(&cm_bufGetMutex);
+ lock_ObtainMutex(&cm_bufGetMutex);
- code = buf_Get(scp, &pageBase, &tbp);
+ code = buf_Get(scp, &pageBase, &tbp);
if (code) {
- lock_ReleaseMutex(&cm_bufGetMutex);
- lock_ObtainMutex(&scp->mx);
- return code;
- }
+ lock_ReleaseMutex(&cm_bufGetMutex);
+ lock_ObtainMutex(&scp->mx);
+ return code;
+ }
- buf_Release(tbp);
+ buf_Release(tbp);
- lock_ReleaseMutex(&cm_bufGetMutex);
+ lock_ReleaseMutex(&cm_bufGetMutex);
toffset.HighPart = 0;
toffset.LowPart = buf_bufferSize;
pageBase = LargeIntegerAdd(toffset, pageBase);
- collected += buf_bufferSize;
+ collected += buf_bufferSize;
}
/* reserve a chunk's worth of buffers if possible */
- reserving = buf_TryReserveBuffers(cm_chunkSize / buf_bufferSize);
+ reserving = buf_TryReserveBuffers(cm_chunkSize / buf_bufferSize);
- pageBase = *offsetp;
+ pageBase = *offsetp;
collected = pageBase.LowPart & (cm_chunkSize - 1);
- /* now hold all buffers, if they are still there */
+ /* now hold all buffers, if they are still there */
while (1) {
- /* stop at chunk boundary */
- if (collected >= cm_chunkSize)
+ /* stop at chunk boundary */
+ if (collected >= cm_chunkSize)
break;
/* see if the next page would be past EOF */
break;
tbp = buf_Find(scp, &pageBase);
- if (!tbp)
+ if (!tbp)
break;
/* add the buffer to the list */
- qdp = osi_QDAlloc();
+ qdp = osi_QDAlloc();
osi_SetQData(qdp, tbp);
osi_QAdd((osi_queue_t **)&heldBufListp, &qdp->q);
if (!heldBufListEndp) heldBufListEndp = qdp;
- /* leave tbp held (from buf_Get) */
+ /* leave tbp held (from buf_Get) */
- if (!reserving)
+ if (!reserving)
break;
collected += buf_bufferSize;
}
/* look at each buffer, adding it into the list if it looks idle and
- * filled with old data. One special case: wait for idle if it is the
- * first buffer since we really need that one for our caller to make
- * any progress.
+ * filled with old data. One special case: wait for idle if it is the
+ * first buffer since we really need that one for our caller to make
+ * any progress.
*/
isFirst = 1;
collected = 0; /* now count how many we'll really use */
- for(tqdp = heldBufListEndp;
+ for (tqdp = heldBufListEndp;
tqdp;
- tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
- /* get a ptr to the held buffer */
- tbp = osi_GetQData(tqdp);
+ tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
+ /* get a ptr to the held buffer */
+ tbp = osi_GetQData(tqdp);
pageBase = tbp->offset;
- /* now lock the buffer lock */
- lock_ObtainMutex(&tbp->mx);
- lock_ObtainMutex(&scp->mx);
+ /* now lock the buffer lock */
+ lock_ObtainMutex(&tbp->mx);
+ lock_ObtainMutex(&scp->mx);
- /* don't bother fetching over data that is already current */
- if (tbp->dataVersion == scp->dataVersion) {
- /* we don't need this buffer, since it is current */
- lock_ReleaseMutex(&scp->mx);
+ /* don't bother fetching over data that is already current */
+ if (tbp->dataVersion == scp->dataVersion) {
+ /* we don't need this buffer, since it is current */
+ lock_ReleaseMutex(&scp->mx);
lock_ReleaseMutex(&tbp->mx);
break;
}
- flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_FETCHDATA
- | CM_SCACHESYNC_BUFLOCKED;
- if (!isFirst)
+ flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_FETCHDATA
+ | CM_SCACHESYNC_BUFLOCKED;
+ if (!isFirst)
flags |= CM_SCACHESYNC_NOWAIT;
- /* wait for the buffer to serialize, if required. Doesn't
- * release the scp or buffer lock(s) if NOWAIT is specified.
+ /* wait for the buffer to serialize, if required. Doesn't
+ * release the scp or buffer lock(s) if NOWAIT is specified.
*/
- code = cm_SyncOp(scp, tbp, up, reqp, 0, flags);
+ code = cm_SyncOp(scp, tbp, up, reqp, 0, flags);
if (code) {
- lock_ReleaseMutex(&scp->mx);
- lock_ReleaseMutex(&tbp->mx);
+ lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&tbp->mx);
break;
- }
+ }
- /* don't fetch over dirty buffers */
+ /* don't fetch over dirty buffers */
if (tbp->flags & CM_BUF_DIRTY) {
- cm_SyncOpDone(scp, tbp, flags);
- lock_ReleaseMutex(&scp->mx);
+ cm_SyncOpDone(scp, tbp, flags);
+ lock_ReleaseMutex(&scp->mx);
lock_ReleaseMutex(&tbp->mx);
break;
- }
+ }
- /* Release locks */
- lock_ReleaseMutex(&scp->mx);
- lock_ReleaseMutex(&tbp->mx);
+ /* Release locks */
+ lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&tbp->mx);
/* add the buffer to the list */
- qdp = osi_QDAlloc();
+ qdp = osi_QDAlloc();
osi_SetQData(qdp, tbp);
osi_QAdd((osi_queue_t **)&biop->bufListp, &qdp->q);
if (!biop->bufListEndp)
biop->bufListEndp = qdp;
- buf_Hold(tbp);
+ buf_Hold(tbp);
- /* from now on, a failure just stops our collection process, but
+ /* from now on, a failure just stops our collection process, but
* we still do the I/O to whatever we've already managed to collect.
*/
isFirst = 0;
}
/* now, we've held in biop->bufListp all the buffer's we're really
- * interested in. We also have holds left from heldBufListp, and we
- * now release those holds on the buffers.
+ * interested in. We also have holds left from heldBufListp, and we
+ * now release those holds on the buffers.
*/
- for(qdp = heldBufListp; qdp; qdp = tqdp) {
- tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
- tbp = osi_GetQData(qdp);
+ for (qdp = heldBufListp; qdp; qdp = tqdp) {
+ tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
+ tbp = osi_GetQData(qdp);
osi_QDFree(qdp);
buf_Release(tbp);
}
- /* Caller expects this */
- lock_ObtainMutex(&scp->mx);
+ /* Caller expects this */
+ lock_ObtainMutex(&scp->mx);
- /* if we got a failure setting up the first buffer, then we don't have
+ /* if we got a failure setting up the first buffer, then we don't have
* any side effects yet, and we also have failed an operation that the
* caller requires to make any progress. Give up now.
*/
if (code && isFirst) {
- buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
- return code;
- }
+ buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
+ return code;
+ }
/* otherwise, we're still OK, and should just return the I/O setup we've
* got.
*/
- biop->length = collected;
- biop->reserved = reserving;
+ biop->length = collected;
+ biop->reserved = reserving;
return 0;
}
*/
void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore)
{
- cm_scache_t *scp;
+ cm_scache_t *scp;
cm_buf_t *bufp;
osi_queueData_t *qdp;
osi_queueData_t *nqdp;
int flags;
- /* Give back reserved buffers */
- if (biop->reserved)
- buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
+ /* Give back reserved buffers */
+ if (biop->reserved)
+ buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
- flags = CM_SCACHESYNC_NEEDCALLBACK;
+ flags = CM_SCACHESYNC_NEEDCALLBACK;
if (isStore)
flags |= CM_SCACHESYNC_STOREDATA;
- else
- flags |= CM_SCACHESYNC_FETCHDATA;
+ else
+ flags |= CM_SCACHESYNC_FETCHDATA;
- scp = biop->scp;
+ scp = biop->scp;
for(qdp = biop->bufListp; qdp; qdp = nqdp) {
- /* lookup next guy first, since we're going to free this one */
- nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
+ /* lookup next guy first, since we're going to free this one */
+ nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
- /* extract buffer and free queue data */
+ /* extract buffer and free queue data */
bufp = osi_GetQData(qdp);
osi_QDFree(qdp);
/* now, mark I/O as done, unlock the buffer and release it */
- lock_ObtainMutex(&bufp->mx);
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainMutex(&bufp->mx);
+ lock_ObtainMutex(&scp->mx);
cm_SyncOpDone(scp, bufp, flags);
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&scp->mx);
- /* turn off writing and wakeup users */
+ /* turn off writing and wakeup users */
if (isStore) {
if (bufp->flags & CM_BUF_WAITING) {
- osi_Wakeup((long) bufp);
+ osi_Wakeup((long) bufp);
}
bufp->flags &= ~(CM_BUF_WAITING | CM_BUF_WRITING | CM_BUF_DIRTY);
}
* The scp is locked on return.
*/
long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *up,
- cm_req_t *reqp)
+ cm_req_t *reqp)
{
- long code;
+ long code;
long nbytes; /* bytes in transfer */
long rbytes; /* bytes in rx_Read call */
long temp;
struct rx_call *callp;
cm_bulkIO_t biod; /* bulk IO descriptor */
cm_conn_t *connp;
- int getroot;
- long t1, t2;
+ int getroot;
+ long t1, t2;
/* now, the buffer may or may not be filled with good data (buf_GetNew
* drops lots of locks, and may indeed return a properly initialized
#ifdef AFS_FREELANCE_CLIENT
- // yj: if they're trying to get the /afs directory, we need to
- // handle it differently, since it's local rather than on any
- // server
+ // yj: if they're trying to get the /afs directory, we need to
+ // handle it differently, since it's local rather than on any
+ // server
- getroot = (scp==cm_rootSCachep);
- if (getroot)
- osi_Log1(afsd_logp,"GetBuffer returns cm_rootSCachep=%x",cm_rootSCachep);
+ getroot = (scp==cm_rootSCachep);
+ if (getroot)
+ osi_Log1(afsd_logp,"GetBuffer returns cm_rootSCachep=%x",cm_rootSCachep);
#endif
- cm_AFSFidFromFid(&tfid, &scp->fid);
+ cm_AFSFidFromFid(&tfid, &scp->fid);
- code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, up, reqp);
- if (code) {
- /* couldn't even get the first page setup properly */
- osi_Log1(afsd_logp, "SetupFetchBIOD failure code %d", code);
+ code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, up, reqp);
+ if (code) {
+ /* couldn't even get the first page setup properly */
+ osi_Log1(afsd_logp, "SetupFetchBIOD failure code %d", code);
return code;
- }
+ }
/* once we get here, we have the callback in place, we know that no one
- * is fetching the data now. Check one last time that we still have
- * the wrong data, and then fetch it if we're still wrong.
- *
+ * is fetching the data now. Check one last time that we still have
+ * the wrong data, and then fetch it if we're still wrong.
+ *
* We can lose a race condition and end up with biod.length zero, in
- * which case we just retry.
+ * which case we just retry.
*/
if (bufp->dataVersion == scp->dataVersion || biod.length == 0) {
- osi_Log3(afsd_logp, "Bad DVs %d, %d or length 0x%x",
+ osi_Log3(afsd_logp, "Bad DVs %d, %d or length 0x%x",
bufp->dataVersion, scp->dataVersion, biod.length);
- if ((bufp->dataVersion == -1
- || bufp->dataVersion < scp->dataVersion)
+ if ((bufp->dataVersion == -1
+ || bufp->dataVersion < scp->dataVersion)
&& LargeIntegerGreaterThanOrEqualTo(bufp->offset,
scp->serverLength)) {
- if (bufp->dataVersion == -1)
- memset(bufp->datap, 0, buf_bufferSize);
- bufp->dataVersion = scp->dataVersion;
- }
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseBIOD(&biod, 0);
- lock_ObtainMutex(&scp->mx);
+ if (bufp->dataVersion == -1)
+ memset(bufp->datap, 0, buf_bufferSize);
+ bufp->dataVersion = scp->dataVersion;
+ }
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseBIOD(&biod, 0);
+ lock_ObtainMutex(&scp->mx);
return 0;
}
#ifdef AFS_FREELANCE_CLIENT
- // yj code
- // if getroot then we don't need to make any calls
- // just return fake data
+ // yj code
+ // if getroot then we don't need to make any calls
+ // just return fake data
- if (cm_freelanceEnabled && getroot) {
- // setup the fake status
- afsStatus.InterfaceVersion = 0x1;
- afsStatus.FileType = 0x2;
- afsStatus.LinkCount = scp->linkCount;
- afsStatus.Length = cm_fakeDirSize;
- afsStatus.DataVersion = cm_fakeDirVersion;
- afsStatus.Author = 0x1;
- afsStatus.Owner = 0x0;
- afsStatus.CallerAccess = 0x9;
- afsStatus.AnonymousAccess = 0x9;
- afsStatus.UnixModeBits = 0x1ff;
- afsStatus.ParentVnode = 0x1;
- afsStatus.ParentUnique = 0x1;
- afsStatus.ResidencyMask = 0;
- afsStatus.ClientModTime = FakeFreelanceModTime;
- afsStatus.ServerModTime = FakeFreelanceModTime;
- afsStatus.Group = 0;
- afsStatus.SyncCounter = 0;
- afsStatus.dataVersionHigh = 0;
+ if (cm_freelanceEnabled && getroot) {
+ // setup the fake status
+ afsStatus.InterfaceVersion = 0x1;
+ afsStatus.FileType = 0x2;
+ afsStatus.LinkCount = scp->linkCount;
+ afsStatus.Length = cm_fakeDirSize;
+ afsStatus.DataVersion = cm_fakeDirVersion;
+ afsStatus.Author = 0x1;
+ afsStatus.Owner = 0x0;
+ afsStatus.CallerAccess = 0x9;
+ afsStatus.AnonymousAccess = 0x9;
+ afsStatus.UnixModeBits = 0x1ff;
+ afsStatus.ParentVnode = 0x1;
+ afsStatus.ParentUnique = 0x1;
+ afsStatus.ResidencyMask = 0;
+ afsStatus.ClientModTime = FakeFreelanceModTime;
+ afsStatus.ServerModTime = FakeFreelanceModTime;
+ afsStatus.Group = 0;
+ afsStatus.SyncCounter = 0;
+ afsStatus.dataVersionHigh = 0;
- // once we're done setting up the status info,
- // we just fill the buffer pages with fakedata
- // from cm_FakeRootDir. Extra pages are set to
- // 0.
+ // once we're done setting up the status info,
+ // we just fill the buffer pages with fakedata
+ // from cm_FakeRootDir. Extra pages are set to
+ // 0.
- lock_ObtainMutex(&cm_Freelance_Lock);
- t1 = bufp->offset.LowPart;
- qdp = biod.bufListEndp;
- while (qdp) {
- tbufp = osi_GetQData(qdp);
- bufferp=tbufp->datap;
- memset(bufferp, 0, buf_bufferSize);
- t2 = cm_fakeDirSize - t1;
- if (t2>buf_bufferSize) t2=buf_bufferSize;
- if (t2 > 0) {
- memcpy(bufferp, cm_FakeRootDir+t1, t2);
- } else {
- t2 = 0;
- }
- t1+=t2;
- qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
-
- }
- lock_ReleaseMutex(&cm_Freelance_Lock);
+ lock_ObtainMutex(&cm_Freelance_Lock);
+ t1 = bufp->offset.LowPart;
+ qdp = biod.bufListEndp;
+ while (qdp) {
+ tbufp = osi_GetQData(qdp);
+ bufferp=tbufp->datap;
+ memset(bufferp, 0, buf_bufferSize);
+ t2 = cm_fakeDirSize - t1;
+ if (t2>buf_bufferSize) t2=buf_bufferSize;
+ if (t2 > 0) {
+ memcpy(bufferp, cm_FakeRootDir+t1, t2);
+ } else {
+ t2 = 0;
+ }
+ t1+=t2;
+ qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
+
+ }
+ lock_ReleaseMutex(&cm_Freelance_Lock);
- // once we're done, we skip over the part of the
- // code that does the ACTUAL fetching of data for
- // real files
+ // once we're done, we skip over the part of the
+ // code that does the ACTUAL fetching of data for
+ // real files
- goto fetchingcompleted;
- }
+ goto fetchingcompleted;
+ }
#endif /* AFS_FREELANCE_CLIENT */
/* now make the call */
do {
- code = cm_Conn(&scp->fid, up, reqp, &connp);
+ code = cm_Conn(&scp->fid, up, reqp, &connp);
if (code)
continue;
- callp = rx_NewCall(connp->callp);
+ lock_ObtainMutex(&connp->mx);
+ callp = rx_NewCall(connp->callp);
- osi_Log3(afsd_logp, "CALL FetchData vp %x, off 0x%x, size 0x%x",
- (long) scp, biod.offset.LowPart, biod.length);
+ osi_Log3(afsd_logp, "CALL FetchData vp %x, off 0x%x, size 0x%x",
+ (long) scp, biod.offset.LowPart, biod.length);
code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
biod.length);
- /* now copy the data out of the pipe and put it in the buffer */
- temp = rx_Read(callp, (char *)&nbytes, 4);
- if (temp == 4) {
- nbytes = ntohl(nbytes);
+ /* now copy the data out of the pipe and put it in the buffer */
+ temp = rx_Read(callp, (char *)&nbytes, 4);
+ if (temp == 4) {
+ nbytes = ntohl(nbytes);
if (nbytes > biod.length)
code = (callp->error < 0) ? callp->error : -1;
}
else
code = (callp->error < 0) ? callp->error : -1;
- if (code == 0) {
+ if (code == 0) {
qdp = biod.bufListEndp;
if (qdp) {
- tbufp = osi_GetQData(qdp);
+ tbufp = osi_GetQData(qdp);
bufferp = tbufp->datap;
}
else
bufferp = NULL;
- /* fill nbytes of data from the pipe into the pages.
- * When we stop, qdp will point at the last page we're
- * dealing with, and bufferp will tell us where we
- * stopped. We'll need this info below when we clear
- * the remainder of the last page out (and potentially
+ /* fill nbytes of data from the pipe into the pages.
+ * When we stop, qdp will point at the last page we're
+ * dealing with, and bufferp will tell us where we
+ * stopped. We'll need this info below when we clear
+ * the remainder of the last page out (and potentially
* clear later pages out, if we fetch past EOF).
*/
- while(nbytes > 0) {
- /* assert that there are still more buffers;
- * our check above for nbytes being less than
- * biod.length should ensure this.
+ while (nbytes > 0) {
+ /* assert that there are still more buffers;
+ * our check above for nbytes being less than
+ * biod.length should ensure this.
*/
- osi_assert(bufferp != NULL);
+ osi_assert(bufferp != NULL);
- /* read rbytes of data */
+ /* read rbytes of data */
rbytes = (nbytes > buf_bufferSize? buf_bufferSize : nbytes);
temp = rx_Read(callp, bufferp, rbytes);
if (temp < rbytes) {
code = (callp->error < 0) ? callp->error : -1;
break;
- }
-
- /* allow read-while-fetching.
- * if this is the last buffer, clear the
- * PREFETCHING flag, so the reader waiting for
- * this buffer will start a prefetch.
- */
- tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
- lock_ObtainMutex(&scp->mx);
- if (scp->flags & CM_SCACHEFLAG_WAITING) {
- scp->flags &= ~CM_SCACHEFLAG_WAITING;
- osi_Wakeup((long) &scp->flags);
- }
- if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
- *cpffp = 1;
- cm_ClearPrefetchFlag(0, scp, &biod.offset);
- }
- lock_ReleaseMutex(&scp->mx);
-
- /* and adjust counters */
+ }
+
+ /* allow read-while-fetching.
+ * if this is the last buffer, clear the
+ * PREFETCHING flag, so the reader waiting for
+ * this buffer will start a prefetch.
+ */
+ tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
+ lock_ObtainMutex(&scp->mx);
+ if (scp->flags & CM_SCACHEFLAG_WAITING) {
+ scp->flags &= ~CM_SCACHEFLAG_WAITING;
+ osi_Wakeup((long) &scp->flags);
+ }
+ if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
+ *cpffp = 1;
+ cm_ClearPrefetchFlag(0, scp, &biod.offset);
+ }
+ lock_ReleaseMutex(&scp->mx);
+
+ /* and adjust counters */
nbytes -= temp;
-
+
/* and move to the next buffer */
- if (nbytes != 0) {
+ if (nbytes != 0) {
qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
if (qdp) {
- tbufp = osi_GetQData(qdp);
+ tbufp = osi_GetQData(qdp);
bufferp = tbufp->datap;
}
else
bufferp = NULL;
- } else
+ } else
bufferp += temp;
}
/* zero out remainder of last pages, in case we are
- * fetching past EOF. We were fetching an integral #
- * of pages, but stopped, potentially in the middle of
- * a page. Zero the remainder of that page, and then
- * all of the rest of the pages.
+ * fetching past EOF. We were fetching an integral #
+ * of pages, but stopped, potentially in the middle of
+ * a page. Zero the remainder of that page, and then
+ * all of the rest of the pages.
*/
- /* bytes fetched */
+ /* bytes fetched */
rbytes = bufferp - tbufp->datap;
- /* bytes left to zero */
+ /* bytes left to zero */
rbytes = buf_bufferSize - rbytes;
while(qdp) {
if (rbytes != 0)
- memset(bufferp, 0, rbytes);
+ memset(bufferp, 0, rbytes);
qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
- if (qdp == NULL)
+ if (qdp == NULL)
break;
- tbufp = osi_GetQData(qdp);
+ tbufp = osi_GetQData(qdp);
bufferp = tbufp->datap;
- /* bytes to clear in this page */
- rbytes = buf_bufferSize;
- }
- }
+ /* bytes to clear in this page */
+ rbytes = buf_bufferSize;
+ }
+ }
- if (code == 0)
- code = EndRXAFS_FetchData(callp, &afsStatus, &callback, &volSync);
- else
- osi_Log0(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error");
+ if (code == 0)
+ code = EndRXAFS_FetchData(callp, &afsStatus, &callback, &volSync);
+ else
+ osi_Log0(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error");
code = rx_EndCall(callp, code);
if (code == RXKADUNKNOWNKEY)
osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
osi_Log0(afsd_logp, "CALL FetchData DONE");
- } while (cm_Analyze(connp, up, reqp, &scp->fid, &volSync, NULL, NULL, code));
+ lock_ReleaseMutex(&connp->mx);
+
+ } while (cm_Analyze(connp, up, reqp, &scp->fid, &volSync, NULL, NULL, code));
fetchingcompleted:
code = cm_MapRPCError(code, reqp);
lock_ObtainMutex(&scp->mx);
- /* we know that no one else has changed the buffer, since we still have
- * the fetching flag on the buffers, and we have the scp locked again.
- * Copy in the version # into the buffer if we got code 0 back from the
- * read.
+ /* we know that no one else has changed the buffer, since we still have
+ * the fetching flag on the buffers, and we have the scp locked again.
+ * Copy in the version # into the buffer if we got code 0 back from the
+ * read.
*/
- if (code == 0) {
- for(qdp = biod.bufListp;
- qdp;
- qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
- tbufp = osi_GetQData(qdp);
+ if (code == 0) {
+ for(qdp = biod.bufListp;
+ qdp;
+ qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
+ tbufp = osi_GetQData(qdp);
tbufp->dataVersion = afsStatus.DataVersion;
#ifdef DISKCACHE95
}
}
- /* release scatter/gather I/O structure (buffers, locks) */
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseBIOD(&biod, 0);
- lock_ObtainMutex(&scp->mx);
+ /* release scatter/gather I/O structure (buffers, locks) */
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseBIOD(&biod, 0);
+ lock_ObtainMutex(&scp->mx);
if (code == 0)
cm_MergeStatus(scp, &afsStatus, &volSync, up, 0);
- return code;
+ return code;
}
extern afs_int32 cryptall;
extern char cm_NetbiosName[];
+extern void afsi_log(char *pattern, ...);
+
void cm_InitIoctl(void)
{
- lock_InitializeMutex(&cm_Afsdsbmt_Lock, "AFSDSBMT.INI Access Lock");
+ lock_InitializeMutex(&cm_Afsdsbmt_Lock, "AFSDSBMT.INI Access Lock");
}
long cm_FlushFile(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
- long code;
+ long code;
- lock_ObtainWrite(&scp->bufCreateLock);
- code = buf_FlushCleanPages(scp, userp, reqp);
+ lock_ObtainWrite(&scp->bufCreateLock);
+ code = buf_FlushCleanPages(scp, userp, reqp);
- lock_ObtainMutex(&scp->mx);
- scp->cbServerp = NULL;
- scp->cbExpires = 0;
- lock_ReleaseMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
+ scp->cbServerp = NULL;
+ scp->cbExpires = 0;
+ lock_ReleaseMutex(&scp->mx);
- lock_ReleaseWrite(&scp->bufCreateLock);
- cm_dnlcPurgedp(scp);
+ lock_ReleaseWrite(&scp->bufCreateLock);
+ cm_dnlcPurgedp(scp);
- return code;
+ return code;
}
/*
*/
void cm_ResetACLCache(cm_user_t *userp)
{
- cm_scache_t *scp;
- int hash;
-
- lock_ObtainWrite(&cm_scacheLock);
- for (hash=0; hash < cm_hashTableSize; hash++) {
- for (scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
- scp->refCount++;
- lock_ReleaseWrite(&cm_scacheLock);
- lock_ObtainMutex(&scp->mx);
- cm_InvalidateACLUser(scp, userp);
- lock_ReleaseMutex(&scp->mx);
- lock_ObtainWrite(&cm_scacheLock);
- scp->refCount--;
- }
- }
- lock_ReleaseWrite(&cm_scacheLock);
-}
+ cm_scache_t *scp;
+ int hash;
+
+ lock_ObtainWrite(&cm_scacheLock);
+ for (hash=0; hash < cm_hashTableSize; hash++) {
+ for (scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
+ scp->refCount++;
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ObtainMutex(&scp->mx);
+ cm_InvalidateACLUser(scp, userp);
+ lock_ReleaseMutex(&scp->mx);
+ lock_ObtainWrite(&cm_scacheLock);
+ scp->refCount--;
+ }
+ }
+ lock_ReleaseWrite(&cm_scacheLock);
+}
/*
* TranslateExtendedChars - This is a fix for TR 54482.
long cm_ParseIoctlPath(smb_ioctl_t *ioctlp, cm_user_t *userp, cm_req_t *reqp,
cm_scache_t **scpp)
{
- long code;
- cm_scache_t *substRootp;
+ long code;
+ cm_scache_t *substRootp;
char * relativePath = ioctlp->inDatap;
/* This is usually the file name, but for StatMountPoint it is the path. */
* \\netbios-name\submount\path\.
* \\netbios-name\submount\path\file
*/
- TranslateExtendedChars(relativePath);
+ TranslateExtendedChars(relativePath);
if (relativePath[0] == relativePath[1] &&
relativePath[1] == '\\' &&
CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
userp, sharePath, reqp, &substRootp);
free(sharePath);
- if (code) return code;
+ if (code)
+ return code;
- code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
- userp, NULL, reqp, scpp);
- if (code) return code;
+ code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
+ userp, NULL, reqp, scpp);
+ if (code)
+ return code;
} else {
/* otherwise, treat the name as a cellname mounted off the afs root.
- * This requires that we reconstruct the shareName string with
- * leading and trailing slashes.
- */
+ * This requires that we reconstruct the shareName string with
+ * leading and trailing slashes.
+ */
p = relativePath + 2 + strlen(cm_NetbiosName) + 1;
- if ( !_strnicmp("all", p, 3) )
- p += 4;
-
- shareName[0] = '/';
- for (i = 1; *p && *p != '\\'; i++,p++ ) {
- shareName[i] = *p;
- }
- p++; /* skip past trailing slash */
- shareName[i++] = '/'; /* add trailing slash */
- shareName[i] = 0; /* terminate string */
-
-
- code = cm_NameI(cm_rootSCachep, ioctlp->prefix->data,
+ if ( !_strnicmp("all", p, 3) )
+ p += 4;
+
+ shareName[0] = '/';
+ for (i = 1; *p && *p != '\\'; i++,p++ ) {
+ shareName[i] = *p;
+ }
+ p++; /* skip past trailing slash */
+ shareName[i++] = '/'; /* add trailing slash */
+ shareName[i] = 0; /* terminate string */
+
+
+ code = cm_NameI(cm_rootSCachep, ioctlp->prefix->data,
CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
userp, shareName, reqp, &substRootp);
- if (code) return code;
+ if (code)
+ return code;
- code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
- userp, NULL, reqp, scpp);
- if (code) return code;
+ code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
+ userp, NULL, reqp, scpp);
+ if (code)
+ return code;
}
} else {
code = cm_NameI(cm_rootSCachep, ioctlp->prefix->data,
CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
userp, ioctlp->tidPathp, reqp, &substRootp);
- if (code) return code;
+ if (code)
+ return code;
code = cm_NameI(substRootp, relativePath, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
userp, NULL, reqp, scpp);
- if (code) return code;
+ if (code)
+ return code;
}
- /* # of bytes of path */
+ /* # of bytes of path */
code = strlen(ioctlp->inDatap) + 1;
ioctlp->inDatap += code;
/* This is usually nothing, but for StatMountPoint it is the file name. */
TranslateExtendedChars(ioctlp->inDatap);
- /* and return success */
+ /* and return success */
return 0;
}
void cm_SkipIoctlPath(smb_ioctl_t *ioctlp)
{
- long temp;
+ long temp;
- temp = strlen(ioctlp->inDatap) + 1;
- ioctlp->inDatap += temp;
-}
+ temp = strlen(ioctlp->inDatap) + 1;
+ ioctlp->inDatap += temp;
+}
/* format the specified path to look like "/afs/<cellname>/usr", by
*/
void cm_NormalizeAfsPath (char *outpathp, char *inpathp)
{
- char *cp;
+ char *cp;
char bslash_mountRoot[256];
strncpy(bslash_mountRoot, cm_mountRoot, sizeof(bslash_mountRoot) - 1);
bslash_mountRoot[0] = '\\';
if (!strnicmp (inpathp, cm_mountRoot, strlen(cm_mountRoot)))
- lstrcpy (outpathp, inpathp);
- else if (!strnicmp (inpathp, bslash_mountRoot, strlen(bslash_mountRoot)))
- lstrcpy (outpathp, inpathp);
- else if ((inpathp[0] == '/') || (inpathp[0] == '\\'))
- sprintf (outpathp, "%s%s", cm_mountRoot, inpathp);
- else // inpathp looks like "<cell>/usr"
- sprintf (outpathp, "%s/%s", cm_mountRoot, inpathp);
-
- for (cp = outpathp; *cp != 0; ++cp) {
- if (*cp == '\\')
- *cp = '/';
- }
-
- if (strlen(outpathp) && (outpathp[strlen(outpathp)-1] == '/')) {
- outpathp[strlen(outpathp)-1] = 0;
- }
-
- if (!strcmpi (outpathp, cm_mountRoot)) {
+ lstrcpy (outpathp, inpathp);
+ else if (!strnicmp (inpathp, bslash_mountRoot, strlen(bslash_mountRoot)))
+ lstrcpy (outpathp, inpathp);
+ else if ((inpathp[0] == '/') || (inpathp[0] == '\\'))
+ sprintf (outpathp, "%s%s", cm_mountRoot, inpathp);
+ else // inpathp looks like "<cell>/usr"
+ sprintf (outpathp, "%s/%s", cm_mountRoot, inpathp);
+
+ for (cp = outpathp; *cp != 0; ++cp) {
+ if (*cp == '\\')
+ *cp = '/';
+ }
+
+ if (strlen(outpathp) && (outpathp[strlen(outpathp)-1] == '/')) {
+ outpathp[strlen(outpathp)-1] = 0;
+ }
+
+ if (!strcmpi (outpathp, cm_mountRoot)) {
strcpy (outpathp, cm_mountRoot);
- }
+ }
}
/* parse the passed-in file name and do a namei on its parent. If we fail,
long cm_ParseIoctlParent(smb_ioctl_t *ioctlp, cm_user_t *userp, cm_req_t *reqp,
cm_scache_t **scpp, char *leafp)
{
- long code;
+ long code;
char tbuffer[1024];
char *tp, *jp;
- cm_scache_t *substRootp;
+ cm_scache_t *substRootp;
- strcpy(tbuffer, ioctlp->inDatap);
+ strcpy(tbuffer, ioctlp->inDatap);
tp = strrchr(tbuffer, '\\');
- jp = strrchr(tbuffer, '/');
- if (!tp)
- tp = jp;
- else if (jp && (tp - tbuffer) < (jp - tbuffer))
- tp = jp;
+ jp = strrchr(tbuffer, '/');
+ if (!tp)
+ tp = jp;
+ else if (jp && (tp - tbuffer) < (jp - tbuffer))
+ tp = jp;
if (!tp) {
strcpy(tbuffer, "\\");
if (leafp)
strcpy(leafp, ioctlp->inDatap);
- }
+ }
else {
*tp = 0;
if (leafp)
strcpy(leafp, tp+1);
- }
+ }
if (tbuffer[0] == tbuffer[1] &&
tbuffer[1] == '\\' &&
free(sharePath);
if (code) return code;
- code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
- userp, NULL, reqp, scpp);
- if (code) return code;
+ code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
+ userp, NULL, reqp, scpp);
+ if (code) return code;
} else {
/* otherwise, treat the name as a cellname mounted off the afs root.
- * This requires that we reconstruct the shareName string with
- * leading and trailing slashes.
- */
+ * This requires that we reconstruct the shareName string with
+ * leading and trailing slashes.
+ */
p = tbuffer + 2 + strlen(cm_NetbiosName) + 1;
- if ( !_strnicmp("all", p, 3) )
- p += 4;
-
- shareName[0] = '/';
- for (i = 1; *p && *p != '\\'; i++,p++ ) {
- shareName[i] = *p;
- }
- p++; /* skip past trailing slash */
- shareName[i++] = '/'; /* add trailing slash */
- shareName[i] = 0; /* terminate string */
-
- code = cm_NameI(cm_rootSCachep, ioctlp->prefix->data,
+ if ( !_strnicmp("all", p, 3) )
+ p += 4;
+
+ shareName[0] = '/';
+ for (i = 1; *p && *p != '\\'; i++,p++ ) {
+ shareName[i] = *p;
+ }
+ p++; /* skip past trailing slash */
+ shareName[i++] = '/'; /* add trailing slash */
+ shareName[i] = 0; /* terminate string */
+
+ code = cm_NameI(cm_rootSCachep, ioctlp->prefix->data,
CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
userp, shareName, reqp, &substRootp);
if (code) return code;
- code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
- userp, NULL, reqp, scpp);
- if (code) return code;
+ code = cm_NameI(substRootp, p, CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
+ userp, NULL, reqp, scpp);
+ if (code) return code;
}
} else {
code = cm_NameI(cm_rootSCachep, ioctlp->prefix->data,
- CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
- userp, ioctlp->tidPathp, reqp, &substRootp);
+ CM_FLAG_CASEFOLD | CM_FLAG_FOLLOW,
+ userp, ioctlp->tidPathp, reqp, &substRootp);
if (code) return code;
code = cm_NameI(substRootp, tbuffer, CM_FLAG_FOLLOW,
- userp, NULL, reqp, scpp);
+ userp, NULL, reqp, scpp);
if (code) return code;
}
- /* # of bytes of path */
- code = strlen(ioctlp->inDatap) + 1;
- ioctlp->inDatap += code;
+ /* # of bytes of path */
+ code = strlen(ioctlp->inDatap) + 1;
+ ioctlp->inDatap += code;
- /* and return success */
- return 0;
+ /* and return success */
+ return 0;
}
long cm_IoctlGetACL(smb_ioctl_t *ioctlp, cm_user_t *userp)
{
- cm_conn_t *connp;
- cm_scache_t *scp;
- AFSOpaque acl;
- AFSFetchStatus fileStatus;
- AFSVolSync volSync;
- long code;
- AFSFid fid;
- int tlen;
- cm_req_t req;
+ cm_conn_t *connp;
+ cm_scache_t *scp;
+ AFSOpaque acl;
+ AFSFetchStatus fileStatus;
+ AFSVolSync volSync;
+ long code;
+ AFSFid fid;
+ int tlen;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
-
- /* now make the get acl call */
- fid.Volume = scp->fid.volume;
- fid.Vnode = scp->fid.vnode;
- fid.Unique = scp->fid.unique;
- do {
- acl.AFSOpaque_val = ioctlp->outDatap;
- acl.AFSOpaque_len = 0;
- code = cm_Conn(&scp->fid, userp, &req, &connp);
- if (code) continue;
-
- code = RXAFS_FetchACL(connp->callp, &fid, &acl, &fileStatus, &volSync);
- } while (cm_Analyze(connp, userp, &req, &scp->fid, &volSync, NULL, NULL, code));
- code = cm_MapRPCError(code, &req);
- cm_ReleaseSCache(scp);
-
- if (code) return code;
-
- /* skip over return data */
- tlen = strlen(ioctlp->outDatap) + 1;
- ioctlp->outDatap += tlen;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
- /* and return success */
- return 0;
+ /* now make the get acl call */
+ fid.Volume = scp->fid.volume;
+ fid.Vnode = scp->fid.vnode;
+ fid.Unique = scp->fid.unique;
+ do {
+ acl.AFSOpaque_val = ioctlp->outDatap;
+ acl.AFSOpaque_len = 0;
+ code = cm_Conn(&scp->fid, userp, &req, &connp);
+ if (code) continue;
+
+ lock_ObtainMutex(&connp->mx);
+ code = RXAFS_FetchACL(connp->callp, &fid, &acl, &fileStatus, &volSync);
+ lock_ReleaseMutex(&connp->mx);
+ } while (cm_Analyze(connp, userp, &req, &scp->fid, &volSync, NULL, NULL, code));
+ code = cm_MapRPCError(code, &req);
+ cm_ReleaseSCache(scp);
+
+ if (code) return code;
+
+ /* skip over return data */
+ tlen = strlen(ioctlp->outDatap) + 1;
+ ioctlp->outDatap += tlen;
+
+ /* and return success */
+ return 0;
}
long cm_IoctlGetFileCellName(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long code;
- cm_scache_t *scp;
- cm_cell_t *cellp;
- cm_req_t req;
+ long code;
+ cm_scache_t *scp;
+ cm_cell_t *cellp;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
-
- cellp = cm_FindCellByID(scp->fid.cell);
- if (cellp) {
- strcpy(ioctlp->outDatap, cellp->namep);
- ioctlp->outDatap += strlen(ioctlp->outDatap) + 1;
- code = 0;
- }
- else code = CM_ERROR_NOSUCHCELL;
-
- cm_ReleaseSCache(scp);
- return code;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
+
+ cellp = cm_FindCellByID(scp->fid.cell);
+ if (cellp) {
+ strcpy(ioctlp->outDatap, cellp->namep);
+ ioctlp->outDatap += strlen(ioctlp->outDatap) + 1;
+ code = 0;
+ }
+ else code = CM_ERROR_NOSUCHCELL;
+
+ cm_ReleaseSCache(scp);
+ return code;
}
long cm_IoctlSetACL(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_conn_t *connp;
- cm_scache_t *scp;
- AFSOpaque acl;
- AFSFetchStatus fileStatus;
- AFSVolSync volSync;
- long code;
- AFSFid fid;
- cm_req_t req;
+ cm_conn_t *connp;
+ cm_scache_t *scp;
+ AFSOpaque acl;
+ AFSFetchStatus fileStatus;
+ AFSVolSync volSync;
+ long code;
+ AFSFid fid;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
- /* now make the get acl call */
- fid.Volume = scp->fid.volume;
- fid.Vnode = scp->fid.vnode;
- fid.Unique = scp->fid.unique;
- do {
- acl.AFSOpaque_val = ioctlp->inDatap;
- acl.AFSOpaque_len = strlen(ioctlp->inDatap)+1;
- code = cm_Conn(&scp->fid, userp, &req, &connp);
- if (code) continue;
-
- code = RXAFS_StoreACL(connp->callp, &fid, &acl, &fileStatus, &volSync);
- } while (cm_Analyze(connp, userp, &req, &scp->fid, &volSync, NULL, NULL, code));
- code = cm_MapRPCError(code, &req);
-
- /* invalidate cache info, since we just trashed the ACL cache */
- lock_ObtainMutex(&scp->mx);
- cm_DiscardSCache(scp);
- lock_ReleaseMutex(&scp->mx);
-
- cm_ReleaseSCache(scp);
-
- return code;
+    /* now make the store acl call */
+ fid.Volume = scp->fid.volume;
+ fid.Vnode = scp->fid.vnode;
+ fid.Unique = scp->fid.unique;
+ do {
+ acl.AFSOpaque_val = ioctlp->inDatap;
+ acl.AFSOpaque_len = strlen(ioctlp->inDatap)+1;
+ code = cm_Conn(&scp->fid, userp, &req, &connp);
+ if (code) continue;
+
+ lock_ObtainMutex(&connp->mx);
+ code = RXAFS_StoreACL(connp->callp, &fid, &acl, &fileStatus, &volSync);
+ lock_ReleaseMutex(&connp->mx);
+ } while (cm_Analyze(connp, userp, &req, &scp->fid, &volSync, NULL, NULL, code));
+ code = cm_MapRPCError(code, &req);
+
+ /* invalidate cache info, since we just trashed the ACL cache */
+ lock_ObtainMutex(&scp->mx);
+ cm_DiscardSCache(scp);
+ lock_ReleaseMutex(&scp->mx);
+
+ cm_ReleaseSCache(scp);
+
+ return code;
}
long cm_IoctlFlushVolume(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long code;
- cm_scache_t *scp;
- unsigned long volume;
- int i;
- cm_req_t req;
+ long code;
+ cm_scache_t *scp;
+ unsigned long volume;
+ int i;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
- volume = scp->fid.volume;
- cm_ReleaseSCache(scp);
-
- lock_ObtainWrite(&cm_scacheLock);
- for(i=0; i<cm_hashTableSize; i++) {
- for(scp = cm_hashTablep[i]; scp; scp = scp->nextp) {
- if (scp->fid.volume == volume) {
- scp->refCount++;
- lock_ReleaseWrite(&cm_scacheLock);
-
- /* now flush the file */
- cm_FlushFile(scp, userp, &req);
+ volume = scp->fid.volume;
+ cm_ReleaseSCache(scp);
- lock_ObtainWrite(&cm_scacheLock);
- scp->refCount--;
- }
- }
+ lock_ObtainWrite(&cm_scacheLock);
+ for (i=0; i<cm_hashTableSize; i++) {
+ for (scp = cm_hashTablep[i]; scp; scp = scp->nextp) {
+ if (scp->fid.volume == volume) {
+ scp->refCount++;
+ lock_ReleaseWrite(&cm_scacheLock);
+
+ /* now flush the file */
+ code = cm_FlushFile(scp, userp, &req);
+ if ( code )
+ afsi_log("cm_FlushFile returns error: [%x]",code);
+ lock_ObtainWrite(&cm_scacheLock);
+ scp->refCount--;
+ }
}
- lock_ReleaseWrite(&cm_scacheLock);
+ }
+ lock_ReleaseWrite(&cm_scacheLock);
- return code;
+ return code;
}
long cm_IoctlFlushFile(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long code;
- cm_scache_t *scp;
- cm_req_t req;
+ long code;
+ cm_scache_t *scp;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
- cm_FlushFile(scp, userp, &req);
- cm_ReleaseSCache(scp);
+ cm_FlushFile(scp, userp, &req);
+ cm_ReleaseSCache(scp);
- return 0;
+ return 0;
}
long cm_IoctlSetVolumeStatus(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_scache_t *scp;
- char volName[32];
- char offLineMsg[256];
- char motd[256];
- cm_conn_t *tcp;
- long code;
- AFSFetchVolumeStatus volStat;
- AFSStoreVolumeStatus storeStat;
- cm_volume_t *tvp;
- char *cp;
- cm_cell_t *cellp;
- cm_req_t req;
-
- cm_InitReq(&req);
-
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
+ cm_scache_t *scp;
+ char volName[32];
+ char offLineMsg[256];
+ char motd[256];
+ cm_conn_t *tcp;
+ long code;
+ AFSFetchVolumeStatus volStat;
+ AFSStoreVolumeStatus storeStat;
+ cm_volume_t *tvp;
+ char *cp;
+ cm_cell_t *cellp;
+ cm_req_t req;
- cellp = cm_FindCellByID(scp->fid.cell);
- osi_assert(cellp);
+ cm_InitReq(&req);
- if (scp->flags & CM_SCACHEFLAG_RO) {
- cm_ReleaseSCache(scp);
- return CM_ERROR_READONLY;
- }
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
- code = cm_GetVolumeByID(cellp, scp->fid.volume, userp, &req, &tvp);
- if (code) {
- cm_ReleaseSCache(scp);
- return code;
- }
-
- /* Copy the junk out, using cp as a roving pointer. */
- cp = ioctlp->inDatap;
- memcpy((char *)&volStat, cp, sizeof(AFSFetchVolumeStatus));
- cp += sizeof(AFSFetchVolumeStatus);
- strcpy(volName, cp);
- cp += strlen(volName)+1;
- strcpy(offLineMsg, cp);
- cp += strlen(offLineMsg)+1;
- strcpy(motd, cp);
- storeStat.Mask = 0;
- if (volStat.MinQuota != -1) {
- storeStat.MinQuota = volStat.MinQuota;
- storeStat.Mask |= AFS_SETMINQUOTA;
- }
- if (volStat.MaxQuota != -1) {
- storeStat.MaxQuota = volStat.MaxQuota;
- storeStat.Mask |= AFS_SETMAXQUOTA;
- }
-
- do {
- code = cm_Conn(&scp->fid, userp, &req, &tcp);
- if (code) continue;
-
- code = RXAFS_SetVolumeStatus(tcp->callp, scp->fid.volume,
- &storeStat, volName, offLineMsg, motd);
- } while (cm_Analyze(tcp, userp, &req, &scp->fid, NULL, NULL, NULL, code));
- code = cm_MapRPCError(code, &req);
-
- /* return on failure */
- cm_ReleaseSCache(scp);
- if (code) {
- return code;
- }
-
- /* we are sending parms back to make compat. with prev system. should
- * change interface later to not ask for current status, just set
- * new status
- */
- cp = ioctlp->outDatap;
- memcpy(cp, (char *)&volStat, sizeof(VolumeStatus));
- cp += sizeof(VolumeStatus);
- strcpy(cp, volName);
- cp += strlen(volName)+1;
- strcpy(cp, offLineMsg);
- cp += strlen(offLineMsg)+1;
- strcpy(cp, motd);
- cp += strlen(motd)+1;
-
- /* now return updated return data pointer */
- ioctlp->outDatap = cp;
-
- return 0;
-}
+ cellp = cm_FindCellByID(scp->fid.cell);
+ osi_assert(cellp);
+
+ if (scp->flags & CM_SCACHEFLAG_RO) {
+ cm_ReleaseSCache(scp);
+ return CM_ERROR_READONLY;
+ }
+
+ code = cm_GetVolumeByID(cellp, scp->fid.volume, userp, &req, &tvp);
+ if (code) {
+ cm_ReleaseSCache(scp);
+ return code;
+ }
+
+ /* Copy the junk out, using cp as a roving pointer. */
+ cp = ioctlp->inDatap;
+ memcpy((char *)&volStat, cp, sizeof(AFSFetchVolumeStatus));
+ cp += sizeof(AFSFetchVolumeStatus);
+ strcpy(volName, cp);
+ cp += strlen(volName)+1;
+ strcpy(offLineMsg, cp);
+ cp += strlen(offLineMsg)+1;
+ strcpy(motd, cp);
+ storeStat.Mask = 0;
+ if (volStat.MinQuota != -1) {
+ storeStat.MinQuota = volStat.MinQuota;
+ storeStat.Mask |= AFS_SETMINQUOTA;
+ }
+ if (volStat.MaxQuota != -1) {
+ storeStat.MaxQuota = volStat.MaxQuota;
+ storeStat.Mask |= AFS_SETMAXQUOTA;
+ }
+
+ do {
+ code = cm_Conn(&scp->fid, userp, &req, &tcp);
+ if (code) continue;
+
+ lock_ObtainMutex(&tcp->mx);
+ code = RXAFS_SetVolumeStatus(tcp->callp, scp->fid.volume,
+ &storeStat, volName, offLineMsg, motd);
+ lock_ReleaseMutex(&tcp->mx);
+ } while (cm_Analyze(tcp, userp, &req, &scp->fid, NULL, NULL, NULL, code));
+ code = cm_MapRPCError(code, &req);
+
+ /* return on failure */
+ cm_ReleaseSCache(scp);
+ if (code) {
+ return code;
+ }
+
+ /* we are sending parms back to make compat. with prev system. should
+ * change interface later to not ask for current status, just set
+ * new status
+ */
+ cp = ioctlp->outDatap;
+ memcpy(cp, (char *)&volStat, sizeof(VolumeStatus));
+ cp += sizeof(VolumeStatus);
+ strcpy(cp, volName);
+ cp += strlen(volName)+1;
+ strcpy(cp, offLineMsg);
+ cp += strlen(offLineMsg)+1;
+ strcpy(cp, motd);
+ cp += strlen(motd)+1;
+
+ /* now return updated return data pointer */
+ ioctlp->outDatap = cp;
+
+ return 0;
+}
long cm_IoctlGetVolumeStatus(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- char volName[32];
- cm_scache_t *scp;
- char offLineMsg[256];
- char motd[256];
- cm_conn_t *tcp;
- register long code;
- AFSFetchVolumeStatus volStat;
- register char *cp;
- char *Name;
- char *OfflineMsg;
- char *MOTD;
- cm_req_t req;
-
- cm_InitReq(&req);
-
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
- if (code) return code;
+ char volName[32];
+ cm_scache_t *scp;
+ char offLineMsg[256];
+ char motd[256];
+ cm_conn_t *tcp;
+ register long code;
+ AFSFetchVolumeStatus volStat;
+ register char *cp;
+ char *Name;
+ char *OfflineMsg;
+ char *MOTD;
+ cm_req_t req;
+
+ cm_InitReq(&req);
- Name = volName;
- OfflineMsg = offLineMsg;
- MOTD = motd;
- do {
- code = cm_Conn(&scp->fid, userp, &req, &tcp);
- if (code) continue;
-
- code = RXAFS_GetVolumeStatus(tcp->callp, scp->fid.volume,
- &volStat, &Name, &OfflineMsg, &MOTD);
- } while (cm_Analyze(tcp, userp, &req, &scp->fid, NULL, NULL, NULL, code));
- code = cm_MapRPCError(code, &req);
-
- cm_ReleaseSCache(scp);
- if (code) return code;
-
- /* Copy all this junk into msg->im_data, keeping track of the lengths. */
- cp = ioctlp->outDatap;
- memcpy(cp, (char *)&volStat, sizeof(AFSFetchVolumeStatus));
- cp += sizeof(AFSFetchVolumeStatus);
- strcpy(cp, volName);
- cp += strlen(volName)+1;
- strcpy(cp, offLineMsg);
- cp += strlen(offLineMsg)+1;
- strcpy(cp, motd);
- cp += strlen(motd)+1;
-
- /* return new size */
- ioctlp->outDatap = cp;
-
- return 0;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
+ if (code) return code;
+
+ Name = volName;
+ OfflineMsg = offLineMsg;
+ MOTD = motd;
+ do {
+ code = cm_Conn(&scp->fid, userp, &req, &tcp);
+ if (code) continue;
+
+ lock_ObtainMutex(&tcp->mx);
+ code = RXAFS_GetVolumeStatus(tcp->callp, scp->fid.volume,
+ &volStat, &Name, &OfflineMsg, &MOTD);
+ lock_ReleaseMutex(&tcp->mx);
+ } while (cm_Analyze(tcp, userp, &req, &scp->fid, NULL, NULL, NULL, code));
+ code = cm_MapRPCError(code, &req);
+
+ cm_ReleaseSCache(scp);
+ if (code) return code;
+
+ /* Copy all this junk into msg->im_data, keeping track of the lengths. */
+ cp = ioctlp->outDatap;
+ memcpy(cp, (char *)&volStat, sizeof(AFSFetchVolumeStatus));
+ cp += sizeof(AFSFetchVolumeStatus);
+ strcpy(cp, volName);
+ cp += strlen(volName)+1;
+ strcpy(cp, offLineMsg);
+ cp += strlen(offLineMsg)+1;
+ strcpy(cp, motd);
+ cp += strlen(motd)+1;
+
+ /* return new size */
+ ioctlp->outDatap = cp;
+
+ return 0;
}
long cm_IoctlWhereIs(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long code;
+ long code;
cm_scache_t *scp;
cm_cell_t *cellp;
cm_volume_t *tvp;
- cm_serverRef_t **tsrpp, *current;
+ cm_serverRef_t **tsrpp, *current;
cm_server_t *tsp;
unsigned long volume;
char *cp;
cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
code = cm_ParseIoctlPath(ioctlp, userp, &req, &scp);
if (code) return code;
- volume = scp->fid.volume;
+ volume = scp->fid.volume;
- cellp = cm_FindCellByID(scp->fid.cell);
+ cellp = cm_FindCellByID(scp->fid.cell);
osi_assert(cellp);
cm_ReleaseSCache(scp);
- code = cm_GetVolumeByID(cellp, volume, userp, &req, &tvp);
+ code = cm_GetVolumeByID(cellp, volume, userp, &req, &tvp);
if (code) return code;
cp = ioctlp->outDatap;
- lock_ObtainMutex(&tvp->mx);
- tsrpp = cm_GetVolServers(tvp, volume);
- lock_ObtainRead(&cm_serverLock);
- for (current = *tsrpp; current; current = current->next) {
- tsp = current->server;
- memcpy(cp, (char *)&tsp->addr.sin_addr.s_addr, sizeof(long));
- cp += sizeof(long);
- }
- lock_ReleaseRead(&cm_serverLock);
+ lock_ObtainMutex(&tvp->mx);
+ tsrpp = cm_GetVolServers(tvp, volume);
+ lock_ObtainRead(&cm_serverLock);
+ for (current = *tsrpp; current; current = current->next) {
+ tsp = current->server;
+ memcpy(cp, (char *)&tsp->addr.sin_addr.s_addr, sizeof(long));
+ cp += sizeof(long);
+ }
+ lock_ReleaseRead(&cm_serverLock);
cm_FreeServerList(tsrpp);
lock_ReleaseMutex(&tvp->mx);
- /* still room for terminating NULL, add it on */
- volume = 0; /* reuse vbl */
- memcpy(cp, (char *)&volume, sizeof(long));
- cp += sizeof(long);
+ /* still room for terminating NULL, add it on */
+ volume = 0; /* reuse vbl */
+ memcpy(cp, (char *)&volume, sizeof(long));
+ cp += sizeof(long);
- ioctlp->outDatap = cp;
- cm_PutVolume(tvp);
- return 0;
-}
+ ioctlp->outDatap = cp;
+ cm_PutVolume(tvp);
+ return 0;
+}
long cm_IoctlStatMountPoint(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long code;
- cm_scache_t *dscp;
- cm_scache_t *scp;
- char *cp;
- cm_req_t req;
+ long code;
+ cm_scache_t *dscp;
+ cm_scache_t *scp;
+ char *cp;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &dscp);
- if (code) return code;
-
- cp = ioctlp->inDatap;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &dscp);
+ if (code) return code;
- code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, &req, &scp);
- cm_ReleaseSCache(dscp);
- if (code) return code;
+ cp = ioctlp->inDatap;
+
+ code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, &req, &scp);
+ cm_ReleaseSCache(dscp);
+ if (code) return code;
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
- /* now check that this is a real mount point */
- if (scp->fileType != CM_SCACHETYPE_MOUNTPOINT) {
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseSCache(scp);
- return CM_ERROR_INVAL;
- }
-
- code = cm_ReadMountPoint(scp, userp, &req);
- if (code == 0) {
- cp = ioctlp->outDatap;
- strcpy(cp, scp->mountPointStringp);
- cp += strlen(cp) + 1;
- ioctlp->outDatap = cp;
- }
- lock_ReleaseMutex(&scp->mx);
+ /* now check that this is a real mount point */
+ if (scp->fileType != CM_SCACHETYPE_MOUNTPOINT) {
+ lock_ReleaseMutex(&scp->mx);
cm_ReleaseSCache(scp);
+ return CM_ERROR_INVAL;
+ }
- return code;
-}
+ code = cm_ReadMountPoint(scp, userp, &req);
+ if (code == 0) {
+ cp = ioctlp->outDatap;
+ strcpy(cp, scp->mountPointStringp);
+ cp += strlen(cp) + 1;
+ ioctlp->outDatap = cp;
+ }
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseSCache(scp);
+
+ return code;
+}
long cm_IoctlDeleteMountPoint(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long code;
- cm_scache_t *dscp;
- cm_scache_t *scp;
- char *cp;
- cm_req_t req;
+ long code;
+ cm_scache_t *dscp;
+ cm_scache_t *scp;
+ char *cp;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlPath(ioctlp, userp, &req, &dscp);
- if (code) return code;
-
- cp = ioctlp->inDatap;
+ code = cm_ParseIoctlPath(ioctlp, userp, &req, &dscp);
+ if (code) return code;
+
+ cp = ioctlp->inDatap;
- code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, &req, &scp);
+ code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, &req, &scp);
- /* if something went wrong, bail out now */
- if (code) {
- goto done;
- }
+ /* if something went wrong, bail out now */
+ if (code) {
+ goto done;
+ }
- lock_ObtainMutex(&scp->mx);
- code = cm_SyncOp(scp, NULL, userp, &req, 0,
- CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
- if (code) {
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseSCache(scp);
- goto done;
- }
-
- /* now check that this is a real mount point */
- if (scp->fileType != CM_SCACHETYPE_MOUNTPOINT) {
- lock_ReleaseMutex(&scp->mx);
- cm_ReleaseSCache(scp);
- code = CM_ERROR_INVAL;
- goto done;
- }
-
- /* time to make the RPC, so drop the lock */
- lock_ReleaseMutex(&scp->mx);
+ lock_ObtainMutex(&scp->mx);
+ code = cm_SyncOp(scp, NULL, userp, &req, 0,
+ CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+ if (code) {
+ lock_ReleaseMutex(&scp->mx);
cm_ReleaseSCache(scp);
-
- /* easier to do it this way */
- code = cm_Unlink(dscp, cp, userp, &req);
- if (code == 0 && (dscp->flags & CM_SCACHEFLAG_ANYWATCH))
- smb_NotifyChange(FILE_ACTION_REMOVED,
- FILE_NOTIFY_CHANGE_DIR_NAME,
- dscp, cp, NULL, TRUE);
-
-done:
- cm_ReleaseSCache(dscp);
- return code;
+ goto done;
+ }
+
+ /* now check that this is a real mount point */
+ if (scp->fileType != CM_SCACHETYPE_MOUNTPOINT) {
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseSCache(scp);
+ code = CM_ERROR_INVAL;
+ goto done;
+ }
+
+ /* time to make the RPC, so drop the lock */
+ lock_ReleaseMutex(&scp->mx);
+ cm_ReleaseSCache(scp);
+
+ /* easier to do it this way */
+ code = cm_Unlink(dscp, cp, userp, &req);
+ if (code == 0 && (dscp->flags & CM_SCACHEFLAG_ANYWATCH))
+ smb_NotifyChange(FILE_ACTION_REMOVED,
+ FILE_NOTIFY_CHANGE_DIR_NAME,
+ dscp, cp, NULL, TRUE);
+
+ done:
+ cm_ReleaseSCache(dscp);
+ return code;
}
long cm_IoctlCheckServers(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_cell_t *cellp;
- chservinfo_t csi;
- char *tp;
- char *cp;
- long temp;
- cm_server_t *tsp;
- int haveCell;
+ cm_cell_t *cellp;
+ chservinfo_t csi;
+ char *tp;
+ char *cp;
+ long temp;
+ cm_server_t *tsp;
+ int haveCell;
- cm_SkipIoctlPath(ioctlp); /* we don't care about the path */
- tp = ioctlp->inDatap;
- haveCell = 0;
-
- memcpy(&temp, tp, sizeof(temp));
- if (temp == 0x12345678) { /* For afs3.3 version */
- memcpy(&csi, tp, sizeof(csi));
- if (csi.tinterval >= 0) {
- cp = ioctlp->outDatap;
- memcpy(cp, (char *)&cm_daemonCheckInterval, sizeof(long));
- ioctlp->outDatap += sizeof(long);
- if (csi.tinterval > 0) {
- if (!smb_SUser(userp))
- return CM_ERROR_NOACCESS;
- cm_daemonCheckInterval = csi.tinterval;
- }
- return 0;
- }
- if (csi.tsize)
- haveCell = 1;
- temp = csi.tflags;
- cp = csi.tbuffer;
- } else { /* For pre afs3.3 versions */
- memcpy((char *)&temp, ioctlp->inDatap, sizeof(long));
- ioctlp->inDatap = cp = ioctlp->inDatap + sizeof(long);
- if (cp - ioctlp->inAllocp < ioctlp->inCopied) /* still more data available */
- haveCell = 1;
- }
-
- /*
- * 1: fast check, don't contact servers.
- * 2: local cell only.
- */
- if (haveCell) {
- /* have cell name, too */
- cellp = cm_GetCell(cp, 0);
- if (!cellp) return CM_ERROR_NOSUCHCELL;
- }
- else cellp = (cm_cell_t *) 0;
- if (!cellp && (temp & 2)) {
- /* use local cell */
- cellp = cm_FindCellByID(1);
- }
- if (!(temp & 1)) { /* if not fast, call server checker routine */
- /* check down servers */
- cm_CheckServers(CM_FLAG_CHECKDOWNSERVERS | CM_FLAG_CHECKUPSERVERS,
- cellp);
- }
-
- /* now return the current down server list */
- cp = ioctlp->outDatap;
- lock_ObtainRead(&cm_serverLock);
- for(tsp = cm_allServersp; tsp; tsp=tsp->allNextp) {
- if (cellp && tsp->cellp != cellp) continue; /* cell spec'd and wrong */
- if ((tsp->flags & CM_SERVERFLAG_DOWN)
- && tsp->type == CM_SERVER_FILE) {
- memcpy(cp, (char *)&tsp->addr.sin_addr.s_addr, sizeof(long));
- cp += sizeof(long);
- }
- }
- lock_ReleaseRead(&cm_serverLock);
-
- ioctlp->outDatap = cp;
- return 0;
+ cm_SkipIoctlPath(ioctlp); /* we don't care about the path */
+ tp = ioctlp->inDatap;
+ haveCell = 0;
+
+ memcpy(&temp, tp, sizeof(temp));
+ if (temp == 0x12345678) { /* For afs3.3 version */
+ memcpy(&csi, tp, sizeof(csi));
+ if (csi.tinterval >= 0) {
+ cp = ioctlp->outDatap;
+ memcpy(cp, (char *)&cm_daemonCheckInterval, sizeof(long));
+ ioctlp->outDatap += sizeof(long);
+ if (csi.tinterval > 0) {
+ if (!smb_SUser(userp))
+ return CM_ERROR_NOACCESS;
+ cm_daemonCheckInterval = csi.tinterval;
+ }
+ return 0;
+ }
+ if (csi.tsize)
+ haveCell = 1;
+ temp = csi.tflags;
+ cp = csi.tbuffer;
+ } else { /* For pre afs3.3 versions */
+ memcpy((char *)&temp, ioctlp->inDatap, sizeof(long));
+ ioctlp->inDatap = cp = ioctlp->inDatap + sizeof(long);
+ if (cp - ioctlp->inAllocp < ioctlp->inCopied) /* still more data available */
+ haveCell = 1;
+ }
+
+ /*
+ * 1: fast check, don't contact servers.
+ * 2: local cell only.
+ */
+ if (haveCell) {
+ /* have cell name, too */
+ cellp = cm_GetCell(cp, 0);
+ if (!cellp) return CM_ERROR_NOSUCHCELL;
+ }
+ else cellp = (cm_cell_t *) 0;
+ if (!cellp && (temp & 2)) {
+ /* use local cell */
+ cellp = cm_FindCellByID(1);
+ }
+ if (!(temp & 1)) { /* if not fast, call server checker routine */
+ /* check down servers */
+ cm_CheckServers(CM_FLAG_CHECKDOWNSERVERS | CM_FLAG_CHECKUPSERVERS,
+ cellp);
+ }
+
+ /* now return the current down server list */
+ cp = ioctlp->outDatap;
+ lock_ObtainRead(&cm_serverLock);
+ for (tsp = cm_allServersp; tsp; tsp=tsp->allNextp) {
+ if (cellp && tsp->cellp != cellp) continue; /* cell spec'd and wrong */
+ if ((tsp->flags & CM_SERVERFLAG_DOWN)
+ && tsp->type == CM_SERVER_FILE) {
+ memcpy(cp, (char *)&tsp->addr.sin_addr.s_addr, sizeof(long));
+ cp += sizeof(long);
+ }
+ }
+ lock_ReleaseRead(&cm_serverLock);
+
+ ioctlp->outDatap = cp;
+ return 0;
}
long cm_IoctlGag(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- /* we don't print anything superfluous, so we don't support the gag call */
- return CM_ERROR_INVAL;
+ /* we don't print anything superfluous, so we don't support the gag call */
+ return CM_ERROR_INVAL;
}
long cm_IoctlCheckVolumes(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_CheckVolumes();
- return 0;
-}
+ cm_CheckVolumes();
+ return 0;
+}
long cm_IoctlSetCacheSize(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long temp;
- long code;
-
- cm_SkipIoctlPath(ioctlp);
+ long temp;
+ long code;
- memcpy(&temp, ioctlp->inDatap, sizeof(temp));
- if (temp == 0) temp = buf_nOrigBuffers;
- else {
- /* temp is in 1K units, convert to # of buffers */
- temp = temp / (buf_bufferSize / 1024);
- }
+ cm_SkipIoctlPath(ioctlp);
+
+ memcpy(&temp, ioctlp->inDatap, sizeof(temp));
+ if (temp == 0)
+ temp = buf_nOrigBuffers;
+ else {
+ /* temp is in 1K units, convert to # of buffers */
+ temp = temp / (buf_bufferSize / 1024);
+ }
- /* now adjust the cache size */
- code = buf_SetNBuffers(temp);
+ /* now adjust the cache size */
+ code = buf_SetNBuffers(temp);
- return code;
+ return code;
}
long cm_IoctlTraceControl(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long inValue;
+ long inValue;
- cm_SkipIoctlPath(ioctlp);
+ cm_SkipIoctlPath(ioctlp);
- memcpy(&inValue, ioctlp->inDatap, sizeof(long));
+ memcpy(&inValue, ioctlp->inDatap, sizeof(long));
- /* print trace */
- if (inValue & 8) {
- afsd_ForceTrace(FALSE);
- }
+ /* print trace */
+ if (inValue & 8) {
+ afsd_ForceTrace(FALSE);
+ }
- if (inValue & 2) {
- /* set tracing value to low order bit */
- if ((inValue & 1) == 0) {
- /* disable tracing */
- osi_LogDisable(afsd_logp);
- }
- else {
- /* enable tracing */
- osi_LogEnable(afsd_logp);
- }
- }
-
- /* see if we're supposed to do a reset, too */
- if (inValue & 4) {
- osi_LogReset(afsd_logp);
- }
-
- /* and copy out tracing flag */
- inValue = afsd_logp->enabled; /* use as a temp vbl */
- memcpy(ioctlp->outDatap, &inValue, sizeof(long));
- ioctlp->outDatap += sizeof(long);
- return 0;
-}
+ if (inValue & 2) {
+ /* set tracing value to low order bit */
+ if ((inValue & 1) == 0) {
+ /* disable tracing */
+ osi_LogDisable(afsd_logp);
+ }
+ else {
+ /* enable tracing */
+ osi_LogEnable(afsd_logp);
+ }
+ }
+
+ /* see if we're supposed to do a reset, too */
+ if (inValue & 4) {
+ osi_LogReset(afsd_logp);
+ }
+
+ /* and copy out tracing flag */
+ inValue = afsd_logp->enabled; /* use as a temp vbl */
+ memcpy(ioctlp->outDatap, &inValue, sizeof(long));
+ ioctlp->outDatap += sizeof(long);
+ return 0;
+}
long cm_IoctlGetCacheParms(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_cacheParms_t parms;
-
- memset(&parms, 0, sizeof(parms));
+ cm_cacheParms_t parms;
- /* first we get, in 1K units, the cache size */
- parms.parms[0] = buf_nbuffers * (buf_bufferSize / 1024);
-
- /* and then the actual # of buffers in use (not in the free list, I guess,
- * will be what we do).
- */
- parms.parms[1] = (buf_nbuffers - buf_CountFreeList()) * (buf_bufferSize / 1024);
-
- memcpy(ioctlp->outDatap, &parms, sizeof(parms));
- ioctlp->outDatap += sizeof(parms);
+ memset(&parms, 0, sizeof(parms));
- return 0;
+ /* first we get, in 1K units, the cache size */
+ parms.parms[0] = buf_nbuffers * (buf_bufferSize / 1024);
+
+ /* and then the actual # of buffers in use (not in the free list, I guess,
+ * will be what we do).
+ */
+ parms.parms[1] = (buf_nbuffers - buf_CountFreeList()) * (buf_bufferSize / 1024);
+
+ memcpy(ioctlp->outDatap, &parms, sizeof(parms));
+ ioctlp->outDatap += sizeof(parms);
+
+ return 0;
}
long cm_IoctlGetCell(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long whichCell;
+ long whichCell;
long magic = 0;
- cm_cell_t *tcellp;
- cm_serverRef_t *serverRefp;
+ cm_cell_t *tcellp;
+ cm_serverRef_t *serverRefp;
cm_server_t *serverp;
- long i;
+ long i;
char *cp;
char *tp;
char *basep;
- cm_SkipIoctlPath(ioctlp);
+ cm_SkipIoctlPath(ioctlp);
- tp = ioctlp->inDatap;
+ tp = ioctlp->inDatap;
- memcpy((char *)&whichCell, tp, sizeof(long));
- tp += sizeof(long);
-
- /* see if more than one long passed in, ignoring the null pathname (the -1) */
- if (ioctlp->inCopied-1 > sizeof(long)) {
- memcpy((char *)&magic, tp, sizeof(long));
- }
+ memcpy((char *)&whichCell, tp, sizeof(long));
+ tp += sizeof(long);
+
+ /* see if more than one long passed in, ignoring the null pathname (the -1) */
+ if (ioctlp->inCopied-1 > sizeof(long)) {
+ memcpy((char *)&magic, tp, sizeof(long));
+ }
lock_ObtainRead(&cm_cellLock);
- for(tcellp = cm_allCellsp; tcellp; tcellp = tcellp->nextp) {
- if (whichCell == 0) break;
- whichCell--;
- }
- lock_ReleaseRead(&cm_cellLock);
- if (tcellp) {
- int max = 8;
-
- cp = ioctlp->outDatap;
-
- if (magic == 0x12345678) {
- memcpy(cp, (char *)&magic, sizeof(long));
- max = 13;
- }
- memset(cp, 0, max * sizeof(long));
+ for (tcellp = cm_allCellsp; tcellp; tcellp = tcellp->nextp) {
+ if (whichCell == 0) break;
+ whichCell--;
+ }
+ lock_ReleaseRead(&cm_cellLock);
+ if (tcellp) {
+ int max = 8;
+
+ cp = ioctlp->outDatap;
+
+ if (magic == 0x12345678) {
+ memcpy(cp, (char *)&magic, sizeof(long));
+ max = 13;
+ }
+ memset(cp, 0, max * sizeof(long));
basep = cp;
- lock_ObtainRead(&cm_serverLock); /* for going down server list */
+ lock_ObtainRead(&cm_serverLock); /* for going down server list */
/* jaltman - do the reference counts to serverRefp contents need to be increased? */
- serverRefp = tcellp->vlServersp;
- for(i=0; i<max; i++) {
- if (!serverRefp) break;
- serverp = serverRefp->server;
- memcpy(cp, &serverp->addr.sin_addr.s_addr, sizeof(long));
- cp += sizeof(long);
+ serverRefp = tcellp->vlServersp;
+ for (i=0; i<max; i++) {
+ if (!serverRefp) break;
+ serverp = serverRefp->server;
+ memcpy(cp, &serverp->addr.sin_addr.s_addr, sizeof(long));
+ cp += sizeof(long);
serverRefp = serverRefp->next;
- }
- lock_ReleaseRead(&cm_serverLock);
- cp = basep + max * sizeof(afs_int32);
- strcpy(cp, tcellp->namep);
- cp += strlen(tcellp->namep)+1;
- ioctlp->outDatap = cp;
- }
+ }
+ lock_ReleaseRead(&cm_serverLock);
+ cp = basep + max * sizeof(afs_int32);
+ strcpy(cp, tcellp->namep);
+ cp += strlen(tcellp->namep)+1;
+ ioctlp->outDatap = cp;
+ }
if (tcellp)
return 0;
* are already loaded.
* cell list will be cm_CellLock and cm_ServerLock will be held for write.
- */
+ */
cm_cell_t *cp;
cm_SkipIoctlPath(ioctlp);
lock_ObtainWrite(&cm_cellLock);
- for(cp = cm_allCellsp; cp; cp=cp->nextp)
+ for (cp = cm_allCellsp; cp; cp=cp->nextp)
{
long code;
/* delete all previous server lists - cm_FreeServerList will ask for write on cm_ServerLock*/
cp->vlServersp = NULL;
code = cm_SearchCellFile(cp->namep, cp->namep, cm_AddCellProc, cp);
#ifdef AFS_AFSDB_ENV
- if (code) {
+ if (code) {
if (cm_dnsEnabled) {
int ttl;
code = cm_SearchCellByDNS(cp->namep, cp->namep, &ttl, cm_AddCellProc, cp);
long cm_IoctlGetWsCell(smb_ioctl_t *ioctlp, cm_user_t *userp)
{
- /* if we don't know our default cell, return failure */
- if (cm_rootCellp == NULL) {
- return CM_ERROR_NOSUCHCELL;
- }
-
- /* return the default cellname to the caller */
- strcpy(ioctlp->outDatap, cm_rootCellp->namep);
- ioctlp->outDatap += strlen(ioctlp->outDatap) +1;
-
- /* done: success */
- return 0;
+ /* if we don't know our default cell, return failure */
+ if (cm_rootCellp == NULL) {
+ return CM_ERROR_NOSUCHCELL;
+ }
+
+ /* return the default cellname to the caller */
+ strcpy(ioctlp->outDatap, cm_rootCellp->namep);
+ ioctlp->outDatap += strlen(ioctlp->outDatap) +1;
+
+ /* done: success */
+ return 0;
}
long cm_IoctlSysName(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long setSysName, foundname = 0;
+ long setSysName, foundname = 0;
char *cp, *cp2, inname[MAXSYSNAME], outname[MAXSYSNAME];
int t, count, num = 0;
char **sysnamelist[MAXSYSNAME];
- cm_SkipIoctlPath(ioctlp);
+ cm_SkipIoctlPath(ioctlp);
memcpy(&setSysName, ioctlp->inDatap, sizeof(long));
ioctlp->inDatap += sizeof(long);
strcpy(outname, cm_sysName);
foundname = cm_sysNameCount;
*sysnamelist = cm_sysNameList;
- } else { /* Local guy; only root can change sysname */
+ } else {
+ /* Local guy; only root can change sysname */
/* clear @sys entries from the dnlc, once afs_lookup can
* do lookups of @sys entries and thinks it can trust them */
/* privs ok, store the entry, ... */
cp = ioctlp->inDatap;
for (count = 1; count < setSysName; ++count) {
if (!cm_sysNameList[count])
- osi_panic
- ("cm_IoctlSysName: no cm_sysNameList entry to write\n"
- , __FILE__, __LINE__);
+ osi_panic("cm_IoctlSysName: no cm_sysNameList entry to write\n",
+ __FILE__, __LINE__);
t = strlen(cp);
memcpy(cm_sysNameList[count], cp, t + 1); /* include null */
cp += t + 1;
}
if (!setSysName) {
- /* return the sysname to the caller */
- cp = ioctlp->outDatap;
+ /* return the sysname to the caller */
+ cp = ioctlp->outDatap;
memcpy(cp, (char *)&foundname, sizeof(afs_int32));
cp += sizeof(afs_int32); /* skip found flag */
if (foundname) {
cp += strlen(outname) + 1; /* skip name and terminating null char */
for ( count=1; count < foundname ; ++count) { /* ... or list */
if ( !(*sysnamelist)[count] )
- osi_panic("cm_IoctlSysName: no cm_sysNameList entry to read\n"
- , __FILE__, __LINE__);
+ osi_panic("cm_IoctlSysName: no cm_sysNameList entry to read\n",
+ __FILE__, __LINE__);
t = strlen((*sysnamelist)[count]);
if (t >= MAXSYSNAME)
- osi_panic("cm_IoctlSysName: sysname entry garbled\n"
- , __FILE__, __LINE__);
+ osi_panic("cm_IoctlSysName: sysname entry garbled\n",
+ __FILE__, __LINE__);
strcpy(cp, (*sysnamelist)[count]);
cp += t + 1;
}
ioctlp->outDatap = cp;
}
- /* done: success */
+ /* done: success */
return 0;
}
long cm_IoctlGetCellStatus(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long temp;
- cm_cell_t *cellp;
+ long temp;
+ cm_cell_t *cellp;
- cm_SkipIoctlPath(ioctlp);
+ cm_SkipIoctlPath(ioctlp);
- cellp = cm_GetCell(ioctlp->inDatap, 0);
- if (!cellp) return CM_ERROR_NOSUCHCELL;
+ cellp = cm_GetCell(ioctlp->inDatap, 0);
+ if (!cellp)
+ return CM_ERROR_NOSUCHCELL;
- temp = 0;
- lock_ObtainMutex(&cellp->mx);
- if (cellp->flags & CM_CELLFLAG_SUID)
- temp |= CM_SETCELLFLAG_SUID;
- lock_ReleaseMutex(&cellp->mx);
+ temp = 0;
+ lock_ObtainMutex(&cellp->mx);
+ if (cellp->flags & CM_CELLFLAG_SUID)
+ temp |= CM_SETCELLFLAG_SUID;
+ lock_ReleaseMutex(&cellp->mx);
- /* now copy out parm */
- memcpy(ioctlp->outDatap, &temp, sizeof(long));
- ioctlp->outDatap += sizeof(long);
+ /* now copy out parm */
+ memcpy(ioctlp->outDatap, &temp, sizeof(long));
+ ioctlp->outDatap += sizeof(long);
- return 0;
+ return 0;
}
long cm_IoctlSetCellStatus(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- long temp;
- cm_cell_t *cellp;
+ long temp;
+ cm_cell_t *cellp;
- cm_SkipIoctlPath(ioctlp);
+ cm_SkipIoctlPath(ioctlp);
- cellp = cm_GetCell(ioctlp->inDatap + 2*sizeof(long), 0);
- if (!cellp) return CM_ERROR_NOSUCHCELL;
+ cellp = cm_GetCell(ioctlp->inDatap + 2*sizeof(long), 0);
+ if (!cellp)
+ return CM_ERROR_NOSUCHCELL;
- memcpy((char *)&temp, ioctlp->inDatap, sizeof(long));
+ memcpy((char *)&temp, ioctlp->inDatap, sizeof(long));
- lock_ObtainMutex(&cellp->mx);
- if (temp & CM_SETCELLFLAG_SUID)
- cellp->flags |= CM_CELLFLAG_SUID;
- else
- cellp->flags &= ~CM_CELLFLAG_SUID;
- lock_ReleaseMutex(&cellp->mx);
+ lock_ObtainMutex(&cellp->mx);
+ if (temp & CM_SETCELLFLAG_SUID)
+ cellp->flags |= CM_CELLFLAG_SUID;
+ else
+ cellp->flags &= ~CM_CELLFLAG_SUID;
+ lock_ReleaseMutex(&cellp->mx);
- return 0;
+ return 0;
}
long cm_IoctlSetSPrefs(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_SSetPref_t *spin; /* input */
- cm_SPref_t *srvin; /* one input component */
- cm_server_t *tsp;
- int i, vlonly, noServers, type;
- struct sockaddr_in tmp;
- unsigned short rank;
-
- cm_SkipIoctlPath(ioctlp); /* we don't care about the path */
-
- spin = (cm_SSetPref_t *)ioctlp->inDatap;
- noServers = spin->num_servers;
- vlonly = spin->flags;
- if ( vlonly )
- type = CM_SERVER_VLDB;
- else
+ cm_SSetPref_t *spin; /* input */
+ cm_SPref_t *srvin; /* one input component */
+ cm_server_t *tsp;
+ int i, vlonly, noServers, type;
+ struct sockaddr_in tmp;
+ unsigned short rank;
+
+ cm_SkipIoctlPath(ioctlp); /* we don't care about the path */
+
+ spin = (cm_SSetPref_t *)ioctlp->inDatap;
+ noServers = spin->num_servers;
+ vlonly = spin->flags;
+ if ( vlonly )
+ type = CM_SERVER_VLDB;
+ else
type = CM_SERVER_FILE;
- for ( i=0; i < noServers; i++)
- {
- srvin = &(spin->servers[i]);
- rank = srvin->rank + (rand() & 0x000f);
- tmp.sin_addr = srvin->host;
- tmp.sin_family = AF_INET;
-
- tsp = cm_FindServer(&tmp, type);
- if ( tsp ) /* an existing server - ref count increased */
- {
- tsp->ipRank = rank; /* no need to protect by mutex*/
-
- if ( type == CM_SERVER_FILE) /* fileserver */
- {
- /* find volumes which might have RO copy
- /* on server and change the ordering of
- ** their RO list */
- cm_ChangeRankVolume(tsp);
- }
- else
- {
- /* set preferences for an existing vlserver */
- cm_ChangeRankCellVLServer(tsp);
- }
+ for ( i=0; i < noServers; i++)
+ {
+ srvin = &(spin->servers[i]);
+ rank = srvin->rank + (rand() & 0x000f);
+ tmp.sin_addr = srvin->host;
+ tmp.sin_family = AF_INET;
+
+ tsp = cm_FindServer(&tmp, type);
+ if ( tsp ) /* an existing server - ref count increased */
+ {
+ tsp->ipRank = rank; /* no need to protect by mutex*/
+
+ if ( type == CM_SERVER_FILE) /* fileserver */
+ {
+ /* find volumes which might have RO copy
+ * on server and change the ordering of
+ * their RO list */
+ cm_ChangeRankVolume(tsp);
+ }
+ else
+ {
+ /* set preferences for an existing vlserver */
+ cm_ChangeRankCellVLServer(tsp);
+ }
cm_PutServer(tsp); /* decrease refcount */
- }
- else /* add a new server without a cell */
- {
- tsp = cm_NewServer(&tmp, type, NULL); /* refcount = 1 */
- tsp->ipRank = rank;
- }
- }
- return 0;
+ }
+ else /* add a new server without a cell */
+ {
+ tsp = cm_NewServer(&tmp, type, NULL); /* refcount = 1 */
+ tsp->ipRank = rank;
+ }
+ }
+ return 0;
}
long cm_IoctlGetSPrefs(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- cm_SPrefRequest_t *spin; /* input */
- cm_SPrefInfo_t *spout; /* output */
- cm_SPref_t *srvout; /* one output component */
- cm_server_t *tsp;
- int i, vlonly, noServers;
-
- cm_SkipIoctlPath(ioctlp); /* we don't care about the path */
-
- spin = (cm_SPrefRequest_t *)ioctlp->inDatap;
- spout = (cm_SPrefInfo_t *) ioctlp->outDatap;
- srvout = spout->servers;
- noServers = spin->num_servers;
- vlonly = spin->flags & CM_SPREF_VLONLY;
- spout->num_servers = 0;
-
- lock_ObtainRead(&cm_serverLock); /* get server lock */
-
- for(tsp=cm_allServersp, i=0; tsp && noServers; tsp=tsp->allNextp,i++){
- if (spin->offset > i) {
- continue; /* catch up to where we left off */
- }
-
- if ( vlonly && (tsp->type == CM_SERVER_FILE) )
- continue; /* ignore fileserver for -vlserver option*/
- if ( !vlonly && (tsp->type == CM_SERVER_VLDB) )
- continue; /* ignore vlservers */
-
- srvout->host = tsp->addr.sin_addr;
- srvout->rank = tsp->ipRank;
- srvout++;
- spout->num_servers++;
- noServers--;
- }
- lock_ReleaseRead(&cm_serverLock); /* release server lock */
-
- if ( tsp ) /* we ran out of space in the output buffer */
- spout->next_offset = i;
- else
- spout->next_offset = 0;
- ioctlp->outDatap += sizeof(cm_SPrefInfo_t) +
- (spout->num_servers -1 ) * sizeof(cm_SPref_t) ;
- return 0;
+ cm_SPrefRequest_t *spin; /* input */
+ cm_SPrefInfo_t *spout; /* output */
+ cm_SPref_t *srvout; /* one output component */
+ cm_server_t *tsp;
+ int i, vlonly, noServers;
+
+ cm_SkipIoctlPath(ioctlp); /* we don't care about the path */
+
+ spin = (cm_SPrefRequest_t *)ioctlp->inDatap;
+ spout = (cm_SPrefInfo_t *) ioctlp->outDatap;
+ srvout = spout->servers;
+ noServers = spin->num_servers;
+ vlonly = spin->flags & CM_SPREF_VLONLY;
+ spout->num_servers = 0;
+
+ lock_ObtainRead(&cm_serverLock); /* get server lock */
+
+ for (tsp=cm_allServersp, i=0; tsp && noServers; tsp=tsp->allNextp,i++){
+ if (spin->offset > i) {
+ continue; /* catch up to where we left off */
+ }
+
+ if ( vlonly && (tsp->type == CM_SERVER_FILE) )
+ continue; /* ignore fileserver for -vlserver option*/
+ if ( !vlonly && (tsp->type == CM_SERVER_VLDB) )
+ continue; /* ignore vlservers */
+
+ srvout->host = tsp->addr.sin_addr;
+ srvout->rank = tsp->ipRank;
+ srvout++;
+ spout->num_servers++;
+ noServers--;
+ }
+ lock_ReleaseRead(&cm_serverLock); /* release server lock */
+
+ if ( tsp ) /* we ran out of space in the output buffer */
+ spout->next_offset = i;
+ else
+ spout->next_offset = 0;
+ ioctlp->outDatap += sizeof(cm_SPrefInfo_t) +
+ (spout->num_servers -1 ) * sizeof(cm_SPref_t) ;
+ return 0;
}
long cm_IoctlStoreBehind(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- /* we ignore default asynchrony since we only have one way
- * of doing this today.
- */
- return 0;
-}
+ /* we ignore default asynchrony since we only have one way
+ * of doing this today.
+ */
+ return 0;
+}
long cm_IoctlCreateMountPoint(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- char leaf[256];
+ char leaf[256];
long code;
cm_scache_t *dscp;
cm_attr_t tattr;
char *cp;
- cm_req_t req;
+ cm_req_t req;
char mpInfo[256];
char fullCell[256];
- char volume[256];
- char cell[256];
- int ttl;
+ char volume[256];
+ char cell[256];
+ int ttl;
- cm_InitReq(&req);
+ cm_InitReq(&req);
code = cm_ParseIoctlParent(ioctlp, userp, &req, &dscp, leaf);
if (code) return code;
* work on UNIX clients.
*/
- /* Extract the possibly partial cell name */
- strcpy(cell, ioctlp->inDatap + 1); /* Skip the mp type character */
+ /* Extract the possibly partial cell name */
+ strcpy(cell, ioctlp->inDatap + 1); /* Skip the mp type character */
if (cp = strchr(cell, ':')) {
- /* Extract the volume name */
+ /* Extract the volume name */
*cp = 0;
- strcpy(volume, cp + 1);
+ strcpy(volume, cp + 1);
/* Get the full name for this cell */
code = cm_SearchCellFile(cell, fullCell, 0, 0);
#ifdef AFS_AFSDB_ENV
- if (code && cm_dnsEnabled)
+ if (code && cm_dnsEnabled)
code = cm_SearchCellByDNS(cell, fullCell, &ttl, 0, 0);
#endif
if (code)
- return CM_ERROR_NOSUCHCELL;
+ return CM_ERROR_NOSUCHCELL;
sprintf(mpInfo, "%c%s:%s", *ioctlp->inDatap, fullCell, volume);
- } else {
+ } else {
/* No cell name specified */
strcpy(mpInfo, ioctlp->inDatap);
}
#ifdef AFS_FREELANCE_CLIENT
- if (cm_freelanceEnabled && dscp == cm_rootSCachep) {
- /* we are adding the mount point to the root dir., so call
- the freelance code to do the add. */
- osi_Log0(afsd_logp,"IoctlCreateMountPoint within Freelance root dir");
- code = cm_FreelanceAddMount(leaf, fullCell, volume,
- *ioctlp->inDatap == '%', NULL);
- return code;
- }
+ if (cm_freelanceEnabled && dscp == cm_rootSCachep) {
+ /* we are adding the mount point to the root dir., so call
+ * the freelance code to do the add. */
+ osi_Log0(afsd_logp,"IoctlCreateMountPoint within Freelance root dir");
+ code = cm_FreelanceAddMount(leaf, fullCell, volume,
+ *ioctlp->inDatap == '%', NULL);
+ return code;
+ }
#endif
- /* create the symlink with mode 644. The lack of X bits tells
+ /* create the symlink with mode 644. The lack of X bits tells
* us that it is a mount point.
*/
- tattr.mask = CM_ATTRMASK_UNIXMODEBITS | CM_ATTRMASK_CLIENTMODTIME;
+ tattr.mask = CM_ATTRMASK_UNIXMODEBITS | CM_ATTRMASK_CLIENTMODTIME;
tattr.unixModeBits = 0644;
- tattr.clientModTime = time(NULL);
+ tattr.clientModTime = time(NULL);
code = cm_SymLink(dscp, leaf, mpInfo, 0, &tattr, userp, &req);
- if (code == 0 && (dscp->flags & CM_SCACHEFLAG_ANYWATCH))
- smb_NotifyChange(FILE_ACTION_ADDED,
+ if (code == 0 && (dscp->flags & CM_SCACHEFLAG_ANYWATCH))
+ smb_NotifyChange(FILE_ACTION_ADDED,
FILE_NOTIFY_CHANGE_DIR_NAME,
dscp, leaf, NULL, TRUE);
long cm_IoctlSymlink(struct smb_ioctl *ioctlp, struct cm_user *userp)
{
- char leaf[256];
- long code;
- cm_scache_t *dscp;
- cm_attr_t tattr;
- char *cp;
- cm_req_t req;
+ char leaf[256];
+ long code;
+ cm_scache_t *dscp;
+ cm_attr_t tattr;
+ char *cp;
+ cm_req_t req;
- cm_InitReq(&req);
+ cm_InitReq(&req);
- code = cm_ParseIoctlParent(ioctlp, userp, &req, &dscp, leaf);
- if (code) return code;
+ code = cm_ParseIoctlParent(ioctlp, userp, &req, &dscp, leaf);
+ if (code) return code;
- /* Translate chars for the link name */
- TranslateExtendedChars(leaf);
+ /* Translate chars for the link name */
+ TranslateExtendedChars(leaf);
- /* Translate chars for the linked to name */
- TranslateExtendedChars(ioctlp->inDatap);
+ /* Translate chars for the linked to name */
+ TranslateExtendedChars(ioctlp->inDatap);
- cp = ioctlp->inDatap; /* contents of link */
+ cp = ioctlp->inDatap; /* contents of link */
- /* Create symlink with mode 0755. */
- tattr.mask = CM_ATTRMASK_UNIXMODEBITS;
- tattr.unixModeBits = 0755;
+ /* Create symlink with mode 0755. */
+ tattr.mask = CM_ATTRMASK_UNIXMODEBITS;
+ tattr.unixModeBits = 0755;
- code = cm_SymLink(dscp, leaf, cp, 0, &tattr, userp, &req);
- if (code == 0 && (dscp->flags & CM_SCACHEFLAG_ANYWATCH))
- smb_NotifyChange(FILE_ACTION_ADDED,
- FILE_NOTIFY_CHANGE_FILE_NAME
- | FILE_NOTIFY_CHANGE_DIR_NAME,
- &