diff --git a/ClusterInterface/ClusterInterface.csproj b/ClusterInterface/ClusterInterface.csproj
index e82b177..3db6fff 100644
--- a/ClusterInterface/ClusterInterface.csproj
+++ b/ClusterInterface/ClusterInterface.csproj
@@ -1,6 +1,7 @@

-
+
+
Debug
@@ -9,10 +10,11 @@
Library
Properties
Microsoft.Research.Dryad.ClusterInterface
- DryadHttpClusterInterface
+ Microsoft.Research.Dryad.HttpClusterInterface
v4.5
512
+ 41254407
true
@@ -33,99 +35,12 @@
MinimumRecommendedRules.ruleset
-
- False
- ..\packages\Microsoft.Data.Edm.5.6.1\lib\net40\Microsoft.Data.Edm.dll
-
-
- False
- ..\packages\Microsoft.Data.OData.5.6.1\lib\net40\Microsoft.Data.OData.dll
-
-
- False
- ..\packages\Microsoft.Data.Services.Client.5.6.1\lib\net40\Microsoft.Data.Services.Client.dll
-
-
- False
- ..\packages\Microsoft.Hadoop.Client.1.1.1.8\lib\net40\Microsoft.Hadoop.Client.dll
-
-
- False
- ..\packages\Microsoft.Research.Peloponnese.0.7.5-beta\lib\net45\Microsoft.Research.Peloponnese.HadoopBridge.dll
-
-
- False
- ..\packages\Microsoft.Research.Peloponnese.0.7.5-beta\lib\net45\Microsoft.Research.Peloponnese.Utils.dll
-
-
- False
- ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.dll
-
-
- False
- ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.dll
-
-
- False
- ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Common.1.1.1\lib\net45\Microsoft.WindowsAzure.Common.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Common.1.1.1\lib\net45\Microsoft.WindowsAzure.Common.NetFramework.dll
-
-
- ..\packages\Microsoft.WindowsAzure.ConfigurationManager.2.0.3\lib\net40\Microsoft.WindowsAzure.Configuration.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Management.1.2.0\lib\net40\Microsoft.WindowsAzure.Management.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Management.HDInsight.1.1.1.8\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.dll
-
-
- False
- ..\packages\Microsoft.Hadoop.Client.1.1.1.8\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.dll
-
-
- False
- ..\packages\Microsoft.Hadoop.Client.1.1.1.8\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.Core.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Management.Storage.1.1.1\lib\net40\Microsoft.WindowsAzure.Management.Storage.dll
-
-
- False
- ..\packages\WindowsAzure.Storage.4.0.1\lib\net40\Microsoft.WindowsAzure.Storage.dll
-
-
- False
- ..\packages\Newtonsoft.Json.6.0.3\lib\net45\Newtonsoft.Json.dll
-
-
- False
- ..\packages\Microsoft.Net.Http.2.2.22\lib\net45\System.Net.Http.Extensions.dll
-
-
- False
- ..\packages\Microsoft.Net.Http.2.2.22\lib\net45\System.Net.Http.Primitives.dll
-
-
- False
- ..\packages\System.Spatial.5.6.1\lib\net40\System.Spatial.dll
-
@@ -134,29 +49,34 @@
+
+ Properties\SharedAssemblyInfo.cs
+
-
-
+
+ Designer
+
+
+ Designer
+
-
-
-
-
-
This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
+
+
+
+
-
+
+
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/VertexHostMain/app.config b/DryadVertex/VertexHost/vertex/VertexHostMain/app.config
new file mode 100644
index 0000000..69fd58e
--- /dev/null
+++ b/DryadVertex/VertexHost/vertex/VertexHostMain/app.config
@@ -0,0 +1,48 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/DryadVertex/VertexHost/vertex/VertexHostMain/packages.config b/DryadVertex/VertexHost/vertex/VertexHostMain/packages.config
new file mode 100644
index 0000000..08a02ca
--- /dev/null
+++ b/DryadVertex/VertexHost/vertex/VertexHostMain/packages.config
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/WrapperNativeInfo/WrapperNativeInfo.vcxproj b/DryadVertex/VertexHost/vertex/WrapperNativeInfo/WrapperNativeInfo.vcxproj
index eb0aad7..092a7e9 100644
--- a/DryadVertex/VertexHost/vertex/WrapperNativeInfo/WrapperNativeInfo.vcxproj
+++ b/DryadVertex/VertexHost/vertex/WrapperNativeInfo/WrapperNativeInfo.vcxproj
@@ -1,5 +1,5 @@
-
-
+
+
Debug
@@ -21,13 +21,13 @@
StaticLibrary
Unicode
true
- v110
+ v120
StaticLibrary
Unicode
true
- v110
+ v120
@@ -100,4 +100,4 @@
-
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/DryadLINQNativeChannels.def b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/DryadLINQNativeChannels.def
index 7839c61..f77fac6 100644
--- a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/DryadLINQNativeChannels.def
+++ b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/DryadLINQNativeChannels.def
@@ -1,4 +1,4 @@
-LIBRARY "DryadLINQNativeChannels.dll"
+LIBRARY "Microsoft.Research.Dryad.DryadLinq.NativeWrapper.dll"
EXPORTS
GetTotalLength
diff --git a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj
index 90232e4..787337b 100644
--- a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj
+++ b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj
@@ -1,5 +1,5 @@
-
-
+
+
Debug
@@ -19,14 +19,14 @@
DynamicLibrary
Unicode
- v110
+ v120
true
DynamicLibrary
Unicode
true
- v110
+ v120
@@ -52,8 +52,8 @@
AllRules.ruleset
- DryadLINQNativeChannels
- DryadLINQNativeChannels
+ Microsoft.Research.Dryad.DryadLinq.NativeWrapper
+ Microsoft.Research.Dryad.DryadLinq.NativeWrapper
@@ -111,6 +111,7 @@
+
@@ -119,4 +120,4 @@
-
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj.filters b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj.filters
index a49afcc..f5ce144 100644
--- a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj.filters
+++ b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/WrapperNativeInfoDll.vcxproj.filters
@@ -23,6 +23,9 @@
Source Files
+
+ Source Files
+
diff --git a/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/version.cpp b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/version.cpp
new file mode 100644
index 0000000..16fd22c
--- /dev/null
+++ b/DryadVertex/VertexHost/vertex/WrapperNativeInfoDll/version.cpp
@@ -0,0 +1,40 @@
+/*
+Copyright (c) Microsoft Corporation
+
+All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
+compliance with the License. You may obtain a copy of the License
+at http://www.apache.org/licenses/LICENSE-2.0
+
+
+THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
+EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF
+TITLE, FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
+
+
+See the Apache Version 2.0 License for specific language governing permissions and
+limitations under the License.
+
+*/
+
+#ifdef _MANAGED
+#pragma managed
+
+using namespace System::Reflection;
+using namespace System::Runtime::InteropServices;
+
+[assembly: AssemblyTitle("Microsoft.Research.Dryad.DryadLinq.NativeWrapper")];
+[assembly: AssemblyDescription("")];
+[assembly: AssemblyConfiguration("")];
+[assembly: AssemblyCompany("Microsoft Corporation")];
+[assembly: AssemblyProduct("Microsoft.Research.Dryad.DryadLinq.NativeWrapper")];
+[assembly: AssemblyCopyright("Copyright © Microsoft Corporation. All rights reserved.")];
+[assembly: AssemblyTrademark("")];
+[assembly: AssemblyCulture("")];
+
+[assembly: ComVisible(false)];
+
+[assembly: AssemblyVersion("1.0.0.0")];
+[assembly: AssemblyFileVersion("1.0.0.0")];
+#endif
diff --git a/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.cpp b/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.cpp
index 8ff2d94..675edb8 100644
--- a/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.cpp
+++ b/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.cpp
@@ -197,6 +197,14 @@ void ManagedWrapperVertex::Main(WorkQueue* workQueue,
logName = System::IO::Path::Combine(logDirectory, logName);
}
+ int threadsPerWorker = 1;
+ System::String^ threadsPerWorkerStr = System::Environment::GetEnvironmentVariable("DRYAD_THREADS_PER_WORKER");
+ if (threadsPerWorkerStr != nullptr)
+ {
+ threadsPerWorker = Int32::Parse(threadsPerWorkerStr);
+ }
+ DrLogI("ManagedWrapperVertex: threadsPerWorker %u", threadsPerWorker);
+
DrLogI("ManagedWrapperVertex: %p %u %u", nativeInfo, numberOfInputChannels, numberOfOutputChannels);
DrLogI("ManagedWrapperVertex: Calling %s.%s", GetArgument(2), GetArgument(3));
DrLogging::FlushLog();
@@ -215,9 +223,9 @@ void ManagedWrapperVertex::Main(WorkQueue* workQueue,
// The format of vertexBridgeArgs is simply a comma separated string packing vertex assembly, class, method name, and the *actual* vertex method args (==the native channel string)
// L",,,"
//
- System::String^ classFullName = gcnew System::String(GetArgument(2));
- System::String^ assemblyName = classFullName->Substring(0, classFullName->LastIndexOf('.'));
- System::String ^bridgeAssemblyPartialName = gcnew System::String(assemblyName);
+ System::String^ classFullName = gcnew System::String(GetArgument(2));
+ System::String^ assemblyName = classFullName->Substring(0, classFullName->LastIndexOf('.'));
+ System::String ^bridgeAssemblyName = gcnew System::String(assemblyName);
System::String ^bridgeClassName = gcnew System::String(assemblyName + ".Internal.VertexEnv");
System::String ^bridgeMethodName = gcnew System::String(L"VertexBridge");
@@ -240,7 +248,7 @@ void ManagedWrapperVertex::Main(WorkQueue* workQueue,
// ",,,"
//
System::Text::StringBuilder ^vertexBridgeArg = gcnew System::Text::StringBuilder();
- vertexBridgeArg->Append(gcnew System::String(GetArgument(1))); // path to vertex DLL as passed to the vertex host, e.g. L"c:\\HpcTemp\\user\\jobID\\Microsoft.Hpc.Linq0.dll";
+ vertexBridgeArg->Append(gcnew System::String(GetArgument(1))); // path to vertex DLL as passed to the vertex host, e.g. L"\\HpcTemp\\user\\jobID\\Microsoft.Research.DryadLinq0.dll";
vertexBridgeArg->Append(",");
vertexBridgeArg->Append(gcnew System::String(GetArgument(2))); // full name of class that contains vertex entry method, e.g. L"Microsoft.Research.DryadLinq.DryadLinq__Vertex";
vertexBridgeArg->Append(",");
@@ -248,7 +256,6 @@ void ManagedWrapperVertex::Main(WorkQueue* workQueue,
vertexBridgeArg->Append(",");
vertexBridgeArg->Append(vertexMethodArgs->ToString());
-
DrLogI("ManagedWrapperVertex: Calling into Vertex Bridge to invoke Vertex Entry: %s", GetArgument(3));
DrLogging::FlushLog();
@@ -259,12 +266,22 @@ void ManagedWrapperVertex::Main(WorkQueue* workQueue,
//
try
{
- System::Console::WriteLine("Assembly name " + bridgeAssemblyPartialName);
- System::Reflection::Assembly ^vertexBridgeAsm = System::Reflection::Assembly::LoadWithPartialName(bridgeAssemblyPartialName);
- System::Type ^vertexBridgeType = vertexBridgeAsm->GetType(gcnew System::String(bridgeClassName));
- System::Reflection::MethodInfo ^vertexBridgeMethod = vertexBridgeType->GetMethod(gcnew System::String(bridgeMethodName),
- static_cast(System::Reflection::BindingFlags::NonPublic |
- System::Reflection::BindingFlags::Static));
+ System::Reflection::Assembly ^vertexBridgeAsm;
+ try
+ {
+ vertexBridgeAsm = System::Reflection::Assembly::Load(bridgeAssemblyName);
+ }
+ catch (System::Exception ^ie)
+ {
+ DrLogI("ManagedWrapperVertex: Failed to load assembly %s: %s", bridgeAssemblyName, ie->ToString());
+ System::String^ asmLoc = System::IO::Path::Combine("..", bridgeAssemblyName + ".dll");
+ vertexBridgeAsm = System::Reflection::Assembly::LoadFrom(asmLoc);
+ }
+ System::Type ^vertexBridgeType = vertexBridgeAsm->GetType(bridgeClassName);
+ System::Reflection::MethodInfo ^vertexBridgeMethod
+ = vertexBridgeType->GetMethod(bridgeMethodName,
+ static_cast(System::Reflection::BindingFlags::NonPublic |
+ System::Reflection::BindingFlags::Static));
cli::array ^invokeArgs = gcnew array(2);
invokeArgs[0] = logName;
diff --git a/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.vcxproj b/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.vcxproj
index c2c232d..b4ed2af 100644
--- a/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.vcxproj
+++ b/DryadVertex/VertexHost/vertex/managedwrappervertex/ManagedWrapperVertex.vcxproj
@@ -1,5 +1,5 @@
-
-
+
+
Debug
@@ -21,13 +21,13 @@
StaticLibrary
Unicode
true
- v110
+ v120
StaticLibrary
Unicode
true
- v110
+ v120
@@ -99,4 +99,4 @@
-
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj b/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj
index bef34dd..4354712 100644
--- a/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj
+++ b/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj
@@ -1,6 +1,6 @@

-
-
+
+
Debug
@@ -18,19 +18,20 @@
Win32Proj
..\..\..\..\
true
+ VertexHostBody
- Application
+ DynamicLibrary
Unicode
true
- v110
+ v120
- Application
+ DynamicLibrary
Unicode
true
- v110
+ v120
@@ -56,6 +57,13 @@
AllRules.ruleset
+ 16d8f87f
+
+
+ Microsoft.Research.Dryad.$(ProjectName)
+
+
+ Microsoft.Research.Dryad.$(ProjectName)
@@ -112,6 +120,7 @@
+
@@ -124,13 +133,13 @@
-
+
This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
+
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj.filters b/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj.filters
index a2f2e88..ac488a5 100644
--- a/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj.filters
+++ b/DryadVertex/VertexHost/vertex/vertexHost/VertexHost.vcxproj.filters
@@ -2,6 +2,7 @@
+
diff --git a/DryadVertex/VertexHost/vertex/vertexHost/app.config b/DryadVertex/VertexHost/vertex/vertexHost/app.config
index 78537f1..af8b67c 100644
--- a/DryadVertex/VertexHost/vertex/vertexHost/app.config
+++ b/DryadVertex/VertexHost/vertex/vertexHost/app.config
@@ -1,27 +1,43 @@
-
-
-
-
-
+
-
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
+
+
+
diff --git a/DryadVertex/VertexHost/vertex/vertexHost/packages.config b/DryadVertex/VertexHost/vertex/vertexHost/packages.config
index cb331ea..6757d3e 100644
--- a/DryadVertex/VertexHost/vertex/vertexHost/packages.config
+++ b/DryadVertex/VertexHost/vertex/vertexHost/packages.config
@@ -1,4 +1,4 @@

-
+
\ No newline at end of file
diff --git a/DryadVertex/VertexHost/vertex/vertexHost/version.cpp b/DryadVertex/VertexHost/vertex/vertexHost/version.cpp
new file mode 100644
index 0000000..f848aa6
--- /dev/null
+++ b/DryadVertex/VertexHost/vertex/vertexHost/version.cpp
@@ -0,0 +1,40 @@
+/*
+Copyright (c) Microsoft Corporation
+
+All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
+compliance with the License. You may obtain a copy of the License
+at http://www.apache.org/licenses/LICENSE-2.0
+
+
+THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
+EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF
+TITLE, FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
+
+
+See the Apache Version 2.0 License for specific language governing permissions and
+limitations under the License.
+
+*/
+
+#ifdef _MANAGED
+#pragma managed
+
+using namespace System::Reflection;
+using namespace System::Runtime::InteropServices;
+
+[assembly: AssemblyTitle("Microsoft.Research.Dryad.VertexHost")];
+[assembly: AssemblyDescription("")];
+[assembly: AssemblyConfiguration("")];
+[assembly: AssemblyCompany("Microsoft Corporation")];
+[assembly: AssemblyProduct("Microsoft.Research.Dryad.VertexHost")];
+[assembly: AssemblyCopyright("Copyright © Microsoft Corporation. All rights reserved.")];
+[assembly: AssemblyTrademark("")];
+[assembly: AssemblyCulture("")];
+
+[assembly: ComVisible(false)];
+
+[assembly: AssemblyVersion("1.0.0.0")];
+[assembly: AssemblyFileVersion("1.0.0.0")];
+#endif
diff --git a/DryadVertex/VertexHost/vertex/vertexHost/vertexHost.cpp b/DryadVertex/VertexHost/vertex/vertexHost/vertexHost.cpp
index f41e0dc..653ff25 100644
--- a/DryadVertex/VertexHost/vertex/vertexHost/vertexHost.cpp
+++ b/DryadVertex/VertexHost/vertex/vertexHost/vertexHost.cpp
@@ -174,27 +174,27 @@ void SetLoggingLevel()
GetLoggingFileName(logFileName);
DrLogging::Initialize(logFileName);
- WCHAR traceLevel [MAX_PATH];
- HRESULT hr = DrGetEnvironmentVariable(L"DRYAD_TRACE_LEVEL", traceLevel);
+ WCHAR loggingLevel [MAX_PATH];
+ HRESULT hr = DrGetEnvironmentVariable(L"DRYAD_LOGGING_LEVEL", loggingLevel);
if(hr == DrError_OK)
{
- if(wcscmp(traceLevel, L"OFF") == 0)
+ if(wcscmp(loggingLevel, L"OFF") == 0)
{
DrLogging::SetLoggingLevel(LogLevel_Off);
}
- else if(wcscmp(traceLevel, L"CRITICAL") == 0)
+ else if(wcscmp(loggingLevel, L"CRITICAL") == 0)
{
DrLogging::SetLoggingLevel(LogLevel_Assert);
}
- else if(wcscmp(traceLevel, L"ERROR") == 0)
+ else if(wcscmp(loggingLevel, L"ERROR") == 0)
{
DrLogging::SetLoggingLevel(LogLevel_Error);
}
- else if(wcscmp(traceLevel, L"WARN") == 0)
+ else if(wcscmp(loggingLevel, L"WARN") == 0)
{
DrLogging::SetLoggingLevel(LogLevel_Warning);
}
- else if(wcscmp(traceLevel, L"INFO") == 0)
+ else if(wcscmp(loggingLevel, L"INFO") == 0)
{
DrLogging::SetLoggingLevel(LogLevel_Info);
}
@@ -249,104 +249,119 @@ static void ExceptionHandler(System::Object^ sender, System::UnhandledExceptionE
//
// Start up vertex host
//
-[System::Security::SecurityCriticalAttribute]
-[System::Runtime::ExceptionServices::HandleProcessCorruptedStateExceptionsAttribute]
-#if defined(_AMD64_)
-int wmain(int argc, wchar_t* wargv[])
-#else
-int __cdecl wmain(int argc, wchar_t* wargv[])
-#endif
+public ref class VertexHost
{
- try
+public:
+ [System::Security::SecurityCriticalAttribute]
+ [System::Runtime::ExceptionServices::HandleProcessCorruptedStateExceptionsAttribute]
+ static int Main(array^ managedArgs)
{
- //
- // Enable logging based on environment variable
- //
- SetLoggingLevel();
+ try
+ {
+ int argc = managedArgs->Length;
+ wchar_t** wargv = new wchar_t*[argc+1];
+ for (int i=0; i wch = PtrToStringChars(managedArgs[i]);
+ wargv[i] = _wcsdup(wch);
+ }
+ wargv[argc] = NULL;
- DrInitErrorTable();
- DrInitExitCodeTable();
- DrInitLastAccessTable();
+ //
+ // Enable logging based on environment variable
+ //
+ SetLoggingLevel();
- // Set unhandled exception handler to catch anything thrown from
- // managed code
- System::AppDomain^ currentDomain = System::AppDomain::CurrentDomain;
- currentDomain->UnhandledException += gcnew System::UnhandledExceptionEventHandler(ExceptionHandler);
+ DrInitErrorTable();
+ DrInitExitCodeTable();
+ DrInitLastAccessTable();
- //
- // trace for startup
- //
- DrLogI("Vertex Host starting");
+ // Set unhandled exception handler to catch anything thrown from
+ // managed code
+ System::AppDomain^ currentDomain = System::AppDomain::CurrentDomain;
+ currentDomain->UnhandledException += gcnew System::UnhandledExceptionEventHandler(ExceptionHandler);
- //
- // Get environment variable to know whether to break into debugger
- //
- BreakForDebugger();
+ //
+ // trace for startup
+ //
+ DrLogE("Vertex Host starting");
- //
- // We call Register on the Managed Wrapper vertex factory to force its library to be linked.
- // Registration actually occurs during static initialization.
- //
- s_factoryHWrapper.Register();
+ //
+ // Get environment variable to know whether to break into debugger
+ //
+ BreakForDebugger();
- //
- // Get command line arguments
- //
- char** argv;
- DrGetUtf8CommandArgs(argc, wargv, &argv);
+ //
+ // We call Register on the Managed Wrapper vertex factory to force its library to be linked.
+ // Registration actually occurs during static initialization.
+ //
+ s_factoryHWrapper.Register();
- //
- // Initialize the dryad communication layer with the command line arguments
- //
- int nOpts;
- DrError e;
- e = DryadInitialize(argc, argv, &nOpts);
- if (e != DrError_OK)
- {
//
- // Report error in initializing cluster layer
+ // Get command line arguments
//
- DrLogE("Couldn't initialise Cluster");
- return 1;
- }
+ char** argv;
+ DrGetUtf8CommandArgs(argc, wargv, &argv);
- //
- // Update the argument list to just those parameters that weren't used by cluster init
- //
- EliminateArguments(&argc, argv, 1, nOpts);
+ for (int i=0; iLength; ++i)
+ {
+ free(wargv[i]);
+ }
+ delete [] wargv;
- //
- // Call main function to continue execution of vertex
- //
- int exitCode = DryadVertexMain(argc, argv, NULL);
+ //
+ // Initialize the dryad communication layer with the command line arguments
+ //
+ int nOpts;
+ DrError e;
+ e = DryadInitialize(argc, argv, &nOpts);
+ if (e != DrError_OK)
+ {
+ //
+ // Report error in initializing cluster layer
+ //
+ DrLogE("Couldn't initialise Cluster");
+ return 1;
+ }
- //
- // Close the cluster connection after dryadvertexmain returns
- //
- e = DryadShutdown();
- if (e == DrError_OK)
- {
//
- // Report success
+ // Update the argument list to just those parameters that weren't used by cluster init
//
- DrLogI("Completed uninitialise cluster");
- }
- else
- {
+ EliminateArguments(&argc, argv, 1, nOpts);
+
//
- // Report failure
+ // Call main function to continue execution of vertex
//
- DrLogE("Couldn't uninitialise cluster");
- }
+ int exitCode = DryadVertexMain(argc, argv, NULL);
- return exitCode;
- }
- catch (System::Exception^ e)
- {
- DrLogA("Unhandled exception: %s", DrString(e->ToString()).GetChars());
- return 1;
+ //
+ // Close the cluster connection after dryadvertexmain returns
+ //
+ e = DryadShutdown();
+ if (e == DrError_OK)
+ {
+ //
+ // Report success
+ //
+ DrLogI("Completed uninitialise cluster");
+ }
+ else
+ {
+ //
+ // Report failure
+ //
+ DrLogE("Couldn't uninitialise cluster");
+ }
+
+ return exitCode;
+ }
+ catch (System::Exception^ e)
+ {
+ DrLogA("Unhandled exception: %s", DrString(e->ToString()).GetChars());
+ return 1;
+ }
}
-}
+};
//
// Simple data class which contains the byte array and its length.
diff --git a/GraphManager/GraphManager.vcxproj b/GraphManager/GraphManager.vcxproj
index 5304f03..c63ae5c 100644
--- a/GraphManager/GraphManager.vcxproj
+++ b/GraphManager/GraphManager.vcxproj
@@ -1,6 +1,9 @@

-
-
+
+
+
+
+
Debug
@@ -25,13 +28,13 @@
DynamicLibrary
Pure
Unicode
- v110
+ v120
DynamicLibrary
Unicode
Pure
- v110
+ v120
@@ -59,6 +62,7 @@
Microsoft.Research.Dryad
Microsoft.Research.Dryad
+ e2c2ee05
@@ -223,13 +227,22 @@
-
+
+
+
+
This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/GraphManager/filesystem/DrAzureBlobClient.h b/GraphManager/filesystem/DrAzureBlobClient.h
index 6f69342..91070b1 100644
--- a/GraphManager/filesystem/DrAzureBlobClient.h
+++ b/GraphManager/filesystem/DrAzureBlobClient.h
@@ -20,7 +20,7 @@ limitations under the License.
#pragma once
-using namespace Microsoft::Research::Peloponnese::Storage;
+using namespace Microsoft::Research::Peloponnese::Azure;
DRCLASS(DrAzureInputStream) : public DrInputStream
{
diff --git a/GraphManager/filesystem/DrHdfsClient.cpp b/GraphManager/filesystem/DrHdfsClient.cpp
index 2710da8..1eb9c3e 100644
--- a/GraphManager/filesystem/DrHdfsClient.cpp
+++ b/GraphManager/filesystem/DrHdfsClient.cpp
@@ -129,73 +129,42 @@ DrHdfsInputStream::DrHdfsInputStream()
m_hdfsInstance = DrNull;
}
-HRESULT DrHdfsInputStream::Open(DrUniversePtr universe, DrNativeString streamUri)
+HRESULT DrHdfsInputStream::Open(DrUniversePtr universe, DrNativeString streamUri, DrNativeString recordType)
{
DrString uri = DrString(streamUri);
+ DrString record = DrString(recordType);
- DrLogI("Opening instance for %s", uri.GetChars());
+ DrLogI("Opening instance for %s record type %s", uri.GetChars(), record.GetChars());
- return OpenInternal(universe, uri);
+ return OpenInternal(universe, uri, record);
}
-HRESULT DrHdfsInputStream::OpenInternal(DrUniversePtr universe, DrString streamUri)
+#ifdef _MANAGED
+HRESULT DrHdfsInputStream::OpenInternal(DrUniversePtr universe, DrString streamUri, DrString recordType)
{
m_streamUri = streamUri;
HRESULT err = S_OK;
-#ifdef _MANAGED
-
try
{
-#endif
-
- DrLogI("Opening instance for %s", streamUri.GetChars());
+ DrLogI("Opening instance for %s: %s", streamUri.GetChars(), recordType.GetChars());
m_hdfsInstance = GetHdfsServiceInstance(streamUri);
-
-#ifdef _MANAGED
- DrLogI("Getting file info for %s", streamUri.GetChars());
+
HdfsFileInfo^ stream = m_hdfsInstance->GetFileInfo(streamUri.GetString(), true);
m_fileNameArray = stream->fileNameArray;
- UInt32 totalPartitionCount = static_cast(stream->blockArray->Length);
+ UInt32 totalPartitionCount;
-#else
- bool ret = HdfsBridgeNative::Initialize();
- if (!ret)
+ if (recordType.Compare("Microsoft.Research.DryadLinq.LineRecord") == 0)
{
- DrLogE("Error calling HdfsBridgeNative::Initialize()");
- return E_FAIL;
- }
-
- if (m_hdfsInstance == NULL)
- {
- DrLogE("Error calling GetHdfsServiceInstance(streamUri)");
- return E_FAIL;
+ DrLogI("Getting block-level file info for %s", streamUri.GetChars());
+ totalPartitionCount = static_cast(stream->blockArray->Length);
}
- URL_COMPONENTSA UrlComponents = {0};
- UrlComponents.dwStructSize = sizeof(UrlComponents);
- UrlComponents.dwUrlPathLength = 1;
- UrlComponents.dwHostNameLength = 1;
-
- BOOL fOK = InternetCrackUrlA(streamUri.GetChars(), streamUri.GetCharsLength(), 0, &UrlComponents);
- if (!fOK)
+ else
{
- DrLogE("Error getting stream path from HDFS URI.");
- return E_FAIL;
+ DrLogI("Getting file info for %s", streamUri.GetChars());
+ totalPartitionCount = m_fileNameArray->Length;
}
-
- m_hostname.Set(UrlComponents.lpszHostName);
- m_portNum = UrlComponents.nPort;
-
- InstanceAccessor ia(m_hdfsInstance);
- FileStat* fileStat = NULL;
- ia.OpenFileStat(UrlComponents.lpszUrlPath, true, &fileStat);
- UINT32 totalPartitionCount = 0;
- HdfsBridgeNative::FileStatAccessor fs(fileStat);
- totalPartitionCount = fs.GetNumberOfBlocks();
-
- m_fileNameArray = (const char **)fs.GetFileNameArray();
-#endif
DrLogI("Partition count %d", totalPartitionCount);
@@ -205,35 +174,97 @@ HRESULT DrHdfsInputStream::OpenInternal(DrUniversePtr universe, DrString streamU
m_partOffsets = DrNew DrUINT64Array(totalPartitionCount);
m_partFileIds = DrNew DrUINT32Array(totalPartitionCount);
- for (UINT32 i=0; iblockArray[i];
-#else
- HdfsBridgeNative::HdfsBlockLocInfo* partition = fs.GetBlockInfo(i);
-#endif
- m_affinity[i] = DrNew DrAffinity();
- m_affinity[i]->SetWeight(partition->Size);
- m_partOffsets[i] = partition->Offset;
- m_partFileIds[i] = partition->fileIndex;
+ for (UINT32 i = 0; i < totalPartitionCount; ++i)
+ {
+ HdfsBlockInfo^ partition = stream->blockArray[i];
+ m_affinity[i] = DrNew DrAffinity();
+ m_affinity[i]->SetWeight(partition->Size);
+ m_partOffsets[i] = partition->Offset;
+ m_partFileIds[i] = partition->fileIndex;
-#ifdef _MANAGED
- for (int j = 0; j < partition->Hosts->Length; ++j)
-#else
- for (int j = 0; j < partition->numberOfHosts; ++j)
-#endif
+ for (int j = 0; j < partition->Hosts->Length; ++j)
+ {
+ DrResourceRef location = universe->LookUpResource(partition->Hosts[j]);
+ if (location != DrNull)
+ {
+ m_affinity[i]->AddLocality(location);
+ }
+ }
+ }
+ }
+ else
+ {
+ int fileBlockIndex = 0;
+ for (UINT32 i = 0; i < totalPartitionCount; ++i)
{
- DrResourceRef location = universe->LookUpResource(partition->Hosts[j]);
- if (location != DrNull)
+ m_partOffsets[i] = 0;
+ m_partFileIds[i] = i;
+
+ HdfsBlockInfo^ partition = stream->blockArray[fileBlockIndex];
+ DrAssert(partition->fileIndex == i);
+
+ long long fileSize = partition->Size;
+
+ HashSet^ locations = DrNew HashSet();
+ for (int j = 0; j < partition->Hosts->Length; ++j)
{
- m_affinity[i]->AddLocality(location);
+ DrResourceRef location = universe->LookUpResource(partition->Hosts[j]);
+ if (location != DrNull)
+ {
+ locations->Add(location);
+ }
}
+
+ ++fileBlockIndex;
+
+ while (fileBlockIndex < stream->blockArray->Length && stream->blockArray[fileBlockIndex]->fileIndex == i)
+ {
+ partition = stream->blockArray[fileBlockIndex];
+ fileSize += partition->Size;
+
+ if (locations->Count > 0)
+ {
+ HashSet^ newLocations = DrNew HashSet();
+ for (int j = 0; j < partition->Hosts->Length; ++j)
+ {
+ DrResourceRef location = universe->LookUpResource(partition->Hosts[j]);
+ if (location != DrNull)
+ {
+ newLocations->Add(location);
+ }
+ }
+
+ locations->IntersectWith(newLocations);
+ }
+ }
+
+ m_affinity[i] = DrNew DrAffinity();
+ m_affinity[i]->SetWeight(fileSize);
+
+ System::Text::StringBuilder^ locationText;
+ if (locations->Count > 0)
+ {
+ locationText = gcnew System::Text::StringBuilder("File " + m_fileNameArray[i] + " merged locations:");
+ }
+ else
+ {
+ locationText = gcnew System::Text::StringBuilder("File " + m_fileNameArray[i] + " no shared locations");
+ }
+
+ HashSet::Enumerator^ enumerator = locations->GetEnumerator();
+ while (enumerator->MoveNext())
+ {
+ m_affinity[i]->AddLocality(enumerator->Current);
+ locationText->Append(" ");
+ locationText->Append(enumerator->Current->GetName().GetString());
+ }
+
+ DrString locationLog(locationText->ToString());
+ DrLogI("%s", locationLog.GetChars());
}
-#ifndef _MANAGED
- delete partition;
-#endif
- }
-#ifdef _MANAGED
+ }
}
catch (System::Exception ^e)
{
@@ -247,10 +278,85 @@ HRESULT DrHdfsInputStream::OpenInternal(DrUniversePtr universe, DrString streamU
// TODO: How do we clean this up?
//hdfsInstance->Discard();
}
-#endif
return err;
}
+#else
+HRESULT DrHdfsInputStream::OpenInternal(DrUniversePtr universe, DrString streamUri, DrString recordType)
+{
+ m_streamUri = streamUri;
+ HRESULT err = S_OK;
+
+
+ DrLogI("Opening instance for %s", streamUri.GetChars());
+ m_hdfsInstance = GetHdfsServiceInstance(streamUri);
+
+ bool ret = HdfsBridgeNative::Initialize();
+ if (!ret)
+ {
+ DrLogE("Error calling HdfsBridgeNative::Initialize()");
+ return E_FAIL;
+ }
+
+ if (m_hdfsInstance == NULL)
+ {
+ DrLogE("Error calling GetHdfsServiceInstance(streamUri)");
+ return E_FAIL;
+ }
+ URL_COMPONENTSA UrlComponents = {0};
+ UrlComponents.dwStructSize = sizeof(UrlComponents);
+ UrlComponents.dwUrlPathLength = 1;
+ UrlComponents.dwHostNameLength = 1;
+
+ BOOL fOK = InternetCrackUrlA(streamUri.GetChars(), streamUri.GetCharsLength(), 0, &UrlComponents);
+ if (!fOK)
+ {
+ DrLogE("Error getting stream path from HDFS URI.");
+ return E_FAIL;
+ }
+
+ m_hostname.Set(UrlComponents.lpszHostName);
+ m_portNum = UrlComponents.nPort;
+
+ InstanceAccessor ia(m_hdfsInstance);
+ FileStat* fileStat = NULL;
+ ia.OpenFileStat(UrlComponents.lpszUrlPath, true, &fileStat);
+ UINT32 totalPartitionCount = 0;
+ HdfsBridgeNative::FileStatAccessor fs(fileStat);
+ totalPartitionCount = fs.GetNumberOfBlocks();
+
+ m_fileNameArray = (const char **)fs.GetFileNameArray();
+
+ DrLogI("Partition count %d", totalPartitionCount);
+
+ /* Allocate these arrays even if they're size 0, to avoid
+ NullReferenceException later */
+ m_affinity = DrNew DrAffinityArray(totalPartitionCount);
+ m_partOffsets = DrNew DrUINT64Array(totalPartitionCount);
+ m_partFileIds = DrNew DrUINT32Array(totalPartitionCount);
+
+ for (UINT32 i=0; iSetWeight(partition->Size);
+ m_partOffsets[i] = partition->Offset;
+ m_partFileIds[i] = partition->fileIndex;
+
+ for (int j = 0; j < partition->numberOfHosts; ++j)
+ {
+ DrResourceRef location = universe->LookUpResource(partition->Hosts[j]);
+ if (location != DrNull)
+ {
+ m_affinity[i]->AddLocality(location);
+ }
+ }
+ delete partition;
+ }
+
+ return err;
+}
+#endif
DrNativeString DrHdfsInputStream::GetError()
{
@@ -468,6 +574,25 @@ HRESULT DrHdfsOutputStream::FinalizeSuccessfulParts(DrOutputPartitionArrayRef pa
errorText.SetF("%s", DrString(m_error).GetChars());
return E_FAIL;
}
+
+ String^ userName = Environment::GetEnvironmentVariable("USER");
+ if (userName == nullptr)
+ {
+ userName = Environment::UserName;
+ }
+ try
+ {
+ m_hdfsInstance->SetOwnerAndPermission(m_baseUri, userName, nullptr, Convert::ToInt16("0755", 8));
+ }
+ catch (Exception^ e)
+ {
+ DrString drDst(m_baseUri);
+ DrString err(e->ToString());
+ DrLogE("Can't set %s permissions finalizing HDFS output: %s", drDst.GetChars(), err.GetChars());
+ m_error = "Can't set " + m_baseUri + " permissions finalizing HDFS output: " + e->ToString();
+ errorText.SetF("%s", DrString(m_error).GetChars());
+ return E_FAIL;
+ }
}
else
{
diff --git a/GraphManager/filesystem/DrHdfsClient.h b/GraphManager/filesystem/DrHdfsClient.h
index 28eaa7c..1e2b2a8 100644
--- a/GraphManager/filesystem/DrHdfsClient.h
+++ b/GraphManager/filesystem/DrHdfsClient.h
@@ -30,8 +30,8 @@ DRCLASS(DrHdfsInputStream) : public DrInputStream
{
public:
DrHdfsInputStream();
- HRESULT Open(DrUniversePtr universe, DrNativeString streamUri);
- HRESULT OpenInternal(DrUniversePtr universe, DrString streamUri);
+ HRESULT Open(DrUniversePtr universe, DrNativeString streamUri, DrNativeString recordType);
+ HRESULT OpenInternal(DrUniversePtr universe, DrString streamUri, DrString recordType);
DrNativeString GetError();
virtual DrString GetStreamName() DROVERRIDE;
diff --git a/GraphManager/filesystem/DrPartitionFile.cpp b/GraphManager/filesystem/DrPartitionFile.cpp
index ebdd9e5..02255c0 100644
--- a/GraphManager/filesystem/DrPartitionFile.cpp
+++ b/GraphManager/filesystem/DrPartitionFile.cpp
@@ -79,6 +79,7 @@ static bool ParseReplicatedFromPartitionLine(int partitionNumber,
DrStringR remoteName,
DrPartitionInputStream::OverridePtr over,
bool mustOverride,
+ bool pathIsRooted,
DrString line,
DrUniversePtr universe)
{
@@ -112,8 +113,16 @@ static bool ParseReplicatedFromPartitionLine(int partitionNumber,
sep = lineCopy.IndexOfChar(',');
if (sep == DrStr_InvalidIndex)
{
- DrLogW("Malformed line %s: no list of machines", line.GetChars());
- return false;
+ int n = sscanf_s(lineCopy.GetChars(), "%I64u", &parsedSize);
+ if (n != 1)
+ {
+ DrLogW("Malformed line %s: can't parse size", line.GetChars());
+ return false;
+ }
+
+ affinity->SetWeight(parsedSize);
+
+ lineCopy = DrString("");
}
else
{
@@ -135,8 +144,14 @@ static bool ParseReplicatedFromPartitionLine(int partitionNumber,
if (lineCopy.GetCharsLength() == 0)
{
- DrLogW("Malformed line %s: no partition machines", line.GetChars());
- return false;
+ if (!pathIsRooted || mustOverride)
+ {
+ DrLogW("Malformed line %s: no partition machines", line.GetChars());
+ return false;
+ }
+
+ remoteName.Set(" %Invalid% ");
+ return true;
}
int numberOfReplicas = 0;
@@ -204,7 +219,7 @@ HRESULT DrPartitionInputStream::Open(DrUniversePtr universe, DrNativeString stre
HRESULT DrPartitionInputStream::OpenInternal(DrUniversePtr universe, DrString streamName)
{
HRESULT err = S_OK;
- DrLogI("Opening input file %s", streamName.GetChars(), DRERRORSTRING(err));
+ DrLogI("Opening input file %s", streamName.GetChars(), DRERRORSTRING(err));
FILE* f;
errno_t ferr = fopen_s(&f, streamName.GetChars(), "rb");
if (ferr != 0)
@@ -231,6 +246,12 @@ HRESULT DrPartitionInputStream::OpenInternal(DrUniversePtr universe, DrString st
mustOverride = true;
}
+ bool pathIsRooted = false;
+ if (m_pathNameOnComputer.IndexOfChar(':') != DrStr_InvalidIndex)
+ {
+ pathIsRooted = true;
+ }
+
int numberOfParts;
int n = sscanf_s(partitionSizeLine.GetChars(), "%d", &numberOfParts);
if (n != 1)
@@ -280,6 +301,7 @@ HRESULT DrPartitionInputStream::OpenInternal(DrUniversePtr universe, DrString st
remoteName,
m_override[i],
mustOverride,
+ pathIsRooted,
partitionLine,
universe) == false)
{
diff --git a/GraphManager/jobmanager/version.cpp b/GraphManager/jobmanager/version.cpp
index 9a08424..3f721bc 100644
--- a/GraphManager/jobmanager/version.cpp
+++ b/GraphManager/jobmanager/version.cpp
@@ -19,5 +19,22 @@ limitations under the License.
*/
#ifdef _MANAGED
-[assembly:System::Runtime::InteropServices::ComVisible(false)];
+#pragma managed
+
+using namespace System::Reflection;
+using namespace System::Runtime::InteropServices;
+
+[assembly: AssemblyTitle("Microsoft.Research.Dryad")];
+[assembly: AssemblyDescription("")];
+[assembly: AssemblyConfiguration("")];
+[assembly: AssemblyCompany("Microsoft Corporation")];
+[assembly: AssemblyProduct("Microsoft.Research.Dryad")];
+[assembly: AssemblyCopyright("Copyright © Microsoft Corporation. All rights reserved.")];
+[assembly: AssemblyTrademark("")];
+[assembly: AssemblyCulture("")];
+
+[assembly: ComVisible(false)];
+
+[assembly: AssemblyVersion("1.0.0.0")];
+[assembly: AssemblyFileVersion("1.0.0.0")];
#endif
diff --git a/GraphManager/packages.config b/GraphManager/packages.config
index cb331ea..ffbc528 100644
--- a/GraphManager/packages.config
+++ b/GraphManager/packages.config
@@ -1,4 +1,7 @@

-
+
+
+
+
\ No newline at end of file
diff --git a/GraphManager/reporting/DrCalypsoReporting.cpp b/GraphManager/reporting/DrCalypsoReporting.cpp
index 092ec26..4456282 100644
--- a/GraphManager/reporting/DrCalypsoReporting.cpp
+++ b/GraphManager/reporting/DrCalypsoReporting.cpp
@@ -24,7 +24,8 @@ limitations under the License.
#include
using namespace System::IO;
-using namespace Microsoft::Research::Peloponnese::Storage;
+using namespace Microsoft::Research::Peloponnese::Azure;
+using namespace Microsoft::Research::Peloponnese::Hdfs;
using namespace msclr;
//
@@ -126,7 +127,7 @@ ref class PeloponneseLogger : public Microsoft::Research::Peloponnese::ILogger
DrCalypsoReporter::DrCalypsoReporter(DrNativeString uriString)
{
System::Uri^ uri = DrNew System::Uri(uriString);
- if (uri->Scheme == AzureUtils::BlobScheme)
+ if (uri->Scheme == Utils::BlobScheme)
{
m_logStream = DrNew AzureLogAppendStream(uri, 0x20, false, false, gcnew PeloponneseLogger());
m_flushInterval = 1000;
diff --git a/GraphManager/vertex/DrVertex.cpp b/GraphManager/vertex/DrVertex.cpp
index 723745d..f650d37 100644
--- a/GraphManager/vertex/DrVertex.cpp
+++ b/GraphManager/vertex/DrVertex.cpp
@@ -1743,7 +1743,7 @@ void DrTeeVertex::ReactToFailedVertex(DrVertexOutputGeneratorPtr failedGenerator
DrAssert(m_inputEdges->GetNumberOfEdges() == 1);
DrEdge e = m_inputEdges->GetEdge(0);
- DrLogI("Tee vertex %d.%d: calling ReactToFailedVertex on remote edge", this->m_id, GetVersion());
+ DrLogI("Tee vertex %d.%d: calling ReactToFailedVertex on remote edge", this->m_id, failedGenerator->GetVersion());
e.m_remoteVertex->ReactToFailedVertex(failedGenerator, DrNull, DrNull, DrNull, originalReason);
/* fill in a new generator if it's already there, e.g. if the upstream vertex is a DrStorageVertex */
diff --git a/JobBrowser/Calypso/Properties/AssemblyInfo.cs b/JobBrowser/Calypso/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..b8cdbbd
--- /dev/null
+++ b/JobBrowser/Calypso/Properties/AssemblyInfo.cs
@@ -0,0 +1,56 @@
+
+/*
+Copyright (c) Microsoft Corporation
+
+All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
+compliance with the License. You may obtain a copy of the License
+at http://www.apache.org/licenses/LICENSE-2.0
+
+
+THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
+EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF
+TITLE, FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
+
+
+See the Apache Version 2.0 License for specific language governing permissions and
+limitations under the License.
+
+*/
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("DryadAnalysis")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("MSIT")]
+[assembly: AssemblyProduct("DryadAnalysis")]
+[assembly: AssemblyCopyright("Copyright © MSIT 2008")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("286845bc-d060-49ff-9de8-4381471a0dc6")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/JobBrowser/JOM/ClusterConfiguration.cs b/JobBrowser/JOM/ClusterConfiguration.cs
index 2b897fa..1eec0e2 100644
--- a/JobBrowser/JOM/ClusterConfiguration.cs
+++ b/JobBrowser/JOM/ClusterConfiguration.cs
@@ -19,480 +19,27 @@
*/
+
using System.Security.Cryptography.X509Certificates;
-using System.Xml.Linq;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
-using System.Text;
using System.Net;
+using System.Threading;
+using Microsoft.Research.Peloponnese.Azure;
+using Microsoft.Research.Peloponnese.Hdfs;
+using Microsoft.Research.Peloponnese.Shared;
+using Microsoft.Research.Peloponnese.WebHdfs;
+using Microsoft.Research.Peloponnese.Yarn;
-using Microsoft.Research.Peloponnese.Storage;
+
+using System.Xml.Linq;
using Microsoft.Research.Tools;
using Microsoft.WindowsAzure.Management.HDInsight;
namespace Microsoft.Research.JobObjectModel
{
- ///
- /// Error during conversation with cluster.
- ///
- public sealed class ClusterException : Exception
- {
- ///
- /// Create an exception about handling a cluster.
- ///
- /// Exception message.
- public ClusterException(string message) : base(message) { }
- }
-
- ///
- /// Identifier for a Dryad process; for now we are using globally-unique process GUID.
- /// A Dryad Job identifier is always the same as the job manager process guid.
- ///
- [Serializable]
- public class DryadProcessIdentifier : IEquatable<DryadProcessIdentifier>
- {
- ///
- /// Process identifier.
- ///
- private string processIdentifier;
-
- ///
- /// Used just for XML serialization.
- ///
- public DryadProcessIdentifier()
- { }
-
- ///
- /// create an indentifier struct.
- /// Be sure to initialize all fields before use;
- ///
- /// The id of the process (platform-dependent).
- public DryadProcessIdentifier(string pid)
- {
- this.processIdentifier = pid;
- }
-
- ///
- /// Process identifier; public for serialization only.
- ///
- public string ProcessIdentifier
- {
- get { return this.processIdentifier; }
- set { this.processIdentifier = value; }
- }
-
- ///
- /// If true the process identifier is not known.
- ///
- public bool IsUnknown { get { return this.ProcessIdentifier == null; } }
-
- ///
- /// Human-readable description of the process identifier.
- ///
- /// An empty string if the pid is not set.
- public override string ToString()
- {
- if (this.ProcessIdentifier != null) return this.ProcessIdentifier;
- return "";
- }
-
- ///
- /// Equality test.
- ///
- /// Object to compare to.
- /// True if both objects represent the same process id.
- public override bool Equals(object obj)
- {
- if (!(obj is DryadProcessIdentifier))
- return false;
- return this.Equals((DryadProcessIdentifier)obj);
- }
-
- #region IEquatable<DryadProcessIdentifier> Members
- ///
- /// Equality test.
- ///
- /// Process id to compare to.
- /// True if the id's represent the same process.
- public bool Equals(DryadProcessIdentifier other)
- {
- if (this.IsUnknown)
- return other.IsUnknown;
- if (other.IsUnknown)
- return false;
- return this.ProcessIdentifier.Equals(other.ProcessIdentifier);
- }
-
- ///
- /// Overriden implementation of getHashCode.
- ///
- /// The hashcode of the process id.
- public override int GetHashCode()
- {
- // ReSharper disable once BaseObjectGetHashCodeCallInGetHashCode
- return base.GetHashCode();
- }
- #endregion
- }
-
- ///
- /// Brief summary of an executed DryadLINQ job.
- ///
- [Serializable]
- public sealed class DryadLinqJobSummary : IEquatable<DryadLinqJobSummary>
- {
- ///
- /// Empty constructor for XML serialization.
- ///
- public DryadLinqJobSummary()
- { }
-
- ///
- /// Initialize a job summary.
- ///
- /// Cluster where the job ran.
- /// A string corresponding to the type of ClusterConfiguration.
- /// Machine where job manager ran.
- /// Id of job.
- /// Guid of job manager process.
- /// Id of job on the cluster.
- /// Friendly name used.
- /// Who ran the job.
- /// Start date (not completion date).
- /// Job status.
- /// Estimated end running time.
- /// Virtual cluster where job ran.
- public DryadLinqJobSummary(
- string cluster,
- ClusterConfiguration.ClusterType clusterType,
- string virtualcluster,
- string machine,
- string jobId,
- string clusterJobId,
- DryadProcessIdentifier jmProcessGuid,
- string friendlyname,
- string username,
- DateTime date,
- DateTime endTime,
- ClusterJobInformation.ClusterJobStatus status)
- {
- this.VirtualCluster = virtualcluster;
- this.Cluster = cluster;
- this.ClusterType = clusterType;
- this.Machine = machine;
- this.Name = friendlyname;
- this.User = username;
- this.Date = date;
- this.EndTime = endTime;
- this.Status = status;
- this.ManagerProcessGuid = jmProcessGuid;
- this.JobID = jobId;
- this.ClusterJobId = clusterJobId;
- }
-
- ///
- /// Cluster where the job ran.
- ///
- public string Cluster { get; /*private*/ set; }
- ///
- /// Id of cluster job that originated this DryadLinq job (can be used to find the cluster job from the dryadlinq job).
- ///
- public string ClusterJobId { get; /*private*/ set; }
- ///
- /// Cluster where the job ran.
- ///
- public DateTime EndTime { get; /*private*/ set; }
- ///
- /// String describing cluster type.
- ///
- public ClusterConfiguration.ClusterType ClusterType { get; /*private*/ set; }
- ///
- /// Virtual cluster where job ran.
- ///
- public string VirtualCluster { get; /*private*/ set; }
- ///
- /// (Friendly) name of the job.
- ///
- public string Name { get; /*private*/ set; }
- ///
- /// User who submitted job.
- ///
- public string User { get; /*private*/ set; }
- ///
- /// ID of job on the cluster.
- ///
- public string JobID { get; /*private*/ set; }
- ///
- /// The Guid of the job manager process.
- ///
- public DryadProcessIdentifier ManagerProcessGuid { set; get; }
-
- ///
- /// User who submitted job.
- ///
- public string GetAlias()
- {
- int pos = User.IndexOf(@"\");
- return User.Substring(pos + 1);
- }
-
- ///
- /// Date when job was submitted.
- ///
- public DateTime Date { get; /*private*/ set; }
-
- ///
- /// Did the job fail?
- ///
- public ClusterJobInformation.ClusterJobStatus Status { get; /*internal*/ set; }
-
- ///
- /// Machine where the job manager ran.
- ///
- public string Machine { get; /*private*/ set; }
-
- ///
- /// Get a short name for this job summary.
- ///
- /// Short name of job summary.
- public string ShortName()
- {
- // we use the starting time to uniquify the job name
- return this.Date.ToString("s") + "-" + this.Name;
- }
-
- ///
- /// True if these two summaries are the same. The status and end time do not matter, since the job may still be running.
- ///
- /// Summary to compare against.
- /// True if they are equal.
- public bool Equals(DryadLinqJobSummary other)
- {
- return this.Cluster == other.Cluster &&
- this.ClusterJobId == other.ClusterJobId &&
- this.Date == other.Date &&
- this.Machine == other.Machine &&
- this.Name == other.Name &&
- this.User == other.User;
- }
-
- ///
- /// Hashcode proper for the equality test.
- ///
- /// The object hashcode.
- public override int GetHashCode()
- {
- return this.ClusterJobId.GetHashCode() ^ this.ClusterJobId.GetHashCode() ^ this.Date.GetHashCode() ^ this.Machine.GetHashCode() ^ this.Name.GetHashCode() ^ this.User.GetHashCode();
- }
-
- ///
- /// A string describing the unique identifying part of the summary.
- /// Two different summaries may represent the same job at different times.
- ///
- /// The part common to all jobs.
- public string AsIdentifyingString()
- {
- StringBuilder builder = new StringBuilder();
- builder.AppendFormat("Cluster={0} ClusterJobID={1} Date={2} Machine={3} Name={4} User={5}",
- this.Cluster, this.ClusterJobId, this.Date, this.Machine, this.Name, this.User);
- return builder.ToString();
- }
- }
-
- ///
- /// This class is an abstraction of a cluster-level job, as opposed to a DryadLINQ job.
- /// In Cosmos that's called a task, in HPC that's called a Job.
- /// (In cosmos a task is a recurring job. In DryadLINQ running on top of cosmos, a task always contains exactly one job.)
- ///
- public class ClusterJobInformation : IEquatable<ClusterJobInformation>
- {
- ///
- /// Status of a cluster job.
- ///
- public enum ClusterJobStatus
- {
- ///
- /// Job is still running.
- ///
- Running,
- ///
- /// Job has finished successfully.
- ///
- Succeeded,
- ///
- /// Job has finished and has failed.
- ///
- Failed,
- ///
- /// Job has been cancelled. Not precise on cosmos clusters.
- ///
- Cancelled,
- ///
- /// Could not determine job status.
- ///
- Unknown,
- };
-
- ///
- /// True if job is finished, false if not, or unknown.
- ///
- /// Job status.
- /// True if the job is no longer running.
- public static bool JobIsFinished(ClusterJobStatus status)
- {
- switch (status)
- {
- case ClusterJobInformation.ClusterJobStatus.Failed:
- case ClusterJobInformation.ClusterJobStatus.Succeeded:
- case ClusterJobInformation.ClusterJobStatus.Cancelled:
- return true;
- case ClusterJobInformation.ClusterJobStatus.Running:
- case ClusterJobInformation.ClusterJobStatus.Unknown:
- return false;
- default:
- throw new InvalidDataException("Invalid job status " + status);
- }
- }
-
- ///
- /// Create a cluster job structure from a bunch of information.
- ///
- /// Cluster where the job is running.
- /// Cluster job guid.
- /// Name of the cluster job.
- /// User who submitted cluster job.
- /// Last execution of cluster job.
- /// Execution status.
- /// Time the job ran.
- /// Cluster where the job has run.
- public ClusterJobInformation(
- string cluster,
- string virtualCluster,
- string clusterJobGuid,
- string jobName,
- string username,
- DateTime date,
- TimeSpan runningTime,
- ClusterJobStatus status)
- {
- this.VirtualCluster = virtualCluster;
- this.Cluster = cluster;
- this.ClusterJobID = clusterJobGuid;
- this.Name = jobName;
- this.User = username;
- this.Date = date;
- this.EstimatedRunningTime = runningTime;
- this.Status = status;
- this.JobSummary = null;
- }
-
- ///
- /// Name of cluster job.
- ///
- public string Name { get; set; }
- ///
- /// Uset who submitted cluster job.
- ///
- public string User { get; set; }
- ///
- /// Date when job was submitted.
- ///
- public DateTime Date { get; set; }
- ///
- /// ID of Job on cluster.
- ///
- public string ClusterJobID { get; set; }
- ///
- /// Status of the execution.
- ///
- public ClusterJobStatus Status { get; set; }
- ///
- /// Cluster where the job ran.
- ///
- public string Cluster { get; set; }
- ///
- /// In some installations a cluster is composed of multiple virtual clusters.
- ///
- public string VirtualCluster { get; set; }
- ///
- /// Is the cluster job information still available on the cluster?
- ///
- public bool IsUnavailable { get; set; }
- ///
- /// Cache here the associated job, if available. Null if not cached.
- ///
- private DryadLinqJobSummary JobSummary { get; set; }
- ///
- /// Estimated time the job ran.
- ///
- public TimeSpan EstimatedRunningTime { get; set; }
-
- ///
- /// If known, set the associated job summary.
- ///
- /// Job summary for this cluster job.
- public void SetAssociatedSummary(DryadLinqJobSummary summary)
- {
- this.JobSummary = summary;
- }
-
- ///
- /// Discover the dryadlinq job associated with a cluster job.
- ///
- /// Cluster configuration.
- /// The job, if any
- /// Delegate used to report errors.
- public DryadLinqJobSummary DiscoverDryadLinqJob(ClusterStatus status, StatusReporter reporter)
- {
- if (this.IsUnavailable)
- return null;
- if (this.JobSummary != null)
- return this.JobSummary;
-
- DryadLinqJobSummary j = status.DiscoverDryadLinqJobFromClusterJob(this, reporter);
- if (j == null)
- {
- this.IsUnavailable = true;
- }
- return this.JobSummary = j;
- }
-
- ///
- /// Copy the content of a cluster job.
- ///
- /// The value to copy from.
- internal void Copy(ClusterJobInformation refresh)
- {
- this.Name = refresh.Name;
- this.Status = refresh.Status;
- this.User = refresh.User;
- this.JobSummary = refresh.JobSummary;
- this.ClusterJobID = refresh.ClusterJobID;
- this.Date = refresh.Date;
- this.IsUnavailable = refresh.IsUnavailable;
- this.VirtualCluster = refresh.VirtualCluster;
- this.EstimatedRunningTime = refresh.EstimatedRunningTime;
- }
-
- ///
- /// True if these two records represent the same job.
- ///
- ///
- ///
- public bool Equals(ClusterJobInformation other)
- {
- return
- this.Cluster == other.Cluster &&
- this.VirtualCluster == other.VirtualCluster &&
- this.Name == other.Name &&
- this.User == other.User &&
- this.ClusterJobID == other.ClusterJobID &&
- this.Date == other.Date;
- }
- }
-
///
/// Serializable properties key-value pairs.
///
@@ -557,7 +104,26 @@ public ClusterConfiguration Create()
for (int i = 0; i < this.Properties.Count; i++)
{
var property = config.GetType().GetProperty(this.Properties[i].Property);
- property.SetValue(config, this.Properties[i].Value);
+
+ string stringValue = this.Properties[i].Value;
+ object value;
+ if (property.PropertyType == typeof (int))
+ {
+ value = int.Parse(stringValue);
+ }
+ else if (property.PropertyType == typeof (string))
+ {
+ value = stringValue;
+ }
+ else if (property.PropertyType == typeof (Uri))
+ {
+ value = new Uri(stringValue);
+ }
+ else
+ {
+ throw new InvalidCastException("Properties of type " + property.PropertyType + " not yet supported");
+ }
+ property.SetValue(config, value);
}
return config;
@@ -579,6 +145,22 @@ public enum ClusterType
///
Unknown,
///
+ /// Cluster is running on the cosmos runtime.
+ ///
+ Cosmos,
+ ///
+ /// Cluster is running on the windows high-performance computing platform released by external research.
+ ///
+ ExternalResearchHPC,
+ ///
+ /// The taiga version of HPC.
+ ///
+ HPC,
+ ///
+ /// Cosmos cluster running scope.
+ ///
+ Scope,
+ ///
/// Fake cluster, used for post-mortem debugging; keeps some information about jobs in a local folder.
///
Cache,
@@ -591,11 +173,38 @@ public enum ClusterType
///
AzureDfs,
///
+ /// Web-access to HDFS
+ ///
+ WebHdfs,
+ ///
+ /// Hdfs direct access.
+ ///
+ Hdfs,
+ ///
+ /// Error in creating configuration.
+ ///
+ Error,
+ ///
/// Max type, unused; for enumerating.
///
MaxUnused,
};
+ ///
+ /// Set of cluster types available.
+ ///
+ public static HashSet<ClusterType> Available;
+
+ static ClusterConfiguration()
+ {
+ Available = new HashSet<ClusterType>();
+ Available.Add(ClusterType.Cache);
+ Available.Add(ClusterType.LocalEmulator);
+ Available.Add(ClusterType.AzureDfs);
+ Available.Add(ClusterType.WebHdfs);
+ Available.Add(ClusterType.Hdfs);
+ }
+
///
/// Properties that can be edited.
///
@@ -606,6 +215,16 @@ public enum ClusterType
/// Returns null if initialization succeeds, an error otherwise.
///
public abstract string Initialize();
+ ///
+ /// True if the cluster supports diagnosis.
+ ///
+ public bool SupportsDiagnosis { get; protected set; }
+ ///
+ /// Cluster description.
+ ///
+ public string Description { get; set; }
+
+ private delegate object Work();
///
/// Enumerate all clusters this user is subscribed to.
@@ -613,7 +232,32 @@ public enum ClusterType
/// A list of clusters.
public static IEnumerable<ClusterConfiguration> EnumerateSubscribedClusters()
{
- return AzureDfsClusterConfiguration.EnumerateAzureDfsSubscribedClusters();
+ // ReSharper disable once JoinDeclarationAndInitializer
+ IEnumerable<ClusterConfiguration> list = null;
+
+ try
+ {
+ Work work = AzureDfsClusterConfiguration.EnumerateAzureDfsSubscribedClusters;
+ IAsyncResult result = work.BeginInvoke(null, null);
+ if (result.IsCompleted == false)
+ {
+ result.AsyncWaitHandle.WaitOne(3000, false);
+ if (result.IsCompleted == false)
+ throw new ApplicationException("Timeout scanning Azure clusters");
+ }
+ list = (List<ClusterConfiguration>)work.EndInvoke(result);
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine("Exception enumerating DFS clusters: " + ex);
+ }
+
+ if (list != null)
+ {
+ foreach (var c in list)
+ yield return c;
+ }
+
}
///
@@ -664,13 +308,20 @@ public static void ReconstructKnownCluster(ListType of cluster.
protected ClusterConfiguration(ClusterConfiguration.ClusterType type)
{
+ this.Description = "";
this.TypeOfCluster = type;
+ this.SupportsDiagnosis = true;
}
///
@@ -699,7 +352,6 @@ public void SetCredential(NetworkCredential credential)
this.credentials = credential;
}
-
///
/// The name of this cluster.
///
@@ -720,7 +372,7 @@ public void SetCredential(NetworkCredential credential)
///
/// The machine where the metadata for the copied jobs is stored.
///
- public string MetaDataMachine { get; protected set; }
+ public virtual string MetaDataMachine { get; protected set; }
///
/// Time zone of the analyzed cluster. We assume that the cluster is in the local time zone.
///
@@ -778,15 +430,7 @@ public virtual IClusterResidentObject ProcessStdoutFile(DryadProcessIdentifier i
/// The proper cluster status.
public abstract ClusterStatus CreateClusterStatus();
- static Dictionary<string, ClusterConfiguration> KnownClusters;
-
- static ClusterConfiguration()
- {
- KnownClusters = new Dictionary<string, ClusterConfiguration>();
- //KnownClusters.Add("Cache", new CacheClusterConfiguration());
- //KnownClusters.Add("Local emulation", new LocalEmulator());
- //KnownClusters.Add("AzureDfs", new AzureDfsClusterConfiguration());
- }
+ static Dictionary<string, ClusterConfiguration> KnownClusters = new Dictionary<string, ClusterConfiguration>();
///
/// A known cluster configuration by name.
@@ -807,9 +451,13 @@ public static ClusterConfiguration KnownClusterByName(string name)
public static void AddKnownCluster(ClusterConfiguration config)
{
if (KnownClusters.ContainsKey(config.Name))
+ {
KnownClusters[config.Name] = config;
+ }
else
+ {
KnownClusters.Add(config.Name, config);
+ }
}
///
@@ -898,10 +546,14 @@ public static ClusterConfiguration CreateConfiguration(ClusterType type)
return new LocalEmulator();
case ClusterType.AzureDfs:
return new AzureDfsClusterConfiguration();
+ case ClusterType.WebHdfs:
+ return new WebHdfsClusterConfiguration();
+ case ClusterType.Hdfs:
+ return new HdfsClusterConfiguration();
case ClusterType.Unknown:
case ClusterType.MaxUnused:
default:
- throw new ArgumentOutOfRangeException("type");
+ return new ErrorConfiguration("Unsupported cluster type " + type);
}
}
@@ -914,11 +566,11 @@ public static ClusterConfiguration CreateConfiguration(ClusterType type)
public abstract string ExtractPidFromGuid(string guid, DryadLinqJobSummary job);
///
- /// Navigate to a given url and return a stream reader with the corresponding web page.
+ /// Navigate to a given url and return a stream with the corresponding web page.
///
/// Url to navigate to.
/// The web page.
- internal virtual StreamReader Navigate(string url)
+ internal virtual Stream Navigate(string url)
{
return Utilities.Navigate(url, null);
}
@@ -947,6 +599,119 @@ public virtual IClusterResidentObject JobQueryPlan(DryadLinqJobSummary job)
}
}
+ ///
+ /// Represents an error in creating a cluster configuration.
+ ///
+ public sealed class ErrorConfiguration : ClusterConfiguration
+ {
+ ///
+ /// Error message.
+ ///
+ public string ErrorMessage { get; private set; }
+
+ ///
+ /// Create an Error Cluster.
+ ///
+ internal ErrorConfiguration(string message)
+ : base(ClusterType.Error)
+ {
+ this.ErrorMessage = message;
+ }
+
+ private static List<string> properties = new List<string>();
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override List<string> GetPropertiesToEdit()
+ {
+ return properties;
+ }
+
+ ///
+ /// Must be called after setting all properties.
+ /// Returns null if initialization succeeds, an error otherwise.
+ ///
+ public override string Initialize()
+ {
+ return this.ErrorMessage;
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override IClusterResidentObject ProcessDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override IClusterResidentObject ProcessWorkDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override DryadProcessIdentifier ProcessFromInputFile(IClusterResidentObject input, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override ClusterStatus CreateClusterStatus()
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override IClusterResidentObject ProcessLogDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override string JMLogFilesPattern(bool error, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override string VertexLogFilesPattern(bool error, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Not used.
+ ///
+ /// Exception.
+ public override string ExtractPidFromGuid(string guid, DryadLinqJobSummary job)
+ {
+ throw new NotImplementedException();
+ }
+ }
+
+
///
@@ -1020,7 +785,7 @@ public override string Initialize()
/// The proper cluster status.
public override ClusterStatus CreateClusterStatus()
{
- var stat = ClusterStatus.LookupStatus(this.Name);
+ var stat = ClusterStatus.LookupStatus(this);
if (stat != null) return stat;
return new CacheClusterStatus(this);
}
@@ -1209,6 +974,7 @@ public sealed class LocalEmulator : ClusterConfiguration
///
/// Folder where job logs are stored.
///
+ // ReSharper disable once UnusedAutoPropertyAccessor.Local
public string JobsFolder { get; private set; }
///
@@ -1217,14 +983,9 @@ public sealed class LocalEmulator : ClusterConfiguration
public LocalEmulator()
: base(ClusterType.LocalEmulator)
{
- string dryadHome = Environment.GetEnvironmentVariable("DRYAD_HOME");
- if (string.IsNullOrEmpty(dryadHome))
- throw new InvalidOperationException("Environment variable DRYAD_HOME is not set");
- this.JobsFolder = Path.Combine(dryadHome, "LocalJobs");
- this.Name = "LocalEmulator";
}
- private static List<string> props = new List<string>();
+ private static List<string> props = new List<string> {"JobsFolder"};
///
/// Must be called after setting all properties.
@@ -1250,7 +1011,7 @@ public override List GetPropertiesToEdit()
/// The proper cluster status.
public override ClusterStatus CreateClusterStatus()
{
- var stat = ClusterStatus.LookupStatus(this.Name);
+ var stat = ClusterStatus.LookupStatus(this);
if (stat != null) return stat;
return new YarnEmulatedClusterStatus(this);
}
@@ -1390,7 +1151,116 @@ public override string ExtractPidFromGuid(string guid, DryadLinqJobSummary job)
///
/// Configuration for an AzureDfs cluster.
///
- public sealed class AzureDfsClusterConfiguration : ClusterConfiguration
+ public abstract class DfsClusterConfiguration : ClusterConfiguration
+ {
+ ///
+ /// Create a cluster representing the local machine only.
+ ///
+ protected DfsClusterConfiguration(ClusterType type)
+ : base(type)
+ {
+ this.SupportsDiagnosis = false;
+ }
+
+ ///
+ /// Work directory of a process vertex.
+ ///
+ /// Vertex guid.
+ /// Machine where process ran.
+ /// The path to the work directory of the vertex.
+ /// Job where the process belongs.
+ /// True if vertex is terminated.
+ public override IClusterResidentObject ProcessWorkDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ {
+ return this.ProcessDirectory(identifier, terminated, machine, job);
+ }
+
+ ///
+ /// Given an input file identify the process that produced it.
+ ///
+ /// Input file of a process.
+ /// Job that contained the process.
+ /// The identity of the process that produced the file.
+ // ReSharper disable UnusedParameter.Global
+ public override DryadProcessIdentifier ProcessFromInputFile(IClusterResidentObject input, DryadLinqJobSummary job)
+ {
+ return null;
+ }
+
+ // ReSharper restore UnusedParameter.Global
+ ///
+ /// File containing standard output of a process.
+ ///
+ /// Process identifier.
+ /// Machine where process ran.
+ /// Job containing process.
+ /// The pathname to the standard output.
+ /// True if vertex is terminated.
+ public override IClusterResidentObject ProcessStdoutFile(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ {
+ if (identifier.ToString() == "jm")
+ {
+ IClusterResidentObject processdir = this.ProcessDirectory(identifier, terminated, machine, job);
+ IClusterResidentObject file = processdir.GetFile("calypso.log");
+ return file;
+ }
+
+ // vertices not supported
+ return null;
+ }
+
+ ///
+ /// Log directory of a process vertex.
+ ///
+ /// Vertex guid.
+ /// The path to the work directory of the vertex.
+ /// Machine where process ran.
+ /// Job where the process belongs.
+ /// Vertex state.
+ public override IClusterResidentObject ProcessLogDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ {
+ return this.ProcessDirectory(identifier, terminated, machine, job);
+ }
+
+ ///
+ /// A shell pattern matching (just the) log files produced by a job manager process.
+ ///
+ /// Pattern matching the log files.
+ /// If true, return only the error logs.
+ /// Job where the JM process belongs.
+ // ReSharper disable once UnusedParameter.Global
+ public override string JMLogFilesPattern(bool error, DryadLinqJobSummary job)
+ {
+ return "*.log";
+ }
+
+ ///
+ /// A shell pattern matching (just the) log files produced by a vertex process.
+ ///
+ /// Pattern matching the log files.
+ /// If true, return only the error logs.
+ /// Job containing this vertex.
+ public override string VertexLogFilesPattern(bool error, DryadLinqJobSummary job)
+ {
+ return "*.log";
+ }
+
+ ///
+ /// Convert a GUID printed by the Dryad job manager into a process-id, which is platform dependent.
+ ///
+ /// Process guid.
+ /// Process id.
+ /// Job where guid is from.
+ public override string ExtractPidFromGuid(string guid, DryadLinqJobSummary job)
+ {
+ return guid;
+ }
+ }
+
+ ///
+ /// Configuration for an AzureDfs cluster.
+ ///
+ public sealed class AzureDfsClusterConfiguration : DfsClusterConfiguration
{
///
/// Handle to client to enumerate logs.
@@ -1403,14 +1273,22 @@ public sealed class AzureDfsClusterConfiguration : ClusterConfiguration
public AzureDfsClusterConfiguration()
: base(ClusterType.AzureDfs)
{
+ this.Description = "Container is usually `dryad-jobs'";
}
+ ///
+ /// Base Uri to access data in this Cluster.
+ ///
+ public Uri baseUri;
+
///
/// Enumerate all the clusters this user is subscribed to.
///
/// The list of clusters this user is subscribed to.
- public static IEnumerable EnumerateAzureDfsSubscribedClusters()
+ public static List EnumerateAzureDfsSubscribedClusters()
{
+ List configList = new List();
+
var store = new X509Store();
store.Open(OpenFlags.ReadOnly);
var configDir = Path.Combine(
@@ -1445,7 +1323,7 @@ public static IEnumerable EnumerateAzureDfsSubscribedClust
try
{
config = new AzureDfsClusterConfiguration();
- config.AzureClient = new AzureDfsClient(accountName, account.Key, "dryad-jobs");
+ config.AzureClient = new AzureDfsClient(accountName, account.Key, config.Container);
config.Name = cluster.Name;
}
catch (Exception ex)
@@ -1454,18 +1332,43 @@ public static IEnumerable EnumerateAzureDfsSubscribedClust
}
if (config != null)
- yield return config;
+ configList.Add(config);
}
}
}
}
+
+ return configList;
+ }
+
+ ///
+ /// The file containing the job query plan.
+ ///
+ /// Job whose plan is sought.
+ /// An object containing the path, or null if it cannot be found.
+ public override IClusterResidentObject JobQueryPlan(DryadLinqJobSummary job)
+ {
+ try
+ {
+ IClusterResidentObject dir = this.ProcessWorkDirectory(job.ManagerProcessGuid, true, job.Machine, job); // immutable
+ var matchingfiles = dir.GetFilesAndFolders("DryadLinqProgram__*.xml").ToList();
+ if (matchingfiles.Count() != 1)
+ throw new ClusterException("Could not find query plan file; got " + matchingfiles.Count() + " possible matches");
+ IClusterResidentObject result = matchingfiles.First();
+ (result as AzureDfsFile).IsDfsStream = true;
+ result.ShouldCacheLocally = true; // immutable
+ return result;
+ }
+ catch (Exception e)
+ {
+ return new UNCFile(e);
+ }
}
///
/// Azure account name.
///
- public
- string AccountName { get; set; }
+ public string AccountName { get; set; }
///
/// Azure account key.
///
@@ -1475,6 +1378,13 @@ public static IEnumerable EnumerateAzureDfsSubscribedClust
///
public string Container { get; set; }
+ private static List props = new List
+ {
+ "AccountName",
+ "AccountKey",
+ "Container"
+ };
+
///
/// Must be called after setting all properties.
/// Returns true if initialization succeeds.
@@ -1487,6 +1397,7 @@ public override string Initialize()
this.AccountName,
this.AccountKey,
this.Container);
+ this.baseUri = Microsoft.Research.Peloponnese.Azure.Utils.ToAzureUri(this.AccountName, this.Container, "", null, this.AccountKey);
return null;
}
catch (Exception ex)
@@ -1523,52 +1434,126 @@ public override IClusterResidentObject ProcessDirectory(DryadProcessIdentifier i
/// The proper cluster status.
public override ClusterStatus CreateClusterStatus()
{
- var stat = ClusterStatus.LookupStatus(this.Name);
+ var stat = ClusterStatus.LookupStatus(this);
if (stat != null) return stat;
return new AzureDfsClusterStatus(this);
}
///
- /// Work directory of a process vertex.
+ /// Properties that can be edited.
///
- /// Vertex guid.
- /// Machine where process ran.
- /// The path to the work directory of the vertex.
- /// Job where the process belongs.
- /// True if vertex is terminated.
- public override IClusterResidentObject ProcessWorkDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ /// List of properties that can be edited.
+ public override List GetPropertiesToEdit()
{
- return this.ProcessDirectory(identifier, terminated, machine, job);
+ return props;
}
+ }
+ ///
+ /// Configuration for a WebHdfs cluster.
+ ///
+ public sealed class WebHdfsClusterConfiguration : DfsClusterConfiguration
+ {
///
- /// Given an input file identify the process that produced it.
+ /// Handle to client to access files.
///
- /// Input file of a process.
- /// Job that contained the process.
- /// The identity of the process that produced the file.
- // ReSharper disable UnusedParameter.Global
- public override DryadProcessIdentifier ProcessFromInputFile(IClusterResidentObject input, DryadLinqJobSummary job)
+ public HdfsClientBase DfsClient;
+
+ ///
+ /// Create a cluster representing the local machine only.
+ ///
+ public WebHdfsClusterConfiguration()
+ : base(ClusterType.WebHdfs)
{
- return null;
+ this.Description = "JobsFolderUri usually looks like hdfs://headnode:port/JobsFolder";
}
- // ReSharper restore UnusedParameter.Global
///
- /// File containing standard output of a process.
+ /// WebHdfs user name.
///
- /// Process identifier.
+ public string UserName { get; set; }
+ ///
+ /// WebHdfs port.
+ ///
+ public int WebHdfsPort { get; set; }
+ ///
+ /// Uri to folder containing jobs.
+ ///
+ public Uri JobsFolderUri { get; set; }
+ ///
+ /// Machine that supplies job status.
+ ///
+ public string StatusNode { get; set; }
+ ///
+ /// Port of status machine.
+ ///
+ public int StatusNodePort { get; set; }
+
+ private static List props = new List
+ {
+ "UserName",
+ "WebHdfsPort",
+ "JobsFolderUri",
+ "StatusNode",
+ "StatusNodePort"
+ };
+
+ ///
+ /// Must be called after setting all properties.
+ /// Returns true if initialization succeeds.
+ ///
+ public override string Initialize()
+ {
+ try
+ {
+ this.DfsClient = new WebHdfsClient(this.UserName, this.WebHdfsPort);
+ return null;
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine(ex);
+ return ex.Message;
+ }
+ }
+
+ ///
+ /// The file containing the job query plan.
+ ///
+ /// Job whose plan is sought.
+ /// An object containing the path, or null if it cannot be found.
+ public override IClusterResidentObject JobQueryPlan(DryadLinqJobSummary job)
+ {
+ try
+ {
+ IClusterResidentObject dir = this.ProcessWorkDirectory(job.ManagerProcessGuid, true, job.Machine, job); // immutable
+ var matchingfiles = dir.GetFilesAndFolders("DryadLinqProgram__*.xml").ToList();
+ if (matchingfiles.Count() != 1)
+ throw new ClusterException("Could not find query plan file; got " + matchingfiles.Count() + " possible matches");
+ IClusterResidentObject result = matchingfiles.First();
+ result.ShouldCacheLocally = true; // immutable
+ return result;
+ }
+ catch (Exception e)
+ {
+ return new UNCFile(e);
+ }
+ }
+
+ ///
+ /// The directory where a specific process is created on the cluster.
+ ///
+ /// Process identifier
/// Machine where process ran.
- /// Job containing process.
- /// The pathname to the standard output.
+ /// Home directory containing the process information (not working directory of vertex).
+ /// Job where the process belongs.
/// True if vertex is terminated.
- public override IClusterResidentObject ProcessStdoutFile(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ public override IClusterResidentObject ProcessDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
{
if (identifier.ToString() == "jm")
{
- IClusterResidentObject processdir = this.ProcessDirectory(identifier, terminated, machine, job);
- IClusterResidentObject file = processdir.GetFile("calypso.log");
- return file;
+ // The job manager process is special
+ var result = new DfsFile(this, this.JobsFolderUri, job, this.DfsClient, job.ClusterJobId, terminated, true);
+ return result;
}
// vertices not supported
@@ -1576,50 +1561,86 @@ public override IClusterResidentObject ProcessStdoutFile(DryadProcessIdentifier
}
///
- /// Log directory of a process vertex.
+ /// Create a cluster status for this cluster.
///
- /// Vertex guid.
- /// The path to the work directory of the vertex.
- /// Machine where process ran.
- /// Job where the process belongs.
- /// Vertex state.
- public override IClusterResidentObject ProcessLogDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
+ /// The proper cluster status.
+ public override ClusterStatus CreateClusterStatus()
{
- return this.ProcessDirectory(identifier, terminated, machine, job);
+ var stat = ClusterStatus.LookupStatus(this);
+ if (stat != null) return stat;
+ return new WebHdfsClusterStatus(this);
}
///
- /// A shell pattern matching (just the) log files produced by a job manager process.
+ /// Properties that can be edited.
///
- /// Pattern matching the log files.
- /// If true, return only the error logs.
- /// Job where the JM process belongs.
- // ReSharper disable once UnusedParameter.Global
- public override string JMLogFilesPattern(bool error, DryadLinqJobSummary job)
+ /// List of properties that can be edited.
+ public override List GetPropertiesToEdit()
{
- return "*.log";
+ return props;
}
+ }
+ ///
+ /// Configuration for an Hdfs cluster.
+ ///
+ public sealed class HdfsClusterConfiguration : DfsClusterConfiguration
+ {
///
- /// A shell pattern matching (just the) log files produced by a vertex process.
+ /// Handle to client to access files.
///
- /// Pattern matching the log files.
- /// If true, return only the error logs.
- /// Job containing this vertex.
- public override string VertexLogFilesPattern(bool error, DryadLinqJobSummary job)
+ public HdfsClientBase DfsClient;
+
+ ///
+ /// Create a cluster representing the local machine only.
+ ///
+ public HdfsClusterConfiguration()
+ : base(ClusterType.Hdfs)
{
- return "*.log";
+ this.Description = "JobsFolderUri should look like hdfs://headnode:port/JobsFolder";
}
///
- /// Convert a GUID printed by the Dryad job manager into a process-id, which is platform dependent.
+ /// Path to cluster.
///
- /// Process guid.
- /// Process id.\
- /// Job where guid is from.
- public override string ExtractPidFromGuid(string guid, DryadLinqJobSummary job)
+ public Uri JobsFolderUri { get; set; }
+ ///
+ /// Port to access HDFS.
+ ///
+ public string UserName { get; set; }
+ ///
+ /// Machine that supplies job status.
+ ///
+ public string StatusNode { get; set; }
+ ///
+ /// Port of status machine.
+ ///
+ public int StatusNodePort { get; set; }
+
+ private static List props = new List
{
- return guid;
+ "UserName",
+ "JobsFolderUri",
+ "StatusNode",
+ "StatusNodePort"
+ };
+
+ ///
+ /// Must be called after setting all properties.
+ /// Returns true if initialization succeeds.
+ ///
+ public override string Initialize()
+ {
+ try
+ {
+ this.DfsClient = new HdfsClient(this.UserName);
+ return null;
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine(ex);
+ return ex.Message;
+ }
}
///
@@ -1636,7 +1657,6 @@ public override IClusterResidentObject JobQueryPlan(DryadLinqJobSummary job)
if (matchingfiles.Count() != 1)
throw new ClusterException("Could not find query plan file; got " + matchingfiles.Count() + " possible matches");
IClusterResidentObject result = matchingfiles.First();
- (result as AzureDfsFile).IsDfsStream = true;
result.ShouldCacheLocally = true; // immutable
return result;
}
@@ -1646,12 +1666,38 @@ public override IClusterResidentObject JobQueryPlan(DryadLinqJobSummary job)
}
}
- private static List props = new List
+ ///
+ /// The directory where a specific process is created on the cluster.
+ ///
+ /// Process identifier
+ /// Machine where process ran.
+ /// Home directory containing the process information (not working directory of vertex).
+ /// Job where the process belongs.
+ /// True if vertex is terminated.
+ public override IClusterResidentObject ProcessDirectory(DryadProcessIdentifier identifier, bool terminated, string machine, DryadLinqJobSummary job)
{
- "AccountName",
- "AccountKey",
- "Container"
- };
+ if (identifier.ToString() == "jm")
+ {
+ // The job manager process is special
+ var result = new DfsFile(this, this.JobsFolderUri, job, this.DfsClient, job.ClusterJobId, terminated, true);
+ return result;
+ }
+
+ // vertices not supported
+ return null;
+ }
+
+ ///
+ /// Create a cluster status for this cluster.
+ ///
+ /// The proper cluster status.
+ public override ClusterStatus CreateClusterStatus()
+ {
+ var stat = ClusterStatus.LookupStatus(this);
+ if (stat != null) return stat;
+ var result = new HdfsClusterStatus(this);
+ return result;
+ }
///
/// Properties that can be edited.
diff --git a/JobBrowser/JOM/ClusterStatus.cs b/JobBrowser/JOM/ClusterStatus.cs
index a7ff380..1242280 100644
--- a/JobBrowser/JOM/ClusterStatus.cs
+++ b/JobBrowser/JOM/ClusterStatus.cs
@@ -19,12 +19,22 @@
*/
+
using System;
using System.Collections.Generic;
+using System.Diagnostics;
using System.IO;
using System.Linq;
-using Microsoft.Research.Peloponnese.Storage;
+using System.Web;
+using System.Xml;
+using System.Xml.Linq;
+using Microsoft.Research.Peloponnese.Hdfs;
+using Microsoft.Research.Peloponnese.Shared;
+using Microsoft.Research.Peloponnese.Yarn;
using Microsoft.Research.Tools;
+using System.Text.RegularExpressions;
+using System.Text;
+using JobStatus = Microsoft.Research.Peloponnese.ClusterUtils.JobStatus;
namespace Microsoft.Research.JobObjectModel
{
@@ -68,12 +78,16 @@ protected ClusterStatus(ClusterConfiguration config)
///
/// See if a status is already cached.
///
- /// Name of cluster.
+ /// Cluster configuration.
/// The cached status.
- public static ClusterStatus LookupStatus(string clusterName)
+ public static ClusterStatus LookupStatus(ClusterConfiguration config)
{
- if (ClusterStatuses.ContainsKey(clusterName))
- return ClusterStatuses[clusterName];
+ if (ClusterStatuses.ContainsKey(config.Name))
+ {
+ var retval = ClusterStatuses[config.Name];
+ if (retval.Config.Equals(config))
+ return retval;
+ }
return null;
}
@@ -172,8 +186,8 @@ public virtual ClusterJobInformation DiscoverClusterJob(DryadLinqJobSummary job,
/// Communication manager.
public virtual void RefreshStatus(DryadLinqJobSummary summary, CommManager manager)
{
- // refresh the whole list
- this.RecomputeClusterJobList(summary.VirtualCluster, manager);
+ // refresh the whole list: too expensive
+ // this.RecomputeClusterJobList(summary.VirtualCluster, manager);
ClusterJobInformation info = this.DiscoverClusterJob(summary, manager);
if (info == null)
{
@@ -193,6 +207,7 @@ public virtual void RefreshStatus(DryadLinqJobSummary summary, CommManager manag
+
///
/// A fake cluster keeps some information about past jobs on a local filesystem, to allow post-mortem debugging.
///
@@ -442,20 +457,17 @@ public override bool CancelJob(DryadLinqJobSummary job)
///
/// Status of an Azure DFS cluster.
///
- public class AzureDfsClusterStatus : ClusterStatus
+ public abstract class DfsClusterStatus : ClusterStatus
{
- private AzureDfsClusterConfiguration config;
-
///
/// Create a cluster containing just the local machine.
///
/// Configuration for the local machine.
- public AzureDfsClusterStatus(ClusterConfiguration config)
+ protected DfsClusterStatus(ClusterConfiguration config)
: base(config)
{
- if (!(config is AzureDfsClusterConfiguration))
- throw new ArgumentException("Expected a AzureYarnClusterConfiguration, got a " + config.GetType());
- this.config = config as AzureDfsClusterConfiguration;
+ if (!(config is DfsClusterConfiguration))
+ throw new ArgumentException("Expected a DfsClusterConfiguration, got a " + config.GetType());
}
///
@@ -468,31 +480,6 @@ public AzureDfsClusterStatus(ClusterConfiguration config)
return result;
}
- ///
- /// Force the recomputation of the cluster job list.
- ///
- /// Virtual cluster to use (defined only for some cluster types).
- /// Communication manager.
- protected override void RecomputeClusterJobList(string virtualCluster, CommManager manager)
- {
- this.clusterJobs = new Dictionary();
- var jobs = this.config.AzureClient.EnumerateDirectory("").ToList();
-
- int done = 0;
- foreach (var job in jobs)
- {
- manager.Token.ThrowIfCancellationRequested();
- ClusterJobInformation info = this.GetJobInfo(job);
- if (info != null)
- {
- // ReSharper disable once AssignNullToNotNullAttribute
- this.clusterJobs.Add(job, info);
- }
- manager.Progress(100*done++/jobs.Count);
- }
- manager.Progress(100);
- }
-
///
/// Discover the (unique) dryadlinq job corresponding to a cluster job.
///
@@ -503,7 +490,7 @@ public override DryadLinqJobSummary DiscoverDryadLinqJobFromClusterJob(ClusterJo
{
DryadLinqJobSummary result = new DryadLinqJobSummary(
clusterJob.Cluster,
- this.config.TypeOfCluster,
+ this.Config.TypeOfCluster,
"", // virtual cluster
"", // machine
clusterJob.ClusterJobID, // jobId
@@ -529,6 +516,78 @@ public override DryadLinqJobSummary DiscoverDryadLinqJobFromURL(string url, Stat
throw new InvalidOperationException();
}
+ ///
+ /// Cancel the specified job.
+ ///
+ /// Job whose execution is cancelled.
+ /// True if the cancellation succeeded.
+ public override bool CancelJob(DryadLinqJobSummary job)
+ {
+ return false;
+ }
+ }
+
+ ///
+ /// Status of an Azure DFS cluster.
+ ///
+ public class AzureDfsClusterStatus : DfsClusterStatus
+ {
+ private AzureDfsClusterConfiguration config;
+
+ ///
+ /// Create a cluster containing just the local machine.
+ ///
+ /// Configuration for the local machine.
+ public AzureDfsClusterStatus(ClusterConfiguration config)
+ : base(config)
+ {
+ if (!(config is AzureDfsClusterConfiguration))
+ throw new ArgumentException("Expected a AzureDfsClusterConfiguration, got a " + config.GetType());
+ this.config = config as AzureDfsClusterConfiguration;
+ }
+
+ ///
+ /// Force the recomputation of the cluster job list.
+ ///
+ /// Virtual cluster to use (defined only for some cluster types).
+ /// Communication manager.
+ protected override void RecomputeClusterJobList(string virtualCluster, CommManager manager)
+ {
+ this.clusterJobs = new Dictionary();
+ var jobs = this.config.AzureClient.ExpandFileOrDirectory(AzureDfsFile.UriFromPath(this.config, "")).ToList();
+
+ int done = 0;
+ foreach (var job in jobs)
+ {
+ manager.Token.ThrowIfCancellationRequested();
+ string jobRootFolder = AzureDfsFile.PathFromUri(this.config, job);
+ ClusterJobInformation info = this.GetJobInfo(jobRootFolder);
+ if (info != null)
+ {
+ // ReSharper disable once AssignNullToNotNullAttribute
+ this.clusterJobs.Add(job.AbsolutePath, info);
+ }
+ manager.Progress(100*done++/jobs.Count);
+ }
+ manager.Progress(100);
+ }
+
+ ///
+ /// Extract blob name from a path.
+ ///
+ /// Container name.
+ /// Path.
+ /// The blob part of path.
+ public static string GetBlobName(string container, string path)
+ {
+ if (path.StartsWith("/" + container + "/"))
+ path = path.Substring(container.Length + 2);
+ int q = path.IndexOf('?');
+ if (q > 0)
+ path = path.Substring(0, q);
+ return path;
+ }
+
///
/// Extract the job information from a folder with logs on the local machine.
///
@@ -540,14 +599,19 @@ private ClusterJobInformation GetJobInfo(string jobRootFolder)
DateTime lastHeartBeat = DateTime.MinValue;
ClusterJobInformation.ClusterJobStatus status = ClusterJobInformation.ClusterJobStatus.Unknown;
bool found = false;
- string jobName = jobRootFolder;
- var jobsFolders = this.config.AzureClient.EnumerateDirectory(jobRootFolder).ToList();
+ Uri uri = AzureDfsFile.UriFromPath(this.config, jobRootFolder);
+ var jobsFolders = this.config.AzureClient.ExpandFileOrDirectory(uri).ToList();
+
+ jobRootFolder = GetBlobName(this.config.Container, jobRootFolder);
+ string jobName = jobRootFolder;
+
foreach (var file in jobsFolders)
{
- if (file.EndsWith("heartbeat"))
+ if (file.AbsolutePath.EndsWith("heartbeat"))
{
- var blob = this.config.AzureClient.Container.GetPageBlobReference(file);
+ string blobName = GetBlobName(this.config.Container, file.AbsolutePath);
+ var blob = this.config.AzureClient.Container.GetPageBlobReference(blobName);
blob.FetchAttributes();
var props = blob.Metadata;
if (props.ContainsKey("status"))
@@ -597,11 +661,11 @@ private ClusterJobInformation GetJobInfo(string jobRootFolder)
found = true;
}
- else if (file.Contains("DryadLinqProgram__") &&
+ else if (file.AbsolutePath.Contains("DryadLinqProgram__") &&
// newer heartbeats contain the date
date != DateTime.MinValue)
{
- var blob = this.config.AzureClient.Container.GetBlockBlobReference(file);
+ var blob = this.config.AzureClient.Container.GetBlockBlobReference(AzureDfsFile.PathFromUri(this.config, file));
blob.FetchAttributes();
var props = blob.Properties;
if (props.LastModified.HasValue)
@@ -628,7 +692,6 @@ private ClusterJobInformation GetJobInfo(string jobRootFolder)
/// Communication manager.
public override void RefreshStatus(DryadLinqJobSummary summary, CommManager manager)
{
- // refresh the whole list
ClusterJobInformation info = this.GetJobInfo(summary.JobID);
if (info == null)
{
@@ -645,8 +708,224 @@ public override void RefreshStatus(DryadLinqJobSummary summary, CommManager mana
/// True if the cancellation succeeded.
public override bool CancelJob(DryadLinqJobSummary job)
{
- AzureUtils.KillJob(this.config.AccountName, this.config.AccountKey, this.config.Container, job.ClusterJobId);
- return true;
+ Microsoft.Research.Peloponnese.Azure.Utils.KillJob(this.config.AccountName, this.config.AccountKey, this.config.Container, job.ClusterJobId);
+ return false;
+ }
+ }
+
+ ///
+ /// Cluster status of a WebHdfs cluster.
+ ///
+ public class WebHdfsClusterStatus : DfsClusterStatus
+ {
+ private WebHdfsClusterConfiguration config;
+ ///
+ /// Yarn client to access job status.
+ ///
+ private NativeYarnClient yarnClient;
+
+ ///
+ /// Create a cluster containing just the local machine.
+ ///
+ /// Configuration for the local machine.
+ public WebHdfsClusterStatus(ClusterConfiguration conf)
+ : base(conf)
+ {
+ if (!(conf is WebHdfsClusterConfiguration))
+ throw new ArgumentException("Expected a WebHdfsClusterConfiguration, got a " + conf.GetType());
+ this.config = conf as WebHdfsClusterConfiguration;
+ this.yarnClient = new NativeYarnClient(this.config.StatusNode, this.config.StatusNodePort, new HdfsClient(this.config.UserName));
+ }
+
+ ///
+ /// Extract the job information from a folder with logs on the local machine.
+ ///
+ /// Folder with logs for the specified job.
+ /// The job information, or null if not found.
+ private ClusterJobInformation GetJobInfo(string jobRootFolder)
+ {
+ Uri uri = DfsFile.UriFromPath(this.config.JobsFolderUri, jobRootFolder);
+ long time;
+ long size;
+ this.config.DfsClient.GetFileStatus(uri, out time, out size);
+
+ DateTime date = DfsFile.TimeFromLong(time);
+ ClusterJobInformation.ClusterJobStatus status = ClusterJobInformation.ClusterJobStatus.Unknown;
+ string jobName = Path.GetFileName(jobRootFolder);
+
+ string errorMsg = "";
+
+ try
+ {
+ var jobinfo = this.yarnClient.QueryJob(jobName, uri);
+ var jobstatus = jobinfo.GetStatus();
+ errorMsg = jobinfo.ErrorMsg;
+ switch (jobstatus)
+ {
+ case JobStatus.NotSubmitted:
+ case JobStatus.Waiting:
+ status = ClusterJobInformation.ClusterJobStatus.Unknown;
+ break;
+ case JobStatus.Running:
+ status = ClusterJobInformation.ClusterJobStatus.Running;
+ break;
+ case JobStatus.Success:
+ status = ClusterJobInformation.ClusterJobStatus.Succeeded;
+ break;
+ case JobStatus.Cancelled:
+ status = ClusterJobInformation.ClusterJobStatus.Cancelled;
+ break;
+ case JobStatus.Failure:
+ status = ClusterJobInformation.ClusterJobStatus.Failed;
+ break;
+ default:
+ throw new ArgumentOutOfRangeException();
+ }
+ }
+ catch (Exception)
+ {
+ }
+
+ TimeSpan running = TimeSpan.Zero;
+ var info = new ClusterJobInformation(this.config.Name, "", jobName, jobName, Environment.UserName, date, running, status);
+ return info;
+ }
+
+ ///
+ /// Force the recomputation of the cluster job list.
+ ///
+ /// Virtual cluster to use (defined only for some cluster types).
+ /// Communication manager.
+ // ReSharper disable once UnusedParameter.Global
+ protected override void RecomputeClusterJobList(string virtualCluster, CommManager manager)
+ {
+ this.clusterJobs = new Dictionary();
+ var uri = DfsFile.UriFromPath(this.config.JobsFolderUri, "");
+ var jobsEnum = this.config.DfsClient.EnumerateSubdirectories(uri);
+ List jobs = jobsEnum != null ? jobsEnum.ToList() : new List();
+
+ int done = 0;
+ foreach (var job in jobs)
+ {
+ manager.Token.ThrowIfCancellationRequested();
+ ClusterJobInformation info = this.GetJobInfo(DfsFile.PathFromUri(this.config.JobsFolderUri, job));
+ if (info != null)
+ {
+ // ReSharper disable once AssignNullToNotNullAttribute
+ this.clusterJobs.Add(info.ClusterJobID, info);
+ }
+ manager.Progress(100 * done++ / jobs.Count);
+ }
+ manager.Progress(100);
+ }
+ }
+ ///
+ /// Cluster status of a WebHdfs cluster.
+ ///
+ public class HdfsClusterStatus : DfsClusterStatus
+ {
+ private HdfsClusterConfiguration config;
+ ///
+ /// Yarn client to access job status.
+ ///
+ private NativeYarnClient yarnClient;
+
+ ///
+ /// Create a cluster containing just the local machine.
+ ///
+ /// Configuration for the local machine.
+ public HdfsClusterStatus(ClusterConfiguration conf)
+ : base(conf)
+ {
+ if (!(conf is HdfsClusterConfiguration))
+ throw new ArgumentException("Expected an HdfsClusterConfiguration, got a " + conf.GetType());
+ this.config = conf as HdfsClusterConfiguration;
+ // make a fake call to initialize the cluster on the foreground thread
+ // HDFS does not work if initialized on the background thread.
+ Uri uri = DfsFile.UriFromPath(this.config.JobsFolderUri, "");
+ this.config.DfsClient.IsFileExists(uri); // ignore result
+ this.yarnClient = new NativeYarnClient(this.config.StatusNode, this.config.StatusNodePort, new HdfsClient(this.config.UserName));
+ }
+
+ ///
+ /// Extract the job information from a folder with logs on the local machine.
+ ///
+ /// Folder with logs for the specified job.
+ /// The job information, or null if not found.
+ private ClusterJobInformation GetJobInfo(string jobRootFolder)
+ {
+ Uri uri = DfsFile.UriFromPath(this.config.JobsFolderUri, jobRootFolder);
+ long time;
+ long size;
+ this.config.DfsClient.GetFileStatus(uri, out time, out size);
+
+ DateTime date = DfsFile.TimeFromLong(time);
+ ClusterJobInformation.ClusterJobStatus status = ClusterJobInformation.ClusterJobStatus.Unknown;
+ string jobName = Path.GetFileName(jobRootFolder);
+
+ string errorMsg = "";
+
+ try
+ {
+ var jobinfo = this.yarnClient.QueryJob(jobName, uri);
+ var jobstatus = jobinfo.GetStatus();
+ errorMsg = jobinfo.ErrorMsg;
+ switch (jobstatus)
+ {
+ case JobStatus.NotSubmitted:
+ case JobStatus.Waiting:
+ status = ClusterJobInformation.ClusterJobStatus.Unknown;
+ break;
+ case JobStatus.Running:
+ status = ClusterJobInformation.ClusterJobStatus.Running;
+ break;
+ case JobStatus.Success:
+ status = ClusterJobInformation.ClusterJobStatus.Succeeded;
+ break;
+ case JobStatus.Cancelled:
+ status = ClusterJobInformation.ClusterJobStatus.Cancelled;
+ break;
+ case JobStatus.Failure:
+ status = ClusterJobInformation.ClusterJobStatus.Failed;
+ break;
+ default:
+ throw new ArgumentOutOfRangeException();
+ }
+ }
+ catch (Exception)
+ {
+ }
+
+ TimeSpan running = TimeSpan.Zero;
+ var info = new ClusterJobInformation(config.Name, "", jobName, jobName, Environment.UserName, date, running, status);
+ return info;
+ }
+
+ ///
+ /// Force the recomputation of the cluster job list.
+ ///
+ /// Virtual cluster to use (defined only for some cluster types).
+ /// Communication manager.
+ // ReSharper disable once UnusedParameter.Global
+ protected override void RecomputeClusterJobList(string virtualCluster, CommManager manager)
+ {
+ this.clusterJobs = new Dictionary();
+ var uri = DfsFile.UriFromPath(this.config.JobsFolderUri, "");
+ var jobs = this.config.DfsClient.EnumerateSubdirectories(uri).ToList();
+
+ int done = 0;
+ foreach (var job in jobs)
+ {
+ manager.Token.ThrowIfCancellationRequested();
+ ClusterJobInformation info = this.GetJobInfo(DfsFile.PathFromUri(this.config.JobsFolderUri, job));
+ if (info != null)
+ {
+ // ReSharper disable once AssignNullToNotNullAttribute
+ this.clusterJobs.Add(info.ClusterJobID, info);
+ }
+ manager.Progress(100 * done++ / jobs.Count);
+ }
+ manager.Progress(100);
}
}
}
diff --git a/JobBrowser/JOM/JobObjectModel.csproj b/JobBrowser/JOM/JobObjectModel.csproj
index 626d232..2547ec9 100644
--- a/JobBrowser/JOM/JobObjectModel.csproj
+++ b/JobBrowser/JOM/JobObjectModel.csproj
@@ -1,6 +1,12 @@

-
+
+
+
+
+
+
+
Debug
AnyCPU
@@ -9,39 +15,17 @@
{77739535-7FAC-4487-887F-FEBA197E7572}
Library
Properties
- DistributedDataCollection
- DistributedDataCollection
+ JobObjectModel
+ JobObjectModel
v4.5
512
-
-
-
-
-
-
- 3.5
- publish\
- true
- Disk
- false
- Foreground
- 7
- Days
- false
- false
- true
- 0
- 1.0.0.%2a
- false
- false
- true
-
+ 71873e18
true
bin\x64\Debug\
DEBUG;TRACE
- bin\Debug\DistributedDataCollection.XML
+ bin\Debug\JobObjectModel.XML
full
x64
prompt
@@ -57,151 +41,137 @@
AllRules.ruleset
-
- False
- ..\packages\Microsoft.Data.Edm.5.6.1\lib\net40\Microsoft.Data.Edm.dll
-
-
+
+
+
+
+
+
+
+
+
+
+
+ {20B91AAF-AAD4-47DF-9F1D-494DE6E066F9}
+ Tools
+
+
+
+
+
+ Designer
+
+
+
+
False
- ..\packages\Microsoft.Data.OData.5.6.1\lib\net40\Microsoft.Data.OData.dll
+ ..\packages\Microsoft.Data.Edm.5.6.2\lib\net40\Microsoft.Data.Edm.dll
-
+
False
- ..\packages\Microsoft.Data.Services.Client.5.6.1\lib\net40\Microsoft.Data.Services.Client.dll
+ ..\packages\Microsoft.Data.OData.5.6.2\lib\net40\Microsoft.Data.OData.dll
-
+
False
- ..\packages\Microsoft.Hadoop.Client.1.1.0.7\lib\net40\Microsoft.Hadoop.Client.dll
+ ..\packages\Microsoft.Data.Services.Client.5.6.2\lib\net40\Microsoft.Data.Services.Client.dll
-
+
False
- ..\packages\Microsoft.Research.Peloponnese.0.7.2-beta\lib\net45\Microsoft.Research.Peloponnese.HadoopBridge.dll
+ ..\packages\Microsoft.Hadoop.Client.1.3.1.6\lib\net40\Microsoft.Hadoop.Client.dll
-
+
False
- ..\packages\Microsoft.Research.Peloponnese.0.7.2-beta\lib\net45\Microsoft.Research.Peloponnese.Utils.dll
+ ..\packages\Microsoft.Hadoop.Client.1.3.1.6\lib\net40\Microsoft.HDInsight.Net.Http.Formatting.dll
- ..\packages\Microsoft.Bcl.Async.1.0.166\lib\net40\Microsoft.Threading.Tasks.dll
+ ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.dll
- ..\packages\Microsoft.Bcl.Async.1.0.166\lib\net40\Microsoft.Threading.Tasks.Extensions.dll
+ ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.dll
- ..\packages\Microsoft.Bcl.Async.1.0.166\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll
+ ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll
- ..\packages\Microsoft.WindowsAzure.Common.1.0.1\lib\net45\Microsoft.WindowsAzure.Common.dll
+ ..\packages\Microsoft.WindowsAzure.Common.1.3.0\lib\net45\Microsoft.WindowsAzure.Common.dll
- ..\packages\Microsoft.WindowsAzure.Common.1.0.1\lib\net45\Microsoft.WindowsAzure.Common.NetFramework.dll
+ ..\packages\Microsoft.WindowsAzure.Common.1.3.0\lib\net45\Microsoft.WindowsAzure.Common.NetFramework.dll
..\packages\Microsoft.WindowsAzure.ConfigurationManager.2.0.3\lib\net40\Microsoft.WindowsAzure.Configuration.dll
-
- ..\packages\Microsoft.WindowsAzure.Management.1.0.0\lib\net40\Microsoft.WindowsAzure.Management.dll
-
-
+
False
- ..\packages\Microsoft.WindowsAzure.Management.HDInsight.1.1.0.7\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.dll
+ ..\packages\Microsoft.WindowsAzure.Management.HDInsight.1.3.1.6\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.dll
-
+
False
- ..\packages\Microsoft.Hadoop.Client.1.1.0.7\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.dll
+ ..\packages\Microsoft.Hadoop.Client.1.3.1.6\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.dll
-
+
False
- ..\packages\Microsoft.Hadoop.Client.1.1.0.7\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.Core.dll
+ ..\packages\Microsoft.Hadoop.Client.1.3.1.6\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.Core.dll
-
- ..\packages\Microsoft.WindowsAzure.Management.Storage.1.0.0\lib\net40\Microsoft.WindowsAzure.Management.Storage.dll
+
+ False
+ ..\packages\Microsoft.WindowsAzure.Management.Storage.3.0.0\lib\net40\Microsoft.WindowsAzure.Management.Storage.dll
-
+
False
- ..\packages\WindowsAzure.Storage.3.1.0.1\lib\net40\Microsoft.WindowsAzure.Storage.dll
+ ..\packages\WindowsAzure.Storage.4.3.0\lib\net40\Microsoft.WindowsAzure.Storage.dll
-
- ..\packages\Newtonsoft.Json.6.0.2\lib\net45\Newtonsoft.Json.dll
+
+ False
+ ..\packages\Newtonsoft.Json.6.0.5\lib\net45\Newtonsoft.Json.dll
-
-
- 3.5
+
+ ..\..\..\..\Program Files (x86)\Reference Assemblies\Microsoft\Framework\.NETFramework\v4.5\System.dll
-
+
- ..\packages\Microsoft.Net.Http.2.2.19\lib\net45\System.Net.Http.Extensions.dll
+ ..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Extensions.dll
- ..\packages\Microsoft.Net.Http.2.2.19\lib\net45\System.Net.Http.Primitives.dll
+ ..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Primitives.dll
-
+
False
- ..\packages\System.Spatial.5.6.1\lib\net40\System.Spatial.dll
+ ..\packages\System.Spatial.5.6.2\lib\net40\System.Spatial.dll
-
- 3.5
-
-
- 3.5
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- {20B91AAF-AAD4-47DF-9F1D-494DE6E066F9}
- Tools
-
-
-
-
- False
- .NET Framework 3.5 SP1 Client Profile
- false
-
-
- False
- .NET Framework 3.5 SP1
- true
-
-
- False
- Windows Installer 3.1
- true
-
-
-
-
-
+
+
-
-
-
-
-
This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
@@ -30,37 +30,57 @@ Dryad provides reliable, distributed computing on thousands of servers for large
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ProcessService/Constants.cs b/ProcessService/Constants.cs
index c0897cc..eafb70f 100644
--- a/ProcessService/Constants.cs
+++ b/ProcessService/Constants.cs
@@ -49,20 +49,13 @@ internal class Constants
public const Int32 HdfsServiceDefaultHttpPort = 50070;
// Recognized values are: OFF, CRITICAL, ERROR, WARN, INFO, VERBOSE
- public const string traceLevelEnvVar = "DRYAD_TRACE_LEVEL";
- public const string traceOff = "OFF";
- public const string traceCritical = "CRITICAL";
- public const string traceError = "ERROR";
- public const string traceWarning = "WARN";
- public const string traceInfo = "INFO";
- public const string traceVerbose = "VERBOSE";
-
- public const int traceOffNum = 0;
- public const int traceCriticalNum = 1;
- public const int traceErrorNum = 3;
- public const int traceWarningNum = 7;
- public const int traceInfoNum = 15;
- public const int traceVerboseNum = 31;
+ public const string loggingLevelEnvVar = "DRYAD_LOGGING_LEVEL";
+ public const string loggingOff = "OFF";
+ public const string loggingCritical = "CRITICAL";
+ public const string loggingError = "ERROR";
+ public const string loggingWarning = "WARN";
+ public const string loggingInfo = "INFO";
+ public const string loggingVerbose = "VERBOSE";
// DrError.h values used in managed code
// need to keep this section in sync with drerror.h changes...
diff --git a/ProcessService/ProcessService.csproj b/ProcessService/ProcessService.csproj
index 8b6c0a7..5e0b612 100644
--- a/ProcessService/ProcessService.csproj
+++ b/ProcessService/ProcessService.csproj
@@ -1,6 +1,7 @@

-
+
+
Debug
@@ -9,12 +10,13 @@
Exe
Properties
Microsoft.Research.Dryad.ProcessService
- ProcessService
+ Microsoft.Research.Dryad.ProcessService
v4.5
512
..\
true
+ 4923fadf
true
@@ -35,99 +37,12 @@
MinimumRecommendedRules.ruleset
-
- False
- ..\packages\Microsoft.Data.Edm.5.6.1\lib\net40\Microsoft.Data.Edm.dll
-
-
- False
- ..\packages\Microsoft.Data.OData.5.6.1\lib\net40\Microsoft.Data.OData.dll
-
-
- False
- ..\packages\Microsoft.Data.Services.Client.5.6.1\lib\net40\Microsoft.Data.Services.Client.dll
-
-
- False
- ..\packages\Microsoft.Hadoop.Client.1.1.1.8\lib\net40\Microsoft.Hadoop.Client.dll
-
-
- False
- ..\packages\Microsoft.Research.Peloponnese.0.7.5-beta\lib\net45\Microsoft.Research.Peloponnese.HadoopBridge.dll
-
-
- False
- ..\packages\Microsoft.Research.Peloponnese.0.7.5-beta\lib\net45\Microsoft.Research.Peloponnese.Utils.dll
-
-
- False
- ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.dll
-
-
- False
- ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.dll
-
-
- False
- ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Common.1.1.1\lib\net45\Microsoft.WindowsAzure.Common.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Common.1.1.1\lib\net45\Microsoft.WindowsAzure.Common.NetFramework.dll
-
-
- ..\packages\Microsoft.WindowsAzure.ConfigurationManager.2.0.3\lib\net40\Microsoft.WindowsAzure.Configuration.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Management.1.2.0\lib\net40\Microsoft.WindowsAzure.Management.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Management.HDInsight.1.1.1.8\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.dll
-
-
- False
- ..\packages\Microsoft.Hadoop.Client.1.1.1.8\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.dll
-
-
- False
- ..\packages\Microsoft.Hadoop.Client.1.1.1.8\lib\net40\Microsoft.WindowsAzure.Management.HDInsight.Framework.Core.dll
-
-
- False
- ..\packages\Microsoft.WindowsAzure.Management.Storage.1.1.1\lib\net40\Microsoft.WindowsAzure.Management.Storage.dll
-
-
- False
- ..\packages\WindowsAzure.Storage.4.0.1\lib\net40\Microsoft.WindowsAzure.Storage.dll
-
-
- False
- ..\packages\Newtonsoft.Json.6.0.3\lib\net45\Newtonsoft.Json.dll
-
-
- False
- ..\packages\Microsoft.Net.Http.2.2.22\lib\net45\System.Net.Http.Extensions.dll
-
-
- False
- ..\packages\Microsoft.Net.Http.2.2.22\lib\net45\System.Net.Http.Primitives.dll
-
-
- False
- ..\packages\System.Spatial.5.6.1\lib\net40\System.Spatial.dll
-
@@ -136,6 +51,9 @@
+
+ Properties\SharedAssemblyInfo.cs
+
@@ -149,19 +67,17 @@
-
-
-
-
-
This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
+
+
+
+
-
+
+