This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Add Python3 GPU+MKLDNN unittests to dev menu, fix CMake options parsing #13458

Merged: 1 commit, Dec 3, 2018
Fix cmake options parsing in dev_menu
Add GPU+MKLDNN unittests to dev_menu
larroy committed Dec 3, 2018
commit 985c6b207b374af2252a3a2847a5628acfecd640
63 changes: 32 additions & 31 deletions cmake/cmake_options.yml
@@ -16,34 +16,35 @@
 # under the License.

 --- # CMake configuration
-USE_CUDA: OFF # Build with CUDA support
-USE_OLDCMAKECUDA: OFF # Build with old cmake cuda
-USE_NCCL: OFF # Use NVidia NCCL with CUDA
-USE_OPENCV: ON # Build with OpenCV support
-USE_OPENMP: ON # Build with Openmp support
-USE_CUDNN: ON # Build with cudnn support) # one could set CUDNN_ROOT for search path
-USE_SSE: ON # Build with x86 SSE instruction support IF NOT ARM
-USE_F16C: ON # Build with x86 F16C instruction support) # autodetects support if ON
-USE_LAPACK: ON # Build with lapack support
-USE_MKL_IF_AVAILABLE: ON # Use MKL if found
-USE_MKLML_MKL: ON # Use MKLDNN variant of MKL (if MKL found) IF USE_MKL_IF_AVAILABLE AND (NOT APPLE)
-USE_MKLDNN: ON # Use MKLDNN variant of MKL (if MKL found) IF USE_MKL_IF_AVAILABLE AND (NOT APPLE)
-USE_OPERATOR_TUNING: ON # Enable auto-tuning of operators IF NOT MSVC
-USE_GPERFTOOLS: ON # Build with GPerfTools support (if found)
-USE_JEMALLOC: ON # Build with Jemalloc support
-USE_PROFILER: ON # Build with Profiler support
-USE_DIST_KVSTORE: OFF # Build with DIST_KVSTORE support
-USE_PLUGINS_WARPCTC: OFF # Use WARPCTC Plugins
-USE_PLUGIN_CAFFE: OFF # Use Caffe Plugin
-USE_CPP_PACKAGE: OFF # Build C++ Package
-USE_MXNET_LIB_NAMING: ON # Use MXNet library naming conventions.
-USE_GPROF: OFF # Compile with gprof (profiling) flag
-USE_CXX14_IF_AVAILABLE: OFF # Build with C++14 if the compiler supports it
-USE_VTUNE: OFF # Enable use of Intel Amplifier XE (VTune)) # one could set VTUNE_ROOT for search path
-ENABLE_CUDA_RTC: ON # Build with CUDA runtime compilation support
-BUILD_CPP_EXAMPLES: ON # Build cpp examples
-INSTALL_EXAMPLES: OFF # Install the example source files.
-USE_SIGNAL_HANDLER: OFF # Print stack traces on segfaults.
-USE_TENSORRT: OFF # Enable infeference optimization with TensorRT.
-USE_ASAN: OFF # Enable Clang/GCC ASAN sanitizers.
-ENABLE_TESTCOVERAGE: OFF # Enable compilation with test coverage metric output
+USE_CUDA: "ON" # Build with CUDA support
+USE_OLDCMAKECUDA: "OFF" # Build with old cmake cuda
+USE_NCCL: "OFF" # Use NVidia NCCL with CUDA
+USE_OPENCV: "ON" # Build with OpenCV support
+USE_OPENMP: "ON" # Build with Openmp support
+USE_CUDNN: "ON" # Build with cudnn support) # one could set CUDNN_ROOT for search path
+USE_SSE: "ON" # Build with x86 SSE instruction support IF NOT ARM
+USE_F16C: "ON" # Build with x86 F16C instruction support) # autodetects support if "ON"
+USE_LAPACK: "ON" # Build with lapack support
+USE_MKL_IF_AVAILABLE: "ON" # Use MKL if found
+USE_MKLML_MKL: "ON" # Use MKLDNN variant of MKL (if MKL found) IF USE_MKL_IF_AVAILABLE AND (NOT APPLE)
+USE_MKLDNN: "ON" # Use MKLDNN variant of MKL (if MKL found) IF USE_MKL_IF_AVAILABLE AND (NOT APPLE)
+USE_OPERATOR_TUNING: "ON" # Enable auto-tuning of operators IF NOT MSVC
+USE_GPERFTOOLS: "ON" # Build with GPerfTools support (if found)
+USE_JEMALLOC: "ON" # Build with Jemalloc support
+USE_PROFILER: "ON" # Build with Profiler support
+USE_DIST_KVSTORE: "OFF" # Build with DIST_KVSTORE support
+USE_PLUGINS_WARPCTC: "OFF" # Use WARPCTC Plugins
+USE_PLUGIN_CAFFE: "OFF" # Use Caffe Plugin
+USE_CPP_PACKAGE: "OFF" # Build C++ Package
+USE_MXNET_LIB_NAMING: "ON" # Use MXNet library naming conventions.
+USE_GPROF: "OFF" # Compile with gprof (profiling) flag
+USE_CXX14_IF_AVAILABLE: "OFF" # Build with C++14 if the compiler supports it
+USE_VTUNE: "OFF" # Enable use of Intel Amplifier XE (VTune)) # one could set VTUNE_ROOT for search path
+ENABLE_CUDA_RTC: "ON" # Build with CUDA runtime compilation support
+BUILD_CPP_EXAMPLES: "ON" # Build cpp examples
+INSTALL_EXAMPLES: "OFF" # Install the example source files.
+USE_SIGNAL_HANDLER: "ON" # Print stack traces on segfaults.
+USE_TENSORRT: "OFF" # Enable infeference optimization with TensorRT.
+USE_ASAN: "OFF" # Enable Clang/GCC ASAN sanitizers.
+ENABLE_TESTCOVERAGE: "OFF" # Enable compilation with test coverage metric output
+CMAKE_BUILD_TYPE: "Debug"
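Quoting every value goes hand in hand with the parser fix in dev_menu.py below: PyYAML follows YAML 1.1 and resolves bare ON/OFF (like YES/NO or TRUE/FALSE) to Python booleans, whereas quoted values stay plain strings that can be forwarded to CMake untouched. A minimal sketch of the difference, illustrative only and not part of the PR:

import yaml

# Bare ON is resolved to the boolean True, so naive formatting would emit -DUSE_CUDA=True.
print(yaml.safe_load('USE_CUDA: ON'))               # {'USE_CUDA': True}
# Quoted values survive as the literal strings CMake expects.
print(yaml.safe_load('USE_CUDA: "ON"'))             # {'USE_CUDA': 'ON'}
# Non-boolean options such as the new CMAKE_BUILD_TYPE pass through unchanged.
print(yaml.safe_load('CMAKE_BUILD_TYPE: "Debug"'))  # {'CMAKE_BUILD_TYPE': 'Debug'}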
20 changes: 12 additions & 8 deletions dev_menu.py
@@ -46,8 +46,12 @@ def __call__(self):
                 resp = input("Please answer yes or no: ")

 class CMake(object):
-    def __init__(self, cmake_options_yaml='cmake/cmake_options.yml'):
-        self.cmake_options_yaml = cmake_options_yaml
+    def __init__(self, cmake_options_yaml='cmake_options.yml', cmake_options_yaml_default='cmake/cmake_options.yml'):
+        if os.path.exists(cmake_options_yaml):
+            self.cmake_options_yaml = cmake_options_yaml
+        else:
+            self.cmake_options_yaml = cmake_options_yaml_default
+        logging.info('Using {} for CMake configuration'.format(self.cmake_options_yaml))
         self.cmake_options = None
         self.read_config()

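The new constructor gives dev_menu a simple local-override lookup: a cmake_options.yml in the current working directory takes precedence, and the checked-in cmake/cmake_options.yml remains the fallback, so personal build settings can stay out of version control. A standalone sketch of that lookup, with a hypothetical helper name, for illustration only:

import os

def resolve_cmake_config(local='cmake_options.yml', default='cmake/cmake_options.yml'):
    # Prefer a local override in the working directory; otherwise use the tracked defaults.
    return local if os.path.exists(local) else default

print(resolve_cmake_config())  # 'cmake/cmake_options.yml' unless a local copy exists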
@@ -58,13 +62,8 @@ def read_config(self):

     def _cmdlineflags(self):
         res = []
-        def _bool_ON_OFF(x):
-            if x:
-                return 'ON'
-            else:
-                return 'OFF'
         for opt,v in self.cmake_options.items():
-            res.append('-D{}={}'.format(opt,_bool_ON_OFF(v)))
+            res.append('-D{}={}'.format(opt,v))
         return res

     def cmake_command(self) -> str:
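With the _bool_ON_OFF conversion gone, whatever string the YAML provides is passed straight through to CMake, which is what lets non-boolean options such as CMAKE_BUILD_TYPE work. A rough illustration of the flags the simplified loop produces (assumed option values, not the PR's code):

options = {'USE_CUDA': 'ON', 'USE_MKLDNN': 'ON', 'CMAKE_BUILD_TYPE': 'Debug'}

# Each option becomes a -D<KEY>=<VALUE> definition on the cmake command line.
flags = ['-D{}={}'.format(opt, v) for opt, v in options.items()]
print(flags)  # ['-DUSE_CUDA=ON', '-DUSE_MKLDNN=ON', '-DCMAKE_BUILD_TYPE=Debug']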
@@ -103,6 +102,11 @@ def __call__(self, build_dir='build', generator='Ninja', build_cmd='ninja'):
         "ci/build.py --platform ubuntu_gpu /work/runtime_functions.sh build_ubuntu_gpu",
         "ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh unittest_ubuntu_python3_gpu",
     ]),
+    ('[Docker] Python3 GPU+MKLDNN unittests',
+    [
+        "ci/build.py --platform ubuntu_gpu /work/runtime_functions.sh build_ubuntu_gpu_cmake_mkldnn",
+        "ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh unittest_ubuntu_python3_gpu",
+    ]),
     ('[Docker] Python3 CPU Intel MKLDNN unittests',
     [
         "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh build_ubuntu_cpu_mkldnn",
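Each menu entry pairs a label with one or more shell commands that run in sequence; the new '[Docker] Python3 GPU+MKLDNN unittests' entry first builds MXNet with CUDA and MKLDNN enabled inside the ubuntu_gpu CI container and then runs the Python 3 GPU unit tests under nvidia-docker. The dispatch code is outside this diff, but it plausibly amounts to something like the following sketch (run_menu_entry is an assumed name, not dev_menu's actual API):

import shlex
import subprocess

def run_menu_entry(commands):
    # Run each command of an entry in order, stopping at the first failure.
    if isinstance(commands, str):
        commands = [commands]
    for cmd in commands:
        subprocess.check_call(shlex.split(cmd))

# Equivalent of picking the new GPU+MKLDNN entry from the menu.
run_menu_entry([
    "ci/build.py --platform ubuntu_gpu /work/runtime_functions.sh build_ubuntu_gpu_cmake_mkldnn",
    "ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh unittest_ubuntu_python3_gpu",
])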