From 7d8afc3356ac48341a5ffd11ebdda1044cfe4a71 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 15 Jan 2018 00:41:58 -0800
Subject: [PATCH 1/5] "fix gpu init"

---
 paddle/framework/init.cc | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/paddle/framework/init.cc b/paddle/framework/init.cc
index 4ef82a541efaa..ed9ee1797593e 100644
--- a/paddle/framework/init.cc
+++ b/paddle/framework/init.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <string.h>  // for strdup
 #include <algorithm>
+#include <stdexcept>
 #include <string>
 
 #include "paddle/framework/init.h"
@@ -48,13 +48,17 @@ void InitDevices() {
   places.emplace_back(platform::CPUPlace());
 
 #ifdef PADDLE_WITH_CUDA
-  int count = platform::GetCUDADeviceCount();
-  for (int i = 0; i < count; ++i) {
-    places.emplace_back(platform::CUDAPlace(i));
+  try {
+    int count = platform::GetCUDADeviceCount();
+    for (int i = 0; i < count; ++i) {
+      places.emplace_back(platform::CUDAPlace(i));
+    }
+  } catch (const std::exception &exp) {
+    LOG(WARNING) << "Compiled with WITH_CUDA, but no CUDA found in runtime.";
   }
 #else
   LOG(WARNING)
-      << "'GPU' is not supported, Please re-compile with WITH_GPU option";
+      << "'CUDA' is not supported, Please re-compile with WITH_CUDA option";
 #endif
 
   platform::DeviceContextPool::Init(places);

From 1edd1c00655aa94a7aca45b0bfb8ec33a28de78d Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 15 Jan 2018 03:02:26 -0800
Subject: [PATCH 2/5] "set env variable default value for share gpu"

---
 paddle/framework/init.cc           | 14 ++++++++------
 python/paddle/v2/fluid/__init__.py | 15 +++++++++++++++
 2 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/paddle/framework/init.cc b/paddle/framework/init.cc
index ed9ee1797593e..3f6ea121b3994 100644
--- a/paddle/framework/init.cc
+++ b/paddle/framework/init.cc
@@ -47,21 +47,23 @@ void InitDevices() {
   std::vector<platform::Place> places;
   places.emplace_back(platform::CPUPlace());
 
+  int count = 0;
 #ifdef PADDLE_WITH_CUDA
   try {
-    int count = platform::GetCUDADeviceCount();
-    for (int i = 0; i < count; ++i) {
-      places.emplace_back(platform::CUDAPlace(i));
-    }
+    count = platform::GetCUDADeviceCount();
   } catch (const std::exception &exp) {
-    LOG(WARNING) << "Compiled with WITH_CUDA, but no CUDA found in runtime.";
+    LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime.";
   }
 #else
   LOG(WARNING)
-      << "'CUDA' is not supported, Please re-compile with WITH_CUDA option";
+      << "'CUDA' is not supported, Please re-compile with WITH_GPU option";
 #endif
 
+  for (int i = 0; i < count; ++i) {
+    places.emplace_back(platform::CUDAPlace(i));
+  }
+
   platform::DeviceContextPool::Init(places);
 }

diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py
index 5afc663822cac..b873f140a39f3 100644
--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
@@ -72,11 +72,26 @@ def __bootstrap__():
     os.environ['OMP_NUM_THREADS'] = str(num_threads)
 
     read_env_flags = ['use_pinned_memory', 'check_nan_inf']
+
     if core.is_compile_gpu():
         read_env_flags += ['fraction_of_gpu_memory_to_use', 'op_sync']
+
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
     core.init_glog(sys.argv[0])
+
+    gpu_devices = os.getenv("CUDA_VISIBLE_DEVICES", '')
+    if core.is_compile_gpu():
+        if len(gpu_devices.split(",")) >= 1:
+            print(
+                'WARNING: CUDA_VISIBLE_DEVICES set to {0}, not empty . The computation '
+                'speed will not be optimized if you use multi-gpu. It will '
+                'fail if this PaddlePaddle binary is compiled without GPU option'
+                .format(gpu_devices),
+                file=sys.stderr)
+        else:
+            gpu_devices = "0"
+            os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
     core.init_devices()
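The first two patches converge on one pattern: probe the device count inside a try block, fall back to a CPU-only place list when the probe throws, and only then enumerate the CUDA places from the count. Below is a minimal, self-contained sketch of that flow; ProbeDeviceCount() and the FAKE_GPUS variable are hypothetical stand-ins for platform::GetCUDADeviceCount(), which throws when the binary was built with CUDA but no usable device or driver is present, so read it as an illustration of the guard rather than Paddle's implementation.

// Sketch of the guard introduced above -- illustration only.
// ProbeDeviceCount() and FAKE_GPUS are hypothetical stand-ins for
// platform::GetCUDADeviceCount() and a real runtime probe.
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

int ProbeDeviceCount() {
  const char *fake = std::getenv("FAKE_GPUS");  // stand-in for the real probe
  if (fake == nullptr) {
    throw std::runtime_error("no CUDA driver found");
  }
  return std::atoi(fake);
}

int main() {
  std::vector<std::string> places{"CPUPlace"};  // the CPU place is always registered

  int count = 0;
  try {
    count = ProbeDeviceCount();
  } catch (const std::exception &exp) {
    // Same behaviour as the patched InitDevices(): warn and keep going on CPU only.
    std::cerr << "Compiled with GPU support, but no GPU found at runtime.\n";
  }

  // Enumeration happens outside the try block, exactly as in the second patch,
  // so a failed probe simply leaves count at zero.
  for (int i = 0; i < count; ++i) {
    places.emplace_back("CUDAPlace(" + std::to_string(i) + ")");
  }

  std::cout << "initialized " << places.size() << " place(s)\n";
  return 0;
}

With nothing set it prints "initialized 1 place(s)"; with FAKE_GPUS=2 it reports three, i.e. 1 + count, which is the same relationship the CUDA test added in the next patch asserts against DeviceContextPool::Instance().size().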
From d1a3ba9abc3ef9200f62b1bc0f61b6a88fa85d88 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 22 Jan 2018 22:49:21 -0800
Subject: [PATCH 3/5] "fix ci"

---
 paddle/framework/init_test.cc | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/paddle/framework/init_test.cc b/paddle/framework/init_test.cc
index f837a965d3be7..d1747a39d5135 100644
--- a/paddle/framework/init_test.cc
+++ b/paddle/framework/init_test.cc
@@ -20,7 +20,26 @@
 TEST(InitDevices, CPU) {
   using paddle::framework::InitDevices;
   using paddle::platform::DeviceContextPool;
 
+#ifndef PADDLE_WITH_CUDA
   InitDevices();
   DeviceContextPool& pool = DeviceContextPool::Instance();
-  ASSERT_GE(pool.size(), 1U);
+  ASSERT_EQ(pool.size(), 1U);
+#endif
+}
+
+TEST(InitDevices, CUDA) {
+  using paddle::framework::InitDevices;
+  using paddle::platform::DeviceContextPool;
+
+  int count = 0;
+  try {
+    count = paddle::platform::GetCUDADeviceCount();
+  } catch (const std::exception& exp) {
+  }
+
+#ifdef PADDLE_WITH_CUDA
+  InitDevices();
+  DeviceContextPool& pool = DeviceContextPool::Instance();
+  ASSERT_EQ(pool.size(), 1U + static_cast<unsigned>(count));
+#endif
+}

From c17051e009345349219700fbbf37581887d46d56 Mon Sep 17 00:00:00 2001
From: dongzhihong
Date: Tue, 30 Jan 2018 23:51:27 +0800
Subject: [PATCH 4/5] "removed CUDA_VISIBLE_DEVICES default"

---
 python/paddle/v2/fluid/__init__.py | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py
index dc78c3f4f2e06..787416aed1acf 100644
--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
@@ -89,25 +89,11 @@ def __bootstrap__():
     read_env_flags = [
         'use_pinned_memory', 'check_nan_inf', 'do_memory_benchmark'
     ]
-    if core.is_compile_gpu():
+    if core.is_compiled_with_cuda():
        read_env_flags += ['fraction_of_gpu_memory_to_use', 'op_sync']
-
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
     core.init_glog(sys.argv[0])
-
-    gpu_devices = os.getenv("CUDA_VISIBLE_DEVICES", '')
-    if core.is_compile_gpu():
-        if len(gpu_devices.split(",")) >= 1:
-            print(
-                'WARNING: CUDA_VISIBLE_DEVICES set to {0}, not empty . The computation '
-                'speed will not be optimized if you use multi-gpu. It will '
-                'fail if this PaddlePaddle binary is compiled without GPU option'
-                .format(gpu_devices),
-                file=sys.stderr)
-        else:
-            gpu_devices = "0"
-            os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
     core.init_devices()
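After the fourth patch, the bootstrap no longer writes a CUDA_VISIBLE_DEVICES default, so device visibility is left entirely to the caller's environment. The sketch below shows how a script could pin itself to one GPU before the bootstrap runs; the module path is taken from the diff above, and setting the variable before import is ordinary CUDA-runtime behaviour, not a documented fluid API.

import os

# Hypothetical usage, not part of the patch: the CUDA runtime reads
# CUDA_VISIBLE_DEVICES when it initializes, so the variable has to be set
# before the fluid import triggers __bootstrap__() and core.init_devices().
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")

import paddle.v2.fluid as fluid  # imported after the environment is prepared

Left unset, the process simply sees every GPU on the machine, as it did before patch 2.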
From 44c56cc10bfb63e0e65413626e43a70048483207 Mon Sep 17 00:00:00 2001
From: dongzhihong
Date: Wed, 31 Jan 2018 11:28:26 +0800
Subject: [PATCH 5/5] "removed"

---
 paddle/framework/init_test.cc | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/paddle/framework/init_test.cc b/paddle/framework/init_test.cc
index d1747a39d5135..01e076dd8ea24 100644
--- a/paddle/framework/init_test.cc
+++ b/paddle/framework/init_test.cc
@@ -31,13 +31,8 @@ TEST(InitDevices, CUDA) {
   using paddle::framework::InitDevices;
   using paddle::platform::DeviceContextPool;
 
-  int count = 0;
-  try {
-    count = paddle::platform::GetCUDADeviceCount();
-  } catch (const std::exception& exp) {
-  }
-
 #ifdef PADDLE_WITH_CUDA
+  int count = paddle::platform::GetCUDADeviceCount();
   InitDevices();
   DeviceContextPool& pool = DeviceContextPool::Instance();
   ASSERT_EQ(pool.size(), 1U + static_cast<unsigned>(count));