Commit 162f2d41 authored by peizhilin

Disable OpenBLAS multi-threading on Windows, since it is not supported there.

Adjust the Python scripts accordingly.
Parent d1429ac4
@@ -29,6 +29,12 @@ namespace platform {
 void SetNumThreads(int num_threads) {
 #ifdef PADDLE_USE_OPENBLAS
+// windows has no support for openblas multi-thread
+#ifdef _WIN32
+  if (num_threads > 1) {
+    num_threads = 1;
+  }
+#endif
   int real_num_threads = num_threads > 1 ? num_threads : 1;
   openblas_set_num_threads(real_num_threads);
 #elif defined(PADDLE_WITH_MKLML)
......
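For reference, the clamping added above can be sketched in pure Python (a hypothetical mirror for illustration, not part of this commit): on Windows any requested OpenBLAS thread count above one collapses to a single thread, while other platforms keep the request with a floor of one.

def effective_openblas_threads(requested, is_windows):
    # Hypothetical mirror of the OpenBLAS branch of SetNumThreads above.
    if is_windows and requested > 1:
        requested = 1  # Windows builds have no OpenBLAS multi-thread support
    return requested if requested > 1 else 1

assert effective_openblas_threads(8, is_windows=True) == 1
assert effective_openblas_threads(8, is_windows=False) == 8
assert effective_openblas_threads(0, is_windows=True) == 1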
@@ -113,13 +113,6 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
   places.emplace_back(platform::CPUPlace());
   platform::DeviceContextPool::Init(places);
-// windows has no support for openblas multi-thread
-#ifdef _WIN32
-  if (FLAGS_paddle_num_threads > 1) {
-    FLAGS_paddle_num_threads = 1;
-  }
-#endif
 #ifndef PADDLE_WITH_MKLDNN
   platform::SetNumThreads(FLAGS_paddle_num_threads);
 #endif
......
@@ -47,7 +47,8 @@ from . import profiler
 from . import unique_name
 from . import recordio_writer
 from . import parallel_executor
-from .parallel_executor import *
+if os.name != 'nt':
+    from .parallel_executor import *
 from paddle.fluid.layers.math_op_patch import monkey_patch_variable
 Tensor = LoDTensor
......
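Because the wildcard re-export above is now skipped on Windows, user code should not assume the ParallelExecutor symbols exist in paddle.fluid on every platform. A minimal defensive check, assuming paddle is installed (the printed messages are illustrative only):

import os

import paddle.fluid as fluid

# ParallelExecutor is only re-exported from paddle.fluid when os.name != 'nt'.
if os.name != 'nt' and hasattr(fluid, 'ParallelExecutor'):
    print('ParallelExecutor is available on this platform')
else:
    print('ParallelExecutor is unavailable here; fall back to fluid.Executor')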
@@ -15,15 +15,13 @@
 from __future__ import print_function
 import contextlib
-import os
 from .. import core
 from .. import executor
 from .. import framework
 from .. import io
-if os.name != 'nt':
-    from .. import parallel_executor
+from .. import parallel_executor
 from .. import unique_name
 from .trainer import check_and_get_place
......
@@ -28,8 +28,7 @@ from .. import framework
 from .. import io
 # optimizer is same as the parameter of Trainer.__init__. Rename it to opt_module
 from .. import optimizer as opt_module
-if os.name != 'nt':
-    from .. import parallel_executor
+from .. import parallel_executor
 from ..transpiler import distribute_transpiler
 __all__ = [
......
@@ -25,11 +25,12 @@ import os
 __all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy']
-ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
-BuildStrategy = core.ParallelExecutor.BuildStrategy
+if os.name != 'nt':
+    ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
+    BuildStrategy = core.ParallelExecutor.BuildStrategy
 class ParallelExecutor(object):
     """
     ParallelExecutor is designed for data parallelism, which focuses on distributing
     the data across different nodes and every node operates on the data in parallel.
......
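Since ExecutionStrategy and BuildStrategy are now bound only when os.name != 'nt', configuration code that constructs them needs the same guard. A minimal sketch, assuming a non-Windows platform and that these classes are exposed from paddle.fluid as in this version (num_threads is an assumed strategy attribute):

import os

import paddle.fluid as fluid

if os.name != 'nt':
    # These names are undefined on Windows after this change.
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 4  # assumed attribute; tune for your machine
    build_strategy = fluid.BuildStrategy()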