Commit 6fb310ae authored by Leo Chen, committed by Zeng Jinle

Fix bug of getting bool Flags from os.environ (#19349)

* fix bug of getting bool Flags from os.environ, test=develop

* add empty loss_name in CompiledProgram for inplace grad test, test=develop
Parent commit: 80489920
......@@ -86,6 +86,9 @@ DEFINE_bool(reader_queue_speed_test_mode, false,
"If set true, the queue.pop will only get data from queue but not "
"remove the data from queue for speed testing");
// gflags defined in other translation units; declared here so the
// pybind bindings below can read their parsed runtime values.
DECLARE_bool(use_mkldnn);
#ifdef PADDLE_WITH_NGRAPH
// FLAGS_use_ngraph only exists when Paddle is built with nGraph support,
// so the declaration must be guarded the same way as its definition.
DECLARE_bool(use_ngraph);
#endif
// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);
......@@ -740,6 +743,9 @@ All parameter, weight, gradient are variables in Paddle.
return framework::OpInfoMap::Instance().Get(op_type).HasInferInplace();
});
// Expose the parsed C++ gflag values to Python. Per this commit's message,
// Python code previously did bool(os.getenv("FLAGS_...", False)), which is
// buggy because any non-empty string (e.g. "0") is truthy in Python; reading
// the already-parsed gflag avoids re-interpreting the environment string.
m.def("get_flags_use_mkldnn", []() { return FLAGS_use_mkldnn; });
#ifdef PADDLE_WITH_NGRAPH
// Only registered in nGraph builds; Python callers guard with
// is_compiled_with_ngraph() before calling (see the OpTest hunk below).
m.def("get_flags_use_ngraph", []() { return FLAGS_use_ngraph; });
#endif
m.def("prune", [](const ProgramDesc &origin,
const std::vector<std::array<size_t, 2>> &targets) {
......
......@@ -162,7 +162,7 @@ class TestMKLDNNPostTrainingQuantStrategy(unittest.TestCase):
fetch_targets] = fluid.io.load_inference_model(
model_path, exe, 'model', 'params')
# NOTE(review): this is a diff view — the first assignment is the removed
# line, the second its replacement. bool(os.getenv(...)) was buggy: getenv
# returns a string, and any non-empty string (including "0") is truthy.
use_mkldnn = bool(os.getenv("FLAGS_use_mkldnn", False))
# Read the flag as parsed by the C++ side instead of re-parsing the env var.
use_mkldnn = fluid.core.get_flags_use_mkldnn()
if (use_mkldnn):
graph = IrGraph(
core.Graph(inference_program.desc), for_test=True)
......
......@@ -680,7 +680,8 @@ class OpTest(unittest.TestCase):
return []
places = [fluid.CPUPlace()]
cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False
# NOTE(review): diff view — first assignment is the removed env-based read,
# which treated any non-empty string (e.g. "0") as True.
use_ngraph = bool(os.getenv("FLAGS_use_ngraph", False))
# Guard with is_compiled_with_ngraph() first: get_flags_use_ngraph is only
# registered in nGraph builds (see the #ifdef PADDLE_WITH_NGRAPH hunk above).
use_ngraph = fluid.core.is_compiled_with_ngraph(
) and fluid.core.get_flags_use_ngraph()
if use_ngraph:
cpu_only = True
if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment