Commit b80fe826 authored by Xin Pan

polish

test=develop
Parent 93c16d96
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 syntax = "proto2";
-// option optimize_for = LITE_RUNTIME;
+option optimize_for = LITE_RUNTIME;
 package paddle.framework.proto;
 
 // Any incompatible changes to ProgramDesc and its dependencies should
......
@@ -43,24 +43,31 @@ void CreateGradOp(const framework::OpDesc& op_desc,
 class Tracer {
  public:
-  Tracer() {}
+  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
+    root_scope_ = new framework::Scope();
+    scopes_[root_block_] = root_scope_;
+  }
+
+  virtual ~Tracer() { delete root_scope_; }
 
   void Trace(OpBase* op, const std::vector<VarBase*>& inputs,
-             const std::vector<VarBase*>& outputs) {
+             const std::vector<VarBase*>& outputs,
+             framework::BlockDesc* block) {
+    framework::Scope* scope = GetScope(block);
     framework::OpDesc* op_desc = op->op_desc_;
     LOG(ERROR) << "tracer tracing " << op_desc->Type();
-    op_desc->InferShape(*block_);
-    op_desc->InferVarType(block_);
+    op_desc->InferShape(*block);
+    op_desc->InferVarType(block);
     std::unique_ptr<framework::OperatorBase> op_base =
         framework::OpRegistry::CreateOp(*op_desc);
 
     *op->input_vars_ = inputs;
     for (VarBase* input : inputs) {
       const std::string vname = input->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       input->var_ = var;
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -78,9 +85,9 @@ class Tracer {
     *op->output_vars_ = outputs;
     for (size_t i = 0; i < outputs.size(); ++i) {
       const std::string vname = outputs[i]->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -91,28 +98,30 @@ class Tracer {
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
     }
-    op_base->Run(*scope_, platform::CPUPlace());
+    op_base->Run(*scope, platform::CPUPlace());
     framework::OpDesc* grad_op_desc;
     auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block_}, &grad_op_desc, grad_to_var);
+    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
     op->grad_op_desc_ = grad_op_desc;
     op->grad_to_var_ = grad_to_var;
-    op->block_ = block_;
+    op->block_ = block;
   }
 
-  void SetScope(framework::Scope* scope) { scope_ = scope; }
-
-  void SetBlock(framework::BlockDesc* block) { block_ = block; }
-
-  framework::Scope* Scope() const { return scope_; }
-
-  framework::BlockDesc* Block() const { return block_; }
+  framework::Scope* GetScope(framework::BlockDesc* block) {
+    if (scopes_.find(block) != scopes_.end()) {
+      return scopes_.at(block);
+    }
+    framework::BlockDesc* parent_block = block->ParentBlock();
+    PADDLE_ENFORCE(scopes_.find(parent_block) != scopes_.end());
+    framework::Scope* scope = &scopes_[parent_block]->NewScope();
+    scopes_[block] = scope;
+    return scope;
+  }
 
  private:
-  framework::BlockDesc* block_;
-  framework::Scope* scope_;
-  std::vector<Runnable*> runnables_;
+  std::map<framework::BlockDesc*, framework::Scope*> scopes_;
+  framework::BlockDesc* root_block_;
+  framework::Scope* root_scope_;
 };
 
 }  // namespace imperative
......
@@ -23,20 +23,13 @@ namespace pybind {
 // Bind Methods
 void BindTracer(pybind11::module *m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
-      .def(pybind11::init<>())
+      .def("__init__",
+           [](imperative::Tracer &self, framework::BlockDesc *root_block) {
+             new (&self) imperative::Tracer(root_block);
+           })
       .def("trace", &imperative::Tracer::Trace)
-      .def_property("scope",
-                    [](const imperative::Tracer &self) { return self.Scope(); },
-                    [](imperative::Tracer &self, framework::Scope *scope) {
-                      self.SetScope(scope);
-                    },
-                    R"DOC()DOC")
-      .def_property("block",
-                    [](const imperative::Tracer &self) { return self.Block(); },
-                    [](imperative::Tracer &self, framework::BlockDesc *block) {
-                      self.SetBlock(block);
-                    },
-                    R"DOC()DOC");
+      .def("get_scope", &imperative::Tracer::GetScope,
+           pybind11::return_value_policy::reference);
 }
 
 }  // namespace pybind
......
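Note: a minimal sketch of how the rebound Tracer is expected to be driven from Python after this change; it mirrors what the imperative guard later in this diff does, and assumes only the core and framework modules already imported elsewhere in the diff.

    # Sketch only: construct a Tracer from a root block and look up its scope.
    from paddle.fluid import core, framework

    train = framework.Program()

    # The Tracer is now built from a root BlockDesc instead of having
    # .scope/.block assigned afterwards through properties.
    tracer = core.Tracer(train.current_block().desc)

    # get_scope() returns the scope the tracer owns for a given block;
    # for a sub-block it creates a child of the parent block's scope.
    scope = tracer.get_scope(train.current_block().desc)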
@@ -358,11 +358,13 @@ class Variable(core.VarBase):
         self.stop_gradient = stop_gradient
         self.is_data = is_data
 
-    def numpy(self, scope):
+    def numpy(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         tensor = core.get_variable_tensor(scope, self.desc.name())
         return np.array(tensor)
 
-    def backward(self, scope):
+    def backward(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         self._run_backward(scope)
 
     def grad(self):
@@ -668,14 +670,14 @@ class Operator(core.OpBase):
         for inp in inputs.values():
             if isinstance(inp, Variable):
                 input_vars.append(inp)
-            elif isinstance(inp, list):
+            elif isinstance(inp, list) or isinstance(inp, tuple):
                 input_vars.extend(inp[:])
         self.inputs = input_vars
 
         output_vars = []
         for out in outputs.values():
             if isinstance(out, Variable):
                 output_vars.append(out)
-            elif isinstance(inp, list):
+            elif isinstance(out, list) or isinstance(out, tuple):
                 output_vars.extend(out[:])
         self.outputs = output_vars
@@ -1246,7 +1248,7 @@ class Block(object):
         if _in_imperative_mode():
             op_desc = core.OpDesc()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
-            _imperative_tracer().trace(op, op.inputs, op.outputs)
+            _imperative_tracer().trace(op, op.inputs, op.outputs, self.desc)
         else:
             op_desc = self.desc.append_op()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
@@ -2257,9 +2259,9 @@ def _get_var(name, program=None):
 @contextlib.contextmanager
-def _imperative_guard():
+def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_
-    _imperative_tracer_ = core.Tracer()
+    _imperative_tracer_ = tracer
 
     yield
 
     _imperative_tracer_ = tmp_trace
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 import contextlib
+import numpy as np
+
 from paddle.fluid import core
 from paddle.fluid import framework
 
-__all__ = ['enabled', 'guard']
+__all__ = ['enabled', 'guard', 'to_variable']
 
 
 def enabled():
@@ -26,8 +28,29 @@ def enabled():
 def guard():
     train = framework.Program()
     startup = framework.Program()
+    tracer = core.Tracer(train.current_block().desc)
     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
-            with framework._imperative_guard():
+            with framework._imperative_guard(tracer):
                 yield
+                # TODO: check train, startup not changed.
+
+
+def to_variable(value, block=None):
+    if isinstance(value, np.ndarray):
+        if not block:
+            block = framework.default_main_program().current_block()
+        py_var = framework.Variable(
+            block,
+            type=core.VarDesc.VarType.LOD_TENSOR,
+            name=None,
+            shape=value.shape,
+            dtype=value.dtype)
+        scope = framework._imperative_tracer().get_scope(block.desc)
+        var = scope.var(py_var.name)
+        tensor = var.get_tensor()
+        tensor.set(value, core.CPUPlace())
+        return py_var
+    elif isinstance(value, framework.Variable):
+        return value
+    else:
+        raise ValueError("Unsupported type %s" % type(value))
@@ -18,51 +18,32 @@ import numpy as np
 from paddle.fluid import core
 from paddle.fluid import framework
+from paddle.fluid.imperative import base
 
 __all__ = ['PyLayer']
 
 
-@contextlib.contextmanager
-def trace_scope(scope, block):
-    tmp_scope = framework._imperative_tracer().scope
-    tmp_block = framework._imperative_tracer().block
-    framework._imperative_tracer().scope = scope
-    framework._imperative_tracer().block = block
-    yield
-    framework._imperative_tracer().scope = tmp_scope
-    framework._imperative_tracer().block = tmp_block
-
-
 class PyLayer(core.Layer):
     def __init__(self):
-        self._scope = core.Scope()
-        self._block = framework.default_main_program().current_block()
+        pass
 
     def __call__(self, inputs):
-        with trace_scope(self._scope, self._block.desc):
-            if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-                inputs = [inputs]
-
-            var_inputs = []
-            for x in inputs:
-                if isinstance(x, np.ndarray):
-                    py_var = framework.Variable(
-                        self._block,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        name=None,
-                        shape=x.shape,
-                        dtype=x.dtype)
-                    var = self._scope.var(py_var.name)
-                    tensor = var.get_tensor()
-                    tensor.set(x, core.CPUPlace())
-                    var_inputs.append(py_var)
-                elif isinstance(x, framework.Variable):
-                    var_inputs.append(x)
-                else:
-                    raise ValueError("not var or ndarray %s" % type(x))
-            outputs = self.forward(var_inputs)
-            return outputs
+        # TODO(panyx0718): Support declarative mode as well.
+        assert base.enabled()
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+
+        var_inputs = []
+        for x in inputs:
+            if isinstance(x, np.ndarray):
+                py_var = base.to_variable(x)
+                var_inputs.append(py_var)
+            elif isinstance(x, framework.Variable):
+                var_inputs.append(x)
+            else:
+                raise ValueError("not var or ndarray %s" % type(x))
+        outputs = self.forward(var_inputs)
+        return outputs
 
     def forward(self, inputs):
-        print("at python.")
         return []
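Note: for reference, a minimal layer written against the reworked PyLayer. The class name MyScale, the scale op in forward, and the fluid.imperative.PyLayer import path are illustrative assumptions; only the structure (no per-layer scope, assert on base.enabled(), list in / list out) follows this diff.

    # Sketch only: a user-defined imperative layer under the new PyLayer contract.
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative import base


    class MyScale(fluid.imperative.PyLayer):  # import path assumed
        def __init__(self):
            super(MyScale, self).__init__()

        def forward(self, inputs):
            # inputs arrive as Variables; ops built here are traced eagerly.
            return [fluid.layers.scale(inputs[0], scale=2.0)]


    with base.guard():
        l = MyScale()
        y = l(np.array([1.0, 2.0], dtype=np.float32))[0]
        print(y.numpy())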
@@ -23,6 +23,7 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
+from paddle.fluid.imperative import base
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
 from six.moves import zip
@@ -62,7 +63,7 @@ class LayerHelper(object):
         if isinstance(x, Variable):
             return x
         elif isinstance(x, np.ndarray):
-            return self._np_to_variable(x)
+            return base.to_variable(x, self.main_program.current_block())
         else:
             raise ValueError("inputs wrong type %s\n" % x)
......
@@ -17,7 +17,6 @@ All layers just related to the neural network.
 
 from __future__ import print_function
 
-import sys
 import numpy as np
 import os
 from ..layer_helper import LayerHelper
......
@@ -43,8 +43,8 @@ class TestImperative(unittest.TestCase):
             l = MyLayer()
             x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
             self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
-            x.backward(l._scope)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy()))
+            x.backward()
             sys.stderr.write("grad %s\n" % l._x_for_debug.grad())
......
@@ -101,6 +101,7 @@ packages=['paddle',
           'paddle.dataset',
           'paddle.reader',
           'paddle.fluid',
+          'paddle.fluid.imperative',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.layers',
......