diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index d71d792b4e0147afc0ec09808d2004cb4b48ba46..c0df4c2dc6cd46a90d4205b26123fb6c44fcaf88 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -13,6 +13,7 @@ paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, d paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '7d9a51fc9cf3c5245b5227080a8064c3')) paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '4c0cd83f0b401fc2ff84c70974e5d210')) paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd0c3ebd813c39958c92b78e3eef7e912')) +paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'f06314a1cb30c96b5808dde2219c2dae')) paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'f5369953dd0c443961cf79f7a00e1a03')) paddle.fluid.Executor.infer_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', '9c7decb955b9c4f718114179c8985581')) @@ -155,10 +156,10 @@ paddle.fluid.layers.label_smooth (ArgSpec(args=['label', 'prior_dist', 'epsilon' paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', 'c317aa595deb31649083c8faa91cdb97')) paddle.fluid.layers.roi_align (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '12c5bbb8b38c42e623fbc47611d766e1')) paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)), ('document', '1ba0508d573f65feecf3564dce22aa1d')) -paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1)), ('document', '7a1966d7c3a48f1fc0881cdaf5d83b0b')) +paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1)), ('document', 'd1b08c11bb9277386fcf6ae70b6622d1')) paddle.fluid.layers.image_resize_short (ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)), ('document', '06211aefc50c5a3e940d7204d859cdf7')) -paddle.fluid.layers.resize_bilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1)), ('document', 'e4fb4ed511b2293b8f04f7e872afbfd7')) -paddle.fluid.layers.resize_nearest (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners'], varargs=None, keywords=None, defaults=(None, None, None, None, True)), ('document', '735fa9758a6d7ff3b47d7b827f961c1d')) 
+paddle.fluid.layers.resize_bilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1)), ('document', 'c45591fbc4f64a178fbca219e1546a58')) +paddle.fluid.layers.resize_nearest (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners'], varargs=None, keywords=None, defaults=(None, None, None, None, True)), ('document', 'ae6d73cdc7f3a138d8a338ecdb33c1ae')) paddle.fluid.layers.gather (ArgSpec(args=['input', 'index'], varargs=None, keywords=None, defaults=None), ('document', '98f1c86716b9b7f4dda83f20e2adeee2')) paddle.fluid.layers.scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '65f8e9d8ddfd0b412f940579c4faa342')) paddle.fluid.layers.sequence_scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '15b522457dfef103f0c20ca9d397678b')) diff --git a/paddle/fluid/framework/details/inplace_op_pass.cc b/paddle/fluid/framework/details/inplace_op_pass.cc index 79150f719e379ca4e2b87d2e7db1b2daeee9aa67..84c9e4a379a5e07dc3a8e85409c804eebc390c73 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.cc +++ b/paddle/fluid/framework/details/inplace_op_pass.cc @@ -305,6 +305,12 @@ void InplacePass::TryInplaceOpInputOutput(ir::Node* op, VLOG(4) << "Try to inplace " << in_var_name << " with " << out_var_name; + if (var_nodes_[in_var_name].back() != in_node) { + VLOG(4) << "SKIP since " << in_var_name + << " is also used as output by other ops"; + continue; + } + bool can_replace = true; if (in_var_name == out_var_name) { can_replace = false; @@ -527,6 +533,9 @@ void GraphView::Build(ir::Graph* g) { }; for (auto& node : g->Nodes()) { if (!node->IsOp()) continue; + // avoid optimize the variable used in sub-blocks + if (OpHasSubBlock(node->Op())) update_skip_set(node); + if (node->Name() == "send") update_skip_set(node); if (node->Name() == "recv") update_skip_set(node); if (node->Name() == "prefetch") update_skip_set(node); diff --git a/paddle/fluid/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h index a9a4fb08a2ca4689e8b6a6f10f83d065332ac192..18de595983f52e56dba4f5069257f354132db51b 100644 --- a/paddle/fluid/framework/details/op_registry.h +++ b/paddle/fluid/framework/details/op_registry.h @@ -233,6 +233,12 @@ struct OpInfoFiller { } }; +// A fake OpInfoFiller of void +template <> +struct OpInfoFiller { + void operator()(const char* op_type, OpInfo* info) const {} +}; + } // namespace details } // namespace framework diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index de564dbb40b3fed8cb165e34877a8cdc3ee5e349..9b0873aecb545067180723c363a38bed1552fb2a 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -123,8 +123,8 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) { // will enhance this pass later. 
"runtime_context_cache_pass", // "attention_lstm_fuse_pass", // - "seqpool_concat_fuse_pass", // "seqconv_eltadd_relu_fuse_pass", // + // "seqpool_concat_fuse_pass", // // "embedding_fc_lstm_fuse_pass", // "fc_lstm_fuse_pass", // "mul_lstm_fuse_pass", // diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc index d6f7f468a6c83bd6c4ac087931d0c6b7cac3cc1c..3cebf8e96984fad0de8d8c6775990f7c6a6cabe5 100644 --- a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc @@ -150,6 +150,9 @@ void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false) { if (use_mkldnn) { cfg->EnableMKLDNN(); } + // Enable seqpool_concat_fuse_pass, disabled by default since it takes much + // time + cfg->pass_builder()->InsertPass(2, "seqpool_concat_fuse_pass"); } void profile(bool use_mkldnn = false) { diff --git a/paddle/fluid/op_use_default_grad_op_maker.spec b/paddle/fluid/op_use_default_grad_op_maker.spec index 4e833dd144399fd50108764c4cb360fde0982b94..21a25ce7d5e2bad172cf50cee6138ef4b44b07c1 100644 --- a/paddle/fluid/op_use_default_grad_op_maker.spec +++ b/paddle/fluid/op_use_default_grad_op_maker.spec @@ -1,14 +1,7 @@ -abs -acos -asin -atan attention_lstm -brelu conv_shift -cos cos_sim dequantize -elu fc flatten fsp @@ -21,13 +14,8 @@ fusion_seqconv_eltadd_relu fusion_seqexpand_concat_fc fusion_seqpool_concat fusion_squared_mat_sub -gelu gru -hard_shrink hierarchical_sigmoid -leaky_relu -log -logsigmoid lrn lstm_unit lstmp @@ -38,7 +26,6 @@ modified_huber_loss nce pool2d pool3d -pow prelu quantize rank_loss @@ -50,20 +37,10 @@ reduce_sum requantize reshape rnn_memory_helper -round sequence_softmax -sin -softplus -softshrink -softsign spp -square squeeze -stanh -swish -tanh_shrink tensor_array_to_tensor -thresholded_relu transpose unpool unsqueeze diff --git a/paddle/fluid/operators/activation_cudnn_op.cu.cc b/paddle/fluid/operators/activation_cudnn_op.cu.cc index a382414d5c473a9c36f92a9af56837da819e96a4..f03355eb441f99b54d78fe90bcb3bea116db58f1 100644 --- a/paddle/fluid/operators/activation_cudnn_op.cu.cc +++ b/paddle/fluid/operators/activation_cudnn_op.cu.cc @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/platform/cudnn_desc.h" @@ -82,6 +85,8 @@ template struct CudnnReluGradFunctor : public CudnnActivationGradFunctor { explicit CudnnReluGradFunctor(const CUDADeviceContext& ctx) : CudnnActivationGradFunctor(ctx, 0.0, CUDNN_ACTIVATION_RELU) {} + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; template @@ -94,6 +99,8 @@ struct CudnnRelu6GradFunctor : public CudnnActivationGradFunctor { explicit CudnnRelu6GradFunctor(const CUDADeviceContext& ctx) : CudnnActivationGradFunctor(ctx, 6.0, CUDNN_ACTIVATION_CLIPPED_RELU) { } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; template @@ -105,6 +112,8 @@ template struct CudnnSigmoidGradFunctor : public CudnnActivationGradFunctor { explicit CudnnSigmoidGradFunctor(const CUDADeviceContext& ctx) : CudnnActivationGradFunctor(ctx, 0.0, CUDNN_ACTIVATION_SIGMOID) {} + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; template @@ -116,6 +125,8 @@ template struct CudnnTanhGradFunctor : public CudnnActivationGradFunctor { explicit CudnnTanhGradFunctor(const CUDADeviceContext& ctx) : CudnnActivationGradFunctor(ctx, 0.0, CUDNN_ACTIVATION_TANH) {} + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; template @@ -140,10 +151,13 @@ class CudnnActivationGradKernel public: using T = typename Functor::ELEMENT_TYPE; void Compute(const framework::ExecutionContext& context) const override { + static_assert(Functor::FwdDeps() == kDepOut, "Forward deps must be Out."); + const framework::Tensor *X, *Out, *dOut; X = Out = dOut = nullptr; framework::Tensor* dX = nullptr; - ExtractActivationGradTensor(context, &X, &Out, &dOut, &dX); + ExtractActivationGradTensor(context, &X, &Out, &dOut, + &dX); dX->mutable_data(context.GetPlace()); auto& dev_ctx = context.template device_context(); Functor functor(dev_ctx); diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index c87e4b22b37027efd1293e74f72598283946e62d..1e5d63fc11d1d81350525e2b3390a3ae44f00f8d 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -15,7 +15,9 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/activation_op.h" #include #include +#include #include +#include #include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h" #include "paddle/fluid/platform/port.h" #ifdef PADDLE_WITH_CUDA @@ -27,6 +29,25 @@ namespace operators { using paddle::framework::Tensor; +template +static constexpr bool CanInplaceAct() { + return GradFunctor::FwdDeps() == kDepOut || GradFunctor::FwdDeps() == kNoDeps; +} + +std::unique_ptr> GetInplaceOpSet() { + std::unique_ptr> ret( + new std::unordered_set()); +#define INSERT_INTO_INPLACE_OP_SET(op_type, __omitted, fwd_functor, \ + bwd_functor) \ + if (CanInplaceAct>()) { \ + ret->insert(#op_type); \ + } + + FOR_EACH_ACTIVATION_OP(INSERT_INTO_INPLACE_OP_SET); +#undef INSERT_INTO_INPLACE_OP_SET + return ret; +} + #define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ class OP_NAME##OpMaker \ : public ::paddle::framework::OpProtoAndCheckerMaker { \ @@ -50,26 +71,32 @@ using paddle::framework::Tensor; } \ } -#define REGISTER_ACTIVATION_OP_GRAD_MAKER(OP_NAME, KERNEL_TYPE) \ - class OP_NAME##GradMaker \ - : public ::paddle::framework::SingleGradOpDescMaker { \ - public: \ - using ::paddle::framework::SingleGradOpDescMaker::SingleGradOpDescMaker; \ - \ - protected: \ - std::unique_ptr<::paddle::framework::OpDesc> Apply() const override { \ - auto* op = new ::paddle::framework::OpDesc(); \ - op->SetType(#KERNEL_TYPE "_grad"); \ - op->SetInput("Out", Output("Out")); \ - op->SetInput(::paddle::framework::GradVarName("Out"), \ - OutputGrad("Out")); \ - \ - op->SetAttrMap(Attrs()); \ - \ - op->SetOutput(::paddle::framework::GradVarName("X"), InputGrad("X")); \ - return std::unique_ptr<::paddle::framework::OpDesc>(op); \ - } \ +template +class ActivationGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType(ForwardOpType() + "_grad"); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + + if (static_cast(kDepValue) & + static_cast(ActBwdOpFwdDeps::kDepX)) { + op->SetInput("X", Input("X")); + } + + if (static_cast(kDepValue) & + static_cast(ActBwdOpFwdDeps::kDepOut)) { + op->SetInput("Out", Output("Out")); + } + + return op; } +}; framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx, const framework::OperatorWithKernel& oper, @@ -129,14 +156,15 @@ class ActivationOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - ctx->ShareDim("Out", framework::GradVarName("X")); - ctx->ShareLoD("Out", framework::GradVarName("X")); + auto out_grad_name = framework::GradVarName("Out"); + ctx->ShareDim(out_grad_name, framework::GradVarName("X")); + ctx->ShareLoD(out_grad_name, framework::GradVarName("X")); } protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return GetKernelType(ctx, *this, "Out"); + return GetKernelType(ctx, *this, framework::GradVarName("Out")); } }; @@ -558,79 +586,27 @@ REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc); REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc); REGISTER_ACTIVATION_OP_MAKER(Softplus, SoftplusDoc); REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc); - -REGISTER_ACTIVATION_OP_GRAD_MAKER(Sigmoid, sigmoid); 
-REGISTER_ACTIVATION_OP_GRAD_MAKER(Relu, relu); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Gelu, gelu); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Exp, exp); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Tanh, tanh); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Ceil, ceil); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Floor, floor); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Sqrt, sqrt); -REGISTER_ACTIVATION_OP_GRAD_MAKER(SoftRelu, soft_relu); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Relu6, relu6); -REGISTER_ACTIVATION_OP_GRAD_MAKER(Reciprocal, reciprocal); -REGISTER_ACTIVATION_OP_GRAD_MAKER(HardSigmoid, hard_sigmoid); } // namespace operators } // namespace paddle namespace ops = paddle::operators; -#define FOR_EACH_INPLACE_OP_FUNCTOR(__macro) \ - __macro(Sigmoid, sigmoid); \ - __macro(Relu, relu); \ - __macro(Exp, exp); \ - __macro(Tanh, tanh); \ - __macro(Ceil, ceil); \ - __macro(Floor, floor); \ - __macro(Sqrt, sqrt); \ - __macro(SoftRelu, soft_relu); \ - __macro(Relu6, relu6); \ - __macro(Reciprocal, reciprocal); \ - __macro(HardSigmoid, hard_sigmoid); - -#define FOR_EACH_OP_FUNCTOR(__macro) \ - __macro(LogSigmoid, logsigmoid); \ - __macro(SoftShrink, softshrink); \ - __macro(Abs, abs); \ - __macro(Cos, cos); \ - __macro(Acos, acos); \ - __macro(Sin, sin); \ - __macro(Asin, asin); \ - __macro(Atan, atan); \ - __macro(Round, round); \ - __macro(Log, log); \ - __macro(Square, square); \ - __macro(Gelu, gelu); \ - __macro(BRelu, brelu); \ - __macro(Pow, pow); \ - __macro(STanh, stanh); \ - __macro(Softplus, softplus); \ - __macro(Softsign, softsign); \ - __macro(LeakyRelu, leaky_relu); \ - __macro(TanhShrink, tanh_shrink); \ - __macro(ELU, elu); \ - __macro(HardShrink, hard_shrink); \ - __macro(Swish, swish); \ - __macro(ThresholdedRelu, thresholded_relu); - -#define REGISTER_INPLACE_ACTIVATION_OP(OP_NAME, KERNEL_TYPE) \ - REGISTER_OPERATOR(KERNEL_TYPE, ::paddle::operators::ActivationOp, \ - ::paddle::operators::OP_NAME##OpMaker, \ - ::paddle::operators::ActivationOpInferVarType, \ - ::paddle::operators::OP_NAME##GradMaker, \ - ::paddle::framework::SingleOpInplaceInToOut); \ - REGISTER_OPERATOR(KERNEL_TYPE##_grad, ::paddle::operators::ActivationOpGrad, \ - ::paddle::framework::SingleOpInplaceInToOut) - -#define REGISTER_ACTIVATION_OP(OP_NAME, KERNEL_TYPE) \ - REGISTER_OPERATOR(KERNEL_TYPE, ::paddle::operators::ActivationOp, \ - ::paddle::operators::OP_NAME##OpMaker, \ - ::paddle::operators::ActivationOpInferVarType, \ - ::paddle::framework::DefaultGradOpDescMaker); \ - REGISTER_OPERATOR(KERNEL_TYPE##_grad, ::paddle::operators::ActivationOpGrad) - -#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ +#define REGISTER_ACTIVATION_OP(KERNEL_TYPE, OP_NAME, functor, grad_functor) \ + REGISTER_OPERATOR( \ + KERNEL_TYPE, ops::ActivationOp, ops::OP_NAME##OpMaker, \ + ops::ActivationOpInferVarType, \ + ops::ActivationGradOpDescMaker::FwdDeps()>, \ + std::conditional>(), \ + ::paddle::framework::SingleOpInplaceInToOut, \ + void>::type); \ + REGISTER_OPERATOR( \ + KERNEL_TYPE##_grad, ops::ActivationOpGrad, \ + std::conditional>(), \ + ::paddle::framework::SingleOpInplaceInToOut, \ + void>::type) + +#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, op_name, functor, \ + grad_functor) \ REGISTER_OP_CPU_KERNEL( \ act_type, ops::ActivationKernel>, \ @@ -643,6 +619,5 @@ namespace ops = paddle::operators; ops::ActivationGradKernel>); -FOR_EACH_OP_FUNCTOR(REGISTER_ACTIVATION_OP); -FOR_EACH_INPLACE_OP_FUNCTOR(REGISTER_INPLACE_ACTIVATION_OP); -FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); +FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_OP); 
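For a concrete picture of what the conditional registration above resolves to, here is a rough hand expansion for relu, whose ReluGradFunctor reports kDepOut; template arguments are abbreviated and assumed, so treat it as an illustration rather than literal preprocessor output:

// Sketch only: approximate expansion of REGISTER_ACTIVATION_OP(relu, Relu, ReluFunctor, ReluGradFunctor).
// ReluGradFunctor::FwdDeps() is kDepOut, so CanInplaceAct<ReluGradFunctor<float>>() is true and the
// std::conditional branch selects SingleOpInplaceInToOut; for a kDepX op such as gelu it selects void,
// i.e. no inplace inference is registered for that op.
REGISTER_OPERATOR(
    relu, ops::ActivationOp, ops::ReluOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpDescMaker<ops::ReluGradFunctor<float>::FwdDeps()>,
    paddle::framework::SingleOpInplaceInToOut);
REGISTER_OPERATOR(relu_grad, ops::ActivationOpGrad,
                  paddle::framework::SingleOpInplaceInToOut);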
+FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CPU_KERNEL); diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu index d3a7ceed466a9b5e4d773f1531d198adff97eac2..9c7a8d8971cba4090db1bbc32c7eabf2285e7eff 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -15,7 +15,8 @@ limitations under the License. */ namespace ops = paddle::operators; namespace plat = paddle::platform; -#define REGISTER_ACTIVATION_CUDA_KERNEL(act_type, functor, grad_functor) \ +#define REGISTER_ACTIVATION_CUDA_KERNEL(act_type, op_name, functor, \ + grad_functor) \ REGISTER_OP_CUDA_KERNEL( \ act_type, \ ops::ActivationKernel>, \ @@ -30,4 +31,4 @@ namespace plat = paddle::platform; ops::ActivationGradKernel>); -FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CUDA_KERNEL); +FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CUDA_KERNEL); diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index ff7e623f6f383ed2a8b8a40b3186d9c439ff1d86..915632a328feb99c021ec062a9b22a04623eff4a 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -12,6 +12,7 @@ limitations under the License. */ #pragma once #include #include +#include #include #include #include @@ -35,21 +36,29 @@ limitations under the License. */ namespace paddle { namespace operators { -/* Use ugly global variable, for the using in python layer side - Please refer to the layer_helper.py and get the details. - */ -static std::unordered_set InplaceOpSet = { - "sigmoid", "exp", "relu", "tanh", "sqrt", "ceil", - "floor", "reciprocal", "relu6", "soft_relu", "hard_sigmoid"}; +enum ActBwdOpFwdDeps { + kNoDeps = 0x00, // Do not need any forward input/output + kDepX = 0x01, // Only need forward input X + kDepOut = 0x02, // Only need forward output Out + + // Never add kDepXOut, because Out can always be calculated + // from the forward input X in the backward pass. + // FIXME(zjl): but in MKLDNN abs, both X and Out are needed... + // Developers should not rely on this enum value!
+ kDepXOut = 0x03 +}; + +std::unique_ptr> GetInplaceOpSet(); static bool IsInplace(const std::string& op) { - bool inplace = InplaceOpSet.count(op); + static auto InplaceOpSet = GetInplaceOpSet(); + bool inplace = InplaceOpSet->count(op); // for op_grad const int kGradSuffixLen = 4; if (op.size() > kGradSuffixLen && op.compare(op.size() - kGradSuffixLen - 1, kGradSuffixLen, "grad")) { inplace = - InplaceOpSet.count(op.substr(0, op.size() - (kGradSuffixLen + 1))); + InplaceOpSet->count(op.substr(0, op.size() - (kGradSuffixLen + 1))); } return inplace; } @@ -85,16 +94,21 @@ inline void ExtractActivationTensor(const framework::ExecutionContext& context, context.op().Output("Out")); } +template inline void ExtractActivationGradTensor( const framework::ExecutionContext& context, const framework::Tensor** X, const framework::Tensor** Out, const framework::Tensor** dOut, framework::Tensor** dX) { - auto out_var = context.InputVar("Out"); auto out_grad_var = context.InputVar(framework::GradVarName("Out")); auto x_grad_var = context.OutputVar(framework::GradVarName("X")); - PADDLE_ENFORCE(out_var != nullptr, - "Cannot get input Variable Out, variable name = %s", - context.op().Input("Out")); + const framework::Variable* out_var = nullptr; + + if (static_cast(kDepValue) & static_cast(kDepOut)) { + out_var = context.InputVar("Out"); + PADDLE_ENFORCE(out_var != nullptr, + "Cannot get input Variable Out, variable name = %s", + context.op().Input("Out")); + } PADDLE_ENFORCE(out_grad_var != nullptr, "Cannot get input Variable %s, variable name = %s", framework::GradVarName("Out"), @@ -105,23 +119,36 @@ inline void ExtractActivationGradTensor( context.op().Output(framework::GradVarName("X"))); if (CanBeUsedBySelectedRows.count(context.op().Type())) { - *Out = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*out_var); *dOut = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar( *out_grad_var); *dX = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar( x_grad_var); + + if (out_var) { + *Out = + paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*out_var); + } else { + *Out = *dOut; // fake out + } + } else { *Out = context.Input("Out"); *dOut = context.Input(framework::GradVarName("Out")); *dX = context.Output(framework::GradVarName("X")); + + if (out_var) { + *Out = &(out_var->Get()); + } else { + *Out = *dOut; // fake out + } } + PADDLE_ENFORCE(*dX != nullptr, "Cannot get output tensor %s, variable name = %s", framework::GradVarName("X"), context.op().Output(framework::GradVarName("X"))); - bool inplace = IsInplace(context.op().Type()); - if (!inplace) { + if (static_cast(kDepValue) & static_cast(kDepX)) { auto x_var = context.InputVar("X"); PADDLE_ENFORCE(x_var != nullptr, "Cannot get input tensor X, variable name = %s", @@ -172,7 +199,8 @@ class ActivationGradKernel const framework::Tensor *X, *Out, *dOut; framework::Tensor* dX = nullptr; X = Out = dOut = nullptr; - ExtractActivationGradTensor(context, &X, &Out, &dOut, &dX); + ExtractActivationGradTensor(context, &X, &Out, &dOut, + &dX); dX->mutable_data(context.GetPlace()); auto dout = framework::EigenVector::Flatten(detail::Ref(dOut)); auto out = framework::EigenVector::Flatten(detail::Ref(Out)); @@ -222,6 +250,8 @@ struct SigmoidGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * out * (static_cast(1) - out); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // Originally: logsigmoid(x) = -log (1 + exp(-x)) @@ 
-258,6 +288,8 @@ struct LogSigmoidGradFunctor : public BaseActivationFunctor { dx.device(d) = dout * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp())); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // exp(x) = e^x @@ -276,6 +308,8 @@ struct ExpGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * out; } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // relu(x) = max(x, 0) @@ -294,6 +328,8 @@ struct ReluGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * (out > static_cast(0)).template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))) @@ -338,6 +374,8 @@ struct GeluGradFunctor : BaseActivationFunctor { (-static_cast(0.5) * x.square()).exp(); dx.device(d) = dout * (first + second); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) @@ -356,6 +394,8 @@ struct TanhGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * (static_cast(1) - out * out); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // tanhshrink(x) = x - tanh(x) @@ -375,6 +415,8 @@ struct TanhShrinkGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * (x.tanh() * x.tanh()); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // tanhshrink(x) = x - tanh(x) @@ -409,6 +451,8 @@ struct HardShrinkGradFunctor : public BaseActivationFunctor { auto temp2 = (x > static_cast(threshold)).template cast().eval(); dx.device(d) = dout * (temp1 + temp2).template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < -lambda; 0 @@ -443,6 +487,8 @@ struct SoftShrinkGradFunctor : public BaseActivationFunctor { auto temp2 = (x < -lambdaT).template cast().eval(); dx.device(d) = dout * (temp1 + temp2).template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // sqrt(x) = x^(1/2) @@ -461,6 +507,8 @@ struct SqrtGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = static_cast(0.5) * dout / out; } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // ceil(x) = ceiling(x) @@ -479,6 +527,8 @@ struct ZeroGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = static_cast(0) / out; } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kNoDeps; } }; // floor(x) = flooring(x) @@ -522,6 +572,8 @@ struct CosGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = -dout * x.unaryExpr(Sine()); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // cosine(x) = cos(x) @@ -541,6 +593,8 @@ struct SinGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * x.unaryExpr(Cosine()); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // sine(x) = sin(x) @@ -582,6 +636,8 @@ struct AcosGradFunctor : public BaseActivationFunctor { dx.device(d) = -dout * static_cast(1) / 
(static_cast(1) - x.square()).sqrt(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -614,6 +670,8 @@ struct AsinGradFunctor : public BaseActivationFunctor { dx.device(d) = dout * static_cast(1) / (static_cast(1) - x.square()).sqrt(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -645,6 +703,8 @@ struct AtanGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (static_cast(1) + x.square()); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // round(x) = [x] @@ -672,6 +732,8 @@ struct AbsGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * x.sign(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepXOut; } }; // reciprocal(x) = 1 / x @@ -690,6 +752,8 @@ struct ReciprocalGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(-1) * out * out; } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // log(x) = natural logarithm of x @@ -708,6 +772,8 @@ struct LogGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * (static_cast(1) / x); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // square(x) = x^2 @@ -726,6 +792,8 @@ struct SquareGradFunctor : public BaseActivationFunctor { void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(2) * x; } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -760,6 +828,8 @@ struct BReluGradFunctor : public BaseActivationFunctor { ((x > static_cast(t_min)) * (x < static_cast(t_max))) .template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // relu6(x) = min(max(0, x), 6) @@ -792,6 +862,8 @@ struct Relu6GradFunctor : public BaseActivationFunctor { ((out > static_cast(0)) * (out < static_cast(threshold))) .template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; // softplus(x) = log(1 + exp(x)) @@ -821,6 +893,8 @@ struct SoftplusGradFunctor : public BaseActivationFunctor { dx.device(d) = dout * ((x - temp).exp() / ((-temp).exp() + (x - temp).exp())); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // softsign(x) = x / (1 + |x|) @@ -842,6 +916,8 @@ struct SoftsignGradFunctor : public BaseActivationFunctor { dx.device(d) = dout * (static_cast(1) / (static_cast(1) + x.abs()).square()); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -872,6 +948,8 @@ struct SoftReluGradFunctor : public BaseActivationFunctor { auto temp = ((out > -tmp) * (out < tmp)).template cast().eval(); dx.device(d) = dout * (static_cast(1) - (-out).exp()) * temp; } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; template @@ -901,6 +979,8 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor { auto temp2 = (x >= static_cast(0)).template cast().eval(); dx.device(d) = dout * (temp1 + temp2).template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -928,9 +1008,11 @@ struct ELUGradFunctor : public BaseActivationFunctor { typename dX> void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * (x > static_cast(0)).template cast() + - 
dout * (out + static_cast(alpha)) * + dout * static_cast(alpha) * x.exp() * (x < static_cast(0)).template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; // FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5198 @@ -958,6 +1040,8 @@ struct PowGradFunctor : public BaseActivationFunctor { dx.device(d) = dout * static_cast(factor) * x.pow(static_cast(factor) - static_cast(1)); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -991,6 +1075,8 @@ struct STanhGradFunctor : public BaseActivationFunctor { auto temp = (a * x).tanh() * (a * x).tanh(); dx.device(d) = dout * a * b * (static_cast(1) - temp); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -1020,6 +1106,8 @@ struct ThresholdedReluGradFunctor : public BaseActivationFunctor { auto th = static_cast(threshold); dx.device(d) = dout * (x > th).template cast(); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; template @@ -1053,6 +1141,8 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor { .template cast() * static_cast(slope); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } }; template @@ -1077,49 +1167,54 @@ struct SwishGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out fake_out, dOut dout, dX dx) const { auto temp1 = static_cast(1) / (static_cast(1) + (static_cast(-beta) * x).exp()); + auto out = x * temp1; auto temp2 = temp1 * (static_cast(1) - (static_cast(beta) * out)); dx.device(d) = dout * ((static_cast(beta) * out) + temp2); } + + static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } }; } // namespace operators } // namespace paddle -#define FOR_EACH_KERNEL_FUNCTOR(__macro) \ - __macro(sigmoid, SigmoidFunctor, SigmoidGradFunctor); \ - __macro(logsigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor); \ - __macro(exp, ExpFunctor, ExpGradFunctor); \ - __macro(relu, ReluFunctor, ReluGradFunctor); \ - __macro(gelu, GeluFunctor, GeluGradFunctor); \ - __macro(tanh, TanhFunctor, TanhGradFunctor); \ - __macro(atan, AtanFunctor, AtanGradFunctor); \ - __macro(softshrink, SoftShrinkFunctor, SoftShrinkGradFunctor); \ - __macro(sqrt, SqrtFunctor, SqrtGradFunctor); \ - __macro(abs, AbsFunctor, AbsGradFunctor); \ - __macro(ceil, CeilFunctor, ZeroGradFunctor); \ - __macro(floor, FloorFunctor, ZeroGradFunctor); \ - __macro(cos, CosFunctor, CosGradFunctor); \ - __macro(acos, AcosFunctor, AcosGradFunctor); \ - __macro(sin, SinFunctor, SinGradFunctor); \ - __macro(asin, AsinFunctor, AsinGradFunctor); \ - __macro(round, RoundFunctor, ZeroGradFunctor); \ - __macro(reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \ - __macro(log, LogFunctor, LogGradFunctor); \ - __macro(square, SquareFunctor, SquareGradFunctor); \ - __macro(brelu, BReluFunctor, BReluGradFunctor); \ - __macro(soft_relu, SoftReluFunctor, SoftReluGradFunctor); \ - __macro(pow, PowFunctor, PowGradFunctor); \ - __macro(stanh, STanhFunctor, STanhGradFunctor); \ - __macro(softplus, SoftplusFunctor, SoftplusGradFunctor); \ - __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ - __macro(relu6, Relu6Functor, Relu6GradFunctor); \ - __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ - __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor); \ - __macro(elu, ELUFunctor, ELUGradFunctor); \ - __macro(hard_shrink, HardShrinkFunctor, HardShrinkGradFunctor); \ - __macro(hard_sigmoid, HardSigmoidFunctor, 
HardSigmoidGradFunctor); \ - __macro(swish, SwishFunctor, SwishGradFunctor); \ - __macro(thresholded_relu, ThresholdedReluFunctor, ThresholdedReluGradFunctor); +#define FOR_EACH_ACTIVATION_OP(__macro) \ + __macro(sigmoid, Sigmoid, SigmoidFunctor, SigmoidGradFunctor); \ + __macro(logsigmoid, LogSigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor); \ + __macro(exp, Exp, ExpFunctor, ExpGradFunctor); \ + __macro(relu, Relu, ReluFunctor, ReluGradFunctor); \ + __macro(gelu, Gelu, GeluFunctor, GeluGradFunctor); \ + __macro(tanh, Tanh, TanhFunctor, TanhGradFunctor); \ + __macro(atan, Atan, AtanFunctor, AtanGradFunctor); \ + __macro(softshrink, SoftShrink, SoftShrinkFunctor, SoftShrinkGradFunctor); \ + __macro(sqrt, Sqrt, SqrtFunctor, SqrtGradFunctor); \ + __macro(abs, Abs, AbsFunctor, AbsGradFunctor); \ + __macro(ceil, Ceil, CeilFunctor, ZeroGradFunctor); \ + __macro(floor, Floor, FloorFunctor, ZeroGradFunctor); \ + __macro(cos, Cos, CosFunctor, CosGradFunctor); \ + __macro(acos, Acos, AcosFunctor, AcosGradFunctor); \ + __macro(sin, Sin, SinFunctor, SinGradFunctor); \ + __macro(asin, Asin, AsinFunctor, AsinGradFunctor); \ + __macro(round, Round, RoundFunctor, ZeroGradFunctor); \ + __macro(reciprocal, Reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \ + __macro(log, Log, LogFunctor, LogGradFunctor); \ + __macro(square, Square, SquareFunctor, SquareGradFunctor); \ + __macro(brelu, BRelu, BReluFunctor, BReluGradFunctor); \ + __macro(soft_relu, SoftRelu, SoftReluFunctor, SoftReluGradFunctor); \ + __macro(pow, Pow, PowFunctor, PowGradFunctor); \ + __macro(stanh, STanh, STanhFunctor, STanhGradFunctor); \ + __macro(softplus, Softplus, SoftplusFunctor, SoftplusGradFunctor); \ + __macro(softsign, Softsign, SoftsignFunctor, SoftsignGradFunctor); \ + __macro(relu6, Relu6, Relu6Functor, Relu6GradFunctor); \ + __macro(leaky_relu, LeakyRelu, LeakyReluFunctor, LeakyReluGradFunctor); \ + __macro(tanh_shrink, TanhShrink, TanhShrinkFunctor, TanhShrinkGradFunctor); \ + __macro(elu, ELU, ELUFunctor, ELUGradFunctor); \ + __macro(hard_shrink, HardShrink, HardShrinkFunctor, HardShrinkGradFunctor); \ + __macro(hard_sigmoid, HardSigmoid, HardSigmoidFunctor, \ + HardSigmoidGradFunctor); \ + __macro(swish, Swish, SwishFunctor, SwishGradFunctor); \ + __macro(thresholded_relu, ThresholdedRelu, ThresholdedReluFunctor, \ + ThresholdedReluGradFunctor); diff --git a/paddle/fluid/operators/distributed/grpc/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc/grpc_serde.cc index 6e65aa5fae83536d229be63fbaf7874bd45f967d..91c398d0c84db1fc67740cd2368d178610ef0841 100644 --- a/paddle/fluid/operators/distributed/grpc/grpc_serde.cc +++ b/paddle/fluid/operators/distributed/grpc/grpc_serde.cc @@ -16,6 +16,7 @@ limitations under the License. 
*/ #include #endif #include +#include #include // NOLINT #include "google/protobuf/io/coded_stream.h" @@ -104,8 +105,10 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload->memory_size()); if (payload->memory_size() >= std::numeric_limits::max()) { - LOG(FATAL) << "AppendZeroCopy varname:" << name - << ", vlen:" << payload->memory_size(); + LOG(FATAL) << "FATAL error: varname:" << name + << ", vlen:" << payload->memory_size() + << " >= std::numeric_limits::max():" + << std::numeric_limits::max() << ", so exit!"; } // steal reference of tensor data ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows diff --git a/paddle/fluid/operators/interpolate_op.cc b/paddle/fluid/operators/interpolate_op.cc index edee8c08d070742d54f761083592466658a445c9..9f2e3ad4a5ac1786096c67154d5a9ef5ea62855c 100644 --- a/paddle/fluid/operators/interpolate_op.cc +++ b/paddle/fluid/operators/interpolate_op.cc @@ -37,10 +37,19 @@ class InterpolateOp : public framework::OperatorWithKernel { "Interpolation method can only be \"bilinear\" or \"nearest\"."); auto dim_x = ctx->GetInputDim("X"); // NCHW format - int out_h = ctx->Attrs().Get("out_h"); - int out_w = ctx->Attrs().Get("out_w"); PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4"); + int out_h, out_w; + float scale = ctx->Attrs().Get("scale"); + if (scale > 0) { + // round down + out_h = static_cast(dim_x[2] * scale); + out_w = static_cast(dim_x[3] * scale); + } else { + out_h = ctx->Attrs().Get("out_h"); + out_w = ctx->Attrs().Get("out_w"); + } + if (ctx->HasInput("OutSize") && ctx->IsRuntime()) { auto out_size_dim = ctx->GetInputDim("OutSize"); PADDLE_ENFORCE_EQ(out_size_dim.size(), 1, @@ -77,6 +86,7 @@ class InterpolateOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("out_h", "output height of interpolate op."); AddAttr("out_w", "output width of interpolate op."); + AddAttr("scale", "scale factor of interpolate op.").SetDefault(0.); AddAttr("interp_method", "(string, default \"bilinear\"), interpolation " "method, can be \"bilinear\" for " diff --git a/paddle/fluid/operators/interpolate_op.cu b/paddle/fluid/operators/interpolate_op.cu index b887878ea2291d6c56fec91738784e338606b84f..35177a4e9ade26831f50de84bbb943d856cb98d9 100644 --- a/paddle/fluid/operators/interpolate_op.cu +++ b/paddle/fluid/operators/interpolate_op.cu @@ -192,9 +192,21 @@ class InterpolateOpCUDAKernel : public framework::OpKernel { auto* output = ctx.Output("Out"); auto* input_data = input->data(); + int n = input->dims()[0]; + int c = input->dims()[1]; + int in_h = input->dims()[2]; + int in_w = input->dims()[3]; + auto interp_method = ctx.Attr("interp_method"); int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + + float scale = ctx.Attr("scale"); + if (scale > 0) { + out_h = in_h * scale; + out_w = in_w * scale; + } + auto out_size = ctx.Input("OutSize"); if (out_size != nullptr) { Tensor sizes; @@ -207,11 +219,6 @@ class InterpolateOpCUDAKernel : public framework::OpKernel { bool align_corners = ctx.Attr("align_corners"); int align_mode = ctx.Attr("align_mode"); - int n = input->dims()[0]; - int c = input->dims()[1]; - int in_h = input->dims()[2]; - int in_w = input->dims()[3]; - auto* output_data = output->mutable_data({n, c, out_h, out_w}, ctx.GetPlace()); @@ -268,14 +275,20 @@ class InterpolateGradOpCUDAKernel : public framework::OpKernel { math::SetConstant zero; zero(device_ctx, input_grad, static_cast(0.0)); + int n = input_grad->dims()[0]; + int 
c = input_grad->dims()[1]; + int in_h = input_grad->dims()[2]; + int in_w = input_grad->dims()[3]; + auto interp_method = ctx.Attr("interp_method"); int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + float scale = ctx.Attr("scale"); + if (scale > 0) { + out_h = in_h * scale; + out_w = in_w * scale; + } auto out_size = ctx.Input("OutSize"); - - bool align_corners = ctx.Attr("align_corners"); - int align_mode = ctx.Attr("align_mode"); - if (out_size != nullptr) { Tensor sizes; framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes); @@ -284,10 +297,8 @@ class InterpolateGradOpCUDAKernel : public framework::OpKernel { out_w = size_data[1]; } - int n = input_grad->dims()[0]; - int c = input_grad->dims()[1]; - int in_h = input_grad->dims()[2]; - int in_w = input_grad->dims()[3]; + bool align_corners = ctx.Attr("align_corners"); + int align_mode = ctx.Attr("align_mode"); int in_hw = in_h * in_w; int out_hw = out_h * out_w; diff --git a/paddle/fluid/operators/interpolate_op.h b/paddle/fluid/operators/interpolate_op.h index c631ad1dd158ce114169602f073d69b2291b5b3b..5fd42809dfec6dd821c9b27bc97d61de94b5d326 100644 --- a/paddle/fluid/operators/interpolate_op.h +++ b/paddle/fluid/operators/interpolate_op.h @@ -163,9 +163,21 @@ class InterpolateKernel : public framework::OpKernel { auto* input = ctx.Input("X"); auto* output = ctx.Output("Out"); + const int n = input->dims()[0]; + const int c = input->dims()[1]; + const int in_h = input->dims()[2]; + const int in_w = input->dims()[3]; + std::string interp_method = ctx.Attr("interp_method"); int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + + float scale = ctx.Attr("scale"); + if (scale > 0) { + out_h = static_cast(in_h * scale); + out_w = static_cast(in_w * scale); + } + auto out_size = ctx.Input("OutSize"); if (out_size != nullptr) { auto out_size_data = out_size->data(); @@ -175,11 +187,6 @@ class InterpolateKernel : public framework::OpKernel { bool align_corners = ctx.Attr("align_corners"); int align_mode = ctx.Attr("align_mode"); - const int n = input->dims()[0]; - const int c = input->dims()[1]; - const int in_h = input->dims()[2]; - const int in_w = input->dims()[3]; - output->mutable_data({n, c, out_h, out_w}, ctx.GetPlace()); auto& device_ctx = ctx.template device_context(); @@ -221,23 +228,31 @@ class InterpolateGradKernel : public framework::OpKernel { auto* input_grad = ctx.Output(framework::GradVarName("X")); auto* output_grad = ctx.Input(framework::GradVarName("Out")); + const int n = input->dims()[0]; + const int c = input->dims()[1]; + const int in_h = input->dims()[2]; + const int in_w = input->dims()[3]; + std::string interp_method = ctx.Attr("interp_method"); int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + + float scale = ctx.Attr("scale"); + if (scale > 0) { + out_h = static_cast(in_h * scale); + out_w = static_cast(in_w * scale); + } + auto out_size = ctx.Input("OutSize"); if (out_size != nullptr) { auto out_size_data = out_size->data(); out_h = out_size_data[0]; out_w = out_size_data[1]; } + bool align_corners = ctx.Attr("align_corners"); int align_mode = ctx.Attr("align_mode"); - const int n = input->dims()[0]; - const int c = input->dims()[1]; - const int in_h = input->dims()[2]; - const int in_w = input->dims()[3]; - input_grad->mutable_data({n, c, in_h, in_w}, ctx.GetPlace()); auto& device_ctx = ctx.template device_context(); diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index
fc52c281c4f0de2b05ab2b58aa81cdbf1216e6a7..43db744b5edeb48f87c4413961e6ded3bd45f86d 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -291,8 +291,12 @@ function build() { Building in /paddle/build ... ============================================ EOF + parallel_number=`nproc` + if [[ "$1" != "" ]]; then + parallel_number=$1 + fi make clean - make -j `nproc` + make -j ${parallel_number} make install -j `nproc` } @@ -737,9 +741,13 @@ function gen_fluid_lib() { Generating fluid library for train and inference ... ======================================== EOF + parallel_number=`nproc` + if [[ "$1" != "" ]]; then + parallel_number=$1 + fi cmake .. -DWITH_DISTRIBUTE=OFF -DON_INFER=ON - make -j `nproc` fluid_lib_dist - make -j `nproc` inference_lib_dist + make -j ${parallel_number} fluid_lib_dist + make -j ${parallel_number} inference_lib_dist } function tar_fluid_lib() { @@ -770,11 +778,22 @@ EOF function main() { local CMD=$1 + local parallel_number=$2 init case $CMD in + build_only) + cmake_gen ${PYTHON_ABI:-""} + build ${parallel_number} + ;; + build_and_check) + cmake_gen ${PYTHON_ABI:-""} + build ${parallel_number} + assert_api_not_changed ${PYTHON_ABI:-""} + assert_api_spec_approvals + ;; build) cmake_gen ${PYTHON_ABI:-""} - build + build ${parallel_number} gen_dockerfile ${PYTHON_ABI:-""} ;; test) @@ -797,7 +816,7 @@ function main() { ;; fluid_inference_lib) cmake_gen ${PYTHON_ABI:-""} - gen_fluid_lib + gen_fluid_lib ${parallel_number} tar_fluid_lib test_fluid_lib ;; @@ -806,16 +825,16 @@ function main() { ;; cicheck) cmake_gen ${PYTHON_ABI:-""} - build + build ${parallel_number} assert_api_not_changed ${PYTHON_ABI:-""} run_test - gen_fluid_lib + gen_fluid_lib ${parallel_number} test_fluid_lib assert_api_spec_approvals ;; cicheck_brpc) cmake_gen ${PYTHON_ABI:-""} - build + build ${parallel_number} run_brpc_test ;; assert_api) @@ -823,7 +842,7 @@ function main() { assert_api_spec_approvals ;; test_inference) - gen_fluid_lib + gen_fluid_lib ${parallel_number} test_fluid_lib ;; assert_api_approvals) @@ -840,7 +859,7 @@ function main() { ;; cicheck_py35) cmake_gen ${PYTHON_ABI:-""} - build + build ${parallel_number} run_test assert_api_not_changed ${PYTHON_ABI:-""} ;; @@ -848,7 +867,7 @@ function main() { cmake_gen ${PYTHON_ABI:-""} ;; gen_fluid_lib) - gen_fluid_lib + gen_fluid_lib ${parallel_number} ;; test_fluid_lib) test_fluid_lib diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 0af883764e157db24e17a1a4ef1bff27f9b39b0f..983d8243b1d8aa6c8d01855d6dbeab76c335f70c 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -66,6 +66,8 @@ from . import compiler from .compiler import * from paddle.fluid.layers.math_op_patch import monkey_patch_variable from . 
import install_check +from .dygraph.nn import * +from .dygraph.layers import * Tensor = LoDTensor diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py index d55dbbb9c72cb887e169849c3a3e32a13c202a7b..bf484b35c7bf9a2b17126789ff247bd73095fe7b 100644 --- a/python/paddle/fluid/dygraph/base.py +++ b/python/paddle/fluid/dygraph/base.py @@ -22,7 +22,7 @@ __all__ = ['enabled', 'guard', 'to_variable'] def enabled(): - return framework._in_dygraph_mode() + return framework.in_dygraph_mode() @signature_safe_contextmanager diff --git a/python/paddle/fluid/dygraph/checkpoint.py b/python/paddle/fluid/dygraph/checkpoint.py index f992ae0576c81ed98a3e9f7a446b0c2e808622ea..f2b01aece7bf86b1a195296ba49a626721213b7a 100644 --- a/python/paddle/fluid/dygraph/checkpoint.py +++ b/python/paddle/fluid/dygraph/checkpoint.py @@ -97,20 +97,12 @@ def load_persistables(vardict, dirname, filename=None): Examples: .. code-block:: python - my_layer = layer(fluid.dygraph.Layer) + my_layer = layer(fluid.Layer) param_path = "./my_paddle_model" param_dict = fluid.dygraph.load_persistables(my_layer.parameters(), param_path) param_1 = param_dict['PtbModel_0.w_1'] - or: - my_layer = layer(fluid.dygraph.Layer) - param_path = "./my_paddle_model" - filename = "model.file" - param_dict = fluid.dygraph.load_persistables(my_layer.state_dict(), param_path, - filename=filename) - param_1 = param_dict['PtbModel_0.w_1'] - """ if isinstance(vardict, collections.OrderedDict): return _load_var_from_file(vardict, dirname, filename) diff --git a/python/paddle/fluid/dygraph/layer_object_helper.py b/python/paddle/fluid/dygraph/layer_object_helper.py index f0be5ff3bf2394f1f7da8fbcc341a0d2dfacdab3..9fd1e392791f2bf7a19942749eae87001ec3ede8 100644 --- a/python/paddle/fluid/dygraph/layer_object_helper.py +++ b/python/paddle/fluid/dygraph/layer_object_helper.py @@ -16,7 +16,7 @@ from __future__ import print_function import copy import six -from ..framework import Parameter, _in_dygraph_mode +from ..framework import Parameter, in_dygraph_mode from ..param_attr import ParamAttr from .. import core from six.moves import zip diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 014ee41f4c5aa280fb5b366d8f1704290cc067d4..39e06e3486cd5479f69cbdb67811f03bd9646123 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -139,14 +139,14 @@ class Layer(core.Layer): def clear_gradients(self): for p in self.parameters(): - p._clear_gradient() + p.clear_gradient() - def _build_once(self, *args): + def build_once(self, *args): pass def __call__(self, *inputs): if not self._built: - self._build_once(*inputs) + self.build_once(*inputs) outputs = self.forward(*inputs) self._built = True diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 71abb9e3eca974138fe2d8bedd41e4d58983f80c..0ab981518beb4cc48e18c17e4f0f91c22b60dbb7 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -19,7 +19,7 @@ from six.moves import reduce from .. import core from ..layers import utils from . 
import layers -from ..framework import Variable, _in_dygraph_mode, OpProtoHolder, Parameter +from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter from ..param_attr import ParamAttr from ..initializer import Normal, Constant, NumpyArrayInitializer import numpy as np @@ -33,6 +33,109 @@ __all__ = [ class Conv2D(layers.Layer): + """ + The convolution2D layer calculates the output based on the input, filter + and strides, paddings, dilations, groups parameters. Input and + Output are in NCHW format, where N is batch size, C is the number of + channels, H is the height of the feature, and W is the width of the feature. + Filter is in MCHW format, where M is the number of output image channels, + C is the number of input image channels, H is the height of the filter, + and W is the width of the filter. If the groups is greater than 1, + C will equal the number of input image channels divided by the groups. + Please refer to UFLDL's `convolution + `_ + for more detials. + If bias attribution and activation type are provided, bias is added to the + output of the convolution, and the corresponding activation function is + applied to the final result. + + For each input :math:`X`, the equation is: + + .. math:: + + Out = \sigma (W \\ast X + b) + + Where: + + * :math:`X`: Input value, a tensor with NCHW format. + * :math:`W`: Filter value, a tensor with MCHW format. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. + + Example: + + - Input: + + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` + + Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)` + + - Output: + + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` + + Where + + .. math:: + + H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\ + W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 + + Args: + input (Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of filter. It is as same as the output + image channel. + filter_size (int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. + stride (int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. Default: stride = 1. + padding (int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. Default: padding = 0. + dilation (int|tuple): The dilation size. If dilation is a tuple, it must + contain two integers, (dilation_H, dilation_W). Otherwise, the + dilation_H = dilation_W = dilation. Default: dilation = 1. + groups (int): The groups number of the Conv2d Layer. According to grouped + convolution in Alex Krizhevsky's Deep CNN paper: when group=2, + the first half of the filters is only connected to the first half + of the input channels, while the second half of the filters is only + connected to the second half of the input channels. Default: groups=1. + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of conv2d. If it is set to None or one attribute of ParamAttr, conv2d + will create ParamAttr as param_attr. 
If the Initializer of the param_attr + is not set, the parameter is initialized with :math:`Normal(0.0, std)`, + and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d. + If it is set to False, no bias will be added to the output units. + If it is set to None or one attribute of ParamAttr, conv2d + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + act (str): Activation type, if it is set to None, activation is not appended. + Default: None + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Default: None + + Returns: + Variable: The tensor variable storing the convolution and \ + non-linearity activation result. + + Raises: + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") + """ + def __init__(self, name_scope, num_channels, @@ -265,7 +368,7 @@ class Conv3D(layers.Layer): self._param_attr = param_attr self._bias_attr = bias_attr - def _build_once(self, input): + def build_once(self, input): num_channels = input.shape[1] self._dtype = self._helper.input_dtype(input) @@ -332,6 +435,116 @@ class Conv3D(layers.Layer): class Conv3DTranspose(layers.Layer): + """ + **Convlution3D transpose layer** + + The convolution3D transpose layer calculates the output based on the input, + filter, and dilations, strides, paddings. Input(Input) and output(Output) + are in NCDHW format. Where N is batch size, C is the number of channels, + D is the depth of the feature, H is the height of the feature, and W + is the width of the feature. Parameters(dilations, strides, paddings) are + two elements. These two elements represent height and width, respectively. + The details of convolution transpose layer, please refer to the following + explanation and references `therein `_. + If bias attribution and activation type are provided, bias is added to + the output of the convolution, and the corresponding activation function + is applied to the final result. + + For each input :math:`X`, the equation is: + + .. math:: + + Out = \sigma (W \\ast X + b) + + In the above equation: + + * :math:`X`: Input value, a tensor with NCDHW format. + * :math:`W`: Filter value, a tensor with MCDHW format. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. + + Example: + + - Input: + + Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + + - Output: + + Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + + Where + + .. math:: + + D_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\ + H_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\ + W_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 + + Args: + input(Variable): The input image with [N, C, D, H, W] format. 
+ num_filters(int): The number of the filter. It is as same as the output + image channel. + output_size(int|tuple|None): The output image size. If output size is a + tuple, it must contain three integers, (image_D, image_H, image_W). This + parameter only works when filter_size is None. + filter_size(int|tuple|None): The filter size. If filter_size is a tuple, + it must contain three integers, (filter_size_D, filter_size_H, filter_size_W). + Otherwise, the filter will be a square. None if use output size to + calculate filter_size. + padding(int|tuple): The padding size. If padding is a tuple, it must + contain three integers, (padding_D, padding_H, padding_W). Otherwise, the + padding_D = padding_H = padding_W = padding. Default: padding = 0. + stride(int|tuple): The stride size. If stride is a tuple, it must + contain three integers, (stride_D, stride_H, stride_W). Otherwise, the + stride_D = stride_H = stride_W = stride. Default: stride = 1. + dilation(int|tuple): The dilation size. If dilation is a tuple, it must + contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the + dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1. + groups(int): The groups number of the Conv3d transpose layer. Inspired by + grouped convolution in Alex Krizhevsky's Deep CNN paper, in which + when group=2, the first half of the filters is only connected to the + first half of the input channels, while the second half of the + filters is only connected to the second half of the input channels. + Default: groups=1 + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d_transpose. + If it is set to False, no bias will be added to the output units. + If it is set to None or one attribute of ParamAttr, conv3d_transpose + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + act (str): Activation type, if it is set to None, activation is not appended. + Default: None. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The tensor variable storing the convolution transpose result. + + Raises: + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. + + Examples: + .. code-block:: python + + conv3d_transpose = nn.Conv3DTranspose( + 'Conv3DTranspose', + num_filters=12, + filter_size=12, + use_cudnn=False) + transpose_res = conv3d_transpose(base.to_variable(input_array)) + """ + def __init__(self, name_scope, num_filters, @@ -362,7 +575,7 @@ class Conv3DTranspose(layers.Layer): self._bias_attr = bias_attr self._act = act - def _build_once(self, input): + def build_once(self, input): self._dtype = self._helper.input_dtype(input) self._input_channel = input.shape[1] @@ -436,6 +649,54 @@ class Conv3DTranspose(layers.Layer): class Pool2D(layers.Layer): + """ + ${comment} + + Args: + input (Variable): The input tensor of pooling operator. 
The format of + input tensor is NCHW, where N is batch size, C is + the number of channels, H is the height of the + feature, and W is the width of the feature. + pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, + it must contain two integers, (pool_size_Height, pool_size_Width). + Otherwise, the pool kernel size will be a square of an int. + pool_type: ${pooling_type_comment} + pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list, + it must contain two integers, (pool_stride_Height, pool_stride_Width). + Otherwise, the pool stride size will be a square of an int. + pool_padding (int|list|tuple): The pool padding size. If pool padding size is a tuple, + it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width). + Otherwise, the pool padding size will be a square of an int. + global_pooling (bool): ${global_pooling_comment} + use_cudnn (bool): ${use_cudnn_comment} + ceil_mode (bool): ${ceil_mode_comment} + name (str|None): A name for this layer(optional). If set None, the + layer will be named automatically. + exclusive (bool): Whether to exclude padding points in average pooling + mode, default is true + + Returns: + Variable: The pooling result. + + Raises: + ValueError: If 'pool_type' is not "max" nor "avg" + ValueError: If 'global_pooling' is False and 'pool_size' is -1 + ValueError: If 'use_cudnn' is not a bool value. + + Examples: + + .. code-block:: python + + data = fluid.layers.data( + name='data', shape=[3, 32, 32], dtype='float32') + pool2d = fluid.Pool2D("pool2d",pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False) + + pool2d_res = pool2d(data) + """ + def __init__(self, name_scope, pool_size=-1, @@ -495,6 +756,102 @@ class Pool2D(layers.Layer): class FC(layers.Layer): + """ + **Fully Connected Layer** + + This function creates a fully connected layer in the network. It can take + one or multiple tensors as its inputs(input can be a list of Variable, see + Args in detail). It creates a variable called weights for each input tensor, + which represents a fully connected weight matrix from each input unit to + each output unit. The fully connected layer multiplies each input tensor + with its corresponding weight to produce an output Tensor with shape [M, `size`], + where M is batch size. If multiple input tensors are given, the results of + multiple output tensors with shape [M, `size`] will be summed up. If bias_attr + is not None, a bias variable will be created and added to the output. + Finally, if activation is not None, it will be applied to the output as well. + + When the input is single tensor: + + .. math:: + + Out = Act({XW + b}) + + When the input are multiple tensors: + + .. math:: + + Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) + + In the above equation: + + * :math:`N`: Number of the input. N equals to len(input) if input is list of Variable. + * :math:`X_i`: The i-th input tensor. + * :math:`W_i`: The i-th weights matrix corresponding i-th input tensor. + * :math:`b`: The bias parameter created by this layer (if needed). + * :math:`Act`: The activation function. + * :math:`Out`: The output tensor. + + See below for an example. + + .. 
code-block:: text + + Given: + data_1.data = [[[0.1, 0.2], + [0.3, 0.4]]] + data_1.shape = (1, 2, 2) # 1 is batch_size + + data_2 = [[[0.1, 0.2, 0.3]]] + data_2.shape = (1, 1, 3) + + out = fluid.layers.fc(input=[data_1, data_2], size=2) + + Then: + out.data = [[0.18669507, 0.1893476]] + out.shape = (1, 2) + + Args: + input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of + the input tensor(s) is at least 2. + size(int): The number of output units in this layer. + num_flatten_dims (int, default 1): The fc layer can accept an input tensor with more than + two dimensions. If this happens, the multidimensional tensor will first be flattened + into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input + tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1) + dimensions will be flatten to form the first dimension of the final matrix (height of + the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to + form the second dimension of the final matrix (width of the matrix). For example, suppose + `X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. + Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. + param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable + parameters/weights of this layer. + bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias + of this layer. If it is set to False, no bias will be added to the output units. + If it is set to None, the bias is initialized zero. Default: None. + act (str, default None): Activation to be applied to the output of this layer. + is_test(bool): A flag indicating whether execution is in test phase. + name (str, default None): The name of this layer. + + Returns: + Variable: The transformation result. + + Raises: + ValueError: If rank of the input tensor is less than 2. + + Examples: + .. code-block:: python + + # when input is single tensor + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.FC("fc", size=1000, act="tanh") + fc_res = fc(data) + + # when input are multiple tensors + data_1 = fluid.layers.data(name="data_1", shape=[32, 32], dtype="float32") + data_2 = fluid.layers.data(name="data_2", shape=[24, 36], dtype="float32") + fc = fluid.FC("fc", size=1000, act="tanh") + fc_res = fc([data_1, data_2]) + """ + def __init__(self, name_scope, size, @@ -522,7 +879,7 @@ class FC(layers.Layer): assert isinstance(value, Parameter) self.__w[i] = value - def _build_once(self, input): + def build_once(self, input): i = 0 for inp, param in self._helper.iter_inputs_and_params(input, self._param_attr): @@ -591,6 +948,91 @@ class FC(layers.Layer): class BatchNorm(layers.Layer): + """ + **Batch Normalization Layer** + + Can be used as a normalizer function for conv2d and fully_connected operations. + The required data format for this layer is one of the following: + + 1. NHWC `[batch, in_height, in_width, in_channels]` + + 2. NCHW `[batch, in_channels, in_height, in_width]` + + Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing + Internal Covariate Shift `_ + for more details. + + :math:`input` is the input features over a mini-batch. + + .. 
math:: + + \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\ + \ mini-batch\ mean \\\\ + \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\ + \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\ + \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ + \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\ + y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift + + + When use_global_stats = True, the :math:`\\mu_{\\beta}` + and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch. + They are global (or running) statistics. (It usually got from the + pre-trained model.) + The training and testing (or inference) have the same behavior: + + .. math:: + + \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ + \\sigma_{\\beta}^{2} + \\epsilon}} \\\\ + y_i &\\gets \\gamma \\hat{x_i} + \\beta + + Args: + input(variable): The rank of input variable can be 2, 3, 4, 5. + act(string, Default None): Activation type, linear|relu|prelu|... + is_test (bool, Default False): A flag indicating whether it is in + test phrase or not. + momentum(float, Default 0.9): The value used for the moving_mean and + moving_var computation. The updated formula is: + :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` + :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` + Default is 0.9. + epsilon(float, Default 1e-05): A value added to the denominator for + numerical stability. Default is 1e-5. + param_attr(ParamAttr|None): The parameter attribute for Parameter `scale` + of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm. + If it is set to None or one attribute of ParamAttr, batch_norm + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + data_layout(string, default NCHW): NCHW|NHWC + in_place(bool, Default False): Make the input and output of batch norm reuse memory. + name(string, Default None): A name for this layer(optional). If set None, the layer + will be named automatically. + moving_mean_name(string, Default None): The name of moving_mean which store the global Mean. + moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance. + do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not. + fuse_with_relu (bool): if True, this OP performs relu after batch norm. + use_global_stats(bool, Default False): Whether to use global mean and + variance. In inference or test mode, set use_global_stats to true + or is_test to true, and the behavior is equivalent. + In train mode, when setting use_global_stats True, the global mean + and variance are also used during train period. + + Returns: + Variable: A tensor variable which is the result after applying batch normalization on the input. + + Examples: + + .. 
code-block:: python + fc = fluid.FC('fc', size=200, param_attr='fc1.w') + hidden1 = fc(x) + batch_norm = fluid.BatchNorm("batch_norm", 10) + hidden2 = batch_norm(hidden1) + """ + def __init__(self, name_scope, num_channels, @@ -629,7 +1071,7 @@ class BatchNorm(layers.Layer): dtype=self._dtype, default_initializer=Constant(1.0)) if use_global_stats and self._param_attr.learning_rate == 0.: - self._scale._stop_gradient = True + self._scale.stop_gradient = True self._bias = self.create_parameter( attr=self._param_attr, @@ -637,7 +1079,7 @@ class BatchNorm(layers.Layer): dtype=self._dtype, is_bias=True) if use_global_stats and self._param_attr.learning_rate == 0.: - self._bias._stop_gradient = True + self._bias.stop_gradient = True self._mean = self.create_parameter( attr=ParamAttr( @@ -647,7 +1089,7 @@ class BatchNorm(layers.Layer): do_model_average=do_model_average_for_mean_and_var), shape=param_shape, dtype=self._dtype) - self._mean._stop_gradient = True + self._mean.stop_gradient = True self._variance = self.create_parameter( attr=ParamAttr( @@ -657,7 +1099,7 @@ class BatchNorm(layers.Layer): do_model_average=do_model_average_for_mean_and_var), shape=param_shape, dtype=self._dtype) - self._variance._stop_gradient = True + self._variance.stop_gradient = True self._in_place = in_place self._momentum = momentum @@ -666,7 +1108,7 @@ class BatchNorm(layers.Layer): self._fuse_with_relu = fuse_with_relu self._use_global_stats = use_global_stats - def _build_once(self, input): + def build_once(self, input): pass def forward(self, input): @@ -747,7 +1189,7 @@ class Embedding(layers.Layer): dict_size = len(dataset.ids) input = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32') - embedding = fluid.dygraph.Embedding(size=[dict_size, 16]) + embedding = fluid.Embedding(size=[dict_size, 16]) fc = embedding(input) """ @@ -797,70 +1239,70 @@ class Embedding(layers.Layer): class LayerNorm(layers.Layer): - def __init__(self, - name_scope, - scale=True, - shift=True, - begin_norm_axis=1, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - act=None): - """ - ${comment} + """ + ${comment} - The formula is as follows: + The formula is as follows: - .. math:: + .. math:: - \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i + \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i - \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}(a_i - \\mu)^2} + \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}(a_i - \\mu)^2} - h & = f(\\frac{g}{\\sigma}(a - \\mu) + b) + h & = f(\\frac{g}{\\sigma}(a - \\mu) + b) - * :math:`a`: the vector representation of the summed inputs to the neurons - in that layer. + * :math:`a`: the vector representation of the summed inputs to the neurons + in that layer. - * :math:`H`: the number of hidden units in a layers + * :math:`H`: the number of hidden units in a layers - * :math:`g`: the trainable scale parameter. + * :math:`g`: the trainable scale parameter. - * :math:`b`: the trainable bias parameter. + * :math:`b`: the trainable bias parameter. - Args: - input(Variable): The input tensor variable. - scale(bool): Whether to learn the adaptive gain :math:`g` after - normalization. Default True. - shift(bool): Whether to learn the adaptive bias :math:`b` after - normalization. Default True. - begin_norm_axis(int): The normalization will be performed along - dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`. - Default 1. - epsilon(float): The small value added to the variance to prevent - division by zero. Default 1e-05. 
- param_attr(ParamAttr|None): The parameter attribute for the learnable - gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is - omitted. If :attr:`scale` is True and :attr:`param_attr` is None, - a default :code:`ParamAttr` would be added as scale. The - :attr:`param_attr` is initialized as 1 if it is added. Default None. - bias_attr(ParamAttr|None): The parameter attribute for the learnable - bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is - omitted. If :attr:`shift` is True and :attr:`param_attr` is None, - a default :code:`ParamAttr` would be added as bias. The - :attr:`bias_attr` is initialized as 0 if it is added. Default None. - act(str): Activation to be applied to the output of layer normalizaiton. - Default None. - Returns: - ${y_comment} + Args: + input(Variable): The input tensor variable. + scale(bool): Whether to learn the adaptive gain :math:`g` after + normalization. Default True. + shift(bool): Whether to learn the adaptive bias :math:`b` after + normalization. Default True. + begin_norm_axis(int): The normalization will be performed along + dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`. + Default 1. + epsilon(float): The small value added to the variance to prevent + division by zero. Default 1e-05. + param_attr(ParamAttr|None): The parameter attribute for the learnable + gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is + omitted. If :attr:`scale` is True and :attr:`param_attr` is None, + a default :code:`ParamAttr` would be added as scale. The + :attr:`param_attr` is initialized as 1 if it is added. Default None. + bias_attr(ParamAttr|None): The parameter attribute for the learnable + bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is + omitted. If :attr:`shift` is True and :attr:`param_attr` is None, + a default :code:`ParamAttr` would be added as bias. The + :attr:`bias_attr` is initialized as 0 if it is added. Default None. + act(str): Activation to be applied to the output of layer normalizaiton. + Default None. 
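        A minimal usage sketch of this class-style LayerNorm (illustrative only;
        it assumes the layer is built with a name scope and then called on a
        dygraph Variable, the same pattern the other layers in this module use):

        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            with fluid.dygraph.guard():
                x = fluid.dygraph.base.to_variable(
                    np.random.random((3, 32, 32)).astype('float32'))
                layer_norm = fluid.dygraph.nn.LayerNorm('layer_norm', begin_norm_axis=1)
                out = layer_norm(x)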
+ Returns: + ${y_comment} - Examples: + Examples: - >>> data = fluid.layers.data(name='data', shape=[3, 32, 32], - >>> dtype='float32') - >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) - """ + >>> data = fluid.layers.data(name='data', shape=[3, 32, 32], + >>> dtype='float32') + >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) + """ + def __init__(self, + name_scope, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None): super(LayerNorm, self).__init__(name_scope) self._scale = scale self._shift = shift @@ -870,7 +1312,7 @@ class LayerNorm(layers.Layer): self._bias_attr = bias_attr self._act = act - def _build_once(self, input): + def build_once(self, input): self._dtype = self._helper.input_dtype(input) input_shape = input.shape param_shape = [ @@ -1232,7 +1674,7 @@ class NCE(layers.Layer): 'remote_prefetch': remote_prefetch } - def _build_once(self, input, label, sample_weight=None): + def build_once(self, input, label, sample_weight=None): assert isinstance(input, Variable) assert isinstance(label, Variable) @@ -1318,7 +1760,7 @@ class PRelu(layers.Layer): raise ValueError('mode should be one of all, channel, element.') self._alpha_shape = [1] - def _build_once(self, input): + def build_once(self, input): if self._mode == 'channel': self._alpha_shape = [1, input.shape[1], 1, 1] elif self._mode == 'element': @@ -1396,7 +1838,7 @@ class BilinearTensorProduct(layers.Layer): self._name = name self._inputs = dict() - def _build_once(self, x, y): + def build_once(self, x, y): self._dtype = self._helper.input_dtype(x) param_shape = [self._size, x.shape[1], y.shape[1]] @@ -1572,7 +2014,7 @@ class Conv2DTranspose(layers.Layer): self._output_size = output_size self._op_type = 'conv2d_transpose' - def _build_once(self, input): + def build_once(self, input): input_channel = input.shape[1] if (input_channel == self._groups and self._num_filters == input_channel and not self._use_cudnn): @@ -1686,7 +2128,7 @@ class SequenceConv(layers.Layer): bias_attr=None, param_attr=None, act=None): - assert not _in_dygraph_mode( + assert not in_dygraph_mode( ), "SequenceConv is not supported by dynamic graph mode yet!" super(SequenceConv, self).__init__(name_scope) self._num_filters = num_filters @@ -1696,7 +2138,7 @@ class SequenceConv(layers.Layer): self._bias_attr = bias_attr self._param_attr = param_attr - def _build_once(self, input): + def build_once(self, input): self._dtype = self._helper.input_dtype(input) filter_shape = [self._filter_size * input.shape[1], self._num_filters] self._filter_param = self.create_parameter( @@ -1726,14 +2168,14 @@ class RowConv(layers.Layer): future_context_size, param_attr=None, act=None): - assert not _in_dygraph_mode( + assert not in_dygraph_mode( ), "RowConv is not supported by dynamic graph mode yet!" 
super(RowConv, self).__init__(name_scope) self._act = act self._param_attr = param_attr self._future_context_size = future_context_size - def _build_once(self, input): + def build_once(self, input): self._dtype = self._helper.input_dtype(input) filter_shape = [self._future_context_size + 1, input.shape[1]] self._filter_param = self.create_parameter( @@ -1796,7 +2238,7 @@ class GroupNorm(layers.Layer): if data_layout != 'NCHW': raise ValueError("unsupported data layout:" + data_layout) - def _build_once(self, input): + def build_once(self, input): self._dtype = self._helper.input_dtype(input) param_shape = [input.shape[1]] if self._bias_attr: @@ -1849,7 +2291,7 @@ class SpectralNorm(layers.Layer): self._eps = eps self._dim = dim - def _build_once(self, weight): + def build_once(self, weight): self._dtype = self._helper.input_dtype(weight) input_shape = weight.shape h = input_shape[self._dim] @@ -1904,7 +2346,7 @@ class TreeConv(layers.Layer): self._bias_attr = bias_attr self._param_attr = param_attr - def _build_once(self, nodes_vector, edge_set): + def build_once(self, nodes_vector, edge_set): assert isinstance(nodes_vector, Variable) assert isinstance(edge_set, Variable) self._dtype = self._helper.input_dtype(nodes_vector) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 7953d98bcbb826267fa21f6503e55049c8aff5ba..c05e5fb9e3a46e721c20fd9288b89009e32afcbe 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -67,6 +67,7 @@ __all__ = [ 'cuda_places', 'cpu_places', 'cuda_pinned_places', + 'in_dygraph_mode', ] EMPTY_VAR_NAME = core.kEmptyVarName() @@ -79,7 +80,10 @@ _dygraph_tracer_ = None _dygraph_current_expected_place_ = None -def _in_dygraph_mode(): +def in_dygraph_mode(): + ''' + Returns(bool): True if the program is running in dynamic graph mode + ''' return _dygraph_tracer_ is not None @@ -396,7 +400,7 @@ class Variable(object): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if _in_dygraph_mode(): + if in_dygraph_mode(): # record vars in tracer rather than blocks self._ivar = kwargs.get("ivar", None) if not self._ivar: @@ -482,21 +486,21 @@ class Variable(object): self.block.vars[name] = self self.op = None - self.stop_gradient = stop_gradient + self._stop_gradient = stop_gradient self.is_data = is_data - def _numpy(self): + def numpy(self): new_ivar = self._ivar._copy_to(core.CPUPlace(), True) return np.array(new_ivar.value().get_tensor()) - def _backward(self): + def backward(self): self._ivar._run_backward() - def _gradient(self): + def gradient(self): new_ivar = self._ivar._grad_ivar()._copy_to(core.CPUPlace(), True) return np.array(new_ivar.value().get_tensor()) - def _clear_gradient(self): + def clear_gradient(self): self._ivar._clear_gradient() def __str__(self): @@ -516,7 +520,7 @@ class Variable(object): Returns: str: The debug string. """ - if _in_dygraph_mode(): + if in_dygraph_mode(): # TODO(panyx0718): add more dygraph debug info. return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype, self.shape) @@ -535,7 +539,7 @@ class Variable(object): __repr__ = __str__ - def _set_desc(self, input): + def set_desc(self, input): """ Set the variable description. 
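This hunk makes the dygraph helpers public: `_in_dygraph_mode` becomes `in_dygraph_mode` (and is added to `__all__`), and the dygraph `Variable` methods `_numpy`, `_backward`, `_gradient` and `_clear_gradient` drop their leading underscores. A minimal sketch of the renamed API, mirroring how the updated unit tests below call it:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        assert fluid.in_dygraph_mode()         # public name, no leading underscore
        x = fluid.dygraph.base.to_variable(np.ones([2, 2], dtype='float32'))
        loss = fluid.layers.reduce_sum(x)
        loss.backward()                        # was loss._backward()
        print(loss.numpy(), x.gradient())      # were _numpy() / _gradient()
        x.clear_gradient()                     # was _clear_gradient()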
@@ -548,43 +552,43 @@ class Variable(object): self.desc = input @property - def _stop_gradient(self): - if _in_dygraph_mode(): + def stop_gradient(self): + if in_dygraph_mode(): return self._ivar.stop_gradient else: - return self.stop_gradient + return self._stop_gradient - @_stop_gradient.setter - def _stop_gradient(self, s): - if _in_dygraph_mode(): + @stop_gradient.setter + def stop_gradient(self, s): + if in_dygraph_mode(): self._ivar.stop_gradient = s else: - self.stop_gradient = s + self._stop_gradient = s @property def persistable(self): - if _in_dygraph_mode(): + if in_dygraph_mode(): return self._ivar.persistable else: return self.desc.persistable() @persistable.setter def persistable(self, p): - if _in_dygraph_mode(): + if in_dygraph_mode(): return self._ivar.persistable else: self.desc.set_persistable(p) @property def name(self): - if _in_dygraph_mode(): + if in_dygraph_mode(): return self._ivar.name else: return cpt.to_text(self.desc.name()) @name.setter def name(self, new_name): - if _in_dygraph_mode(): + if in_dygraph_mode(): self._ivar.name = new_name else: self.desc.set_name(new_name) @@ -592,14 +596,14 @@ class Variable(object): @property def shape(self): # convert to tuple, make it as same as numpy API. - if _in_dygraph_mode(): + if in_dygraph_mode(): return self._ivar.shape else: return tuple(self.desc.shape()) @property def dtype(self): - if _in_dygraph_mode(): + if in_dygraph_mode(): return self._ivar.dtype else: return self.desc.dtype() @@ -611,7 +615,7 @@ class Variable(object): @property def type(self): - if _in_dygraph_mode(): + if in_dygraph_mode(): return self._ivar.dtype else: return self.desc.type() @@ -721,7 +725,7 @@ class Variable(object): name=unique_name.generate(".".join(self.name)), dtype=self.dtype, persistable=self.persistable, - stop_gradient=self._stop_gradient, ) + stop_gradient=self.stop_gradient, ) else: return self @@ -930,7 +934,7 @@ class Operator(object): inputs=None, outputs=None, attrs=None): - if _in_dygraph_mode(): + if in_dygraph_mode(): if type is None: raise ValueError( "`type` to initialized an Operator can not be None.") @@ -1049,7 +1053,7 @@ class Operator(object): for arg in out_args: out_arg_names.append(cpt.to_text(arg.name)) # TODO(minqiyang): could we remove variable's op in static mode? - if not _in_dygraph_mode(): + if not in_dygraph_mode(): arg.op = self self.desc.set_output(out_proto.name, out_arg_names) @@ -1095,7 +1099,7 @@ class Operator(object): @property def type(self): - if _in_dygraph_mode(): + if in_dygraph_mode(): return self.iop.type else: return self.desc.type() @@ -1638,7 +1642,7 @@ class Block(object): Returns: Operator: the append Operator. 
""" - if _in_dygraph_mode(): + if in_dygraph_mode(): op = Operator( block=self, desc=None, @@ -1710,7 +1714,7 @@ class Block(object): return self.ops[start:end] def _prepend_op(self, *args, **kwargs): - if _in_dygraph_mode(): + if in_dygraph_mode(): op = Operator( self, None, diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 6aff93dceaf5cfd299bdc9f68246ed579f248f3c..da2591b98058a2283275cc222194e89240e87ae1 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -165,7 +165,7 @@ class ConstantInitializer(Initializer): 'force_cpu': self._force_cpu or force_init_on_cpu() }, stop_gradient=True) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -245,7 +245,7 @@ class UniformInitializer(Initializer): attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -324,7 +324,7 @@ class NormalInitializer(Initializer): outputs={"Out": var}, attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -403,7 +403,7 @@ class TruncatedNormalInitializer(Initializer): outputs={"Out": var}, attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -509,7 +509,7 @@ class XavierInitializer(Initializer): "seed": self._seed }, stop_gradient=True) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -610,7 +610,7 @@ class MSRAInitializer(Initializer): "seed": self._seed }, stop_gradient=True) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -709,7 +709,7 @@ class BilinearInitializer(Initializer): 'shape': list(shape), value_name: values }) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op @@ -768,7 +768,7 @@ class NumpyArrayInitializer(Initializer): value_name: values }, stop_gradient=True) - if not framework._in_dygraph_mode(): + if not framework.in_dygraph_mode(): var.op = op return op diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 7eb912645e5077d35a2d11d7d09a033d28345e15..11e3c4938bef4a3c97a724798e2f7273c25f06ed 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -17,7 +17,7 @@ from __future__ import print_function import copy import six -from .framework import Parameter, dtype_is_floating, _in_dygraph_mode +from .framework import Parameter, dtype_is_floating, in_dygraph_mode from . import unique_name from paddle.fluid.initializer import Constant, Xavier from .param_attr import ParamAttr diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 869a5f54e9cdf5740c5e216917d92880d7d61e2d..9eed00b16185d00f30dfd75f03e31fb45cf9567c 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -17,7 +17,7 @@ from __future__ import print_function import copy import numpy as np -from .framework import Variable, default_main_program, default_startup_program, _in_dygraph_mode, _current_expected_place +from .framework import Variable, default_main_program, default_startup_program, in_dygraph_mode, _current_expected_place from . 
import unique_name from .param_attr import ParamAttr, WeightNormParamAttr from . import core @@ -54,7 +54,7 @@ class LayerHelperBase(object): Return Variable construct from value """ if isinstance(value, np.ndarray): - assert _in_dygraph_mode( + assert in_dygraph_mode( ), "to_variable could only be called in dygraph mode" if not block: @@ -302,7 +302,7 @@ class LayerHelperBase(object): param = self._create_weight_normalize(attr, shape, dtype) WeightNormParamAttr.params_with_weight_norm.append(param) return param - if _in_dygraph_mode(): + if in_dygraph_mode(): # In dygraph mode, we want the returned parameter to be # initialized so that it can be used imperatively. return self.main_program.global_block().create_parameter( @@ -370,7 +370,7 @@ class LayerHelperBase(object): initializer: initializer to use """ assert isinstance(var, Variable) - if _in_dygraph_mode(): + if in_dygraph_mode(): initializer(var, var.block) else: self.startup_program.global_block().create_var( diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index a5d4d3947ab9b9699f6fc8ac0d7f088ede345290..e70ea9da38f7640fa09a39e7f8cbfeb91ddc9832 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -23,7 +23,7 @@ import os import inspect from ..layer_helper import LayerHelper from ..initializer import Normal, Constant, NumpyArrayInitializer -from ..framework import Variable, OpProtoHolder, _in_dygraph_mode +from ..framework import Variable, OpProtoHolder, in_dygraph_mode from ..dygraph import base from ..param_attr import ParamAttr from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_ @@ -481,7 +481,7 @@ def dynamic_lstm(input, forward, _ = fluid.layers.dynamic_lstm( input=forward_proj, size=hidden_dim * 4, use_peepholes=False) """ - assert _in_dygraph_mode( + assert in_dygraph_mode( ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!" assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp." helper = LayerHelper('lstm', **locals()) @@ -867,7 +867,7 @@ def dynamic_lstmp(input, proj_activation="tanh") """ - assert _in_dygraph_mode( + assert in_dygraph_mode( ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!" assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp." @@ -1041,7 +1041,7 @@ def dynamic_gru(input, hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim) """ - assert _in_dygraph_mode( + assert in_dygraph_mode( ) is not True, "please use gru instead of dynamic_gru in dygraph mode!" 
helper = LayerHelper('gru', **locals()) @@ -1760,7 +1760,7 @@ def sequence_conv(input, Variable: output of sequence_conv """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_conv', **locals()) dtype = helper.input_dtype() @@ -1821,7 +1821,7 @@ def sequence_softmax(input, use_cudnn=False, name=None): dtype='float32', lod_level=1) x_sequence_softmax = fluid.layers.sequence_softmax(input=x) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_softmax', **locals()) dtype = helper.input_dtype() @@ -2315,7 +2315,7 @@ def sequence_pool(input, pool_type, is_test=False): last_x = fluid.layers.sequence_pool(input=x, pool_type='last') first_x = fluid.layers.sequence_pool(input=x, pool_type='first') """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() @@ -2356,7 +2356,7 @@ def sequence_concat(input, name=None): out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3]) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_concat', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) @@ -2485,7 +2485,7 @@ def sequence_slice(input, offset, length, name=None): subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset, length=length) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper("sequence_slice", **locals()) dtype = helper.input_dtype() @@ -3307,7 +3307,7 @@ def layer_norm(input, >>> dtype='float32') >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) """ - assert _in_dygraph_mode( + assert in_dygraph_mode( ) is not True, "please use FC instead of fc in dygraph mode!" 
helper = LayerHelper('layer_norm', **locals()) dtype = helper.input_dtype() @@ -3946,7 +3946,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): dtype='float32', lod_level=1) out = layers.sequence_expand(x=x, y=y, ref_level=0) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_expand', input=x, **locals()) dtype = helper.input_dtype() @@ -4014,7 +4014,7 @@ def sequence_expand_as(x, y, name=None): dtype='float32', lod_level=1) out = layers.sequence_expand_as(x=x, y=y) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_expand_as', input=x, **locals()) dtype = helper.input_dtype() @@ -4062,7 +4062,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): out = fluid.layers.sequence_pad(x=x, pad_value=pad_value) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_pad', input=x, **locals()) dtype = helper.input_dtype() @@ -4130,7 +4130,7 @@ def sequence_unpad(x, length, name=None): out = fluid.layers.sequence_unpad(x=x, length=len) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_unpad', input=x, **locals()) dtype = helper.input_dtype() @@ -5305,7 +5305,7 @@ def sequence_reshape(input, new_dim): x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1) x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_reshape', **locals()) out = helper.create_variable_for_type_inference(helper.input_dtype()) @@ -5841,7 +5841,7 @@ def im2sequence(input, input=layer, stride=[1, 1], filter_size=[2, 2]) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") if isinstance(filter_size, int): @@ -6485,7 +6485,7 @@ def squeeze(input, axes, name=None): x = layers.data(name='x', shape=[5, 1, 10]) y = layers.sequeeze(input=x, axes=[1]) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "squeeze layer is not supported in dygraph mode yet.") helper = LayerHelper("squeeze", **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) @@ -7138,10 +7138,10 @@ def image_resize(input, out_shape(list|tuple|Variable|None): Output shape of image resize layer, the shape is (out_h, out_w). Default: None - scale(float|None): The multiplier for the input height or width. - At least one of out_shape or scale must be set. - And out_shape has a higher priority than scale. - Default: None + scale(float|None): The multiplier for the input height or width. At + least one of :attr:`out_shape` or :attr:`scale` must be set. + And :attr:`out_shape` has a higher priority than :attr:`scale`. + Default: None. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. resample(str): The resample method. It supports 'BILINEAR' and 'NEAREST' @@ -7179,6 +7179,7 @@ def image_resize(input, or 'NEAREST' currently. ValueError: One of out_shape and scale must not be None. ValueError: out_shape length should be 2. + ValueError: scale should be greater than zero. 
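With this rework, `image_resize` derives the target size either from `out_shape` or from a positive `scale`, with `out_shape` taking priority and a non-positive `scale` now raising `ValueError`. A short illustrative sketch of the two call styles (variable names are placeholders):

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
    # explicit output size; out_shape has priority over scale
    out1 = fluid.layers.image_resize(data, out_shape=[12, 12], resample='BILINEAR')
    # output size derived from a positive scale factor
    out2 = fluid.layers.image_resize(data, scale=2.0, resample='NEAREST')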
TypeError: align_corners shoule be a bool value ValueError: align_mode can only be '0' or '1' @@ -7210,26 +7211,36 @@ def image_resize(input, def _is_list_or_turple_(data): return (isinstance(data, list) or isinstance(data, tuple)) - out_h = 0 - out_w = 0 inputs = {"X": input} + attrs = { + "out_h": 0, + "out_w": 0, + "interp_method": resample_type, + "align_corners": align_corners, + "align_mode": align_mode + } + if out_shape is not None: if isinstance(out_shape, Variable): warnings.warn("out_shape as Variable type is deprecated, \ it is recommended to use actual_shape instead of \ out_shape to specify output shape dynamically.") inputs['OutSize'] = out_shape - elif not (_is_list_or_turple_(out_shape)): - raise TypeError("out_shape should be a list or tuple or Variable.") - elif len(out_shape) != 2: - raise ValueError("out_shape length should be 2.") - - out_shape = list(map(int, out_shape)) - out_h = out_shape[0] - out_w = out_shape[1] + else: + if not (_is_list_or_turple_(out_shape)): + raise TypeError( + "out_shape should be a list or tuple or Variable.") + if len(out_shape) != 2: + raise ValueError("out_shape length should be 2.") + + out_shape = list(map(int, out_shape)) + attrs['out_h'] = out_shape[0] + attrs['out_w'] = out_shape[1] + else: - out_h = int(input.shape[2] * scale) - out_w = int(input.shape[3] * scale) + if scale <= 0: + raise ValueError("scale should be greater than zero.") + attrs['scale'] = float(scale) if isinstance(actual_shape, Variable): inputs["OutSize"] = actual_shape @@ -7241,13 +7252,7 @@ def image_resize(input, type='{}_interp'.format(resample_type), inputs=inputs, outputs={"Out": out}, - attrs={ - "out_h": out_h, - "out_w": out_w, - "interp_method": resample_type, - "align_corners": align_corners, - "align_mode": align_mode - }) + attrs=attrs) return out @@ -7315,11 +7320,14 @@ def resize_bilinear(input, Args: input(${x_type}): ${x_comment}. - out_shape(${out_size_type}): ${out_size_comment}. + out_shape(list|tuple|Variable|None): Output shape of resize bilinear + layer, the shape is (out_h, out_w). + Default: None scale(float|None): The multiplier for the input height or width. At - least one of out_shape or scale must be set. And out_shape has - a higher priority than scale. Default: None. + least one of :attr:`out_shape` or :attr:`scale` must be set. + And :attr:`out_shape` has a higher priority than :attr:`scale`. + Default: None. name(str|None): The output variable name. actual_shape(Variable): An optional input to specify output shape @@ -7406,11 +7414,14 @@ def resize_nearest(input, Args: input(${x_type}): ${x_comment}. - out_shape(${out_size_type}): ${out_size_comment}. + out_shape(list|tuple|Variable|None): Output shape of resize nearest + layer, the shape is (out_h, out_w). + Default: None scale(float|None): The multiplier for the input height or width. At - least one of out_shape or scale must be set. And out_shape has - a higher priority than scale. Default: None. + least one of :attr:`out_shape` or :attr:`scale` must be set. + And :attr:`out_shape` has a higher priority than :attr:`scale`. + Default: None. name(str|None): The output variable name. 
actual_shape(Variable): An optional input to specify output shape @@ -7620,7 +7631,7 @@ def sequence_scatter(input, index, updates, name=None): output = fluid.layers.sequence_scatter(input, index, updates) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_scatter', **locals()) dtype = helper.input_dtype() @@ -8710,7 +8721,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None): x = fluid.layers.data(shape[30, 1], dtype='int32', lod_level=1) out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0) """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_enumerate', **locals()) out = helper.create_variable_for_type_inference( @@ -8751,7 +8762,7 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None): Variable: The output sequence mask. """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_mask', **locals()) @@ -9230,7 +9241,7 @@ def _elementwise_op(helper): op_type = helper.layer_type x = helper.kwargs.get('x', None) y = helper.kwargs.get('y', None) - if _in_dygraph_mode(): + if in_dygraph_mode(): x = base.to_variable(x) y = base.to_variable(y) @@ -9803,7 +9814,7 @@ def sequence_reverse(x, name=None): Returns: out(${y_type}): ${y_comment} """ - assert not _in_dygraph_mode(), ( + assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper("sequence_reverse", **locals()) if name is None: diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 17a34371049a1491481310803d639dc6304a13d1..625fcf78b340217116a81af3fd3abd762cb39c95 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -55,7 +55,7 @@ class Optimizer(object): """ def __init__(self, learning_rate, regularization=None, name=None): - if framework._in_dygraph_mode(): + if framework.in_dygraph_mode(): if not isinstance(learning_rate, float) and \ not isinstance(learning_rate, LearningRateDecay): raise TypeError( @@ -205,7 +205,7 @@ class Optimizer(object): name = self._name + "_" + name if (name in self._accumulators and param.name in self._accumulators[name]): - if framework._in_dygraph_mode(): + if framework.in_dygraph_mode(): return self._accumulators[name][param.name] raise Exception("Accumulator {} already exists for parameter {}". format(name, param.name)) @@ -374,7 +374,7 @@ class Optimizer(object): See examples in `apply_gradients`. """ self._dtype = loss.dtype - if framework._in_dygraph_mode(): + if framework.in_dygraph_mode(): if parameter_list is not None: parameters = parameter_list else: @@ -459,7 +459,7 @@ class Optimizer(object): Returns: list: A list of operators appended to the current program. 
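When `in_dygraph_mode()` is true, `minimize` builds the optimization ops directly from the traced gradients, so a typical dygraph step is: run the layer, call `loss.backward()`, then `optimizer.minimize(loss)`. A minimal sketch (illustrative only; the FC layer and sizes are placeholders):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.optimizer import SGDOptimizer

    with fluid.dygraph.guard():
        fc = fluid.FC('fc', size=10)
        optimizer = SGDOptimizer(learning_rate=0.01)
        x = fluid.dygraph.base.to_variable(
            np.random.random((4, 32)).astype('float32'))
        loss = fluid.layers.reduce_mean(fc(x))
        loss.backward()                 # accumulate gradients on the tracer
        optimizer.minimize(loss)        # apply the update in dygraph mode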
""" - if framework._in_dygraph_mode(): + if framework.in_dygraph_mode(): with program_guard(framework.default_main_program(), framework.default_startup_program()): optimize_ops = self._create_optimization_pass(params_grads) @@ -639,16 +639,16 @@ class DGCMomentumOptimizer(MomentumOptimizer): Original paper is https://arxiv.org/abs/1712.01887 - DGC reduce the communication bandwidth by sending only the important gradients (sparse update):\ + DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\ only gradients larger than a threshold are transmitted. - To avoid losing information, DGC accumulate the rest of the gradients locally. + To avoid losing information, DGC accumulates the rest of the gradients locally. Eventually, these gradients become large enough to be transmitted. - Thus, DGC send the large gradients immediately but eventually send all of the gradients over time. + Thus, DGC sends the large gradients immediately but eventually send all of the gradients over time. - To ensure no loss of accuracy, DGC employs momentum correc-tionandlocal gradient clipping on top of the gradient sparsification to maintain model performance. + To ensure no loss of accuracy, DGC employs momentum correction and local gradient clipping on top of the gradient sparsification to maintain model performance. DGC also uses momentum factor masking and warmup training to overcome the staleness problem caused by reduced communication. @@ -663,7 +663,7 @@ class DGCMomentumOptimizer(MomentumOptimizer): learning_rate (float|Variable): the learning rate used to update parameters. \ Can be a float value or a Variable with one float value as data element. momentum (float): Momentum factor. - rampup_begin_step (int): The begining step from which gradient compression is implemented. + rampup_begin_step (int): The beginning step from which gradient compression is implemented. rampup_step (int): How long it use the sparsity periods. Default is 1. for example: If the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 5, \ it will use 0.75 at 0 step, and 0.9375 at 1 step, and so on. And when reach sparsity array ends, \ @@ -671,9 +671,9 @@ class DGCMomentumOptimizer(MomentumOptimizer): sparsity (list[float]): Get top important element from gradient tensor, the ratio is (1 - current sparsity). use_nesterov (bool): Enables Nesterov momentum. True means use nesterov. local_grad_clip_norm (float): Clip norm value if needed. - num_trainers: The number of training node. + num_trainers: The number of training nodes. regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer. - name: A optional name prefix. + name: An optional name prefix. Examples: .. code-block:: python diff --git a/python/paddle/fluid/tests/unittests/fake_reader.py b/python/paddle/fluid/tests/unittests/fake_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..34a256e15dd2f3a8a83aaba4e178efe52c8d8547 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/fake_reader.py @@ -0,0 +1,34 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import six + + +def fake_imdb_reader(word_dict_size, + sample_num, + lower_seq_len=100, + upper_seq_len=200, + class_dim=2): + def __reader__(): + for _ in six.moves.range(sample_num): + length = np.random.random_integers( + low=lower_seq_len, high=upper_seq_len, size=[1])[0] + ids = np.random.random_integers( + low=0, high=word_dict_size - 1, size=[length]).astype('int64') + label = np.random.random_integers( + low=0, high=class_dim - 1, size=[1]).astype('int64')[0] + yield ids, label + + return __reader__ diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index 9cb88d4a8553f3b750f6cf3b24115b4d188ed1d6..04a36f7cafe7b4445125c4e9bd58f6d30d6c71aa 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -18,7 +18,7 @@ import numpy as np import paddle.fluid as fluid -class L1(fluid.dygraph.Layer): +class L1(fluid.Layer): def __init__(self, prefix): super(L1, self).__init__(prefix) self._param_attr = fluid.ParamAttr( @@ -32,7 +32,7 @@ class L1(fluid.dygraph.Layer): return self.w1 + self.w2 -class L2(fluid.dygraph.Layer): +class L2(fluid.Layer): def __init__(self, prefix): super(L2, self).__init__(prefix) self.layer1 = L1(self.full_name()) @@ -42,7 +42,7 @@ class L2(fluid.dygraph.Layer): return self.layer1() + self.layer2() -class L3(fluid.dygraph.Layer): +class L3(fluid.Layer): def __init__(self, prefix): super(L3, self).__init__(prefix) self.layer1 = L2(self.full_name()) @@ -59,7 +59,7 @@ class TestBaseLayer(unittest.TestCase): ret = l() self.assertEqual(l.w1.name, "test_one_level/L1_0.w_0") self.assertEqual(l.w2.name, "test_one_level/L1_0.w_1") - self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2]))) + self.assertTrue(np.allclose(ret.numpy(), 0.2 * np.ones([2, 2]))) def test_three_level(self): with fluid.dygraph.guard(): @@ -72,7 +72,7 @@ class TestBaseLayer(unittest.TestCase): self.assertEqual(names[3], "test_three_level/L3_0/L2_0/L1_1.w_1") self.assertEqual(names[4], "test_three_level/L3_0/L2_1/L1_0.w_0") self.assertEqual(names[5], "test_three_level/L3_0/L2_1/L1_0.w_1") - self.assertTrue(np.allclose(ret._numpy(), 0.8 * np.ones([2, 2]))) + self.assertTrue(np.allclose(ret.numpy(), 0.8 * np.ones([2, 2]))) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py index f60ed1d79ae5778f751d6101fde386ae3a90c0f7..963a17e7d697512e871a97ef24cb1c4ba37a7547 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -91,17 +91,26 @@ class TestBilinearInterpOp(OpTest): self.op_type = "bilinear_interp" input_np = np.random.random(self.input_shape).astype("float32") - output_np = bilinear_interp_np(input_np, self.out_h, self.out_w, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + if self.scale > 0: + out_h = int(self.input_shape[2] * self.scale) + out_w = int(self.input_shape[3] * self.scale) + 
else: + out_h = self.out_h + out_w = self.out_w + + output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size, + self.actual_shape, self.align_corners, + self.align_mode) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size if self.actual_shape is not None: self.inputs['OutSize'] = self.actual_shape + self.attrs = { 'out_h': self.out_h, 'out_w': self.out_w, + 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode @@ -119,6 +128,7 @@ class TestBilinearInterpOp(OpTest): self.input_shape = [2, 3, 4, 4] self.out_h = 2 self.out_w = 2 + self.scale = 0. self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -130,6 +140,7 @@ class TestBilinearInterpCase1(TestBilinearInterpOp): self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 + self.scale = 0. self.align_corners = True self.align_mode = 1 @@ -140,6 +151,7 @@ class TestBilinearInterpCase2(TestBilinearInterpOp): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 + self.scale = 0. self.align_corners = True self.align_mode = 1 @@ -150,6 +162,7 @@ class TestBilinearInterpCase3(TestBilinearInterpOp): self.input_shape = [1, 1, 128, 64] self.out_h = 64 self.out_w = 128 + self.scale = 0. self.align_corners = True self.align_mode = 1 @@ -160,6 +173,7 @@ class TestBilinearInterpCase4(TestBilinearInterpOp): self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 + self.scale = 0. self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -171,6 +185,7 @@ class TestBilinearInterpCase5(TestBilinearInterpOp): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 + self.scale = 0. self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -182,6 +197,7 @@ class TestBilinearInterpCase6(TestBilinearInterpOp): self.input_shape = [1, 1, 128, 64] self.out_h = 64 self.out_w = 128 + self.scale = 0. self.out_size = np.array([65, 129]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -193,6 +209,7 @@ class TestBilinearInterpActualShape(TestBilinearInterpOp): self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 + self.scale = 0. self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -206,15 +223,25 @@ class TestBilinearInterpOpUint8(OpTest): self.op_type = "bilinear_interp" input_np = np.random.randint( low=0, high=256, size=self.input_shape).astype("uint8") - output_np = bilinear_interp_np(input_np, self.out_h, self.out_w, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + + if self.scale > 0: + out_h = int(self.input_shape[2] * self.scale) + out_w = int(self.input_shape[3] * self.scale) + else: + out_h = self.out_h + out_w = self.out_w + + output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size, + self.actual_shape, self.align_corners, + self.align_mode) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size + self.attrs = { 'out_h': self.out_h, 'out_w': self.out_w, + 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode @@ -229,6 +256,7 @@ class TestBilinearInterpOpUint8(OpTest): self.input_shape = [1, 3, 9, 6] self.out_h = 10 self.out_w = 9 + self.scale = 0. 
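+        # scale == 0. disables scaling here, so setUp() computes the expected
+        # output from the explicit out_h / out_w above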
self.align_corners = True self.align_mode = 1 @@ -239,6 +267,7 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8): self.input_shape = [2, 3, 128, 64] self.out_h = 120 self.out_w = 50 + self.scale = 0. self.align_corners = True self.align_mode = 1 @@ -249,6 +278,7 @@ class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8): self.input_shape = [4, 1, 7, 8] self.out_h = 5 self.out_w = 13 + self.scale = 0. self.out_size = np.array([6, 15]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -272,5 +302,38 @@ class TestBilinearInterpWithMethod3(TestBilinearInterpOp): self.align_mode = 0 +class TestBilinearInterpScale1(TestBilinearInterpOp): + def init_test_case(self): + self.interp_method = 'bilinear' + self.input_shape = [2, 3, 16, 32] + self.out_h = 60 + self.out_w = 25 + self.scale = 2. + self.align_corners = True + self.align_mode = 1 + + +class TestBilinearInterpScale2(TestBilinearInterpOp): + def init_test_case(self): + self.interp_method = 'bilinear' + self.input_shape = [2, 3, 16, 32] + self.out_h = 60 + self.out_w = 25 + self.scale = 1. + self.align_corners = True + self.align_mode = 1 + + +class TestBilinearInterpScale3(TestBilinearInterpOp): + def init_test_case(self): + self.interp_method = 'bilinear' + self.input_shape = [2, 3, 16, 32] + self.out_h = 60 + self.out_w = 25 + self.scale = 1.5 + self.align_corners = True + self.align_mode = 1 + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py index 377014510b55633f697ef7bf2f5f597281e5f5a5..0fbf0d42f5dcc34947235d9bd1db6f8b1c07d59a 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -19,7 +19,7 @@ import time import six import unittest -EPOCH_NUM = 60 +EPOCH_NUM = 20 BATCH_SIZE = 32 CLASS_NUM = 10 diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py index d4c043d9c76f21482f17b9bb20c4fde5ce7cc6e7..eb3832ca9ffb7ac9b4261de1036c85c93c6d0a81 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py @@ -22,6 +22,8 @@ import paddle import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid import compiler +import numpy as np +from fake_reader import fake_imdb_reader def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): @@ -35,16 +37,16 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): ) return - word_dict = paddle.dataset.imdb.word_dict() - train_reader = paddle.batch( - paddle.dataset.imdb.train(word_dict), batch_size=batch_size) + word_dict_size = 5147 + reader = fake_imdb_reader(word_dict_size, batch_size * 40) + train_reader = paddle.batch(reader, batch_size=batch_size) data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost = network(data, label, len(word_dict)) + cost = network(data, label, word_dict_size) cost.persistable = True optimizer = fluid.optimizer.Adagrad(learning_rate=0.2) optimizer.minimize(cost) diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index 
e49239da6d3918211fbbc302d2c56818460b6d51..470187e6421173d1cb1213d06660331c164859c4 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -19,6 +19,8 @@ import numpy as np import paddle import paddle.fluid.core as core import paddle.fluid as fluid +import six +from fake_reader import fake_imdb_reader def bow_net(data, @@ -48,11 +50,10 @@ def bow_net(data, class TestGradientClip(unittest.TestCase): def setUp(self): - self.word_dict = paddle.dataset.imdb.word_dict() + self.word_dict_len = 5147 self.BATCH_SIZE = 2 - self.train_data = paddle.batch( - paddle.dataset.imdb.train(self.word_dict), - batch_size=self.BATCH_SIZE) + reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100) + self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE) def get_places(self): places = [core.CPUPlace()] @@ -131,7 +132,7 @@ class TestGradientClip(unittest.TestCase): data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost = bow_net(data, label, len(self.word_dict)) + cost = bow_net(data, label, self.word_dict_len) fluid.clip.set_gradient_clip( clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 8b3094cb2a3a633cf89e571e85e494ce7e887063..8404a57eb85a30edda6889150e588cab783be685 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -18,11 +18,11 @@ import numpy as np import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.dygraph.nn import FC +from paddle.fluid import FC from test_imperative_base import new_program_scope -class MyLayer(fluid.dygraph.Layer): +class MyLayer(fluid.Layer): def __init__(self, name_scope): super(MyLayer, self).__init__(name_scope) @@ -34,7 +34,7 @@ class MyLayer(fluid.dygraph.Layer): return [x] -class MyPyLayer(fluid.dygraph.PyLayer): +class MyPyLayer(fluid.PyLayer): def __init__(self): super(MyPyLayer, self).__init__() @@ -48,7 +48,7 @@ class MyPyLayer(fluid.dygraph.PyLayer): return np.array(dout) * (1 - np.square(np.array(out))) -class MLP(fluid.dygraph.Layer): +class MLP(fluid.Layer): def __init__(self, name_scope): super(MLP, self).__init__(name_scope) self._fc1 = FC(self.full_name(), @@ -71,7 +71,7 @@ class MLP(fluid.dygraph.Layer): return x -class SimpleRNNCell(fluid.dygraph.Layer): +class SimpleRNNCell(fluid.Layer): def __init__(self, name_scope, step_input_size, hidden_size, output_size, param_attr): super(SimpleRNNCell, self).__init__(name_scope) @@ -81,7 +81,7 @@ class SimpleRNNCell(fluid.dygraph.Layer): self._dtype = core.VarDesc.VarType.FP32 self.param_attr = param_attr - def _build_once(self, inputs, pre_hidden): + def build_once(self, inputs, pre_hidden): i2h_param_shape = [self.step_input_size, self.hidden_size] h2h_param_shape = [self.hidden_size, self.hidden_size] h2o_param_shape = [self.output_size, self.hidden_size] @@ -159,7 +159,7 @@ class SimpleRNNCell(fluid.dygraph.Layer): return reduce_out, hidden -class SimpleRNN(fluid.dygraph.Layer): +class SimpleRNN(fluid.Layer): def __init__(self, name_scope): super(SimpleRNN, self).__init__(name_scope) self.seq_len = 4 @@ -200,22 +200,22 @@ class TestImperative(unittest.TestCase): inputs.append(fluid.dygraph.base.to_variable(x)) ret = fluid.layers.sums(inputs) loss = 
fluid.layers.reduce_sum(ret) - loss._backward() - self.assertTrue(np.allclose(ret._numpy(), x * 10)) - self.assertTrue(np.allclose(inputs[0]._gradient(), x)) + loss.backward() + self.assertTrue(np.allclose(ret.numpy(), x * 10)) + self.assertTrue(np.allclose(inputs[0].gradient(), x)) def test_layer(self): with fluid.dygraph.guard(): cl = core.Layer() cl.forward([]) - l = fluid.dygraph.Layer("l") + l = fluid.Layer("l") self.assertRaises(NotImplementedError, l.forward, []) def test_pylayer_func_id(self): with fluid.dygraph.guard(): - class PyLayer1(fluid.dygraph.PyLayer): + class PyLayer1(fluid.PyLayer): def __init__(self): super(PyLayer1, self).__init__() @@ -227,7 +227,7 @@ class TestImperative(unittest.TestCase): def backward(input): return input - class PyLayer2(fluid.dygraph.PyLayer): + class PyLayer2(fluid.PyLayer): def __init__(self): super(PyLayer2, self).__init__() @@ -257,9 +257,9 @@ class TestImperative(unittest.TestCase): my_py_layer = MyPyLayer() var_inp = fluid.dygraph.base.to_variable(np_inp) outs = my_py_layer(var_inp) - dy_out = np.sum(outs[0]._numpy()) - outs[0]._backward() - dy_grad = var_inp._gradient() + dy_out = np.sum(outs[0].numpy()) + outs[0].backward() + dy_grad = var_inp.gradient() with new_program_scope(): inp = fluid.layers.data( @@ -287,9 +287,9 @@ class TestImperative(unittest.TestCase): l = MyLayer("my_layer") x = l(var_inp)[0] self.assertIsNotNone(x) - dy_out = x._numpy() - x._backward() - dy_grad = l._x_for_debug._gradient() + dy_out = x.numpy() + x.backward() + dy_grad = l._x_for_debug.gradient() with new_program_scope(): inp = fluid.layers.data( @@ -314,9 +314,9 @@ class TestImperative(unittest.TestCase): var_inp = fluid.dygraph.base.to_variable(np_inp) mlp = MLP("mlp") out = mlp(var_inp) - dy_out = out._numpy() - out._backward() - dy_grad = mlp._fc1._w._gradient() + dy_out = out.numpy() + out.backward() + dy_grad = mlp._fc1._w.gradient() with new_program_scope(): inp = fluid.layers.data( @@ -358,7 +358,7 @@ class TestImperative(unittest.TestCase): x = fluid.layers.elementwise_add(inp1, inp2) else: x = fluid.layers.elementwise_sub(inp1, inp2) - dygraph_result = x._numpy() + dygraph_result = x.numpy() # static graph with new_program_scope(): @@ -407,11 +407,11 @@ class TestImperative(unittest.TestCase): var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) simple_rnn = SimpleRNN("simple_rnn") outs, pre_hiddens = simple_rnn.forward(var_inp) - dy_out = outs[3]._numpy() - outs[3]._backward() - dy_grad_h2o = simple_rnn._cell._h2o_w._gradient() - dy_grad_h2h = simple_rnn._cell._h2h_w._gradient() - dy_grad_i2h = simple_rnn._cell._i2h_w._gradient() + dy_out = outs[3].numpy() + outs[3].backward() + dy_grad_h2o = simple_rnn._cell._h2o_w.gradient() + dy_grad_h2h = simple_rnn._cell._h2h_w.gradient() + dy_grad_i2h = simple_rnn._cell._i2h_w.gradient() with new_program_scope(): inp = fluid.layers.data( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py b/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py index a92b7d62fa598a3ec9b53bade2805cc033f4b9d9..c28058100a43eb4f7da8331d9ac75db9c090bdf9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py @@ -18,11 +18,11 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid.optimizer import SGDOptimizer -from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC +from paddle.fluid import Conv2D, Pool2D, FC from paddle.fluid.dygraph.base import to_variable -class 
SimpleImgConvPool(fluid.dygraph.Layer): +class SimpleImgConvPool(fluid.Layer): def __init__(self, name_scope, num_channels, @@ -71,7 +71,7 @@ class SimpleImgConvPool(fluid.dygraph.Layer): return x -class MNIST(fluid.dygraph.Layer): +class MNIST(fluid.Layer): def __init__(self, name_scope): super(MNIST, self).__init__(name_scope) @@ -125,21 +125,21 @@ class TestDygraphCheckpoint(unittest.TestCase): img = to_variable(dy_x_data) label = to_variable(y_data) - label._stop_gradient = True + label.stop_gradient = True cost = mnist(img) loss = fluid.layers.cross_entropy(cost, label) avg_loss = fluid.layers.mean(loss) - dy_out = avg_loss._numpy() + dy_out = avg_loss.numpy() - avg_loss._backward() + avg_loss.backward() sgd.minimize(avg_loss) fluid.dygraph.save_persistables(mnist, "save_dir") mnist.clear_gradients() for param in mnist.parameters(): - dy_param_init_value[param.name] = param._numpy() + dy_param_init_value[param.name] = param.numpy() mnist.load_dict( fluid.dygraph.load_persistables(mnist, "save_dir")) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py index ccebd4a54727f383bd4e46ff57bfdc9381577d05..ca2cffa9c75cc851f0911cb0063f4e82bb2a41eb 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py @@ -32,11 +32,11 @@ NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5)) NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1)) -class DMF(fluid.dygraph.Layer): +class DMF(fluid.Layer): def __init__(self, name_scope): super(DMF, self).__init__(name_scope) - self._user_latent = fluid.dygraph.FC(self.full_name(), 256) - self._item_latent = fluid.dygraph.FC(self.full_name(), 256) + self._user_latent = fluid.FC(self.full_name(), 256) + self._item_latent = fluid.FC(self.full_name(), 256) self._user_layers = [] self._item_layers = [] @@ -45,13 +45,11 @@ class DMF(fluid.dygraph.Layer): self._user_layers.append( self.add_sublayer( 'user_layer_%d' % i, - fluid.dygraph.FC( - self.full_name(), self._hid_sizes[i], act='relu'))) + fluid.FC(self.full_name(), self._hid_sizes[i], act='relu'))) self._item_layers.append( self.add_sublayer( 'item_layer_%d' % i, - fluid.dygraph.FC( - self.full_name(), self._hid_sizes[i], act='relu'))) + fluid.FC(self.full_name(), self._hid_sizes[i], act='relu'))) def forward(self, users, items): users = self._user_latent(users) @@ -63,19 +61,18 @@ class DMF(fluid.dygraph.Layer): return fluid.layers.elementwise_mul(users, items) -class MLP(fluid.dygraph.Layer): +class MLP(fluid.Layer): def __init__(self, name_scope): super(MLP, self).__init__(name_scope) - self._user_latent = fluid.dygraph.FC(self.full_name(), 256) - self._item_latent = fluid.dygraph.FC(self.full_name(), 256) + self._user_latent = fluid.FC(self.full_name(), 256) + self._item_latent = fluid.FC(self.full_name(), 256) self._match_layers = [] self._hid_sizes = [128, 64] for i in range(len(self._hid_sizes)): self._match_layers.append( self.add_sublayer( 'match_layer_%d' % i, - fluid.dygraph.FC( - self.full_name(), self._hid_sizes[i], act='relu'))) + fluid.FC(self.full_name(), self._hid_sizes[i], act='relu'))) self._mat def forward(self, users, items): @@ -88,7 +85,7 @@ class MLP(fluid.dygraph.Layer): return match_vec -class DeepCF(fluid.dygraph.Layer): +class DeepCF(fluid.Layer): def __init__(self, name_scope, num_users, num_items, matrix): super(DeepCF, self).__init__(name_scope) self._num_users = num_users @@ -99,11 +96,11 @@ class 
DeepCF(fluid.dygraph.Layer): matrix.dtype, is_bias=False, default_initializer=fluid.initializer.NumpyArrayInitializer(matrix)) - self._rating_matrix._stop_gradient = True + self._rating_matrix.stop_gradient = True self._mlp = MLP(self.full_name()) self._dmf = DMF(self.full_name()) - self._match_fc = fluid.dygraph.FC(self.full_name(), 1, act='sigmoid') + self._match_fc = fluid.FC(self.full_name(), 1, act='sigmoid') def forward(self, users, items): # users_emb = self._user_emb(users) @@ -255,10 +252,10 @@ class TestDygraphDeepCF(unittest.TestCase): fluid.layers.log_loss(prediction, to_variable(labels_np[ slice:slice + BATCH_SIZE]))) - loss._backward() + loss.backward() adam.minimize(loss) deepcf.clear_gradients() - dy_loss = loss._numpy() + dy_loss = loss.numpy() sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss)) self.assertEqual(static_loss, dy_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 58faa1cb85af9cedb70f3a12244cfeb44e0f4f52..5d773ec1c9db160cd63a28c634043037260e0b82 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -22,12 +22,12 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.optimizer import SGDOptimizer -from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC +from paddle.fluid import Conv2D, Pool2D, FC from test_imperative_base import new_program_scope from paddle.fluid.dygraph.base import to_variable -class Discriminator(fluid.dygraph.Layer): +class Discriminator(fluid.Layer): def __init__(self, name_scope): super(Discriminator, self).__init__(name_scope) self._fc1 = FC(self.full_name(), size=32, act='elu') @@ -38,7 +38,7 @@ class Discriminator(fluid.dygraph.Layer): return self._fc2(x) -class Generator(fluid.dygraph.Layer): +class Generator(fluid.Layer): def __init__(self, name_scope): super(Generator, self).__init__(name_scope) self._fc1 = FC(self.full_name(), size=64, act='elu') @@ -150,7 +150,7 @@ class TestDygraphGAN(unittest.TestCase): x=d_fake, label=to_variable(np.zeros([2, 1], np.float32)))) d_loss = d_loss_real + d_loss_fake - d_loss._backward() + d_loss.backward() sgd.minimize(d_loss) discriminator.clear_gradients() generator.clear_gradients() @@ -160,15 +160,15 @@ class TestDygraphGAN(unittest.TestCase): g_loss = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( x=d_fake, label=to_variable(np.ones([2, 1], np.float32)))) - g_loss._backward() + g_loss.backward() sgd.minimize(g_loss) for p in discriminator.parameters(): - dy_params[p.name] = p._numpy() + dy_params[p.name] = p.numpy() for p in generator.parameters(): - dy_params[p.name] = p._numpy() + dy_params[p.name] = p.numpy() - dy_g_loss = g_loss._numpy() - dy_d_loss = d_loss._numpy() + dy_g_loss = g_loss.numpy() + dy_d_loss = d_loss.numpy() self.assertEqual(dy_g_loss, static_g_loss) self.assertEqual(dy_d_loss, static_d_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py index a8fb9ecfe4be16b73ac2144259f25ed3859ece7e..234fcd60404286977309083257c24d941db77449 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py @@ -15,14 +15,12 @@ import contextlib import unittest import numpy as np -import six import sys import paddle import paddle.fluid as fluid import paddle.fluid.core as core from 
paddle.fluid.optimizer import AdamOptimizer -from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC from test_imperative_base import new_program_scope from paddle.fluid.dygraph.base import to_variable @@ -31,7 +29,7 @@ def gen_data(): pass -class GraphConv(fluid.dygraph.Layer): +class GraphConv(fluid.Layer): def __init__(self, name_scope, in_features, out_features): super(GraphConv, self).__init__(name_scope) @@ -50,7 +48,7 @@ class GraphConv(fluid.dygraph.Layer): return fluid.layers.matmul(adj, support) + self.bias -class GCN(fluid.dygraph.Layer): +class GCN(fluid.Layer): def __init__(self, name_scope, num_hidden): super(GCN, self).__init__(name_scope) self.gc = GraphConv(self.full_name(), num_hidden, 32) @@ -134,10 +132,9 @@ class TestDygraphGNN(unittest.TestCase): loss = fluid.layers.reduce_sum(loss) adam = AdamOptimizer(learning_rate=1e-3) adam.minimize(loss) - self.assertEqual(static_loss, loss._numpy()) - self.assertTrue( - np.allclose(static_weight, model.gc.weight._numpy())) - sys.stderr.write('%s %s\n' % (static_loss, loss._numpy())) + self.assertEqual(static_loss, loss.numpy()) + self.assertTrue(np.allclose(static_weight, model.gc.weight.numpy())) + sys.stderr.write('%s %s\n' % (static_loss, loss.numpy())) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py index 5ab01839fbc20bbd3c242878c4ea23a00f7b0dca..76b8d3aa3943e44a17ab822618d8d1cb85aaa551 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py @@ -128,25 +128,25 @@ class TestImperativeMnist(unittest.TestCase): img = to_variable(dy_x_data) label = to_variable(y_data) - label._stop_gradient = True + label.stop_gradient = True cost = mnist(img) loss = fluid.layers.cross_entropy(cost, label) avg_loss = fluid.layers.mean(loss) - dy_out = avg_loss._numpy() + dy_out = avg_loss.numpy() if epoch == 0 and batch_id == 0: for param in mnist.parameters(): - dy_param_init_value[param.name] = param._numpy() + dy_param_init_value[param.name] = param.numpy() - avg_loss._backward() + avg_loss.backward() sgd.minimize(avg_loss) mnist.clear_gradients() dy_param_value = {} for param in mnist.parameters(): - dy_param_value[param.name] = param._numpy() + dy_param_value[param.name] = param.numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index 8b659a3e08e381dd6f55b666d9f5f1b172a51930..b9f93119e83159c5bc3052b0292168a9ef641d3e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -28,7 +28,7 @@ from paddle.fluid.dygraph.base import to_variable from test_imperative_base import new_program_scope -class MLP(fluid.dygraph.Layer): +class MLP(fluid.Layer): def __init__(self, name_scope, param_attr=None, bias_attr=None): super(MLP, self).__init__(name_scope) @@ -75,18 +75,18 @@ class TestImperativeOptimizerBase(unittest.TestCase): cost = mlp(img) avg_loss = fluid.layers.reduce_mean(cost) - dy_out = avg_loss._numpy() + dy_out = avg_loss.numpy() if batch_id == 0: for param in mlp.parameters(): - dy_param_init_value[param.name] = param._numpy() + dy_param_init_value[param.name] = param.numpy() - avg_loss._backward() + avg_loss.backward() optimizer.minimize(avg_loss) mlp.clear_gradients() 
dy_param_value = {} for param in mlp.parameters(): - dy_param_value[param.name] = param._numpy() + dy_param_value[param.name] = param.numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 82eb61ba654636ccc3c2acee8508dfabb62ee9cb..088d36be2327a91da0efc639d7f970ed9e43d151 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -24,10 +24,9 @@ from paddle.fluid.dygraph.base import to_variable from test_imperative_base import new_program_scope import numpy as np import six -from paddle.fluid.backward import append_backward -class SimpleLSTMRNN(fluid.dygraph.Layer): +class SimpleLSTMRNN(fluid.Layer): def __init__(self, name_scope, hidden_size, @@ -45,7 +44,7 @@ class SimpleLSTMRNN(fluid.dygraph.Layer): self.cell_array = [] self.hidden_array = [] - def _build_once(self, input_embedding, init_hidden=None, init_cell=None): + def build_once(self, input_embedding, init_hidden=None, init_cell=None): self.weight_1_arr = [] self.weight_2_arr = [] self.bias_arr = [] @@ -132,7 +131,7 @@ class SimpleLSTMRNN(fluid.dygraph.Layer): return real_res, last_hidden, last_cell -class PtbModel(fluid.dygraph.Layer): +class PtbModel(fluid.Layer): def __init__(self, name_scope, hidden_size, @@ -177,7 +176,7 @@ class PtbModel(fluid.dygraph.Layer): default_initializer=fluid.initializer.UniformInitializer( low=-self.init_scale, high=self.init_scale)) - def _build_once(self, input, label, init_hidden, init_cell): + def build_once(self, input, label, init_hidden, init_cell): pass def forward(self, input, label, init_hidden, init_cell): @@ -260,13 +259,13 @@ class TestDygraphPtbRnn(unittest.TestCase): init_cell) if i == 0: for param in ptb_model.parameters(): - dy_param_init[param.name] = param._numpy() - dy_loss._backward() + dy_param_init[param.name] = param.numpy() + dy_loss.backward() sgd.minimize(dy_loss) ptb_model.clear_gradients() if i == batch_num - 1: for param in ptb_model.parameters(): - dy_param_updated[param.name] = param._numpy() + dy_param_updated[param.name] = param.numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed @@ -334,11 +333,11 @@ class TestDygraphPtbRnn(unittest.TestCase): static_param_updated[static_param_name_list[k - 3]] = out[k] - self.assertTrue(np.array_equal(static_loss_value, dy_loss._numpy())) + self.assertTrue(np.array_equal(static_loss_value, dy_loss.numpy())) self.assertTrue( - np.array_equal(static_last_cell_value, last_cell._numpy())) + np.array_equal(static_last_cell_value, last_cell.numpy())) self.assertTrue( - np.array_equal(static_last_hidden_value, last_hidden._numpy())) + np.array_equal(static_last_hidden_value, last_hidden.numpy())) for key, value in six.iteritems(static_param_init): self.assertTrue(np.array_equal(value, dy_param_init[key])) for key, value in six.iteritems(static_param_updated): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 1d786d584632769e4318bcdeb24ef7ef8ea18597..d9ef08b3c491b24323bb1469165ed5482737013a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -21,7 +21,7 @@ import paddle import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.layer_helper 
import LayerHelper -from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, FC +from paddle.fluid import Conv2D, Pool2D, BatchNorm, FC from paddle.fluid.dygraph.base import to_variable from test_imperative_base import new_program_scope @@ -68,7 +68,7 @@ def optimizer_setting(params): return optimizer -class ConvBNLayer(fluid.dygraph.Layer): +class ConvBNLayer(fluid.Layer): def __init__(self, name_scope, num_channels, @@ -99,7 +99,7 @@ class ConvBNLayer(fluid.dygraph.Layer): return y -class BottleneckBlock(fluid.dygraph.Layer): +class BottleneckBlock(fluid.Layer): def __init__(self, name_scope, num_channels, @@ -156,7 +156,7 @@ class BottleneckBlock(fluid.dygraph.Layer): return layer_helper.append_activation(y) -class ResNet(fluid.dygraph.Layer): +class ResNet(fluid.Layer): def __init__(self, name_scope, layers=50, class_dim=102): super(ResNet, self).__init__(name_scope) @@ -247,7 +247,7 @@ class TestDygraphResnet(unittest.TestCase): dy_param_init_value = {} for param in resnet.parameters(): - dy_param_init_value[param.name] = param._numpy() + dy_param_init_value[param.name] = param.numpy() for batch_id, data in enumerate(train_reader()): if batch_id >= batch_num: @@ -260,20 +260,20 @@ class TestDygraphResnet(unittest.TestCase): img = to_variable(dy_x_data) label = to_variable(y_data) - label._stop_gradient = True + label.stop_gradient = True out = resnet(img) loss = fluid.layers.cross_entropy(input=out, label=label) avg_loss = fluid.layers.mean(x=loss) - dy_out = avg_loss._numpy() + dy_out = avg_loss.numpy() if batch_id == 0: for param in resnet.parameters(): if param.name not in dy_param_init_value: - dy_param_init_value[param.name] = param._numpy() + dy_param_init_value[param.name] = param.numpy() - avg_loss._backward() + avg_loss.backward() dy_grad_value = {} for param in resnet.parameters(): @@ -288,7 +288,7 @@ class TestDygraphResnet(unittest.TestCase): dy_param_value = {} for param in resnet.parameters(): - dy_param_value[param.name] = param._numpy() + dy_param_value[param.name] = param.numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index 91a9c9d37668096c305ac0817c3be1c6bcb81746..3f3f92cde57c80fa4ba3d2f1389cc47efd74ca5b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -333,7 +333,7 @@ class TestImperativeResneXt(unittest.TestCase): dy_param_init_value = {} for param in se_resnext.parameters(): - dy_param_init_value[param.name] = param._numpy() + dy_param_init_value[param.name] = param.numpy() for epoch_id in range(epoch_num): for batch_id, data in enumerate(train_reader()): @@ -349,19 +349,19 @@ class TestImperativeResneXt(unittest.TestCase): img = to_variable(dy_x_data) label = to_variable(y_data) - label._stop_gradient = True + label.stop_gradient = True out = se_resnext(img) loss = fluid.layers.cross_entropy(input=out, label=label) avg_loss = fluid.layers.mean(x=loss) - dy_out = avg_loss._numpy() + dy_out = avg_loss.numpy() if batch_id == 0: for param in se_resnext.parameters(): if param.name not in dy_param_init_value: - dy_param_init_value[param.name] = param._numpy() - avg_loss._backward() + dy_param_init_value[param.name] = param.numpy() + avg_loss.backward() #dy_grad_value = {} #for param in se_resnext.parameters(): @@ -375,7 +375,7 @@ class TestImperativeResneXt(unittest.TestCase): 
dy_param_value = {} for param in se_resnext.parameters(): - dy_param_value[param.name] = param._numpy() + dy_param_value[param.name] = param.numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py index d871d2dd8501d76c7706febb0722c512db2f3ada..b24bab210a15528f308804c71732bd71eb6105a4 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py @@ -16,7 +16,8 @@ from __future__ import print_function import unittest import paddle.fluid as fluid -from paddle.fluid.dygraph import Embedding, LayerNorm, FC, to_variable, Layer, guard +from paddle.fluid import Embedding, LayerNorm, FC, Layer +from paddle.fluid.dygraph import to_variable, guard from test_imperative_base import new_program_scope from paddle.fluid import core import numpy as np @@ -116,7 +117,7 @@ class ModelHyperParams(object): # to process after each sub-layer postprocess_cmd = "da" # dropout + residual connection # random seed used in dropout for CE. - dropout_seed = 1 + dropout_seed = None # the flag indicating whether to share embedding and softmax weights. # vocabularies in source and target should be same for weight sharing. weight_sharing = True @@ -166,15 +167,21 @@ def create_data(is_static=False): ] else: enc_inputs = [ - to_variable(src_word_np), to_variable(src_pos_np), - to_variable(src_slf_attn_bias_np) + to_variable( + src_word_np, name='src_word'), to_variable( + src_pos_np, name='src_pos'), to_variable( + src_slf_attn_bias_np, name='src_slf_attn_bias') ] dec_inputs = [ - to_variable(trg_word_np), to_variable(trg_pos_np), - to_variable(trg_slf_attn_bias_np), to_variable(trg_src_attn_bias_np) + to_variable( + trg_word_np, name='trg_word'), to_variable( + trg_pos_np, name='trg_pos'), to_variable( + trg_slf_attn_bias_np, name='trg_slf_attn_bias'), + to_variable( + trg_src_attn_bias_np, name='trg_src_attn_bias') ] - label = to_variable(lbl_word_np) - weight = to_variable(lbl_weight_np) + label = to_variable(lbl_word_np, name='lbl_word') + weight = to_variable(lbl_weight_np, name='lbl_weight') return enc_inputs, dec_inputs, label, weight @@ -211,7 +218,7 @@ def make_all_inputs(input_fields): # The placeholder for batch_size in compile time. Must be -1 currently to be # consistent with some ops' infer-shape output in compile time, such as the # sequence_expand op used in beamsearch decoder. -batch_size = 32 +batch_size = -1 # The placeholder for squence length in compile time. seq_len = ModelHyperParams.max_length # Here list the data shapes and data types of all inputs. 
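
The hunk above switches the compile-time batch_size placeholder in the transformer test to -1, while the hunk below regenerates the synthetic numpy inputs with the concrete TrainTaskConfig.batch_size. A minimal sketch of that split, assuming illustrative values (the real test takes the batch size and sequence length from TrainTaskConfig and ModelHyperParams, and uses a real vocabulary bound):

import numpy as np
import paddle.fluid as fluid

compile_time_batch = -1   # unknown batch dim in the static-graph declaration
runtime_batch = 32        # concrete size used when generating numpy test data
seq_len = 4               # illustrative only; the test uses ModelHyperParams.max_length

# Static graph: the data layer keeps -1 so compile-time infer-shape stays consistent.
src_word = fluid.layers.data(
    name='src_word',
    shape=[compile_time_batch, seq_len, 1],
    dtype='int64',
    append_batch_size=False)

# Test data: generated with the concrete batch size, as in the following hunk.
src_word_np = np.random.randint(
    1, 1000, size=(runtime_batch, seq_len, 1), dtype='int64')
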
@@ -305,54 +312,40 @@ sync = False # how many batches we use batch_num = 5 -np.random.seed = 1 +np.random.seed = 90 src_word_np = np.random.randint( 1, ModelHyperParams.src_vocab_size - 1, - size=(batch_size, seq_len, 1), + size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') src_pos_np = np.random.randint( - 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -src_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, - seq_len, seq_len).astype('float32') + 1, seq_len, size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') +src_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, + ModelHyperParams.n_head, seq_len, + seq_len).astype('float32') trg_word_np = np.random.randint( 1, ModelHyperParams.src_vocab_size - 1, - size=(batch_size, seq_len, 1), + size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') trg_pos_np = np.random.randint( - 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -trg_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, - seq_len, seq_len).astype('float32') -trg_src_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, - seq_len, seq_len).astype('float32') + 1, seq_len, size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') +trg_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, + ModelHyperParams.n_head, seq_len, + seq_len).astype('float32') +trg_src_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, + ModelHyperParams.n_head, seq_len, + seq_len).astype('float32') lbl_word_np = np.random.randint( 1, ModelHyperParams.src_vocab_size - 1, - size=(batch_size * seq_len, 1), + size=(TrainTaskConfig.batch_size * seq_len, 1), dtype='int64') -lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') - -# np.random.seed = 1 -# src_word_np = np.arange(0, 10).reshape([batch_size, seq_len, 1]).astype('int64') -# src_pos_np = np.random.randint( -# 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -# src_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, -# seq_len, seq_len).astype('float32') -# -# trg_word_np = np.arange(0, 10).reshape([batch_size, seq_len, 1]).astype('int64') -# trg_pos_np = np.random.randint( -# 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -# trg_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, -# seq_len, seq_len).astype('float32') -# trg_src_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, -# seq_len, seq_len).astype('float32') -# -# lbl_word_np = np.arange(0, 10).reshape([batch_size * seq_len, 1]).astype('int64') -# lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') -# +lbl_weight_np = np.random.randn(TrainTaskConfig.batch_size * seq_len, + 1).astype('float32') + pos_inp1 = position_encoding_init(ModelHyperParams.max_length, ModelHyperParams.d_model) pos_inp2 = position_encoding_init(ModelHyperParams.max_length, @@ -466,7 +459,7 @@ class MultiHeadAttentionLayer(Layer): x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False) transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3]) - #scale dot product attention + # scale dot product attention product = fluid.layers.matmul( x=transpose_q, y=transpose_k, @@ -739,7 +732,7 @@ class DecoderSubLayer(Layer): enc_attn_output_pp = self._multihead_attention_layer2( pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias) enc_attn_output = self._post_process_layer2( - slf_attn_output, enc_attn_output_pp, self._postprocess_cmd, + slf_attn_output_pp, 
enc_attn_output_pp, self._postprocess_cmd, self._prepostprcess_dropout) pre_process_rlt3 = self._pre_process_layer3(None, enc_attn_output, self._preprocess_cmd, @@ -990,16 +983,18 @@ class TestDygraphTransformer(unittest.TestCase): enc_inputs, dec_inputs, label, weights = create_data() dy_sum_cost, dy_avg_cost, dy_predict, dy_token_num = transformer( enc_inputs, dec_inputs, label, weights) + if i == 0: for param in transformer.parameters(): - dy_param_init[param.name] = param._numpy() + dy_param_init[param.name] = param.numpy() - dy_avg_cost._backward() + dy_avg_cost.backward() optimizer.minimize(dy_avg_cost) transformer.clear_gradients() + if i == batch_num - 1: for param in transformer.parameters(): - dy_param_updated[param.name] = param._numpy() + dy_param_updated[param.name] = param.numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed @@ -1043,7 +1038,6 @@ class TestDygraphTransformer(unittest.TestCase): static_param_name_list = list() static_sum_cost, static_avg_cost, static_predict, static_token_num = transformer( enc_inputs, dec_inputs, label, weights) - optimizer.minimize(static_avg_cost) for param in transformer.parameters(): static_param_name_list.append(param.name) @@ -1061,8 +1055,8 @@ class TestDygraphTransformer(unittest.TestCase): static_sum_cost, static_avg_cost, static_predict, static_token_num ] - fetch_list.extend(static_param_name_list) + fetch_list.extend(static_param_name_list) out = exe.run(fluid.default_main_program(), feed=feed_dict, fetch_list=fetch_list) @@ -1076,13 +1070,14 @@ class TestDygraphTransformer(unittest.TestCase): 4]] = out[k] self.assertTrue( - np.array_equal(static_avg_cost_value, dy_avg_cost._numpy())) + np.array_equal(static_avg_cost_value, dy_avg_cost.numpy())) self.assertTrue( - np.array_equal(static_sum_cost_value, dy_sum_cost._numpy())) + np.array_equal(static_sum_cost_value, dy_sum_cost.numpy())) self.assertTrue( - np.array_equal(static_predict_value, dy_predict._numpy())) + np.array_equal(static_predict_value, dy_predict.numpy())) self.assertTrue( - np.array_equal(static_token_num_value, dy_token_num._numpy())) + np.array_equal(static_token_num_value, dy_token_num.numpy())) + for key, value in six.iteritems(static_param_init): self.assertTrue(np.array_equal(value, dy_param_init[key])) for key, value in six.iteritems(static_param_updated): diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 98b39256aad8435a1b54fe11fbb8d4677f18e99c..38d0533a7ec820241c6a08f2180a7426984068f2 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -114,7 +114,7 @@ class TestLayer(LayerTest): dy_ret = fc2(ret) self.assertTrue(np.array_equal(static_ret, static_ret2)) - self.assertTrue(np.array_equal(static_ret, dy_ret._numpy())) + self.assertTrue(np.array_equal(static_ret, dy_ret.numpy())) def test_layer_norm(self): inp = np.ones([3, 32, 32], dtype='float32') @@ -142,7 +142,7 @@ class TestLayer(LayerTest): dy_ret = lm(base.to_variable(inp)) self.assertTrue(np.allclose(static_ret, static_ret2)) - self.assertTrue(np.allclose(dy_ret._numpy(), static_ret2)) + self.assertTrue(np.allclose(dy_ret.numpy(), static_ret2)) def test_relu(self): with self.static_graph(): @@ -156,7 +156,7 @@ class TestLayer(LayerTest): t = np.ones([3, 3], dtype='float32') dy_ret = layers.relu(base.to_variable(t)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, 
dy_ret.numpy())) def test_matmul(self): with self.static_graph(): @@ -177,7 +177,7 @@ class TestLayer(LayerTest): t2 = np.ones([3, 3], dtype='float32') dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, dy_ret.numpy())) def test_conv2d(self): with self.static_graph(): @@ -204,7 +204,7 @@ class TestLayer(LayerTest): 'conv2d', num_channels=3, num_filters=3, filter_size=[2, 2]) dy_ret = conv2d(base.to_variable(images)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, dy_ret.numpy())) self.assertTrue(np.allclose(static_ret, static_ret2)) def test_gru_unit(self): @@ -246,7 +246,7 @@ class TestLayer(LayerTest): for i in range(len(static_ret)): self.assertTrue(np.allclose(static_ret[i], static_ret2[i])) - self.assertTrue(np.allclose(static_ret[i], dy_ret[i]._numpy())) + self.assertTrue(np.allclose(static_ret[i], dy_ret[i].numpy())) def test_elementwise_math(self): n = np.ones([3, 3], dtype='float32') @@ -288,8 +288,8 @@ class TestLayer(LayerTest): ret = layers.elementwise_sub(ret, n5) dy_ret = layers.elementwise_mul(ret, n6) self.assertTrue( - np.allclose(static_ret, dy_ret._numpy()), - '%s vs %s' % (static_ret, dy_ret._numpy())) + np.allclose(static_ret, dy_ret.numpy()), + '%s vs %s' % (static_ret, dy_ret.numpy())) def test_elementwise_minmax(self): n = np.ones([3, 3], dtype='float32') @@ -299,8 +299,8 @@ class TestLayer(LayerTest): min_ret = layers.elementwise_min(n, n2) max_ret = layers.elementwise_max(n, n2) - self.assertTrue(np.allclose(n, min_ret._numpy())) - self.assertTrue(np.allclose(n2, max_ret._numpy())) + self.assertTrue(np.allclose(n, min_ret.numpy())) + self.assertTrue(np.allclose(n2, max_ret.numpy())) def test_sequence_conv(self): inp_np = np.arange(12).reshape([3, 4]).astype('float32') @@ -367,7 +367,7 @@ class TestLayer(LayerTest): 'conv2d_transpose', num_filters=10, output_size=28) dy_rlt = conv2d_transpose(base.to_variable(inp_np)) self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt)) def test_bilinear_tensor_product(self): inp_np_x = np.array([[1, 2, 3]]).astype('float32') @@ -410,7 +410,7 @@ class TestLayer(LayerTest): dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y)) self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt)) def test_prelu(self): inp_np = np.ones([5, 200, 100, 100]).astype('float32') @@ -451,7 +451,7 @@ class TestLayer(LayerTest): dy_rlt = prelu(base.to_variable(inp_np)) self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt)) def test_embeding(self): inp_word = np.array([[[1]]]).astype('int64') @@ -484,7 +484,7 @@ class TestLayer(LayerTest): static_rlt3 = emb2(base.to_variable(inp_word)) self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(static_rlt3._numpy(), static_rlt)) + self.assertTrue(np.allclose(static_rlt3.numpy(), static_rlt)) def test_nce(self): window_size = 5 @@ -598,7 +598,7 @@ class TestLayer(LayerTest): nce_loss3 = nce(embs3, words[label_word]) self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(nce_loss3._numpy(), static_rlt)) + 
self.assertTrue(np.allclose(nce_loss3.numpy(), static_rlt)) def test_conv3d(self): with self.static_graph(): @@ -625,7 +625,7 @@ class TestLayer(LayerTest): conv3d = nn.Conv3D('conv3d', num_filters=3, filter_size=2) dy_ret = conv3d(base.to_variable(images)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, dy_ret.numpy())) self.assertTrue(np.allclose(static_ret, static_ret2)) def test_row_conv(self): @@ -719,7 +719,7 @@ class TestLayer(LayerTest): groupNorm = nn.GroupNorm('GroupNorm', groups=2) dy_ret = groupNorm(base.to_variable(input)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, dy_ret.numpy())) self.assertTrue(np.allclose(static_ret, static_ret2)) def test_spectral_norm(self): @@ -769,7 +769,7 @@ class TestLayer(LayerTest): spectralNorm = nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2) dy_ret = spectralNorm(base.to_variable(input)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, dy_ret.numpy())) self.assertTrue(np.allclose(static_ret, static_ret2)) def test_tree_conv(self): @@ -842,7 +842,7 @@ class TestLayer(LayerTest): dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj)) self.assertTrue(np.allclose(static_ret, static_ret2)) - self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, dy_ret.numpy())) def test_conv3d_transpose(self): input_array = np.arange(0, 48).reshape( @@ -872,7 +872,7 @@ class TestLayer(LayerTest): use_cudnn=False) dy_rlt = conv3d_transpose(base.to_variable(input_array)) self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt)) class TestBook(LayerTest): @@ -907,7 +907,7 @@ class TestBook(LayerTest): if isinstance(dy_result, tuple): dy_result = dy_result[0] - self.assertTrue(np.array_equal(static_result[0], dy_result._numpy())) + self.assertTrue(np.array_equal(static_result[0], dy_result.numpy())) def _get_np_data(self, shape, dtype, append_batch_size=True): np.random.seed(self.seed) diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py index 5bb2260ef7a143670dd75fc88769603d1437173d..eb82af75e4a2bf834c010aede79d50b0d73c98bc 100644 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py @@ -73,7 +73,14 @@ class TestNearestInterpOp(OpTest): self.op_type = "nearest_interp" input_np = np.random.random(self.input_shape).astype("float32") - output_np = nearest_neighbor_interp_np(input_np, self.out_h, self.out_w, + if self.scale > 0: + out_h = int(self.input_shape[2] * self.scale) + out_w = int(self.input_shape[3] * self.scale) + else: + out_h = self.out_h + out_w = self.out_w + + output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, self.out_size, self.actual_shape, self.align_corners) self.inputs = {'X': input_np} @@ -84,6 +91,7 @@ class TestNearestInterpOp(OpTest): self.attrs = { 'out_h': self.out_h, 'out_w': self.out_w, + 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, } @@ -100,6 +108,7 @@ class TestNearestInterpOp(OpTest): self.input_shape = [2, 3, 4, 4] self.out_h = 2 self.out_w = 2 + self.scale = 0. 
self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True @@ -110,6 +119,7 @@ class TestNearestNeighborInterpCase1(TestNearestInterpOp): self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 + self.scale = 0. self.align_corners = True @@ -119,6 +129,7 @@ class TestNearestNeighborInterpCase2(TestNearestInterpOp): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 + self.scale = 0. self.align_corners = True @@ -128,6 +139,7 @@ class TestNearestNeighborInterpCase3(TestNearestInterpOp): self.input_shape = [1, 1, 128, 64] self.out_h = 64 self.out_w = 128 + self.scale = 0. self.align_corners = True @@ -137,6 +149,7 @@ class TestNearestNeighborInterpCase4(TestNearestInterpOp): self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 + self.scale = 0. self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True @@ -147,6 +160,7 @@ class TestNearestNeighborInterpCase5(TestNearestInterpOp): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 + self.scale = 0. self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True @@ -157,6 +171,7 @@ class TestNearestNeighborInterpCase6(TestNearestInterpOp): self.input_shape = [1, 1, 128, 64] self.out_h = 64 self.out_w = 128 + self.scale = 0. self.out_size = np.array([65, 129]).astype("int32") self.align_corners = True @@ -167,6 +182,7 @@ class TestNearestNeighborInterpActualShape(TestNearestInterpOp): self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 + self.scale = 0. self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True @@ -179,7 +195,15 @@ class TestNearestInterpOpUint8(OpTest): self.op_type = "nearest_interp" input_np = np.random.randint( low=0, high=256, size=self.input_shape).astype("uint8") - output_np = nearest_neighbor_interp_np(input_np, self.out_h, self.out_w, + + if self.scale > 0: + out_h = int(self.input_shape[2] * self.scale) + out_w = int(self.input_shape[3] * self.scale) + else: + out_h = self.out_h + out_w = self.out_w + + output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, self.out_size, self.actual_shape, self.align_corners) self.inputs = {'X': input_np} @@ -188,6 +212,7 @@ class TestNearestInterpOpUint8(OpTest): self.attrs = { 'out_h': self.out_h, 'out_w': self.out_w, + 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners } @@ -201,6 +226,7 @@ class TestNearestInterpOpUint8(OpTest): self.input_shape = [1, 3, 9, 6] self.out_h = 10 self.out_w = 9 + self.scale = 0. self.align_corners = True @@ -210,6 +236,7 @@ class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8): self.input_shape = [2, 3, 128, 64] self.out_h = 120 self.out_w = 50 + self.scale = 0. self.align_corners = True @@ -219,6 +246,7 @@ class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8): self.input_shape = [4, 1, 7, 8] self.out_h = 5 self.out_w = 13 + self.scale = 0. self.out_size = np.array([6, 15]).astype("int32") self.align_corners = True @@ -228,5 +256,38 @@ class TestNearestInterpWithoutCorners(TestNearestInterpOp): self.align_corners = False +class TestNearestNeighborInterpScale1(TestNearestInterpOp): + def init_test_case(self): + self.interp_method = 'nearest' + self.input_shape = [3, 2, 32, 16] + self.out_h = 64 + self.out_w = 32 + self.scale = 2. 
+ self.out_size = np.array([66, 40]).astype("int32") + self.align_corners = True + + +class TestNearestNeighborInterpScale2(TestNearestInterpOp): + def init_test_case(self): + self.interp_method = 'nearest' + self.input_shape = [3, 2, 32, 16] + self.out_h = 64 + self.out_w = 32 + self.scale = 1.5 + self.out_size = np.array([66, 40]).astype("int32") + self.align_corners = True + + +class TestNearestNeighborInterpScale3(TestNearestInterpOp): + def init_test_case(self): + self.interp_method = 'nearest' + self.input_shape = [3, 2, 32, 16] + self.out_h = 64 + self.out_w = 32 + self.scale = 1. + self.out_size = np.array([66, 40]).astype("int32") + self.align_corners = True + + if __name__ == "__main__": unittest.main()
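
Both interpolation test files in this patch gain a scale attribute: when it is positive, the expected output height and width are derived from the NCHW input shape rather than from out_h/out_w, while out_size and actual_shape are still passed to the numpy reference unchanged. A minimal sketch of that selection logic, using a hypothetical helper name (the tests inline this branch in setUp):

# Hypothetical helper; the tests inline this `if self.scale > 0` branch directly.
def expected_hw(input_shape, out_h, out_w, scale):
    # input_shape is NCHW; a positive scale overrides the explicit size.
    if scale > 0:
        return int(input_shape[2] * scale), int(input_shape[3] * scale)
    return out_h, out_w

# e.g. TestBilinearInterpScale3 above: shape [2, 3, 16, 32], scale = 1.5
assert expected_hw([2, 3, 16, 32], 60, 25, 1.5) == (24, 48)
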