diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 3aa2c7b9ea013dd977ef0051700df54e26a81307..a145b2fafe64f8c80ac7808583d6670ca0218c06 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -42,12 +42,10 @@ if(WITH_MKLDNN) pass_library(mkldnn_placement_pass base) pass_library(conv_bias_mkldnn_fuse_pass inference) pass_library(conv_relu_mkldnn_fuse_pass inference) + pass_library(conv_elementwise_add_mkldnn_fuse_pass inference) endif() cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector ) -if(WITH_MKLDNN) - pass_library(conv_elementwise_add_mkldnn_fuse_pass inference) -endif() set(GLOB_PASS_LIB ${PASS_LIBRARY} CACHE INTERNAL "Global PASS library") diff --git a/paddle/fluid/framework/ir/graph_helper_test.cc b/paddle/fluid/framework/ir/graph_helper_test.cc index cea902809339f9d45b0e2525163f08a3c1c44c95..260a73ae763bd2cdea9948e4d928377a7c718dda 100644 --- a/paddle/fluid/framework/ir/graph_helper_test.cc +++ b/paddle/fluid/framework/ir/graph_helper_test.cc @@ -200,15 +200,15 @@ TEST(GraphHelperTest, GraphNum) { Graph g(prog); BuildZeroGraph(&g); - ASSERT_EQ(GraphNum(g), 0); + ASSERT_EQ(GraphNum(g), 0UL); Graph g2(prog); BuildOneGraph(&g2); - ASSERT_EQ(GraphNum(g2), 1); + ASSERT_EQ(GraphNum(g2), 1UL); Graph g3(prog); BuildTwoGraphs(&g3); - ASSERT_EQ(GraphNum(g3), 2); + ASSERT_EQ(GraphNum(g3), 2UL); } } // namespace ir diff --git a/paddle/fluid/framework/ir/graph_test.cc b/paddle/fluid/framework/ir/graph_test.cc index cadda49c399a6d65079cacedfea61f4fd580a69a..7ed2f96eb24239d87965192d73f4ba200ff5dbeb 100644 --- a/paddle/fluid/framework/ir/graph_test.cc +++ b/paddle/fluid/framework/ir/graph_test.cc @@ -124,7 +124,7 @@ TEST(GraphTest, Basic) { ASSERT_EQ(n->outputs.size(), 0UL); } } - ASSERT_EQ(nodes.size(), 5); + ASSERT_EQ(nodes.size(), 5UL); } TEST(GraphTest, WriteAfterRead) { diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index 121e00b1a3d761f570138092a3e76ae2b722d28a..c293cf92b4f3d530109c76850df184af9cad7399 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -515,20 +515,14 @@ void OpDesc::InferShape(const BlockDesc &block) const { } void OpDesc::InferVarType(BlockDesc *block) const { + // There are a few places where the var type can be set. + // When a VarDesc is created, it is set to LOD_TENSOR by default. + // When an output variable is created, it is also set to LOD_TENSOR by default. + // We restrict this method to be the only place where an operator defines its + // customized var type inference. Hence, we don't do any "default" setting here. auto &info = OpInfoMap::Instance().Get(this->Type()); if (info.infer_var_type_) { info.infer_var_type_(*this, block); - } else { - // all output type is LoDTensor by default - VLOG(10) << this->Type() - << " has not registered InferVarType.
Set output variables to " - "LOD_TENSOR"; - for (auto &out_pair : this->outputs_) { - for (auto &out_var_name : out_pair.second) { - block->FindRecursiveOrCreateVar(out_var_name) - .SetType(proto::VarType::LOD_TENSOR); - } - } } } diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc index 7e689a37da8a16bd9b1ac6650b9322d2eb5a2c85..48bde2785e6a51afc0d2905ac31fe20a3c3019b6 100644 --- a/paddle/fluid/framework/program_desc_test.cc +++ b/paddle/fluid/framework/program_desc_test.cc @@ -103,7 +103,7 @@ TEST(ProgramDesc, copy_ctor) { ASSERT_EQ(1, op->GetBlockAttrId("sub_block")); found_sub_block = true; - ASSERT_EQ(2, op->GetBlocksAttrIds("sub_blocks").size()); + ASSERT_EQ(2UL, op->GetBlocksAttrIds("sub_blocks").size()); found_sub_blocks = true; } } diff --git a/paddle/fluid/framework/reader_test.cc b/paddle/fluid/framework/reader_test.cc index 50aca4b5a4ba7a93a1584a03cc16fe5d712a32b5..d812417a38200bcfdbdeac78800190647510a144 100644 --- a/paddle/fluid/framework/reader_test.cc +++ b/paddle/fluid/framework/reader_test.cc @@ -40,7 +40,7 @@ TEST(READER, decorate_chain) { auto endpoints = root->GetEndPoints(); ASSERT_EQ(endpoints.size(), 2U); ASSERT_NE(endpoints.count(end_point1.get()), 0UL); - ASSERT_NE(endpoints.count(end_point2.get()), 0); + ASSERT_NE(endpoints.count(end_point2.get()), 0UL); } { diff --git a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc index 67668298440e9af279e792f786a8123b71172a66..c2151eea0823f80feb17b014c1f739d2a15ae862 100644 --- a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc @@ -71,7 +71,7 @@ void profile(bool use_mkldnn = false) { } TEST(Analyzer_resnet50, profile) { profile(); } -#ifndef PADDLE_WITH_MKLDNN +#ifdef PADDLE_WITH_MKLDNN TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); } #endif diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index b1ee1080030b23e1ef7adefe3a0880f38e9099f5..5589b58b0618523b22f169f9ba9930c3ff3e3c48 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -50,7 +50,7 @@ void CompareResult(const std::vector &outputs, auto &ref_out = ref_outputs[i]; size_t size = VecReduceToInt(out.shape); size_t ref_size = VecReduceToInt(ref_out.shape); - EXPECT_GT(size, 0); + EXPECT_GT(size, 0UL); EXPECT_EQ(size, ref_size); EXPECT_EQ(out.dtype, ref_out.dtype); switch (out.dtype) { diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 6c95f4b9c5b87dad13959d3d7678a19b79dd96d2..78ef6f207eadea6799864fe22889103b468d1780 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -284,10 +284,10 @@ op_library(max_sequence_len_op DEPS lod_rank_table) op_library(sequence_conv_op DEPS context_project) op_library(sequence_pool_op DEPS sequence_pooling) if (NOT WIN32) -op_library(lstm_op DEPS sequence2batch lstm_compute) -op_library(hierarchical_sigmoid_op DEPS matrix_bit_code) -op_library(lstmp_op DEPS sequence2batch lstm_compute) -op_library(gru_op DEPS sequence2batch gru_compute) + op_library(lstm_op DEPS sequence2batch lstm_compute) + op_library(hierarchical_sigmoid_op DEPS matrix_bit_code) + op_library(lstmp_op DEPS sequence2batch lstm_compute) + op_library(gru_op DEPS sequence2batch gru_compute) endif(NOT WIN32) op_library(recurrent_op 
DEPS executor) op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale) @@ -316,7 +316,7 @@ op_library(save_op DEPS lod_tensor) op_library(load_op DEPS lod_tensor) op_library(save_combine_op DEPS lod_tensor) op_library(load_combine_op DEPS lod_tensor) -op_library(concat_op DEPS concat) +op_library(concat_op DEPS concat_and_split) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) @@ -348,6 +348,6 @@ cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory) cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op) cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op) if(NOT WIN32) -nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) + nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) endif() nv_test(dropout_op_test SRCS dropout_op_test.cc DEPS dropout_op tensor) diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc index b8b8b2290a0f002fd379032e28590b84a1da38e9..6257e04b010d8c580e69e466759e8e80d344c105 100644 --- a/paddle/fluid/operators/array_to_lod_tensor_op.cc +++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc @@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include <paddle/fluid/operators/math/concat.h> +#include <paddle/fluid/operators/math/concat_and_split.h> #include #include "paddle/fluid/framework/lod_rank_table.h" diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index b2c6495c442cd02679825425becc2160c303dcc6..bd474be0facb349c53a8766412311296383a86c5 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/math/concat.h" +#include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { @@ -89,29 +89,17 @@ class ConcatGradKernel : public framework::OpKernel { outputs.push_back(nullptr); } } + auto& dev_ctx = ctx.template device_context(); // Sometimes direct copies will be faster; this may need deeper analysis.
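+ // A hedged note on the branch below: when axis == 0, each input's gradient occupies a contiguous run of rows in out_grad, so one strided copy per + // output (StridedMemcpyWithAxis0, added to strided_memcpy.h in this patch) avoids launching the generic math::SplitFunctor; the outs.size() < 10 + // cutoff is a heuristic guess, not a measured threshold.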
if (axis == 0 && outs.size() < 10) { - size_t input_offset = 0; - const auto in_stride = framework::stride_numel(out_grad->dims()); - - for (size_t i = 0; i < outs.size(); ++i) { - auto out_stride = framework::stride_numel(ins[i]->dims()); - auto* out = outputs[i]; - if (out != nullptr) { - StridedNumelCopyWithAxis( - ctx.device_context(), axis, out->data(), out_stride, - out_grad->data() + input_offset, in_stride, out_stride[axis]); - } - input_offset += out_stride[axis]; - } + std::vector ref_shape; + ref_shape.insert(ref_shape.begin(), ins.begin(), ins.end()); + StridedMemcpyWithAxis0(dev_ctx, *out_grad, ref_shape, &outputs); } else { - auto& dev_ctx = ctx.template device_context(); - paddle::operators::math::ConcatGradFunctor - concat_grad_functor; - concat_grad_functor(dev_ctx, *out_grad, - ctx.MultiInput("X"), - static_cast(axis), &outputs); + math::SplitFunctor split_functor; + split_functor(dev_ctx, *out_grad, ctx.MultiInput("X"), + static_cast(axis), &outputs); } } }; diff --git a/paddle/fluid/operators/detection/generate_proposal_labels_op.cc b/paddle/fluid/operators/detection/generate_proposal_labels_op.cc index d7a53f1bef98ecda3ba7b36323678a11a632a15c..339e63a2be13cec7b641b3a9eeb083480fc4b86e 100644 --- a/paddle/fluid/operators/detection/generate_proposal_labels_op.cc +++ b/paddle/fluid/operators/detection/generate_proposal_labels_op.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/gather.h" -#include "paddle/fluid/operators/math/concat.h" +#include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index 8eab83fcd247fcd099ae1fa5dab1e67c2081bf9c..e72337a3e6f7884c3a05372e8732647e5910f3e4 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/operators/math/concat.h" +#include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/port.h" @@ -79,7 +79,7 @@ struct LoDTensorToArrayFunctor : public boost::static_visitor { template template void LoDTensorToArrayFunctorImpl::apply() { - math::ConcatGradFunctor func; + math::SplitFunctor func; func(*dev_ctx_, prev_functor_->input_, prev_functor_->ref_inputs_, 0, &prev_functor_->outputs_); } diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index c7bdec354735773a15b4c99baf9f7798f2d92564..5d0c0b4228d8e2890c8b8d8bd10e0df080251350 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -1,5 +1,5 @@ if (NOT WIN32) -add_subdirectory(detail) + add_subdirectory(detail) endif(NOT WIN32) function(math_library TARGET) @@ -35,7 +35,7 @@ function(math_library TARGET) endfunction() # please add new math_library in alphabetical order -math_library(concat) +math_library(concat_and_split) math_library(context_project DEPS im2col math_function) math_library(cross_entropy) math_library(cos_sim_functor) @@ -43,8 +43,8 @@ math_library(depthwise_conv) math_library(im2col) if (NOT WIN32) # windows do not support avx functions yet. -math_library(gru_compute DEPS activation_functions math_function) -math_library(lstm_compute DEPS activation_functions) + math_library(gru_compute DEPS activation_functions math_function) + math_library(lstm_compute DEPS activation_functions) endif (NOT WIN32) cc_library(blas SRCS blas.cc DEPS cblas framework_proto device_context) @@ -58,7 +58,7 @@ math_library(sequence_pooling DEPS math_function) math_library(sequence_scale) math_library(softmax DEPS math_function) if (NOT WIN32) -math_library(matrix_bit_code) + math_library(matrix_bit_code) endif (NOT WIN32) math_library(unpooling) math_library(vol2col) @@ -72,7 +72,7 @@ if(WITH_GPU) nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function) nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor math_function) endif() -cc_test(concat_test SRCS concat_test.cc DEPS concat) +cc_test(concat_test SRCS concat_test.cc DEPS concat_and_split) cc_test(cpu_vec_test SRCS cpu_vec_test.cc DEPS blas cpu_info) cc_library(jit_kernel SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_lstm.cc diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat_and_split.cc similarity index 95% rename from paddle/fluid/operators/math/concat.cc rename to paddle/fluid/operators/math/concat_and_split.cc index 7b79f10e33d4474e279c6e46208722d6b52277fc..c6e17fd042f19bbeee3507e4cd64f49cff369682 100644 --- a/paddle/fluid/operators/math/concat.cc +++ b/paddle/fluid/operators/math/concat_and_split.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/math/concat.h" +#include "paddle/fluid/operators/math/concat_and_split.h" #include namespace paddle { @@ -67,7 +67,7 @@ class ConcatFunctor { * each dimension must be the same, except the axis dimension. 
*/ template -class ConcatGradFunctor { +class SplitFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, @@ -111,7 +111,7 @@ class ConcatGradFunctor { }; #define DEFINE_FUNCTOR(type) \ template class ConcatFunctor; \ - template class ConcatGradFunctor; + template class SplitFunctor; FOR_ALL_TYPES(DEFINE_FUNCTOR); diff --git a/paddle/fluid/operators/math/concat.cu b/paddle/fluid/operators/math/concat_and_split.cu similarity index 90% rename from paddle/fluid/operators/math/concat.cu rename to paddle/fluid/operators/math/concat_and_split.cu index b59d86e661aff25eba8e770247e85845365d628b..760a065c1081d1e55901774b258ba524471b856b 100644 --- a/paddle/fluid/operators/math/concat.cu +++ b/paddle/fluid/operators/math/concat_and_split.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/mixed_vector.h" -#include "paddle/fluid/operators/math/concat.h" +#include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" @@ -24,7 +24,7 @@ namespace operators { namespace math { template -__global__ void KernelConcat(T** inputs, const int* input_cols, int col_size, +__global__ void ConcatKernel(T** inputs, const int* input_cols, int col_size, const int output_rows, const int output_cols, T* output) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; @@ -50,7 +50,7 @@ __global__ void KernelConcat(T** inputs, const int* input_cols, int col_size, } template -__global__ void KernelConcat(T** inputs_data, const int fixed_in_col, +__global__ void ConcatKernel(T** inputs_data, const int fixed_in_col, const int out_rows, const int out_cols, T* output_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; @@ -67,9 +67,9 @@ __global__ void KernelConcat(T** inputs_data, const int fixed_in_col, } template -__global__ void KernelConcatGrad(const T* input_data, const int in_row, - const int in_col, const int* out_cols, - int out_cols_size, T** outputs_data) { +__global__ void SplitKernel(const T* input_data, const int in_row, + const int in_col, const int* out_cols, + int out_cols_size, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = out_cols[0]; @@ -94,9 +94,9 @@ __global__ void KernelConcatGrad(const T* input_data, const int in_row, } template -__global__ void KernelConcatGrad(const T* input_data, const int in_row, - const int in_col, const int fixed_out_col, - T** outputs_data) { +__global__ void SplitKernel(const T* input_data, const int in_row, + const int in_col, const int fixed_out_col, + T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int split = tid_x / fixed_out_col; @@ -170,11 +170,11 @@ class ConcatFunctor { dim3 grid_size = dim3(grid_cols, grid_rows, 1); if (sameShape) { - KernelConcat<<>>( + ConcatKernel<<>>( dev_ins_data, in_col, out_row, out_col, output->data()); } else { const int* dev_ins_col_data = inputs_col.CUDAData(context.GetPlace()); - KernelConcat<<>>( + ConcatKernel<<>>( dev_ins_data, dev_ins_col_data, static_cast(inputs_col.size()), out_row, out_col, output->data()); } @@ -189,7 +189,7 @@ class ConcatFunctor { * each dimension must be the same, except the axis dimension. 
*/ template -class ConcatGradFunctor { +class SplitFunctor { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, @@ -248,11 +248,11 @@ class ConcatGradFunctor { dim3 grid_size = dim3(grid_cols, grid_rows, 1); if (sameShape) { - KernelConcatGrad<<>>( + SplitKernel<<>>( input.data(), in_row, in_col, out0_col, dev_out_gpu_data); } else { const int* dev_outs_col_data = outputs_cols.CUDAData(context.GetPlace()); - KernelConcatGrad<<>>( + SplitKernel<<>>( input.data(), in_row, in_col, dev_outs_col_data, static_cast(outputs_cols.size()), dev_out_gpu_data); } @@ -264,7 +264,7 @@ class ConcatGradFunctor { #define DEFINE_FUNCTOR(type) \ template class ConcatFunctor; \ - template class ConcatGradFunctor + template class SplitFunctor FOR_ALL_TYPES(DEFINE_FUNCTOR); diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat_and_split.h similarity index 98% rename from paddle/fluid/operators/math/concat.h rename to paddle/fluid/operators/math/concat_and_split.h index 867a84fa873a2e90bdab7a5eecbb1755cb4b02d1..3a5eddcbf4af699a89ae1a21571337155699a1f3 100644 --- a/paddle/fluid/operators/math/concat.h +++ b/paddle/fluid/operators/math/concat_and_split.h @@ -54,7 +54,7 @@ class ConcatFunctor { * Output[1] = [[5,6]] */ template -class ConcatGradFunctor { +class SplitFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, const std::vector& ref_inputs, diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc index a46f2d51ca64501a622b5b48b424dffa16efc5b4..8ba9e8e8ec1344edc3beaf7f4a58f99107cc0e9c 100644 --- a/paddle/fluid/operators/math/concat_test.cc +++ b/paddle/fluid/operators/math/concat_test.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/operators/math/concat.h" #include #include #include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/operators/math/concat_and_split.h" template void testConcat() { diff --git a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc index 8cd505806056f1af33712e2c92b7661d87485708..dc0940ac0b78d295b5088cb6ae26300da1dc883d 100644 --- a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc +++ b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc @@ -237,7 +237,7 @@ TEST(BlockingQueue, speed_test_mode) { } for (size_t i = 0; i < queue_size; ++i) { q2.Receive(&b); - EXPECT_EQ(b, 0); + EXPECT_EQ(b, 0UL); } EXPECT_EQ(q2.Size(), queue_size); } diff --git a/paddle/fluid/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_concat_op.h index 33e9babff274af888b84d33c991cc0a5b70333ae..ff035f421c4907ba940b973b3fd2a9421ed2dbae 100644 --- a/paddle/fluid/operators/sequence_concat_op.h +++ b/paddle/fluid/operators/sequence_concat_op.h @@ -17,7 +17,7 @@ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/operators/math/concat.h" +#include "paddle/fluid/operators/math/concat_and_split.h" namespace paddle { namespace operators { @@ -106,7 +106,7 @@ class SeqConcatGradKernel : public framework::OpKernel { } } - math::ConcatGradFunctor functor; + math::SplitFunctor functor; std::vector sliced_x_ptr; std::vector sliced_dx_ptr; for (auto &x : sliced_x) { diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index d661b276bc31bf0c3ab181d706ffdccec89f0632..a05582ae09e16ee17194d299d713d321f28ccace 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -111,11 +111,10 @@ Example: } // namespace paddle namespace ops = paddle::operators; -USE_CPU_ONLY_OP(concat); REGISTER_OPERATOR(split, ops::SplitOp, ops::SplitOpMaker, ops::SplitGradMaker); -REGISTER_OP_CPU_KERNEL(split, - ops::SplitOpKernel, - ops::SplitOpKernel, - ops::SplitOpKernel, - ops::SplitOpKernel); +REGISTER_OP_CPU_KERNEL( + split, ops::SplitOpKernel, + ops::SplitOpKernel, + ops::SplitOpKernel, + ops::SplitOpKernel); diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h index f0c417c70521b1bb3816f884d6ab7393473999e4..6f4a25ab5ed86937f2f5db532a9eba22b5a2c5be 100644 --- a/paddle/fluid/operators/split_op.h +++ b/paddle/fluid/operators/split_op.h @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include // NOLINT #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { @@ -28,18 +29,22 @@ class SplitOpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); auto outs = ctx.MultiOutput("Out"); - auto in_stride = framework::stride_numel(in->dims()); - int64_t axis = static_cast(ctx.Attr("axis")); + int axis = ctx.Attr("axis"); auto place = ctx.GetPlace(); - size_t input_offset = 0; - for (auto& out : outs) { - out->mutable_data(ctx.GetPlace()); - auto out_stride = framework::stride_numel(out->dims()); - StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data(), - out_stride, in->data() + input_offset, - in_stride, out_stride[axis]); - input_offset += out_stride[axis]; + std::vector shape_refer; + for (size_t j = 0; j < outs.size(); ++j) { + outs[j]->mutable_data(ctx.GetPlace()); + shape_refer.emplace_back(outs[j]); + } + + auto& dev_ctx = ctx.template device_context(); + // Sometimes direct copies will be faster; this may need deeper analysis. + if (axis == 0 && outs.size() < 10) { + StridedMemcpyWithAxis0(dev_ctx, *in, shape_refer, &outs); + } else { + math::SplitFunctor functor; + functor(dev_ctx, *in, shape_refer, axis, &outs); } } }; diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 7a10218e1556698f3e0a1828db5de8851dd1c90b..c3d83a06f23a34ec8cf27d585863135ebfd56a4f 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -13,8 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/detail/strided_memcpy.h" - namespace paddle { namespace operators { @@ -98,5 +99,26 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx, } } +template +inline void StridedMemcpyWithAxis0( + const platform::DeviceContext& dev_ctx, const framework::Tensor& input, + const std::vector& shape_refer, + std::vector* outputs) { + const framework::DDim in_stride = stride_numel(input.dims()); + const int axis = 0; + size_t input_offset = 0; + + for (size_t i = 0; i < outputs->size(); ++i) { + auto out_stride = stride_numel(shape_refer[i]->dims()); + auto out = outputs->at(i); + if (out != nullptr) { + StridedNumelCopyWithAxis(dev_ctx, axis, out->data(), out_stride, + input.data() + input_offset, in_stride, + out_stride[axis]); + } + input_offset += out_stride[axis]; + } +} + } // namespace operators } // namespace paddle diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index bd9727b6ac0208b199091db00bd0fd5fae74d53b..dc317de9abbd06f4021e64b87ea88ba6af8809c9 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -324,10 +324,19 @@ class LayerHelper(object): raise ValueError("no Parameter name %s found" % name) return param - def create_tmp_variable(self, dtype, stop_gradient=False): + def create_variable_for_type_inference(self, dtype, stop_gradient=False): + """Create a temporary variable whose type will be inferred by the operator that outputs it. + + Note: + The default type is set to LOD_TENSOR. However, when + the var is used as an operator output, its type will be updated + based on the operator's `VarTypeInference` implementation in + infer_var_type.
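+ + Examples: + A minimal sketch (assumes an existing input variable ``x``): + + out = helper.create_variable_for_type_inference(dtype=x.dtype) + helper.append_op( + type='abs', inputs={'X': [x]}, outputs={'Out': [out]})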
+ """ return self.main_program.current_block().create_var( name=unique_name.generate(".".join([self.name, 'tmp'])), dtype=dtype, + type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, stop_gradient=stop_gradient) @@ -388,7 +397,7 @@ class LayerHelper(object): b = self.create_parameter( attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True) - tmp = self.create_tmp_variable(dtype=input_var.dtype) + tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) self.append_op( type='elementwise_add', inputs={'X': [input_var], @@ -414,7 +423,7 @@ class LayerHelper(object): tmp = input_var # NOTE(dzhwinter): some activation support inplace compution. if not core.IsInplace(act_type): - tmp = self.create_tmp_variable(dtype=input_var.dtype) + tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) self.append_op( type=act_type, inputs={"X": [input_var]}, diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 4af97e8632a47fbd981362dc8249a3f6b7269ecd..459be4339b01228ba1b8f18e17e472ed72928511 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -80,8 +80,8 @@ def split_lod_tensor(input, mask, level=0): """ helper = LayerHelper('split_lod_tensor', **locals()) - out_true = helper.create_tmp_variable(dtype=input.dtype) - out_false = helper.create_tmp_variable(dtype=input.dtype) + out_true = helper.create_variable_for_type_inference(dtype=input.dtype) + out_false = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='split_lod_tensor', inputs={ @@ -131,7 +131,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): in_true=out_true, in_false=out_false, mask=y, x=x, level=level) """ helper = LayerHelper('merge_lod_tensor', **locals()) - out = helper.create_tmp_variable(dtype=in_true.dtype) + out = helper.create_variable_for_type_inference(dtype=in_true.dtype) helper.append_op( type='merge_lod_tensor', inputs={'X': x, @@ -524,7 +524,7 @@ class StaticRNN(object): if not isinstance(o, Variable): raise TypeError("step output takes a Variable") - tmp_o = self.helper.create_tmp_variable(dtype=o.dtype) + tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype) self.helper.append_op( type='rnn_memory_helper', inputs={'X': [o]}, @@ -606,7 +606,8 @@ class StaticRNN(object): pre_memories.append(mem.pre_mem.name) mem_var = rnn_block.var(mem.mem.name) assert isinstance(mem_var, Variable) - new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype) + new_mem = self.helper.create_variable_for_type_inference( + dtype=mem_var.dtype) rnn_block.append_op( type='rnn_memory_helper', @@ -813,7 +814,7 @@ def max_sequence_len(rank_table): ${out_comment}. 
""" helper = LayerHelper("max_seqence_len", **locals()) - res = helper.create_tmp_variable(dtype="int64") + res = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="max_sequence_len", inputs={"RankTable": rank_table}, @@ -884,7 +885,7 @@ def array_to_lod_tensor(x, table): lod_tensor = fluid.layers.array_to_lod_tensor(array, table) """ helper = LayerHelper("array_to_lod_tensor", **locals()) - tmp = helper.create_tmp_variable(dtype=x.dtype) + tmp = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="array_to_lod_tensor", inputs={'X': x, @@ -915,7 +916,7 @@ def increment(x, value=1.0, in_place=True): """ helper = LayerHelper("increment", **locals()) if not in_place: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = x helper.append_op( @@ -1012,7 +1013,7 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored): """ helper = LayerHelper("less_than", **locals()) if cond is None: - cond = helper.create_tmp_variable(dtype='bool') + cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True attrs = dict() @@ -1051,7 +1052,7 @@ def equal(x, y, cond=None, **ignored): """ helper = LayerHelper("equal", **locals()) if cond is None: - cond = helper.create_tmp_variable(dtype='bool') + cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True helper.append_op( @@ -1098,7 +1099,7 @@ def array_read(array, i): array, Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: raise TypeError("array should be tensor array vairable") - out = helper.create_tmp_variable(dtype=array.dtype) + out = helper.create_variable_for_type_inference(dtype=array.dtype) helper.append_op( type='read_from_array', inputs={'X': [array], @@ -1133,7 +1134,7 @@ def shrink_memory(x, i, table): usage. 
""" helper = LayerHelper('shrink_memory', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='shrink_rnn_memory', inputs={'X': [x], @@ -1170,7 +1171,7 @@ def array_length(array): """ helper = LayerHelper('array_length', **locals()) - tmp = helper.create_tmp_variable(dtype='int64') + tmp = helper.create_variable_for_type_inference(dtype='int64') tmp.stop_gradient = True helper.append_op( type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}) @@ -1590,7 +1591,7 @@ class DynamicRNN(object): self.mem_dict = dict() self.output_array = [] self.outputs = [] - self.cond = self.helper.create_tmp_variable(dtype='bool') + self.cond = self.helper.create_variable_for_type_inference(dtype='bool') self.cond.stop_gradient = False self.while_op = While(self.cond) self.input_array = [] @@ -1924,7 +1925,7 @@ def reorder_lod_tensor_by_rank(x, rank_table): helper.is_instance('x', Variable) helper.is_instance('rank_table', Variable) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='reorder_lod_tensor_by_rank', inputs={'X': [x], @@ -1958,7 +1959,7 @@ def is_empty(x, cond=None, **ignored): """ helper = LayerHelper("is_empty", **locals()) if cond is None: - cond = helper.create_tmp_variable(dtype='bool') + cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True elif not isinstance(cond, Variable): raise TypeError("cond takes a variable") diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 1cfcbbb9c1614f21848e62cce79befc673e1739c..b94b59631a3d8999f569acf1027c71d3019f5c56 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -147,10 +147,11 @@ def rpn_target_assign(bbox_pred, helper = LayerHelper('rpn_target_assign', **locals()) # Assign target label to anchors - loc_index = helper.create_tmp_variable(dtype='int32') - score_index = helper.create_tmp_variable(dtype='int32') - target_label = helper.create_tmp_variable(dtype='int32') - target_bbox = helper.create_tmp_variable(dtype=anchor_box.dtype) + loc_index = helper.create_variable_for_type_inference(dtype='int32') + score_index = helper.create_variable_for_type_inference(dtype='int32') + target_label = helper.create_variable_for_type_inference(dtype='int32') + target_bbox = helper.create_variable_for_type_inference( + dtype=anchor_box.dtype) helper.append_op( type="rpn_target_assign", inputs={ @@ -282,7 +283,8 @@ def detection_output(loc, scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape) scores = nn.transpose(scores, perm=[0, 2, 1]) scores.stop_gradient = True - nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype) + nmsed_outs = helper.create_variable_for_type_inference( + dtype=decoded_box.dtype) helper.append_op( type="multiclass_nms", inputs={'Scores': scores, @@ -314,7 +316,7 @@ def iou_similarity(x, y, name=None): """ helper = LayerHelper("iou_similarity", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -351,7 +353,8 @@ def box_coder(prior_box, helper = LayerHelper("box_coder", **locals()) if name is None: - output_box = helper.create_tmp_variable(dtype=prior_box.dtype) + output_box = 
helper.create_variable_for_type_inference( + dtype=prior_box.dtype) else: output_box = helper.create_variable( name=name, dtype=prior_box.dtype, persistable=False) @@ -382,7 +385,7 @@ def polygon_box_transform(input, name=None): """ helper = LayerHelper("polygon_box_transform", **locals()) if name is None: - output = helper.create_tmp_variable(dtype=input.dtype) + output = helper.create_variable_for_type_inference(dtype=input.dtype) else: output = helper.create_variable( name=name, dtype=prior_box.input, persistable=False) @@ -450,7 +453,7 @@ def detection_map(detect_res, helper = LayerHelper("detection_map", **locals()) def __create_var(type): - return helper.create_tmp_variable(dtype=type) + return helper.create_variable_for_type_inference(dtype=type) map_out = __create_var('float32') accum_pos_count_out = out_states[0] if out_states else __create_var('int32') @@ -557,8 +560,9 @@ def bipartite_match(dist_matrix, >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou) """ helper = LayerHelper('bipartite_match', **locals()) - match_indices = helper.create_tmp_variable(dtype='int32') - match_distance = helper.create_tmp_variable(dtype=dist_matrix.dtype) + match_indices = helper.create_variable_for_type_inference(dtype='int32') + match_distance = helper.create_variable_for_type_inference( + dtype=dist_matrix.dtype) helper.append_op( type='bipartite_match', inputs={'DistMat': dist_matrix}, @@ -644,8 +648,8 @@ def target_assign(input, gt, matched_indices, mismatch_value=0) """ helper = LayerHelper('target_assign', **locals()) - out = helper.create_tmp_variable(dtype=input.dtype) - out_weight = helper.create_tmp_variable(dtype='float32') + out = helper.create_variable_for_type_inference(dtype=input.dtype) + out_weight = helper.create_variable_for_type_inference(dtype='float32') helper.append_op( type='target_assign', inputs={ @@ -816,9 +820,10 @@ def ssd_loss(location, conf_loss = nn.reshape( x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape) conf_loss.stop_gradient = True - neg_indices = helper.create_tmp_variable(dtype='int32') + neg_indices = helper.create_variable_for_type_inference(dtype='int32') dtype = matched_indices.dtype - updated_matched_indices = helper.create_tmp_variable(dtype=dtype) + updated_matched_indices = helper.create_variable_for_type_inference( + dtype=dtype) helper.append_op( type='mine_hard_examples', inputs={ @@ -998,8 +1003,8 @@ def prior_box(input, max_sizes = [max_sizes] attrs['max_sizes'] = max_sizes - box = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) + box = helper.create_variable_for_type_inference(dtype) + var = helper.create_variable_for_type_inference(dtype) helper.append_op( type="prior_box", inputs={"Input": input, @@ -1337,8 +1342,8 @@ def anchor_generator(input, 'offset': offset } - anchor = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) + anchor = helper.create_variable_for_type_inference(dtype) + var = helper.create_variable_for_type_inference(dtype) helper.append_op( type="anchor_generator", inputs={"Input": input}, @@ -1384,7 +1389,7 @@ def roi_perspective_transform(input, """ helper = LayerHelper('roi_perspective_transform', **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="roi_perspective_transform", inputs={"X": input, @@ -1418,11 +1423,15 @@ def generate_proposal_labels(rpn_rois, helper = LayerHelper('generate_proposal_labels', **locals()) - rois = 
helper.create_tmp_variable(dtype=rpn_rois.dtype) - labels_int32 = helper.create_tmp_variable(dtype=gt_classes.dtype) - bbox_targets = helper.create_tmp_variable(dtype=rpn_rois.dtype) - bbox_inside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype) - bbox_outside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype) + rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype) + labels_int32 = helper.create_variable_for_type_inference( + dtype=gt_classes.dtype) + bbox_targets = helper.create_variable_for_type_inference( + dtype=rpn_rois.dtype) + bbox_inside_weights = helper.create_variable_for_type_inference( + dtype=rpn_rois.dtype) + bbox_outside_weights = helper.create_variable_for_type_inference( + dtype=rpn_rois.dtype) helper.append_op( type="generate_proposal_labels", @@ -1504,8 +1513,10 @@ def generate_proposals(scores, """ helper = LayerHelper('generate_proposals', **locals()) - rpn_rois = helper.create_tmp_variable(dtype=bbox_deltas.dtype) - rpn_roi_probs = helper.create_tmp_variable(dtype=scores.dtype) + rpn_rois = helper.create_variable_for_type_inference( + dtype=bbox_deltas.dtype) + rpn_roi_probs = helper.create_variable_for_type_inference( + dtype=scores.dtype) helper.append_op( type="generate_proposals", inputs={ diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index dcd5a064a85cbfeedf41b7dc122cacba987e6570..95e13669ad927cdda56153c7fe58325f4a3ec474 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -954,7 +954,7 @@ def read_file(reader): """ helper = LayerHelper('read_file') out = [ - helper.create_tmp_variable( + helper.create_variable_for_type_inference( stop_gradient=True, dtype='float32') for _ in range(len(reader.desc.shapes())) ] diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 8c11921d9bde0920f33368837302d39f36f45556..eea0a362a0c31083f304a2167d0fdadfb30fb640 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -202,10 +202,12 @@ def generate_layer_fn(op_type): out_var = out[0] if (isinstance(out, list) or isinstance(out, tuple)) else out else: - out_var = helper.create_tmp_variable(dtype=dtype) + out_var = helper.create_variable_for_type_inference(dtype=dtype) outputs[o_name] = [out_var] for name in intermediate_output_names: - outputs[name] = [helper.create_tmp_variable(dtype=dtype)] + outputs[name] = [ + helper.create_variable_for_type_inference(dtype=dtype) + ] helper.append_op( type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs) return helper.append_activation(out_var) @@ -229,7 +231,7 @@ def generate_layer_fn_noattr(op_type): def func(x, name=None): helper = LayerHelper(op_type, **locals()) - output = helper.create_tmp_variable(dtype=x.dtype) + output = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output}) return output diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py index a3064b565d096f7feda18379c66ffc8bf2f4a55c..b2d2c93ead80d781d0a55ca541a1b0bb4232ad81 100644 --- a/python/paddle/fluid/layers/metric_op.py +++ b/python/paddle/fluid/layers/metric_op.py @@ -58,11 +58,11 @@ def accuracy(input, label, k=1, correct=None, total=None): """ helper = LayerHelper("accuracy", **locals()) topk_out, topk_indices = nn.topk(input, k=k) - acc_out = helper.create_tmp_variable(dtype="float32") + acc_out = 
helper.create_variable_for_type_inference(dtype="float32") if correct is None: - correct = helper.create_tmp_variable(dtype="int64") + correct = helper.create_variable_for_type_inference(dtype="int64") if total is None: - total = helper.create_tmp_variable(dtype="int64") + total = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="accuracy", inputs={ @@ -124,8 +124,8 @@ def auc(input, auc_out=fluid.layers.auc(input=prediction, label=label) """ helper = LayerHelper("auc", **locals()) - auc_out = helper.create_tmp_variable(dtype="float64") - batch_auc_out = helper.create_tmp_variable(dtype="float64") + auc_out = helper.create_variable_for_type_inference(dtype="float64") + batch_auc_out = helper.create_variable_for_type_inference(dtype="float64") # make tp, tn, fp, fn persistable, so that can accumulate all batches. # for batch auc diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index e0a6c52c9e42732d320c79a6d27935c6103264c6..3a227841f7fcd1faed724e6f54f77a985b1c05f8 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -243,7 +243,7 @@ def fc(input, w = helper.create_parameter( attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) + tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( type="mul", inputs={"X": input_var, @@ -256,7 +256,7 @@ def fc(input, if len(mul_results) == 1: pre_bias = mul_results[0] else: - pre_bias = helper.create_tmp_variable(dtype) + pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sum", inputs={"X": mul_results}, @@ -315,7 +315,7 @@ def embedding(input, helper = LayerHelper('embedding', **locals()) w = helper.create_parameter( attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) + tmp = helper.create_variable_for_type_inference(dtype) padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( size[0] + padding_idx) helper.append_op( @@ -419,10 +419,10 @@ def dynamic_lstm(input, bias = helper.create_parameter( attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - hidden = helper.create_tmp_variable(dtype) - cell = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - batch_cell_pre_act = helper.create_tmp_variable(dtype) + hidden = helper.create_variable_for_type_inference(dtype) + cell = helper.create_variable_for_type_inference(dtype) + batch_gate = helper.create_variable_for_type_inference(dtype) + batch_cell_pre_act = helper.create_variable_for_type_inference(dtype) inputs = {'Input': input, 'Weight': weight, 'Bias': bias} batch_size = input.shape[0] if h_0: @@ -622,12 +622,12 @@ def dynamic_lstmp(input, bias = helper.create_parameter( attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - projection = helper.create_tmp_variable(dtype) - cell = helper.create_tmp_variable(dtype) - ordered_proj0 = helper.create_tmp_variable(dtype) - batch_hidden = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - batch_cell_pre_act = helper.create_tmp_variable(dtype) + projection = helper.create_variable_for_type_inference(dtype) + cell = helper.create_variable_for_type_inference(dtype) + ordered_proj0 = helper.create_variable_for_type_inference(dtype) + batch_hidden = helper.create_variable_for_type_inference(dtype) + batch_gate = helper.create_variable_for_type_inference(dtype) + batch_cell_pre_act = 
helper.create_variable_for_type_inference(dtype) helper.append_op( type='lstmp', @@ -752,10 +752,10 @@ def dynamic_gru(input, ), 'The shape of h0 should be(batch_size, %d)' % size inputs['H0'] = h_0 - hidden = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - batch_reset_hidden_prev = helper.create_tmp_variable(dtype) - batch_hidden = helper.create_tmp_variable(dtype) + hidden = helper.create_variable_for_type_inference(dtype) + batch_gate = helper.create_variable_for_type_inference(dtype) + batch_reset_hidden_prev = helper.create_variable_for_type_inference(dtype) + batch_hidden = helper.create_variable_for_type_inference(dtype) helper.append_op( type='gru', @@ -845,9 +845,9 @@ def gru_unit(input, weight = helper.create_parameter( attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) - gate = helper.create_tmp_variable(dtype) - reset_hidden_pre = helper.create_tmp_variable(dtype) - updated_hidden = helper.create_tmp_variable(dtype) + gate = helper.create_variable_for_type_inference(dtype) + reset_hidden_pre = helper.create_variable_for_type_inference(dtype) + updated_hidden = helper.create_variable_for_type_inference(dtype) inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': weight} # create bias if helper.bias_attr: @@ -897,10 +897,14 @@ def linear_chain_crf(input, label, param_attr=None): attr=helper.param_attr, shape=[size + 2, size], dtype=helper.input_dtype()) - alpha = helper.create_tmp_variable(dtype=helper.input_dtype()) - emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) - transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) - log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype()) + alpha = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + emission_exps = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + transition_exps = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + log_likelihood = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) helper.append_op( type='linear_chain_crf', inputs={"Emission": [input], @@ -939,7 +943,8 @@ def crf_decoding(input, param_attr, label=None): """ helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) - viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype()) + viterbi_path = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) helper.append_op( type='crf_decoding', inputs={"Emission": [input], @@ -963,9 +968,9 @@ def cos_sim(X, Y): Variable: the output of cosine(X, Y). 
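+ + Examples: + A minimal sketch (``X`` and ``Y`` are existing variables): + + out = fluid.layers.cos_sim(X, Y)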
""" helper = LayerHelper('cos_sim', **locals()) - out = helper.create_tmp_variable(dtype=X.dtype) - xnorm = helper.create_tmp_variable(dtype=X.dtype) - ynorm = helper.create_tmp_variable(dtype=X.dtype) + out = helper.create_variable_for_type_inference(dtype=X.dtype) + xnorm = helper.create_variable_for_type_inference(dtype=X.dtype) + ynorm = helper.create_variable_for_type_inference(dtype=X.dtype) helper.append_op( type='cos_sim', inputs={'X': [X], @@ -1009,8 +1014,9 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None): """ helper = LayerHelper('dropout', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) - mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + mask = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True) if (seed is None or seed == 0) and helper.main_program.random_seed != 0: seed = helper.main_program.random_seed @@ -1095,7 +1101,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=-100): cost = fluid.layers.cross_entropy(input=predict, label=label) """ helper = LayerHelper('cross_entropy', **locals()) - out = helper.create_tmp_variable(dtype=input.dtype) + out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='cross_entropy', inputs={'X': [input], @@ -1142,14 +1148,14 @@ def square_error_cost(input, label): """ helper = LayerHelper('square_error_cost', **locals()) - minus_out = helper.create_tmp_variable(dtype=input.dtype) + minus_out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='elementwise_sub', inputs={'X': [input], 'Y': [label]}, outputs={'Out': [minus_out]}) - square_out = helper.create_tmp_variable(dtype=input.dtype) + square_out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='square', inputs={'X': [minus_out]}, outputs={'Out': [square_out]}) @@ -1255,12 +1261,13 @@ def chunk_eval(input, helper = LayerHelper("chunk_eval", **locals()) # prepare output - precision = helper.create_tmp_variable(dtype="float32") - recall = helper.create_tmp_variable(dtype="float32") - f1_score = helper.create_tmp_variable(dtype="float32") - num_infer_chunks = helper.create_tmp_variable(dtype="int64") - num_label_chunks = helper.create_tmp_variable(dtype="int64") - num_correct_chunks = helper.create_tmp_variable(dtype="int64") + precision = helper.create_variable_for_type_inference(dtype="float32") + recall = helper.create_variable_for_type_inference(dtype="float32") + f1_score = helper.create_variable_for_type_inference(dtype="float32") + num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64") + num_label_chunks = helper.create_variable_for_type_inference(dtype="int64") + num_correct_chunks = helper.create_variable_for_type_inference( + dtype="int64") helper.append_op( type="chunk_eval", @@ -1327,7 +1334,7 @@ def sequence_conv(input, filter_shape = [filter_size * input.shape[1], num_filters] filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype) - pre_bias = helper.create_tmp_variable(dtype) + pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type='sequence_conv', @@ -1383,7 +1390,7 @@ def sequence_softmax(input, use_cudnn=False, name=None): """ helper = LayerHelper('sequence_softmax', **locals()) dtype = helper.input_dtype() - softmax_out = helper.create_tmp_variable(dtype) + softmax_out = helper.create_variable_for_type_inference(dtype) 
     helper.append_op(
         type="sequence_softmax",
         inputs={"X": input},
@@ -1437,7 +1444,7 @@ def softmax(input, use_cudnn=True, name=None):
     """
     helper = LayerHelper('softmax', **locals())
     dtype = helper.input_dtype()
-    softmax_out = helper.create_tmp_variable(dtype)
+    softmax_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="softmax",
         inputs={"X": input},
@@ -1600,7 +1607,7 @@ def conv2d(input,
         dtype=dtype,
         default_initializer=_get_default_param_initializer())

-    pre_bias = helper.create_tmp_variable(dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type=l_type,
@@ -1771,7 +1778,7 @@ def conv3d(input,
         dtype=dtype,
         default_initializer=_get_default_param_initializer())

-    pre_bias = helper.create_tmp_variable(dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type=l_type,
@@ -1850,8 +1857,8 @@ def sequence_pool(input, pool_type):
     """
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
-    max_index = helper.create_tmp_variable(dtype)
+    pool_out = helper.create_variable_for_type_inference(dtype)
+    max_index = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type="sequence_pool",
@@ -1887,7 +1894,7 @@ def sequence_concat(input, name=None):
         out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
     """
     helper = LayerHelper('sequence_concat', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]})
     return out
@@ -2014,7 +2021,7 @@ def sequence_slice(input, offset, length, name=None):
     """
     helper = LayerHelper("sequence_slice", **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     offset.stop_gradient = True
     length.stop_gradient = True

@@ -2100,7 +2107,7 @@ def pool2d(input,

     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
+    pool_out = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type=l_type,
@@ -2168,7 +2175,7 @@ def pool3d(input,
     l_type = "pool3d"
     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
+    pool_out = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type=l_type,
@@ -2311,10 +2318,13 @@ def batch_norm(input,
     mean_out = mean
     # variance and variance out share the same memory
     variance_out = variance
-    saved_mean = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    saved_variance = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
+    saved_mean = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    saved_variance = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)

-    batch_norm_out = input if in_place else helper.create_tmp_variable(dtype)
+    batch_norm_out = input if in_place else helper.create_variable_for_type_inference(
+        dtype)

     helper.append_op(
         type="batch_norm",
@@ -2431,9 +2441,11 @@ def layer_norm(input,
         inputs['Bias'] = bias

     # create output
-    mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    layer_norm_out = helper.create_tmp_variable(dtype)
+    mean_out = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    variance_out = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    layer_norm_out = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type="layer_norm",
@@ -2620,7 +2632,7 @@ def conv2d_transpose(input,
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

-    pre_bias = helper.create_tmp_variable(dtype=input.dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type=op_type,
         inputs={'Input': [input],
@@ -2798,7 +2810,7 @@ def conv3d_transpose(input,
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

-    pre_bias = helper.create_tmp_variable(dtype=input.dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type=l_type,
         inputs={'Input': [input],
@@ -2877,7 +2889,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     """
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
-    tmp = helper.create_tmp_variable(dtype)
+    tmp = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_expand', inputs={'X': x,
@@ -2943,7 +2955,7 @@ def sequence_expand_as(x, y, name=None):
     """
     helper = LayerHelper('sequence_expand_as', input=x, **locals())
     dtype = helper.input_dtype()
-    tmp = helper.create_tmp_variable(dtype)
+    tmp = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_expand_as', inputs={'X': x,
@@ -2988,8 +3000,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):

     helper = LayerHelper('sequence_pad', input=x, **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
-    length = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
+    length = helper.create_variable_for_type_inference(dtype)

     pad_value.stop_gradient = True
     length.stop_gradient = True
@@ -3054,7 +3066,7 @@ def sequence_unpad(x, length, name=None):
     helper = LayerHelper('sequence_unpad', input=x, **locals())
     dtype = helper.input_dtype()

-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)

     length.stop_gradient = True

@@ -3153,8 +3165,9 @@ def beam_search(pre_ids,
     score_type = scores.dtype
     id_type = ids.dtype

-    selected_scores = helper.create_tmp_variable(dtype=score_type)
-    selected_ids = helper.create_tmp_variable(dtype=id_type)
+    selected_scores = helper.create_variable_for_type_inference(
+        dtype=score_type)
+    selected_ids = helper.create_variable_for_type_inference(dtype=id_type)

     helper.append_op(
         type='beam_search',
@@ -3211,8 +3224,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None):
             ids, scores, beam_size=5, end_id=0)
     """
     helper = LayerHelper('beam_search_decode', **locals())
-    sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
-    sentence_scores = helper.create_tmp_variable(dtype=ids.dtype)
+    sentence_ids = helper.create_variable_for_type_inference(dtype=ids.dtype)
+    sentence_scores = helper.create_variable_for_type_inference(dtype=ids.dtype)

     helper.append_op(
         type="beam_search_decode",
@@ -3342,8 +3355,8 @@ def lstm_unit(x_t,
                        param_attr=param_attr,
                        bias_attr=bias_attr)
     dtype = x_t.dtype
-    c = helper.create_tmp_variable(dtype)
-    h = helper.create_tmp_variable(dtype)
+    c = helper.create_variable_for_type_inference(dtype)
+    h = helper.create_variable_for_type_inference(dtype)

     helper.append_op(
         type='lstm_unit',
@@ -3397,7 +3410,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
     """
     helper = LayerHelper('reduce_sum', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3454,7 +3467,7 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_mean(x, dim=[0, 1])  # [4.0, 5.0]
     """
     helper = LayerHelper('reduce_mean', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3509,7 +3522,7 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_max(x, dim=[0, 1])  # [7.0, 8.0]
     """
     helper = LayerHelper('reduce_max', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3564,7 +3577,7 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_min(x, dim=[0, 1])  # [1.0, 2.0]
     """
     helper = LayerHelper('reduce_min', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3620,7 +3633,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_prod(x, dim=[0, 1])  # [105.0, 384.0]
     """
     helper = LayerHelper('reduce_prod', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3680,7 +3693,7 @@ def split(input, num_or_sections, dim=-1, name=None):
             dim], 'len(num_or_sections) must not be more than input.shape[dim].'
         num = len(num_or_sections)
     outs = [
-        helper.create_tmp_variable(dtype=helper.input_dtype())
+        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
         for i in range(num)
     ]
     helper.append_op(
@@ -3737,8 +3750,8 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
         axis = 0
     helper = LayerHelper("l2_normalize", **locals())

-    out = helper.create_tmp_variable(dtype=x.dtype)
-    norm = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="norm",
         inputs={"X": x},
@@ -3847,7 +3860,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     __check_input(x, y)

     helper = LayerHelper('matmul', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='matmul',
         inputs={'X': x,
@@ -3918,8 +3931,8 @@ def topk(input, k, name=None):
         top5_values, top5_indices = layers.topk(input, k=5)
     """
     helper = LayerHelper("top_k", **locals())
-    values = helper.create_tmp_variable(dtype=input.dtype)
-    indices = helper.create_tmp_variable(dtype="int64")
+    values = helper.create_variable_for_type_inference(dtype=input.dtype)
+    indices = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="top_k",
         inputs={"X": [input]},
@@ -3977,8 +3990,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None):

     # remove some tokens from input and labels
     if ignored_tokens is not None and len(ignored_tokens) > 0:
-        erased_input = helper.create_tmp_variable(dtype="int64")
-        erased_label = helper.create_tmp_variable(dtype="int64")
+        erased_input = helper.create_variable_for_type_inference(dtype="int64")
+        erased_label = helper.create_variable_for_type_inference(dtype="int64")

         helper.append_op(
             type="sequence_erase",
@@ -3995,8 +4008,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None):
         label = erased_label

     # edit distance op
-    edit_distance_out = helper.create_tmp_variable(dtype="int64")
-    sequence_num = helper.create_tmp_variable(dtype="int64")
+    edit_distance_out = helper.create_variable_for_type_inference(dtype="int64")
+    sequence_num = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="edit_distance",
         inputs={"Hyps": [input],
@@ -4071,7 +4084,7 @@ def ctc_greedy_decoder(input, blank, name=None):
     _, topk_indices = topk(input, k=1)

     # ctc align op
-    ctc_out = helper.create_tmp_variable(dtype="int64")
+    ctc_out = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="ctc_align",
         inputs={"Input": [topk_indices]},
@@ -4121,8 +4134,8 @@ def warpctc(input, label, blank=0, norm_by_times=False):

     """
     helper = LayerHelper('warpctc', **locals())
-    loss_out = helper.create_tmp_variable(dtype=input.dtype)
-    grad_out = helper.create_tmp_variable(dtype=input.dtype)
+    loss_out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    grad_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='warpctc',
         inputs={'Logits': [input],
@@ -4183,7 +4196,7 @@ def sequence_reshape(input, new_dim):
         x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
     """
     helper = LayerHelper('sequence_reshape', **locals())
-    out = helper.create_tmp_variable(helper.input_dtype())
+    out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
         type='sequence_reshape', inputs={'X': [input]},
@@ -4280,9 +4293,9 @@ def nce(input,
             is_bias=True,
             dtype=input.dtype)
         inputs['Bias'] = b
-    cost = helper.create_tmp_variable(dtype=input.dtype)
-    sample_logits = helper.create_tmp_variable(dtype=input.dtype)
-    sample_labels = helper.create_tmp_variable(dtype=label.dtype)
+    cost = helper.create_variable_for_type_inference(dtype=input.dtype)
+    sample_logits = helper.create_variable_for_type_inference(dtype=input.dtype)
+    sample_labels = helper.create_variable_for_type_inference(dtype=label.dtype)

     if num_neg_samples is None:
         num_neg_samples = 10
@@ -4358,8 +4371,8 @@ def hsigmoid(input,

     helper = LayerHelper('hierarchical_sigmoid', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
-    pre_out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
+    pre_out = helper.create_variable_for_type_inference(dtype)
     dim = input.shape[1]
     if num_classes < 2:
         raise ValueError("num_classes must not be less than 2.")
@@ -4419,8 +4432,8 @@ def transpose(x, perm, name=None):
                 (idx, perm[idx], len(x.shape)))

     helper = LayerHelper('transpose', **locals())
-    out = helper.create_tmp_variable(x.dtype)
-    x_shape = helper.create_tmp_variable(x.dtype)
+    out = helper.create_variable_for_type_inference(x.dtype)
+    x_shape = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
         type='transpose2',
         inputs={'X': [x]},
@@ -4562,7 +4575,7 @@ def im2sequence(input,
         inputs["Y"] = input_image_size
         attrs["out_stride"] = out_stride
     helper = LayerHelper('im2sequence', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
     return out
@@ -4595,7 +4608,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
     filter_shape = [future_context_size + 1, input.shape[1]]
     filter_param = helper.create_parameter(
         attr=helper.param_attr, shape=filter_shape, dtype=dtype)
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='row_conv',
         inputs={'X': [input],
@@ -4628,7 +4641,7 @@ def multiplex(inputs, index):
         raise ValueError("inputs should be a list object and contains at least "
                          "2 elements.")

-    out = helper.create_tmp_variable(inputs[0].dtype)
+    out = helper.create_variable_for_type_inference(inputs[0].dtype)
     helper.append_op(
         type='multiplex',
         inputs={'X': inputs,
@@ -4699,8 +4712,8 @@ def softmax_with_cross_entropy(logits,
             logits=fc, label=label)
     """
     helper = LayerHelper('softmax_with_cross_entropy', **locals())
-    softmax = helper.create_tmp_variable(dtype=logits.dtype)
-    loss = helper.create_tmp_variable(dtype=logits.dtype)
+    softmax = helper.create_variable_for_type_inference(dtype=logits.dtype)
+    loss = helper.create_variable_for_type_inference(dtype=logits.dtype)
     helper.append_op(
         type='softmax_with_cross_entropy',
         inputs={'Logits': logits,
@@ -4750,8 +4763,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):

     """
     helper = LayerHelper('smooth_l1_loss', **locals())
-    diff = helper.create_tmp_variable(dtype=x.dtype)
-    loss = helper.create_tmp_variable(dtype=x.dtype)
+    diff = helper.create_variable_for_type_inference(dtype=x.dtype)
+    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='smooth_l1_loss',
         inputs={
@@ -4784,7 +4797,7 @@ def one_hot(input, depth):
         one_hot_label = layers.one_hot(input=label, depth=10)
     """
     helper = LayerHelper("one_hot", **locals())
-    one_hot_out = helper.create_tmp_variable(dtype='float32')
+    one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
     helper.append_op(
         type="one_hot",
         inputs={'X': input},
@@ -4926,8 +4939,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
                                  "except one unknown dimension.")

     helper = LayerHelper("reshape2", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
-    x_shape = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="reshape2",
         inputs=inputs,
@@ -4976,8 +4989,8 @@ def squeeze(input, axes, name=None):
         y = layers.sequeeze(input=x, axes=[1])
     """
     helper = LayerHelper("squeeze", **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype)
-    x_shape = helper.create_tmp_variable(dtype=input.dtype)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type="squeeze2",
         inputs={"X": input},
@@ -5013,8 +5026,8 @@ def unsqueeze(input, axes, name=None):
         y = layers.unsequeeze(input=x, axes=[1])
     """
     helper = LayerHelper("unsqueeze", **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype)
-    x_shape = helper.create_tmp_variable(dtype=input.dtype)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type="unsqueeze2",
         inputs={"X": input},
@@ -5104,7 +5117,7 @@ def lod_reset(x, y=None, target_lod=None):
         out = layers.lod_reset(x=x, y=y)
     """
     helper = LayerHelper("lod_reset", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     if y is not None:
         helper.append_op(
             type="lod_reset", inputs={'X': x,
@@ -5173,8 +5186,9 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
             "dims of input must be 4(not %d), and it's order must be NCHW" %
             (dims))

-    mid_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    lrn_out = helper.create_tmp_variable(dtype)
+    mid_out = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    lrn_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="lrn",
         inputs={"X": input},
@@ -5239,7 +5253,7 @@ def pad(x, paddings, pad_value=0., name=None):
     """
     helper = LayerHelper('pad', input=x, **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='pad',
         inputs={'X': x},
@@ -5319,7 +5333,7 @@ def pad_constant_like(x, y, pad_value=0., name=None):
     """
     helper = LayerHelper('pad_constant_like', input=x, **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='pad_constant_like',
         inputs={'X': x,
@@ -5384,7 +5398,7 @@ def label_smooth(label,
         raise ValueError("The value of epsilon must be between 0 and 1.")
     helper = LayerHelper("label_smooth", **locals())
     label.stop_gradient = True
-    smooth_label = helper.create_tmp_variable(dtype)
+    smooth_label = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="label_smooth",
         inputs={"X": label,
@@ -5416,8 +5430,8 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
     """
     helper = LayerHelper('roi_pool', **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
-    argmaxes = helper.create_tmp_variable(dtype='int32')
+    pool_out = helper.create_variable_for_type_inference(dtype)
+    argmaxes = helper.create_variable_for_type_inference(dtype='int32')
     helper.append_op(
         type="roi_pool",
         inputs={"X": input,
@@ -5465,7 +5479,7 @@ def roi_align(input,
     """
     helper = LayerHelper('roi_align', **locals())
     dtype = helper.input_dtype()
-    align_out = helper.create_tmp_variable(dtype)
+    align_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="roi_align",
         inputs={"X": input,
@@ -5590,7 +5604,7 @@ def image_resize(input,
         out_h = int(input.shape[2] * scale)
         out_w = int(input.shape[3] * scale)

-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type=resample_methods[resample],
         inputs=inputs,
@@ -5699,7 +5713,7 @@ def gather(input, index):
     """
     helper = LayerHelper('gather', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="gather",
         inputs={"X": input,
@@ -5739,7 +5753,7 @@ def scatter(input, index, updates, name=None):
     """
     helper = LayerHelper('scatter', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="scatter",
         inputs={"X": input,
@@ -5799,7 +5813,7 @@ def sequence_scatter(input, index, updates, name=None):
     """
     helper = LayerHelper('sequence_scatter', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="sequence_scatter",
         inputs={"X": input,
@@ -5829,7 +5843,7 @@ def random_crop(x, shape, seed=None):
     """
     helper = LayerHelper("random_crop", **locals())
     dtype = x.dtype
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     if seed is None:
         seed = np.random.randint(-65536, 65536)
     op_attrs = {"shape": shape}
@@ -5875,7 +5889,7 @@ def log(x, name=None):
     """
     helper = LayerHelper('log', **locals())
     dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
     return out

@@ -5906,7 +5920,7 @@ def relu(x, name=None):
     """
     helper = LayerHelper('relu', **locals())
     dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
     return out

@@ -5945,9 +5959,9 @@ def mean_iou(input, label, num_classes):
     """
     helper = LayerHelper('mean_iou', **locals())
     dtype = helper.input_dtype()
-    out_mean_iou = helper.create_tmp_variable(dtype='float32')
-    out_wrong = helper.create_tmp_variable(dtype='int32')
-    out_correct = helper.create_tmp_variable(dtype='int32')
+    out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
+    out_wrong = helper.create_variable_for_type_inference(dtype='int32')
+    out_correct = helper.create_variable_for_type_inference(dtype='int32')
     helper.append_op(
         type="mean_iou",
         inputs={"Predictions": input,
@@ -6039,7 +6053,7 @@ def crop(x, shape=None, offsets=None, name=None):
     if offsets is None:
         offsets = [0] * len(x.shape)

-    out = helper.create_tmp_variable(x.dtype)
+    out = helper.create_variable_for_type_inference(x.dtype)
     ipts = {'X': x}
     attrs = {}
     if isinstance(shape, Variable):
@@ -6119,7 +6133,7 @@ def rank_loss(label, left, right, name=None):
     if not (isinstance(right, Variable)):
         raise ValueError("The right should be a Variable")

-    out = helper.create_tmp_variable("float32")
+    out = helper.create_variable_for_type_inference("float32")

     helper.append_op(
         type='rank_loss',
@@ -6165,8 +6179,8 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):
         raise ValueError("The left should be a Variable.")
     if not isinstance(right, Variable):
         raise ValueError("The right should be a Variable.")
-    out = helper.create_tmp_variable(left.dtype)
-    act = helper.create_tmp_variable(left.dtype)
+    out = helper.create_variable_for_type_inference(left.dtype)
+    act = helper.create_variable_for_type_inference(left.dtype)
     helper.append_op(
         type='margin_rank_loss',
         inputs={"Label": label,
@@ -6251,7 +6265,7 @@ def pad2d(input,

     helper = LayerHelper('pad2d', **locals())
     dtype = helper.input_dtype(input_param_name='input')
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='pad2d',
         inputs={'X': input},
@@ -6280,7 +6294,7 @@ def elu(x, alpha=1.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('elu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='elu',
         inputs={'X': x},
@@ -6303,7 +6317,7 @@ def relu6(x, threshold=6.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('relu6', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='relu6',
         inputs={'X': x},
@@ -6326,7 +6340,7 @@ def pow(x, factor=1.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('pow', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='pow',
         inputs={'X': x},
@@ -6350,7 +6364,7 @@ def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('stanh', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='stanh',
         inputs={'X': x},
@@ -6375,7 +6389,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('hard_sigmoid', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='hard_sigmoid',
         inputs={'X': x},
@@ -6399,7 +6413,7 @@ def swish(x, beta=1.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('swish', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='swish',
         inputs={'X': x},
@@ -6451,7 +6465,7 @@ def prelu(x, mode, param_attr=None, name=None):
         dtype='float32',
         is_bias=False,
         default_initializer=Constant(1.0))
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="prelu",
         inputs={"X": x,
@@ -6475,7 +6489,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
        output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('brelu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='brelu',
         inputs={'X': x},
@@ -6498,7 +6512,7 @@ def leaky_relu(x, alpha=0.02, name=None):
        output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('leaky_relu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='leaky_relu',
         inputs={'X': x},
@@ -6520,7 +6534,7 @@ def soft_relu(x, threshold=40.0, name=None):
        output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('soft_relu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='soft_relu',
         inputs={'X': x},
@@ -6587,8 +6601,8 @@ def flatten(x, axis=1, name=None):
     if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
         raise ValueError("The axis should be a int, and in range [0, rank(x)]")

-    out = helper.create_tmp_variable(x.dtype)
-    x_shape = helper.create_tmp_variable(x.dtype)
+    out = helper.create_variable_for_type_inference(x.dtype)
+    x_shape = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
         type='flatten2',
         inputs={"X": x},
@@ -6634,7 +6648,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
         out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
     helper = LayerHelper('sequence_enumerate', **locals())
-    out = helper.create_tmp_variable(helper.input_dtype(), stop_gradient=True)
+    out = helper.create_variable_for_type_inference(
+        helper.input_dtype(), stop_gradient=True)
     helper.append_op(
         type='sequence_enumerate',
         inputs={'X': input},
@@ -6674,9 +6689,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):

     helper = LayerHelper('sequence_mask', **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=dtype)
+        out = helper.create_variable_for_type_inference(dtype=dtype)
     else:
-        out = helper.create_tmp_variable(dtype=dtype, name=name)
+        out = helper.create_variable_for_type_inference(dtype=dtype, name=name)

     helper.append_op(
         type='sequence_mask',
@@ -6719,7 +6734,7 @@ def stack(x, axis=0):
     if not isinstance(x, list) and not isinstance(x, tuple):
         x = [x]

-    out = helper.create_tmp_variable(x[0].dtype)
+    out = helper.create_variable_for_type_inference(x[0].dtype)
     helper.append_op(
         type='stack', inputs={'X': x}, outputs={'Y': out},
         attrs={'axis': axis})
@@ -6757,7 +6772,7 @@ def unstack(x, axis=0, num=None):

     outs = []
     for _ in num:
-        outs.append(helper.create_tmp_variable(x.dtype))
+        outs.append(helper.create_variable_for_type_inference(x.dtype))

     helper.append_op(
         type='unstack',
@@ -6809,7 +6824,7 @@ def expand(x, expand_times, name=None):
     """
     helper = LayerHelper('expand', input=x, **locals())
     dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='expand',
         inputs={'X': x},
@@ -6848,7 +6863,7 @@ def uniform_random_batch_size_like(input,
     """

     helper = LayerHelper('uniform_random_batch_size_like', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
         type='uniform_random_batch_size_like',
@@ -6885,7 +6900,7 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
     """

     helper = LayerHelper('gaussian_random', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
         type='gaussian_random',
@@ -6920,7 +6935,7 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
     """

     helper = LayerHelper('sampling_id', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sampling_id',
         inputs={'X': x},
@@ -6959,7 +6974,7 @@ def gaussian_random_batch_size_like(input,
     """

     helper = LayerHelper('gaussian_random_batch_size_like', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
         type='gaussian_random_batch_size_like',
@@ -6991,7 +7006,8 @@ def sum(x):
     """

     helper = LayerHelper('sum', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype('x'))
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype('x'))
     helper.append_op(
         type='sum',
         inputs={'X': x},
@@ -7018,7 +7034,8 @@ def slice(input, axes, starts, ends):
     """

     helper = LayerHelper('slice', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype('input'))
     helper.append_op(
         type='slice',
         inputs={'Input': input},
@@ -7044,7 +7061,8 @@ def shape(input):
     """

     helper = LayerHelper('shape', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype('input'))
     helper.append_op(
         type='shape', inputs={'Input': input}, outputs={'Out': out})

@@ -7061,7 +7079,7 @@ def _elementwise_op(helper):
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
     name = helper.kwargs.get('name', None)
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7095,7 +7113,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):

     helper = LayerHelper('scale', **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7161,7 +7179,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):

     if out is None:
         if name is None:
-            out = helper.create_tmp_variable(dtype=x.dtype)
+            out = helper.create_variable_for_type_inference(dtype=x.dtype)
         else:
             out = helper.create_variable(
                 name=name, dtype=x.dtype, persistable=False)
@@ -7269,7 +7287,7 @@ def clip(x, min, max, name=None):

     helper = LayerHelper("clip", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7301,7 +7319,7 @@ def clip_by_norm(x, max_norm, name=None):

     helper = LayerHelper("clip_by_norm", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7331,7 +7349,7 @@ def mean(x, name=None):

     helper = LayerHelper("mean", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7361,7 +7379,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):

     helper = LayerHelper("mul", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7395,7 +7413,7 @@ def sigmoid_cross_entropy_with_logits(x, label, name=None):

     helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7425,7 +7443,7 @@ def maxout(x, groups, name=None):

     helper = LayerHelper("maxout", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7452,7 +7470,7 @@ def sequence_reverse(x, name=None):
     """
     helper = LayerHelper("sequence_reverse", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -7491,7 +7509,7 @@ def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None):

     helper = LayerHelper("affine_channel", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
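# The create_tmp_variable -> create_variable_for_type_inference rename above is
# mechanical, but it changes what a layer promises about its outputs: the new
# helper only pins the dtype and leaves the variable's type to be settled by
# the operator's type-inference pass, instead of assuming a default. A minimal
# sketch of the resulting layer pattern, using a hypothetical `my_op` operator
# (the op name is illustrative only, not part of this patch):

from paddle.fluid.layer_helper import LayerHelper


def my_op(x, name=None):
    helper = LayerHelper('my_op', **locals())
    if name is None:
        # Anonymous output: a placeholder whose var type is filled in later
        # by the framework's type inference.
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        # Named output: created directly, mirroring scale()/clip() above.
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)
    helper.append_op(type='my_op', inputs={'X': x}, outputs={'Out': out})
    return out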
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 9c6a2112a69637aad4baff9a0296330620e3c52f..09a7cb8dc9339afa666f8cf09e92a27ffba8a9b3 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -152,7 +152,7 @@ def cast(x, dtype):
         result = fluid.layers.cast(x=data, dtype='float64')
     """
     helper = LayerHelper('cast', **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='cast',
         inputs={'X': [x]},
@@ -184,7 +184,7 @@ def concat(input, axis=0, name=None):
         out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
     """
     helper = LayerHelper('concat', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='concat',
         inputs={'X': input},
@@ -221,7 +221,8 @@ def sums(input, out=None):
     """
     helper = LayerHelper('sum', **locals())
     if out is None:
-        out = helper.create_tmp_variable(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype())
     helper.append_op(
         type='sum',
         inputs={'X': input},
@@ -252,7 +253,7 @@ def assign(input, output=None):
     """
     helper = LayerHelper('assign', **locals())
     if output is None:
-        output = helper.create_tmp_variable(dtype=input.dtype)
+        output = helper.create_variable_for_type_inference(dtype=input.dtype)
     if isinstance(input, Variable):
         helper.append_op(
             type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
@@ -311,7 +312,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
     """
     helper = LayerHelper("fill_constant", **locals())
     if out is None:
-        out = helper.create_tmp_variable(dtype=dtype)
+        out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='fill_constant',
         inputs={},
@@ -358,7 +359,7 @@ def fill_constant_batch_size_like(input,
         ${out_comment}.
     """
     helper = LayerHelper("fill_constant_batch_size_like", **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='fill_constant_batch_size_like',
         inputs={'Input': input},
@@ -396,7 +397,7 @@ def argmin(x, axis=0):
         out = fluid.layers.argmin(x=in, axis=-1)
     """
     helper = LayerHelper("arg_min", **locals())
-    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
         type='arg_min',
         inputs={'X': x},
@@ -427,7 +428,7 @@ def argmax(x, axis=0):
         out = fluid.layers.argmax(x=in, axis=-1)
     """
     helper = LayerHelper("arg_max", **locals())
-    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
         type='arg_max',
         inputs={'X': x},
@@ -477,8 +478,10 @@ def argsort(input, axis=-1, name=None):
         out, indices = fluid.layers.argsort(input, axis=0)
     """
     helper = LayerHelper("argsort", **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True)
-    ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True)
+    out = helper.create_variable_for_type_inference(
+        dtype=input.dtype, stop_gradient=True)
+    ids = helper.create_variable_for_type_inference(
+        VarDesc.VarType.INT64, stop_gradient=True)
     helper.append_op(
         type='argsort',
         inputs={'X': input},
@@ -562,7 +565,7 @@ def reverse(x, axis):
     if isinstance(axis, int):
         axis = [axis]
     helper = LayerHelper("reverse", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='reverse',
         inputs={'Input': x},
@@ -654,7 +657,7 @@ def has_inf(x):
         Variable: The tensor variable storing the output, only a bool value.
     """
     helper = LayerHelper("isinf", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
     return out

@@ -670,7 +673,7 @@ def has_nan(x):
         Variable: The tensor variable storing the output, only a bool value.
     """
     helper = LayerHelper("isnan", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
     return out

@@ -687,6 +690,6 @@ def isfinite(x):
         Variable: The tensor variable storing the output, contains a bool value.
     """
     helper = LayerHelper("isfinite", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
     return out
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index 97644df007117be35ccd4d66495d8ca0ec7b0e88..c151fbd17208bb6e3104e8d0f6590392c6095987 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer):
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
-                type=core.VarDesc.VarType.SELECTED_ROWS)
+                type=core.VarDesc.VarType.LOD_TENSOR)
             block.append_op(
                 type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(
@@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer):
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
-                type=core.VarDesc.VarType.SELECTED_ROWS)
+                type=core.VarDesc.VarType.LOD_TENSOR)
             block.append_op(
                 type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(
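# The two regularizer hunks above fix a declared-type mismatch rather than a
# rename: the decay buffer holds the parameter rows gathered by lookup_table,
# which writes a dense tensor, so declaring it SELECTED_ROWS contradicted what
# the op actually produces. A sketch of a program that reaches this
# sparse-gradient path (shapes and coefficients here are arbitrary):

import paddle.fluid as fluid

ids = fluid.layers.data(name='ids', shape=[1], dtype='int64')
emb = fluid.layers.embedding(
    input=ids,
    size=[10000, 64],
    is_sparse=True,  # sparse gradient => grad arrives as a SELECTED_ROWS var
    param_attr=fluid.ParamAttr(
        regularizer=fluid.regularizer.L2DecayRegularizer(
            regularization_coeff=1e-4)))
loss = fluid.layers.mean(emb)
fluid.optimizer.SGD(learning_rate=0.1).minimize(loss)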
""" helper = LayerHelper("isfinite", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out}) return out diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index 97644df007117be35ccd4d66495d8ca0ec7b0e88..c151fbd17208bb6e3104e8d0f6590392c6095987 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer): decay = block.create_var( dtype="float32", shape=param.shape, - type=core.VarDesc.VarType.SELECTED_ROWS) + type=core.VarDesc.VarType.LOD_TENSOR) block.append_op( type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) block.append_op( @@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer): decay = block.create_var( dtype="float32", shape=param.shape, - type=core.VarDesc.VarType.SELECTED_ROWS) + type=core.VarDesc.VarType.LOD_TENSOR) block.append_op( type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) block.append_op( diff --git a/python/paddle/fluid/tests/CMakeLists.txt b/python/paddle/fluid/tests/CMakeLists.txt index d6568cd38e714bf9eb9d34da8a1c6a5cdb6677e3..7ad923d3321ec8a88b60d7f4f7777e12fad8faa6 100644 --- a/python/paddle/fluid/tests/CMakeLists.txt +++ b/python/paddle/fluid/tests/CMakeLists.txt @@ -1,8 +1,4 @@ -if(NOT APPLE) - set(PYTHON_TESTS_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE PATH "python tests directory") -else() - set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) -endif(NOT APPLE) +set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests CACHE INTERNAL "python tests directory") file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") diff --git a/python/paddle/fluid/tests/unittests/test_slice_var.py b/python/paddle/fluid/tests/unittests/test_slice_var.py index fab63b7d5631829feffd26fc1dce2bd338d2036b..b16c744603534ad07f9318a5a195f1d7550b1f10 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_var.py +++ b/python/paddle/fluid/tests/unittests/test_slice_var.py @@ -30,7 +30,6 @@ class TestSliceVar(unittest.TestCase): var = program.global_block().create_var( name=str(random.randint(10000, 99999)), persistable=True, - # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) blocks = slice_variable(var_list, 10, min_size)