Unverified commit b0ca369b, authored by 0x45f, committed by GitHub

Add fill_constant_batch_size YAML and UT (#41474)

Parent: ad4193fe
@@ -846,6 +846,18 @@ def fill_constant_batch_size_like(input,
            input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.final_state_full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place)
        out.stop_gradient = True
        return out
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
......
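
For context (not part of this commit): a minimal sketch of how the new eager-mode branch above can be exercised, assuming a PaddlePaddle build that already contains this change; the shapes and values below are illustrative only.

```python
import paddle
import paddle.fluid as fluid

paddle.disable_static()  # dygraph/eager mode, so the new branch is taken

# dim 0 of `like` (the batch size, 4 here) replaces dim 0 of `shape`
like = paddle.zeros([4, 3], dtype='float32')
out = fluid.layers.fill_constant_batch_size_like(
    input=like, shape=[1, 8], dtype='int64', value=0)

print(out.shape)          # [4, 8]
print(out.stop_gradient)  # True, set explicitly by the new branch
```
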
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid.core as core
from paddle.static import program_guard, Program
import paddle.compat as cpt
import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid.framework import convert_np_dtype_to_dtype_

paddle.enable_static()


def fill_constant_batch_size_like(input,
                                  shape,
                                  value,
                                  data_type,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    return paddle.fluid.layers.fill_constant_batch_size_like(
        input, shape, data_type, value, input_dim_idx, output_dim_idx,
        force_cpu)


class TestFillConstatnBatchSizeLike1(OpTest):
    # test basic
    def setUp(self):
        self.op_type = "fill_constant_batch_size_like"
        self.python_api = fill_constant_batch_size_like
        self.init_data()

        input = np.zeros(self.shape)
        out = np.full_like(input, self.value, self.dtype)

        self.inputs = {'Input': input}
        self.outputs = {'Out': out}
        self.attrs = {
            'shape': self.shape,
            'dtype': convert_np_dtype_to_dtype_(self.dtype),
            'value': self.value,
            'input_dim_idx': self.input_dim_idx,
            'output_dim_idx': self.output_dim_idx,
            'force_cpu': self.force_cpu
        }

    def init_data(self):
        self.shape = [10, 10]
        self.dtype = np.float32
        self.value = 100
        self.input_dim_idx = 0
        self.output_dim_idx = 0
        self.force_cpu = False

    def test_check_output(self):
        self.check_output(check_eager=True)


if __name__ == "__main__":
    unittest.main()
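
A self-contained sketch (not part of this commit) of what `check_eager=True` verifies above: the same case is run through the Python layer in eager mode and compared against the NumPy reference built in `setUp`. It assumes a build that includes this change.

```python
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.disable_static()  # exercise the eager-mode branch added in the first hunk

inp = paddle.zeros([10, 10], dtype='float32')  # mirrors init_data(): shape=[10, 10]
out = fluid.layers.fill_constant_batch_size_like(
    input=inp, shape=[10, 10], dtype='float32', value=100)

expected = np.full([10, 10], 100, dtype='float32')
np.testing.assert_allclose(out.numpy(), expected)
```
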
@@ -718,6 +718,18 @@
    data_type : dtype
    backend : place

- api : full_batch_size_like
  args : (Tensor input, int[] shape, DataType dtype, Scalar value, int input_dim_idx, int output_dim_idx, Place place=CPUPlace())
  output: Tensor
  infer_meta :
    func : FullBatchSizeLikeInferMeta
    param : [input, shape, value, dtype, input_dim_idx, output_dim_idx]
  kernel :
    func : full_batch_size_like
    param : [input, shape, value, dtype, input_dim_idx, output_dim_idx]
    data_type : dtype
    backend : place

- api : full_like
  args : (Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {})
  output: Tensor
......
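
For orientation (not part of this commit): the `args` order in the new YAML entry matches the positional call to `_C_ops.final_state_full_batch_size_like` shown in the first hunk. A rough sketch of invoking that generated binding directly, assuming it is exposed under that name:

```python
import paddle
from paddle import _C_ops
from paddle.fluid.framework import (_current_expected_place,
                                    convert_np_dtype_to_dtype_)

paddle.disable_static()

x = paddle.ones([4, 3], dtype='float32')
dtype = convert_np_dtype_to_dtype_('float32')  # DataType argument from the YAML
# (input, shape, dtype, value, input_dim_idx, output_dim_idx, place)
out = _C_ops.final_state_full_batch_size_like(
    x, [1, 8], dtype, 1.5, 0, 0, _current_expected_place())

print(out.shape)  # [4, 8], every element 1.5
```
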