Paddle · Commit 2e597696 (unverified)
Authored by furnace on Sep 11, 2020; committed via GitHub on Sep 11, 2020.
add empty op (c++, python, unit test) (#26659)
Parent: b6715386

Showing 12 changed files with 588 additions and 29 deletions (+588 −29).
paddle/fluid/operators/empty_op.cc                            +132  −0
paddle/fluid/operators/empty_op.cu.cc                          +26  −0
paddle/fluid/operators/empty_op.h                              +45  −0
paddle/fluid/operators/fill_constant_op.h                       +1  −23
paddle/fluid/operators/gaussian_random_op.cc                    +1  −2
paddle/fluid/operators/gaussian_random_op.cu                    +1  −2
paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc      +1  −2
paddle/fluid/operators/utils.h                                 +21  −0
python/paddle/__init__.py                                       +1  −0
python/paddle/fluid/tests/unittests/test_empty_op.py          +270  −0
python/paddle/tensor/__init__.py                                +1  −0
python/paddle/tensor/creation.py                               +88  −0
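For quick orientation, here is a minimal usage sketch of the user-facing API this commit introduces (imperative mode, mirroring the docstring examples added to creation.py below). The returned values are uninitialized, so only the shape and dtype are meaningful:

    import paddle

    paddle.disable_static()  # imperative mode
    x = paddle.empty(shape=[2, 3], dtype='float32')
    print(x.shape)  # [2, 3]; the contents are whatever the freshly allocated buffer held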
paddle/fluid/operators/empty_op.cc
new file mode 100644

/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/empty_op.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class EmptyOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("ShapeTensor",
             "(Tensor<int>), optional). The shape of the output."
             "It has a higher priority than Attr(shape).")
        .AsDispensable();
    AddInput("ShapeTensorList",
             "(vector<Tensor<int>>, optional). The shape of the output. "
             "It has a higher priority than Attr(shape)."
             "The shape of the element in vector must be [1].")
        .AsDuplicable()
        .AsDispensable();
    AddAttr<std::vector<int64_t>>("shape",
                                  "(vector<int64_t>) The shape of the output")
        .SetDefault({});
    AddAttr<int>("dtype", "The data type of output tensor, Default is float")
        .SetDefault(framework::proto::VarType::FP32);
    AddOutput("Out", "(Tensor) The output tensor.");
    AddComment(R"DOC(empty operator
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument shape.

The type of the tensor is specify by `dtype`.
)DOC");
  }
};

class EmptyOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* context) const override {
    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out", "empty");

    if (context->HasInput("ShapeTensor")) {
      auto dims = context->GetInputDim("ShapeTensor");
      int num_ele = 1;
      for (int i = 0; i < dims.size(); ++i) {
        num_ele *= dims[i];
      }
      context->SetOutputDim("Out", framework::make_ddim({num_ele}));
    } else if (context->HasInputs("ShapeTensorList")) {
      std::vector<int> out_dims;
      auto dims_list = context->GetInputsDim("ShapeTensorList");
      for (size_t i = 0; i < dims_list.size(); ++i) {
        auto& dims = dims_list[i];
        PADDLE_ENFORCE_EQ(dims, framework::make_ddim({1}),
                          "ShapeError: The shape of Tensor in list must be [1]. "
                          "But received the shape "
                          "is [%s]",
                          dims);
        out_dims.push_back(dims[0]);
      }
      context->SetOutputDim("Out", framework::make_ddim(out_dims));
    } else {
      auto& shape = context->Attrs().Get<std::vector<int64_t>>("shape");
      context->SetOutputDim("Out", framework::make_ddim(shape));
    }
  }

 protected:
  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "ShapeTensor" || var_name == "ShapeTensorList") {
      return expected_kernel_type;
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& context) const override {
    return framework::OpKernelType(
        framework::proto::VarType::Type(context.Attr<int>("dtype")),
        context.GetPlace());
  }
};

class EmptyOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext* context) const override {
    auto data_type = static_cast<framework::proto::VarType::Type>(
        BOOST_GET_CONST(int, context->GetAttr("dtype")));
    context->SetOutputDataType("Out", data_type);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OPERATOR(
    empty, ops::EmptyOp, ops::EmptyOpMaker, ops::EmptyOpVarTypeInference,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OP_CPU_KERNEL(empty, ops::EmptyKernel<plat::CPUDeviceContext, bool>,
                       ops::EmptyKernel<plat::CPUDeviceContext, int>,
                       ops::EmptyKernel<plat::CPUDeviceContext, int64_t>,
                       ops::EmptyKernel<plat::CPUDeviceContext, float>,
                       ops::EmptyKernel<plat::CPUDeviceContext, double>,
                       ops::EmptyKernel<plat::CPUDeviceContext, plat::float16>);
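The CPU registration above covers bool, int, int64_t, float, double and plat::float16, and the dtype attribute defaults to FP32 when left unset. As a small sketch of my own (not part of the commit), the dtypes that the Python API added later in this commit also accepts can be exercised like this; float16 is registered as well but is omitted here since it is mainly used on GPU:

    import paddle

    paddle.disable_static()
    for dt in ['bool', 'int32', 'int64', 'float32', 'float64']:
        t = paddle.empty(shape=[2, 2], dtype=dt)
        print(dt, t.shape)  # always [2, 2]; the values are uninitialized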
paddle/fluid/operators/empty_op.cu.cc
new file mode 100644

/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/empty_op.h"

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(
    empty, ops::EmptyKernel<plat::CUDADeviceContext, bool>,
    ops::EmptyKernel<plat::CUDADeviceContext, int>,
    ops::EmptyKernel<plat::CUDADeviceContext, int64_t>,
    ops::EmptyKernel<plat::CUDADeviceContext, float>,
    ops::EmptyKernel<plat::CUDADeviceContext, double>,
    ops::EmptyKernel<plat::CUDADeviceContext, plat::float16>);
paddle/fluid/operators/empty_op.h
new file mode 100644

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/utils.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

template <typename DeviceContext, typename T>
class EmptyKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto dtype = static_cast<framework::proto::VarType::Type>(
        context.Attr<int>("dtype"));
    Tensor *out_tensor = context.Output<Tensor>("Out");

    auto shape = GetShape(context);
    out_tensor->Resize(shape);

    out_tensor->mutable_data(context.GetPlace(), dtype);
  }
};

}  // namespace operators
}  // namespace paddle
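Note that EmptyKernel only resizes the output and allocates it through mutable_data; nothing is written into the buffer. The unit test added below therefore cannot compare against fixed values and instead accepts an output that is either all zeros or clearly holds arbitrary data. A sketch of that acceptance check, mirroring verify_output in test_empty_op.py:

    import numpy as np

    def looks_like_valid_uninitialized(arr):
        # Mirrors verify_output for float/int outputs: accept a freshly
        # allocated buffer if it happens to be all zeros, or if it clearly
        # contains arbitrary values (max > min).
        max_value, min_value = np.nanmax(arr), np.nanmin(arr)
        return (max_value == 0.0 and min_value == 0.0) or (max_value > min_value)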
paddle/fluid/operators/fill_constant_op.h
@@ -27,27 +27,6 @@ namespace operators {

 using Tensor = framework::Tensor;

-inline framework::DDim GetShape(const framework::ExecutionContext &ctx,
-                                std::string op_type) {
-  // 1. shape is a Tensor
-  if (ctx.HasInput("ShapeTensor")) {
-    auto *shape_tensor = ctx.Input<framework::LoDTensor>("ShapeTensor");
-    auto vec_shape = GetDataFromTensor<int>(shape_tensor);
-    return framework::make_ddim(vec_shape);
-  }
-
-  // 2. shape is a list/tuple containing Tensor
-  auto shape_tensor_list = ctx.MultiInput<framework::Tensor>("ShapeTensorList");
-  if (shape_tensor_list.size() > 0) {
-    auto vec_shape = GetDataFromTensorList(shape_tensor_list);
-    return framework::make_ddim(vec_shape);
-  }
-
-  // 3. shape is a list/tuple without containing Tensor
-  auto vec_shape = ctx.Attr<std::vector<int64_t>>("shape");
-  return framework::make_ddim(vec_shape);
-}
-
 template <typename T>
 class FillConstantKernel : public framework::OpKernel<T> {
  public:

@@ -93,8 +72,7 @@ class FillConstantKernel : public framework::OpKernel<T> {
       }
       value = tensor_data[0];
     }
-    const std::string op_type = "fill_constant";
-    auto shape = GetShape(ctx, op_type);
+    auto shape = GetShape(ctx);
     if (out_var->IsType<framework::LoDTensor>()) {
       tensor = out_var->GetMutable<framework::LoDTensor>();
paddle/fluid/operators/gaussian_random_op.cc
@@ -34,8 +34,7 @@ class CPUGaussianRandomKernel : public framework::OpKernel<T> {
     auto* tensor = context.Output<framework::Tensor>("Out");

     std::normal_distribution<T> dist(mean, std);
-    const std::string op_type = "gaussian_random";
-    auto shape = GetShape(context, op_type);
+    auto shape = GetShape(context);
     tensor->Resize(shape);
     int64_t size = tensor->numel();
     T* data = tensor->mutable_data<T>(context.GetPlace());
paddle/fluid/operators/gaussian_random_op.cu
@@ -58,8 +58,7 @@ class GPUGaussianRandomKernel : public framework::OpKernel<T> {
     T mean = static_cast<T>(context.Attr<float>("mean"));
     T std = static_cast<T>(context.Attr<float>("std"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);

-    const std::string op_type = "gaussian_random";
-    auto shape = GetShape(context, op_type);
+    auto shape = GetShape(context);
     tensor->Resize(shape);
     T* data = tensor->mutable_data<T>(context.GetPlace());
paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
@@ -30,8 +30,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
     float std = context.Attr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");

-    const std::string op_type = "gaussian_random";
-    auto shape = GetShape(context, op_type);
+    auto shape = GetShape(context);
     tensor->Resize(shape);
     T* data = tensor->mutable_data<T>(context.GetPlace());
     int64_t size = tensor->numel();
paddle/fluid/operators/utils.h
@@ -81,5 +81,26 @@ inline std::vector<T> GetDataFromTensorList(
   }
   return vec_new_data;
 }
+
+inline framework::DDim GetShape(const framework::ExecutionContext &ctx) {
+  // 1. shape is a Tensor
+  if (ctx.HasInput("ShapeTensor")) {
+    auto *shape_tensor = ctx.Input<framework::LoDTensor>("ShapeTensor");
+    auto vec_shape = GetDataFromTensor<int>(shape_tensor);
+    return framework::make_ddim(vec_shape);
+  }
+
+  // 2. shape is a list/tuple containing Tensor
+  auto shape_tensor_list = ctx.MultiInput<framework::Tensor>("ShapeTensorList");
+  if (shape_tensor_list.size() > 0) {
+    auto vec_shape = GetDataFromTensorList(shape_tensor_list);
+    return framework::make_ddim(vec_shape);
+  }
+
+  // 3. shape is a list/tuple without containing Tensor
+  auto vec_shape = ctx.Attr<std::vector<int64_t>>("shape");
+  return framework::make_ddim(vec_shape);
+}
+
 }  // namespace operators
 }  // namespace paddle
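The new GetShape overload resolves the output shape in a fixed priority order: the ShapeTensor input first, then ShapeTensorList, then the shape attribute, matching the "higher priority than Attr(shape)" notes in EmptyOpMaker. At the Python level these correspond to the three accepted forms of the shape argument; a sketch mirroring the docstring examples in creation.py further down:

    import numpy as np
    import paddle

    paddle.disable_static()
    # 1. shape as a plain list/tuple of ints
    a = paddle.empty(shape=[2, 3], dtype='float32')
    # 2. shape as a 1-D int32/int64 Tensor
    b = paddle.empty(shape=paddle.to_tensor(np.array([2, 3]).astype('int32')), dtype='float32')
    # 3. shape as a list mixing ints and shape-[1] Tensors
    dim = paddle.to_tensor(np.array([3]).astype('int32'))
    c = paddle.empty(shape=[2, dim], dtype='float32')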
python/paddle/__init__.py
@@ -75,6 +75,7 @@ from .tensor.creation import full_like #DEFINE_ALIAS
 from .tensor.creation import triu #DEFINE_ALIAS
 from .tensor.creation import tril #DEFINE_ALIAS
 from .tensor.creation import meshgrid #DEFINE_ALIAS
+from .tensor.creation import empty #DEFINE_ALIAS
 from .tensor.linalg import matmul #DEFINE_ALIAS
 from .tensor.linalg import dot #DEFINE_ALIAS
 # from .tensor.linalg import einsum #DEFINE_ALIAS
python/paddle/fluid/tests/unittests/test_empty_op.py
new file mode 100644

#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from op_test import OpTest
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


# Situation 1: Attr(shape) is a list(without tensor)
class TestEmptyOp(OpTest):
    def setUp(self):
        self.op_type = "empty"
        self.init_config()

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        data_type = outs[0].dtype
        if data_type in ['float32', 'float64', 'int32', 'int64']:
            max_value = np.nanmax(outs[0])
            min_value = np.nanmin(outs[0])

            always_full_zero = max_value == 0.0 and min_value == 0.0
            always_non_full_zero = max_value > min_value
            self.assertTrue(always_full_zero or always_non_full_zero,
                            'always_full_zero or always_non_full_zero.')
        elif data_type in ['bool']:
            total_num = outs[0].size
            true_num = np.sum(outs[0] == True)
            false_num = np.sum(outs[0] == False)
            self.assertTrue(total_num == true_num + false_num,
                            'The value should always be True or False.')
        else:
            self.assertTrue(False, 'invalid data type')

    def init_config(self):
        shape = [500, 3]
        dtype = 'float32'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)
        self.attrs = {'shape': shape, 'dtype': dtype_inner}
        self.inputs = {}
        self.outputs = {'Out': np.zeros(shape).astype(dtype)}


class TestEmptyOp2(TestEmptyOp):
    def init_config(self):
        shape = [500, 3]
        dtype = 'float64'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)
        self.attrs = {'shape': shape, 'dtype': dtype_inner}
        self.inputs = {}
        self.outputs = {'Out': np.zeros(shape).astype(dtype)}


class TestEmptyOp3(TestEmptyOp):
    def init_config(self):
        shape = [500, 3]
        dtype = 'int32'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)
        self.attrs = {'shape': shape, 'dtype': dtype_inner}
        self.inputs = {}
        self.outputs = {'Out': np.zeros(shape).astype(dtype)}


class TestEmptyOp4(TestEmptyOp):
    def init_config(self):
        shape = [500, 3]
        dtype = 'int64'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)
        self.attrs = {'shape': shape, 'dtype': dtype_inner}
        self.inputs = {}
        self.outputs = {'Out': np.zeros(shape).astype(dtype)}


class TestEmptyOp5(TestEmptyOp):
    def init_config(self):
        shape = [500, 3]
        dtype = 'bool'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)
        self.attrs = {'shape': shape, 'dtype': dtype_inner}
        self.inputs = {}
        self.outputs = {'Out': np.zeros(shape).astype(dtype)}


# Situation 2: shape is a tensor
class TestEmptyOp_ShapeTensor(OpTest):
    def setUp(self):
        self.op_type = "empty"
        self.init_config()

    def init_config(self):
        self.shape = [500, 3]
        dtype = 'float32'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)
        self.attrs = {'dtype': dtype_inner}
        self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
        self.outputs = {'Out': np.zeros(self.shape).astype(dtype)}

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        data_type = outs[0].dtype
        if data_type in ['float32', 'float64', 'int32', 'int64']:
            max_value = np.nanmax(outs[0])
            min_value = np.nanmin(outs[0])

            always_full_zero = max_value == 0.0 and min_value == 0.0
            always_non_full_zero = max_value > min_value
            self.assertTrue(always_full_zero or always_non_full_zero,
                            'always_full_zero or always_non_full_zero.')
        elif data_type in ['bool']:
            total_num = outs[0].size
            true_num = np.sum(outs[0] == True)
            false_num = np.sum(outs[0] == False)
            self.assertTrue(total_num == true_num + false_num,
                            'The value should always be True or False.')
        else:
            self.assertTrue(False, 'invalid data type')


# Situation 3: Attr(shape) is a list(with tensor)
class TestEmptyOp_ShapeTensorList(OpTest):
    def setUp(self):
        self.op_type = "empty"
        self.init_config()

    def init_config(self):
        self.shape = [123, 92]
        self.infer_shape = [-1, 92]

        dtype = 'float32'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)

        shape_tensor_list = []
        for index, ele in enumerate(self.shape):
            shape_tensor_list.append(("x" + str(index), np.ones(
                (1)).astype('int32') * ele))

        self.inputs = {"ShapeTensorList": shape_tensor_list}
        self.attrs = {'shape': self.infer_shape, 'dtype': dtype_inner}
        self.outputs = {'Out': np.zeros(self.shape).astype(dtype)}

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        data_type = outs[0].dtype
        if data_type in ['float32', 'float64', 'int32', 'int64']:
            max_value = np.nanmax(outs[0])
            min_value = np.nanmin(outs[0])

            always_full_zero = max_value == 0.0 and min_value == 0.0
            always_non_full_zero = max_value > min_value
            self.assertTrue(always_full_zero or always_non_full_zero,
                            'always_full_zero or always_non_full_zero.')
        elif data_type in ['bool']:
            total_num = outs[0].size
            true_num = np.sum(outs[0] == True)
            false_num = np.sum(outs[0] == False)
            self.assertTrue(total_num == true_num + false_num,
                            'The value should always be True or False.')
        else:
            self.assertTrue(False, 'invalid data type')


class TestEmptyAPI(unittest.TestCase):
    def __check_out__(self, out, dtype='float32'):
        max_value = np.nanmax(np.array(out))
        min_value = np.nanmin(np.array(out))
        always_non_full_zero = max_value > min_value
        always_full_zero = max_value == 0.0 and min_value == 0.0
        self.assertTrue(always_full_zero or always_non_full_zero,
                        'always_full_zero or always_non_full_zero.')

    def test_dygraph_api_out(self):
        paddle.disable_static()
        shape = [200, 3]
        out = paddle.empty(shape=shape)
        self.__check_out__(out)
        paddle.enable_static()

    def test_dygraph_api_out_2(self):
        paddle.disable_static()
        shape_data = np.array([200, 3]).astype('int32')
        shape = paddle.to_tensor(shape_data)
        out = paddle.empty(shape=shape)
        self.__check_out__(out)
        paddle.enable_static()

    def test_dygraph_api_out_3(self):
        paddle.disable_static()
        shape_data = np.array([200, 3]).astype('int64')
        shape = paddle.to_tensor(shape_data)
        out = paddle.empty(shape=shape)
        self.__check_out__(out)
        paddle.enable_static()

    def test_dygraph_api_attr(self):
        paddle.disable_static()
        shape = [200, 3]
        dtype = 'float64'
        out = paddle.empty(shape=shape, dtype=dtype)
        self.__check_out__(out, dtype)
        paddle.enable_static()

    def test_static_graph(self):
        dtype = 'float64'

        positive_2_int32 = fluid.layers.fill_constant([1], "int32", 3)
        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 3)

        shape_tensor_int32 = fluid.data(
            name="shape_tensor_int32", shape=[2], dtype="int32")
        shape_tensor_int64 = fluid.data(
            name="shape_tensor_int64", shape=[2], dtype="int64")

        out_1 = paddle.empty(shape=[200, 3], dtype=dtype)
        out_2 = paddle.empty(shape=shape_tensor_int32, dtype=dtype)
        out_3 = paddle.empty(shape=shape_tensor_int64, dtype=dtype)
        out_4 = paddle.empty(shape=[200, positive_2_int32], dtype=dtype)
        out_5 = paddle.empty(shape=[200, positive_2_int64], dtype=dtype)

        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        res_1, res_2, res_3, res_4, res_5 = exe.run(
            fluid.default_main_program(),
            feed={
                "shape_tensor_int32": np.array([200, 3]).astype("int32"),
                "shape_tensor_int64": np.array([200, 3]).astype("int64"),
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5])

        self.__check_out__(res_1, dtype)
        self.__check_out__(res_2, dtype)
        self.__check_out__(res_3, dtype)
        self.__check_out__(res_4, dtype)
        self.__check_out__(res_5, dtype)


class TestEmptyError(unittest.TestCase):
    def test_attr(self):
        def test_dtype():
            shape = [200, 3]
            dtype = 'uint8'
            result = paddle.empty(shape=shape, dtype=dtype)

        self.assertRaises(TypeError, test_dtype)


if __name__ == '__main__':
    unittest.main()
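Assuming a development checkout where Paddle's OpTest harness (op_test.py) is importable, the file runs as a plain unittest module, e.g. from python/paddle/fluid/tests/unittests:

    python test_empty_op.py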
python/paddle/tensor/__init__.py
@@ -40,6 +40,7 @@ from .creation import full_like #DEFINE_ALIAS
 from .creation import triu #DEFINE_ALIAS
 from .creation import tril #DEFINE_ALIAS
 from .creation import meshgrid #DEFINE_ALIAS
+from .creation import empty #DEFINE_ALIAS
 from .io import save #DEFINE_ALIAS
 from .io import load #DEFINE_ALIAS
 from .linalg import matmul #DEFINE_ALIAS
python/paddle/tensor/creation.py
@@ -48,6 +48,7 @@ __all__ = [
     'eye',
     'full',
     'full_like',
+    'empty',
     'triu',
     'tril',
     'meshgrid'

@@ -981,3 +982,90 @@ def diag(x, offset=0, padding_value=0, name=None):
         out.stop_gradient = True
     return out
+
+
+def empty(shape, dtype=None, name=None):
+    """
+    This Op returns a Tensor with uninitialized data which size is same as ``shape``.
+
+    Args:
+        shape(list|tuple|Tensor): Shape of the Tensor to be created.
+                The data type of dimension of shape is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
+                the elements of it should be integers or Tensors with shape [1].
+                If ``shape`` is an Tensor, it should be an 1-D Tensor.
+        dtype(np.dtype|str, optional): Data type of the output Tensor
+            which can be bool, float16, float32, float64, int32, int64, if dytpe is `None`, the data
+            type of created Tensor use global default dtype (see ``get_default_dtype``
+            for details).
+        name(str, optional): The default value is None. Normally there is no need for user to set this
+            property. For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor: Tensor which is created according to ``shape`` and ``dtype``, and is uninitialized.
+
+    Examples:
+        .. code-block:: python
+
+          import paddle
+          import numpy as np
+
+          paddle.disable_static()  # Now we are in imperative mode
+          paddle.set_device("cpu")  # and use cpu device
+
+          # example 1: argument ``shape`` is a list which doesn't contain Tensor.
+          data1 = paddle.empty(shape=[2,3], dtype='float32')
+          #[[4.3612203e+27 1.8176809e+31 1.3555911e-19]     # uninitialized
+          # [1.1699684e-19 1.3563156e-19 3.6408321e-11]]    # uninitialized
+
+          # example 2: argument ``shape`` is a Tensor, the data type must be int64 or int32.
+          shape_data = np.array([2, 3]).astype('int32')
+          shape = paddle.to_tensor(shape_data)
+          data2 = paddle.empty(shape=shape, dtype='float32')
+          #[[1.7192326e-37 4.8125365e-38 1.9866003e-36]     # uninitialized
+          # [1.3284029e-40 7.1117408e-37 2.5353012e+30]]    # uninitialized
+
+          # example 3: argument ``shape`` is a list which contains Tensor.
+          dim2_data = np.array([3]).astype('int32')
+          dim2 = paddle.to_tensor(dim2_data)
+          data3 = paddle.empty(shape=[2, dim2], dtype='float32')
+          #[[1.1024214e+24 7.0379409e+22 6.5737699e-34]     # uninitialized
+          # [7.5563101e+31 7.7130405e+31 2.8020654e+20]]    # uninitialized
+    """
+
+    if dtype is None:
+        dtype = paddle.get_default_dtype()
+
+    dtype = convert_dtype(dtype)
+
+    if in_dygraph_mode():
+        shape = utils.convert_shape_to_list(shape)
+        out = core.ops.empty('shape', shape, 'dtype',
+                             convert_np_dtype_to_dtype_(dtype))
+        out.stop_gradient = True
+        return out
+
+    helper = LayerHelper("empty", **locals())
+    inputs = {}
+
+    check_dtype(dtype, 'dtype',
+                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+                'empty')
+    check_type(shape, 'shape', (Variable, list, tuple), 'empty')
+
+    if isinstance(shape, Variable):
+        check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty')
+
+    attrs = {}
+    utils.get_shape_tensor_inputs(
+        inputs=inputs, attrs=attrs, shape=shape, op_type='empty')
+
+    out = helper.create_variable_for_type_inference(dtype=dtype)
+    attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='empty',
+        inputs=inputs,
+        outputs={'Out': [out]},
+        attrs=attrs,
+        stop_gradient=True)
+    out.stop_gradient = True
+
+    return out
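As TestEmptyError above expects, a dtype outside the supported set is rejected by the check_dtype call on the static-graph path; a sketch, assuming static-graph mode:

    import paddle

    paddle.enable_static()
    try:
        paddle.empty(shape=[200, 3], dtype='uint8')  # uint8 is not in the supported dtype list
    except TypeError as err:
        print('rejected:', err)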