Unverified commit 28fa467b authored by zhongpu, committed by GitHub

delete paddle api (#24183)

* delete paddle.nn api, test=develop

* fix optest, test=develop

* delete paddle.optimizer, paddle.metric, paddle.framework, paddle.io, test=develop

* fix optest, test=develop

* fix test_trace_op.py, test=develop

* fix test_activation_op.py, test=develop
Parent 4c3a2f54
@@ -34,36 +34,4 @@ import paddle.compat
import paddle.distributed
batch = batch.batch
import paddle.sysconfig
import paddle.nn
import paddle.framework
import paddle.imperative
import paddle.complex
# from .framework.framework import set_default_dtype #DEFINE_ALIAS
# from .framework.framework import get_default_dtype #DEFINE_ALIAS
from .framework.random import manual_seed #DEFINE_ALIAS
# from .framework import append_backward #DEFINE_ALIAS
# from .framework import gradients #DEFINE_ALIAS
# from .framework import Executor #DEFINE_ALIAS
# from .framework import global_scope #DEFINE_ALIAS
# from .framework import scope_guard #DEFINE_ALIAS
# from .framework import BuildStrategy #DEFINE_ALIAS
# from .framework import CompiledProgram #DEFINE_ALIAS
# from .framework import default_main_program #DEFINE_ALIAS
# from .framework import default_startup_program #DEFINE_ALIAS
# from .framework import create_global_var #DEFINE_ALIAS
# from .framework import create_parameter #DEFINE_ALIAS
# from .framework import create_py_reader_by_data #DEFINE_ALIAS
# from .framework import Print #DEFINE_ALIAS
# from .framework import py_func #DEFINE_ALIAS
# from .framework import ExecutionStrategy #DEFINE_ALIAS
# from .framework import in_dygraph_mode #DEFINE_ALIAS
# from .framework import name_scope #DEFINE_ALIAS
# from .framework import ParallelExecutor #DEFINE_ALIAS
# from .framework import ParamAttr #DEFINE_ALIAS
# from .framework import Program #DEFINE_ALIAS
# from .framework import program_guard #DEFINE_ALIAS
# from .framework import Variable #DEFINE_ALIAS
# from .framework import WeightNormParamAttr #DEFINE_ALIAS
# from .framework import Model #DEFINE_ALIAS
# from .framework import Sequential #DEFINE_ALIAS
@@ -209,9 +209,6 @@ if (APPLE OR WIN32)
list(REMOVE_ITEM TEST_OPS test_imperative_data_loader_fds_clear)
list(REMOVE_ITEM TEST_OPS test_imperative_data_loader_exit_func)
list(REMOVE_ITEM TEST_OPS test_imperative_signal_handler)
list(REMOVE_ITEM TEST_OPS test_multiprocess_dataloader_static)
list(REMOVE_ITEM TEST_OPS test_multiprocess_dataloader_dynamic)
list(REMOVE_ITEM TEST_OPS test_multiprocess_dataloader_exception)
endif()
if(NOT WITH_GPU OR WIN32 OR APPLE)
@@ -383,7 +380,4 @@ if(NOT WIN32 AND NOT APPLE)
set_tests_properties(test_imperative_data_loader_base PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" RUN_SERIAL TRUE)
set_tests_properties(test_imperative_data_loader_exception PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" RUN_SERIAL TRUE)
set_tests_properties(test_imperative_data_loader_fds_clear PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" RUN_SERIAL TRUE)
set_tests_properties(test_multiprocess_dataloader_static PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" RUN_SERIAL TRUE)
set_tests_properties(test_multiprocess_dataloader_dynamic PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" RUN_SERIAL TRUE)
set_tests_properties(test_multiprocess_dataloader_exception PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" RUN_SERIAL TRUE)
endif()
@@ -21,8 +21,6 @@ from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as functional
from paddle.fluid import compiler, Program, program_guard
@@ -1203,140 +1201,5 @@ create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)
class TestNNReluAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [10, 12]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.y = self.ref_forward(self.x)
def ref_forward(self, x):
return np.maximum(x, 0)
def ref_backward(self, y, dy):
y_t = y.copy()
y_t[y_t > 0] = 1
return y_t * dy
def check_api(self, place=fluid.CPUPlace(), inplace=False):
main_program = Program()
myrelu = nn.ReLU(inplace)
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
x.stop_gradient = False
y = myrelu(x)
fluid.backward.append_backward(fluid.layers.mean(y))
exe = fluid.Executor(place)
out = exe.run(main_program,
feed={'x': self.x},
fetch_list=[y, y.grad_name, x.grad_name])
self.assertTrue(np.allclose(out[0], self.y))
self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1])))
with fluid.dygraph.guard(place):
x = fluid.dygraph.to_variable(self.x)
y = myrelu(x)
self.assertTrue(np.allclose(y.numpy(), self.y))
def test_check_api(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
for inplace in [True, False]:
self.check_api(place, inplace)
class TestNNFunctionalReluAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [10, 12]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.y = self.ref_forward(self.x)
def ref_forward(self, x):
return np.maximum(x, 0)
def test_check_api(self):
main_program = Program()
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
y = functional.relu(x)
exe = fluid.Executor(fluid.CPUPlace())
out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
self.assertTrue(np.allclose(out[0], self.y))
class TestNNSigmoidAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [10, 15]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.y = self.ref_forward(self.x)
def ref_forward(self, x):
return 1 / (1 + np.exp(-x))
def ref_backward(self, y, dy):
return dy * y * (1 - y)
def check_api(self, place=fluid.CPUPlace(), inplace=False):
main_program = Program()
mysigmoid = nn.Sigmoid(inplace)
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
x.stop_gradient = False
y = mysigmoid(x)
fluid.backward.append_backward(fluid.layers.mean(y))
exe = fluid.Executor(place)
out = exe.run(main_program,
feed={'x': self.x},
fetch_list=[y, y.grad_name, x.grad_name])
self.assertTrue(np.allclose(out[0], self.y))
self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1])))
with fluid.dygraph.guard(place):
x = fluid.dygraph.to_variable(self.x)
y = mysigmoid(x)
self.assertTrue(np.allclose(y.numpy(), self.y))
def test_check_api(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
for inplace in [True, False]:
self.check_api(place, inplace)
class TestNNFunctionalSigmoidAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [10, 15]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.y = self.ref_forward(self.x)
def ref_forward(self, x):
return 1 / (1 + np.exp(-x))
def test_check_api(self):
main_program = Program()
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
y = functional.sigmoid(x)
exe = fluid.Executor(fluid.CPUPlace())
out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
self.assertTrue(np.allclose(out[0], self.y))
if __name__ == "__main__":
unittest.main()
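The ref_forward/ref_backward pairs above encode the textbook derivatives: relu'(x) is 1 where x > 0 and 0 elsewhere, and sigmoid'(x) = y * (1 - y) for y = sigmoid(x). A minimal NumPy-only sketch cross-checking those analytic gradients against central finite differences (illustrative, independent of Paddle):

import numpy as np

def relu(x):
    return np.maximum(x, 0)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

x = np.random.uniform(-1, 1, 200)
x = x[np.abs(x) > 1e-3]  # keep clear of relu's kink at 0
eps = 1e-6
for fwd, grad in [(relu, lambda y: (y > 0).astype(y.dtype)),
                  (sigmoid, lambda y: y * (1 - y))]:
    y = fwd(x)
    numeric = (fwd(x + eps) - fwd(x - eps)) / (2 * eps)
    assert np.allclose(grad(y), numeric, atol=1e-4)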
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.io import BatchSampler, Dataset
IMAGE_SIZE = 32  # assumed value; RandomDataset below only needs a positive int
CLASS_NUM = 10  # assumed value; labels are drawn from [0, CLASS_NUM - 1)
class RandomDataset(Dataset):
def __init__(self, sample_num, class_num):
self.sample_num = sample_num
self.class_num = class_num
def __getitem__(self, idx):
np.random.seed(idx)
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.sample_num
class TestBatchSampler(unittest.TestCase):
def setUp(self):
self.num_samples = 1000
self.num_classes = 10
self.batch_size = 32
self.shuffle = False
self.drop_last = False
def init_batch_sampler(self):
dataset = RandomDataset(self.num_samples, self.num_classes)
bs = BatchSampler(
dataset=dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
drop_last=self.drop_last)
return bs
def test_main(self):
bs = self.init_batch_sampler()
# length check
bs_len = (self.num_samples + int(not self.drop_last) \
* (self.batch_size - 1)) // self.batch_size
self.assertTrue(bs_len == len(bs))
# output indices check
if not self.shuffle:
index = 0
for indices in bs:
for idx in indices:
self.assertTrue(index == idx)
index += 1
class TestBatchSamplerDropLast(TestBatchSampler):
def setUp(self):
self.num_samples = 1000
self.num_classes = 10
self.batch_size = 32
self.shuffle = False
self.drop_last = True
class TestBatchSamplerShuffle(TestBatchSampler):
def setUp(self):
self.num_samples = 1000
self.num_classes = 10
self.batch_size = 32
self.shuffle = True
self.drop_last = True
class TestBatchSamplerWithIndices(TestBatchSampler):
def init_batch_sampler(self):
bs = BatchSampler(
indices=list(range(self.num_samples)),
batch_size=self.batch_size,
drop_last=self.drop_last)
return bs
class TestBatchSamplerWithIndicesAndDataSource(unittest.TestCase):
def setUp(self):
self.num_samples = 1000
self.num_classes = 10
self.batch_size = 32
self.shuffle = False
self.drop_last = True
    def test_main(self):
        dataset = RandomDataset(self.num_samples, self.num_classes)
        # passing both a dataset and explicit indices is invalid;
        # BatchSampler is expected to raise an AssertionError
        with self.assertRaises(AssertionError):
            BatchSampler(
                dataset=dataset,
                indices=list(range(self.num_samples)),
                batch_size=self.batch_size,
                drop_last=self.drop_last)
if __name__ == '__main__':
unittest.main()
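The bs_len expression in test_main is the standard floor/ceil batch count: drop_last gives floor(N / B), otherwise ceil(N / B), and adding B - 1 before integer division is what implements the ceiling. The same identity, checked standalone:

def num_batches(n, b, drop_last):
    # batch count for n samples in batches of b
    return (n + (0 if drop_last else b - 1)) // b

assert num_batches(1000, 32, drop_last=True) == 31   # floor(1000 / 32)
assert num_batches(1000, 32, drop_last=False) == 32  # ceil(1000 / 32)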
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import paddle.fluid.initializer as I
import unittest
class Conv2DTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=4,
spartial_shape=(16, 16),
num_channels=6,
num_filters=8,
filter_size=3,
padding=0,
stride=1,
dilation=1,
groups=1,
act=None,
no_bias=False,
use_cudnn=True,
data_format="NCHW",
dtype="float32"):
super(Conv2DTestCase, self).__init__(methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.num_filters = num_filters
self.spartial_shape = spartial_shape
self.filter_size = filter_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.act = act
self.no_bias = no_bias
self.use_cudnn = use_cudnn
self.data_format = data_format
self.dtype = dtype
def setUp(self):
self.channel_last = self.data_format == "NHWC"
if self.channel_last:
input_shape = (self.batch_size, ) + self.spartial_shape + (
self.num_channels, )
else:
input_shape = (self.batch_size, self.num_channels
) + self.spartial_shape
self.input = np.random.randn(*input_shape).astype(self.dtype)
if isinstance(self.filter_size, int):
filter_size = [self.filter_size] * 2
else:
filter_size = self.filter_size
self.weight_shape = weight_shape = (self.num_filters, self.num_channels
// self.groups) + tuple(filter_size)
self.weight = np.random.uniform(
-1, 1, size=weight_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(
-1, 1, size=(self.num_filters, )).astype(self.dtype)
else:
self.bias = None
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1,self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
weight_attr = I.NumpyArrayInitializer(self.weight)
if self.bias is None:
bias_attr = False
else:
bias_attr = I.NumpyArrayInitializer(self.bias)
y_var = fluid.layers.conv2d(
x_var,
self.num_filters,
self.filter_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
param_attr=weight_attr,
bias_attr=bias_attr,
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
feed_dict = {"input": self.input}
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def functional(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1,self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
w_var = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
b_var = fluid.data(
"bias", (self.num_filters, ), dtype=self.dtype)
y_var = F.conv2d(
x_var,
w_var,
b_var if not self.no_bias else None,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format)
feed_dict = {"input": self.input, "weight": self.weight}
if self.bias is not None:
feed_dict["bias"] = self.bias
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def paddle_nn_layer(self):
x_var = dg.to_variable(self.input)
conv = nn.Conv2D(
self.num_channels,
self.num_filters,
self.filter_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format,
dtype=self.dtype)
conv.weight.set_value(self.weight)
if not self.no_bias:
conv.bias.set_value(self.bias)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
    def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional(place)
with dg.guard(place):
result3 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self._test_equivalence(place)
class Conv2DErrorTestCase(Conv2DTestCase):
def runTest(self):
place = fluid.CPUPlace()
with dg.guard(place):
with self.assertRaises(ValueError):
self.paddle_nn_layer()
def add_cases(suite):
suite.addTest(Conv2DTestCase(methodName='runTest'))
suite.addTest(
Conv2DTestCase(
methodName='runTest', stride=[1, 2], dilation=2))
suite.addTest(
Conv2DTestCase(
methodName='runTest', stride=2, dilation=(2, 1)))
suite.addTest(
Conv2DTestCase(
methodName='runTest', padding="same", no_bias=True, act="sigmoid"))
suite.addTest(
Conv2DTestCase(
methodName='runTest', filter_size=(3, 3), padding='valid'))
suite.addTest(Conv2DTestCase(methodName='runTest', padding=(2, 3)))
suite.addTest(Conv2DTestCase(methodName='runTest', padding=[1, 2, 2, 1]))
suite.addTest(
Conv2DTestCase(
methodName='runTest', padding=[[0, 0], [0, 0], [1, 2], [2, 1]]))
suite.addTest(Conv2DTestCase(methodName='runTest', data_format="NHWC"))
suite.addTest(
Conv2DTestCase(
methodName='runTest',
data_format="NHWC",
padding=[[0, 0], [1, 1], [2, 2], [0, 0]]))
suite.addTest(
Conv2DTestCase(
methodName='runTest', groups=2, padding="valid"))
suite.addTest(
Conv2DTestCase(
methodName='runTest',
num_filters=6,
num_channels=3,
groups=3,
use_cudnn=False,
act="sigmoid",
padding="valid"))
def add_error_cases(suite):
suite.addTest(
Conv2DErrorTestCase(
methodName='runTest', use_cudnn="not_valid"))
suite.addTest(
Conv2DErrorTestCase(
methodName='runTest', num_channels=5, groups=2))
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
add_cases(suite)
add_error_cases(suite)
return suite
if __name__ == '__main__':
unittest.main()
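All three code paths in Conv2DTestCase share one filter layout: for a (grouped) forward convolution the weight tensor is (num_filters, num_channels // groups, kh, kw), so each group's filters only see their slice of the input channels. A hypothetical helper spelling out that rule:

def conv2d_weight_shape(num_filters, num_channels, filter_size, groups=1):
    # hypothetical helper, not a Paddle API; both channel counts must
    # divide evenly into the groups
    assert num_filters % groups == 0 and num_channels % groups == 0
    if isinstance(filter_size, int):
        filter_size = (filter_size, filter_size)
    return (num_filters, num_channels // groups) + tuple(filter_size)

assert conv2d_weight_shape(8, 6, 3) == (8, 6, 3, 3)            # default case above
assert conv2d_weight_shape(6, 3, 3, groups=3) == (6, 1, 3, 3)  # depthwise-style case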
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import paddle.fluid.initializer as I
import unittest
class Conv2DTransposeTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=4,
spartial_shape=(16, 16),
num_channels=6,
num_filters=8,
filter_size=3,
output_size=None,
padding=0,
stride=1,
dilation=1,
groups=1,
act=None,
no_bias=False,
use_cudnn=True,
data_format="NCHW",
dtype="float32"):
super(Conv2DTransposeTestCase, self).__init__(methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.num_filters = num_filters
self.spartial_shape = spartial_shape
self.filter_size = filter_size
self.output_size = output_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.act = act
self.no_bias = no_bias
self.use_cudnn = use_cudnn
self.data_format = data_format
self.dtype = dtype
def setUp(self):
self.channel_last = self.data_format == "NHWC"
if self.channel_last:
input_shape = (self.batch_size, ) + self.spartial_shape + (
self.num_channels, )
else:
input_shape = (self.batch_size, self.num_channels
) + self.spartial_shape
self.input = np.random.randn(*input_shape).astype(self.dtype)
if isinstance(self.filter_size, int):
filter_size = [self.filter_size] * 2
else:
filter_size = self.filter_size
self.weight_shape = weight_shape = (self.num_channels, self.num_filters
// self.groups) + tuple(filter_size)
self.weight = np.random.uniform(
-1, 1, size=weight_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(
-1, 1, size=(self.num_filters, )).astype(self.dtype)
else:
self.bias = None
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1,self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
weight_attr = I.NumpyArrayInitializer(self.weight)
if self.bias is None:
bias_attr = False
else:
bias_attr = I.NumpyArrayInitializer(self.bias)
y_var = fluid.layers.conv2d_transpose(
x_var,
self.num_filters,
filter_size=self.filter_size,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
param_attr=weight_attr,
bias_attr=bias_attr,
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
feed_dict = {"input": self.input}
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def functional(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1,self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
w_var = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
b_var = fluid.data(
"bias", (self.num_filters, ), dtype=self.dtype)
y_var = F.conv2d_transpose(
x_var,
w_var,
None if self.no_bias else b_var,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format)
feed_dict = {"input": self.input, "weight": self.weight}
if self.bias is not None:
feed_dict["bias"] = self.bias
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def paddle_nn_layer(self):
x_var = dg.to_variable(self.input)
conv = nn.Conv2DTranspose(
self.num_channels,
self.num_filters,
self.filter_size,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format,
dtype=self.dtype)
conv.weight.set_value(self.weight)
if not self.no_bias:
conv.bias.set_value(self.bias)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
    def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional(place)
with dg.guard(place):
result3 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self._test_equivalence(place)
class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase):
def runTest(self):
place = fluid.CPUPlace()
with dg.guard(place):
with self.assertRaises(ValueError):
self.paddle_nn_layer()
def add_cases(suite):
suite.addTest(Conv2DTransposeTestCase(methodName='runTest', act="relu"))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', stride=[1, 2], no_bias=True, dilation=2))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest',
filter_size=(3, 3),
output_size=[20, 36],
stride=[1, 2],
dilation=2))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', stride=2, dilation=(2, 1)))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', padding="valid"))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', padding='valid'))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', filter_size=1, padding=(2, 3)))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', padding=[1, 2, 2, 1]))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', padding=[[0, 0], [0, 0], [1, 2], [2, 1]]))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', data_format="NHWC"))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest',
data_format="NHWC",
padding=[[0, 0], [1, 1], [2, 2], [0, 0]]))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', groups=2, padding="valid"))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest',
num_filters=6,
num_channels=3,
groups=3,
use_cudnn=False,
act="sigmoid",
padding="valid"))
def add_error_cases(suite):
suite.addTest(
Conv2DTransposeErrorTestCase(
methodName='runTest', use_cudnn="not_valid"))
suite.addTest(
Conv2DTransposeErrorTestCase(
methodName='runTest', num_channels=5, groups=2))
suite.addTest(
Conv2DTransposeErrorTestCase(
methodName='runTest', output_size="not_valid"))
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
add_cases(suite)
add_error_cases(suite)
return suite
if __name__ == '__main__':
unittest.main()
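The output_size cases above rely on the fact that a transposed convolution's output length is only determined up to the stride: the minimum is (in - 1) * stride - 2 * pad + dilation * (k - 1) + 1 per dimension, and any size less than one full stride above that is also reachable. That is why output_size=[20, 36] is legal for a 16x16 input with stride [1, 2] and dilation 2. A standalone arithmetic sketch:

def deconv_out_min(size, k, stride=1, pad=0, dilation=1):
    # smallest spatial size a transposed conv can produce
    return (size - 1) * stride - 2 * pad + dilation * (k - 1) + 1

assert deconv_out_min(16, 3, stride=1, dilation=2) == 20
lo = deconv_out_min(16, 3, stride=2, dilation=2)  # -> 35
assert lo <= 36 < lo + 2  # requested 36 lies in the valid window [35, 37)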
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import paddle.fluid.initializer as I
import unittest
class Conv3DTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=4,
spartial_shape=(8, 8, 8),
num_channels=6,
num_filters=8,
filter_size=3,
padding=0,
stride=1,
dilation=1,
groups=1,
act=None,
no_bias=False,
use_cudnn=True,
data_format="NCDHW",
dtype="float32"):
super(Conv3DTestCase, self).__init__(methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.num_filters = num_filters
self.spartial_shape = spartial_shape
self.filter_size = filter_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.act = act
self.no_bias = no_bias
self.use_cudnn = use_cudnn
self.data_format = data_format
self.dtype = dtype
def setUp(self):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
input_shape = (self.batch_size, ) + self.spartial_shape + (
self.num_channels, )
else:
input_shape = (self.batch_size, self.num_channels
) + self.spartial_shape
self.input = np.random.randn(*input_shape).astype(self.dtype)
if isinstance(self.filter_size, int):
filter_size = [self.filter_size] * 3
else:
filter_size = self.filter_size
self.weight_shape = weight_shape = (self.num_filters, self.num_channels
// self.groups) + tuple(filter_size)
self.weight = np.random.uniform(
-1, 1, size=weight_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(
-1, 1, size=(self.num_filters, )).astype(self.dtype)
else:
self.bias = None
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1, -1, self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
weight_attr = I.NumpyArrayInitializer(self.weight)
if self.bias is None:
bias_attr = False
else:
bias_attr = I.NumpyArrayInitializer(self.bias)
y_var = fluid.layers.conv3d(
x_var,
self.num_filters,
self.filter_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
param_attr=weight_attr,
bias_attr=bias_attr,
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
feed_dict = {"input": self.input}
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def functional(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1, -1, self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
w_var = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
b_var = fluid.data(
"bias", (self.num_filters, ), dtype=self.dtype)
y_var = F.conv3d(
x_var,
w_var,
None if self.no_bias else b_var,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format)
feed_dict = {"input": self.input, "weight": self.weight}
if self.bias is not None:
feed_dict["bias"] = self.bias
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def paddle_nn_layer(self):
x_var = dg.to_variable(self.input)
conv = nn.Conv3D(
self.num_channels,
self.num_filters,
self.filter_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format,
dtype=self.dtype)
conv.weight.set_value(self.weight)
if not self.no_bias:
conv.bias.set_value(self.bias)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
    def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional(place)
with dg.guard(place):
result3 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self._test_equivalence(place)
class Conv3DErrorTestCase(Conv3DTestCase):
def runTest(self):
place = fluid.CPUPlace()
with dg.guard(place):
with self.assertRaises(ValueError):
self.paddle_nn_layer()
def add_cases(suite):
suite.addTest(Conv3DTestCase(methodName='runTest'))
suite.addTest(
Conv3DTestCase(
methodName='runTest', stride=[1, 2, 1], dilation=2))
suite.addTest(
Conv3DTestCase(
methodName='runTest', stride=2, dilation=(2, 1, 2)))
suite.addTest(
Conv3DTestCase(
methodName='runTest', padding="same", no_bias=True))
suite.addTest(
Conv3DTestCase(
methodName='runTest', filter_size=(3, 2, 3), padding='valid'))
suite.addTest(Conv3DTestCase(methodName='runTest', padding=(2, 3, 1)))
suite.addTest(
Conv3DTestCase(
methodName='runTest', padding=[1, 2, 2, 1, 2, 3]))
suite.addTest(
Conv3DTestCase(
methodName='runTest',
padding=[[0, 0], [0, 0], [1, 2], [2, 1], [2, 2]]))
suite.addTest(Conv3DTestCase(methodName='runTest', data_format="NDHWC"))
suite.addTest(
Conv3DTestCase(
methodName='runTest',
data_format="NDHWC",
padding=[[0, 0], [1, 1], [3, 3], [2, 2], [0, 0]]))
suite.addTest(
Conv3DTestCase(
methodName='runTest', groups=2, padding="valid"))
suite.addTest(
Conv3DTestCase(
methodName='runTest',
num_filters=6,
num_channels=3,
groups=3,
use_cudnn=False,
act="sigmoid",
padding="valid"))
def add_error_cases(suite):
suite.addTest(
Conv3DErrorTestCase(
methodName='runTest', use_cudnn="not_valid"))
suite.addTest(
Conv3DErrorTestCase(
methodName='runTest', num_channels=5, groups=2))
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
add_cases(suite)
add_error_cases(suite)
return suite
if __name__ == '__main__':
unittest.main()
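For the forward Conv3D cases the spatial output follows the usual formula, applied per dimension: out = (in + 2*pad - dilation*(k - 1) - 1) // stride + 1. Checked against the default configuration above (8x8x8 input, 3x3x3 filter, stride 1, no padding):

def conv_out(size, k, stride=1, pad=0, dilation=1):
    # spatial output length of a forward convolution
    return (size + 2 * pad - dilation * (k - 1) - 1) // stride + 1

assert conv_out(8, 3) == 6              # 8x8x8 -> 6x6x6
assert conv_out(8, 3, stride=2) == 3    # (8 - 3) // 2 + 1
assert conv_out(8, 3, dilation=2) == 4  # effective filter size 5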
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import paddle.fluid.initializer as I
import unittest
class Conv3DTransposeTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=2,
spartial_shape=(8, 8, 8),
num_channels=6,
num_filters=8,
filter_size=3,
output_size=None,
padding=0,
stride=1,
dilation=1,
groups=1,
act=None,
no_bias=False,
use_cudnn=True,
data_format="NCDHW",
dtype="float32"):
super(Conv3DTransposeTestCase, self).__init__(methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.num_filters = num_filters
self.spartial_shape = spartial_shape
self.filter_size = filter_size
self.output_size = output_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.act = act
self.no_bias = no_bias
self.use_cudnn = use_cudnn
self.data_format = data_format
self.dtype = dtype
def setUp(self):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
input_shape = (self.batch_size, ) + self.spartial_shape + (
self.num_channels, )
else:
input_shape = (self.batch_size, self.num_channels
) + self.spartial_shape
self.input = np.random.randn(*input_shape).astype(self.dtype)
if isinstance(self.filter_size, int):
filter_size = [self.filter_size] * 3
else:
filter_size = self.filter_size
self.weight_shape = weight_shape = (self.num_channels, self.num_filters
// self.groups) + tuple(filter_size)
self.weight = np.random.uniform(
-1, 1, size=weight_shape).astype(self.dtype)
if self.no_bias:
self.bias = None
else:
self.bias = np.random.uniform(
-1, 1, size=(self.num_filters, )).astype(self.dtype)
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1, -1, self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
weight_attr = I.NumpyArrayInitializer(self.weight)
if self.bias is None:
bias_attr = False
else:
bias_attr = I.NumpyArrayInitializer(self.bias)
y_var = fluid.layers.conv3d_transpose(
x_var,
self.num_filters,
filter_size=self.filter_size,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
param_attr=weight_attr,
bias_attr=bias_attr,
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
feed_dict = {"input": self.input}
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def functional(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
input_shape = (-1, -1, -1, -1, self.num_channels) \
if self.channel_last else (-1, self.num_channels, -1, -1, -1)
x_var = fluid.data("input", input_shape, dtype=self.dtype)
w_var = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
b_var = fluid.data(
"bias", (self.num_filters, ), dtype=self.dtype)
y_var = F.conv3d_transpose(
x_var,
w_var,
None if self.no_bias else b_var,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format)
feed_dict = {"input": self.input, "weight": self.weight}
if self.bias is not None:
feed_dict["bias"] = self.bias
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
return y_np
def paddle_nn_layer(self):
x_var = dg.to_variable(self.input)
conv = nn.Conv3DTranspose(
self.num_channels,
self.num_filters,
self.filter_size,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format,
dtype=self.dtype)
conv.weight.set_value(self.weight)
if not self.no_bias:
conv.bias.set_value(self.bias)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
    def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional(place)
with dg.guard(place):
result3 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self._test_equivalence(place)
class Conv3DTransposeErrorTestCase(Conv3DTransposeTestCase):
def runTest(self):
place = fluid.CPUPlace()
with dg.guard(place):
with self.assertRaises(ValueError):
self.paddle_nn_layer()
def add_cases(suite):
suite.addTest(Conv3DTransposeTestCase(methodName='runTest', act="tanh"))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', stride=[1, 2, 1], dilation=2, no_bias=True))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest',
output_size=[12, 19, 12],
stride=[1, 2, 1],
dilation=2))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', stride=2, dilation=(2, 1, 2)))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', padding="valid"))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', padding='valid'))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', filter_size=1, padding=(2, 3, 1)))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', padding=[1, 2, 2, 3, 2, 1]))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest',
padding=[[0, 0], [0, 0], [2, 3], [1, 2], [2, 1]]))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', data_format="NDHWC"))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest',
data_format="NDHWC",
padding=[[0, 0], [1, 1], [2, 2], [3, 3], [0, 0]]))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', groups=2, padding="valid"))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest',
num_filters=6,
num_channels=3,
groups=3,
use_cudnn=False,
act="sigmoid",
padding="valid"))
def add_error_cases(suite):
suite.addTest(
Conv3DTransposeErrorTestCase(
methodName='runTest', use_cudnn="not_valid"))
suite.addTest(
Conv3DTransposeErrorTestCase(
methodName='runTest', num_channels=5, groups=2))
suite.addTest(
Conv3DTransposeErrorTestCase(
methodName='runTest', output_size="not_valid"))
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
add_cases(suite)
add_error_cases(suite)
return suite
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.io import Dataset
class TestDatasetAbstract(unittest.TestCase):
def test_main(self):
dataset = Dataset()
try:
d = dataset[0]
self.assertTrue(False)
except NotImplementedError:
pass
try:
l = len(dataset)
self.assertTrue(False)
except NotImplementedError:
pass
if __name__ == '__main__':
unittest.main()
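Dataset here is a map-style abstract base: the two NotImplementedError checks above pin down the contract, and a concrete subclass only has to supply __getitem__ and __len__ (as RandomDataset does earlier in this diff). A minimal sketch assuming nothing beyond paddle.io.Dataset:

import numpy as np
from paddle.io import Dataset

class SquaresDataset(Dataset):  # hypothetical example, not part of the diff
    def __init__(self, n):
        self.n = n

    def __getitem__(self, idx):
        return np.array([idx * idx], dtype='int64')

    def __len__(self):
        return self.n

ds = SquaresDataset(4)
assert len(ds) == 4 and ds[3][0] == 9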
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestFunctionalConv2D(TestCase):
batch_size = 4
spatial_shape = (16, 16)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 2
else:
filter_shape = tuple(self.filter_shape)
self.weight = np.random.uniform(
-1, 1, (self.out_channels, self.in_channels // self.groups
) + filter_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(-1, 1, (
self.out_channels, )).astype(self.dtype)
self.channel_last = (self.data_format == "NHWC")
if self.channel_last:
self.input_shape = (self.batch_size, ) + self.spatial_shape + (
self.in_channels, )
else:
self.input_shape = (self.batch_size, self.in_channels
) + self.spatial_shape
self.input = np.random.uniform(-1, 1,
self.input_shape).astype(self.dtype)
def static_graph_case_1(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1),
dtype=self.dtype)
y = fluid.layers.conv2d(
x,
self.out_channels,
self.filter_shape,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def static_graph_case_2(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv2d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
if not self.no_bias:
feed_dict["bias"] = self.bias
out, = exe.run(main, feed=feed_dict, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard(self.place):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv2d(
x,
weight,
bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
act=self.act,
groups=self.groups,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
out = y.numpy()
return out
def _test_identity(self):
self.prepare()
out1 = self.static_graph_case_1()
out2 = self.static_graph_case_2()
out3 = self.dygraph_case()
np.testing.assert_array_almost_equal(out1, out2)
np.testing.assert_array_almost_equal(out2, out3)
def test_identity_cpu(self):
self.place = fluid.CPUPlace()
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
class TestFunctionalConv2DError(TestCase):
batch_size = 4
spatial_shape = (16, 16)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
def test_exception(self):
self.prepare()
with self.assertRaises(ValueError):
self.static_graph_case()
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 2
else:
filter_shape = tuple(self.filter_shape)
self.weight_shape = (self.out_channels, self.in_channels // self.groups
) + filter_shape
self.bias_shape = (self.out_channels, )
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
self.channel_last = self.data_format == "NHWC"
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv2d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
class TestFunctionalConv2DCase2(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase3(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 3, 1]
self.stride = 2
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase4(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 1, 2, 2]
self.stride = 1
self.dilation = 2
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase5(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [2, 2], [0, 0]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase6(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [2, 2]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DCase7(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 6
self.out_channels = 8
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DCase8(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 6
self.out_channels = 12
self.filter_shape = 3
self.padding = "valid"
self.stride = 1
self.dilation = 1
self.groups = 6
self.no_bias = True
self.act = None
self.use_cudnn = False
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 2], [3, 4], [5, 6]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "not_valid"
class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 3
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase6(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = "not_valid"
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "not_valid"
class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = -5
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [3, 2], [1, 2]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase10(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NHWC"
if __name__ == "__main__":
unittest.main()
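Taken together, the cases above enumerate the padding spellings F.conv2d accepts: the strings "same"/"valid", a single int, [pad_h, pad_w], [top, bottom, left, right], or per-dimension [before, after] pairs in which the batch and channel entries must be [0, 0]; the error cases feed a five-element list and nonzero channel padding. A hedged sketch of a normalizer enforcing those rules for NCHW (illustrative only, not Paddle's internal implementation):

def normalize_padding_nchw(padding):
    # returns 'same'/'valid' or [top, bottom, left, right]; raises on bad input
    if isinstance(padding, str):
        if padding not in ("same", "valid"):
            raise ValueError("padding string must be 'same' or 'valid'")
        return padding
    if isinstance(padding, int):
        return [padding] * 4
    if all(isinstance(p, (list, tuple)) for p in padding):
        if len(padding) != 4 or any(list(p) != [0, 0] for p in padding[:2]):
            raise ValueError("batch/channel padding must be [0, 0]")
        return list(padding[2]) + list(padding[3])
    if len(padding) == 2:  # [pad_h, pad_w]
        return [padding[0], padding[0], padding[1], padding[1]]
    if len(padding) == 4:  # [top, bottom, left, right]
        return list(padding)
    raise ValueError("unsupported padding: %r" % (padding,))

assert normalize_padding_nchw([1, 2]) == [1, 1, 2, 2]
assert normalize_padding_nchw([[0, 0], [0, 0], [1, 1], [2, 2]]) == [1, 1, 2, 2]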
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestFunctionalConv2D(TestCase):
batch_size = 4
spatial_shape = (16, 16)
dtype = "float32"
output_size = None
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 2
else:
filter_shape = tuple(self.filter_shape)
self.weight = np.random.uniform(
-1, 1, (self.in_channels, self.out_channels // self.groups
) + filter_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(-1, 1, (
self.out_channels, )).astype(self.dtype)
self.channel_last = (self.data_format == "NHWC")
if self.channel_last:
self.input_shape = (self.batch_size, ) + self.spatial_shape + (
self.in_channels, )
else:
self.input_shape = (self.batch_size, self.in_channels
) + self.spatial_shape
self.input = np.random.uniform(-1, 1,
self.input_shape).astype(self.dtype)
def static_graph_case_1(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1),
dtype=self.dtype)
y = fluid.layers.conv2d_transpose(
x,
self.out_channels,
output_size=self.output_size,
filter_size=self.filter_shape,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def static_graph_case_2(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv2d_transpose(
x,
weight,
None if self.no_bias else bias,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
if not self.no_bias:
feed_dict["bias"] = self.bias
out, = exe.run(main, feed=feed_dict, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard(self.place):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv2d_transpose(
x,
weight,
bias,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
act=self.act,
groups=self.groups,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
out = y.numpy()
return out
def _test_identity(self):
self.prepare()
out1 = self.static_graph_case_1()
out2 = self.static_graph_case_2()
out3 = self.dygraph_case()
np.testing.assert_array_almost_equal(out1, out2)
np.testing.assert_array_almost_equal(out2, out3)
def test_identity_cpu(self):
self.place = fluid.CPUPlace()
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
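One easy-to-miss detail in prepare() above: conv2d_transpose expects its weight laid out as (in_channels, out_channels // groups, kh, kw), the mirror image of the forward conv2d layout used in the earlier functional test. A quick check of both rules against the shapes in this diff (groups=1):

in_c, out_c, k = 3, 5, 3
conv_w = (out_c, in_c, k, k)    # forward conv2d weight shape
deconv_w = (in_c, out_c, k, k)  # conv2d_transpose weight shape
assert conv_w == (5, 3, 3, 3) and deconv_w == (3, 5, 3, 3)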
class TestFunctionalConv2DError(TestCase):
batch_size = 4
spatial_shape = (16, 16)
dtype = "float32"
output_size = None
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
def test_exception(self):
self.prepare()
with self.assertRaises(ValueError):
self.static_graph_case()
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 2
else:
filter_shape = tuple(self.filter_shape)
self.weight_shape = (self.in_channels, self.out_channels // self.groups
) + filter_shape
self.bias_shape = (self.out_channels, )
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
self.channel_last = self.data_format == "NHWC"
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv2d_transpose(
x,
weight,
None if self.no_bias else bias,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
class TestFunctionalConv2DCase2(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase3(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = True
self.act = None
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DCase4(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase5(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase6(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = "valid"
self.stride = (1, 2)
self.dilation = (2, 1)
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase7(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 4
self.filter_shape = 3
self.padding = "valid"
self.stride = (1, 2)
self.dilation = 1
self.groups = 4
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NHWC"
class TestFunctionalConv2DCase8(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 4
self.filter_shape = 3
self.padding = "valid"
self.output_size = [18, 34]
self.stride = (1, 2)
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DCase9(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [[0, 0], [1, 2], [2, 1], [0, 0]]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DCase10(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [2, 2]]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DCase11(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [1, 1, 2, 2]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DCase12(TestFunctionalConv2D):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [1, 2]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 2, 1, 3]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 2], [2, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 2], [0, 0], [2, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase5(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = -2
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase6(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = "not_valid"
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.output_size = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "not_valid"
class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
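# Equivalence suite for 3D convolution: the same computation is run through
# fluid.layers.conv3d (static), F.conv3d (static), and F.conv3d in dygraph
# mode, and the three outputs must match almost exactly.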
class TestFunctionalConv3D(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight = np.random.uniform(
-1, 1, (self.out_channels, self.in_channels // self.groups
) + filter_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(-1, 1, (
self.out_channels, )).astype(self.dtype)
self.channel_last = (self.data_format == "NDHWC")
if self.channel_last:
self.input_shape = (self.batch_size, ) + self.spatial_shape + (
self.in_channels, )
else:
self.input_shape = (self.batch_size, self.in_channels
) + self.spatial_shape
self.input = np.random.uniform(-1, 1,
self.input_shape).astype(self.dtype)
def static_graph_case_1(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
y = fluid.layers.conv3d(
x,
self.out_channels,
self.filter_shape,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def static_graph_case_2(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv3d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
if not self.no_bias:
feed_dict["bias"] = self.bias
out, = exe.run(main, feed=feed_dict, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard(self.place):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv3d(
x,
weight,
bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
act=self.act,
groups=self.groups,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
out = y.numpy()
return out
def _test_identity(self):
self.prepare()
out1 = self.static_graph_case_1()
out2 = self.static_graph_case_2()
out3 = self.dygraph_case()
np.testing.assert_array_almost_equal(out1, out2)
np.testing.assert_array_almost_equal(out2, out3)
def test_identity_cpu(self):
self.place = fluid.CPUPlace()
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
class TestFunctionalConv3DError(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
def test_exception(self):
self.prepare()
with self.assertRaises(ValueError):
self.static_graph_case()
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight_shape = (self.out_channels, self.in_channels // self.groups
) + filter_shape
self.bias_shape = (self.out_channels, )
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv3d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
class TestFunctionalConv3DCase2(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DCase3(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 3, 1, 2, 3]
self.stride = 2
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DCase4(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 1, 2, 2, 3, 3]
self.stride = 1
self.dilation = 2
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DCase5(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [2, 2], [1, 1], [0, 0]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DCase6(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [2, 2], [2, 2]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DCase7(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 6
self.out_channels = 8
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DCase8(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 6
self.out_channels = 12
self.filter_shape = 3
self.padding = "valid"
self.stride = 1
self.dilation = 1
self.groups = 6
self.no_bias = True
self.act = None
self.use_cudnn = False
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase2(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [1, 2], [3, 4], [5, 6]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase3(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "not_valid"
class TestFunctionalConv3DErrorCase4(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 3
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase6(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = "not_valid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase7(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "not_valid"
class TestFunctionalConv3DErrorCase8(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase9(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = -5
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [3, 2], [1, 2], [1, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NDHWC"
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
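# Equivalence suite for 3D transposed convolution, mirroring the conv3d tests:
# fluid.layers.conv3d_transpose, static F.conv3d_transpose, and dygraph
# F.conv3d_transpose must produce matching outputs.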
class TestFunctionalConv3DTranspose(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
output_size = None
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight = np.random.uniform(
-1, 1, (self.in_channels, self.out_channels // self.groups
) + filter_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(-1, 1, (
self.out_channels, )).astype(self.dtype)
self.channel_last = (self.data_format == "NDHWC")
if self.channel_last:
self.input_shape = (self.batch_size, ) + self.spatial_shape + (
self.in_channels, )
else:
self.input_shape = (self.batch_size, self.in_channels
) + self.spatial_shape
self.input = np.random.uniform(-1, 1,
self.input_shape).astype(self.dtype)
def static_graph_case_1(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
y = fluid.layers.conv3d_transpose(
x,
self.out_channels,
output_size=self.output_size,
filter_size=self.filter_shape,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def static_graph_case_2(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv3d_transpose(
x,
weight,
None if self.no_bias else bias,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
if not self.no_bias:
feed_dict["bias"] = self.bias
out, = exe.run(main, feed=feed_dict, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard(self.place):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv3d_transpose(
x,
weight,
bias,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
act=self.act,
groups=self.groups,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
out = y.numpy()
return out
def _test_identity(self):
self.prepare()
out1 = self.static_graph_case_1()
out2 = self.static_graph_case_2()
out3 = self.dygraph_case()
np.testing.assert_array_almost_equal(out1, out2)
np.testing.assert_array_almost_equal(out2, out3)
def test_identity_cpu(self):
self.place = fluid.CPUPlace()
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
class TestFunctionalConv3DTransposeError(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
output_size = None
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
def test_exception(self):
self.prepare()
with self.assertRaises(ValueError):
self.static_graph_case()
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight_shape = (self.in_channels, self.out_channels // self.groups
) + filter_shape
self.bias_shape = (self.out_channels, )
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv3d_transpose(
x,
weight,
None if self.no_bias else bias,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
class TestFunctionalConv3DTransposeCase2(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeCase3(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeCase4(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = True
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeCase5(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = "valid"
self.stride = (1, 2, 1)
self.dilation = (2, 1, 1)
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeCase6(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 4
self.filter_shape = 3
self.padding = "valid"
self.stride = (1, 2, 1)
self.dilation = 1
self.groups = 4
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeCase7(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 4
self.filter_shape = 3
self.padding = "valid"
self.output_size = (10, 17, 10)
self.stride = (1, 2, 1)
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeCase8(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [[0, 0], [1, 2], [1, 2], [2, 1], [0, 0]]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeCase9(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [1, 1], [2, 2]]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeCase10(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [1, 1, 2, 2, 1, 1]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeCase11(TestFunctionalConv3DTranspose):
def setUp(self):
self.in_channels = 4
self.out_channels = 6
self.filter_shape = 3
self.padding = [1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase2(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 2, 1, 3]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeErrorCase3(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [1, 2], [2, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
class TestFunctionalConv3DTransposeErrorCase4(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 2], [1, 1], [0, 0], [2, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase5(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = -2
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase6(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = "not_valid"
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase7(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.output_size = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase8(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "not_valid"
class TestFunctionalConv3DTransposeErrorCase9(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import paddle.fluid.initializer as I
import numpy as np
import unittest
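# Checks that fluid.layers.hsigmoid, the functional F.hsigmoid, and the
# nn.HSigmoid layer compute identical results on shared numpy weights, both
# with the default tree and with a custom tree (path_table/path_code).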
class HSigmoidTestCase(unittest.TestCase):
def __init__(self,
methodName="runTest",
batch_size=4,
feature_size=6,
num_classes=8,
labels=None,
path_code=None,
path_table=None,
is_sparse=False,
dtype="float32"):
super(HSigmoidTestCase, self).__init__()
self.batch_size = batch_size
self.feature_size = feature_size
self.num_classes = num_classes
self.dtype = dtype
self.is_sparse = is_sparse
self.labels = labels
self.path_code = path_code
self.path_table = path_table
self.is_custom = path_code is not None and path_table is not None
def setUp(self):
input_shape = (self.batch_size, self.feature_size)
self.input = np.random.uniform(
-1, 1, size=input_shape).astype(self.dtype)
if self.labels is None:
self.labels = np.random.randint(
0, self.num_classes, size=(self.batch_size, 1)).astype(np.int64)
C = self.num_classes if self.is_custom else self.num_classes - 1
self.weight_shape = (C, self.feature_size)
self.weight = np.random.randn(*self.weight_shape).astype(self.dtype)
self.bias_shape = (C, 1)
self.bias = np.random.randn(*self.bias_shape).astype(self.dtype)
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, self.feature_size], dtype=self.dtype)
label = fluid.data("labels", [-1, 1], dtype="int64")
if self.is_custom:
path_table = fluid.data(
"path_table", [-1, -1], dtype="int64")
path_code = fluid.data("path_code", [-1, -1], dtype="int64")
else:
path_table = path_code = None
y = fluid.layers.hsigmoid(
x,
label,
self.num_classes,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=I.NumpyArrayInitializer(self.bias),
path_table=path_table,
path_code=path_code,
is_custom=self.is_custom,
is_sparse=self.is_sparse, )
exe = fluid.Executor(place)
exe.run(start)
feed_dict = {"input": self.input, "labels": self.labels}
if self.is_custom:
feed_dict["path_code"] = self.path_code
feed_dict["path_table"] = self.path_table
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y])
return y_np
def functional(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, self.feature_size], dtype=self.dtype)
label = fluid.data("labels", [-1, 1], dtype="int64")
if self.is_custom:
path_table = fluid.data(
"path_table", [-1, -1], dtype="int64")
path_code = fluid.data("path_code", [-1, -1], dtype="int64")
else:
path_table = path_code = None
w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
b = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.hsigmoid(
x,
label,
w,
b,
self.num_classes,
is_sparse=self.is_sparse,
path_table=path_table,
path_code=path_code)
exe = fluid.Executor(place)
exe.run(start)
feed_dict = {
"input": self.input,
"labels": self.labels,
"weight": self.weight,
"bias": self.bias
}
if self.is_custom:
feed_dict["path_code"] = self.path_code
feed_dict["path_table"] = self.path_table
y_np, = exe.run(main, feed=feed_dict, fetch_list=[y])
return y_np
def nn_layer(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
label_var = dg.to_variable(self.labels)
if self.is_custom:
path_code_var = dg.to_variable(self.path_code)
path_table_var = dg.to_variable(self.path_table)
else:
path_code_var = path_table_var = None
hierarchical_softmax = nn.HSigmoid(
self.feature_size,
self.num_classes,
is_custom=self.is_custom,
is_sparse=self.is_sparse,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=I.NumpyArrayInitializer(self.bias),
dtype=self.dtype)
y_var = hierarchical_softmax(
x_var,
label_var,
path_table=path_table_var,
path_code=path_code_var)
y_np = y_var.numpy()
return y_np
def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional(place)
result3 = self.nn_layer(place)
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
class HSigmoidTestErrorCase(HSigmoidTestCase):
def runTest(self):
place = fluid.CPUPlace()
with dg.guard(place):
with self.assertRaises(ValueError):
self.nn_layer()
def nn_layer(self):
x_var = dg.to_variable(self.input)
label_var = dg.to_variable(self.labels)
if self.is_custom:
path_code_var = dg.to_variable(self.path_code)
path_table_var = dg.to_variable(self.path_table)
else:
path_code_var = path_table_var = None
hierarchical_softmax = nn.HSigmoid(
self.feature_size,
self.num_classes,
is_custom=self.is_custom,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=I.NumpyArrayInitializer(self.bias),
dtype=self.dtype)
y_var = hierarchical_softmax(
x_var,
label_var,
path_table=path_table_var,
path_code=path_code_var)
y_np = y_var.numpy()
return y_np
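# load_tests builds the suite explicitly so the parameterized cases run: the
# default tree, a custom tree, and an invalid num_classes error case.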
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
suite.addTest(HSigmoidTestCase(methodName="runTest"))
suite.addTest(
HSigmoidTestCase(
methodName="runTest",
batch_size=4,
feature_size=6,
num_classes=8,
labels=np.array([0, 1, 4, 5]).astype(np.int64),
path_table=np.array([(0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (
0, 1, 4, -1, -1), (0, 2, -1, -1, -1)]).astype(np.int64),
path_code=np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (
1, 0, 0, -1, -1), (0, 1, -1, -1, -1)]).astype(np.int64)))
suite.addTest(HSigmoidTestErrorCase(methodName="runTest", num_classes=1))
return suite
if __name__ == "__main__":
unittest.main()
......@@ -21,7 +21,6 @@ from paddle.fluid import core
from paddle.fluid import Linear
from test_imperative_base import new_program_scope
import paddle.fluid.dygraph_utils as dygraph_utils
import paddle
class MyLayer(fluid.Layer):
......@@ -246,24 +245,6 @@ class TestImperative(unittest.TestCase):
self.assertTrue(tmp._grad_ivar() is None)
self.assertTrue(l0.weight._grad_ivar() is not None)
def test_paddle_imperative_no_grad_guard(self):
data = np.array([[2, 3], [4, 5]]).astype('float32')
with fluid.dygraph.guard():
l0 = fluid.Linear(2, 2)
self.assertTrue(l0.weight._grad_ivar() is None)
l1 = fluid.Linear(2, 2)
with paddle.imperative.no_grad():
self.assertTrue(l1.weight.stop_gradient is False)
tmp = l1.weight * 2
self.assertTrue(tmp.stop_gradient)
x = fluid.dygraph.to_variable(data)
y = l0(x) + tmp
o = l1(y)
o.backward()
self.assertTrue(tmp._grad_ivar() is None)
self.assertTrue(l0.weight._grad_ivar() is not None)
def test_sum_op(self):
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
......
......@@ -17,7 +17,6 @@ from __future__ import print_function
import unittest
import paddle.fluid as fluid
import numpy as np
import paddle
class MyLayer(fluid.Layer):
......@@ -32,20 +31,12 @@ class MyLayer(fluid.Layer):
class TestImperativeContainer(unittest.TestCase):
def fluid_dygraph_list(self):
return fluid.dygraph.LayerList(
[fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)])
def paddle_imperative_list(self):
return paddle.imperative.LayerList(
[fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)])
def layer_list(self, use_fluid_api):
def test_layer_list(self):
data_np = np.random.uniform(-1, 1, [5, 1]).astype('float32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data_np)
layerlist = self.fluid_dygraph_list(
) if use_fluid_api else self.paddle_imperative_list()
layerlist = fluid.dygraph.LayerList(
[fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)])
size = len(layerlist)
model = MyLayer(layerlist)
......@@ -84,10 +75,6 @@ class TestImperativeContainer(unittest.TestCase):
self.assertListEqual(res8.shape, [5, 3**3])
res8.backward()
def test_layer_list(self):
self.layer_list(True)
self.layer_list(False)
if __name__ == '__main__':
unittest.main()
......@@ -17,25 +17,13 @@ from __future__ import print_function
import unittest
import paddle.fluid as fluid
import numpy as np
import paddle
class MyLayer(fluid.Layer):
def __init__(self, num_stacked_param, use_fluid_api):
def __init__(self, num_stacked_param):
super(MyLayer, self).__init__()
# create ParameterList with iterable Parameters
self.params = self.fluid_dygraph_ParameterList(
num_stacked_param
) if use_fluid_api else self.paddle_imperative_ParameterList(
num_stacked_param)
def fluid_dygraph_ParameterList(self, num_stacked_param):
return fluid.dygraph.ParameterList(
[fluid.layers.create_parameter(
shape=[2, 2], dtype='float32')] * num_stacked_param)
def paddle_imperative_ParameterList(self, num_stacked_param):
return paddle.imperative.ParameterList(
self.params = fluid.dygraph.ParameterList(
[fluid.layers.create_parameter(
shape=[2, 2], dtype='float32')] * num_stacked_param)
......@@ -54,12 +42,12 @@ class MyLayer(fluid.Layer):
class TestImperativeContainerParameterList(unittest.TestCase):
def paramter_list(self, use_fluid_api):
def test_paramter_list(self):
data_np = np.random.uniform(-1, 1, [5, 2]).astype('float32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data_np)
num_stacked_param = 4
model = MyLayer(num_stacked_param, use_fluid_api)
model = MyLayer(num_stacked_param)
self.assertEqual(len(model.params), num_stacked_param)
res = model(x)
self.assertListEqual(res.shape, [5, 2])
......@@ -79,10 +67,6 @@ class TestImperativeContainerParameterList(unittest.TestCase):
loss = fluid.layers.reduce_mean(res)
loss.backward()
def test_paramter_list(self):
self.paramter_list(True)
self.paramter_list(False)
if __name__ == '__main__':
unittest.main()
......@@ -17,7 +17,6 @@ import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.io import Dataset, DataLoader
def get_random_images_and_labels(image_shape, label_shape):
......@@ -36,20 +35,6 @@ def batch_generator_creator(batch_size, batch_num):
return __reader__
class RandomDataset(Dataset):
def __init__(self, sample_num):
self.sample_num = sample_num
def __getitem__(self, idx):
np.random.seed(idx)
image = np.random.random([784]).astype('float32')
label = np.random.randint(0, 9, (1, )).astype('int64')
return image, label
def __len__(self):
return self.sample_num
class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase):
def setUp(self):
self.batch_size = 8
......@@ -89,19 +74,5 @@ class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase):
self.run_one_epoch_with_break(loader)
class TestMultiProcessDataLoaderMmapFdsClear(TestDygraphDataLoaderMmapFdsClear):
def prepare_data_loader(self):
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
dataset = RandomDataset(self.batch_size * self.batch_num)
loader = DataLoader(
dataset,
places=place,
batch_size=self.batch_size,
drop_last=True,
num_workers=2)
return loader
if __name__ == '__main__':
unittest.main()
......@@ -43,7 +43,7 @@ class MLP(fluid.Layer):
class TestDataParallelStateDict(unittest.TestCase):
def test_data_parallel_state_dict(self):
with fluid.dygraph.guard():
strategy = paddle.imperative.prepare_context()
strategy = dygraph.parallel.prepare_context()
mlp = MLP()
parallel_mlp = dygraph.parallel.DataParallel(mlp, strategy)
......
......@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
from utils import DyGraphProgramDescTracerTestHelper, is_equal_program
from paddle.fluid.dygraph import TracedLayer
class SimpleImgConvPool(fluid.dygraph.Layer):
......@@ -153,7 +154,7 @@ class TestImperativeMnist(unittest.TestCase):
label.stop_gradient = True
if batch_id % 10 == 0:
cost, traced_layer = paddle.imperative.TracedLayer.trace(
cost, traced_layer = TracedLayer.trace(
mnist, inputs=img)
if program is not None:
self.assertTrue(program, traced_layer.program)
......
......@@ -15,7 +15,6 @@
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle
class MyLayer(fluid.Layer):
......@@ -67,7 +66,7 @@ class TestImperativeNamedParameters(unittest.TestCase):
fc1 = fluid.Linear(10, 3)
fc2 = fluid.Linear(3, 10, bias_attr=False)
custom = MyLayer(3, 10)
model = paddle.imperative.Sequential(fc1, fc2, custom)
model = fluid.dygraph.Sequential(fc1, fc2, custom)
named_parameters = list(model.named_parameters())
expected_named_parameters = list()
......
......@@ -26,7 +26,6 @@ from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
from test_imperative_base import new_program_scope
import numpy as np
import six
import paddle
class SimpleLSTMRNN(fluid.Layer):
......@@ -881,18 +880,17 @@ class TestDygraphPtbRnn(unittest.TestCase):
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding([10, 10])
state_dict = emb.state_dict()
paddle.imperative.save_dygraph(state_dict,
os.path.join('saved_dy', 'emb_dy'))
fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))
para_state_dict, opti_state_dict = paddle.imperative.load_dygraph(
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy'))
self.assertTrue(opti_state_dict == None)
para_state_dict, opti_state_dict = paddle.imperative.load_dygraph(
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy.pdparams'))
para_state_dict, opti_state_dict = paddle.imperative.load_dygraph(
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy.pdopt'))
......
......@@ -21,10 +21,9 @@ from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
import numpy as np
import paddle.fluid.core as core
import paddle
class SimpleNet(paddle.imperative.Layer):
class SimpleNet(fluid.Layer):
def __init__(self, vocab_size, hidden_size, dtype):
super(SimpleNet, self).__init__()
self.emb = fluid.dygraph.Embedding(
......@@ -47,13 +46,13 @@ class TestSimpleNet(unittest.TestCase):
for place in places:
for dtype in ["float32", "float64"]:
for sort_sum_gradient in [True, False]:
with paddle.imperative.guard(place):
backward_strategy = paddle.imperative.BackwardStrategy()
with fluid.dygraph.guard(place):
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = sort_sum_gradient
# grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)
input_word = np.array([[1, 2], [2, 1]]).astype('int64')
input = paddle.imperative.to_variable(input_word)
input = to_variable(input_word)
simplenet = SimpleNet(20, 32, dtype)
adam = SGDOptimizer(
......
......@@ -1258,61 +1258,6 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_ret, dy_rlt_value))
self.assertTrue(np.allclose(static_ret, static_ret2))
def test_instance_norm(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
shape = (2, 4, 3, 3)
input = np.random.random(shape).astype('float32')
with self.static_graph():
X = fluid.layers.data(
name='X', shape=shape, dtype='float32', append_batch_size=False)
ret = layers.instance_norm(input=X)
static_ret = self.get_static_graph_result(
feed={'X': input}, fetch_list=[ret])[0]
with self.static_graph():
X = fluid.layers.data(
name='X', shape=shape, dtype='float32', append_batch_size=False)
instanceNorm = nn.InstanceNorm(num_channels=shape[1])
ret = instanceNorm(X)
static_ret2 = self.get_static_graph_result(
feed={'X': input}, fetch_list=[ret])[0]
with self.dynamic_graph():
instanceNorm = nn.InstanceNorm(num_channels=shape[1])
dy_ret = instanceNorm(base.to_variable(input))
dy_rlt_value = dy_ret.numpy()
with self.dynamic_graph():
instanceNorm = paddle.nn.InstanceNorm(num_channels=shape[1])
dy_ret = instanceNorm(base.to_variable(input))
dy_rlt_value2 = dy_ret.numpy()
self.assertTrue(np.allclose(static_ret, dy_rlt_value))
self.assertTrue(np.allclose(static_ret, dy_rlt_value2))
self.assertTrue(np.allclose(static_ret, static_ret2))
with self.static_graph():
# the input of InstanceNorm must be Variable.
def test_Variable():
instanceNorm = paddle.nn.InstanceNorm(num_channels=shape[1])
ret1 = instanceNorm(input)
self.assertRaises(TypeError, test_Variable)
# the input dtype of InstanceNorm must be float32 or float64
def test_type():
input = np.random.random(shape).astype('int32')
instanceNorm = paddle.nn.InstanceNorm(num_channels=shape[1])
ret2 = instanceNorm(input)
self.assertRaises(TypeError, test_type)
def test_spectral_norm(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
......
......@@ -16,7 +16,6 @@ import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn as nn
def stable_softmax(x):
......@@ -35,40 +34,6 @@ def ref_log_softmax(x, axis=None, dtype=None):
return np.log(out)
class TestNNLogSoftmaxAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.x_shape = [2, 3, 4, 5]
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
def check_api(self, place=fluid.CPUPlace(), axis=None):
ref_out = ref_log_softmax(self.x, axis)
main_program = fluid.Program()
mylogsoftmax = nn.LogSoftmax(axis)
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
y = mylogsoftmax(x)
exe = fluid.Executor(place)
out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
self.assertTrue(np.allclose(out[0], ref_out))
with fluid.dygraph.guard(place):
x = fluid.dygraph.to_variable(self.x)
y = mylogsoftmax(x)
self.assertTrue(np.allclose(y.numpy(), ref_out))
def test_check_api(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
for axis in [None, 2]:
self.check_api(place, axis)
class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
def setUp(self):
self.init_data()
......@@ -80,7 +45,6 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
def check_api(self, place=fluid.CPUPlace(), axis=None, dtype=None):
ref_out = ref_log_softmax(self.x, axis, dtype)
main_program = fluid.Program()
mylogsoftmax = nn.LogSoftmax(axis)
with fluid.program_guard(main_program):
x = fluid.data(name='x', shape=self.x_shape)
y = fluid.layers.log_softmax(x, axis, dtype)
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
from paddle.framework import manual_seed
from paddle.fluid.framework import Program, default_main_program, default_startup_program
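# manual_seed is expected to set random_seed on the default programs and on
# every Program constructed after the call; programs created beforehand start
# with the default seed of 0.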
class TestManualSeed(unittest.TestCase):
def test_manual_seed(self):
local_program = Program()
local_main_prog = default_main_program()
local_start_prog = default_startup_program()
self.assertEqual(0, local_program.random_seed)
self.assertEqual(0, local_main_prog.random_seed)
self.assertEqual(0, local_start_prog.random_seed)
manual_seed(102)
global_program1 = Program()
global_program2 = Program()
global_main_prog = default_main_program()
global_start_prog = default_startup_program()
self.assertEqual(102, global_program1.random_seed)
self.assertEqual(102, global_program2.random_seed)
self.assertEqual(102, global_main_prog.random_seed)
self.assertEqual(102, global_start_prog.random_seed)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import sys
import six
import time
import unittest
import multiprocessing
import numpy as np
import paddle.fluid as fluid
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.fluid.dygraph.nn import Linear
from paddle.fluid.dygraph.base import to_variable
from test_multiprocess_dataloader_static import RandomDataset, prepare_places
EPOCH_NUM = 5
BATCH_SIZE = 16
IMAGE_SIZE = 784
SAMPLE_NUM = 400
CLASS_NUM = 10
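# A small fully connected network with constant-initialized weights, so that
# repeated runs with different DataLoader worker counts are numerically
# comparable.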
class SimpleFCNet(fluid.dygraph.Layer):
def __init__(self):
super(SimpleFCNet, self).__init__()
param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.8))
bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.5))
self._fcs = []
in_channel = IMAGE_SIZE
for hidden_size in [10, 20, 30]:
self._fcs.append(
Linear(
in_channel,
hidden_size,
act='tanh',
param_attr=param_attr,
bias_attr=bias_attr))
in_channel = hidden_size
self._fcs.append(
Linear(
in_channel,
CLASS_NUM,
act='softmax',
param_attr=param_attr,
bias_attr=bias_attr))
def forward(self, image):
out = image
for fc in self._fcs:
out = fc(out)
return out
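# Runs the same training loop with num_workers 0 and 2 and requires the loss
# curves to agree within 1%, confirming multi-process loading is equivalent
# to single-process loading.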
class TestDygraphDataLoader(unittest.TestCase):
def run_main(self, num_workers, places):
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
with fluid.dygraph.guard(places[0]):
fc_net = SimpleFCNet()
optimizer = fluid.optimizer.Adam(parameter_list=fc_net.parameters())
dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
dataloader = DataLoader(
dataset,
places=places,
num_workers=num_workers,
batch_size=BATCH_SIZE,
drop_last=True)
assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE)
step_list = []
loss_list = []
start_t = time.time()
for _ in six.moves.range(EPOCH_NUM):
step = 0
for image, label in dataloader():
out = fc_net(image)
loss = fluid.layers.cross_entropy(out, label)
avg_loss = fluid.layers.reduce_mean(loss)
avg_loss.backward()
optimizer.minimize(avg_loss)
fc_net.clear_gradients()
loss_list.append(np.mean(avg_loss.numpy()))
step += 1
step_list.append(step)
end_t = time.time()
ret = {
"time": end_t - start_t,
"step": step_list,
"loss": np.array(loss_list)
}
print("time cost", ret['time'], 'step_list', ret['step'])
return ret
def test_main(self):
# dynamic graph does not run with_data_parallel
for p in prepare_places(False):
results = []
for num_workers in [0, 2]:
print(self.__class__.__name__, p, num_workers)
sys.stdout.flush()
ret = self.run_main(num_workers=num_workers, places=p)
results.append(ret)
diff = np.max(
np.abs(results[0]['loss'] - results[1]['loss']) /
np.abs(results[0]['loss']))
self.assertLess(diff, 1e-2)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import sys
import six
import time
import unittest
import multiprocessing
import numpy as np
import paddle.fluid as fluid
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.fluid.dygraph.nn import Linear
from paddle.fluid.dygraph.base import to_variable
class RandomDataset(Dataset):
def __init__(self, sample_num):
self.sample_num = sample_num
def __getitem__(self, idx):
np.random.seed(idx)
image = np.random.random([784]).astype('float32')
label = np.random.randint(0, 9, (1, )).astype('int64')
return image, label
def __len__(self):
return self.sample_num
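# Verifies DataLoader argument validation: each invalid construction below is
# expected to raise AssertionError.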
class TestDataLoaderAssert(unittest.TestCase):
def test_main(self):
place = fluid.cpu_places()[0]
with fluid.dygraph.guard(place):
dataset = RandomDataset(100)
batch_sampler = BatchSampler(dataset=dataset, batch_size=4)
# dataset is not an instance of Dataset
try:
loader = DataLoader(dataset=batch_sampler, places=place)
self.assertTrue(False)
except AssertionError:
pass
# places is None
try:
loader = DataLoader(dataset=dataset, places=None)
self.assertTrue(False)
except AssertionError:
pass
# num_workers < 0
try:
loader = DataLoader(
dataset=dataset, places=place, num_workers=-1)
self.assertTrue(False)
except AssertionError:
pass
# timeout < 0
try:
loader = DataLoader(dataset=dataset, places=place, timeout=-1)
self.assertTrue(False)
except AssertionError:
pass
# batch_sampler is not an instance of BatchSampler
try:
loader = DataLoader(
dataset=dataset, places=place, batch_sampler=dataset)
self.assertTrue(False)
except AssertionError:
pass
# set batch_sampler and shuffle/batch_size/drop_last
try:
loader = DataLoader(
dataset=dataset,
places=place,
batch_sampler=batch_sampler,
shuffle=True,
drop_last=True)
self.assertTrue(False)
except AssertionError:
pass
# set batch_sampler correctly
try:
loader = DataLoader(
dataset=dataset, places=place, batch_sampler=batch_sampler)
self.assertTrue(True)
except AssertionError:
self.assertTrue(False)
# CI coverage cannot record stubs in a subprocess,
# HACK: call _worker_loop in the main process here
class TestDataLoaderWorkerLoop(unittest.TestCase):
def run_without_worker_done(self, use_shared_memory=True):
try:
place = fluid.cpu_places()[0]
with fluid.dygraph.guard(place):
dataset = RandomDataset(800)
# test init_fn
def _init_fn(worker_id):
pass
# test collate_fn
def _collate_fn(sample_list):
return [
np.stack(
s, axis=0) for s in list(zip(*sample_list))
]
loader = DataLoader(
dataset,
num_workers=1,
places=place,
use_shared_memory=use_shared_memory)
assert loader.num_workers > 0, \
"raises AssertionError and passes on Mac and Windows"
loader = iter(loader)
print("loader length", len(loader))
indices_queue = multiprocessing.Queue()
for i in range(10):
indices_queue.put([i, i + 10])
indices_queue.put(None)
loader._worker_loop(
loader._dataset, indices_queue, loader._data_queue,
loader._workers_done_event, _collate_fn, _init_fn, 0)
self.assertTrue(False)
except AssertionError:
pass
except Exception:
self.assertTrue(False)
def run_with_worker_done(self, use_shared_memory=True):
try:
place = fluid.cpu_places()[0]
with fluid.dygraph.guard(place):
dataset = RandomDataset(800)
# test init_fn
def _init_fn(worker_id):
pass
# test collate_fn
def _collate_fn(sample_list):
return [
np.stack(
s, axis=0) for s in list(zip(*sample_list))
]
loader = DataLoader(
dataset,
num_workers=1,
places=place,
use_shared_memory=use_shared_memory)
assert loader.num_workers > 0, \
"raises AssertionError and passes on Mac and Windows"
loader = iter(loader)
print("loader length", len(loader))
indices_queue = multiprocessing.Queue()
for i in range(10):
indices_queue.put([i, i + 10])
indices_queue.put(None)
loader._workers_done_event.set()
loader._worker_loop(
loader._dataset, indices_queue, loader._data_queue,
loader._workers_done_event, _collate_fn, _init_fn, 0)
self.assertTrue(True)
except AssertionError:
pass
except Exception:
self.assertTrue(False)
def test_main(self):
for use_shared_memory in [True, False]:
self.run_without_worker_done(use_shared_memory)
self.run_with_worker_done(use_shared_memory)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import sys
import six
import time
import unittest
import multiprocessing
import numpy as np
import paddle.fluid as fluid
from paddle.io import Dataset, BatchSampler, DataLoader
EPOCH_NUM = 5
BATCH_SIZE = 16
IMAGE_SIZE = 784
SAMPLE_NUM = 400
CLASS_NUM = 10
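# __getitem__ seeds numpy with the sample index, so every worker generates
# identical data for a given index regardless of num_workers.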
class RandomDataset(Dataset):
def __init__(self, sample_num, class_num):
self.sample_num = sample_num
self.class_num = class_num
def __getitem__(self, idx):
np.random.seed(idx)
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, self.class_num - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.sample_num
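# Builds a static-graph FC network mirroring the dygraph SimpleFCNet above:
# constant initializers keep losses reproducible across DataLoader
# configurations.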
def simple_fc_net_static():
startup_prog = fluid.Program()
main_prog = fluid.Program()
startup_prog.random_seed = 1
main_prog.random_seed = 1
with fluid.unique_name.guard():
with fluid.program_guard(main_prog, startup_prog):
image = fluid.data(
name='image', shape=[None, IMAGE_SIZE], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
hidden = image
param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.8))
bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.5))
for hidden_size in [10, 20, 30]:
hidden = fluid.layers.fc(hidden,
size=hidden_size,
act='tanh',
param_attr=param_attr,
bias_attr=bias_attr)
predict_label = fluid.layers.fc(hidden,
size=CLASS_NUM,
act='softmax',
param_attr=param_attr,
bias_attr=bias_attr)
loss = fluid.layers.reduce_mean(
fluid.layers.cross_entropy(
input=predict_label, label=label))
optimizer = fluid.optimizer.Adam()
optimizer.minimize(loss)
return startup_prog, main_prog, image, label, loss
def prepare_places(with_data_parallel, with_cpu=False, with_gpu=True):
places = []
if with_cpu:
places.append([fluid.CPUPlace()])
if with_data_parallel:
places.append([fluid.CPUPlace()] * 2)
if with_gpu and fluid.core.is_compiled_with_cuda():
tmp = fluid.cuda_places()[:2]
assert len(tmp) > 0, "no gpu detected"
if with_data_parallel:
places.append(tmp)
places.append([tmp[0]])
return places
class TestStaticDataLoader(unittest.TestCase):
def run_main(self, num_workers, places):
scope = fluid.Scope()
with fluid.scope_guard(scope):
startup_prog, main_prog, image, label, loss = simple_fc_net_static()
dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
dataloader = DataLoader(
dataset,
feed_list=[image, label],
places=places,
num_workers=num_workers,
batch_size=BATCH_SIZE,
drop_last=True)
            assert len(dataloader) == SAMPLE_NUM // BATCH_SIZE
exe = fluid.Executor(place=places[0])
exe.run(startup_prog)
prog = fluid.CompiledProgram(main_prog)
if len(places) > 1:
prog = prog.with_data_parallel(
loss_name=loss.name, places=places)
step_list = []
loss_list = []
start_t = time.time()
for _ in six.moves.range(EPOCH_NUM):
step = 0
for d in dataloader:
assert len(d) == len(places), "{} != {}".format(
len(d), len(places))
for i, item in enumerate(d):
image = item['image']
label = item['label']
assert image.shape() == [BATCH_SIZE, IMAGE_SIZE]
assert label.shape() == [BATCH_SIZE, 1]
assert image._place()._equals(places[i])
assert label._place()._equals(places[i])
L, = exe.run(program=prog,
feed=d,
fetch_list=[loss],
use_program_cache=True)
loss_list.append(np.mean(L))
step += 1
step_list.append(step)
end_t = time.time()
ret = {
"time": end_t - start_t,
"step": step_list,
"loss": np.array(loss_list)
}
print("time cost", ret['time'], 'step_list', ret['step'])
return ret
def test_main(self):
for p in prepare_places(True):
results = []
for num_workers in [0, 2]:
print(self.__class__.__name__, p, num_workers)
sys.stdout.flush()
ret = self.run_main(num_workers=num_workers, places=p)
results.append(ret)
diff = np.max(
np.abs(results[0]['loss'] - results[1]['loss']) /
np.abs(results[0]['loss']))
self.assertLess(diff, 1e-2)
if __name__ == '__main__':
unittest.main()
......@@ -17,7 +17,6 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import framework api under this directory
# __all__ = ['append_backward',
# 'gradients',
# 'Executor',
# 'global_scope',
# 'scope_guard',
# 'BuildStrategy',
# 'CompiledProgram',
# 'default_main_program',
# 'default_startup_program',
# 'create_global_var',
# 'create_parameter',
# 'create_py_reader_by_data',
# 'Print',
# 'py_func',
# 'ExecutionStrategy',
# 'in_dygraph_mode',
# 'name_scope',
# 'ParallelExecutor',
# 'ParamAttr',
# 'Program',
# 'program_guard',
# 'Variable',
# 'WeightNormParamAttr',
# 'Model',
# 'Sequential']
from . import random
from .random import manual_seed
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define framework api
# __all__ = ['set_default_dtype',
# 'get_default_dtype']
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define random api
import paddle.fluid as fluid
__all__ = ['manual_seed']
def manual_seed(seed):
"""
Set global manual seed for program
Args:
manual_seed(int): random seed for program
Returns:
None.
Examples:
.. code-block:: python
from paddle.framework import manual_seed
manual_seed(102)
"""
    fluid.default_main_program().random_seed = seed
    fluid.default_startup_program().random_seed = seed
    # also update the module-level default seed so that Programs created
    # after this call start from the same seed
    program = fluid.Program()
    program.global_seed(seed)
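A short usage sketch (hypothetical, relying only on the behavior implemented above): seed once before building any program, and randomized initializers and ops become repeatable across runs.

import paddle.fluid as fluid
from paddle.framework import manual_seed

manual_seed(102)            # seeds the default main/startup programs
assert fluid.default_main_program().random_seed == 102
prog = fluid.Program()      # new Programs start from the same global seed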
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# define api used to run in imperative mode
__all__ = [
'BackwardStrategy', 'guard', 'Layer', 'LayerList', 'load_dygraph',
'save_dygraph', 'prepare_context', 'to_variable', 'TracedLayer', 'no_grad',
'ParameterList', 'Sequential'
]
from paddle.fluid import core
from ..fluid.dygraph.base import guard, no_grad, to_variable
from ..fluid.dygraph.layers import Layer
from ..fluid.dygraph.container import LayerList, ParameterList, Sequential
from ..fluid.dygraph.checkpoint import load_dygraph, save_dygraph
from ..fluid.dygraph.parallel import prepare_context
from ..fluid.dygraph.jit import TracedLayer
BackwardStrategy = core.BackwardStrategy
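A minimal sketch of the imperative-mode entry points exported here (guard and to_variable); everything runs eagerly, with no Program or Executor involved:

import numpy as np
import paddle.imperative as imperative

with imperative.guard():    # switch to imperative (dygraph) execution
    x = imperative.to_variable(np.ones([2, 2], dtype='float32'))
    y = x * 2 + 1           # evaluated immediately
    print(y.numpy())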
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define all functions about input & output in this directory
__all__ = [
'Dataset',
'BatchSampler',
# 'Transform',
'DataLoader',
# 'load',
# 'save',
# 'load_program_state',
# 'set_program_state',
# 'load_inference_model',
# 'save_inference_model',
# 'batch',
# 'shuffle',
# 'buffered',
# 'cache',
# 'chain',
# 'firstn',
# 'compose',
# 'map_readers',
# 'xmap_readers'
]
from ..fluid.io import DataLoader
from ..fluid.dataloader import Dataset, BatchSampler
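For orientation, a small sketch of the two dataset-side classes exported here: BatchSampler only groups sample indices into batches, while DataLoader (see the tests earlier in this change) performs the actual loading. TinyDataset is a hypothetical example class.

import numpy as np
from paddle.io import Dataset, BatchSampler

class TinyDataset(Dataset):
    def __getitem__(self, idx):
        return np.array([idx]).astype('float32')
    def __len__(self):
        return 10

# sequential sampling (shuffle defaults to False); the short last batch is dropped
sampler = BatchSampler(dataset=TinyDataset(), batch_size=4, drop_last=True)
for batch_indices in sampler:
    print(batch_indices)    # [0, 1, 2, 3], then [4, 5, 6, 7]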
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the functions to calculate metric in this directory
# __all__ = ['Accuracy',
# 'Auc',
# 'ChunkEvaluator',
# 'CompositeMetric',
# 'DetectionMAP',
# 'EditDistance',
# 'Precision',
# 'Recall',
# 'accuracy',
# 'auc',
# 'chunk_eval',
# 'cos_sim',
# 'mean_iou']
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import all neural network related api under this directory,
# including layers, linear, conv, rnn etc.
from .layer import norm
__all__ = []
__all__ += norm.__all__
# TODO: define alias in nn directory
# from .clip import ErrorClipByValue #DEFINE_ALIAS
# from .clip import GradientClipByGlobalNorm #DEFINE_ALIAS
# from .clip import GradientClipByNorm #DEFINE_ALIAS
# from .clip import GradientClipByValue #DEFINE_ALIAS
# from .clip import set_gradient_clip #DEFINE_ALIAS
# from .clip import clip #DEFINE_ALIAS
# from .clip import clip_by_norm #DEFINE_ALIAS
# from .initializer import Bilinear #DEFINE_ALIAS
# from .initializer import Constant #DEFINE_ALIAS
# from .initializer import MSRA #DEFINE_ALIAS
# from .initializer import Normal #DEFINE_ALIAS
# from .initializer import TruncatedNormal #DEFINE_ALIAS
# from .initializer import Uniform #DEFINE_ALIAS
# from .initializer import Xavier #DEFINE_ALIAS
# from .decode import BeamSearchDecoder #DEFINE_ALIAS
# from .decode import Decoder #DEFINE_ALIAS
# from .decode import beam_search #DEFINE_ALIAS
# from .decode import beam_search_decode #DEFINE_ALIAS
# from .decode import crf_decoding #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .decode import dynamic_decode #DEFINE_ALIAS
# from .decode import gather_tree #DEFINE_ALIAS
# from .bin.conv import 0 #DEFINE_ALIAS
# from .control_flow import case #DEFINE_ALIAS
# from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import DynamicRNN #DEFINE_ALIAS
# from .control_flow import StaticRNN #DEFINE_ALIAS
# from .control_flow import switch_case #DEFINE_ALIAS
# from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import rnn #DEFINE_ALIAS
# from .layer.conv import Conv2D #DEFINE_ALIAS
# from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
# from .layer.conv import Conv3D #DEFINE_ALIAS
# from .layer.conv import Conv3DTranspose #DEFINE_ALIAS
# from .layer.conv import TreeConv #DEFINE_ALIAS
# from .layer.conv import Conv1D #DEFINE_ALIAS
# from .layer.loss import NCELoss #DEFINE_ALIAS
from .layer.loss import CrossEntropyLoss #DEFINE_ALIAS
# from .layer.loss import MSELoss #DEFINE_ALIAS
from .layer.loss import L1Loss #DEFINE_ALIAS
from .layer import loss #DEFINE_ALIAS
from .layer import conv #DEFINE_ALIAS
from .layer.conv import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose #DEFINE_ALIAS
from .layer.loss import NLLLoss #DEFINE_ALIAS
from .layer.loss import BCELoss #DEFINE_ALIAS
# from .layer.learning_rate import CosineDecay #DEFINE_ALIAS
# from .layer.learning_rate import ExponentialDecay #DEFINE_ALIAS
# from .layer.learning_rate import InverseTimeDecay #DEFINE_ALIAS
# from .layer.learning_rate import NaturalExpDecay #DEFINE_ALIAS
# from .layer.learning_rate import NoamDecay #DEFINE_ALIAS
# from .layer.learning_rate import PiecewiseDecay #DEFINE_ALIAS
# from .layer.learning_rate import PolynomialDecay #DEFINE_ALIAS
# from .layer.transformer import #DEFINE_ALIAS
# from .layer.norm import BatchNorm #DEFINE_ALIAS
# from .layer.norm import GroupNorm #DEFINE_ALIAS
# from .layer.norm import LayerNorm #DEFINE_ALIAS
from .layer.norm import InstanceNorm #DEFINE_ALIAS
# from .layer.norm import SpectralNorm #DEFINE_ALIAS
from .layer.activation import HSigmoid #DEFINE_ALIAS
# from .layer.activation import PReLU #DEFINE_ALIAS
from .layer.activation import ReLU #DEFINE_ALIAS
from .layer.activation import Sigmoid #DEFINE_ALIAS
# from .layer.activation import Softmax #DEFINE_ALIAS
# from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.extension import RowConv #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
# from .layer.rnn import RNNCell #DEFINE_ALIAS
# from .layer.rnn import GRUCell #DEFINE_ALIAS
# from .layer.rnn import LSTMCell #DEFINE_ALIAS
# from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
# from .layer.common import Pool2D #DEFINE_ALIAS
# from .layer.common import Embedding #DEFINE_ALIAS
# from .layer.common import Linear #DEFINE_ALIAS
# from .layer.common import UpSample #DEFINE_ALIAS
from .functional.conv import conv2d #DEFINE_ALIAS
from .functional.conv import conv2d_transpose #DEFINE_ALIAS
from .functional.conv import conv3d #DEFINE_ALIAS
from .functional.conv import conv3d_transpose #DEFINE_ALIAS
# from .functional.loss import bpr_loss #DEFINE_ALIAS
# from .functional.loss import center_loss #DEFINE_ALIAS
# from .functional.loss import cross_entropy #DEFINE_ALIAS
# from .functional.loss import dice_loss #DEFINE_ALIAS
# from .functional.loss import edit_distance #DEFINE_ALIAS
# from .functional.loss import huber_loss #DEFINE_ALIAS
# from .functional.loss import iou_similarity #DEFINE_ALIAS
# from .functional.loss import kldiv_loss #DEFINE_ALIAS
# from .functional.loss import log_loss #DEFINE_ALIAS
# from .functional.loss import margin_rank_loss #DEFINE_ALIAS
# from .functional.loss import mse_loss #DEFINE_ALIAS
# from .functional.loss import nce #DEFINE_ALIAS
# from .functional.loss import npair_loss #DEFINE_ALIAS
# from .functional.loss import rank_loss #DEFINE_ALIAS
# from .functional.loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
# from .functional.loss import sigmoid_cross_entropy_with_logits #DEFINE_ALIAS
# from .functional.loss import sigmoid_focal_loss #DEFINE_ALIAS
# from .functional.loss import smooth_l1 #DEFINE_ALIAS
# from .functional.loss import softmax_with_cross_entropy #DEFINE_ALIAS
# from .functional.loss import square_error_cost #DEFINE_ALIAS
# from .functional.loss import ssd_loss #DEFINE_ALIAS
# from .functional.loss import teacher_student_sigmoid_loss #DEFINE_ALIAS
# from .functional.learning_rate import cosine_decay #DEFINE_ALIAS
# from .functional.learning_rate import exponential_decay #DEFINE_ALIAS
# from .functional.learning_rate import inverse_time_decay #DEFINE_ALIAS
# from .functional.learning_rate import natural_exp_decay #DEFINE_ALIAS
# from .functional.learning_rate import noam_decay #DEFINE_ALIAS
# from .functional.learning_rate import piecewise_decay #DEFINE_ALIAS
# from .functional.learning_rate import polynomial_decay #DEFINE_ALIAS
# from .functional.learning_rate import linear_lr_warmup #DEFINE_ALIAS
# from .functional.transformer import #DEFINE_ALIAS
# from .functional.pooling import pool2d #DEFINE_ALIAS
# from .functional.pooling import pool3d #DEFINE_ALIAS
# from .functional.pooling import adaptive_pool2d #DEFINE_ALIAS
# from .functional.pooling import adaptive_pool3d #DEFINE_ALIAS
# from .functional.norm import batch_norm #DEFINE_ALIAS
# from .functional.norm import data_norm #DEFINE_ALIAS
# from .functional.norm import group_norm #DEFINE_ALIAS
# from .functional.norm import instance_norm #DEFINE_ALIAS
# from .functional.norm import l2_normalize #DEFINE_ALIAS
# from .functional.norm import layer_norm #DEFINE_ALIAS
# from .functional.norm import lrn #DEFINE_ALIAS
# from .functional.norm import spectral_norm #DEFINE_ALIAS
# from .functional.vision import affine_channel #DEFINE_ALIAS
# from .functional.vision import affine_grid #DEFINE_ALIAS
# from .functional.vision import anchor_generator #DEFINE_ALIAS
# from .functional.vision import bipartite_match #DEFINE_ALIAS
# from .functional.vision import box_clip #DEFINE_ALIAS
# from .functional.vision import box_coder #DEFINE_ALIAS
# from .functional.vision import box_decoder_and_assign #DEFINE_ALIAS
# from .functional.vision import collect_fpn_proposals #DEFINE_ALIAS
# from .functional.vision import deformable_conv #DEFINE_ALIAS
# from .functional.vision import deformable_roi_pooling #DEFINE_ALIAS
# from .functional.vision import density_prior_box #DEFINE_ALIAS
# from .functional.vision import detection_output #DEFINE_ALIAS
# from .functional.vision import distribute_fpn_proposals #DEFINE_ALIAS
# from .functional.vision import fsp_matrix #DEFINE_ALIAS
# from .functional.vision import generate_mask_labels #DEFINE_ALIAS
# from .functional.vision import generate_proposal_labels #DEFINE_ALIAS
# from .functional.vision import generate_proposals #DEFINE_ALIAS
# from .functional.vision import grid_sampler #DEFINE_ALIAS
# from .functional.vision import image_resize #DEFINE_ALIAS
# from .functional.vision import image_resize_short #DEFINE_ALIAS
# from .functional.vision import multi_box_head #DEFINE_ALIAS
# from .functional.vision import pixel_shuffle #DEFINE_ALIAS
# from .functional.vision import prior_box #DEFINE_ALIAS
# from .functional.vision import prroi_pool #DEFINE_ALIAS
# from .functional.vision import psroi_pool #DEFINE_ALIAS
# from .functional.vision import resize_bilinear #DEFINE_ALIAS
# from .functional.vision import resize_nearest #DEFINE_ALIAS
# from .functional.vision import resize_trilinear #DEFINE_ALIAS
# from .functional.vision import retinanet_detection_output #DEFINE_ALIAS
# from .functional.vision import retinanet_target_assign #DEFINE_ALIAS
# from .functional.vision import roi_align #DEFINE_ALIAS
# from .functional.vision import roi_perspective_transform #DEFINE_ALIAS
# from .functional.vision import roi_pool #DEFINE_ALIAS
# from .functional.vision import shuffle_channel #DEFINE_ALIAS
# from .functional.vision import space_to_depth #DEFINE_ALIAS
# from .functional.vision import yolo_box #DEFINE_ALIAS
# from .functional.vision import yolov3_loss #DEFINE_ALIAS
# from .functional.activation import brelu #DEFINE_ALIAS
# from .functional.activation import elu #DEFINE_ALIAS
# from .functional.activation import erf #DEFINE_ALIAS
# from .functional.activation import gelu #DEFINE_ALIAS
# from .functional.activation import hard_shrink #DEFINE_ALIAS
# from .functional.activation import hard_sigmoid #DEFINE_ALIAS
# from .functional.activation import hard_swish #DEFINE_ALIAS
from .functional.activation import hsigmoid #DEFINE_ALIAS
# from .functional.activation import leaky_relu #DEFINE_ALIAS
# from .functional.activation import logsigmoid #DEFINE_ALIAS
# from .functional.activation import maxout #DEFINE_ALIAS
# from .functional.activation import prelu #DEFINE_ALIAS
from .functional.activation import relu #DEFINE_ALIAS
# from .functional.activation import relu6 #DEFINE_ALIAS
# from .functional.activation import selu #DEFINE_ALIAS
from .functional.activation import sigmoid #DEFINE_ALIAS
# from .functional.activation import soft_relu #DEFINE_ALIAS
# from .functional.activation import softmax #DEFINE_ALIAS
# from .functional.activation import softplus #DEFINE_ALIAS
# from .functional.activation import softshrink #DEFINE_ALIAS
# from .functional.activation import softsign #DEFINE_ALIAS
# from .functional.activation import swish #DEFINE_ALIAS
# from .functional.activation import tanh_shrink #DEFINE_ALIAS
# from .functional.activation import thresholded_relu #DEFINE_ALIAS
from .functional.activation import log_softmax #DEFINE_ALIAS
# from .functional.rnn import gru_unit #DEFINE_ALIAS
# from .functional.rnn import lstm #DEFINE_ALIAS
# from .functional.rnn import lstm_unit #DEFINE_ALIAS
# from .functional.lod import sequence_concat #DEFINE_ALIAS
# from .functional.lod import sequence_conv #DEFINE_ALIAS
# from .functional.lod import sequence_enumerate #DEFINE_ALIAS
# from .functional.lod import sequence_expand_as #DEFINE_ALIAS
# from .functional.lod import sequence_expand #DEFINE_ALIAS
# from .functional.lod import sequence_first_step #DEFINE_ALIAS
# from .functional.lod import sequence_last_step #DEFINE_ALIAS
# from .functional.lod import sequence_mask #DEFINE_ALIAS
# from .functional.lod import sequence_pad #DEFINE_ALIAS
# from .functional.lod import sequence_pool #DEFINE_ALIAS
# from .functional.lod import sequence_reshape #DEFINE_ALIAS
# from .functional.lod import sequence_reverse #DEFINE_ALIAS
# from .functional.lod import sequence_scatter #DEFINE_ALIAS
# from .functional.lod import sequence_slice #DEFINE_ALIAS
# from .functional.lod import sequence_softmax #DEFINE_ALIAS
# from .functional.lod import sequence_unpad #DEFINE_ALIAS
# from .functional.lod import array_length #DEFINE_ALIAS
# from .functional.lod import array_read #DEFINE_ALIAS
# from .functional.lod import array_write #DEFINE_ALIAS
# from .functional.lod import create_array #DEFINE_ALIAS
# from .functional.lod import hash #DEFINE_ALIAS
# from .functional.lod import im2sequence #DEFINE_ALIAS
# from .functional.lod import lod_append #DEFINE_ALIAS
# from .functional.lod import lod_reset #DEFINE_ALIAS
# from .functional.lod import reorder_lod_tensor_by_rank #DEFINE_ALIAS
# from .functional.lod import tensor_array_to_tensor #DEFINE_ALIAS
# from .functional.lod import dynamic_gru #DEFINE_ALIAS
# from .functional.lod import dynamic_lstm #DEFINE_ALIAS
# from .functional.lod import dynamic_lstmp #DEFINE_ALIAS
# from .functional.common import dropout #DEFINE_ALIAS
# from .functional.common import embedding #DEFINE_ALIAS
# from .functional.common import fc #DEFINE_ALIAS
# from .functional.common import label_smooth #DEFINE_ALIAS
# from .functional.common import one_hot #DEFINE_ALIAS
# from .functional.common import pad #DEFINE_ALIAS
# from .functional.common import pad_constant_like #DEFINE_ALIAS
# from .functional.common import pad2d #DEFINE_ALIAS
# from .functional.common import unfold #DEFINE_ALIAS
# from .functional.common import bilinear_tensor_product #DEFINE_ALIAS
# from .functional.common import assign #DEFINE_ALIAS
# from .functional.common import interpolate #DEFINE_ALIAS
# from .input import data #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
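As a quick smoke test of the aliases this file actually exports, a sketch exercising the layer and functional forms of relu (hypothetical script; it assumes both callables accept their default arguments):

import numpy as np
import paddle.fluid as fluid
import paddle.nn as nn

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(
        np.array([[-1.0, 2.0]], dtype='float32'))
    print(nn.ReLU()(x).numpy())   # layer alias -> [[0., 2.]]
    print(nn.relu(x).numpy())     # functional alias, same result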
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the functions to clip gradient of parameter
# __all__ = ['ErrorClipByValue',
# 'GradientClipByGlobalNorm',
# 'GradientClipByNorm',
# 'GradientClipByValue',
# 'set_gradient_clip',
# 'clip',
# 'clip_by_norm']