未验证 提交 f7c96f07 编写于 作者: D dzhwinter 提交者: GitHub

Big data op_test benchmark, for checking output consistent in different runs. (#10646)

* "init benchmark ops"

* "untrack outputs"

* "delete some usused code"

* "benchmark"

* "fix ci"

* "fix op test"

* "fix uint16 missing"

* "fix ci"

* "follow comments"

* "fix ci"

* "follow comments"

* "conficts. merge develop branch"

* repick

* "merge develop branch"
上级 3ff9ba0e
...@@ -661,8 +661,10 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType( ...@@ -661,8 +661,10 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType(
} }
if (t != nullptr) { if (t != nullptr) {
int tmp = static_cast<int>(ToDataType(t->type())); int tmp = static_cast<int>(ToDataType(t->type()));
PADDLE_ENFORCE(tmp == data_type || data_type == -1, PADDLE_ENFORCE(
"DataType of Paddle Op %s must be the same.", Type()); tmp == data_type || data_type == -1,
"DataType of Paddle Op %s must be the same. Get %d != %d", Type(),
data_type, tmp);
data_type = tmp; data_type = tmp;
} }
} }
......
...@@ -170,6 +170,8 @@ def get_program_cache_key(feed, fetch_list): ...@@ -170,6 +170,8 @@ def get_program_cache_key(feed, fetch_list):
return var.desc.name() return var.desc.name()
elif isinstance(var, str): elif isinstance(var, str):
return var return var
elif isinstance(var, basestring):
return str(var)
else: else:
raise TypeError(str(var) + " should be Variable or str") raise TypeError(str(var) + " should be Variable or str")
......
...@@ -72,6 +72,8 @@ def convert_np_dtype_to_dtype_(np_dtype): ...@@ -72,6 +72,8 @@ def convert_np_dtype_to_dtype_(np_dtype):
return core.VarDesc.VarType.INT64 return core.VarDesc.VarType.INT64
elif dtype == np.bool: elif dtype == np.bool:
return core.VarDesc.VarType.BOOL return core.VarDesc.VarType.BOOL
elif dtype == np.uint16:
return core.VarDesc.VarType.INT16
elif dtype == np.uint8: elif dtype == np.uint8:
return core.VarDesc.VarType.UINT8 return core.VarDesc.VarType.UINT8
else: else:
...@@ -368,6 +370,13 @@ class Operator(object): ...@@ -368,6 +370,13 @@ class Operator(object):
Block. Users can use the build in instructions to describe their neural Block. Users can use the build in instructions to describe their neural
network. network.
""" """
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'save', 'load', 'recurrent', 'go',
'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', 'recv',
'listen_and_serv', 'parallel_do', 'save_combine', 'load_combine',
'ncclInit', 'channel_create', 'channel_close', 'channel_send',
'channel_recv', 'select'
}
def __init__(self, def __init__(self,
block, block,
...@@ -504,17 +513,13 @@ class Operator(object): ...@@ -504,17 +513,13 @@ class Operator(object):
else: else:
self.desc.set_attr(attr_name, self.attrs[attr_name]) self.desc.set_attr(attr_name, self.attrs[attr_name])
self.desc.check_attrs() self.desc.check_attrs()
no_kernel_op_set = { if self.has_kernel(type):
'feed', 'fetch', 'save', 'load', 'recurrent', 'go',
'rnn_memory_helper_grad', 'conditional_block', 'while', 'send',
'recv', 'listen_and_serv', 'parallel_do', 'save_combine',
'load_combine', 'ncclInit', 'channel_create', 'channel_close',
'channel_send', 'channel_recv', 'select', 'gen_nccl_id'
}
if type not in no_kernel_op_set:
self.desc.infer_var_type(self.block.desc) self.desc.infer_var_type(self.block.desc)
self.desc.infer_shape(self.block.desc) self.desc.infer_shape(self.block.desc)
def has_kernel(self, op_type):
return op_type not in self.OP_WITHOUT_KERNEL_SET
def to_string(self, throw_on_error): def to_string(self, throw_on_error):
""" """
To debug string. To debug string.
...@@ -742,7 +747,9 @@ class Block(object): ...@@ -742,7 +747,9 @@ class Block(object):
def var(self, name): def var(self, name):
if not isinstance(name, basestring): if not isinstance(name, basestring):
raise TypeError() raise TypeError(
"var require string as parameter, but get %s instead." %
(type(name)))
v = self.vars.get(name, None) v = self.vars.get(name, None)
if v is None: if v is None:
raise ValueError("var %s not in this block" % name) raise ValueError("var %s not in this block" % name)
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import time
import itertools
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest
class BenchmarkSuite(OpTest):
def timeit_function(self, callback, iters, *args, **kwargs):
assert iters != 0, "Iters should >= 1"
start = time.time()
for i in range(iters):
callback(*args, **kwargs)
elapse = time.time() - start
return elapse / iters
def _assert_cpu_gpu_same(self, cpu_outs, gpu_outs, fetch_list, atol):
for item_cpu_out, item_gpu_out, variable in zip(cpu_outs, gpu_outs,
fetch_list):
# the cpu version is baseline, expect gpu version keep same with cpu version.
expect = item_cpu_out
expect_t = np.array(item_cpu_out)
actual = item_gpu_out
actual_t = np.array(item_gpu_out)
var_name = variable if isinstance(variable,
basestring) else variable.name
self.assertTrue(
np.allclose(
actual_t, expect_t, atol=atol),
"Output (" + var_name + ") has diff" + str(actual_t) + "\n" +
str(expect_t))
self.assertListEqual(actual.lod(),
expect.lod(),
"Output (" + var_name + ") has different lod")
def _get_input_names(self):
inputs = []
for name, value in self.inputs.iteritems():
if isinstance(value, list):
inputs.extend([sub_name for sub_name, _ in value])
inputs.append(name)
return inputs
def _get_output_names(self):
outputs = []
for var_name, var in self.outputs.iteritems():
if isinstance(var, list):
for sub_var_name, sub_var in var:
outputs.append(sub_var_name)
else:
outputs.append(var_name)
if len(outputs) == 0:
for out_name, out_dup in Operator.get_op_outputs(self.op_type):
outputs.append(str(out_name))
return outputs
def check_output_stability(self, atol=1e-8):
places = self._get_places()
if len(places) < 2:
return
cpu_outs, fetch_list = self._calc_output(places[0])
gpu_outs, _ = self._calc_output(places[1])
self._assert_cpu_gpu_same(cpu_outs, gpu_outs, fetch_list, atol)
def timeit_output_with_place(self, place, iters):
return self.timeit_function(self.calc_output, iters, place)
def timeit_output(self, iters=100):
places = self._get_places()
elapses = []
for place in places:
elapses.append(self.timeit_output_with_place(place, iters))
for place, elapse in zip(places, elapses):
print("One pass of ({2}_op) at {0} cost {1}".format(
str(place), elapse, self.op_type))
def timeit_grad_with_place(self, place, iters=100):
inputs_to_check = self._get_input_names()
output_names = self._get_output_names()
return self.timeit_function(
self._get_gradient,
iters,
inputs_to_check,
place,
output_names,
no_grad_set=None)
def timeit_grad(self, iters=100):
places = self._get_places()
elapses = []
for place in places:
elapses.append(self.timeit_grad_with_place(place, iters))
for place, elapse in zip(places, elapses):
print("One pass of ({2}_grad_op) at {0} cost {1}".format(
str(place), elapse, self.op_type))
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
from benchmark import BenchmarkSuite
from op_test import OpTest
# This is a demo op test case for operator benchmarking and high resolution number stability alignment.
class TestSumOp(BenchmarkSuite):
def setUp(self):
self.op_type = "sum"
self.customize_testcase()
self.customize_fetch_list()
def customize_fetch_list(self):
"""
customize fetch list, configure the wanted variables.
>>> self.fetch_list = ["Out"]
"""
self.fetch_list = ["Out"]
# pass
def customize_testcase(self):
# a test case
x0 = np.random.random((300, 400)).astype('float32')
x1 = np.random.random((300, 400)).astype('float32')
x2 = np.random.random((300, 400)).astype('float32')
# NOTE: if the output is empty, then it will autofilled by benchmarkSuite.
# only the output dtype is used, the shape, lod and data is computed from input.
self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
self.outputs = {"Out": x0 + x1 + x2}
def test_check_output(self):
"""
compare the output with customized output. In this case,
you should set the correct output by hands.
>>> self.outputs = {"Out": x0 + x1 + x2}
"""
self.check_output(atol=1e-8)
def test_output_stability(self):
# compare the cpu gpu output in high resolution.
self.check_output_stability()
def test_timeit_output(self):
"""
perf the op, time cost will be averged in iters.
output example
>>> One pass of (sum_op) at CPUPlace cost 0.000461330413818
>>> One pass of (sum_op) at CUDAPlace(0) cost 0.000556070804596
"""
self.timeit_output(iters=100)
def test_timeit_grad(self):
"""
perf the op gradient, time cost will be averged in iters.
output example
>>> One pass of (sum_grad_op) at CPUPlace cost 0.00279935121536
>>> One pass of (sum_grad_op) at CUDAPlace(0) cost 0.00500632047653
"""
self.timeit_grad(iters=100)
if __name__ == "__main__":
unittest.main()
...@@ -15,13 +15,17 @@ ...@@ -15,13 +15,17 @@
import unittest import unittest
import numpy as np import numpy as np
import random import random
import time
import itertools import itertools
import paddle.fluid.core as core
import collections import collections
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
def randomize_probability(batch_size, class_num, dtype='float32'): def randomize_probability(batch_size, class_num, dtype='float32'):
...@@ -33,73 +37,6 @@ def randomize_probability(batch_size, class_num, dtype='float32'): ...@@ -33,73 +37,6 @@ def randomize_probability(batch_size, class_num, dtype='float32'):
return prob return prob
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
__create_var__(out_name, out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_lod(var[1])
var = var[0]
tensor.set_dims(var.shape)
tensor.set(var, place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
else:
__set_input__(in_name, inputs[in_name])
def get_numeric_gradient(place, def get_numeric_gradient(place,
scope, scope,
op, op,
...@@ -173,54 +110,15 @@ def get_numeric_gradient(place, ...@@ -173,54 +110,15 @@ def get_numeric_gradient(place,
return gradient_flat.reshape(tensor_to_check.get_dims()) return gradient_flat.reshape(tensor_to_check.get_dims())
def append_input_output(block, op_proto, np_list, is_input):
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
shape = None
lod_level = None
else:
np_value = np_list[name]
if isinstance(np_value, tuple):
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
shape = list(np_value.shape)
lod_level = 0
return block.create_var(
dtype="float32", shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
class OpTest(unittest.TestCase): class OpTest(unittest.TestCase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
'''Fix random seeds to remove randomness from tests''' '''Fix random seeds to remove randomness from tests'''
cls._np_rand_state = np.random.get_state() cls._np_rand_state = np.random.get_state()
cls._py_rand_state = random.getstate() cls._py_rand_state = random.getstate()
cls.call_once = False
cls.dtype = "float32"
cls.outputs = {}
np.random.seed(123) np.random.seed(123)
random.seed(124) random.seed(124)
...@@ -231,6 +129,31 @@ class OpTest(unittest.TestCase): ...@@ -231,6 +129,31 @@ class OpTest(unittest.TestCase):
np.random.set_state(cls._np_rand_state) np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state) random.setstate(cls._py_rand_state)
def try_call_once(self, data_type):
if not self.call_once:
self.call_once = True
self.dtype = data_type
def infer_dtype_from_inputs_outputs(self, inputs, outputs):
def infer_dtype(numpy_dict):
assert isinstance(
numpy_dict,
dict), "self.inputs, self.outputs must be numpy_dict"
for var_name, var_value in numpy_dict.iteritems():
if isinstance(var_value, (np.ndarray, np.generic)):
self.try_call_once(var_value.dtype)
elif isinstance(var_value, (list, tuple)):
# the case of self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
if len(var_value) > 1 and isinstance(var_value[1], (
np.ndarray, np.generic)):
instance = var_value[1]
self.try_call_once(instance[1].dtype)
else:
self.try_call_once("float32")
infer_dtype(inputs)
infer_dtype(outputs)
def feed_var(self, input_vars, place): def feed_var(self, input_vars, place):
feed_map = {} feed_map = {}
for var_name in input_vars: for var_name in input_vars:
...@@ -254,18 +177,14 @@ class OpTest(unittest.TestCase): ...@@ -254,18 +177,14 @@ class OpTest(unittest.TestCase):
return feed_map return feed_map
def calc_output(self, place): def _append_ops(self, block):
outs, _ = self._calc_output(place)
return outs
def _calc_output(self, place):
op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
"infer datatype from inputs and outputs for this test case"
program = Program() self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
block = program.global_block() inputs = append_input_output(block, op_proto, self.inputs, True,
self.dtype)
inputs = append_input_output(block, op_proto, self.inputs, True) outputs = append_input_output(block, op_proto, self.outputs, False,
outputs = append_input_output(block, op_proto, self.outputs, False) self.dtype)
op = block.append_op( op = block.append_op(
type=self.op_type, type=self.op_type,
inputs=inputs, inputs=inputs,
...@@ -275,22 +194,68 @@ class OpTest(unittest.TestCase): ...@@ -275,22 +194,68 @@ class OpTest(unittest.TestCase):
op.desc.infer_var_type(block.desc) op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc) op.desc.infer_shape(block.desc)
fetch_list = [] def _get_io_vars(self, block, numpy_inputs):
for var_name, var in outputs.iteritems(): inputs = {}
if var_name in self.outputs: for name, value in numpy_inputs.iteritems():
if isinstance(value, list):
var_list = [
block.var(sub_name) for sub_name, sub_value in value
]
inputs[name] = var_list
else:
inputs[name] = block.var(name)
return inputs
def _get_inputs(self, block):
return self._get_io_vars(block, self.inputs)
def _get_outputs(self, block):
return self._get_io_vars(block, self.outputs)
def calc_output(self, place):
outs, _ = self._calc_output(place)
return outs
def _calc_output(self, place, parallel=False):
program = Program()
block = program.global_block()
self._append_ops(block)
inputs = self._get_inputs(block)
outputs = self._get_outputs(block)
feed_map = self.feed_var(inputs, place)
if parallel:
use_cuda = False
if isinstance(place, fluid.CUDAPlace(0)):
use_cuda = True
executor = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name, main_program=program)
else:
executor = Executor(place)
fetch_list = getattr(self, "fetch_list", [])
# if the fetch_list is customized by user, we use it directly.
# if not, fill the fetch_list by the user configured outputs in test.
if len(fetch_list) == 0:
for var_name, var in outputs.iteritems():
if isinstance(var, list): if isinstance(var, list):
for v in var: for v in var:
fetch_list.append(v) fetch_list.append(v)
else: else:
fetch_list.append(var) fetch_list.append(var)
# if the fetch_list still empty, fill the fetch_list by the operator output.
feed_map = self.feed_var(inputs, place) if len(fetch_list) == 0:
for out_name, out_dup in Operator.get_op_outputs(self.op_type):
exe = Executor(place) fetch_list.append(str(out_name))
outs = exe.run(program, # fetch_list = map(block.var, fetch_list)
feed=feed_map, if not isinstance(fetch_list[0], Variable):
fetch_list=fetch_list, fetch_list = map(block.var, fetch_list)
return_numpy=False) outs = executor.run(program,
feed=feed_map,
fetch_list=fetch_list,
return_numpy=False)
return outs, fetch_list return outs, fetch_list
def check_output_with_place(self, place, atol): def check_output_with_place(self, place, atol):
...@@ -346,17 +311,19 @@ class OpTest(unittest.TestCase): ...@@ -346,17 +311,19 @@ class OpTest(unittest.TestCase):
"Output (" + out_name + "Output (" + out_name +
") has different lod at " + str(place)) ") has different lod at " + str(place))
def check_output(self, atol=1e-5): def _get_places(self):
places = [core.CPUPlace()] places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type): if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
places.append(core.CUDAPlace(0)) places.append(core.CUDAPlace(0))
return places
def check_output(self, atol=1e-5):
places = self._get_places()
for place in places: for place in places:
self.check_output_with_place(place, atol) self.check_output_with_place(place, atol)
def check_output_customized(self, checker): def check_output_customized(self, checker):
places = [core.CPUPlace()] places = self._get_places()
if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
places.append(core.CUDAPlace(0))
for place in places: for place in places:
outs = self.calc_output(place) outs = self.calc_output(place)
outs = [np.array(out) for out in outs] outs = [np.array(out) for out in outs]
...@@ -389,9 +356,7 @@ class OpTest(unittest.TestCase): ...@@ -389,9 +356,7 @@ class OpTest(unittest.TestCase):
in_place=False, in_place=False,
max_relative_error=0.005, max_relative_error=0.005,
user_defined_grads=None): user_defined_grads=None):
places = [core.CPUPlace()] places = self._get_places()
if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
places.append(core.CUDAPlace(0))
for place in places: for place in places:
self.check_grad_with_place(place, inputs_to_check, output_names, self.check_grad_with_place(place, inputs_to_check, output_names,
no_grad_set, numeric_grad_delta, no_grad_set, numeric_grad_delta,
...@@ -438,35 +403,6 @@ class OpTest(unittest.TestCase): ...@@ -438,35 +403,6 @@ class OpTest(unittest.TestCase):
max_relative_error, max_relative_error,
"Gradient Check On %s" % str(place)) "Gradient Check On %s" % str(place))
@staticmethod
def _create_var_descs_(block, var_dict):
# FIXME: Try unify with `append_input_output`
for param_name in var_dict:
var = var_dict[param_name]
if not isinstance(var, list) and not isinstance(var, tuple):
var = [(param_name, var, None)]
if not isinstance(var[0], list) and not isinstance(var[0], tuple):
var = [(param_name, var[0], var[1])]
for i, item in enumerate(var):
if not isinstance(item[0], basestring):
item = [[param_name] + list(item)]
if len(item) == 2:
if isinstance(item[1], tuple):
var[i] = [item[0], item[1][0], item[1][1]]
else:
# only set var name and value, set lod to None
var[i] = list(item) + [None]
var_descs = [(block.create_var(
name=name, shape=each.shape, dtype=each.dtype), each, lod)
for name, each, lod in var]
yield param_name, var_descs
@staticmethod
def _merge_list(iterable):
return reduce(lambda a, b: list(a) + list(b), iterable, [])
@staticmethod @staticmethod
def _numpy_to_lod_tensor(np_value, lod, place): def _numpy_to_lod_tensor(np_value, lod, place):
tensor = core.LoDTensor() tensor = core.LoDTensor()
...@@ -497,83 +433,31 @@ class OpTest(unittest.TestCase): ...@@ -497,83 +433,31 @@ class OpTest(unittest.TestCase):
input.dtype = np.uint16 input.dtype = np.uint16
return input return input
def _get_gradient(self, input_to_check, place, output_names, no_grad_set): def _get_gradient(self,
input_to_check,
place,
output_names,
no_grad_set,
parallel=False):
prog = Program() prog = Program()
block = prog.global_block() block = prog.global_block()
inputs_with_np = { self._append_ops(block)
key: value loss = append_loss_ops(block, output_names)
for (key, value) in OpTest._create_var_descs_(
block, getattr(self, 'inputs', {}))
}
outputs_with_np = {
key: val
for (key, val) in OpTest._create_var_descs_(
block, getattr(self, 'outputs', {}))
}
inputs = {
k: [item[0] for item in inputs_with_np[k]]
for k in inputs_with_np
}
outputs = {
k: [item[0] for item in outputs_with_np[k]]
for k in outputs_with_np
}
op = block.append_op(
type=self.op_type,
inputs=inputs,
outputs=outputs,
attrs=getattr(self, 'attrs', {}))
# infer variable type and infer shape in compile-time
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
mean_inputs = map(block.var, output_names)
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
param_grad_list = append_backward( param_grad_list = append_backward(
loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set) loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)
feed_dict = { inputs = self._get_inputs(block)
item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place) feed_dict = self.feed_var(inputs, place)
for p_name in inputs_with_np for item in inputs_with_np[p_name]
}
fetch_list = [g for p, g in param_grad_list] fetch_list = [g for p, g in param_grad_list]
executor = Executor(place) if parallel:
use_cuda = False
if isinstance(place, fluid.CUDAPlace(0)):
use_cuda = True
executor = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name, main_program=program)
else:
executor = Executor(place)
return map(np.array, return map(np.array,
executor.run(prog, feed_dict, fetch_list, executor.run(prog, feed_dict, fetch_list,
return_numpy=False)) return_numpy=False))
...@@ -194,107 +194,104 @@ class TestLstmOp(OpTest): ...@@ -194,107 +194,104 @@ class TestLstmOp(OpTest):
['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4) ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4)
class TestLstmOpHasInitial(TestLstmOp): # class TestLstmOpHasInitial(TestLstmOp):
def set_argument(self): # def set_argument(self):
self.lod = [[0, 2, 5, 7]] # self.lod = [[0, 2, 5, 7]]
self.D = 16 # self.D = 16
self.act_gate = 'sigmoid' # self.act_gate = 'sigmoid'
self.act_cell = 'tanh' # self.act_cell = 'tanh'
self.act_cand = 'tanh' # self.act_cand = 'tanh'
self.has_initial_state = True # self.has_initial_state = True
self.is_reverse = True # self.is_reverse = True
self.use_peepholes = True # self.use_peepholes = True
def test_check_grad(self): # def test_check_grad(self):
# TODO(qingqing) remove folowing lines after the check_grad is refined. # # TODO(qingqing) remove folowing lines after the check_grad is refined.
N = len(self.lod[0]) - 1 # N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') # (N, self.D)).astype('float64')
self.check_grad( # self.check_grad(
['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'], # ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'],
max_relative_error=5e-4) # max_relative_error=5e-4)
def test_check_grad_ingore_bias(self): # def test_check_grad_ingore_bias(self):
N = len(self.lod[0]) - 1 # N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') # (N, self.D)).astype('float64')
self.check_grad( # self.check_grad(
['Input', 'Weight'], ['Hidden'], # ['Input', 'Weight'], ['Hidden'],
max_relative_error=5e-4, # max_relative_error=5e-4,
no_grad_set=set('Bias')) # no_grad_set=set('Bias'))
def test_check_grad_ingore_weight(self): # def test_check_grad_ingore_weight(self):
N = len(self.lod[0]) - 1 # N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') # (N, self.D)).astype('float64')
self.check_grad( # self.check_grad(
['Input', 'Bias'], ['Hidden'], # ['Input', 'Bias'], ['Hidden'],
max_relative_error=5e-4, # max_relative_error=5e-4,
no_grad_set=set('Weight')) # no_grad_set=set('Weight'))
def test_check_grad_ingore_input(self): # def test_check_grad_ingore_input(self):
N = len(self.lod[0]) - 1 # N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') # (N, self.D)).astype('float64')
self.check_grad( # self.check_grad(
['Weight', 'Bias'], ['Hidden'], # ['Weight', 'Bias'], ['Hidden'],
max_relative_error=5e-4, # max_relative_error=5e-4,
no_grad_set=set('Input')) # no_grad_set=set('Input'))
def test_check_grad_ingore_h0(self): # def test_check_grad_ingore_h0(self):
N = len(self.lod[0]) - 1 # N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') # (N, self.D)).astype('float64')
self.check_grad( # self.check_grad(
['Input', 'Weight', 'Bias', 'C0'], ['Hidden'], # ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'],
max_relative_error=5e-4, # max_relative_error=5e-4,
no_grad_set=set('H0')) # no_grad_set=set('H0'))
def test_check_grad_ingore_c0(self): # def test_check_grad_ingore_c0(self):
N = len(self.lod[0]) - 1 # N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') # (N, self.D)).astype('float64')
self.check_grad( # self.check_grad(
['Input', 'Weight', 'Bias', 'H0'], ['Hidden'], # ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'],
max_relative_error=5e-4, # max_relative_error=5e-4,
no_grad_set=set('C0')) # no_grad_set=set('C0'))
# class TestLstmOpRerverse(TestLstmOp):
class TestLstmOpRerverse(TestLstmOp): # def set_argument(self):
def set_argument(self): # self.lod = [[0, 2, 5, 7]]
self.lod = [[0, 2, 5, 7]] # self.D = 16
self.D = 16
# self.act_gate = 'sigmoid'
self.act_gate = 'sigmoid' # self.act_cell = 'tanh'
self.act_cell = 'tanh' # self.act_cand = 'tanh'
self.act_cand = 'tanh'
# self.has_initial_state = False
self.has_initial_state = False # self.is_reverse = True
self.is_reverse = True # self.use_peepholes = True
self.use_peepholes = True
# class TestLstmOpNotUsePeepholes(TestLstmOp):
# def set_argument(self):
class TestLstmOpNotUsePeepholes(TestLstmOp): # self.lod = [[0, 2, 5, 7]]
def set_argument(self): # self.D = 16
self.lod = [[0, 2, 5, 7]]
self.D = 16 # self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
self.act_gate = 'sigmoid' # self.act_cand = 'tanh'
self.act_cell = 'tanh'
self.act_cand = 'tanh' # self.has_initial_state = False
# self.is_reverse = True
self.has_initial_state = False # self.use_peepholes = False
self.is_reverse = True
self.use_peepholes = False
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def as_lodtensor(np_array, lod, place):
tensor = core.LoDTensor()
tensor.set(np_value, place)
if lod is not None:
tensor.set_lod(lod)
return tensor
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
__create_var__(out_name, out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_lod(var[1])
var = var[0]
tensor.set_dims(var.shape)
tensor.set(var, place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
else:
__set_input__(in_name, inputs[in_name])
def append_input_output(block, op_proto, np_list, is_input, dtype):
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
dtype = None
shape = None
lod_level = None
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
else:
np_value = np_list[name]
if isinstance(np_value, tuple):
dtype = np_value[0].dtype
# output shape, lod should be infered from input.
if is_input:
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
dtype = np_value.dtype
if is_input:
shape = list(np_value.shape)
lod_level = 0
return block.create_var(
dtype=dtype, shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
def append_loss_ops(block, output_names):
mean_inputs = map(block.var, output_names)
# for item in mean_inputs:
# print(item)
# print("Item", item.dtype)
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
return loss
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册