Commit d7f0eb6b authored by Yu Yang

Merge branch 'feature/change_op_creation' into feature/uniform_random_op

...
@@ -7,7 +7,7 @@ INCLUDE_DIRECTORIES(${ANY_SOURCE_DIR}/src/extern_lib_any)
 ExternalProject_Add(
     extern_lib_any
     ${EXTERNAL_PROJECT_LOG_ARGS}
-    GIT_REPOSITORY  "https://github.com/thelink2012/any.git"
+    GIT_REPOSITORY  "https://github.com/PaddlePaddle/any.git"
     GIT_TAG         "8fef1e93710a0edf8d7658999e284a1142c4c020"
     PREFIX          ${ANY_SOURCE_DIR}
     UPDATE_COMMAND  ""
...
...
@@ -69,8 +69,13 @@ ENDIF(NOT ${CBLAS_FOUND})
 MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}")
 INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})

-ADD_LIBRARY(cblas STATIC IMPORTED)
-SET_PROPERTY(TARGET cblas PROPERTY IMPORTED_LOCATION ${CBLAS_LIBRARIES})
+# FIXME(gangliao): generate cblas target to track all high performance
+# linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
+SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
+FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
+ADD_LIBRARY(cblas STATIC ${dummyfile})
+TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})

 IF(NOT ${CBLAS_FOUND})
     ADD_DEPENDENCIES(cblas extern_openblas)
     LIST(APPEND external_project_dependencies cblas)
...
...
@@ -403,3 +403,16 @@ function(py_proto_compile TARGET_NAME)
   protobuf_generate_python(py_srcs ${py_proto_compile_SRCS})
   add_custom_target(${TARGET_NAME} ALL DEPENDS ${py_srcs})
 endfunction()
+
+function(py_test TARGET_NAME)
+  if(WITH_TESTING)
+    set(options STATIC static SHARED shared)
+    set(oneValueArgs "")
+    set(multiValueArgs SRCS DEPS)
+    cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+    add_test(NAME ${TARGET_NAME}
+             COMMAND env PYTHONPATH=${PADDLE_PYTHON_PACKAGE_DIR}
+             python2 ${py_test_SRCS}
+             WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+  endif()
+endfunction()
...
-add_python_test(test_swig_api
-    testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py testTrainer.py)
+py_test(testTrain SRCS testTrain.py)
+py_test(testMatrix SRCS testMatrix.py)
+py_test(testVector SRCS testVector.py)
+py_test(testTrainer SRCS testTrainer.py)
+py_test(testArguments SRCS testArguments.py)
+py_test(testGradientMachine SRCS testGradientMachine.py)
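The new py_test helper registers one CTest case per Python file, running it as `env PYTHONPATH=... python2 <file>` from the source directory. For illustration, a minimal sketch of a test file such a target could drive; the file and test names here are hypothetical, not part of this commit:

# Hypothetical minimal test file of the kind a py_test(...) target would
# execute under CTest via "python2 <file>".
import unittest


class DummyTest(unittest.TestCase):
    def test_add(self):
        self.assertEqual(1 + 1, 2)


if __name__ == '__main__':
    unittest.main()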
...
@@ -22,14 +22,14 @@ namespace framework {
 template <>
 Eigen::DefaultDevice& ExecutionContext::GetEigenDevice<
     platform::CPUPlace, Eigen::DefaultDevice>() const {
-  return *device_context_.get_eigen_device<Eigen::DefaultDevice>();
+  return *device_context_->get_eigen_device<Eigen::DefaultDevice>();
 }

 #ifndef PADDLE_ONLY_CPU
 template <>
 Eigen::GpuDevice&
 ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
-  return *device_context_.get_eigen_device<Eigen::GpuDevice>();
+  return *device_context_->get_eigen_device<Eigen::GpuDevice>();
 }
 #endif
...
...
@@ -252,7 +252,7 @@ struct EigenDeviceConverter<platform::GPUPlace> {
 class ExecutionContext : public OperatorContext {
  public:
   ExecutionContext(const OperatorBase* op, const Scope& scope,
-                   const platform::DeviceContext& device_context)
+                   const platform::DeviceContext* device_context)
       : OperatorContext(op, scope), device_context_(device_context) {}

@@ -260,9 +260,9 @@ class ExecutionContext : public OperatorContext {
             typename EigenDeviceConverter<PlaceType>::EigenDeviceType>
   DeviceType& GetEigenDevice() const;

-  platform::Place GetPlace() const { return device_context_.GetPlace(); }
+  platform::Place GetPlace() const { return device_context_->GetPlace(); }

-  const platform::DeviceContext& device_context_;
+  const platform::DeviceContext* device_context_;
 };

 class OpKernel {

@@ -311,7 +311,7 @@ class OperatorWithKernel : public OperatorBase {
   void Run(const Scope& scope,
            const platform::DeviceContext& dev_ctx) const final {
     auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx));
-    opKernel->Compute(ExecutionContext(this, scope, dev_ctx));
+    opKernel->Compute(ExecutionContext(this, scope, &dev_ctx));
   }

   static std::unordered_map<std::string /* op_type */, OpKernelMap>&
...
...
@@ -145,6 +145,16 @@ class OpDescCreationMethod(object):
         return False

+
+class OpInfo(object):
+    def __init__(self, name, method, inputs, outputs, attrs, no_temp_outputs):
+        self.name = name
+        self.method = method
+        self.inputs = inputs
+        self.outputs = outputs
+        self.attrs = attrs
+        self.no_temp_outputs = no_temp_outputs
+

 def create_op_creation_method(op_proto):
     """
     Generate op creation method for an OpProto

@@ -155,15 +165,15 @@ def create_op_creation_method(op_proto):
         opdesc = method(*args, **kwargs)
         return core.Operator.create(opdesc.SerializeToString())

-    return {
-        'method': __impl__,
-        'name': op_proto.type,
-        'all_inputs': [var.name for var in op_proto.inputs],
-        'all_outputs': [var.name for var in op_proto.outputs],
-        'all_attrs': [attr.name for attr in op_proto.attrs],
-        'all_no_temp_outputs':
-        [var.name for var in op_proto.outputs if not var.temporary]
-    }
+    return OpInfo(
+        method=__impl__,
+        name=op_proto.type,
+        inputs=[var.name for var in op_proto.inputs],
+        outputs=[var.name for var in op_proto.outputs],
+        attrs=[attr.name for attr in op_proto.attrs],
+        no_temp_outputs=[
+            var.name for var in op_proto.outputs if not var.temporary
+        ])

 class OperatorFactory(object):

@@ -171,7 +181,7 @@ class OperatorFactory(object):
         self.op_methods = dict()
         for op_proto in get_all_op_protos():
             method = create_op_creation_method(op_proto)
-            self.op_methods[method['name']] = method
+            self.op_methods[method.name] = method

     def __call__(self, *args, **kwargs):
         if 'type' in kwargs:

@@ -185,27 +195,27 @@ class OperatorFactory(object):
                     "argument except type")
             t = args[0]

-        return self.get_op_creation_info(t)['method'](**kwargs)
+        return self.get_op_info(t).method(**kwargs)

     def types(self):
         return self.op_methods.keys()

-    def get_op_creation_info(self, t):
+    def get_op_info(self, t):
         if t not in self.op_methods:
             raise ValueError("operator %s is not registered", t)
         return self.op_methods.get(t)

     def get_op_input_names(self, type):
-        return self.get_op_creation_info(type)['all_inputs']
+        return self.get_op_info(type).inputs

     def get_op_output_names(self, type):
-        return self.get_op_creation_info(type)['all_outputs']
+        return self.get_op_info(type).outputs

     def get_op_attr_names(self, type):
-        return self.get_op_creation_info(type)['all_attrs']
+        return self.get_op_info(type).attrs

     def get_op_no_temp_output_names(self, type):
-        return self.get_op_creation_info(type)['all_no_temp_outputs']
+        return self.get_op_info(type).no_temp_outputs

 Operator = OperatorFactory()  # Default global factory
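This refactor replaces the dict that create_op_creation_method used to return with an OpInfo object, so string-keyed lookups like info['all_inputs'] become plain attribute access. A minimal sketch of the new access pattern, with made-up values, assuming OpInfo is importable from paddle.v2.framework.op:

# The OpInfo values below are made up for illustration; real instances are
# built from OpProto messages by create_op_creation_method.
from paddle.v2.framework.op import OpInfo

info = OpInfo(
    name="add_two",
    method=lambda **kwargs: None,  # stands in for the real __impl__ closure
    inputs=["X", "Y"],
    outputs=["Out"],
    attrs=[],
    no_temp_outputs=["Out"])

assert info.name == "add_two"     # was info['name']
assert info.inputs == ["X", "Y"]  # was info['all_inputs']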
...
-add_python_test(test_framework
-    test_protobuf.py
-    test_scope.py
-    test_operator.py
-    test_default_scope_funcs.py
-    test_net.py
-    test_tensor.py
-    test_fc_op.py
-    test_add_two_op.py
-    test_sgd_op.py
-    test_mul_op.py
-    test_mean_op.py
-    test_sigmoid_op.py
-    test_softmax_op.py
-    test_rowwise_add_op.py
-    gradient_checker.py
-    test_uniform_random_op.py)
+py_test(test_net SRCS test_net.py)
+py_test(test_fc_op SRCS test_fc_op.py)
+py_test(test_scope SRCS test_scope.py)
+py_test(test_tensor SRCS test_tensor.py)
+py_test(test_mul_op SRCS test_mul_op.py)
+py_test(test_mean_op SRCS test_mean_op.py)
+py_test(test_sgd_op SRCS test_sgd_op.py)
+py_test(test_protobuf SRCS test_protobuf.py)
+py_test(test_add_two_op SRCS test_add_two_op.py)
+py_test(test_sigmoid_op SRCS test_sigmoid_op.py)
+py_test(test_softmax_op SRCS test_softmax_op.py)
+py_test(gradient_checker SRCS gradient_checker.py)
+py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
+py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
+py_test(test_operator SRCS test_operator.py)
+py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
...
@@ -29,23 +29,28 @@ class OpTestMeta(type):
             for place in places:
                 for in_name in Operator.get_op_input_names(self.type):
-                    if hasattr(self, in_name):
+                    if hasattr(self, "inputs") and in_name in self.inputs:
                         kwargs[in_name] = in_name
                         var = scope.new_var(in_name).get_tensor()
-                        arr = getattr(self, in_name)
+                        arr = self.inputs[in_name]
                         var.set_dims(arr.shape)
                         var.set(arr, place)
                     else:
                         kwargs[in_name] = "@EMPTY@"

                 for out_name in Operator.get_op_output_names(self.type):
-                    if hasattr(self, out_name):
-                        kwargs[out_name] = out_name
-                        scope.new_var(out_name).get_tensor()
+                    if not hasattr(self, "outputs"):
+                        raise ValueError(
+                            "The test op must set self.outputs dict.")
+                    if out_name not in self.outputs:
+                        raise ValueError("The %s is not in self.outputs dict." %
+                                         (out_name))
+                    kwargs[out_name] = out_name
+                    scope.new_var(out_name).get_tensor()

                 for attr_name in Operator.get_op_attr_names(self.type):
-                    if hasattr(self, attr_name):
-                        kwargs[attr_name] = getattr(self, attr_name)
+                    if hasattr(self, "attrs") and attr_name in self.attrs:
+                        kwargs[attr_name] = self.attrs[attr_name]

                 op = Operator(self.type, **kwargs)

@@ -56,7 +61,7 @@ class OpTestMeta(type):
             for out_name in Operator.get_op_output_names(self.type):
                 actual = numpy.array(scope.find_var(out_name).get_tensor())
-                expect = getattr(self, out_name)
+                expect = self.outputs[out_name]
                 numpy.isclose(actual, expect)

         obj.test_all = test_all
...
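Under the new OpTestMeta contract, a test case no longer stores one attribute per variable name; it declares self.inputs, self.outputs, and optionally self.attrs as dicts keyed by the operator's declared names. A minimal sketch of the convention (the op type and shapes are arbitrary, and the op_test_util import path is assumed from the test directory layout):

import unittest

import numpy
from op_test_util import OpTestMeta


class ExampleAddTest(unittest.TestCase):
    __metaclass__ = OpTestMeta  # injects test_all at class creation time

    def setUp(self):
        self.type = "add_two"
        x = numpy.random.random((4, 4)).astype("float32")
        y = numpy.random.random((4, 4)).astype("float32")
        self.inputs = {'X': x, 'Y': y}  # keyed by the op's input names
        self.outputs = {'Out': x + y}   # expected values by output name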
...
@@ -12,9 +12,11 @@ class TestAddOp(unittest.TestCase):
     def setUp(self):
         self.type = "add_two"
-        self.X = numpy.random.random((102, 105)).astype("float32")
-        self.Y = numpy.random.random((102, 105)).astype("float32")
-        self.Out = self.X + self.Y
+        self.inputs = {
+            'X': numpy.random.random((102, 105)).astype("float32"),
+            'Y': numpy.random.random((102, 105)).astype("float32")
+        }
+        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

 class TestAddGradOp(unittest.TestCase):
...
...
@@ -7,15 +7,17 @@ class TestSGD(unittest.TestCase):
     __metaclass__ = OpTestMeta

     def setUp(self):
+        # TODO: this unit test does not pass yet
         self.type = "onehot_cross_entropy"
         batch_size = 100
         class_num = 10
-        self.X = numpy.random.random((batch_size, class_num)).astype("float32")
-        self.label = 5 * numpy.ones(batch_size).astype("int32")
+        X = numpy.random.random((batch_size, class_num)).astype("float32")
+        label = 5 * numpy.ones(batch_size).astype("int32")
+        self.inputs = {'X': X, 'label': label}
         Y = []
         for i in range(0, batch_size):
-            Y.append(-numpy.log(self.X[i][self.label[i]]))
-        self.Y = numpy.array(Y).astype("float32")
+            Y.append(-numpy.log(X[i][label[i]]))
+        self.outputs = {'Y': numpy.array(Y).astype("float32")}

 # TODO(superjom) add gradient check
...
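The loop above computes -log(X[i][label[i]]) row by row; with numpy fancy indexing the same expected output can be produced in one expression. A sketch using the test's shapes:

import numpy

batch_size, class_num = 100, 10
X = numpy.random.random((batch_size, class_num)).astype("float32")
label = 5 * numpy.ones(batch_size).astype("int32")

# Pick X[i, label[i]] for every row i, then take the negative log; this
# matches the per-row loop in the test above.
Y = -numpy.log(X[numpy.arange(batch_size), label])
assert Y.shape == (batch_size,)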
...
@@ -8,8 +8,8 @@ class TestMeanOp(unittest.TestCase):
     def setUp(self):
         self.type = "mean"
-        self.X = np.random.random((32, 784)).astype("float32")
-        self.Out = np.mean(self.X)
+        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs['X'])}

 if __name__ == '__main__':
...
...
@@ -8,9 +8,11 @@ class TestMulOp(unittest.TestCase):
     def setUp(self):
         self.type = "mul"
-        self.X = np.random.random((32, 84)).astype("float32")
-        self.Y = np.random.random((84, 100)).astype("float32")
-        self.Out = np.dot(self.X, self.Y)
+        self.inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'Y': np.random.random((84, 100)).astype("float32")
+        }
+        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

 if __name__ == '__main__':
...
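The mul test's expectation is an ordinary matrix product, so the inner dimensions must agree. A quick shape check:

import numpy as np

X = np.random.random((32, 84)).astype("float32")
Y = np.random.random((84, 100)).astype("float32")

# (32, 84) x (84, 100) -> (32, 100); np.dot is 2-D matrix multiplication here.
assert np.dot(X, Y).shape == (32, 100)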
...
@@ -8,9 +8,11 @@ class TestRowwiseAddOp(unittest.TestCase):
     def setUp(self):
         self.type = "rowwise_add"
-        self.X = np.random.random((32, 84)).astype("float32")
-        self.b = np.random.random(84).astype("float32")
-        self.Out = np.add(self.X, self.b)
+        self.inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'b': np.random.random(84).astype("float32")
+        }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

 if __name__ == '__main__':
...
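np.add(X, b) works here because numpy broadcasts the length-84 vector b across each of the 32 rows of X, which is exactly the "rowwise add" the op computes. A short check:

import numpy as np

X = np.random.random((32, 84)).astype("float32")
b = np.random.random(84).astype("float32")

out = np.add(X, b)  # b is broadcast across all 32 rows
assert out.shape == (32, 84)
assert np.allclose(out[0], X[0] + b)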
...
@@ -8,10 +8,13 @@ class TestSGD(unittest.TestCase):
     def setUp(self):
         self.type = "sgd"
-        self.param = numpy.random.random((102, 105)).astype("float32")
-        self.grad = numpy.random.random((102, 105)).astype("float32")
-        self.learning_rate = 0.1
-        self.param_out = self.param - self.learning_rate * self.grad
+        w = numpy.random.random((102, 105)).astype("float32")
+        g = numpy.random.random((102, 105)).astype("float32")
+        lr = 0.1
+
+        self.inputs = {'param': w, 'grad': g}
+        self.attrs = {'learning_rate': lr}
+        self.outputs = {'param_out': w - lr * g}

 if __name__ == "__main__":
...
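The expected output encodes the plain SGD update rule, param_out = param - learning_rate * grad. A tiny numeric check with made-up values:

import numpy

w = numpy.array([1.0, 2.0], dtype="float32")
g = numpy.array([0.5, -1.0], dtype="float32")
lr = 0.1

# param_out = param - lr * grad, elementwise.
param_out = w - lr * g
assert numpy.allclose(param_out, [0.95, 2.1])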
...
@@ -8,8 +8,8 @@ class TestSigmoidOp(unittest.TestCase):
     def setUp(self):
         self.type = "sigmoid"
-        self.X = np.random.random((32, 100)).astype("float32")
-        self.Y = 1 / (1 + np.exp(-self.X))
+        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}

 if __name__ == '__main__':
...
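The sigmoid expectation is the logistic function 1 / (1 + e^-x), which maps every real input into the open interval (0, 1). A quick property check:

import numpy as np

X = np.random.random((32, 100)).astype("float32")
Y = 1 / (1 + np.exp(-X))

# Logistic outputs always lie strictly between 0 and 1.
assert np.all((Y > 0) & (Y < 1))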
...
@@ -19,8 +19,10 @@ class TestSoftmaxOp(unittest.TestCase):
     def setUp(self):
         self.type = "softmax"
-        self.X = np.random.random((32, 100)).astype("float32")
-        self.Y = np.apply_along_axis(stable_softmax, 1, self.X)
+        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.outputs = {
+            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+        }

 class TestSoftmaxGradOp(unittest.TestCase):
...
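The softmax test relies on a stable_softmax helper defined elsewhere in the test file and applied row by row via np.apply_along_axis. A plausible sketch of such a helper using the standard max-shift trick (an assumption about its body, not the file's verbatim code):

import numpy as np


def stable_softmax(x):
    # Shifting by the max makes exp() safe from overflow; softmax is
    # invariant under adding a constant to its input.
    shiftx = x - np.max(x)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)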
 if (NOT APPLE)
     # The Mac OS X backend will not be able to function correctly if Python is
     # not installed as a framework.
-    add_python_test(test_ploter test_ploter.py)
+    py_test(test_ploter SRCS test_ploter.py)
 endif()
...
-add_python_test(reader_tests creator_test.py decorator_test.py)
+py_test(creator_test SRCS creator_test.py)
+py_test(decorator_test SRCS decorator_test.py)
...
-add_python_test(test_v2_api test_data_feeder.py test_op.py test_parameters.py
-    test_layer.py test_rnn_layer.py test_topology.py test_image.py)
+py_test(test_op SRCS test_op.py)
+py_test(test_image SRCS test_image.py)
+py_test(test_layer SRCS test_layer.py)
+py_test(test_topology SRCS test_topology.py)
+py_test(test_rnn_layer SRCS test_rnn_layer.py)
+py_test(test_parameters SRCS test_parameters.py)
+py_test(test_data_feeder SRCS test_data_feeder.py)