Commit 31cbb343 authored by: Yibing Liu

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into reshape_op_dev

......@@ -53,27 +53,27 @@ bool DeConv3DLayer::init(const LayerMap &layerMap,
size_t DeConv3DLayer::getSize() {
CHECK_NE(inputLayers_.size(), 0UL);
outputH_.clear();
outputW_.clear();
outputD_.clear();
imgSizeW_.clear();
imgSizeH_.clear();
imgSizeD_.clear();
N_.clear();
NOut_.clear();
size_t layerSize = 0;
for (size_t i = 0; i < inputLayers_.size(); ++i) {
outputW_.push_back(
imageSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true));
outputH_.push_back(imageSize(
imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
outputD_.push_back(imageSize(
imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
NOut_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
N_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]);
imgSizeW_.push_back(
imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i], true));
imgSizeH_.push_back(imageSize(
outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
imgSizeD_.push_back(imageSize(
outputD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
NOut_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]);
N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize);
layerSize += NOut_[i] * numFilters_;
}
getOutput().setFrameHeight(outputH_[0]);
getOutput().setFrameWidth(outputW_[0]);
getOutput().setFrameDepth(outputD_[0]);
getOutput().setFrameHeight(imgSizeH_[0]);
getOutput().setFrameWidth(imgSizeW_[0]);
getOutput().setFrameDepth(imgSizeD_[0]);
return layerSize;
}
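Note on the size computation above: the change swaps the roles of outputH_/W_/D_ (now the smaller, convolution-output-side dimensions taken from the input) and imgSizeH_/W_/D_ (the deconvolved image dimensions derived from them). A minimal sketch of the relation, assuming the usual caffeMode=true formula behind Paddle's imageSize() helper:

```python
# Hedged sketch, not Paddle code: assumed caffeMode=true relation behind imageSize().
def image_size(output_size, filter_size, padding, stride):
    # inverse of: output_size = (image_size - filter_size + 2 * padding) / stride + 1
    return (output_size - 1) * stride + filter_size - 2 * padding

# e.g. a 2x2x2 input deconvolved with filter 2, stride 2, padding 0 yields a 4x4x4 image,
# so getSize() accumulates imgSizeD * imgSizeH * imgSizeW * numFilters per input.
print(image_size(2, 2, 0, 2))  # -> 4
```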
......@@ -103,9 +103,9 @@ void DeConv3DLayer::forward(PassType passType) {
}
colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(),
numFilters_,
outputD_[i],
outputH_[i],
outputW_[i],
imgSizeD_[i],
imgSizeH_[i],
imgSizeW_[i],
filterSizeZ_[i],
filterSizeY_[i],
filterSize_[i],
......@@ -144,9 +144,9 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) {
colBuf_->vol2Col(
getOutputGrad()->getData() + n * getOutputGrad()->getStride(),
numFilters_,
outputD_[i],
outputH_[i],
outputW_[i],
imgSizeD_[i],
imgSizeH_[i],
imgSizeW_[i],
filterSizeZ_[i],
filterSizeY_[i],
filterSize_[i],
......
......@@ -77,24 +77,6 @@ void MKLDNNFcLayer::convertWeightsToPaddle() {
wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
}
void MKLDNNFcLayer::convertOutputToOtherDevice() {
copyOutputInfoToOtherDevice();
// find other cpu device and reorder output to cpu device
int cnt = 0;
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
// fc cpu output value do not need convert
// just share point
outputOtherDevice_[i].value = output_.value;
++cnt;
}
}
if (cnt > 1) {
LOG(WARNING) << "should not have more than one CPU devie";
}
}
void MKLDNNFcLayer::reshape() {
const Argument& input = getInput(0, getPrev(0)->getDeviceId());
int batchSize = input.getBatchSize();
......@@ -155,7 +137,10 @@ void MKLDNNFcLayer::resetFwd() {
// change original output value to mkldnn output value
output_.value = std::dynamic_pointer_cast<Matrix>(outVal_);
if (!outputIsOnlyMKLDNN()) {
convertOutputToOtherDevice();
copyOutputInfoToOtherDevice();
// fc cpu output value does not need a conversion,
// just share the data pointer
getOutput(CPU_DEVICE).value->setData(output_.value->getData());
}
// create forward handle
......@@ -235,13 +220,12 @@ void MKLDNNFcLayer::resetBwd() {
pipelineBwd_.push_back(*bwdWgt_);
/// backward data
device = inputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
const MatrixPtr& in = getInputGrad(0, device);
const MatrixPtr& in = inputLayers_[0]->getOutput().grad;
if (in == nullptr) {
return;
}
if (getInput(0, device).getAllCount() > 1) {
// TODO(TJ): use outputMaps_ ways when merge outgrad done
if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
// TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
} else {
inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc());
}
......@@ -258,13 +242,21 @@ void MKLDNNFcLayer::resetBwd() {
pipelineBwd_.push_back(*bwdData_);
}
void MKLDNNFcLayer::updateInputData() {
if (inputLayers_[0]->getType() != "data") {
return;
}
real* iData = getInputValue(0, CPU_DEVICE)->getData();
inVal_->setData(iData);
}
void MKLDNNFcLayer::forward(PassType passType) {
Layer::forward(passType);
reshape();
{
REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
syncInputValue();
updateInputData();
// just submit forward pipeline
stream_->submit(pipelineFwd_);
......@@ -286,7 +278,6 @@ void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
resetBwd();
syncOutputGrad();
// just submit backward pipeline
stream_->submit(pipelineBwd_);
}
......
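The net effect of the MKLDNNFcLayer changes above: instead of a conversion pass (convertOutputToOtherDevice), the CPU-device output now shares the MKLDNN output buffer via setData(), and updateInputData() refreshes the shared input pointer on each forward pass. A minimal NumPy sketch of the sharing idea (illustrative only, not Paddle's API):

```python
import numpy as np

mkldnn_out = np.zeros((4, 8), dtype=np.float32)  # stands in for output_.value written by MKLDNN
cpu_out = mkldnn_out                             # setData(): the CPU Argument points at the same memory

mkldnn_out[0, 0] = 1.0
assert cpu_out[0, 0] == 1.0  # CPU-side consumers see the result without any copy or reorder
```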
......@@ -53,6 +53,8 @@ public:
void backward(const UpdateCallback& callback) override;
void updateInputData() override;
protected:
/**
* reshape the input image sizes
......@@ -72,8 +74,6 @@ protected:
* only would be called when needed
*/
void resetBwd();
void convertOutputToOtherDevice() override;
};
} // namespace paddle
......@@ -114,10 +114,10 @@ public:
virtual void convertWeightsToPaddle() {}
/**
* convert MKLDNN output to other device.
* only support CPU device yet
* Update the input value data when the input layer is of "data" type,
* since the input value data address might be changed.
*/
virtual void convertOutputToOtherDevice() {}
virtual void updateInputData() {}
/**
* print info about sizes
......@@ -155,6 +155,7 @@ protected:
* copy base info and do not copy data value
*/
void copyOutputInfoToOtherDevice() {
int cnt = 0;
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight());
outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth());
......@@ -163,6 +164,12 @@ protected:
outputOtherDevice_[i].subSequenceStartPositions =
output_.subSequenceStartPositions;
outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
++cnt;
}
}
if (cnt > 1) {
LOG(WARNING) << "should not have more than one CPU device";
}
}
......@@ -193,32 +200,6 @@ protected:
return outputOtherDevice_.size() == 0;
}
/**
* Sync input value data
*/
void syncInputValue() {
if (inputIsOnlyMKLDNN()) {
return;
}
real* iData = getInputValue(0, CPU_DEVICE)->getData();
// update input data
// since it might be changed if this is after data layer
inVal_->updateData(iData);
}
/**
* Sync output grad data
*/
void syncOutputGrad() {
if (outputIsOnlyMKLDNN()) {
return;
}
// update diff
real* oDiff = getOutput(CPU_DEVICE).grad->getData();
outGrad_->updateData(oDiff);
}
/**
* Set deviceId of this layer.
*/
......
......@@ -2302,26 +2302,27 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) {
conv->set_stride(2);
conv->set_stride_y(2);
conv->set_stride_z(2);
conv->set_img_size(IMAGE_SIZE);
conv->set_img_size_y(IMAGE_SIZE_Y);
conv->set_img_size_z(IMAGE_SIZE_Z);
conv->set_output_x(imageSize(conv->img_size(),
conv->set_output_x(IMAGE_SIZE);
conv->set_output_y(IMAGE_SIZE_Y);
conv->set_output_z(IMAGE_SIZE_Z);
conv->set_img_size(imageSize(conv->output_x(),
conv->filter_size(),
conv->padding(),
conv->stride(),
true));
conv->set_output_y(imageSize(conv->img_size_y(),
conv->set_img_size_y(imageSize(conv->output_y(),
conv->filter_size_y(),
conv->padding_y(),
conv->stride_y(),
true));
conv->set_output_z(imageSize(conv->img_size_z(),
conv->set_img_size_z(imageSize(conv->output_z(),
conv->filter_size_z(),
conv->padding_z(),
conv->stride_z(),
true));
config.layerConfig.set_size(conv->output_x() * conv->output_y() *
conv->output_z() * NUM_FILTERS);
config.layerConfig.set_size(conv->img_size() * conv->img_size_y() *
conv->img_size_z() * NUM_FILTERS);
conv->set_groups(1);
conv->set_filter_channels(conv->channels() / conv->groups());
config.inputDefs.push_back(
......
......@@ -33,14 +33,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) {
size_t width = cnts / dims[0];
m = Matrix::create(height, width, false, false);
}
CHECK(m) << " Matrix should not be empty";
CpuMatrixPtr cpuMatrix = std::dynamic_pointer_cast<CpuMatrix>(m);
CHECK(cpuMatrix) << "Only support create from CPU matrix yet";
CHECK_EQ(cnts, m->getElementCnt()) << "Count size does not match";
return std::make_shared<MKLDNNMatrix>(
m->getData(), m->getHeight(), m->getWidth(), pd);
CHECK_EQ(cpuMatrix->getElementCnt(), cnts) << "Count size does not match";
return std::make_shared<MKLDNNMatrix>(cpuMatrix, pd);
}
MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m,
......@@ -138,7 +136,7 @@ void MKLDNNMatrix::downSpatial() {
mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr),
"could not create a memory primitive");
reset(result);
set_data_handle(getData());
set_data_handle(data_);
}
} // namespace paddle
......@@ -30,11 +30,10 @@ typedef std::shared_ptr<MKLDNNMatrix> MKLDNNMatrixPtr;
*/
class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory {
public:
MKLDNNMatrix(real* data,
size_t height,
size_t width,
mkldnn::memory::primitive_desc pd)
: CpuMatrix(data, height, width, false), mkldnn::memory(pd, data) {}
MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd)
: CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false),
mkldnn::memory(pd, m->getData()),
m_(m) {}
~MKLDNNMatrix() {}
......@@ -81,11 +80,29 @@ public:
void downSpatial();
/**
* Update the memory data handle.
* set the memory data handle.
* Caution: This will not check the buffer size of the data,
* it should be covered by the user.
*/
void updateData(void* data) { set_data_handle(data); }
void setData(real* data) {
set_data_handle(data);
CpuMatrix::setData(data);
m_.reset();
}
/**
* override Matrix::getData
* check data before return
*/
real* getData() override {
CHECK_EQ((void*)data_, get_data_handle());
return data_;
}
const real* getData() const override {
CHECK_EQ((void*)data_, get_data_handle());
return data_;
}
/**
* Get primitive descriptor.
......@@ -143,6 +160,10 @@ protected:
memory::format srcFmt,
memory::format dstFmt,
memory::dims dm);
private:
// save the CpuMatrixPtr in case the buffer is released outside
CpuMatrixPtr m_;
};
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/concat_op.h"
#include <vector>
namespace paddle {
namespace operators {
using framework::Tensor;
class ConcatOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::Tensor>("Out");
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
size_t n = ins.size();
PADDLE_ENFORCE_GT(n, 1, "Input tensors count should be greater than 1.");
auto out_dims = ins[0]->dims();
size_t in_zero_dims_size = out_dims.size();
for (size_t i = 1; i < n; i++) {
for (size_t j = 0; j < in_zero_dims_size; j++) {
if (j == axis) {
out_dims[axis] += ins[i]->dims()[j];
continue;
}
PADDLE_ENFORCE_EQ(out_dims[j], ins[i]->dims()[j],
"Input tensors should have the same "
"elements except the specify axis.")
}
}
out->Resize(out_dims);
}
};
class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "the input tensors of concat operator.").AsDuplicable();
AddOutput("Out", "the output tensor of concat operator.");
AddComment(R"DOC(
Join the input tensors along the given axis.
Examples:
Input[0] = [[1,2],[3,4]]
Input[1] = [[5,6]]
axis = 0
Output = [[1,2],
[3,4],
[5,6]]
)DOC");
AddAttr<int>("axis", "The axis which the inputs will be joined with.")
.SetDefault(0);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(concat, ops::ConcatOp, ops::ConcatOpMaker)
REGISTER_OP_CPU_KERNEL(concat,
ops::ConcatKernel<paddle::platform::CPUPlace, float>)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/concat_op.h"
namespace ops = paddle::operators;
// TODO(Yancey1989) Add GPU kernel
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class ConcatKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto* out = ctx.Output<framework::Tensor>("Out");
int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
size_t n = ins.size();
size_t output_axis_dim = 0;
size_t before = 1, after = 1;
for (size_t i = 0; i < n; i++) {
output_axis_dim += ins[i]->dims()[axis];
}
auto& input_zero = ins[0];
for (int64_t i = 0; i < input_zero->dims().size(); i++) {
if (i == axis) {
continue;
}
if (i < axis) {
before *= input_zero->dims()[i];
} else {
after *= input_zero->dims()[i];
}
}
size_t output_offset = 0;
for (size_t i = 0; i < n; i++) {
auto& in = ins[i];
auto axis_dim = in->dims()[axis];
for (size_t j = 0; j < before; j++) {
size_t len = axis_dim * after * sizeof(T);
const T* src = in->data<T>() + axis_dim * after * j;
T* out_data = out->mutable_data<T>(platform::CPUPlace());
T* dest = out_data + output_offset + output_axis_dim * after * j;
memcpy(dest, src, len);
}
output_offset += axis_dim * after;
}
}
};
} // namespace operators
} // namespace paddle
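A NumPy sketch of the indexing scheme the CPU kernel above uses (a hedged re-implementation for illustration, not Paddle code): each input is treated as a (before, axis_dim, after) block and copied at a running offset along the concat axis, which reproduces numpy.concatenate:

```python
import numpy as np

def concat(inputs, axis):
    out_axis_dim = sum(x.shape[axis] for x in inputs)
    out_shape = list(inputs[0].shape)
    out_shape[axis] = out_axis_dim
    out = np.empty(out_shape, dtype=inputs[0].dtype)
    out_flat = out.reshape(-1)

    before = int(np.prod(inputs[0].shape[:axis]))     # product of dims before the axis
    after = int(np.prod(inputs[0].shape[axis + 1:]))  # product of dims after the axis
    output_offset = 0
    for x in inputs:
        axis_dim = x.shape[axis]
        src_flat = x.reshape(-1)
        for j in range(before):
            src = src_flat[j * axis_dim * after:(j + 1) * axis_dim * after]
            dst = output_offset + out_axis_dim * after * j
            out_flat[dst:dst + axis_dim * after] = src
        output_offset += axis_dim * after
    return out

x0 = np.random.rand(2, 3, 2, 5).astype('float32')
x1 = np.random.rand(2, 3, 3, 5).astype('float32')
assert np.allclose(concat([x0, x1], 2), np.concatenate([x0, x1], axis=2))
```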
......@@ -49,6 +49,7 @@ USE_OP(minus);
USE_OP(cos_sim);
USE_CPU_ONLY_OP(gather);
USE_CPU_ONLY_OP(scatter);
USE_CPU_ONLY_OP(concat);
USE_OP(top_k);
USE_OP(squared_l2_distance);
USE_OP(sum);
......
......@@ -43,7 +43,6 @@ class OpDescCreationMethod(object):
if len(args) != 0:
raise ValueError("Only keyword arguments are supported.")
op_desc = framework_pb2.OpDesc()
for input_parameter in self.__op_proto__.inputs:
input_arguments = kwargs.get(input_parameter.name, [])
if is_str(input_arguments):
......
......@@ -19,8 +19,6 @@ py_test(test_scatter_op SRCS test_scatter_op.py)
py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
py_test(test_top_k_op SRCS test_top_k_op.py)
py_test(gradient_checker SRCS gradient_checker.py)
py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
......@@ -35,5 +33,6 @@ py_test(test_lookup_table SRCS test_lookup_table.py)
py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py)
py_test(test_sum_op SRCS test_sum_op.py)
py_test(mnist SRCS mnist.py)
py_test(test_concat_op SRCS test_concat_op.py)
py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py)
py_test(test_reshape_op SRCS test_reshape_op.py)
import unittest
import numpy
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
__all__ = ['get_numeric_gradient']
def create_op(op_type):
# TODO need to set attrs
kwargs = dict()
for in_name in Operator.get_op_input_names(op_type):
kwargs[in_name] = in_name
for out_name in Operator.get_op_output_names(op_type):
kwargs[out_name] = out_name
return Operator(op_type, **kwargs)
def grad_var_name(var_name):
return var_name + "@GRAD"
def empty_var_name():
return "@EMPTY@"
def get_numeric_gradient(op,
input_values,
output_name,
input_to_check,
delta=0.005,
local_scope=None,
in_place=False):
"""
Get Numeric Gradient for an operator's input.
:param op: C++ operator instance; could be a network.
:param input_values: The input variables. Should be a dictionary whose keys
are variable names and whose values are numpy arrays.
:param output_name: The final output variable name.
:param input_to_check: The input variable for which to compute the gradient.
:param delta: The perturbation value for the numeric gradient method. The
smaller delta is, the more accurate the result, but if delta is too small,
numerical stability problems may occur.
:param local_scope: The local scope used for get_numeric_gradient.
:return: The gradient array in numpy format.
"""
if local_scope is None:
local_scope = core.Scope()
# Create all input variable in local_scope
for var_name in input_values:
var = local_scope.new_var(var_name)
tensor = var.get_tensor()
tensor.set_dims(input_values[var_name].shape)
tensor.alloc_float(core.CPUPlace())
tensor.set(input_values[var_name], core.CPUPlace())
# Create all output variable in local_scope
opts = op.outputs()
for key in opts:
for output in opts[key]:
if local_scope.find_var(output) is None:
local_scope.new_var(output).get_tensor()
op.infer_shape(local_scope)
# allocate output memory
for key in opts:
for output in opts[key]:
local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace(
))
cpu_ctx = core.DeviceContext.create(core.CPUPlace())
def get_output():
op.run(local_scope, cpu_ctx)
return numpy.array(local_scope.find_var(output_name).get_tensor()).sum()
def product(dim):
return reduce(lambda a, b: a * b, dim, 1)
def restore_inputs():
for var_name in input_values:
tensor_ = local_scope.find_var(var_name).get_tensor()
tensor_.set(numpy.copy(input_values[var_name]), core.CPUPlace())
# get the input tensor whose numeric gradient we want to compute.
tensor_to_check = local_scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.get_dims())
# prepare a numpy array to store the gradient.
gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32')
# we only compute gradient of one element each time.
# we use a for loop to compute the gradient of every element.
for i in xrange(tensor_size):
if in_place:
restore_inputs()
# get one input element through its index i.
origin = tensor_to_check.get_float_element(i)
# add delta to it, run op and then get the sum of the result tensor.
x_pos = origin + delta
tensor_to_check.set_float_element(i, x_pos)
y_pos = get_output()
# subtract delta from this element, run op and get the sum of the result tensor.
if in_place:
restore_inputs()
x_neg = origin - delta
tensor_to_check.set_float_element(i, x_neg)
y_neg = get_output()
# restore old value
tensor_to_check.set_float_element(i, origin)
# compute the gradient of this element and store it into a numpy array.
gradient_flat[i] = (y_pos - y_neg) / delta / 2
# reshape the gradient result to the shape of the source tensor.
return gradient_flat.reshape(tensor_to_check.get_dims())
class GradientChecker(unittest.TestCase):
def __get_gradient(self, forward_op, backward_op, input_value, grad_names,
place):
"""Get the input gradients after running forward and backward operators
on the given places.
:param forward_op: forward operator
:type forward_op: Operator
:param backward_op: backward operator
:type backward_op: Operator
:param input_value: input values.
:type input_value: dict{string:numpy.array}
:param grad_names: the names of returned input gradients.
:type grad_names: a list of string
:param place: the device type.
:type place: CPUPlace or GPUPlace
:return: the input gradients of the given grad_names.
:rtype: a list of numpy.array
"""
scope = core.Scope()
ctx = core.DeviceContext.create(place)
inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
outputs = forward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
# create input var and set value
for name, value in input_value.iteritems():
if name not in in_names:
raise ValueError(name + " does not exist in Op's inputs.")
var = scope.new_var(name).get_tensor()
var.set_dims(value.shape)
var.set(value, place)
# run forward op
for out_name in out_names:
scope.new_var(out_name)
forward_op.infer_shape(scope)
forward_op.run(scope, ctx)
# set output var's shape
# set output grad to ones
for name in out_names:
out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
grad_tensor.set_dims(out_tensor.shape())
data = numpy.ones(out_tensor.shape(), dtype=numpy.float32)
grad_tensor.set(data, place)
# run backward op
backward_outs = backward_op.outputs()
backward_names = [
item for key in backward_outs for item in backward_outs[key]
]
for name in backward_names:
scope.new_var(name)
backward_op.infer_shape(scope)
backward_op.run(scope, ctx)
outs = [
numpy.array(scope.find_var(name).get_tensor())
for name in grad_names
]
return outs
def compare_grad(self, forward_op, input_value, no_grad_set=None):
""" Compare the input gradients between CPU and GPU for the given forward
operator.
:param forward_op: forward operator
:type forward_op: Operator
:param input_value: input values.
:type input_value: dict{string:numpy.array}
:param no_grad_set: the set of variables names without gradients.
:type no_grad_set: a set of string
:raises: AssertionError if any gradient value differs.
"""
if no_grad_set is None:
no_grad_set = set()
backward_op = core.Operator.backward(forward_op, no_grad_set)
# return if not compile with GPU or not implementing GPU kernel
if not (core.is_compile_gpu() and backward_op.support_gpu()):
return
outputs = backward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
out_names = filter(lambda x: x != empty_var_name(), out_names)
cpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.CPUPlace())
gpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.GPUPlace(0))
for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads,
out_names):
self.assertTrue(
numpy.allclose(
c_grad, g_grad, atol=1e-4),
"output name: " + name + " has diff")
def __assert_is_close(self, numeric_grads, analytic_grads, names,
max_relative_error, msg_prefix):
"""Use relative error for the comparison.
:param numeric_grads: the numerical gradients.
:type numeric_grads: a list of numpy.array
:param analytic_grads: the analytical gradients.
:type analytic_grads: a list of numpy.array
:param names: the names of gradients, used to print for debug.
:type names: a list of string
:param msg_prefix: string info, used to print for debug.
:type msg_prefix: string
"""
for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
abs_a = numpy.abs(a)
# if abs_a is nearly zero, then use abs error for a, not relative
# error.
abs_a[abs_a < 1e-3] = 1
diff_mat = numpy.abs(a - b) / abs_a
max_diff = numpy.max(diff_mat)
def err_msg():
offset = numpy.argmax(diff_mat > max_relative_error)
return "%s Variable %s max gradient diff %f over limit %f, the first " \
"error element is %d" % (
msg_prefix, name, max_diff, max_relative_error, offset)
self.assertLessEqual(max_diff, max_relative_error, err_msg())
def check_grad(self,
forward_op,
input_vars,
inputs_to_check,
output_name,
no_grad_set=None,
only_cpu=False,
in_place=False,
max_relative_error=0.005):
"""
:param forward_op: used to create backward_op
:param input_vars: numpy values of the input variables. The following
computation will use these variables.
:param inputs_to_check: names of the input variables whose gradients should be checked.
:param output_name: the output variable name of the forward network.
:param max_relative_error: The relative tolerance parameter.
:param no_grad_set: used when creating the backward ops.
:param only_cpu: only compute and check gradients on the CPU kernel.
:return:
"""
if no_grad_set is None:
no_grad_set = set()
no_tmp_out = forward_op.no_intermediate_outputs()
if len(no_tmp_out) != 1:
raise ValueError("non temp out_names should be 1")
inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
for no_grad in no_grad_set:
if no_grad not in in_names:
raise ValueError("no_grad should be in in_names")
if no_grad in inputs_to_check:
raise ValueError("no_grad should not be in inputs_to_check")
backward_op = core.Operator.backward(forward_op, no_grad_set)
places = [core.CPUPlace()]
if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu():
places.append(core.GPUPlace(0))
# get numerical gradients
numeric_grads = [
get_numeric_gradient(
forward_op, input_vars, output_name, name, in_place=in_place)
for name in inputs_to_check
]
check_names = [grad_var_name(name) for name in inputs_to_check]
for place in places:
analytic_grads = self.__get_gradient(forward_op, backward_op,
input_vars, check_names, place)
self.__assert_is_close(numeric_grads, analytic_grads, check_names,
max_relative_error,
"Gradient Check On %s" % str(place))
......@@ -9,7 +9,7 @@ def grad_var_name(var_name):
return var_name + "@GRAD"
def create_op(scope, op_type, inputs, outputs, attrs=None):
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
for in_name, in_dup in Operator.get_op_inputs(op_type):
......@@ -17,7 +17,7 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for sub_in_name in sub_in:
for sub_in_name, _ in sub_in:
var = scope.new_var(sub_in_name)
kwargs[in_name].append(sub_in_name)
else:
......@@ -29,14 +29,15 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
kwargs[out_name] = []
if out_dup:
sub_in = outputs[out_name]
for sun_in_name in sub_in:
var = scope.new_var(sun_in_name)
kwargs[out_name].append(sun_in_name)
for sub_in_name, _ in sub_in:
var = scope.new_var(sub_in_name)
kwargs[out_name].append(sub_in_name)
else:
var = scope.new_var(out_name)
kwargs[out_name].append(out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
......@@ -46,12 +47,11 @@ def set_input(scope, op, inputs, place):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for sub_in_name in sub_in:
for sub_in_name, sub_in_array in sub_in:
var = scope.find_var(sub_in_name)
tensor = var.get_tensor()
arr = sub_in[sub_in_name]
tensor.set_dims(arr.shape)
tensor.set(arr, place)
tensor.set_dims(sub_in_array.shape)
tensor.set(sub_in_array, place)
else:
var = scope.find_var(in_name)
tensor = var.get_tensor()
......@@ -65,7 +65,7 @@ def set_output_grad(scope, op, outputs, place):
if out_name in outputs:
if out_dup:
sub_out = outputs[out_name]
for sub_out_name in sub_out:
for sub_out_name, _ in sub_out:
out_tensor = scope.find_var(sub_out_name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(
sub_out_name)).get_tensor()
......@@ -110,7 +110,7 @@ def get_numeric_gradient(scope,
# we use a for loop to compute the gradient of every element.
for i in xrange(tensor_size):
if in_place:
set_input(op, inputs, core.CPUPlace())
set_input(scope, op, inputs, core.CPUPlace())
# get one input element through its index i.
origin = tensor_to_check.get_float_element(i)
......@@ -120,7 +120,7 @@ def get_numeric_gradient(scope,
y_pos = get_output()
if in_place:
set_input(op, inputs, core.CPUPlace())
set_input(scope, op, inputs, core.CPUPlace())
x_neg = origin - delta
tensor_to_check.set_float_element(i, x_neg)
......@@ -168,7 +168,10 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place,
class OpTest(unittest.TestCase):
def check_output_with_place(self, place):
self.scope = core.Scope()
self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
op_attrs)
if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
return
set_input(self.scope, self.op, self.inputs, place)
......@@ -227,7 +230,10 @@ class OpTest(unittest.TestCase):
in_place=False,
max_relative_error=0.005):
self.scope = core.Scope()
self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
op_attrs)
if no_grad_set is None:
no_grad_set = set()
......
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
class OpTestMeta(type):
"""
Operator Test ClassMeta.
It injects a `test_all` method into the user's OperatorTest class, so that the
Python unittest module runs that method.
The `test_all` method reads the values stored in `self`, uses them to
create and run an operator, and checks whether that op behaves correctly.
See `test_add_two_op` for example usage.
"""
def __new__(cls, name, bases, attrs):
obj = super(OpTestMeta, cls).__new__(cls, name, bases, attrs)
def test_all(self):
scope = core.Scope()
kwargs = dict()
places = [core.CPUPlace()]
if core.is_compile_gpu():
places.append(core.GPUPlace(0))
for place in places:
for in_name in Operator.get_op_input_names(self.type):
if hasattr(self, "inputs") and in_name in self.inputs:
kwargs[in_name] = in_name
var = scope.new_var(in_name).get_tensor()
arr = self.inputs[in_name]
var.set_dims(arr.shape)
var.set(arr, place)
else:
kwargs[in_name] = "@EMPTY@"
for out_name in Operator.get_op_output_names(self.type):
if not hasattr(self, "outputs"):
raise ValueError(
"The test op must set self.outputs dict.")
if out_name not in self.outputs:
raise ValueError("The %s is not in self.outputs dict." %
(out_name))
kwargs[out_name] = out_name
scope.new_var(out_name).get_tensor()
for attr_name in Operator.get_op_attr_names(self.type):
if hasattr(self, "attrs") and attr_name in self.attrs:
kwargs[attr_name] = self.attrs[attr_name]
op = Operator(self.type, **kwargs)
if isinstance(place, core.GPUPlace) and not op.support_gpu():
return
op.infer_shape(scope)
ctx = core.DeviceContext.create(place)
op.run(scope, ctx)
for out_name in Operator.get_op_output_names(self.type):
actual = numpy.array(scope.find_var(out_name).get_tensor())
expect = self.outputs[out_name]
self.assertTrue(
numpy.allclose(
actual, expect, atol=1e-05),
"output name: " + out_name + " has diff")
obj.test_all = test_all
return obj
import unittest
import numpy as np
from op_test import OpTest
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
from op_test_util import OpTestMeta
class TestAddOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestAddOp(OpTest):
def setUp(self):
self.type = "add"
self.op_type = "add"
self.inputs = {
'X': numpy.random.random((102, 105)).astype("float32"),
'Y': numpy.random.random((102, 105)).astype("float32")
'X': np.random.random((102, 105)).astype("float32"),
'Y': np.random.random((102, 105)).astype("float32")
}
self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestConcatOp(OpTest):
def setUp(self):
self.op_type = "concat"
x0 = np.random.random((2, 3, 2, 5)).astype('float32')
x1 = np.random.random((2, 3, 3, 5)).astype('float32')
x2 = np.random.random((2, 3, 4, 5)).astype('float32')
axis = 2
self.inputs = {'X': [('x0', x0), ('x1', x1), ('x2', x2)]}
self.attrs = {'axis': axis}
self.outputs = {'Out': np.concatenate((x0, x1, x2), axis=axis)}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
unittest.main()
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
from op_test import OpTest
class TestCosSimOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestCosSimOp(OpTest):
def setUp(self):
self.type = "cos_sim"
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((32, 64)).astype("float32"),
'Y': np.random.random((32, 64)).astype("float32")
'X': np.random.random((10, 5)).astype("float32"),
'Y': np.random.random((10, 5)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
......@@ -23,38 +20,20 @@ class TestCosSimOp(unittest.TestCase):
'Out': np.expand_dims(expect_out, 1)
}
def test_check_output(self):
self.check_output()
class TestCosSimGradOp(GradientChecker):
def setUp(self):
self.op = create_op("cos_sim")
self.inputs = {
'X': np.random.random((10, 5)).astype("float32"),
'Y': np.random.random((10, 5)).astype("float32")
}
def test_cpu_gpu_compare(self):
self.compare_grad(self.op, self.inputs)
def test_normal(self):
self.check_grad(
self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05)
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)
def test_ignore_x(self):
def test_check_grad_ingore_x(self):
self.check_grad(
self.op,
self.inputs, ["Y"],
"Out",
max_relative_error=0.05,
no_grad_set={"X"})
['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
def test_ignore_y(self):
def test_check_grad_ignore_y(self):
self.check_grad(
self.op,
self.inputs, ["X"],
"Out",
max_relative_error=0.05,
no_grad_set={"Y"})
['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
......@@ -21,7 +21,7 @@ class TestCrossEntropy(OpTest):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Y")
self.check_grad(['X'], 'Y')
if __name__ == "__main__":
......
import unittest
from op_test_util import OpTestMeta
import numpy
import numpy as np
from op_test import OpTest
class TestFillZerosLikeOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestFillZerosLikeOp(OpTest):
def setUp(self):
self.type = "fill_zeros_like"
self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")}
self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])}
self.op_type = "fill_zeros_like"
self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
import numpy as np
from op_test import OpTest
class TestGatherOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestGatherOp(OpTest):
def setUp(self):
self.type = "gather"
xnp = numpy.random.random((10, 20)).astype("float32")
self.inputs = {
'X': xnp,
'Index': numpy.array([1, 3, 5]).astype("int32")
}
self.outputs = {'Out': self.inputs['X'][self.inputs['Index']]}
self.op_type = "gather"
xnp = np.random.random((10, 20)).astype("float32")
self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")}
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
def test_check_output(self):
self.check_output()
class TestGatherGradOp(GradientChecker):
def test_gather_grad(self):
op = create_op("gather")
xnp = numpy.random.random((10, 20)).astype("float32")
inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")}
self.check_grad(op, inputs, set("X"), "Out")
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
......
......@@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase):
def gaussian_random_test(self, place):
scope = core.Scope()
scope.new_var("Out").get_tensor()
scope.new_var('Out').get_tensor()
op = Operator(
"gaussian_random",
Out="Out",
Out='Out',
dims=[1000, 784],
mean=.0,
std=1.,
......@@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase):
op.infer_shape(scope)
context = core.DeviceContext.create(place)
op.run(scope, context)
tensor = numpy.array(scope.find_var("Out").get_tensor())
tensor = numpy.array(scope.find_var('Out').get_tensor())
self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
import numpy
from paddle.v2.framework.op import Operator
from gradient_checker import GradientChecker
from gradient_checker import get_numeric_gradient
import numpy as np
import paddle.v2.framework.core as core
from op_test import get_numeric_gradient
from op_test import create_op
class GetNumericGradientTest(unittest.TestCase):
def test_add_op(self):
add_op = Operator("add", X="X", Y="Y", Out="Z")
x = numpy.random.random((10, 1)).astype("float32")
y = numpy.random.random((10, 1)).astype("float32")
arr = get_numeric_gradient(add_op, {"X": x, "Y": y}, "Z", "X")
x = np.random.random((10, 1)).astype("float32")
y = np.random.random((10, 1)).astype("float32")
z = x + y
scope = core.Scope()
add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')
self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)
def test_softmax_op(self):
def stable_softmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
shiftx = x - numpy.max(x)
exps = numpy.exp(shiftx)
return exps / numpy.sum(exps)
shiftx = x - np.max(x)
exps = np.exp(shiftx)
return exps / np.sum(exps)
def label_softmax_grad(Y, dY):
dX = Y * 0.0
for i in range(Y.shape[0]):
d = numpy.dot(Y[i, :], dY[i, :])
d = np.dot(Y[i, :], dY[i, :])
dX[i, :] = Y[i, :] * (dY[i, :] - d)
return dX
softmax_op = Operator("softmax", X="X", Y="Y")
X = numpy.random.random((2, 2)).astype("float32")
Y = numpy.apply_along_axis(stable_softmax, 1, X)
dY = numpy.ones(Y.shape)
X = np.random.random((2, 2)).astype("float32")
Y = np.apply_along_axis(stable_softmax, 1, X)
dY = np.ones(Y.shape)
dX = label_softmax_grad(Y, dY)
arr = get_numeric_gradient(softmax_op, {"X": X}, "Y", "X")
numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)
scope = core.Scope()
softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
np.testing.assert_almost_equal(arr, dX, decimal=1e-2)
if __name__ == "__main__":
......
import unittest
import numpy as np
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
from op_test import OpTest
class TestLookupTableOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestLookupTableOp(OpTest):
def setUp(self):
self.type = 'lookup_table'
table = np.random.random((17, 31)).astype('float32')
ids = np.random.randint(0, 17, 4).astype('int32')
self.op_type = "lookup_table"
table = np.random.random((17, 31)).astype("float32")
ids = np.random.randint(0, 17, 4).astype("int32")
self.inputs = {'W': table, 'Ids': ids}
self.outputs = {'Out': table[ids]}
def test_check_output(self):
self.check_output()
class TestLookupTableGradOp(GradientChecker):
def test_grad(self):
op = create_op('lookup_table')
table = np.random.random((17, 31)).astype('float32')
ids = np.random.randint(0, 17, 4).astype('int32')
inputs = {'W': table, 'Ids': ids}
# comapre gradients
self.compare_grad(op, inputs, set(['Ids']))
# check gradients
self.check_grad(op, inputs, set('W'), 'Out')
def test_check_grad(self):
self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
import numpy as np
from op_test import OpTest
class TestMeanOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestMeanOp(OpTest):
def setUp(self):
self.type = "mean"
self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
self.outputs = {'Out': np.mean(self.inputs['X'])}
self.op_type = "mean"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.outputs = {'Out': np.mean(self.inputs["X"])}
def test_check_output(self):
self.check_output()
class MeanGradOpTest(GradientChecker):
def test_normal(self):
op = create_op("mean")
inputs = {"X": np.random.random((10, 10)).astype("float32")}
self.check_grad(op, inputs, set("X"), "Out")
def test_checkout_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
from op_test import OpTest
class MinusOpTest(unittest.TestCase):
__metaclass__ = OpTestMeta
class MinusOpTest(OpTest):
def setUp(self):
self.type = "minus"
self.op_type = "minus"
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'Y': np.random.random((32, 84)).astype("float32")
}
self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}
def test_check_output(self):
self.check_output()
class MinusGradTest(GradientChecker):
def test_left(self):
op = create_op("minus")
inputs = {
"X": np.random.random((10, 10)).astype("float32"),
"Y": np.random.random((10, 10)).astype("float32")
}
self.check_grad(op, inputs, ["X", 'Y'], "Out")
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
from paddle.v2.framework.op import Operator
from op_test import OpTest
class TestMulOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestMulOp(OpTest):
def setUp(self):
self.type = "mul"
self.op_type = "mul"
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'Y': np.random.random((84, 100)).astype("float32")
}
self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
class TestMulOp2(unittest.TestCase):
__metaclass__ = OpTestMeta
def test_check_grad_ingore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
class TestMulOp2(OpTest):
def setUp(self):
self.type = "mul"
self.op_type = "mul"
self.inputs = {
'X': np.random.random((15, 4, 12, 10)).astype("float32"),
'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
......@@ -32,72 +40,20 @@ class TestMulOp2(unittest.TestCase):
self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
}
def test_check_output(self):
self.check_output()
class TestMulGradOp(GradientChecker):
def setUp(self):
self.op = create_op("mul")
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'Y': np.random.random((84, 100)).astype("float32")
}
def test_cpu_gpu_compare(self):
self.compare_grad(self.op, self.inputs)
def test_normal(self):
# mul op will enlarge the relative error
self.check_grad(
self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
def test_ignore_x(self):
self.check_grad(
self.op,
self.inputs, ["Y"],
"Out",
max_relative_error=0.5,
no_grad_set={"X"})
def test_ignore_y(self):
self.check_grad(
self.op,
self.inputs, ["X"],
"Out",
max_relative_error=0.5,
no_grad_set={"Y"})
class TestMulGradTest2(GradientChecker):
def setUp(self):
self.op = Operator(
"mul", X="X", Y="Y", Out="Out", x_num_col_dims=2, y_num_col_dims=2)
self.inputs = {
"X": np.random.random((15, 4, 12, 10)).astype("float32"),
"Y": np.random.random((4, 30, 8, 2, 9)).astype("float32")
}
def test_cpu_gpu_compare(self):
self.compare_grad(self.op, self.inputs)
def test_normal(self):
self.check_grad(
self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
def test_ignore_x(self):
def test_check_grad_ingore_x(self):
self.check_grad(
self.op,
self.inputs, ["Y"],
"Out",
max_relative_error=0.5,
no_grad_set={"X"})
['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))
def test_ignore_y(self):
def test_check_grad_ignore_y(self):
self.check_grad(
self.op,
self.inputs, ["X"],
"Out",
max_relative_error=0.5,
no_grad_set={"Y"})
['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
......@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}
self.assertEqual(expected, "\n" + str(net))
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
from op_test import OpTest
class TestRowwiseAddOp(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self):
self.type = "rowwise_add"
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'b': np.random.random(84).astype("float32")
}
self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
class TestRowwiseAddOp2(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestRowwiseAddOp(OpTest):
def setUp(self):
self.type = "rowwise_add"
self.op_type = "rowwise_add"
self.inputs = {
'X': np.random.random((13, 6, 7, 8)).astype("float32"),
'b': np.random.random((7, 8)).astype("float32")
'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
'b': np.random.uniform(0.1, 1, [10]).astype("float32")
}
self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
def test_check_output(self):
self.check_output()
class TestRowwiseAddGradOp(GradientChecker):
def setUp(self):
self.op = create_op("rowwise_add")
self.inputs = {
"X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
"b": np.random.uniform(0.1, 1, [10]).astype("float32")
}
def test_check_grad_normal(self):
self.check_grad(['X', 'b'], 'Out')
def test_normal(self):
self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
def test_check_grad_ingore_b(self):
self.check_grad(['X'], 'Out', no_grad_set=set('b'))
def test_ignore_b(self):
self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
def test_check_grad_ingore_x(self):
self.check_grad(['b'], 'Out', no_grad_set=set('X'))
def test_ignore_x(self):
self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
class TestRowwiseAddGradOp2(GradientChecker):
class TestRowwiseAddOp2(OpTest):
def setUp(self):
self.op = create_op("rowwise_add")
self.op_type = "rowwise_add"
self.inputs = {
"X": np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
"b": np.random.uniform(0.1, 1, [2, 5]).astype("float32")
'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32")
}
self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
def test_check_output(self):
self.check_output()
def test_normal(self):
self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
def test_check_grad_normal(self):
self.check_grad(['X', 'b'], 'Out')
def test_ignore_b(self):
self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
def test_check_grad_ignore_b(self):
self.check_grad(['X'], 'Out', no_grad_set=set('b'))
def test_ignore_x(self):
self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
def test_check_grad_ignore_x(self):
self.check_grad(['b'], 'Out', no_grad_set=set('X'))
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
import numpy as np
from paddle.v2.framework.op import Operator
from op_test import OpTest
class IdentityTest(unittest.TestCase):
__metaclass__ = OpTestMeta
class IdentityTest(OpTest):
def setUp(self):
self.type = "identity"
self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
self.op_type = "identity"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.outputs = {'Out': self.inputs['X']}
def test_check_output(self):
self.check_output()
class IdentityGradOpTest(GradientChecker):
def test_normal(self):
op = create_op("identity")
inputs = {"X": np.random.random((10, 10)).astype("float32")}
self.check_grad(op, inputs, set("X"), "Out")
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class ScaleTest(unittest.TestCase):
__metaclass__ = OpTestMeta
class ScaleTest(OpTest):
def setUp(self):
self.type = "scale"
self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
self.op_type = "scale"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.attrs = {'scale': -2.3}
self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}
def test_check_output(self):
self.check_output()
class ScaleGradTest(GradientChecker):
def test_normal(self):
op = Operator("scale", X="X", Out="Out", scale=3.2)
self.check_grad(op,
{"X": np.random.random((10, 10)).astype("float32")},
set("X"), "Out")
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
import numpy as np
from op_test import OpTest
class TestScatterOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestScatterOp(OpTest):
def setUp(self):
self.type = "scatter"
ref_np = numpy.ones((3, 3)).astype("float32")
index_np = numpy.array([1, 2]).astype("int32")
updates_np = numpy.random.random((2, 3)).astype("float32")
output_np = numpy.copy(ref_np)
self.op_type = "scatter"
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
output_np = np.copy(ref_np)
output_np[index_np] += updates_np
self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
class TestScatterGradOp(GradientChecker):
def test_scatter_grad(self):
op = create_op("scatter")
# test data setup
ref_np = numpy.ones((3, 10)).astype("float32")
index_np = numpy.array([1, 2]).astype("int32")
updates_np = numpy.random.random((2, 10)).astype("float32")
output_np = numpy.copy(ref_np)
output_np[index_np] += updates_np
inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
self.check_grad(
op, inputs, set(["Updates", "Ref"]), "Out", in_place=True)
def test_check_grad(self):
self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)
if __name__ == "__main__":
......
import unittest
import numpy
from op_test_util import OpTestMeta
import numpy as np
from op_test import OpTest
class TestSGD(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestSGD(OpTest):
def setUp(self):
self.type = "sgd"
w = numpy.random.random((102, 105)).astype("float32")
g = numpy.random.random((102, 105)).astype("float32")
self.op_type = "sgd"
w = np.random.random((102, 105)).astype("float32")
g = np.random.random((102, 105)).astype("float32")
lr = 0.1
self.inputs = {'param': w, 'grad': g}
self.attrs = {'learning_rate': lr}
self.outputs = {'param_out': w - lr * g}
def test_check_output(self):
self.check_output()
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
from op_test import OpTest
def stable_softmax(x):
......@@ -13,26 +10,21 @@ def stable_softmax(x):
return exps / np.sum(exps)
class TestSoftmaxOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestSoftmaxOp(OpTest):
def setUp(self):
self.type = "softmax"
self.inputs = {"X": np.random.random((10, 10)).astype("float32")}
self.op_type = "softmax"
self.inputs = {
'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
}
self.outputs = {
"Y": np.apply_along_axis(stable_softmax, 1, self.inputs["X"])
'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
}
def test_check_output(self):
self.check_output()
class TestSoftmaxGradOp(GradientChecker):
def setUp(self):
self.op = create_op("softmax")
self.inputs = {
"X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")
}
def test_softmax_grad(self):
self.check_grad(self.op, self.inputs, ["X"], "Y")
def test_check_grad(self):
self.check_grad(['X'], 'Y')
if __name__ == "__main__":
......
import unittest
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
import numpy as np
from op_test import OpTest
class TestSquaredL2DistanceOp_f0(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestSquaredL2DistanceOp_f0(OpTest):
def setUp(self):
self.type = 'squared_l2_distance'
self.op_type = "squared_l2_distance"
self.inputs = {
'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
'Y': np.random.uniform(0.1, 1., (32, 64)).astype('float32')
'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32")
}
sub_res = self.inputs['X'] - self.inputs['Y']
output = sub_res * sub_res
......@@ -20,15 +17,19 @@ class TestSquaredL2DistanceOp_f0(unittest.TestCase):
'Out': np.expand_dims(output.sum(1), 1)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
class TestSquaredL2DistanceOp_f1(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestSquaredL2DistanceOp_f1(OpTest):
def setUp(self):
self.type = 'squared_l2_distance'
self.op_type = "squared_l2_distance"
self.inputs = {
'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
'Y': np.random.uniform(0.1, 1., (1, 64)).astype('float32')
'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32")
}
sub_res = self.inputs['X'] - self.inputs['Y']
output = sub_res * sub_res
......@@ -37,53 +38,34 @@ class TestSquaredL2DistanceOp_f1(unittest.TestCase):
'Out': np.expand_dims(output.sum(1), 1)
}
def test_check_output(self):
self.check_output()
class TestSquaredL2DistanceOp_f2(unittest.TestCase):
__metaclass__ = OpTestMeta
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
class TestSquaredL2DistanceOp_f2(OpTest):
def setUp(self):
self.type = 'squared_l2_distance'
self.op_type = "squared_l2_distance"
self.inputs = {
'X': np.random.uniform(0.1, 1., (32, 64, 128)).astype('float32'),
'Y': np.random.uniform(0.1, 1., (1, 64, 128)).astype('float32')
'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
}
sub_res = self.inputs['X'] - self.inputs['Y']
sub_res = sub_res.reshape((32, 64 * 128))
sub_res = sub_res.reshape((2, 3 * 4))
output = sub_res * sub_res
self.outputs = {
'sub_result': sub_res,
'Out': np.expand_dims(output.sum(1), 1)
}
def test_check_output(self):
self.check_output()
class TestSquaredL2DistanceGradOp(GradientChecker):
def test_squared_l2_distance_b0(self):
op = create_op("squared_l2_distance")
inputs = {
'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
'Y': np.random.uniform(0.1, .6, (2, 3)).astype('float32')
}
self.compare_grad(op, inputs)
self.check_grad(op, inputs, set(["X", "Y"]), "Out")
def test_squared_l2_distance_b1(self):
op = create_op("squared_l2_distance")
inputs = {
'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
'Y': np.random.uniform(0.1, .6, (1, 3)).astype('float32')
}
self.compare_grad(op, inputs)
self.check_grad(op, inputs, set(["X", "Y"]), "Out")
def test_squared_l2_distance_b2(self):
op = create_op("squared_l2_distance")
inputs = {
'X': np.random.uniform(0.1, .6, (2, 3, 4)).astype('float32'),
'Y': np.random.uniform(0.1, .6, (1, 3, 4)).astype('float32')
}
self.compare_grad(op, inputs)
self.check_grad(op, inputs, set(["X", "Y"]), "Out")
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
......@@ -9,7 +9,7 @@ class TestSumOp(OpTest):
x0 = np.random.random((3, 4)).astype('float32')
x1 = np.random.random((3, 4)).astype('float32')
x2 = np.random.random((3, 4)).astype('float32')
self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}}
self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
y = x0 + x1 + x2
self.outputs = {'Out': y}
......@@ -17,8 +17,8 @@ class TestSumOp(OpTest):
self.check_output()
def test_check_grad(self):
self.check_grad(["x0"], "Out")
self.check_grad(['x0'], 'Out')
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
from op_test import OpTest
class TestTopkOp(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestTopkOp(OpTest):
def setUp(self):
self.type = "top_k"
self.op_type = "top_k"
k = 1
input = np.random.random((32, 84)).astype("float32")
output = np.ndarray((32, k))
......@@ -25,11 +22,9 @@ class TestTopkOp(unittest.TestCase):
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp3d(unittest.TestCase):
__metaclass__ = OpTestMeta
class TestTopkOp3d(OpTest):
def setUp(self):
self.type = "top_k"
self.op_type = "top_k"
k = 1
input = np.random.random((32, 2, 84)).astype("float32")
input_flat_2d = input.reshape(64, 84)
......@@ -48,5 +43,5 @@ class TestTopkOp3d(unittest.TestCase):
self.outputs = {'Out': output, 'Indices': indices}
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
......@@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase):
def uniform_random_test(self, place):
scope = core.Scope()
scope.new_var("X").get_tensor()
scope.new_var('X').get_tensor()
op = Operator(
"uniform_random",
Out="X",
Out='X',
dims=[1000, 784],
min=-5.0,
max=10.0,
......@@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase):
op.infer_shape(scope)
ctx = core.DeviceContext.create(place)
op.run(scope, ctx)
tensor = numpy.array(scope.find_var("X").get_tensor())
tensor = numpy.array(scope.find_var('X').get_tensor())
self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()
......@@ -2,6 +2,7 @@ import numpy
import collections
import topology
import minibatch
import cPickle
__all__ = ['infer', 'Inference']
......@@ -25,11 +26,23 @@ class Inference(object):
:type parameters: paddle.v2.parameters.Parameters
"""
def __init__(self, output_layer, parameters):
def __init__(self, parameters, output_layer=None, fileobj=None):
import py_paddle.swig_paddle as api
if output_layer is not None:
topo = topology.Topology(output_layer)
gm = api.GradientMachine.createFromConfigProto(
topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE])
self.__data_types__ = topo.data_type()
elif fileobj is not None:
tmp = cPickle.load(fileobj)
gm = api.GradientMachine.createByConfigProtoStr(
tmp['protobin'], api.CREATE_MODE_TESTING,
[api.PARAMETER_VALUE])
self.__data_types__ = tmp['data_type']
else:
raise ValueError("Either output_layer or fileobj must be set")
for param in gm.getParameters():
val = param.getBuf(api.PARAMETER_VALUE)
name = param.getName()
......@@ -43,7 +56,6 @@ class Inference(object):
# called here, but it's better to call this function in one place.
param.setValueUpdated()
self.__gradient_machine__ = gm
self.__data_types__ = topo.data_type()
def iter_infer(self, input, feeding=None):
from data_feeder import DataFeeder
......
......@@ -18,6 +18,7 @@ from paddle.proto.ModelConfig_pb2 import ModelConfig
import paddle.trainer_config_helpers as conf_helps
import layer as v2_layer
import config_base
import cPickle
__all__ = ['Topology']
......@@ -100,6 +101,14 @@ class Topology(object):
return layer
return None
def serialize_for_inference(self, stream):
protobin = self.proto().SerializeToString()
data_type = self.data_type()
cPickle.dump({
'protobin': protobin,
'data_type': data_type
}, stream, cPickle.HIGHEST_PROTOCOL)
def __check_layer_type__(layer):
if not isinstance(layer, config_base.Layer):
......
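A hedged usage sketch of the two additions above (Topology.serialize_for_inference and the new fileobj argument to Inference). The helper names, file path, and the exact module attribute paths (`paddle.topology`, `paddle.inference` under `import paddle.v2 as paddle`) are assumptions for illustration:

```python
import paddle.v2 as paddle

def save_inference_topology(output_layer, path):
    # Dump the inference topology once, so inference no longer needs the Python network code.
    with open(path, 'wb') as f:
        paddle.topology.Topology(output_layer).serialize_for_inference(f)

def load_inferer(parameters, path):
    # Rebuild the inferer from the pickled topology via the new `fileobj` argument.
    with open(path, 'rb') as f:
        return paddle.inference.Inference(parameters=parameters, fileobj=f)
```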