Commit 3fa3f5e7 authored by D dangqingqing

Update code and resolve conflicts.

...@@ -13,7 +13,7 @@ define_py_data_sources2(
settings(
    batch_size=batch_size,
-    learning_rate=0.01 / batch_size,
+    learning_rate=0.001 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))
......
...@@ -46,16 +46,20 @@ IF(${CBLAS_PROVIDER} STREQUAL "MKLML")
    MESSAGE(STATUS "Build MKLDNN with ${MKLDNN_MKLROOT}")
ENDIF()

+SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} -Wno-error=strict-overflow")
+SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} -Wno-error=strict-overflow")

ExternalProject_Add(
    ${MKLDNN_PROJECT}
    ${EXTERNAL_PROJECT_LOG_ARGS}
    DEPENDS ${MKLDNN_DEPENDS}
    GIT_REPOSITORY "https://github.com/01org/mkl-dnn.git"
-    GIT_TAG "v0.10"
+    GIT_TAG "v0.11"
    PREFIX ${MKLDNN_SOURCES_DIR}
    UPDATE_COMMAND ""
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR}
    CMAKE_ARGS -DMKLROOT=${MKLDNN_MKLROOT}
+    CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
+    CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}
    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR}
                     -DMKLROOT:PATH=${MKLDNN_MKLROOT}
)
......
...@@ -27,8 +27,8 @@ ENDIF()
INCLUDE(ExternalProject)

SET(MKLML_PROJECT "extern_mklml")
-SET(MKLML_VER "mklml_lnx_2018.0.20170720")
+SET(MKLML_VER "mklml_lnx_2018.0.1.20171007")
-SET(MKLML_URL "https://github.com/01org/mkl-dnn/releases/download/v0.10/${MKLML_VER}.tgz")
+SET(MKLML_URL "https://github.com/01org/mkl-dnn/releases/download/v0.11/${MKLML_VER}.tgz")
SET(MKLML_SOURCE_DIR "${THIRD_PARTY_PATH}/mklml")
SET(MKLML_DOWNLOAD_DIR "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}")
SET(MKLML_DST_DIR "mklml")
......
...@@ -2,112 +2,9 @@
Data Reader Interface and DataSets
==================================

+.. toctree::
+  :maxdepth: 1
+
+  data/data_reader.rst
+  data/image.rst
+  data/dataset.rst

-DataTypes
-=========
.. automodule:: paddle.v2.data_type
:members:
:noindex:
DataFeeder
==========
.. automodule:: paddle.v2.data_feeder
:members:
:noindex:
Reader
======
.. automodule:: paddle.v2.reader
:members:
:noindex:
.. automodule:: paddle.v2.reader.creator
:members:
:noindex:
minibatch
=========
.. automodule:: paddle.v2.minibatch
:members:
:noindex:
Dataset
=======
.. automodule:: paddle.v2.dataset
:members:
:noindex:
mnist
+++++
.. automodule:: paddle.v2.dataset.mnist
:members:
:noindex:
cifar
+++++
.. automodule:: paddle.v2.dataset.cifar
:members:
:noindex:
conll05
+++++++
.. automodule:: paddle.v2.dataset.conll05
:members: get_dict,get_embedding,test
:noindex:
imdb
++++
.. automodule:: paddle.v2.dataset.imdb
:members:
:noindex:
imikolov
++++++++
.. automodule:: paddle.v2.dataset.imikolov
:members:
:noindex:
movielens
+++++++++
.. automodule:: paddle.v2.dataset.movielens
:members:
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.MovieInfo
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.UserInfo
:noindex:
sentiment
+++++++++
.. automodule:: paddle.v2.dataset.sentiment
:members:
:noindex:
uci_housing
+++++++++++
.. automodule:: paddle.v2.dataset.uci_housing
:members:
:noindex:
wmt14
+++++
.. automodule:: paddle.v2.dataset.wmt14
:members:
:noindex:
=====================
Data Reader Interface
=====================
DataTypes
=========
.. automodule:: paddle.v2.data_type
:members:
:noindex:
DataFeeder
==========
.. automodule:: paddle.v2.data_feeder
:members:
:noindex:
Reader
======
.. automodule:: paddle.v2.reader
:members:
:noindex:
.. automodule:: paddle.v2.reader.creator
:members:
:noindex:
minibatch
=========
.. automodule:: paddle.v2.minibatch
:members:
:noindex:
Dataset
=======
.. automodule:: paddle.v2.dataset
:members:
:noindex:
mnist
+++++
.. automodule:: paddle.v2.dataset.mnist
:members:
:noindex:
cifar
+++++
.. automodule:: paddle.v2.dataset.cifar
:members:
:noindex:
conll05
+++++++
.. automodule:: paddle.v2.dataset.conll05
:members: get_dict,get_embedding,test
:noindex:
imdb
++++
.. automodule:: paddle.v2.dataset.imdb
:members:
:noindex:
imikolov
++++++++
.. automodule:: paddle.v2.dataset.imikolov
:members:
:noindex:
movielens
+++++++++
.. automodule:: paddle.v2.dataset.movielens
:members:
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.MovieInfo
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.UserInfo
:noindex:
sentiment
+++++++++
.. automodule:: paddle.v2.dataset.sentiment
:members:
:noindex:
uci_housing
+++++++++++
.. automodule:: paddle.v2.dataset.uci_housing
:members:
:noindex:
wmt14
+++++
.. automodule:: paddle.v2.dataset.wmt14
:members:
:noindex:
Image Interface
===============
.. automodule:: paddle.v2.image
:members:
# Design: Sequence Decoder Generating LoDTensors

In tasks such as machine translation and image-to-text, a [sequence decoder](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.md) is necessary to generate sequences.
This document describes how to implement the sequence decoder as an operator.
## Beam Search based Decoder

The [beam search algorithm](https://en.wikipedia.org/wiki/Beam_search) is necessary when generating sequences. It is a heuristic search algorithm that explores the paths by expanding the most promising nodes in a limited set.

In the old version of PaddlePaddle, the C++ class `RecurrentGradientMachine` implements the general beam-search-based sequence decoder. Due to its complexity, the implementation relies on a lot of special data structures that are quite convoluted and hard for users to customize.

There are a lot of heuristic tricks in sequence generation tasks, so the flexibility of the sequence decoder is very important to users.

During PaddlePaddle's refactoring work, some new concepts were proposed, such as [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) and [TensorArray](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/tensor_array.md), that can better support sequence usage and help make the implementation of a beam-search-based sequence decoder **more transparent and modular**.

For example, the RNN states, the candidate IDs and the probabilities of beam search can be represented as `LoDTensors`; the selected candidates' IDs in each time step can be stored in a `TensorArray` and `Pack`ed into the translated sentences.
## Changing LoD's absolute offsets to relative offsets

The current `LoDTensor` is designed to store levels of variable-length sequences; it stores several arrays of integers, where each array represents a level.

The integers in each level represent the begin and end (not inclusive) offsets of a sequence **in the underlying tensor**; let's call this format the **absolute-offset LoD** for clarity.

The absolute-offset LoD can quickly retrieve any sequence, but it fails to represent empty sequences. For example, a two-level LoD is as follows

```python
[[0, 3, 9]
 [0, 2, 3, 3, 3, 9]]
```

The first level tells that there are two sequences:
- the first's offset is `[0, 3)`
- the second's offset is `[3, 9)`

while on the second level, there are several empty sequences that both begin and end at `3`. It is impossible to tell how many empty second-level sequences exist in each first-level sequence.

There are many scenarios that rely on representing empty sequences, such as machine translation or image-to-text, where one instance has no translation or the candidate set for a prefix is empty.

So let's introduce another format of LoD; it stores **the offsets of the lower-level sequences** and is called the **relative-offset** LoD.

For example, to represent the same sequences as the above data

```python
[[0, 2, 5]
 [0, 2, 3, 3, 3, 9]]
```

the first level represents that there are two sequences; their offsets in the second-level LoD are `[0, 2)` and `[2, 5)`.

The second level is the same as in the absolute-offset example because the lowest level is the underlying tensor. It is now easy to tell that the second sequence in the first-level LoD contains two empty sequences.

The following demos are based on the relative-offset LoD.
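To make the format concrete, here is a minimal sketch (plain Python, not part of any proposed API) that walks the relative-offset LoD above and recovers every lower-level sequence, including the empty ones:

```python
lod = [[0, 2, 5],           # first level: offsets into the second level
       [0, 2, 3, 3, 3, 9]]  # second level: offsets into the underlying tensor
top, low = lod

for k in range(len(top) - 1):
    print("top-level sequence %d:" % k)
    for i in range(top[k], top[k + 1]):
        begin, end = low[i], low[i + 1]
        print("  lower sequence [%d, %d)%s" %
              (begin, end, " (empty)" if begin == end else ""))
```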
## Usage in a simple machine translation model

Let's start with a simple machine translation model, simplified from the [machine translation chapter](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation), to draw a blueprint of what a sequence decoder can do and how to use it.

The model has an encoder that learns the semantic vector from a sequence, and a decoder which uses the sequence decoder to generate new sentences.
**Encoder**

```python
import paddle as pd

dict_size = 8000
source_dict_size = dict_size
target_dict_size = dict_size
word_vector_dim = 128
encoder_dim = 128
decoder_dim = 128
beam_size = 5
max_length = 120

# encoder
src_word_id = pd.data(
    name='source_language_word',
    type=pd.data.integer_value_sequence(source_dict_size))
# hypothetical embedding API: vocabulary size and embedding dimension
src_embedding = pd.embedding(size=source_dict_size, dim=word_vector_dim)

src_word_vec = pd.lookup(src_embedding, src_word_id)

encoder_out_seq = pd.gru(input=src_word_vec, size=encoder_dim)

encoder_ctx = pd.last_seq(encoder_out_seq)
# encoder_ctx_proj is the learned semantic vector
encoder_ctx_proj = pd.fc(
    encoder_ctx, size=decoder_dim, act=pd.activation.Tanh(), bias=None)
```
**Decoder**

```python
def generate():
    decoder = pd.while_loop()
    with decoder.step():
        decoder_mem = decoder.memory(init=encoder_ctx)  # mark the memory
        generated_ids = decoder.memory()  # TODO init to batch_size <s>s
        generated_scores = decoder.memory()  # TODO init to batch_size 1s or 0s

        # trg_embedding: the target-side embedding table, defined like src_embedding
        target_word = pd.lookup(trg_embedding, generated_ids)
        # expand encoder_ctx's batch to fit target_word's lod
        # for example
        # decoder_mem.lod is
        # [[0 1 3],
        #  [0 1 3 6]]
        # its tensor content is [a1 a2 a3 a4 a5]
        # which means there are 2 sentences to translate
        #   - the first sentence has 1 translation prefix, the offsets are [0, 1)
        #   - the second sentence has 2 translation prefixes, the offsets are [1, 3) and [3, 6)
        # the target_word.lod is
        # [[0, 1, 6]
        #  [0, 2, 4, 7, 9, 12]]
        # which means 2 sentences to translate, with 1 and 5 prefixes respectively
        #   the first prefix has 2 candidates,
        #   the following have 2, 3, 2, 3 candidates
        # the encoder_ctx_expanded's content will be
        # [a1 a1 a2 a2 a3 a3 a3 a4 a4 a5 a5 a5]
        encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
        decoder_input = pd.fc(
            act=pd.activation.Linear(),
            input=[target_word, encoder_ctx_expanded],
            size=3 * decoder_dim)
        gru_out, cur_mem = pd.gru_step(
            decoder_input, mem=decoder_mem, size=decoder_dim)
        scores = pd.fc(
            gru_out,
            size=target_dict_size,
            bias=None,
            act=pd.activation.Softmax())
        # K is a config
        topk_scores, topk_ids = pd.top_k(scores, K)
        topk_generated_scores = pd.add_scalar(topk_scores, generated_scores)

        selected_ids, selected_generation_scores = decoder.beam_search(
            topk_ids, topk_generated_scores)

        # update the states
        decoder_mem.update(cur_mem)  # tells how to update state
        generated_ids.update(selected_ids)
        generated_scores.update(selected_generation_scores)

        decoder.output(selected_ids)
        decoder.output(selected_generation_scores)

translation_ids, translation_scores = decoder()
```
`decoder.beam_search` is an operator that, given the candidates and the scores of the translations including those candidates, returns the result of the beam search algorithm.

In this way, users can customize anything on the inputs or outputs of beam search. For example, there are several ways to prune some translation prefixes (see the sketch after this list):

1. make the corresponding elements in `topk_generated_scores` zero or some small values; beam search will then discard those candidates,
2. remove some specific candidates from `selected_ids`,
3. get the final `translation_ids` and remove unwanted translation sequences from it.
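As an illustration of the first option, a minimal NumPy sketch (with made-up data) of pruning by zeroing scores:

```python
import numpy as np

# hypothetical top-K candidates of one prefix
topk_ids = np.array([3, 7, 12])
topk_generated_scores = np.array([0.5, 0.4, 0.1])

banned_id = 7  # a candidate we want to prune
topk_generated_scores[topk_ids == banned_id] = 0.0  # beam search will discard it
print(topk_generated_scores)  # [0.5 0.  0.1]
```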
The implementation of the sequence decoder can reuse the C++ class [RNNAlgorithm](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/paddle/operators/dynamic_recurrent_op.h#L30), so the Python syntax is quite similar to that of an [RNN](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/doc/design/block.md#blocks-with-for-and-rnnop).

Both of them are two-level `LoDTensors`:

- the first level represents the `batch_size` of (source) sentences;
- the second level represents the candidate ID sets for the translation prefixes.

For example, with 3 source sentences to translate, they might have 2, 3, and 1 candidates respectively.
Unlike an RNN, in the sequence decoder the previous state and the current state have different LoDs and shapes; a `lod_expand` operator is used to expand the LoD of the previous state to fit the current state.

For example, the previous state

* LoD is `[0, 1, 3][0, 2, 5, 6]`
* content of tensor is `a1 a2 b1 b2 b3 c1`

the current state, stored in `encoder_ctx_expanded`,

* LoD is `[0, 2, 6][0 3 5 8 9 11 11]`
* the content is
  - a1 a1 a1 (a1 has 3 candidates, so the state should be copied 3 times, once for each of its candidates)
  - a2 a2
  - b1 b1 b1
  - b2
  - b3 b3
  - None (c1 has 0 candidates, so c1 is dropped)

Thanks to the relative-offset LoD, empty candidate sets can be represented naturally.
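A minimal sketch (plain Python, hypothetical data layout) of what `lod_expand` does in this example: each previous state is copied once per candidate of its prefix, and states with zero candidates are dropped.

```python
prev_state = ["a1", "a2", "b1", "b2", "b3", "c1"]
candidate_counts = [3, 2, 3, 1, 2, 0]  # sequence lengths in the current LoD [0 3 5 8 9 11 11]

expanded = []
for state, count in zip(prev_state, candidate_counts):
    expanded.extend([state] * count)  # copy the state once per candidate

print(expanded)
# ['a1', 'a1', 'a1', 'a2', 'a2', 'b1', 'b1', 'b1', 'b2', 'b3', 'b3']
```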
The state in each time step can be stored in a `TensorArray` and `Pack`ed into a final `LoDTensor`. The corresponding syntax is

```python
decoder.output(selected_ids)
decoder.output(selected_generation_scores)
```

The `selected_ids` are the candidate IDs for the prefixes; the `TensorArray` will `Pack` them into a two-level `LoDTensor` whose first level represents the source sequences and whose second level represents the generated sequences.

Packing the `selected_scores` yields a `LoDTensor` that stores the score of each translation candidate.

Packing the `selected_generation_scores` yields a `LoDTensor`, and each tail is the probability of the translation.
## LoD and shape changes during decoding

<p align="center">
  <img src="./images/LOD-and-shape-changes-during-decoding.jpg"/>
</p>

According to the image above, the only phase that changes the LoD is beam search.
## Beam search design

The beam search algorithm will be implemented as one method of the sequence decoder; it has 3 inputs:

1. `topk_ids`, the top K candidate IDs for each prefix,
2. `topk_scores`, the corresponding scores for `topk_ids`,
3. `generated_scores`, the scores of the prefixes.

All of these are `LoDTensors`, so that the sequence affiliation is clear. Beam search will keep a beam for each prefix and select a smaller candidate set for each prefix; a sketch of this selection step follows the list below.

It will return three variables:

1. `selected_ids`, the final candidates the beam search function selected for the next step,
2. `selected_scores`, the scores of those candidates,
3. `generated_scores`, the updated scores of each prefix (with the new candidates appended).
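A minimal sketch (plain Python; the data layout, one candidate list per prefix, is a simplification of the real `LoDTensor` inputs) of the selection step:

```python
import heapq

def beam_search_step(topk_ids, topk_generated_scores, beam_size):
    """Keep the best beam_size candidates for each prefix."""
    selected_ids, selected_scores = [], []
    for ids, scores in zip(topk_ids, topk_generated_scores):
        best = heapq.nlargest(beam_size, zip(scores, ids))
        selected_scores.append([s for s, _ in best])
        selected_ids.append([i for _, i in best])
    return selected_ids, selected_scores

ids, scores = beam_search_step([[3, 7, 12]], [[0.5, 0.4, 0.1]], beam_size=2)
print(ids, scores)  # [[3, 7]] [[0.5, 0.4]]
```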
## Introducing the LoD-based `Pack` and `Unpack` methods in `TensorArray`

The `selected_ids`, `selected_scores` and `generated_scores` are `LoDTensors` that exist at each time step, so it is natural to store them in arrays.

Currently, PaddlePaddle has a module called `TensorArray` which can store an array of tensors; the results of beam search are best stored in a `TensorArray`.

The `Pack` and `UnPack` methods in `TensorArray` are used to pack the tensors in the array into one `LoDTensor`, or to split one `LoDTensor` into an array of tensors. They need some extensions to support packing or unpacking an array of `LoDTensors`.
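A minimal sketch (plain Python, hypothetical layout) of the LoD-based `Pack`: concatenate the per-step tensors and record the step boundaries as one level of LoD; `UnPack` would reverse this.

```python
def pack(step_tensors):
    """Concatenate per-step tensors; record step boundaries as a LoD level."""
    data, lod = [], [0]
    for t in step_tensors:
        data.extend(t)
        lod.append(len(data))
    return data, lod

ids_per_step = [[2, 5], [2, 7, 5, 1], [9]]
print(pack(ids_per_step))  # ([2, 5, 2, 7, 5, 1, 9], [0, 2, 6, 7])
```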
...@@ -21,7 +21,7 @@
#include "paddle/framework/var_desc.h"
#include "paddle/operators/net_op.h"

-USE_OP(fill_constant);
+USE_NO_KERNEL_OP(fill_constant);

namespace paddle {
namespace framework {
......
...@@ -34,6 +34,21 @@ inline DataType ToDataType(std::type_index type) {
  }
}
inline std::type_index ToTypeIndex(DataType type) {
switch (type) {
case DataType::FP32:
return typeid(float);
case DataType::FP64:
return typeid(double);
case DataType::INT32:
return typeid(int);
case DataType::INT64:
return typeid(int64_t);
default:
PADDLE_THROW("Not support type %d", type);
}
}
template <typename Visitor>
inline void VisitDataType(DataType type, Visitor visitor) {
  switch (type) {
......
...@@ -79,6 +79,13 @@ DDim make_ddim(const std::vector<int64_t>& dims) {
  return result;
}
DDim make_ddim(const std::vector<int>& dims) {
std::vector<int64_t> res(dims.size());
std::transform(dims.begin(), dims.end(), res.begin(),
[](int d) { return static_cast<int64_t>(d); });
return make_ddim(res);
}
/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
class DynamicMutableIndexer : public boost::static_visitor<int64_t&> {
......
...@@ -81,6 +81,8 @@ struct DDim {
 */
DDim make_ddim(const std::vector<int64_t>& dims);
DDim make_ddim(const std::vector<int>& dims);
/**
 * \brief Make a DDim from an initializer list
 *
......
...@@ -45,6 +45,7 @@ if(WITH_GPU)
    add_simple_unittest(BlockExpandOpTest)
    add_simple_unittest(CropOpTest)
    add_simple_unittest(SwitchOpTest)
add_simple_unittest(ScaleSubRegionOpTest)
endif()

add_simple_unittest(Im2ColTest)
......
...@@ -110,6 +110,7 @@ public:
      function2_(FunctionBase::funcRegistrar_.createByType(name2)) {
    function1_->init(config);
    function2_->init(config);
initArgsCallback_ = nullptr;
  }

  ~Compare2Function() {}

...@@ -170,6 +171,10 @@ public:
                  *seq2_));
  }
void registerInitCallback(std::function<void(BufferArg&, size_t)> callback) {
initArgsCallback_ = callback;
}
  // output only needs to contain the shape; it does not contain data.
  void addOutputs(const BufferArg& output, ArgType argType = ASSIGN_TO) {
    size_t size =
...@@ -340,6 +345,10 @@ protected:
      initArg(*func1Inputs_[i]);
    }
if (initArgsCallback_ != nullptr) {
initArgsCallback_(*func1Inputs_[i], i);
}
    copyArg_(*func1Inputs_[i], *func2Inputs_[i]);
  }
}
...@@ -386,6 +395,7 @@ protected:
  std::shared_ptr<SequenceIdArg> seq1_;
  std::shared_ptr<SequenceIdArg> seq2_;
  test::CopyArgument<DType1, DType2> copyArg_;
std::function<void(BufferArg&, size_t)> initArgsCallback_;
};

class CpuGpuFuncCompare
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ScaleSubRegionOp.h"
#include "paddle/function/TensorShape.h"
namespace paddle {
template <>
void ScaleSubRegion<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
memcpy(outputs, inputs, number * channel * height * width * sizeof(real));
for (int n = 0; n < number; ++n) {
// indices start from 1
int offset = n * 6;
for (int c = indices[offset] - 1; c < indices[offset + 1]; ++c) {
for (int h = indices[offset + 2] - 1; h < indices[offset + 3]; ++h) {
for (int w = indices[offset + 4] - 1; w < indices[offset + 5]; ++w) {
int idx = ((n * channel + c) * height + h) * width + w;
outputs[idx] *= value;
}
}
}
}
}
template <>
void ScaleSubRegionGrad<DEVICE_TYPE_CPU>(const real* inGrad,
real* outGrad,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
for (int n = 0; n < number; ++n) {
for (int c = 0; c < channel; ++c) {
for (int h = 0; h < height; ++h) {
for (int w = 0; w < width; ++w) {
int idx = ((n * channel + c) * height + h) * width + w;
int offset = n * 6;
if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
h >= (indices[offset + 2] - 1) &&
h <= (indices[offset + 3] - 1) &&
w >= (indices[offset + 4] - 1) &&
w <= (indices[offset + 5] - 1)) {
outGrad[idx] += inGrad[idx] * value;
} else {
outGrad[idx] += inGrad[idx];
}
}
}
}
}
}
/**
 * \brief For each instance, ScaleSubRegion can be used to multiply a value
 *        to a specified continuous sub-region. By providing the start and end
 *        indices for C/H/W, you can specify the location and shape of the
 *        region.
 *
 * Argument in this Function:
 * \param inputs A 4-D tensor with shape [N, C, H, W], only one input.
 * \param indices A 2-D tensor with shape [N, 6], indicates the sub-region.
 * \param outputs A 4-D tensor with the same shape as inputs, output value.
 */
template <DeviceType Device>
class ScaleSubRegionFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(2UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO);
TensorShape shape = inputs[0].shape();
ScaleSubRegion<Device>(outputs[0].data<real>(),
inputs[0].data<real>(),
inputs[1].data<real>(),
shape,
conf_);
}
private:
FuncConfig conf_;
};
/**
* \brief The backward propagation of ScaleSubRegion Function.
*
* Argument in this Function:
* \param inputs A 4-D tensor with shape [N, C, H, W], output gradient.
* \param indices A 2-D tensor with shape [N, 6], indicates the sub region.
* \param outputs A 4-D tensor with shape [N, C, H, W], gradient of input value.
*/
template <DeviceType Device>
class ScaleSubRegionGradFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(2UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
CHECK_EQ(outputs[0].getArgType(), ADD_TO);
TensorShape shape = inputs[0].shape();
ScaleSubRegionGrad<Device>(inputs[0].data<real>(),
outputs[0].data<real>(),
inputs[1].data<real>(),
shape,
conf_);
}
private:
FuncConfig conf_;
};
REGISTER_TYPED_FUNC(ScaleSubRegion, CPU, ScaleSubRegionFunc);
REGISTER_TYPED_FUNC(ScaleSubRegionGrad, CPU, ScaleSubRegionGradFunc);
#ifdef PADDLE_WITH_CUDA
REGISTER_TYPED_FUNC(ScaleSubRegion, GPU, ScaleSubRegionFunc);
REGISTER_TYPED_FUNC(ScaleSubRegionGrad, GPU, ScaleSubRegionGradFunc);
#endif
} // namespace paddle
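For reference, a minimal NumPy sketch of the semantics the CPU kernel above implements, assuming each row of `indices` holds 1-based inclusive `[c_start, c_end, h_start, h_end, w_start, w_end]` bounds:

```python
import numpy as np

def scale_sub_region(x, indices, value):
    """x: (N, C, H, W); indices: (N, 6) with 1-based inclusive bounds."""
    out = x.copy()
    for n in range(x.shape[0]):
        cs, ce, hs, he, ws, we = indices[n].astype(int)
        out[n, cs - 1:ce, hs - 1:he, ws - 1:we] *= value  # scale the sub-region
    return out
```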
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Function.h"
namespace paddle {
/**
 * \brief Function to multiply the values in a specified continuous
 *        sub-region by a value. Indices must be provided to indicate the
 *        location and shape of the region, and the multiplied value is
 *        passed by the configure variable.
 *
 * \param[out] outputs Output value.
 * \param[in] inputs Input data which contains NCHW information.
 * \param[in] indices Indices data to indicate the sub-region.
 * \param[in] shape Tensor shape of input value.
 * \param[in] conf Configure variable which contains the multiplied value.
 */
template <DeviceType Device>
void ScaleSubRegion(real* outputs,
const real* inputs,
const real* indices,
const TensorShape shape,
const FuncConfig& conf);
/**
* \brief Backward propagation function of ScaleSubRegion.
*
* \param[out] inGrad Gradients of previous layer.
* \param[in] outGrad Output gradient.
* \param[in] indices Indices data.
* \param[in] shape The Shape of input tensor.
* \param[in] conf Configure variable.
*/
template <DeviceType Device>
void ScaleSubRegionGrad(const real* inGrad,
real* outGrad,
const real* indices,
const TensorShape shape,
const FuncConfig& conf);
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ScaleSubRegionOp.h"
#include "hl_base.h"
namespace paddle {
__global__ void KeScaleSubRegion(real* outputs,
const real* inputs,
const real* indices,
real value,
int channel,
int height,
int width,
int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channel;
const int n = idx / width / height / channel;
const int offset = n * 6;
if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
h >= (indices[offset + 2] - 1) && h <= (indices[offset + 3] - 1) &&
w >= (indices[offset + 4] - 1) && w <= (indices[offset + 5] - 1)) {
outputs[idx] = inputs[idx] * value;
} else {
outputs[idx] = inputs[idx];
}
}
}
template <>
void ScaleSubRegion<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
size_t nth = number * channel * height * width;
int blockSize = 1024;
int gridSize = (nth + blockSize - 1) / blockSize;
KeScaleSubRegion<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, indices, value, channel, height, width, nth);
CHECK_SYNC("ScaleSubRegion");
}
__global__ void KeScaleSubRegionDiff(const real* inGrad,
real* outGrad,
const real* indices,
real value,
int channel,
int height,
int width,
int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channel;
const int n = idx / width / height / channel;
const int offset = n * 6;
if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
h >= (indices[offset + 2] - 1) && h <= (indices[offset + 3] - 1) &&
w >= (indices[offset + 4] - 1) && w <= (indices[offset + 5] - 1)) {
outGrad[idx] += inGrad[idx] * value;
} else {
outGrad[idx] += inGrad[idx];
}
}
}
template <>
void ScaleSubRegionGrad<DEVICE_TYPE_GPU>(const real* inGrad,
real* outGrad,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
size_t nth = number * channel * height * width;
int blockSize = 1024;
int gridSize = (nth + blockSize - 1) / blockSize;
KeScaleSubRegionDiff<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
inGrad, outGrad, indices, value, channel, height, width, nth);
CHECK_SYNC("ScaleSubRegionGrad");
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "FunctionTest.h"
namespace paddle {
TEST(ScaleSubRegion, real) {
for (size_t numSamples : {5, 32}) {
for (size_t channels : {5, 32}) {
for (size_t imgSizeH : {5, 33}) {
for (size_t imgSizeW : {5, 32}) {
for (real value : {-0.5, 0.0, 0.5}) {
for (bool firstHalf : {false, true}) {
VLOG(3) << " numSamples=" << numSamples
<< " channels=" << channels << " imgSizeH=" << imgSizeH
<< " imgSizeW=" << imgSizeW;
for (bool testGrad : {false, true}) {
CpuGpuFuncCompare compare(
testGrad ? "ScaleSubRegionGrad" : "ScaleSubRegion",
FuncConfig().set<real>("value", value));
TensorShape shape{numSamples, channels, imgSizeH, imgSizeW};
TensorShape indicesShape{numSamples, 6};
compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape));
compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, indicesShape));
compare.registerInitCallback([=](BufferArg& arg, size_t index) {
if (index == 1) {
real* data = (real*)arg.data();
for (size_t i = 0; i < numSamples; ++i) {
size_t offset = i * 6;
data[offset] = firstHalf ? 1 : channels / 2;
data[offset + 1] = firstHalf ? channels / 2 : channels;
data[offset + 2] = firstHalf ? 1 : imgSizeH / 2;
data[offset + 3] = firstHalf ? imgSizeH / 2 : imgSizeH;
data[offset + 4] = firstHalf ? 1 : imgSizeW / 2;
data[offset + 5] = firstHalf ? imgSizeW / 2 : imgSizeW;
}
}
});
compare.addOutputs(
BufferArg(
VALUE_TYPE_FLOAT, shape, testGrad ? ADD_TO : ASSIGN_TO),
testGrad ? ADD_TO : ASSIGN_TO);
compare.run();
}
}
}
}
}
}
}
}
} // namespace paddle
...@@ -62,16 +62,14 @@ void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
                                MKLDNNMatrixPtr& wgt,
                                MKLDNNMatrixPtr& bias,
                                MKLDNNMatrixPtr& out) {
-  if (biases_) {
-    LOG(FATAL) << "not implemented yet";
-  }
-  resetFwdBuffers(inVals_, out);
+  resetFwdBuffers(inVals_, bias, out);
  in = inVals_[0];

  std::shared_ptr<sum::primitive_desc> fwdPD;
+  std::shared_ptr<sum::primitive_desc> biasPD;
-  resetFwdPD(fwdPD, inVals_, out);
+  resetFwdPD(fwdPD, biasPD, inVals_, bias, out);

-  resetFwdPipeline(pipeline, fwdPD, inVals_, out);
+  resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out);
}
void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
...@@ -79,7 +77,7 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
                                MKLDNNMatrixPtr& wgt,
                                MKLDNNMatrixPtr& bias,
                                MKLDNNMatrixPtr& out) {
-  resetBwdBuffers(inGrads_, out);
+  resetBwdBuffers(inGrads_, bias, out);
  in = inGrads_[0];

  // backward only needs to share the output grad with the input grad
...@@ -89,6 +87,20 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
      inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData());
    }
  }
// backward bias
bwdBias_ = nullptr;
if (bias) {
std::vector<float> scales(bs_, 1.0);
std::vector<memory::primitive_desc> srcPDs(bs_, bias->getPrimitiveDesc());
auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs);
std::vector<primitive::at> srcs;
for (size_t i = 0; i < grads_.size(); ++i) {
srcs.push_back(*(grads_[i]));
}
bwdBias_.reset(new sum(biasPD, srcs, *bias));
pipeline.push_back(*bwdBias_);
}
}

void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
...@@ -97,7 +109,25 @@ void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
  }
}
void MKLDNNAddtoLayer::prepareBias(MKLDNNMatrixPtr& bias,
const MatrixPtr& biasMat,
const MKLDNNMatrixPtr& out,
std::vector<MKLDNNMatrixPtr>& outs) {
auto pd = MKLDNNMatrix::createPrimitiveDesc(
{(int)layerSize_}, memory::format::x, engine_);
bias = MKLDNNMatrix::create(pd, biasMat);
outs.clear();
real* data = out->getData();
CHECK_EQ(bs_ * layerSize_, out->getElementCnt());
for (int i = 0; i < bs_; ++i) {
MatrixPtr tmp =
Matrix::create(data + i * layerSize_, 1, layerSize_, false, false);
outs.push_back(MKLDNNMatrix::create(bias->getPrimitiveDesc(), tmp));
}
}
void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                       MKLDNNMatrixPtr& out) {
  inputs.resize(inputLayers_.size());
  for (size_t i = 0; i < inputs.size(); i++) {
...@@ -110,12 +140,20 @@ void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
  }

  resetOutValue(out, inputs[0]->getPrimitiveDesc());
if (biases_ && biases_->getW()) {
prepareBias(bias, biases_->getW(), out, vals_);
} else {
bias = nullptr;
}
}

void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
+                                  std::shared_ptr<sum::primitive_desc>& biasPD,
                                  std::vector<MKLDNNMatrixPtr>& inputs,
+                                  MKLDNNMatrixPtr bias,
                                  MKLDNNMatrixPtr out) {
-  std::vector<double> scales(inputs.size(), 1.0);
+  std::vector<float> scales(inputs.size(), 1.0);
  std::vector<memory::primitive_desc> srcPDs;
  for (size_t i = 0; i < inputs.size(); i++) {
    srcPDs.push_back(inputs[i]->getPrimitiveDesc());
...@@ -123,12 +161,23 @@ void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
  CHECK(out);
  pd.reset(new sum::primitive_desc(out->getMemoryDesc(), scales, srcPDs));
  CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc());
biasPD = nullptr;
if (bias) {
std::vector<float> scales(2, 1.0);
std::vector<memory::primitive_desc> srcPDs(2, bias->getPrimitiveDesc());
biasPD.reset(
new sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs));
CHECK_PRIMITIVE_DESC_EQ(bias, biasPD->dst_primitive_desc());
}
}

void MKLDNNAddtoLayer::resetFwdPipeline(
    std::vector<primitive>& pipeline,
    std::shared_ptr<sum::primitive_desc>& pd,
+    std::shared_ptr<sum::primitive_desc>& biasPD,
    std::vector<MKLDNNMatrixPtr>& inputs,
+    MKLDNNMatrixPtr& bias,
    MKLDNNMatrixPtr& out) {
  std::vector<primitive::at> srcs;
  for (size_t i = 0; i < inputs.size(); i++) {
...@@ -136,9 +185,23 @@ void MKLDNNAddtoLayer::resetFwdPipeline(
  }
  fwd_.reset(new sum(*pd, srcs, *out));
  pipeline.push_back(*fwd_);
fwdBias_.clear();
if (biasPD == nullptr || bias == nullptr) {
return;
}
fwdBias_.resize(vals_.size());
for (size_t i = 0; i < vals_.size(); ++i) {
std::vector<primitive::at> srcs;
srcs.push_back(*(vals_[i]));
srcs.push_back(*bias);
fwdBias_[i].reset(new sum(*biasPD, srcs, *vals_[i]));
pipeline.push_back(*fwdBias_[i]);
}
}

void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                       MKLDNNMatrixPtr& out) {
  CHECK(outVal_);
  resetOutGrad(out, outVal_->getPrimitiveDesc());
...@@ -149,6 +212,12 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
    resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
    CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
  }
if (biases_ && biases_->getWGrad()) {
prepareBias(bias, biases_->getWGrad(), out, grads_);
} else {
bias = nullptr;
}
}

} // namespace paddle
...@@ -32,9 +32,15 @@ protected:
  // layer size == ic * ih * iw == oc * oh * ow, and cannot be changed
  size_t layerSize_;

-  // TODO(TJ): this part has not been optimized by MKL-DNN
  std::unique_ptr<Weight> biases_;
// buffers for adding bias
std::vector<MKLDNNMatrixPtr> vals_;
std::vector<MKLDNNMatrixPtr> grads_;
// primitives for adding bias
std::vector<std::shared_ptr<mkldnn::primitive>> fwdBias_;
std::shared_ptr<mkldnn::primitive> bwdBias_;
public:
  explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {}

...@@ -91,20 +97,34 @@ protected:
   * reset pipeline.
   */
  void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                       MKLDNNMatrixPtr& out);
  void resetFwdPD(std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                  std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                  std::vector<MKLDNNMatrixPtr>& inputs,
+                  MKLDNNMatrixPtr bias,
                  MKLDNNMatrixPtr out);
  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                        std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                        std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                        std::vector<MKLDNNMatrixPtr>& inputs,
+                        MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);

  /**
   * Backward functions: reset buffers(inputs, output, bias)
   */
  void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                       MKLDNNMatrixPtr& out);
/**
* prepare for bias
*/
void prepareBias(MKLDNNMatrixPtr& bias,
const MatrixPtr& biasMat,
const MKLDNNMatrixPtr& out,
std::vector<MKLDNNMatrixPtr>& outs);
};

} // namespace paddle
...@@ -287,7 +287,7 @@ void MKLDNNLayer::resetMergeGrad(MKLDNNMatrixPtr& out) {
    return;
  }
  CHECK(out) << "should have reset internal output grad";
-  std::vector<double> scales(outputMap_.size(), 1.0);
+  std::vector<float> scales(outputMap_.size(), 1.0);
  std::vector<memory::primitive_desc> srcPDs;
  std::vector<primitive::at> srcs;
  for (auto it = outputMap_.begin(); it != outputMap_.end(); ++it) {
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ScaleSubRegionLayer.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(scale_sub_region, ScaleSubRegionLayer);
bool ScaleSubRegionLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
CHECK_EQ(static_cast<int>(inputLayers_.size()), 2);
auto& conf = config_.inputs(0).scale_sub_region_conf();
value_ = conf.value();
createFunction(forward_, "ScaleSubRegion", FuncConfig().set("value", value_));
createFunction(
backward_, "ScaleSubRegionGrad", FuncConfig().set("value", value_));
return true;
}
void ScaleSubRegionLayer::forward(PassType passType) {
Layer::forward(passType);
auto in0 = getInput(0);
imgH_ = in0.getFrameHeight();
imgW_ = in0.getFrameWidth();
if (imgH_ == 0 || imgW_ == 0) {
auto& conf = config_.inputs(0).scale_sub_region_conf();
imgH_ = conf.image_conf().img_size_y();
imgW_ = conf.image_conf().img_size();
}
MatrixPtr imgV = in0.value;
size_t batchSize = imgV->getHeight();
size_t spatialSize = imgH_ * imgW_;
channelsNum_ = imgV->getWidth() / spatialSize;
shape_ = TensorShape({batchSize, channelsNum_, imgH_, imgW_});
resetOutput(batchSize, imgV->getWidth());
auto& out = getOutput();
out.setFrameHeight(imgH_);
out.setFrameWidth(imgW_);
MatrixPtr indicesV = getInputValue(1);
indicesShape_ = TensorShape({batchSize, 6});
REGISTER_TIMER_INFO("ScaleSubRegionForward", getName().c_str());
BufferArgs inArgs;
BufferArgs outArgs;
inArgs.addArg(*imgV, shape_);
inArgs.addArg(*indicesV, indicesShape_);
outArgs.addArg(*out.value, shape_, ASSIGN_TO);
forward_[0]->calc(inArgs, outArgs);
}
void ScaleSubRegionLayer::backward(const UpdateCallback& callback) {
REGISTER_TIMER_INFO("ScaleSubRegionBackward", getName().c_str());
BufferArgs inArgs;
BufferArgs outArgs;
inArgs.addArg(*getOutputGrad(), shape_);
inArgs.addArg(*getInputValue(1), indicesShape_);
outArgs.addArg(*getInputGrad(0), shape_, ADD_TO);
backward_[0]->calc(inArgs, outArgs);
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Layer.h"
namespace paddle {
/**
 * \brief For each instance, this layer can be used to multiply a specified
 *        continuous sub-region by a value. By providing the start and end
 *        indices for C/H/W, you can specify the location and shape of the
 *        region.
 *
 *        input_0: Input value.
 *        input_1: Indices value to specify the location and shape of the
 *                 region.
 */
class ScaleSubRegionLayer : public Layer {
public:
explicit ScaleSubRegionLayer(const LayerConfig& config) : Layer(config) {}
~ScaleSubRegionLayer() {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
void forward(PassType passType);
void backward(const UpdateCallback& callback = nullptr);
protected:
TensorShape shape_;
TensorShape indicesShape_;
size_t imgH_;
size_t imgW_;
size_t channelsNum_;
real value_;
};
} // namespace paddle
...@@ -53,7 +53,7 @@ TEST(Operator, dot_mul) {
TEST(Projection, context) {
  for (auto contextStart : {-5, -3, -1, 0, 3}) {
    for (auto contextLength : {1, 2, 5, 7}) {
-      for (auto batchSize : {1, 2, 5, 20, 50}) {
+      for (auto batchSize : {1, 2, 5, 20}) {
        for (auto trainablePadding : {false, true}) {
          LOG(INFO) << " contextStart=" << contextStart
                    << " contextLength=" << contextLength
...@@ -585,14 +585,14 @@ TEST(Layer, maxoutLayer) {
}

void testFcLayer(string format, size_t nnz) {
  TestConfig config;
-  config.biasSize = 4096;
+  config.biasSize = 1024;
  config.layerConfig.set_type("fc");
-  config.layerConfig.set_size(4096);
+  config.layerConfig.set_size(1024);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_drop_rate(0.1);
  config.inputDefs.push_back(
-      {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)});
+      {INPUT_DATA, "layer_0", 2048, nnz, ParaSparse(format)});
  config.layerConfig.add_inputs();
  LOG(INFO) << config.inputDefs[0].sparse.sparse << " "
...@@ -609,9 +609,9 @@ void testFcLayer(string format, size_t nnz) {
}

TEST(Layer, fcLayer) {
-  testFcLayer("", 4096 * 4096 * 2);
-  testFcLayer("csc", 4096 * 40);
-  testFcLayer("csr", 4096 * 40);
+  testFcLayer("", 1024 * 1024 * 2);
+  testFcLayer("csc", 1024 * 10);
+  testFcLayer("csr", 1024 * 10);
}

TEST(Layer, SelectiveFullyConnectedLayer) {
...@@ -1995,7 +1995,7 @@ TEST(Layer, multibox_loss) {
TEST(Layer, TransLayer) {
  TestConfig config;
  const int height = 128;
-  const int width = 1028;
+  const int width = 256;
  config.layerConfig.set_type("trans");
  config.layerConfig.set_size(width);
...@@ -2358,6 +2358,38 @@ TEST(Layer, ScaleShiftLayer) {
  }
}
TEST(Layer, ScaleSubRegionLayer) {
const size_t batchSize = 64;
const size_t size = 4096;
TestConfig config;
config.layerConfig.set_type("scale_sub_region");
config.inputDefs.push_back({INPUT_DATA, "input", size, 0});
MatrixPtr indicesV = Matrix::create(batchSize, 6, false, false);
auto* data = indicesV->getData();
for (size_t i = 0; i < batchSize; ++i) {
data[i * 6] = 2;
data[i * 6 + 1] = 4;
data[i * 6 + 2] = 16;
data[i * 6 + 3] = 32;
data[i * 6 + 4] = 16;
data[i * 6 + 5] = 32;
}
config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, "indices", indicesV, {}});
LayerInputConfig* input = config.layerConfig.add_inputs();
ScaleSubRegionConfig* scaleSubRegionConf =
input->mutable_scale_sub_region_conf();
ImageConfig* imgConf = scaleSubRegionConf->mutable_image_conf();
imgConf->set_img_size(32);
imgConf->set_img_size_y(32);
imgConf->set_channels(4);
scaleSubRegionConf->set_value(2.0);
config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) {
testLayerGrad(config, "scale_sub_region", batchSize, false, useGpu, false);
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
......
...@@ -300,13 +300,8 @@ void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) {
  TestConfig dnnConfig;
  getAddtoConfig(dnnConfig, pm, nInputs);
  dnnConfig.layerConfig.set_type("mkldnn_addto");
-  // TODO(TJ): test with bias
-  for (auto withBias : {false}) {
-    if (withBias) {
-      dnnConfig.biasSize = pm.ic * pm.ih * pm.iw;
-    } else {
-      dnnConfig.biasSize = 0;
-    }
+  for (auto withBias : {false, true}) {
+    dnnConfig.biasSize = withBias ? pm.ic * pm.ih * pm.iw : 0;
    RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm)
  }
}
......
...@@ -169,7 +169,7 @@ void TensorCheck(AssertEq compare,
      count++;
    }
  }
-  EXPECT_EQ(count, 0) << "There are " << count << " different element.";
+  EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}

template <typename AssertEq, typename Tensor1, typename Tensor2>
......
...@@ -195,8 +195,13 @@ op_library(sequence_pool_op DEPS sequence_pooling)
op_library(lstm_op DEPS sequence2batch lstm_compute)
op_library(conv_transpose_op DEPS vol2col)
op_library(gru_op DEPS sequence2batch gru_compute)
-op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
-    DEPS net_op tensor_array)
+if(WITH_TESTING)
+  op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
+      DEPS net_op tensor_array gtest)
+else()
+  op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
+      DEPS net_op tensor_array)
+endif()
op_library(recurrent_op SRCS recurrent_op.cc DEPS executor)

list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
......
...@@ -65,7 +65,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
  size_t num_samples = inference->dims()[0];
  size_t infer_width = inference->dims()[1];
-  cudaMemset((void**)&accuracy_data, 0, sizeof(float));
+  PADDLE_ENFORCE(cudaMemset(accuracy_data, 0, sizeof(float)));

  if (num_samples == 0) {
    return;
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
class ArrayOp : public framework::OperatorBase {
public:
ArrayOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
protected:
size_t GetOffset(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const {
auto *i = scope.FindVar(Input("I"));
PADDLE_ENFORCE(i != nullptr, "I must be set");
auto &i_tensor = i->Get<framework::LoDTensor>();
PADDLE_ENFORCE_EQ(i_tensor.numel(), 1);
size_t offset;
if (platform::is_gpu_place(i_tensor.place())) {
// FIXME: Avoid copy from GPU to CPU
framework::Tensor t;
t.CopyFrom(i_tensor, platform::CPUPlace(), dev_ctx);
dev_ctx.Wait();
offset = static_cast<size_t>(*t.data<int64_t>());
} else {
offset = static_cast<size_t>(*i_tensor.data<int64_t>());
}
return offset;
}
};
} // namespace operators
} // namespace paddle
...@@ -140,6 +140,23 @@ class ArrayToLoDTensorInferShape : public framework::InferShapeBase {
                   "ArrayToLoDTensorOp must have input X.");
    PADDLE_ENFORCE(context->HasInput("RankTable"),
                   "ArrayToLoDTensorOp must have input RankTable.");
context->SetOutputDim("Out", context->GetInputDim("X"));
}
};
class ArrayToLoDTensorGradMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *grad_op = new framework::OpDescBind();
grad_op->SetType("lod_tensor_to_array");
grad_op->SetInput("X", OutputGrad("Out"));
grad_op->SetInput("RankTable", Input("RankTable"));
grad_op->SetOutput("Out", InputGrad("X"));
grad_op->SetAttrMap(Attrs());
return std::unique_ptr<framework::OpDescBind>(grad_op);
  }
};

...@@ -149,4 +166,5 @@ class ArrayToLoDTensorInferShape : public framework::InferShapeBase {
namespace ops = paddle::operators;
REGISTER_OPERATOR(array_to_lod_tensor, ops::ArrayToLoDTensorOp,
                  ops::ArrayToLoDTensorOpProtoMaker,
-                 ops::ArrayToLoDTensorInferShape);
+                 ops::ArrayToLoDTensorInferShape,
+                 ops::ArrayToLoDTensorGradMaker);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/clip_by_norm_op.h"
namespace paddle {
namespace operators {
class ClipByNormOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of ClipByNormOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of ClipByNormOp should not be null.");
auto max_norm = ctx->Attrs().Get<float>("max_norm");
PADDLE_ENFORCE_GT(max_norm, 0, "max_norm should be greater than 0.");
auto x_dims = ctx->GetInputDim("X");
ctx->SetOutputDim("Out", x_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
};
class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ClipByNormOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(Tensor) The input of clip_by_norm op."
"The number of dimensions must be between [1, 9].");
AddOutput("Out",
"(Tensor) The output of clip_by_norm op with shape as input(X)");
AddAttr<float>("max_norm", "(float) The maximum norm value.");
AddComment(R"DOC(
ClipByNorm operator limits the L2 norm of the input 'X' within 'max_norm'.
If the L2 norm of 'X' is less than or equal to 'max_norm', 'Out' will be
the same as 'X'. If the L2 norm of 'X' is greater than 'max_norm', 'X' will
be linearly scaled to make the L2 norm of 'Out' equal to 'max_norm', as
shown in the following formula:
'Out' = 'max_norm' * 'X' / norm('X'),
where norm('X') represents the L2 norm of 'X'.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm, ops::ClipByNormOp,
ops::ClipByNormOpMaker);
REGISTER_OP_CPU_KERNEL(
clip_by_norm, ops::ClipByNormKernel<paddle::platform::CPUPlace, float>);
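A quick NumPy check of the formula in the comment above (a sketch of the semantics, not of the Eigen kernel):

import numpy as np

def clip_by_norm(x, max_norm):
    # Out = X if ||X||_2 <= max_norm, else max_norm * X / ||X||_2.
    norm = np.sqrt(np.sum(x * x))
    return x if norm <= max_norm else x * (max_norm / norm)

x = np.array([3.0, 4.0])               # ||x||_2 == 5
print(clip_by_norm(x, 2.5))            # [1.5 2. ], whose norm is 2.5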
...@@ -12,12 +12,8 @@ ...@@ -12,12 +12,8 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/framework/op_registry.h" #include "paddle/operators/clip_by_norm_op.h"
#include "paddle/operators/fill_constant_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL( REGISTER_OP_GPU_KERNEL(
fill_constant, ops::FillConstantOpKernel<paddle::platform::GPUPlace, float>, clip_by_norm, ops::ClipByNormKernel<paddle::platform::GPUPlace, float>);
ops::FillConstantOpKernel<paddle::platform::GPUPlace, double>,
ops::FillConstantOpKernel<paddle::platform::GPUPlace, int>,
ops::FillConstantOpKernel<paddle::platform::GPUPlace, int64_t>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename Place, typename T>
class ClipByNormKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto max_norm = context.Attr<T>("max_norm");
auto* input = context.Input<Tensor>("X");
auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace());
auto x = EigenVector<T>::Flatten(*input);
auto out = EigenVector<T>::Flatten(*output);
auto x_norm = x.square().sum().sqrt();
auto place = context.GetEigenDevice<Place>();
// temp is 1 when ||X||_2 <= max_norm and 0 otherwise, so scaling is
// either 1 (keep X as-is) or max_norm / ||X||_2 (project onto the ball).
auto temp = (x_norm <= max_norm).template cast<T>().eval();
auto scaling = temp + (static_cast<T>(1) - temp) * max_norm / x_norm;
// scaling is a rank-0 expression; reshape it to one element and
// broadcast it across the flattened input before the element-wise product.
Eigen::array<int, 1> one_dim{{1}};
Eigen::DSizes<int, 1> m_dsize(input->numel());
out.device(place) = x * scaling.reshape(one_dim).broadcast(m_dsize);
}
};
} // namespace operators
} // namespace paddle
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "paddle/operators/compare_op.h" #include "paddle/operators/compare_op.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename OpComment> template <typename OpComment>
...@@ -61,19 +62,34 @@ class CompareOpInferShape : public framework::InferShapeBase { ...@@ -61,19 +62,34 @@ class CompareOpInferShape : public framework::InferShapeBase {
} }
}; };
class CompareOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
framework::OpKernelType kt = OperatorWithKernel::GetKernelType(ctx);
// CompareOp kernel's device type is decided by input tensor place
kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
return kt;
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
#define REGISTER_LOGICAL_OP(op_type, _equation) \ #define REGISTER_LOGICAL_OP(op_type, _equation) \
struct _##op_type##Comment { \ struct _##op_type##Comment { \
static char type[]; \ static char type[]; \
static char equation[]; \ static char equation[]; \
}; \ }; \
char _##op_type##Comment::type[]{#op_type}; \ char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \ char _##op_type##Comment::equation[]{_equation}; \
REGISTER_OP_WITH_KERNEL( \ REGISTER_OPERATOR( \
op_type, ::paddle::operators::CompareOpProtoMaker<_##op_type##Comment>, \ op_type, ::paddle::operators::CompareOp, \
::paddle::operators::CompareOpInferShape<_##op_type##Comment>, \ ::paddle::operators::CompareOpProtoMaker<_##op_type##Comment>, \
::paddle::operators::CompareOpInferShape<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker); ::paddle::framework::EmptyGradOpMaker);
REGISTER_LOGICAL_OP(less_than, "Out = X < Y"); REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
......
...@@ -75,10 +75,10 @@ class FillConstantBatchSizeLikeOpMaker ...@@ -75,10 +75,10 @@ class FillConstantBatchSizeLikeOpMaker
"with the specified value"); "with the specified value");
AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output"); AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
AddAttr<int>("input_dim_idx", AddAttr<int>("input_dim_idx",
"(int, default 0) the index of input's batch size dimension") "(int, default 0) The index of input's batch size dimension")
.SetDefault(0); .SetDefault(0);
AddAttr<int>("output_dim_idx", AddAttr<int>("output_dim_idx",
"(int, default 0) the index of output's batch size dimension") "(int, default 0) The index of output's batch size dimension")
.SetDefault(0); .SetDefault(0);
AddAttr<float>("value", "(float, default 0) The value to be filled") AddAttr<float>("value", "(float, default 0) The value to be filled")
.SetDefault(0.0f); .SetDefault(0.0f);
...@@ -100,5 +100,5 @@ REGISTER_OPERATOR(fill_constant_batch_size_like, ...@@ -100,5 +100,5 @@ REGISTER_OPERATOR(fill_constant_batch_size_like,
ops::FillConstantBatchSizeLikeOpMaker); ops::FillConstantBatchSizeLikeOpMaker);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
fill_constant_batch_size_like, fill_constant_batch_size_like,
ops::FillConstantOpKernel<paddle::platform::CPUPlace, float>, ops::FillConstantBatchSizeLikeOpKernel<paddle::platform::CPUPlace, float>,
ops::FillConstantOpKernel<paddle::platform::CPUPlace, double>); ops::FillConstantBatchSizeLikeOpKernel<paddle::platform::CPUPlace, double>);
...@@ -18,5 +18,5 @@ ...@@ -18,5 +18,5 @@
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL( REGISTER_OP_GPU_KERNEL(
fill_constant_batch_size_like, fill_constant_batch_size_like,
ops::FillConstantOpKernel<paddle::platform::GPUPlace, float>, ops::FillConstantBatchSizeLikeOpKernel<paddle::platform::GPUPlace, float>,
ops::FillConstantOpKernel<paddle::platform::GPUPlace, double>); ops::FillConstantBatchSizeLikeOpKernel<paddle::platform::GPUPlace, double>);
...@@ -21,7 +21,7 @@ namespace paddle { ...@@ -21,7 +21,7 @@ namespace paddle {
namespace operators { namespace operators {
template <typename Place, typename T> template <typename Place, typename T>
class FillConstantOpKernel : public framework::OpKernel<T> { class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto* out = ctx.Output<framework::Tensor>("Out"); auto* out = ctx.Output<framework::Tensor>("Out");
......
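For reference, the op behind the renamed kernel is easy to state in NumPy (a sketch of the semantics under this commit's attribute names):

import numpy as np

def fill_constant_batch_size_like(x, shape, value,
                                  input_dim_idx=0, output_dim_idx=0):
    # Take the batch dimension from the input, keep the rest of the
    # requested shape, and fill every element with the constant.
    out_shape = list(shape)
    out_shape[output_dim_idx] = x.shape[input_dim_idx]
    return np.full(out_shape, value, dtype=np.float32)

x = np.zeros((8, 32))                       # batch size 8
y = fill_constant_batch_size_like(x, [1, 16], 0.5)
assert y.shape == (8, 16) and (y == 0.5).all()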
...@@ -12,33 +12,41 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,33 +12,41 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/operators/fill_constant_op.h" #include "paddle/framework/data_type.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
class FillConstantOp : public framework::OperatorWithKernel { class FillConstantInferShape : public framework::InferShapeBase {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; void operator()(framework::InferShapeContext *ctx) const override {
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasOutput("Out"), PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of FillConstantOp should not be null."); "Output(Out) of FillConstantOp should not be null.");
auto &shape = ctx->Attrs().Get<std::vector<int>>("shape"); auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
std::vector<int64_t> shape_int64(shape.size(), 0); ctx->SetOutputDim("Out", framework::make_ddim(shape));
std::transform(shape.begin(), shape.end(), shape_int64.begin(),
[](int a) { return static_cast<int64_t>(a); });
auto dims = framework::make_ddim(shape_int64);
ctx->SetOutputDim("Out", dims);
} }
};
protected: class FillConstantOp : public framework::OperatorBase {
framework::OpKernelType GetKernelType( public:
const framework::ExecutionContext &ctx) const override { using framework::OperatorBase::OperatorBase;
int data_type = ctx.Attr<int>("data_type"); void Run(const framework::Scope &scope,
VLOG(10) << " FillConstant data_type = " << data_type; const platform::DeviceContext &dev_ctx) const override {
return framework::OpKernelType(static_cast<framework::DataType>(data_type), auto data_type = static_cast<framework::DataType>(Attr<int>("data_type"));
ctx.device_context()); auto value = Attr<float>("value");
auto force_cpu = Attr<bool>("force_cpu");
auto &out =
*scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
out.Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
if (force_cpu) {
auto cpu = platform::CPUPlace();
out.mutable_data(cpu, framework::ToTypeIndex(data_type));
} else {
out.mutable_data(dev_ctx.GetPlace(), framework::ToTypeIndex(data_type));
}
math::set_constant(dev_ctx, &out, value);
} }
}; };
...@@ -54,6 +62,11 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -54,6 +62,11 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output"); AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
AddAttr<float>("value", "(float, default 0) The value to be filled") AddAttr<float>("value", "(float, default 0) The value to be filled")
.SetDefault(0.0f); .SetDefault(0.0f);
AddAttr<bool>("force_cpu",
"(bool, default false) Force fill output variable to cpu "
"memory. Otherwise, fill output variable to the running "
"device")
.SetDefault(false);
AddOutput("Out", AddOutput("Out",
"(Tensor) Tensor of specified shape will be filled " "(Tensor) Tensor of specified shape will be filled "
"with the specified value"); "with the specified value");
...@@ -69,10 +82,6 @@ Fill up a variable with specified constant value. ...@@ -69,10 +82,6 @@ Fill up a variable with specified constant value.
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(fill_constant, ops::FillConstantOp, REGISTER_OPERATOR(fill_constant, ops::FillConstantOp,
ops::FillConstantOpMaker); ops::FillConstantInferShape, ops::FillConstantOpMaker,
REGISTER_OP_CPU_KERNEL( paddle::framework::EmptyGradOpMaker);
fill_constant, ops::FillConstantOpKernel<paddle::platform::CPUPlace, float>,
ops::FillConstantOpKernel<paddle::platform::CPUPlace, double>,
ops::FillConstantOpKernel<paddle::platform::CPUPlace, int>,
ops::FillConstantOpKernel<paddle::platform::CPUPlace, int64_t>);
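With the move from OperatorWithKernel to OperatorBase, fill_constant no longer needs per-dtype kernels: Run resizes Out, allocates it on the CPU or on the running device depending on force_cpu, and delegates the fill to math::set_constant. A toy runnable model of that placement decision (the place strings are stand-ins, not a Paddle API):

import numpy as np

def fill_constant_run(shape, value, dtype, device_place, force_cpu=False):
    # force_cpu only changes *where* the buffer is allocated;
    # the contents are the same constant either way.
    place = 'cpu' if force_cpu else device_place
    out = np.full(shape, value, dtype=dtype)   # stand-in for mutable_data + fill
    return place, out

place, out = fill_constant_run([2, 3], 1.0, np.float32, 'gpu:0')
assert place == 'gpu:0' and (out == 1.0).all()
place, _ = fill_constant_run([2, 3], 1.0, np.float32, 'gpu:0', force_cpu=True)
assert place == 'cpu'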
...@@ -133,6 +133,22 @@ class LoDTensorToArrayInferVarType : public framework::VarTypeInference { ...@@ -133,6 +133,22 @@ class LoDTensorToArrayInferVarType : public framework::VarTypeInference {
} }
}; };
class LoDTensorToArrayGradMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *grad_op = new framework::OpDescBind();
grad_op->SetType("array_to_lod_tensor");
grad_op->SetInput("X", OutputGrad("Out"));
grad_op->SetInput("RankTable", Input("RankTable"));
grad_op->SetOutput("Out", InputGrad("X"));
grad_op->SetAttrMap(Attrs());
return std::unique_ptr<framework::OpDescBind>(grad_op);
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -140,4 +156,5 @@ namespace ops = paddle::operators; ...@@ -140,4 +156,5 @@ namespace ops = paddle::operators;
REGISTER_OPERATOR(lod_tensor_to_array, ops::LoDTensorToArrayOp, REGISTER_OPERATOR(lod_tensor_to_array, ops::LoDTensorToArrayOp,
ops::LoDTensorToArrayOpProtoMaker, ops::LoDTensorToArrayOpProtoMaker,
ops::LoDTensorToArrayInferShape, ops::LoDTensorToArrayInferShape,
ops::LoDTensorToArrayInferVarType); ops::LoDTensorToArrayInferVarType,
ops::LoDTensorToArrayGradMaker);
...@@ -34,10 +34,10 @@ class LstmUnitOp : public framework::OperatorWithKernel { ...@@ -34,10 +34,10 @@ class LstmUnitOp : public framework::OperatorWithKernel {
auto c_prev_dims = ctx->GetInputDim("C_prev"); auto c_prev_dims = ctx->GetInputDim("C_prev");
PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2."); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");
PADDLE_ENFORCE(x_dims[0] == c_prev_dims[0], PADDLE_ENFORCE_EQ(x_dims[0], c_prev_dims[0],
"Batch size of inputs and states must be equal"); "Batch size of inputs and states must be equal");
PADDLE_ENFORCE(x_dims[1] == c_prev_dims[1] * 4, PADDLE_ENFORCE_EQ(x_dims[1], c_prev_dims[1] * 4,
"Dimension of FC should equal to prev state * 4"); "Dimension of FC should equal to prev state * 4");
int b_size = c_prev_dims[0]; // batch size int b_size = c_prev_dims[0]; // batch size
int s_dim = c_prev_dims[1]; // state dim int s_dim = c_prev_dims[1]; // state dim
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/operators/math/math_function.h" #include "paddle/operators/math/math_function.h"
#include "paddle/framework/data_type.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -233,6 +234,52 @@ void gemv<platform::CPUPlace, double>(const platform::DeviceContext& context, ...@@ -233,6 +234,52 @@ void gemv<platform::CPUPlace, double>(const platform::DeviceContext& context,
template struct SetConstant<platform::CPUPlace, float>; template struct SetConstant<platform::CPUPlace, float>;
struct TensorSetConstant {
TensorSetConstant(framework::Tensor* tensor, float value)
: tensor_(tensor), value_(value) {}
template <typename T>
void operator()() const {
auto cpu = platform::CPUPlace();
auto* begin = tensor_->mutable_data<T>(cpu);
std::fill(begin, begin + tensor_->numel(), static_cast<T>(value_));
}
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CPUPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(framework::ToDataType(tensor->type()),
TensorSetConstant(tensor, value));
}
struct TensorSetConstantWithPlace : public boost::static_visitor<void> {
TensorSetConstantWithPlace(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename Place>
void operator()(Place place) const {
set_constant_with_place<Place>(context_, tensor_, value_);
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
void set_constant(const platform::DeviceContext& context,
framework::Tensor* tensor, float value) {
TensorSetConstantWithPlace func(context, tensor, value);
#ifdef PADDLE_WITH_CUDA
tensor->place().apply_visitor(func);
#else
func(platform::CPUPlace());
#endif
}
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
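set_constant is a double dispatch: TensorSetConstantWithPlace selects the place (CPU here, GPU in the .cu counterpart below), then VisitDataType selects the element type. A runnable Python sketch of the same dispatch shape (the registry and fill helper are stand-ins, not Paddle APIs):

import numpy as np

def _cpu_fill(tensor, value):
    # Mirrors TensorSetConstant: cast the value to the element type,
    # then fill the whole buffer (std::fill over [begin, begin + numel)).
    tensor[...] = tensor.dtype.type(value)

_FILLERS = {'cpu': _cpu_fill}          # a 'gpu' entry would join here

def set_constant(tensor, value, place='cpu'):
    _FILLERS[place](tensor, value)     # place dispatch; dtype dispatch
                                       # happens inside via tensor.dtype

t = np.empty((10, 10), dtype=np.int32)
set_constant(t, 10)
assert (t == 10).all()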
...@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/framework/data_type.h"
#include "paddle/operators/math/math_function.h" #include "paddle/operators/math/math_function.h"
namespace paddle { namespace paddle {
...@@ -232,6 +233,30 @@ void gemv<platform::GPUPlace, double>(const platform::DeviceContext& context, ...@@ -232,6 +233,30 @@ void gemv<platform::GPUPlace, double>(const platform::DeviceContext& context,
template struct SetConstant<platform::GPUPlace, float>; template struct SetConstant<platform::GPUPlace, float>;
struct TensorSetConstant {
TensorSetConstant(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void operator()() const {
SetConstant<platform::GPUPlace, T> functor;
functor(context_, tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::GPUPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(framework::ToDataType(tensor->type()),
TensorSetConstant(context, tensor, value));
}
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -108,6 +108,13 @@ struct SetConstant { ...@@ -108,6 +108,13 @@ struct SetConstant {
} }
}; };
template <typename Place>
void set_constant_with_place(const platform::DeviceContext& context,
framework::Tensor* tensor, float value);
void set_constant(const platform::DeviceContext& context,
framework::Tensor* tensor, float value);
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -139,3 +139,15 @@ TEST(math_function, gemv) { ...@@ -139,3 +139,15 @@ TEST(math_function, gemv) {
GemvTest<float>(12, 7, true); GemvTest<float>(12, 7, true);
GemvTest<double>(7, 9, true); GemvTest<double>(7, 9, true);
} }
TEST(math_function, set_constant) {
paddle::framework::Tensor t;
t.Resize({10, 10});
t.mutable_data<int>(paddle::platform::CPUPlace());
auto* ctx = new paddle::platform::CPUDeviceContext();
paddle::operators::math::set_constant(*ctx, &t, 10);
for (int64_t i = 0; i < t.numel(); ++i) {
PADDLE_ENFORCE_EQ(10, t.data<int>()[i]);
}
delete ctx;
}
...@@ -51,6 +51,7 @@ class MeanGradOp : public framework::OperatorWithKernel { ...@@ -51,6 +51,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext* ctx) const override { void InferShape(framework::InferShapeContext* ctx) const override {
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
ctx->ShareLoD("X", framework::GradVarName("X"));
} }
}; };
......
...@@ -37,11 +37,11 @@ class PoolCudnnOpKernel : public framework::OpKernel<T> { ...@@ -37,11 +37,11 @@ class PoolCudnnOpKernel : public framework::OpKernel<T> {
const T *input_data = input->data<T>(); const T *input_data = input->data<T>();
T *output_data = output->mutable_data<T>(ctx.GetPlace()); T *output_data = output->mutable_data<T>(ctx.GetPlace());
std::string pooling_type = ctx.Attr<std::string>("poolingType"); std::string pooling_type = ctx.Attr<std::string>("pooling_type");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize"); std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
if (ctx.Attr<bool>("globalPooling")) { if (ctx.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(input->dims()[i + 2]); ksize[i] = static_cast<int>(input->dims()[i + 2]);
...@@ -92,12 +92,12 @@ class PoolCudnnGradOpKernel : public framework::OpKernel<T> { ...@@ -92,12 +92,12 @@ class PoolCudnnGradOpKernel : public framework::OpKernel<T> {
ctx.Input<Tensor>(framework::GradVarName("Out")); ctx.Input<Tensor>(framework::GradVarName("Out"));
Tensor *input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); Tensor *input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
std::string pooling_type = ctx.Attr<std::string>("poolingType"); std::string pooling_type = ctx.Attr<std::string>("pooling_type");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize"); std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
if (ctx.Attr<bool>("globalPooling")) { if (ctx.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(input->dims()[i + 2]); ksize[i] = static_cast<int>(input->dims()[i + 2]);
......
...@@ -29,7 +29,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { ...@@ -29,7 +29,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
auto in_x_dims = ctx->GetInputDim("X"); auto in_x_dims = ctx->GetInputDim("X");
std::string pooling_type = ctx->Attrs().Get<std::string>("poolingType"); std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize"); std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides"); std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings"); std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
...@@ -37,7 +37,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { ...@@ -37,7 +37,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
"Pooling intput should be 4-D or 5-D tensor."); "Pooling intput should be 4-D or 5-D tensor.");
if (ctx->Attrs().Get<bool>("globalPooling")) { if (ctx->Attrs().Get<bool>("global_pooling")) {
ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2); ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
...@@ -83,20 +83,20 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, ...@@ -83,20 +83,20 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
"H is the height of the feature, " "H is the height of the feature, "
"and W is the width of the feature."); "and W is the width of the feature.");
AddAttr<std::string>("poolingType", AddAttr<std::string>("pooling_type",
"(string), pooling type, can be \"max\" for max-pooling " "(string), pooling type, can be \"max\" for max-pooling "
"and \"avg\" for average-pooling.") "and \"avg\" for average-pooling.")
.InEnum({"max", "avg"}); .InEnum({"max", "avg"});
AddAttr<std::vector<int>>("ksize", AddAttr<std::vector<int>>("ksize",
"(vector<int>) The pooling window " "(vector<int>) The pooling window "
"size(height, width) of the pooling operator. " "size(height, width) of the pooling operator. "
"If globalPooling = true, ksize and paddings will " "If global_pooling = true, ksize and paddings will "
"be ignored."); // TODO(Chengduo): Add checker. "be ignored."); // TODO(Chengduo): Add checker.
// (Currently, // (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
AddAttr<bool>("globalPooling", AddAttr<bool>("global_pooling",
"(bool, default false) Whether to use the global pooling. " "(bool, default false) Whether to use the global pooling. "
"If globalPooling = true, ksize and paddings will be ignored.") "If global_pooling = true, ksize and paddings will be ignored.")
.SetDefault(false); .SetDefault(false);
AddAttr<std::vector<int>>("strides", AddAttr<std::vector<int>>("strides",
"(vector<int>, default {1, 1}), strides(height, " "(vector<int>, default {1, 1}), strides(height, "
...@@ -107,7 +107,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, ...@@ -107,7 +107,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
"paddings", "paddings",
"(vector<int>, defalut {0,0}), paddings(height, width) of pooling " "(vector<int>, defalut {0,0}), paddings(height, width) of pooling "
"operator." "operator."
"If globalPooling = true, paddings and ksize will be ignored.") "If global_pooling = true, paddings and ksize will be ignored.")
.SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
...@@ -115,7 +115,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, ...@@ -115,7 +115,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
Pool2d Operator. Pool2d Operator.
The pooling2d operation calculates the output based on The pooling2d operation calculates the output based on
the input, poolingType and ksize, strides, paddings parameters. the input, pooling_type and ksize, strides, paddings parameters.
Input(X) and output(Out) are in NCHW format, where N is batch size, C is the Input(X) and output(Out) are in NCHW format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature. number of channels, H is the height of the feature, and W is the width of the feature.
Parameters(ksize, strides, paddings) are two-element vectors. Parameters(ksize, strides, paddings) are two-element vectors.
...@@ -152,7 +152,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, ...@@ -152,7 +152,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
"the number of channels, and D, H and W is the depth, height and " "the number of channels, and D, H and W is the depth, height and "
"width of the feature, respectively."); "width of the feature, respectively.");
AddAttr<std::string>("poolingType", AddAttr<std::string>("pooling_type",
"(string) Pooling type, can be \"max\" for max-pooling " "(string) Pooling type, can be \"max\" for max-pooling "
"and \"avg\" for average-pooling.") "and \"avg\" for average-pooling.")
.InEnum({"max", "avg"}); .InEnum({"max", "avg"});
...@@ -160,13 +160,14 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, ...@@ -160,13 +160,14 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
"ksize", "ksize",
"(vector<int>) The pooling window size(depth, height, " "(vector<int>) The pooling window size(depth, height, "
"width) of pooling operator. " "width) of pooling operator. "
"If globalPooling = true, ksize and paddings will " "If global_pooling = true, ksize and paddings will "
"be ignored."); // TODO(Chengduo): Add checker. "be ignored."); // TODO(Chengduo): Add checker.
// (Currently, // (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
AddAttr<bool>("globalPooling", AddAttr<bool>(
"(bool, default false) Whether to use the global pooling. " "global_pooling",
"If globalPooling = true, ksize and paddings wille be ignored.") "(bool, default false) Whether to use the global pooling. "
"If global_pooling = true, ksize and paddings wille be ignored.")
.SetDefault(false); .SetDefault(false);
AddAttr<std::vector<int>>( AddAttr<std::vector<int>>(
"strides", "strides",
...@@ -178,7 +179,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, ...@@ -178,7 +179,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
"paddings", "paddings",
"(vector<int>, defalut {0,0,0}), paddings(depth, height, " "(vector<int>, defalut {0,0,0}), paddings(depth, height, "
"width) of pooling operator. " "width) of pooling operator. "
"If globalPooling = true, ksize and paddings will be ignored.") "If global_pooling = true, ksize and paddings will be ignored.")
.SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
...@@ -186,7 +187,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, ...@@ -186,7 +187,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
Pool3d Operator. Pool3d Operator.
The pooling3d operation calculates the output based on The pooling3d operation calculates the output based on
the input, poolingType, ksize, strides, and paddings parameters. the input, pooling_type, ksize, strides, and paddings parameters.
Input(X) and output(Out) are in NCDHW format, where N is batch Input(X) and output(Out) are in NCDHW format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters(ksize, strides, paddings) width of the feature, respectively. Parameters(ksize, strides, paddings)
......
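The snake_case rename above propagates to the Python wrapper later in this commit (see pool2d in layers.py below). As a reminder of what global_pooling means, here is a toy NumPy max-pool with stride == ksize and no padding; with global_pooling the window silently becomes the whole feature map:

import numpy as np

def max_pool2d(x, ksize, global_pooling=False):
    n, c, h, w = x.shape                     # NCHW, as in the op docs
    kh, kw = (h, w) if global_pooling else ksize
    oh, ow = h // kh, w // kw
    out = np.empty((n, c, oh, ow), dtype=x.dtype)
    for i in range(oh):
        for j in range(ow):
            window = x[:, :, i * kh:(i + 1) * kh, j * kw:(j + 1) * kw]
            out[:, :, i, j] = window.max(axis=(2, 3))
    return out

x = np.random.rand(2, 3, 4, 4)
assert max_pool2d(x, (2, 2)).shape == (2, 3, 2, 2)
assert max_pool2d(x, (2, 2), global_pooling=True).shape == (2, 3, 1, 1)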
...@@ -57,11 +57,11 @@ class PoolKernel : public framework::OpKernel<T> { ...@@ -57,11 +57,11 @@ class PoolKernel : public framework::OpKernel<T> {
const Tensor* in_x = context.Input<Tensor>("X"); const Tensor* in_x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out"); Tensor* out = context.Output<Tensor>("Out");
std::string pooling_type = context.Attr<std::string>("poolingType"); std::string pooling_type = context.Attr<std::string>("pooling_type");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize"); std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
if (context.Attr<bool>("globalPooling")) { if (context.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(in_x->dims()[i + 2]); ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
...@@ -119,12 +119,12 @@ class PoolGradKernel : public framework::OpKernel<T> { ...@@ -119,12 +119,12 @@ class PoolGradKernel : public framework::OpKernel<T> {
context.Input<Tensor>(framework::GradVarName("Out")); context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X")); Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
std::string pooling_type = context.Attr<std::string>("poolingType"); std::string pooling_type = context.Attr<std::string>("pooling_type");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize"); std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
if (context.Attr<bool>("globalPooling")) { if (context.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(in_x->dims()[i + 2]); ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
......
...@@ -44,7 +44,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { ...@@ -44,7 +44,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
"Pooling intput should be 4-D or 5-D tensor."); "Pooling intput should be 4-D or 5-D tensor.");
if (ctx->Attrs().Get<bool>("globalPooling")) { if (ctx->Attrs().Get<bool>("global_pooling")) {
ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2); ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
...@@ -110,14 +110,14 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -110,14 +110,14 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int>>("ksize", AddAttr<std::vector<int>>("ksize",
"(vector<int>) The pooling window size(height, " "(vector<int>) The pooling window size(height, "
"width) of pooling operator. " "width) of pooling operator. "
"If globalPooling = true, ksize and paddings " "If global_pooling = true, ksize and paddings "
"will be ignored."); // TODO(Chengduo): Add "will be ignored."); // TODO(Chengduo): Add
// checker. (Currently, // checker. (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
AddAttr<bool>( AddAttr<bool>(
"globalPooling", "global_pooling",
"(bool, default false) Whether to use the global pooling. " "(bool, default false) Whether to use the global pooling. "
"If globalPooling = true, ksize and paddings will be ignored.") "If global_pooling = true, ksize and paddings will be ignored.")
.SetDefault(false); .SetDefault(false);
AddAttr<std::vector<int>>("strides", AddAttr<std::vector<int>>("strides",
"(vector<int>, default {1, 1}), strides(height, " "(vector<int>, default {1, 1}), strides(height, "
...@@ -128,7 +128,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -128,7 +128,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
"paddings", "paddings",
"(vector<int>, defalut {0, 0}), paddings(height, width) of pooling " "(vector<int>, defalut {0, 0}), paddings(height, width) of pooling "
"operator. " "operator. "
"If globalPooling = true, paddings and will be ignored.") "If global_pooling = true, paddings and will be ignored.")
.SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
...@@ -188,14 +188,14 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -188,14 +188,14 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int>>("ksize", AddAttr<std::vector<int>>("ksize",
"(vector<int>) The pooling window size(depth, " "(vector<int>) The pooling window size(depth, "
"height, width) of pooling operator. " "height, width) of pooling operator. "
"If globalPooling = true, ksize and paddings " "If global_pooling = true, ksize and paddings "
"will be ignored."); // TODO(Chengduo): Add "will be ignored."); // TODO(Chengduo): Add
// checker. (Currently, // checker. (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
AddAttr<bool>( AddAttr<bool>(
"globalPooling", "global_pooling",
"(bool, default false) Whether to use the global pooling. " "(bool, default false) Whether to use the global pooling. "
"If globalPooling = true, ksize and paddings will be ignored.") "If global_pooling = true, ksize and paddings will be ignored.")
.SetDefault(false); .SetDefault(false);
AddAttr<std::vector<int>>("strides", AddAttr<std::vector<int>>("strides",
"(vector<int>, default {1,1,1}), strides(depth, " "(vector<int>, default {1,1,1}), strides(depth, "
...@@ -206,7 +206,7 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -206,7 +206,7 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
"paddings", "paddings",
"(vector, defalut {0,0,0}), paddings(depth, " "(vector, defalut {0,0,0}), paddings(depth, "
"height, width) of pooling operator. " "height, width) of pooling operator. "
"If globalPooling = true, paddings and ksize will be ignored.") "If global_pooling = true, paddings and ksize will be ignored.")
.SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker doesn't support vector type.) // TypedAttrChecker doesn't support vector type.)
......
...@@ -35,7 +35,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T> { ...@@ -35,7 +35,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T> {
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize"); std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
if (context.Attr<bool>("globalPooling")) { if (context.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(in_x->dims()[i + 2]); ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
...@@ -72,7 +72,7 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel<T> { ...@@ -72,7 +72,7 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel<T> {
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize"); std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
if (context.Attr<bool>("globalPooling")) { if (context.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2]); ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2]);
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_rank_table.h"
#include "paddle/operators/array_operator.h"
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
class ShrinkRNNMemoryOp : public ArrayOp {
public:
ShrinkRNNMemoryOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: ArrayOp(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto *x_var = scope.FindVar(Input("X"));
PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");
auto &x_tensor = x_var->Get<framework::LoDTensor>();
size_t offset = this->GetOffset(scope, dev_ctx);
auto *rank_table_var = scope.FindVar(Input("RankTable"));
PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set");
auto &rank_table = rank_table_var->Get<framework::LoDRankTable>();
auto &rank_items = rank_table.items();
int dst_num_rows =
std::lower_bound(rank_items.begin(), rank_items.end(), offset,
[](const framework::LoDRankTable::TableItem &a,
size_t b) { return a.length > b; }) -
rank_items.begin();
auto *out_var = scope.FindVar(Output("Out"));
PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set");
auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
if (dst_num_rows != 0) {
out_tensor.ShareDataWith(x_tensor.Slice(0, dst_num_rows));
}
}
};
class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
ShrinkRNNMemoryOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "");
AddInput("RankTable", "");
AddInput("I", "");
AddOutput("Out", "");
AddComment("");
}
};
class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"));
PADDLE_ENFORCE(context->HasInput("I"));
PADDLE_ENFORCE(context->HasInput("RankTable"));
context->SetOutputDim("Out", context->GetInputDim("X"));
}
};
class ShrinkRNNMemoryGradOp : public ArrayOp {
public:
ShrinkRNNMemoryGradOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: ArrayOp(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out")));
auto *dx_var = scope.FindVar(Output(framework::GradVarName("X")));
PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr");
auto *x_var = scope.FindVar(Input("X"));
PADDLE_ENFORCE(x_var != nullptr);
auto &x_tensor = x_var->Get<framework::LoDTensor>();
auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>();
dx_tensor.Resize(x_tensor.dims());
dx_tensor.mutable_data(x_tensor.place(), x_tensor.type());
if (dout_var == nullptr) { // dx_tensor fill zero
math::set_constant(dev_ctx, &dx_tensor, 0.0f);
} else {
auto &dout_tensor = dout_var->Get<framework::LoDTensor>();
auto height = dout_tensor.dims()[0];
dx_tensor.Slice(0, static_cast<int>(height))
.CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx);
// Rows past `height` received no gradient from dout; zero them out.
if (dx_tensor.dims()[0] > height) {
auto rest_tensor = dx_tensor.Slice(
static_cast<int>(height), static_cast<int>(dx_tensor.dims()[0]));
math::set_constant(dev_ctx, &rest_tensor, 0.0f);
}
}
}
};
class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"));
PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
context->SetOutputDim(framework::GradVarName("X"),
context->GetInputDim("X"));
}
};
class ShrinkRNNGradOpMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *op = new framework::OpDescBind();
op->SetType("shrink_rnn_memory_grad");
op->SetInput("X", Input("X"));
op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
op->SetAttrMap(Attrs());
return std::unique_ptr<framework::OpDescBind>(op);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(shrink_rnn_memory, ops::ShrinkRNNMemoryOp,
ops::ShrinkRNNMemoryInferShape,
ops::ShrinkRNNMemoryOpProtoMaker, ops::ShrinkRNNGradOpMaker);
REGISTER_OPERATOR(shrink_rnn_memory_grad, ops::ShrinkRNNMemoryGradOp,
ops::ShrinkRNNMemoryGradInferShape);
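The forward pass keeps only the leading rows of X that belong to sequences still alive at step I; because the rank table's lengths are sorted longest-first, std::lower_bound with the `a.length > b` comparator is just a fast way to count them. A NumPy sketch:

import numpy as np

def shrink_rnn_memory(x, lengths, step):
    # lengths come from the LoDRankTable, sorted longest-first, which is
    # what lets the C++ use std::lower_bound; a plain count is equivalent.
    dst_num_rows = sum(1 for l in lengths if l > step)
    return x[:dst_num_rows]

x = np.arange(8.).reshape(4, 2)    # one state row per sequence
print(shrink_rnn_memory(x, [5, 3, 3, 1], step=2))   # keeps 3 of 4 rows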
...@@ -11,48 +11,18 @@ ...@@ -11,48 +11,18 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/framework/lod_tensor_array.h" #include "paddle/operators/array_operator.h"
#include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
class ArrayOpBase : public framework::OperatorBase {
public:
ArrayOpBase(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {}
protected:
size_t GetOffset(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const {
auto *i = scope.FindVar(Input("I"));
PADDLE_ENFORCE(i != nullptr, "I must be set");
auto &i_tensor = i->Get<framework::LoDTensor>();
PADDLE_ENFORCE_EQ(i_tensor.numel(), 1);
size_t offset;
if (platform::is_gpu_place(i_tensor.place())) {
// FIXME: Avoid copy from GPU to CPU
framework::Tensor t;
t.CopyFrom(i_tensor, platform::CPUPlace(), dev_ctx);
dev_ctx.Wait();
offset = static_cast<size_t>(*t.data<int64_t>());
} else {
offset = static_cast<size_t>(*i_tensor.data<int64_t>());
}
return offset;
}
};
class WriteToArrayOp : public ArrayOpBase { class WriteToArrayOp : public ArrayOp {
public: public:
WriteToArrayOp(const std::string &type, WriteToArrayOp(const std::string &type,
const framework::VariableNameMap &inputs, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs, const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs) const framework::AttributeMap &attrs)
: ArrayOpBase(type, inputs, outputs, attrs) {} : ArrayOp(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope, void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override { const platform::DeviceContext &dev_ctx) const override {
...@@ -122,13 +92,13 @@ class WriteToArrayInferVarType : public framework::VarTypeInference { ...@@ -122,13 +92,13 @@ class WriteToArrayInferVarType : public framework::VarTypeInference {
} }
}; };
class ReadFromArrayOp : public ArrayOpBase { class ReadFromArrayOp : public ArrayOp {
public: public:
ReadFromArrayOp(const std::string &type, ReadFromArrayOp(const std::string &type,
const framework::VariableNameMap &inputs, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs, const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs) const framework::AttributeMap &attrs)
: ArrayOpBase(type, inputs, outputs, attrs) {} : ArrayOp(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope, void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override { const platform::DeviceContext &dev_ctx) const override {
auto *x = scope.FindVar(Input("X")); auto *x = scope.FindVar(Input("X"));
......
...@@ -49,8 +49,6 @@ struct Transform<platform::CPUPlace> { ...@@ -49,8 +49,6 @@ struct Transform<platform::CPUPlace> {
template <typename InputIter, typename OutputIter, typename UnaryOperation> template <typename InputIter, typename OutputIter, typename UnaryOperation>
void operator()(const DeviceContext& context, InputIter first, InputIter last, void operator()(const DeviceContext& context, InputIter first, InputIter last,
OutputIter result, UnaryOperation op) { OutputIter result, UnaryOperation op) {
auto place = context.GetPlace();
PADDLE_ENFORCE(is_cpu_place(place), "It must use CPU place.");
std::transform(first, last, result, op); std::transform(first, last, result, op);
} }
...@@ -59,8 +57,6 @@ struct Transform<platform::CPUPlace> { ...@@ -59,8 +57,6 @@ struct Transform<platform::CPUPlace> {
void operator()(const DeviceContext& context, InputIter1 first1, void operator()(const DeviceContext& context, InputIter1 first1,
InputIter1 last1, InputIter2 first2, OutputIter result, InputIter1 last1, InputIter2 first2, OutputIter result,
BinaryOperation op) { BinaryOperation op) {
auto place = context.GetPlace();
PADDLE_ENFORCE(is_cpu_place(place), "It must use CPU place.");
std::transform(first1, last1, first2, result, op); std::transform(first1, last1, first2, result, op);
} }
}; };
......
...@@ -321,6 +321,11 @@ message ClipConfig { ...@@ -321,6 +321,11 @@ message ClipConfig {
required double max = 2; required double max = 2;
} }
message ScaleSubRegionConfig {
required ImageConfig image_conf = 1;
required float value = 2;
}
message LayerInputConfig { message LayerInputConfig {
required string input_layer_name = 1; required string input_layer_name = 1;
optional string input_parameter_name = 2; optional string input_parameter_name = 2;
...@@ -342,6 +347,7 @@ message LayerInputConfig { ...@@ -342,6 +347,7 @@ message LayerInputConfig {
optional MultiBoxLossConfig multibox_loss_conf = 16; optional MultiBoxLossConfig multibox_loss_conf = 16;
optional DetectionOutputConfig detection_output_conf = 17; optional DetectionOutputConfig detection_output_conf = 17;
optional ClipConfig clip_conf = 18; optional ClipConfig clip_conf = 18;
optional ScaleSubRegionConfig scale_sub_region_conf = 19;
} }
message LayerConfig { message LayerConfig {
......
...@@ -3801,6 +3801,25 @@ class SwitchOrderLayer(LayerBase): ...@@ -3801,6 +3801,25 @@ class SwitchOrderLayer(LayerBase):
self.config.reshape_conf.width_axis.extend(reshape['width']) self.config.reshape_conf.width_axis.extend(reshape['width'])
@config_layer('scale_sub_region')
class ScaleSubRegionLayer(LayerBase):
def __init__(self, name, inputs, value, **xargs):
super(ScaleSubRegionLayer, self).__init__(
name, 'scale_sub_region', 0, inputs=inputs, **xargs)
scale_sub_region_conf = self.config.inputs[0].scale_sub_region_conf
scale_sub_region_conf.value = value
# get channel, width and height from input_0 layer
input_layer = self.get_input_layer(0)
image_conf = scale_sub_region_conf.image_conf
image_conf.img_size = input_layer.width
image_conf.img_size_y = input_layer.height
image_conf.channels = input_layer.size / (input_layer.width *
input_layer.height)
self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size,
image_conf.channels)
# Deprecated, use a new layer specific class instead # Deprecated, use a new layer specific class instead
@config_func @config_func
def Layer(name, type, **xargs): def Layer(name, type, **xargs):
......
...@@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la ...@@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la
test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer
test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer) test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer test_scale_sub_region_layer)
export whole_configs=(test_split_datasource) export whole_configs=(test_split_datasource)
type: "nn"
layers {
name: "data"
type: "data"
size: 2016
active_type: ""
height: 48
width: 42
}
layers {
name: "indices"
type: "data"
size: 6
active_type: ""
}
layers {
name: "__scale_sub_region_0__"
type: "scale_sub_region"
size: 2016
active_type: ""
inputs {
input_layer_name: "data"
scale_sub_region_conf {
image_conf {
channels: 1
img_size: 42
img_size_y: 48
}
value: 0.0
}
}
inputs {
input_layer_name: "indices"
}
height: 48
width: 42
}
input_layer_names: "data"
input_layer_names: "indices"
output_layer_names: "__scale_sub_region_0__"
sub_models {
name: "root"
layer_names: "data"
layer_names: "indices"
layer_names: "__scale_sub_region_0__"
input_layer_names: "data"
input_layer_names: "indices"
output_layer_names: "__scale_sub_region_0__"
is_recurrent_layer_group: false
}
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
data = data_layer(name='data', size=2016, height=48, width=42)
indices = data_layer(name='indices', size=6)
scale_sub_region = scale_sub_region_layer(
input=data, indices=indices, value=0.0)
outputs(scale_sub_region)
...@@ -22,6 +22,7 @@ parse training set and test set into paddle reader creators. ...@@ -22,6 +22,7 @@ parse training set and test set into paddle reader creators.
import numpy as np import numpy as np
import os import os
import paddle.v2.dataset.common import paddle.v2.dataset.common
from paddle.v2.parameters import Parameters
__all__ = ['train', 'test'] __all__ = ['train', 'test']
...@@ -34,7 +35,8 @@ feature_names = [ ...@@ -34,7 +35,8 @@ feature_names = [
UCI_TRAIN_DATA = None UCI_TRAIN_DATA = None
UCI_TEST_DATA = None UCI_TEST_DATA = None
URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar'
MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b'
def feature_range(maximums, minimums): def feature_range(maximums, minimums):
import matplotlib import matplotlib
...@@ -111,6 +113,13 @@ def test(): ...@@ -111,6 +113,13 @@ def test():
return reader return reader
def model():
tar_file = paddle.v2.dataset.common.download(URL_MODEL, 'fit_a_line.tar',
MD5_MODEL)
with open(tar_file, 'r') as f:
parameters = Parameters.from_tar(f)
return parameters
def fetch(): def fetch():
paddle.v2.dataset.common.download(URL, 'uci_housing', MD5) paddle.v2.dataset.common.download(URL, 'uci_housing', MD5)
......
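A short usage sketch for the new model() helper; the paddle.init call is the usual v2 bootstrapping, not part of this diff:

import paddle.v2 as paddle
import paddle.v2.dataset.uci_housing as uci_housing

paddle.init(use_gpu=False, trainer_count=1)
parameters = uci_housing.model()   # downloads and unpacks fit_a_line.tar
# `parameters` can then be passed to paddle.infer together with a network.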
...@@ -87,7 +87,8 @@ def data(name, ...@@ -87,7 +87,8 @@ def data(name,
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
append_batch_size=True, append_batch_size=True,
main_program=None, main_program=None,
startup_program=None): startup_program=None,
stop_gradient=True):
helper = LayerHelper('data', **locals()) helper = LayerHelper('data', **locals())
shape = list(shape) shape = list(shape)
for i in xrange(len(shape)): for i in xrange(len(shape)):
...@@ -101,7 +102,11 @@ def data(name, ...@@ -101,7 +102,11 @@ def data(name,
shape = [-1] + shape # append batch size as -1 shape = [-1] + shape # append batch size as -1
return helper.create_global_variable( return helper.create_global_variable(
name=name, shape=shape, dtype=data_type, type=type, stop_gradient=True) name=name,
shape=shape,
dtype=data_type,
type=type,
stop_gradient=stop_gradient)
def _convert_(name): def _convert_(name):
...@@ -134,9 +139,7 @@ def _create_op_func_(op_type): ...@@ -134,9 +139,7 @@ def _create_op_func_(op_type):
o_name = not_intermediate_outputs[0].name o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs] intermediate_output_names = [output.name for output in intermediate_outputs]
def func(**kwargs): def infer_and_check_data_type(op_proto, **kwargs):
helper = LayerHelper(op_type, **kwargs)
inputs = dict()
dtype = None dtype = None
for ipt in op_proto.inputs: for ipt in op_proto.inputs:
name = _convert_(ipt.name) name = _convert_(ipt.name)
...@@ -153,6 +156,20 @@ def _create_op_func_(op_type): ...@@ -153,6 +156,20 @@ def _create_op_func_(op_type):
elif dtype != each.data_type: elif dtype != each.data_type:
raise ValueError( raise ValueError(
"operator {0} must input same dtype".format(op_type)) "operator {0} must input same dtype".format(op_type))
return dtype
def func(**kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_data_type(op_proto, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
inputs[ipt.name] = val inputs[ipt.name] = val
outputs = dict() outputs = dict()
...@@ -178,6 +195,20 @@ _create_op_func_('reshape') ...@@ -178,6 +195,20 @@ _create_op_func_('reshape')
_create_op_func_('elementwise_add') _create_op_func_('elementwise_add')
_create_op_func_('sigmoid') _create_op_func_('sigmoid')
_create_op_func_('scale') _create_op_func_('scale')
_create_op_func_('reshape')
_create_op_func_('transpose')
def fill_constant(data_type, shape, value=None, program=None):
helper = LayerHelper('fill_constant', **locals())
out = helper.create_tmp_variable(dtype=data_type)
helper.append_op(
type='fill_constant',
outputs={'Out': [out]},
attrs={'data_type': data_type,
'shape': shape,
'value': value})
return out
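fill_constant is used later in this commit to build the initial LSTM cell state; a minimal sketch:

import paddle.v2.framework.layers as layers

# An all-zero [batch_size, emb_dim] tensor, e.g. as c_pre_init for lstm().
c_pre_init = layers.fill_constant(
    data_type='float32', shape=[50, 32], value=0.0)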
def cast(x, data_type, main_program=None): def cast(x, data_type, main_program=None):
...@@ -414,9 +445,9 @@ def pool2d(input, ...@@ -414,9 +445,9 @@ def pool2d(input,
inputs={"X": input}, inputs={"X": input},
outputs={"Out": pool_out}, outputs={"Out": pool_out},
attrs={ attrs={
"poolingType": pool_type, "pooling_type": pool_type,
"ksize": pool_size, "ksize": pool_size,
"globalPooling": global_pooling, "global_pooling": global_pooling,
"strides": pool_stride, "strides": pool_stride,
"paddings": pool_padding "paddings": pool_padding
}) })
...@@ -762,6 +793,46 @@ class StaticRNN(object): ...@@ -762,6 +793,46 @@ class StaticRNN(object):
}) })
def lstm(x,
c_pre_init,
hidden_dim,
forget_bias=None,
main_program=None,
startup_program=None):
helper = LayerHelper('lstm_unit', **locals())
rnn = StaticRNN()
with rnn.step():
c_pre = rnn.memory(init=c_pre_init)
x_t = rnn.step_input(x)
before_fc = concat(
input=[x_t, c_pre],
axis=1,
main_program=main_program,
startup_program=startup_program)
after_fc = fc(input=before_fc,
size=hidden_dim * 4,
main_program=main_program,
startup_program=startup_program)
data_type = x.data_type
c = helper.create_tmp_variable(data_type)
h = helper.create_tmp_variable(data_type)
helper.append_op(
type='lstm_unit',
inputs={"X": after_fc,
"C_prev": c_pre},
outputs={"C": c,
"H": h},
attrs={"forget_bias": forget_bias})
rnn.update_memory(c_pre, c)
rnn.output(h)
return rnn()
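The sentiment test later in this commit shows the expected call pattern: the embedding is reshaped to [batch, seq_len, emb_dim], transposed to time-major, and fed together with a constant initial cell state. An excerpt following that test (emb, batch_size, seq_len, and emb_dim are defined there):

emb = layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
emb = layers.transpose(x=emb, axis=[1, 0, 2])  # to [seq_len, batch, emb_dim]
c_pre_init = layers.fill_constant(
    data_type=emb.data_type, shape=[batch_size, emb_dim], value=0.0)
layer_1_out = layers.lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)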
def lod_rank_table(x, level=0, main_program=None): def lod_rank_table(x, level=0, main_program=None):
helper = LayerHelper("lod_rank_table", **locals()) helper = LayerHelper("lod_rank_table", **locals())
table = helper.create_variable( table = helper.create_variable(
...@@ -779,7 +850,8 @@ def lod_tensor_to_array(x, table, main_program=None): ...@@ -779,7 +850,8 @@ def lod_tensor_to_array(x, table, main_program=None):
helper = LayerHelper("lod_tensor_to_array", **locals()) helper = LayerHelper("lod_tensor_to_array", **locals())
array = helper.create_variable( array = helper.create_variable(
name=unique_name("lod_tensor_to_array"), name=unique_name("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY) type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.data_type)
helper.append_op( helper.append_op(
type='lod_tensor_to_array', type='lod_tensor_to_array',
inputs={'X': x, inputs={'X': x,
...@@ -825,13 +897,13 @@ def zeros(shape, dtype, main_program=None): ...@@ -825,13 +897,13 @@ def zeros(shape, dtype, main_program=None):
def increment(x, value=1.0, main_program=None): def increment(x, value=1.0, main_program=None):
helper = LayerHelper("increment", **locals()) helper = LayerHelper("increment", **locals())
tmp = helper.create_tmp_variable(dtype=x.data_type) out = helper.create_tmp_variable(dtype=x.data_type)
helper.append_op( helper.append_op(
type='increment', type='increment',
inputs={'X': [x]}, inputs={'X': [x]},
outputs={'Out': [tmp]}, outputs={'Out': [out]},
attrs={'step': value}) attrs={'step': value})
return tmp return out
def array_write(x, i, array=None, main_program=None): def array_write(x, i, array=None, main_program=None):
...@@ -862,3 +934,16 @@ def array_read(array, i, main_program=None): ...@@ -862,3 +934,16 @@ def array_read(array, i, main_program=None):
'I': [i]}, 'I': [i]},
outputs={'Out': [out]}) outputs={'Out': [out]})
return out return out
def shrink_memory(x, i, table, main_program=None):
helper = LayerHelper('shrink_memory', **locals())
out = helper.create_tmp_variable(dtype=x.data_type)
helper.append_op(
type='shrink_rnn_memory',
inputs={'X': [x],
'I': [i],
'RankTable': [table]},
outputs={'Out': [out]},
attrs={})
return out
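shrink_rnn_memory trims the leading (batch) dimension of a memory tensor as shorter sequences finish. The unit test later in this commit drives it like this, given a data var x with lod [[0, 2, 5, 6]], which yields batches of 3, then 2, then 1 rows:

table = layers.lod_rank_table(x=x)
i = layers.zeros(dtype='int64', shape=[1])
mem1 = layers.shrink_memory(x=x, i=i, table=table)
i = layers.increment(x=i)
mem2 = layers.shrink_memory(x=mem1, i=i, table=table)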
...@@ -26,5 +26,4 @@ class TestAccuracyOp(OpTest): ...@@ -26,5 +26,4 @@ class TestAccuracyOp(OpTest):
if __name__ == '__main__': if __name__ == '__main__':
exit(0)
unittest.main() unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestClipByNormOp(OpTest):
def setUp(self):
self.max_relative_error = 0.006
self.initTestCase()
input = np.random.random(self.shape).astype("float32")
input[np.abs(input) < self.max_relative_error] = 0.5
self.op_type = "clip_by_norm"
self.inputs = {'X': input, }
self.attrs = {}
self.attrs['max_norm'] = self.max_norm
norm = np.sqrt(np.sum(np.square(input)))
if norm > self.max_norm:
output = self.max_norm * input / norm
else:
output = input
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def initTestCase(self):
self.shape = (100, )
self.max_norm = 1.0
class TestCase1(TestClipByNormOp):
def initTestCase(self):
self.shape = (100, )
self.max_norm = 1e20
class TestCase2(TestClipByNormOp):
def initTestCase(self):
self.shape = (16, 16)
self.max_norm = 0.1
class TestCase3(TestClipByNormOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max_norm = 1.0
if __name__ == '__main__':
unittest.main()
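All four cases above exercise the same rule; written out, the reference computation is

\[ Out = X \cdot \frac{max\_norm}{\max(\lVert X \rVert_2,\ max\_norm)} \]

so the output equals the input whenever its L2 norm already fits within max_norm.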
...@@ -4,6 +4,7 @@ import numpy ...@@ -4,6 +4,7 @@ import numpy
import paddle.v2.framework.layers as layers import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program from paddle.v2.framework.framework import Program
from paddle.v2.framework.executor import Executor from paddle.v2.framework.executor import Executor
from paddle.v2.framework.backward import append_backward_ops
class TestCPULoDTensorArrayOps(unittest.TestCase): class TestCPULoDTensorArrayOps(unittest.TestCase):
...@@ -123,5 +124,42 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -123,5 +124,42 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
self.assertEqual(actual.lod(), expect.lod()) self.assertEqual(actual.lod(), expect.lod())
class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
def test_grad(self):
place = core.CPUPlace()
program = Program()
x = layers.data(
name='x',
shape=[1],
data_type='float32',
main_program=program,
stop_gradient=False)
table = layers.lod_rank_table(x, level=0, main_program=program)
array = layers.lod_tensor_to_array(x, table, main_program=program)
result = layers.array_to_lod_tensor(array, table, main_program=program)
mean = layers.mean(x=result, main_program=program)
append_backward_ops(mean)
tensor = core.LoDTensor()
tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_lod([[0, 3, 9, 10]])
g_vars = program.global_block().var(x.name + "@GRAD")
exe = Executor(place)
g_out = [
item.sum()
for item in map(
numpy.array,
exe.run(program, feed={'x': tensor}, fetch_list=[g_vars]))
]
g_out_sum = numpy.array(g_out).sum()
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -61,8 +61,8 @@ class TestPool2d_Op(OpTest): ...@@ -61,8 +61,8 @@ class TestPool2d_Op(OpTest):
'strides': self.strides, 'strides': self.strides,
'paddings': self.paddings, 'paddings': self.paddings,
'ksize': self.ksize, 'ksize': self.ksize,
'poolingType': self.pool_type, 'pooling_type': self.pool_type,
'globalPooling': self.global_pool, 'global_pooling': self.global_pool,
} }
self.outputs = {'Out': output.astype('float32')} self.outputs = {'Out': output.astype('float32')}
......
...@@ -67,8 +67,8 @@ class TestPool3d_Op(OpTest): ...@@ -67,8 +67,8 @@ class TestPool3d_Op(OpTest):
'strides': self.strides, 'strides': self.strides,
'paddings': self.paddings, 'paddings': self.paddings,
'ksize': self.ksize, 'ksize': self.ksize,
'poolingType': self.pool_type, 'pooling_type': self.pool_type,
'globalPooling': self.global_pool, 'global_pooling': self.global_pool,
} }
self.outputs = {'Out': output.astype('float32')} self.outputs = {'Out': output.astype('float32')}
......
...@@ -86,7 +86,7 @@ class TestMaxPoolWithIndex_Op(OpTest): ...@@ -86,7 +86,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
'strides': self.strides, 'strides': self.strides,
'paddings': self.paddings, 'paddings': self.paddings,
'ksize': self.ksize, 'ksize': self.ksize,
'globalPooling': self.global_pool, 'global_pooling': self.global_pool,
} }
self.inputs = {'X': input} self.inputs = {'X': input}
......
import unittest
import paddle.v2.framework.core as core
from paddle.v2.framework.executor import Executor
import paddle.v2.framework.layers as layers
from paddle.v2.framework.backward import append_backward_ops
from paddle.v2.framework.framework import g_main_program
import numpy
class TestShrinkRNNMemory(unittest.TestCase):
def test_shrink_rnn_memory(self):
x = layers.data('x', shape=[100], data_type='float32')
x.stop_gradient = False
table = layers.lod_rank_table(x=x)
i = layers.zeros(dtype='int64', shape=[1])
mem1 = layers.shrink_memory(x=x, i=i, table=table)
i = layers.increment(x=i)
i.stop_gradient = True
mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
i = layers.increment(x=i)
i.stop_gradient = True
mem3 = layers.shrink_memory(x=mem2, i=i, table=table)
cpu = core.CPUPlace()
tensor = core.LoDTensor()
tensor.set_lod([[0, 2, 5, 6]])
tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
tensor.set(tensor_np, cpu)
exe = Executor(cpu)
outs = map(numpy.array,
exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3]))
self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))
mem3_mean = layers.mean(x=mem3)
append_backward_ops(loss=mem3_mean)
x_grad = map(numpy.array,
exe.run(feed={'x': tensor},
fetch_list=[
g_main_program.global_block().var('x@GRAD')
]))[0]
self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
if __name__ == '__main__':
unittest.main()
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import g_main_program, g_startup_program
from paddle.v2.framework.executor import Executor
import numpy as np
def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
data = layers.data(
name="words",
shape=[seq_len * batch_size, 1],
append_batch_size=False,
data_type="int64")
label = layers.data(
name="label",
shape=[batch_size, 1],
append_batch_size=False,
data_type="int64")
emb = layers.embedding(input=data, size=[dict_dim, emb_dim])
emb = layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
emb = layers.transpose(x=emb, axis=[1, 0, 2])
c_pre_init = layers.fill_constant(
        data_type=emb.data_type, shape=[batch_size, emb_dim], value=0.0)
layer_1_out = layers.lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)
layer_1_out = layers.transpose(x=layer_1_out, axis=[1, 0, 2])
prediction = layers.fc(input=layer_1_out, size=class_dim, act="softmax")
cost = layers.cross_entropy(input=prediction, label=label)
avg_cost = layers.mean(x=cost)
adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
opts = adam_optimizer.minimize(avg_cost)
acc = layers.accuracy(input=prediction, label=label)
return avg_cost, acc
def to_lodtensor(data, place):
seq_lens = [len(seq) for seq in data]
cur_len = 0
lod = [cur_len]
for l in seq_lens:
cur_len += l
lod.append(cur_len)
flattened_data = np.concatenate(data, axis=0).astype("int64")
flattened_data = flattened_data.reshape([len(flattened_data), 1])
res = core.LoDTensor()
res.set(flattened_data, place)
res.set_lod([lod])
return res
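to_lodtensor flattens a batch of variable-length sequences into one (N, 1) int64 tensor plus cumulative lod offsets. For example, with hypothetical data:

# Two sequences of lengths 2 and 3 become a (5, 1) tensor
# with lod [[0, 2, 5]].
t = to_lodtensor([[1, 2], [3, 4, 5]], core.CPUPlace())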
def chop_data(data, chop_len=80, batch_len=50):
data = [(x[0][:chop_len], x[1]) for x in data if len(x[0]) >= chop_len]
return data[:batch_len]
def prepare_feed_data(data, place):
tensor_words = to_lodtensor(map(lambda x: x[0], data), place)
label = np.array(map(lambda x: x[1], data)).astype("int64")
label = label.reshape([50, 1])
tensor_label = core.LoDTensor()
tensor_label.set(label, place)
return tensor_words, tensor_label
def main():
word_dict = paddle.dataset.imdb.word_dict()
cost, acc = lstm_net(dict_dim=len(word_dict), class_dim=2)
batch_size = 100
train_data = paddle.batch(
paddle.reader.buffered(
paddle.dataset.imdb.train(word_dict), size=batch_size * 10),
batch_size=batch_size)
data = chop_data(next(train_data()))
place = core.CPUPlace()
tensor_words, tensor_label = prepare_feed_data(data, place)
exe = Executor(place)
exe.run(g_startup_program)
while True:
outs = exe.run(g_main_program,
feed={"words": tensor_words,
"label": tensor_label},
fetch_list=[cost, acc])
cost_val = np.array(outs[0])
acc_val = np.array(outs[1])
print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if acc_val > 0.9:
break
if __name__ == '__main__':
main()
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
import os
import tarfile
import cPickle
__all__ = [
"load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop",
"random_crop", "left_right_flip", "simple_transform", "load_and_transform",
"batch_images_from_tar"
]
""" """
This file contains some common interfaces for image preprocess. This file contains some common interfaces for image preprocess.
Many users are confused about the image layout. We introduce Many users are confused about the image layout. We introduce
the image layout as follows. the image layout as follows.
- CHW Layout - CHW Layout
- The abbreviations: C=channel, H=Height, W=Width - The abbreviations: C=channel, H=Height, W=Width
- The default layout of image opened by cv2 or PIL is HWC. - The default layout of image opened by cv2 or PIL is HWC.
PaddlePaddle only supports the CHW layout. And CHW is simply PaddlePaddle only supports the CHW layout. And CHW is simply
a transpose of HWC, so the input image must be transposed. a transpose of HWC, so the input image must be transposed.
- Color format: RGB or BGR - Color format: RGB or BGR
OpenCV uses the BGR color format, while PIL uses RGB. Both OpenCV uses the BGR color format, while PIL uses RGB. Both
formats can be used for training. Note that the format should formats can be used for training. Note that the format should
be kept consistent between training and inference. be kept consistent between training and inference.
""" """
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
import os
import tarfile
import cPickle
__all__ = [
"load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop",
"random_crop", "left_right_flip", "simple_transform", "load_and_transform",
"batch_images_from_tar"
]
def batch_images_from_tar(data_file, def batch_images_from_tar(data_file,
...@@ -36,17 +38,18 @@ def batch_images_from_tar(data_file, ...@@ -36,17 +38,18 @@ def batch_images_from_tar(data_file,
num_per_batch=1024): num_per_batch=1024):
""" """
Read images from a tar file and batch them into batch files. Read images from a tar file and batch them into batch files.
param data_file: path of image tar file
type data_file: string :param data_file: path of image tar file
param dataset_name: 'train','test' or 'valid' :type data_file: string
type dataset_name: string :param dataset_name: 'train','test' or 'valid'
param img2label: a dic with image file name as key :type dataset_name: string
:param img2label: a dict with image file name as key
and image's label as value and image's label as value
type img2label: dic :type img2label: dict
param num_per_batch: image number per batch file :param num_per_batch: image number per batch file
type num_per_batch: int :type num_per_batch: int
return: path of list file containing paths of batch file :return: path of list file containing paths of batch file
rtype: string :rtype: string
""" """
batch_dir = data_file + "_batch" batch_dir = data_file + "_batch"
out_path = "%s/%s" % (batch_dir, dataset_name) out_path = "%s/%s" % (batch_dir, dataset_name)
...@@ -99,14 +102,16 @@ def load_image_bytes(bytes, is_color=True): ...@@ -99,14 +102,16 @@ def load_image_bytes(bytes, is_color=True):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
with open('cat.jpg') as f: with open('cat.jpg') as f:
im = load_image_bytes(f.read()) im = load_image_bytes(f.read())
:param bytes: the input image bytes array. :param bytes: the input image bytes array.
:type file: str :type bytes: str
:param is_color: If set is_color True, it will load and :param is_color: If set is_color True, it will load and
return a color image. Otherwise, it will return a color image. Otherwise, it will
load and return a gray image. load and return a gray image.
:type is_color: bool
""" """
flag = 1 if is_color else 0 flag = 1 if is_color else 0
file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8) file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8)
...@@ -121,6 +126,7 @@ def load_image(file, is_color=True): ...@@ -121,6 +126,7 @@ def load_image(file, is_color=True):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = load_image('cat.jpg') im = load_image('cat.jpg')
:param file: the input image path. :param file: the input image path.
...@@ -128,6 +134,7 @@ def load_image(file, is_color=True): ...@@ -128,6 +134,7 @@ def load_image(file, is_color=True):
:param is_color: If set is_color True, it will load and :param is_color: If set is_color True, it will load and
return a color image. Otherwise, it will return a color image. Otherwise, it will
load and return a gray image. load and return a gray image.
:type is_color: bool
""" """
# cv2.IMAGE_COLOR for OpenCV3 # cv2.IMAGE_COLOR for OpenCV3
# cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version
...@@ -147,6 +154,7 @@ def resize_short(im, size): ...@@ -147,6 +154,7 @@ def resize_short(im, size):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = load_image('cat.jpg') im = load_image('cat.jpg')
im = resize_short(im, 256) im = resize_short(im, 256)
...@@ -175,6 +183,7 @@ def to_chw(im, order=(2, 0, 1)): ...@@ -175,6 +183,7 @@ def to_chw(im, order=(2, 0, 1)):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = load_image('cat.jpg') im = load_image('cat.jpg')
im = resize_short(im, 256) im = resize_short(im, 256)
im = to_chw(im) im = to_chw(im)
...@@ -196,6 +205,7 @@ def center_crop(im, size, is_color=True): ...@@ -196,6 +205,7 @@ def center_crop(im, size, is_color=True):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = center_crop(im, 224) im = center_crop(im, 224)
:param im: the input image with HWC layout. :param im: the input image with HWC layout.
...@@ -223,6 +233,7 @@ def random_crop(im, size, is_color=True): ...@@ -223,6 +233,7 @@ def random_crop(im, size, is_color=True):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = random_crop(im, 224) im = random_crop(im, 224)
:param im: the input image with HWC layout. :param im: the input image with HWC layout.
...@@ -251,6 +262,7 @@ def left_right_flip(im): ...@@ -251,6 +262,7 @@ def left_right_flip(im):
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = left_right_flip(im) im = left_right_flip(im)
:param im: input image with HWC layout :param im: input image with HWC layout
...@@ -275,6 +287,7 @@ def simple_transform(im, ...@@ -275,6 +287,7 @@ def simple_transform(im,
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = simple_transform(im, 256, 224, True) im = simple_transform(im, 256, 224, True)
:param im: The input image with HWC layout. :param im: The input image with HWC layout.
...@@ -285,6 +298,11 @@ def simple_transform(im, ...@@ -285,6 +298,11 @@ def simple_transform(im,
:type crop_size: int :type crop_size: int
:param is_train: Whether it is training or not. :param is_train: Whether it is training or not.
:type is_train: bool :type is_train: bool
:param is_color: whether the image is color or not.
:type is_color: bool
:param mean: the mean values, which can be element-wise mean values or
mean values per channel.
:type mean: numpy array | list
""" """
im = resize_short(im, resize_size) im = resize_short(im, resize_size)
if is_train: if is_train:
...@@ -324,6 +342,7 @@ def load_and_transform(filename, ...@@ -324,6 +342,7 @@ def load_and_transform(filename,
Example usage: Example usage:
.. code-block:: python .. code-block:: python
im = load_and_transform('cat.jpg', 256, 224, True) im = load_and_transform('cat.jpg', 256, 224, True)
:param filename: The file name of input image. :param filename: The file name of input image.
...@@ -334,6 +353,11 @@ def load_and_transform(filename, ...@@ -334,6 +353,11 @@ def load_and_transform(filename,
:type crop_size: int :type crop_size: int
:param is_train: Whether it is training or not. :param is_train: Whether it is training or not.
:type is_train: bool :type is_train: bool
:param is_color: whether the image is color or not.
:type is_color: bool
:param mean: the mean values, which can be element-wise mean values or
mean values per channel.
:type mean: numpy array | list
""" """
im = load_image(filename) im = load_image(filename)
im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean) im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean)
......