Commit e59ca752 authored by zchen0211

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop

# Prune
## Motivation
We want to support running inference, training and checkpointing from a single `ProgramDesc`. To do so we implement
the function `void Prune(const ProgramDesc& input, ProgramDesc& output)`, which takes a `ProgramDesc`
and generates a pruned `ProgramDesc` that contains only the operators needed to evaluate the given targets.
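Calling it looks roughly as follows; this is a minimal sketch mirroring `prune_test.cc` later in this commit, and the choice of which op to mark as a target is purely illustrative.
```c++
// Minimal usage sketch, mirroring prune_test.cc later in this commit.
// Building the program and picking op 0 as the target are illustrative.
paddle::framework::ProgramDescBind program;  // ... add vars and ops here ...
paddle::framework::ProgramDesc* whole = program.Proto();
whole->mutable_blocks(0)->mutable_ops(0)->set_is_target(true);

paddle::framework::ProgramDesc pruned;
paddle::framework::Prune(*whole, pruned);  // `pruned` keeps only the ops the target needs
```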
## Challenge
Pruning needs to support both variables and operators as evaluation targets. Consider the following
situations.
```python
# Case 1: run the forward pass.
cost_np = session.run(target=cost)
# Case 2: run the backward pass.
opts_np, _ = session.run(target=[cost, opt])
# Case 3: run checkpointing
_ = session.run(target=checkpoint)
```
## Solution
To support evaluation of operators, we add an `is_target` field to `OpDesc`.
```c++
message OpDesc {
  required string type = 3;
  repeated Var inputs = 1;
  repeated Var outputs = 2;
  repeated Attr attrs = 4;
  optional bool is_target = 5 [ default = false ];
};
```
To support evaluation of variables, we add a [fetch_op](https://github.com/PaddlePaddle/Paddle/pull/4599).
For each variable in `target`, we insert a `fetch_op` into the `ProgramDesc` with the variable as the
`fetch_op`'s input, and mark the `fetch_op` itself as a target.
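As an illustration, the snippet below sketches how a caller might append a `fetch_op` for a target variable and then mark that op as a target, using the `ProgramDescBind`/`BlockDescBind` API and the `fetch` operator that appear later in this commit. The helper name `AddFetchTarget` and the output variable name are hypothetical, not part of the framework.
```c++
#include <string>
#include <vector>
#include "paddle/framework/block_desc.h"
#include "paddle/framework/op_desc.h"
#include "paddle/framework/program_desc.h"

namespace framework = paddle::framework;

// Hypothetical helper (not part of this commit): append a fetch_op that reads
// `var_name` and mark that op as a pruning target.
void AddFetchTarget(framework::ProgramDescBind& program,
                    const std::string& var_name, int col) {
  auto* block = program.Block(0);  // global block
  auto* op = block->AppendOp();
  op->SetType("fetch");
  op->SetInput("X", {var_name});                  // the variable to evaluate
  op->SetOutput("Out", {var_name + ".fetched"});  // illustrative output name
  op->SetAttr("col", col);                        // column in the fetch list
  // Set the proto-level is_target flag that the pruning pass reads.
  auto* pdesc = program.Proto();
  int last = pdesc->blocks(0).ops_size() - 1;
  pdesc->mutable_blocks(0)->mutable_ops(last)->set_is_target(true);
}
```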
### Algorithm
If an operator needs to be run, it must fall into one of the following cases:
1. It is a target.
2. Some other operator depends on it, i.e., its output is another operator's input.
The first case can be checked with `op_desc.is_target()`. The second case can be implemented as
```c++
bool HasDependentVar(const OpDesc& op_desc,
                     const std::set<std::string>& dependent_vars) {
  for (auto& var : op_desc.outputs()) {
    for (auto& argu : var.arguments()) {
      if (dependent_vars.count(argu) != 0) {
        return true;
      }
    }
  }
  return false;
}
```
Then the whole algorithm can be implemented as the following [code](https://github.com/tonyyang-svail/Paddle/blob/prune_impl/paddle/framework/prune.cc).
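The core of that implementation (the full `prune.cc` is included later in this commit) is a single reverse sweep over the block's operators; `ops`, `IsTarget`, and `HasDependentVar` are as defined in that file.
```c++
// Condensed from prune.cc (included in full later in this commit):
// sweep the ops from back to front, keeping targets and anything a kept op reads.
std::set<std::string> dependent_vars;
std::vector<bool> should_run;
for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) {
  auto& op_desc = *op_iter;
  if (IsTarget(op_desc) || HasDependentVar(op_desc, dependent_vars)) {
    // Keep this op and record its inputs as new dependencies.
    for (auto& var : op_desc.inputs()) {
      for (auto& argu : var.arguments()) {
        dependent_vars.insert(argu);
      }
    }
    should_run.push_back(true);
  } else {
    should_run.push_back(false);
  }
}
// The sweep visited the ops back to front, so flip the result before copying
// the marked ops into the output ProgramDesc.
std::reverse(should_run.begin(), should_run.end());
```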
...@@ -177,9 +177,6 @@ REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, grad_op_class)
REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
```
### USE Macros
Make sure the registration process is executed and linked.
---
# Registration Process
1. Write an Op class and its gradient Op class, if required.
...@@ -188,8 +185,6 @@ Make sure the registration process is executed and linked.
1. Call maker class to complete `proto` and `checker`
2. Using the completed `proto` and `checker`, it will add a new key-value pair to the `OpInfoMap`
4. Invoke the `USE` macro in which the Op is used to make sure that it is linked.
---
# Backward Module (1/2)
### Create Backward Operator
......
import gzip
import math
import paddle.v2 as paddle
embsize = 32
hiddensize = 256
N = 5
def wordemb(inlayer):
wordemb = paddle.layer.embedding(
input=inlayer,
size=embsize,
param_attr=paddle.attr.Param(
name="_proj",
initial_std=0.001,
learning_rate=1,
l2_rate=0,
sparse_update=True))
return wordemb
def main():
# for local training
cluster_train = False
if not cluster_train:
paddle.init(use_gpu=False, trainer_count=1)
else:
paddle.init(
use_gpu=False,
trainer_count=2,
port=7164,
ports_num=1,
ports_num_for_sparse=1,
num_gradient_servers=1)
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
firstword = paddle.layer.data(
name="firstw", type=paddle.data_type.integer_value(dict_size))
secondword = paddle.layer.data(
name="secondw", type=paddle.data_type.integer_value(dict_size))
thirdword = paddle.layer.data(
name="thirdw", type=paddle.data_type.integer_value(dict_size))
fourthword = paddle.layer.data(
name="fourthw", type=paddle.data_type.integer_value(dict_size))
nextword = paddle.layer.data(
name="fifthw", type=paddle.data_type.integer_value(dict_size))
Efirst = wordemb(firstword)
Esecond = wordemb(secondword)
Ethird = wordemb(thirdword)
Efourth = wordemb(fourthword)
contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
hidden1 = paddle.layer.fc(input=contextemb,
size=hiddensize,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embsize * 8),
learning_rate=1))
predictword = paddle.layer.fc(input=hidden1,
size=dict_size,
bias_attr=paddle.attr.Param(learning_rate=2),
act=paddle.activation.Softmax())
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
with gzip.open("batch-" + str(event.batch_id) + ".tar.gz",
'w') as f:
trainer.save_parameter_to_tar(f)
result = trainer.test(
paddle.batch(
paddle.dataset.imikolov.test(word_dict, N), 32))
print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics,
result.metrics)
cost = paddle.layer.classification_cost(input=predictword, label=nextword)
parameters = paddle.parameters.create(cost)
adagrad = paddle.optimizer.AdaGrad(
learning_rate=3e-3,
regularization=paddle.optimizer.L2Regularization(8e-4))
trainer = paddle.trainer.SGD(cost,
parameters,
adagrad,
is_local=not cluster_train)
trainer.train(
paddle.batch(paddle.dataset.imikolov.train(word_dict, N), 32),
num_passes=30,
event_handler=event_handler)
if __name__ == '__main__':
main()
import math
import os
import paddle.v2 as paddle
import pickle
embsize = 32
hiddensize = 256
N = 5
cluster_train_file = "./train_data_dir/train/train.txt"
cluster_test_file = "./test_data_dir/test/test.txt"
node_id = os.getenv("OMPI_COMM_WORLD_RANK")
if not node_id:
    raise EnvironmentError("must provide OMPI_COMM_WORLD_RANK")
def wordemb(inlayer):
wordemb = paddle.layer.embedding(
input=inlayer,
size=embsize,
param_attr=paddle.attr.Param(
name="_proj",
initial_std=0.001,
learning_rate=1,
l2_rate=0,
sparse_update=True))
return wordemb
def cluster_reader_cluster(filename, node_id):
def cluster_reader():
with open("-".join([filename, "%05d" % int(node_id)]), "r") as f:
for l in f:
csv_data = [int(cell) for cell in l.split(",")]
yield tuple(csv_data)
return cluster_reader
def main():
# get arguments from env
# for local training
TRUTH = ["true", "True", "TRUE", "1", "yes", "Yes", "YES"]
cluster_train = os.getenv('PADDLE_CLUSTER_TRAIN', "False") in TRUTH
use_gpu = os.getenv('PADDLE_INIT_USE_GPU', "False")
if not cluster_train:
paddle.init(
use_gpu=use_gpu,
trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")))
else:
paddle.init(
use_gpu=use_gpu,
trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")),
port=int(os.getenv("PADDLE_INIT_PORT", "7164")),
ports_num=int(os.getenv("PADDLE_INIT_PORTS_NUM", "1")),
ports_num_for_sparse=int(
os.getenv("PADDLE_INIT_PORTS_NUM_FOR_SPARSE", "1")),
num_gradient_servers=int(
os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "1")),
trainer_id=int(os.getenv("PADDLE_INIT_TRAINER_ID", "0")),
pservers=os.getenv("PADDLE_INIT_PSERVERS", "127.0.0.1"))
fn = open("thirdparty/wuyi_train_thdpty/word_dict.pickle", "r")
word_dict = pickle.load(fn)
fn.close()
dict_size = len(word_dict)
firstword = paddle.layer.data(
name="firstw", type=paddle.data_type.integer_value(dict_size))
secondword = paddle.layer.data(
name="secondw", type=paddle.data_type.integer_value(dict_size))
thirdword = paddle.layer.data(
name="thirdw", type=paddle.data_type.integer_value(dict_size))
fourthword = paddle.layer.data(
name="fourthw", type=paddle.data_type.integer_value(dict_size))
nextword = paddle.layer.data(
name="fifthw", type=paddle.data_type.integer_value(dict_size))
Efirst = wordemb(firstword)
Esecond = wordemb(secondword)
Ethird = wordemb(thirdword)
Efourth = wordemb(fourthword)
contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
hidden1 = paddle.layer.fc(input=contextemb,
size=hiddensize,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embsize * 8),
learning_rate=1))
predictword = paddle.layer.fc(input=hidden1,
size=dict_size,
bias_attr=paddle.attr.Param(learning_rate=2),
act=paddle.activation.Softmax())
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
result = trainer.test(
paddle.batch(
cluster_reader_cluster(cluster_test_file, node_id), 32))
print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics,
result.metrics)
cost = paddle.layer.classification_cost(input=predictword, label=nextword)
parameters = paddle.parameters.create(cost)
adagrad = paddle.optimizer.AdaGrad(
learning_rate=3e-3,
regularization=paddle.optimizer.L2Regularization(8e-4))
trainer = paddle.trainer.SGD(cost,
parameters,
adagrad,
is_local=not cluster_train)
trainer.train(
paddle.batch(cluster_reader_cluster(cluster_train_file, node_id), 32),
num_passes=30,
event_handler=event_handler)
if __name__ == '__main__':
main()
import paddle.v2 as paddle
import tarfile
import os
import pickle
SPLIT_COUNT = 3
N = 5
def file_len(fd):
for i, l in enumerate(fd):
pass
return i + 1
def split_from_reader_by_line(filename, reader, split_count):
fn = open(filename, "w")
for batch_id, batch_data in enumerate(reader()):
batch_data_str = [str(d) for d in batch_data]
fn.write(",".join(batch_data_str))
fn.write("\n")
fn.close()
fn = open(filename, "r")
total_line_count = file_len(fn)
fn.close()
per_file_lines = total_line_count / split_count + 1
cmd = "split -d -a 5 -l %d %s %s-" % (per_file_lines, filename, filename)
os.system(cmd)
word_dict = paddle.dataset.imikolov.build_dict()
with open("word_dict.pickle", "w") as dict_f:
pickle.dump(word_dict, dict_f)
split_from_reader_by_line("train.txt",
paddle.dataset.imikolov.train(word_dict, N),
SPLIT_COUNT)
split_from_reader_by_line("test.txt",
paddle.dataset.imikolov.test(word_dict, N),
SPLIT_COUNT)
...@@ -20,13 +20,14 @@ proto_library(framework_proto SRCS framework.proto)
cc_library(attribute SRCS attribute.cc DEPS framework_proto)
cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute ddim op_info)
cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc)
cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute)
cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker)
cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto)
cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc glog)
cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator glog)
cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
py_proto_compile(framework_py_proto SRCS framework.proto)
...@@ -42,7 +43,10 @@ add_custom_command(TARGET framework_py_proto POST_BUILD
cc_library(backward SRCS backward.cc DEPS net_op)
cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context)
cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward glog)
cc_library(prune SRCS prune.cc DEPS framework_proto)
cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context)
cc_library(tensor_array SRCS tensor_array.cc DEPS lod_tensor)
cc_test(tensor_array_test SRCS tensor_array_test.cc DEPS tensor_array place)
......
...@@ -107,6 +107,19 @@ BlockDesc *BlockDescBind::Proto() {
  Flush();
  return desc_;
}
BlockDescBind::BlockDescBind(const BlockDescBind &other, BlockDesc *desc,
ProgramDescBind *prog)
: prog_(prog), desc_(desc) {
need_update_ = true;
for (auto &op : other.ops_) {
ops_.emplace_back(new OpDescBind(*op));
}
for (auto &it : other.vars_) {
auto *var = new VarDescBind(*it.second);
vars_[it.first].reset(var);
}
}
void BlockDescBind::ClearPBOps() {
  auto ops = this->desc_->mutable_ops();
......
...@@ -16,8 +16,10 @@ limitations under the License. */
#include <deque>
#include <memory>
#include <set>
#include <unordered_map>
#include <vector>
#include "paddle/framework/op_desc.h"
#include "paddle/framework/var_desc.h"
#include "paddle/platform/macros.h"
...@@ -36,6 +38,9 @@ class BlockDescBind {
  BlockDescBind(ProgramDescBind *prog, BlockDesc *desc)
      : prog_(prog), desc_(desc), need_update_(false) {}
BlockDescBind(const BlockDescBind &other, BlockDesc *desc,
ProgramDescBind *prog);
  ~BlockDescBind() {
    this->ClearPBVars();
    this->ClearPBOps();
...@@ -51,6 +56,14 @@ class BlockDescBind {
  bool HasVar(const std::string &var_name) const;
std::set<std::string> LocalVarNames() const {
std::set<std::string> var_names;
for (auto &var : vars_) {
var_names.insert(var.first);
}
return var_names;
}
  std::vector<VarDescBind *> AllVars() const;
  BlockDescBind *ParentBlock() const;
......
...@@ -68,9 +68,13 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
  for (auto& var : block.vars()) {
    if (var.persistable()) {
      auto* ptr = scope->Var(var.name());
      VLOG(3) << "Create Variable " << var.name()
              << " global, which pointer is " << ptr;
    } else {
      auto* ptr = local_scope.Var(var.name());
      VLOG(3) << "Create Variable " << var.name()
              << " locally, which pointer is " << ptr;
    }
  }
......
...@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "glog/logging.h"
#include "paddle/framework/feed_fetch_type.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/variable.h"
...@@ -24,6 +26,7 @@ void SetFeedVariable(const LoDTensor& input, const std::string& var_name,
                     size_t index) {
  // If var_name Variable is not found in GlobalScope, a new variable will
  // be created.
  VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index;
  Variable* g_feed_value = GetGlobalScope().Var(var_name);
  auto& feed_inputs =
      *(g_feed_value->GetMutable<std::vector<paddle::framework::LoDTensor>>());
...@@ -40,10 +43,15 @@ LoDTensor& GetFetchVariable(const std::string& var_name, size_t index) {
  // Since we want to fetch LodTensor from a variable, the variable must
  // be created already.
  Variable* g_fetch_value = GetGlobalScope().FindVar(var_name);
  PADDLE_ENFORCE(g_fetch_value->IsType<FeedFetchList>(),
                 "Only %s can be invoked by GetFetchVariable",
                 typeid(FeedFetchList).name());
  auto& fetch_outputs = *g_fetch_value->GetMutable<FeedFetchList>();
  auto& tensor = fetch_outputs[index];
  VLOG(3) << "Fetch " << var_name << " with index " << index
          << " shape= " << tensor.dims();
  PADDLE_ENFORCE_LT(index, fetch_outputs.size());
  return tensor;
}
} // namespace framework
......
...@@ -55,6 +55,7 @@ message OpDesc {
  repeated Var inputs = 1;
  repeated Var outputs = 2;
  repeated Attr attrs = 4;
  optional bool is_target = 5 [ default = false ];
};
// OpProto describes a C++ framework::OperatorBase derived class.
...@@ -111,6 +112,8 @@ message VarDesc {
  enum VarType {
    LOD_TENSOR = 1;
    SELECTED_ROWS = 2;
    FEED_MINIBATCH = 3;
    FETCH_LIST = 4;
  }
  required string name = 1;
  required VarType type = 2;
......
...@@ -74,12 +74,12 @@ class LoDTensor : public Tensor {
  LoD lod() const { return lod_; }
  /*
   * Get the start offset and end offset of an element from LoD.
   */
  std::pair<size_t, size_t> lod_element(size_t level, size_t elem) const {
    PADDLE_ENFORCE_LT(level, NumLevels());
    PADDLE_ENFORCE_LT(elem, NumElements(level));
    return std::make_pair((lod_)[level][elem], (lod_)[level][elem + 1]);
  }
  /*
......
...@@ -36,8 +36,8 @@ TEST(LoDTensor, LoDInGPU) {
  lod_tensor.mutable_data<float>(place);
  lod_tensor.set_lod(src_lod);
  CHECK_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
  CHECK_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
  auto lod = lod_tensor.lod();
......
...@@ -20,6 +20,8 @@ limitations under the License. */
#include <typeinfo>
#include <unordered_map>
#include <unordered_set>
#include "glog/logging.h" // For VLOG()
#include "paddle/framework/attribute.h"
#include "paddle/framework/details/op_registry.h"
#include "paddle/framework/framework.pb.h"
......
...@@ -20,12 +20,13 @@ limitations under the License. */
#include <unordered_map>
#include <vector>
#include "glog/logging.h" // For VLOG
#include "paddle/framework/attribute.h"
#include "paddle/framework/block_desc.h"
#include "paddle/framework/data_type.h"
#include "paddle/framework/framework.pb.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/op_info.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/shape_inference.h"
#include "paddle/framework/tensor.h"
...@@ -573,6 +574,7 @@ class OperatorWithKernel : public OperatorBase {
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const final {
    VLOG(3) << "Running operator " << this->Type();
    RuntimeInferShapeContext infer_shape_ctx(*this, scope);
    this->InferShape(&infer_shape_ctx);
......
...@@ -39,5 +39,14 @@ ProgramDescBind::ProgramDescBind() {
  block->set_parent_idx(-1);
  blocks_.emplace_back(new BlockDescBind(this, block));
}
ProgramDescBind::ProgramDescBind(const ProgramDescBind &o) {
prog_ = o.prog_;
for (int i = 0; i < prog_.blocks_size(); ++i) {
auto *block = prog_.mutable_blocks(i);
blocks_.emplace_back(new BlockDescBind(*o.blocks_[i], block, this));
}
}
} // namespace framework
} // namespace paddle
...@@ -28,6 +28,8 @@ class ProgramDescBind {
 public:
  ProgramDescBind();
  ProgramDescBind(const ProgramDescBind &o);
  BlockDescBind *AppendBlock(const BlockDescBind &parent);
  BlockDescBind *Block(size_t idx) { return blocks_[idx].get(); }
...@@ -40,8 +42,6 @@ class ProgramDescBind {
  ProgramDesc prog_;
  std::vector<std::unique_ptr<BlockDescBind>> blocks_;
  DISABLE_COPY_AND_ASSIGN(ProgramDescBind);
};
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/program_desc.h"
#include "gtest/gtest.h"
#include "paddle/framework/block_desc.h"
namespace paddle {
namespace framework {
TEST(ProgramDesc, copy_ctor) {
ProgramDescBind program;
auto* global_block = program.Block(0);
auto* x = global_block->Var("X");
x->SetType(VarDesc_VarType_LOD_TENSOR);
x->SetLoDLevel(0);
x->SetDataType(FP32);
x->SetShape({1000, 784});
auto* y = global_block->Var("Y");
y->SetType(VarDesc_VarType_LOD_TENSOR);
y->SetLoDLevel(0);
y->SetDataType(FP32);
y->SetShape({784, 100});
auto* op = global_block->AppendOp();
op->SetType("mul");
op->SetInput("X", {x->Name()});
op->SetInput("Y", {y->Name()});
auto* out = global_block->Var("Out");
out->SetType(VarDesc_VarType_LOD_TENSOR);
op->SetOutput("Y", {out->Name()});
ProgramDescBind program_copy(program);
auto* global_block_copy = program_copy.Block(0);
ASSERT_NE(global_block, global_block_copy);
auto assert_same_var = [&](const std::string& name, VarDescBind* var_before) {
ASSERT_TRUE(global_block_copy->HasVar(name));
auto* copy = global_block_copy->Var(name);
ASSERT_NE(copy, var_before);
ASSERT_EQ(copy->Name(), var_before->Name());
ASSERT_EQ(copy->GetType(), var_before->GetType());
ASSERT_EQ(copy->Shape(), var_before->Shape());
ASSERT_EQ(copy->Proto()->SerializeAsString(),
var_before->Proto()->SerializeAsString());
};
ASSERT_EQ(global_block->LocalVarNames(), global_block_copy->LocalVarNames());
ASSERT_EQ(3, global_block_copy->LocalVarNames().size());
assert_same_var("X", x);
assert_same_var("Y", y);
assert_same_var("Out", out);
for (size_t i = 0; i < global_block->OpSize(); ++i) {
auto op_origin = global_block->Op(i);
    auto op_copy = global_block_copy->Op(i);
ASSERT_EQ(op_origin->Type(), op_copy->Type());
ASSERT_EQ(op_origin->Inputs(), op_copy->Inputs());
ASSERT_EQ(op_origin->Outputs(), op_copy->Outputs());
ASSERT_EQ(op_copy->Proto()->SerializeAsString(),
op_origin->Proto()->SerializeAsString());
}
  // We do not check that the two blocks' proto strings are identical, because
  // the order of variables may differ while the copy is still correct.
}
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/prune.h"
#include <algorithm>
#include <set>
#include <string>
#include <vector>
#include <glog/logging.h>
namespace paddle {
namespace framework {
const std::string kFeedOpType = "feed";
const std::string kFetchOpType = "fetch";
bool HasDependentVar(const OpDesc& op_desc,
const std::set<std::string>& dependent_vars) {
for (auto& var : op_desc.outputs()) {
for (auto& argu : var.arguments()) {
if (dependent_vars.count(argu) != 0) {
return true;
}
}
}
return false;
}
bool IsTarget(const OpDesc& op_desc) {
if (op_desc.has_is_target()) {
return op_desc.is_target();
}
return false;
}
void prune_impl(const ProgramDesc& input, ProgramDesc& output, int block_id) {
// TODO(tonyyang-svail):
// - will change to use multiple blocks for RNN op and Cond Op
auto& block = input.blocks(block_id);
auto& ops = block.ops();
bool expect_feed = true;
for (auto& op_desc : ops) {
PADDLE_ENFORCE(op_desc.type() != kFeedOpType || expect_feed,
"All FeedOps are at the beginning of the ProgramDesc");
expect_feed = (op_desc.type() == kFeedOpType);
}
bool expect_fetch = true;
for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) {
auto& op_desc = *op_iter;
    PADDLE_ENFORCE(op_desc.type() != kFetchOpType || expect_fetch,
                   "All FetchOps must be at the end of the ProgramDesc");
expect_fetch = (op_desc.type() == kFetchOpType);
}
std::set<std::string> dependent_vars;
std::vector<bool> should_run;
for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) {
auto& op_desc = *op_iter;
if (IsTarget(op_desc) || HasDependentVar(op_desc, dependent_vars)) {
// insert its input to the dependency graph
for (auto& var : op_desc.inputs()) {
for (auto& argu : var.arguments()) {
dependent_vars.insert(argu);
}
}
should_run.push_back(true);
} else {
should_run.push_back(false);
}
}
// since we are traversing the ProgramDesc in reverse order
// we reverse the should_run vector
std::reverse(should_run.begin(), should_run.end());
output = input;
auto* op_field = output.mutable_blocks(block_id)->mutable_ops();
op_field->Clear();
for (size_t i = 0; i < should_run.size(); ++i) {
if (should_run[i]) {
*op_field->Add() = input.blocks(block_id).ops(i);
}
}
}
void Prune(const ProgramDesc& input, ProgramDesc& output) {
prune_impl(input, output, 0);
}
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/framework.pb.h"
#include "paddle/platform/enforce.h"
namespace paddle {
namespace framework {
void Prune(const ProgramDesc& input, ProgramDesc& output);
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/prune.h"
#include "paddle/framework/attribute.h"
#include "paddle/framework/operator.h"
#include "paddle/operators/net_op.h"
#include "paddle/framework/block_desc.h"
#include "paddle/framework/op_desc.h"
#include "paddle/framework/program_desc.h"
#include <gtest/gtest.h>
namespace f = paddle::framework;
namespace ops = paddle::operators;
void AddOp(const std::string &type, const f::VariableNameMap &inputs,
const f::VariableNameMap &outputs, f::AttributeMap attrs,
paddle::framework::BlockDescBind *block) {
// insert output
for (auto kv : outputs) {
for (auto v : kv.second) {
auto var = block->Var(v);
var->SetDataType(paddle::framework::DataType::FP32);
}
}
// insert op
auto op = block->AppendOp();
op->SetType(type);
for (auto &kv : inputs) {
op->SetInput(kv.first, kv.second);
}
for (auto &kv : outputs) {
op->SetOutput(kv.first, kv.second);
}
op->SetAttrMap(attrs);
}
TEST(Prune, one_operator) {
f::ProgramDescBind program;
f::BlockDescBind *block = program.Block(0);
AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block);
f::ProgramDesc *pdesc = program.Proto();
f::ProgramDesc pruned;
Prune(*pdesc, pruned);
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 0);
pdesc->mutable_blocks(0)->mutable_ops(0)->set_is_target(true);
Prune(*pdesc, pruned);
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 1);
}
TEST(Prune, forward) {
f::ProgramDescBind program;
f::BlockDescBind *block = program.Block(0);
AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block);
AddOp("one_one", {{"input", {"b"}}}, {{"output", {"c"}}}, {}, block);
AddOp("one_one", {{"input", {"c"}}}, {{"output", {"d"}}}, {}, block);
AddOp("one_one", {{"input", {"d"}}}, {{"output", {"e"}}}, {}, block);
f::ProgramDesc *pdesc = program.Proto();
for (int i = 0; i < pdesc->blocks(0).ops_size(); ++i) {
f::ProgramDesc pruned;
pdesc->mutable_blocks(0)->mutable_ops(i)->set_is_target(true);
Prune(*pdesc, pruned);
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), i + 1);
}
}
TEST(Prune, multi_input_op) {
f::ProgramDescBind program;
f::BlockDescBind *block = program.Block(0);
AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, {}, block);
AddOp("one_one", {{"input", {"a1"}}}, {{"output", {"b1"}}}, {}, block);
AddOp("one_one", {{"input", {"a2"}}}, {{"output", {"b2"}}}, {}, block);
AddOp("three_one", {{"input", {"b0", "b1", "b2"}}}, {{"output", {"c"}}}, {},
block);
f::ProgramDesc *pdesc = program.Proto();
pdesc->mutable_blocks(0)->mutable_ops(3)->set_is_target(true);
f::ProgramDesc pruned;
Prune(*pdesc, pruned);
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 4);
}
TEST(Prune, multi_output_op) {
f::ProgramDescBind program;
f::BlockDescBind *block = program.Block(0);
AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block);
AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block);
AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, {}, block);
f::ProgramDesc *pdesc = program.Proto();
pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true);
f::ProgramDesc pruned;
Prune(*pdesc, pruned);
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 2);
}
TEST(Prune, multi_target) {
f::ProgramDescBind program;
f::BlockDescBind *block = program.Block(0);
AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block);
AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block);
AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, {}, block);
f::ProgramDesc *pdesc = program.Proto();
pdesc->mutable_blocks(0)->mutable_ops(1)->set_is_target(true);
pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true);
f::ProgramDesc pruned;
Prune(*pdesc, pruned);
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 3);
}
...@@ -79,6 +79,10 @@ class VarDescBind {
  void SetType(VarDesc::VarType type) { desc_.set_type(type); }
  bool Persistable() const { return desc_.persistable(); }
  void SetPersistable(bool persistable) { desc_.set_persistable(persistable); }
 private:
  const TensorDesc &tensor_desc() const;
  TensorDesc *mutable_tensor_desc();
......
...@@ -25,7 +25,10 @@ class Variable {
 public:
  template <typename T>
  const T& Get() const {
    PADDLE_ENFORCE(holder_ != nullptr, "Variable must hold something");
PADDLE_ENFORCE(IsType<T>(),
"Variable must be type %s, the holding type is %s",
typeid(T).name(), holder_->Type().name());
    return *static_cast<const T*>(holder_->Ptr());
  }
......
...@@ -43,10 +43,6 @@ class AdamOp : public framework::OperatorWithKernel { ...@@ -43,10 +43,6 @@ class AdamOp : public framework::OperatorWithKernel {
"Output(Moment1Out) of AdamOp should not be null."); "Output(Moment1Out) of AdamOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Moment2Out"), PADDLE_ENFORCE(ctx->HasOutput("Moment2Out"),
"Output(Moment2Out) of AdamOp should not be null."); "Output(Moment2Out) of AdamOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"),
"Output(Beta1PowOut) of AdamOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Beta2PowOut"),
"Output(Beta2PowOut) of AdamOp should not be null.");
auto lr_dims = ctx->GetInputDim("LearningRate"); auto lr_dims = ctx->GetInputDim("LearningRate");
PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
...@@ -72,8 +68,6 @@ class AdamOp : public framework::OperatorWithKernel { ...@@ -72,8 +68,6 @@ class AdamOp : public framework::OperatorWithKernel {
ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("ParamOut", param_dims);
ctx->SetOutputDim("Moment1Out", param_dims); ctx->SetOutputDim("Moment1Out", param_dims);
ctx->SetOutputDim("Moment2Out", param_dims); ctx->SetOutputDim("Moment2Out", param_dims);
ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims);
ctx->SetOutputDim("Beta2PowOut", beta2_pow_dims);
} }
}; };
...@@ -92,8 +86,6 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -92,8 +86,6 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("ParamOut", "(Tensor) Output parameter"); AddOutput("ParamOut", "(Tensor) Output parameter");
AddOutput("Moment1Out", "(Tensor) Output first moment"); AddOutput("Moment1Out", "(Tensor) Output first moment");
AddOutput("Moment2Out", "(Tensor) Output second moment"); AddOutput("Moment2Out", "(Tensor) Output second moment");
AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator");
AddOutput("Beta2PowOut", "(Tensor) Output beta2 power accumulator");
AddAttr<float>("beta1", AddAttr<float>("beta1",
"(float, default 0.9) " "(float, default 0.9) "
...@@ -121,10 +113,8 @@ Adam updates: ...@@ -121,10 +113,8 @@ Adam updates:
moment1_out = beta1 * moment1 + (1 − beta1) * grad moment1_out = beta1 * moment1 + (1 − beta1) * grad
moment2_out = beta2 * moment2 + (1 − beta2) * grad * grad moment2_out = beta2 * moment2 + (1 − beta2) * grad * grad
beta1_pow_out = beta1_pow * beta1
beta2_pow_out = beta2_pow * beta2
learning_rate_t = learning_rate_t * learning_rate_t = learning_rate_t *
sqrt(1 - beta2_pow_out) / (1 - beta1_pow_out) sqrt(1 - beta2_pow) / (1 - beta1_pow)
param_out = param - learning_rate_t * moment1/ (sqrt(moment2) + epsilon) param_out = param - learning_rate_t * moment1/ (sqrt(moment2) + epsilon)
References: References:
......
...@@ -26,14 +26,10 @@ class AdamOpKernel : public framework::OpKernel<T> { ...@@ -26,14 +26,10 @@ class AdamOpKernel : public framework::OpKernel<T> {
auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut"); auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
auto moment1_out_tensor = ctx.Output<framework::Tensor>("Moment1Out"); auto moment1_out_tensor = ctx.Output<framework::Tensor>("Moment1Out");
auto moment2_out_tensor = ctx.Output<framework::Tensor>("Moment2Out"); auto moment2_out_tensor = ctx.Output<framework::Tensor>("Moment2Out");
auto beta1_pow_out_tensor = ctx.Output<framework::Tensor>("Beta1PowOut");
auto beta2_pow_out_tensor = ctx.Output<framework::Tensor>("Beta2PowOut");
param_out_tensor->mutable_data<T>(ctx.GetPlace()); param_out_tensor->mutable_data<T>(ctx.GetPlace());
moment1_out_tensor->mutable_data<T>(ctx.GetPlace()); moment1_out_tensor->mutable_data<T>(ctx.GetPlace());
moment2_out_tensor->mutable_data<T>(ctx.GetPlace()); moment2_out_tensor->mutable_data<T>(ctx.GetPlace());
beta1_pow_out_tensor->mutable_data<T>(ctx.GetPlace());
beta2_pow_out_tensor->mutable_data<T>(ctx.GetPlace());
float beta1 = ctx.Attr<float>("beta1"); float beta1 = ctx.Attr<float>("beta1");
float beta2 = ctx.Attr<float>("beta2"); float beta2 = ctx.Attr<float>("beta2");
...@@ -56,18 +52,13 @@ class AdamOpKernel : public framework::OpKernel<T> { ...@@ -56,18 +52,13 @@ class AdamOpKernel : public framework::OpKernel<T> {
auto param_out = framework::EigenVector<T>::Flatten(*param_out_tensor); auto param_out = framework::EigenVector<T>::Flatten(*param_out_tensor);
auto moment1_out = framework::EigenVector<T>::Flatten(*moment1_out_tensor); auto moment1_out = framework::EigenVector<T>::Flatten(*moment1_out_tensor);
auto moment2_out = framework::EigenVector<T>::Flatten(*moment2_out_tensor); auto moment2_out = framework::EigenVector<T>::Flatten(*moment2_out_tensor);
auto beta1_pow_out =
framework::EigenVector<T>::Flatten(*beta1_pow_out_tensor);
auto beta2_pow_out =
framework::EigenVector<T>::Flatten(*beta2_pow_out_tensor);
auto place = ctx.GetEigenDevice<Place>(); auto place = ctx.GetEigenDevice<Place>();
moment1_out.device(place) = beta1 * moment1 + (1 - beta1) * grad; moment1_out.device(place) = beta1 * moment1 + (1 - beta1) * grad;
moment2_out.device(place) = beta2 * moment2 + (1 - beta2) * grad.square(); moment2_out.device(place) = beta2 * moment2 + (1 - beta2) * grad.square();
beta1_pow_out.device(place) = beta1_pow * beta1;
beta2_pow_out.device(place) = beta2_pow * beta2;
// All of these are tensors of 1 element // All of these are tensors of 1 element
auto lr_t = lr * (1 - beta2_pow_out).sqrt() / (1 - beta1_pow_out); auto lr_t = lr * (1 - beta2_pow).sqrt() / (1 - beta1_pow);
// Eigen does not support automatic broadcast // Eigen does not support automatic broadcast
// Get dimensions of moment vector to broadcast lr_t // Get dimensions of moment vector to broadcast lr_t
Eigen::DSizes<int, 1> m_dsize(moment1_out_tensor->numel()); Eigen::DSizes<int, 1> m_dsize(moment1_out_tensor->numel());
......
...@@ -41,8 +41,6 @@ class AdamaxOp : public framework::OperatorWithKernel { ...@@ -41,8 +41,6 @@ class AdamaxOp : public framework::OperatorWithKernel {
"Output(MomentOut) of AdamaxOp should not be null."); "Output(MomentOut) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"), PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"),
"Output(InfNormOut) of AdamaxOp should not be null."); "Output(InfNormOut) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"),
"Output(Beta1PowOut) of AdamaxOp should not be null.");
auto lr_dims = ctx->GetInputDim("LearningRate"); auto lr_dims = ctx->GetInputDim("LearningRate");
PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
...@@ -64,7 +62,6 @@ class AdamaxOp : public framework::OperatorWithKernel { ...@@ -64,7 +62,6 @@ class AdamaxOp : public framework::OperatorWithKernel {
ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("ParamOut", param_dims);
ctx->SetOutputDim("MomentOut", param_dims); ctx->SetOutputDim("MomentOut", param_dims);
ctx->SetOutputDim("InfNormOut", param_dims); ctx->SetOutputDim("InfNormOut", param_dims);
ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims);
} }
}; };
...@@ -86,7 +83,6 @@ class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -86,7 +83,6 @@ class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("InfNormOut", AddOutput("InfNormOut",
"(Tensor) " "(Tensor) "
"Output exponentially weighted infinity norm"); "Output exponentially weighted infinity norm");
AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator");
AddAttr<float>("beta1", AddAttr<float>("beta1",
"(float, default 0.9) " "(float, default 0.9) "
...@@ -113,8 +109,7 @@ Adamax updates: ...@@ -113,8 +109,7 @@ Adamax updates:
moment_out = beta1 * moment + (1 - beta1) * grad moment_out = beta1 * moment + (1 - beta1) * grad
inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad)) inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad))
beta1_pow_out = beta1_pow * beta1 learning_rate_t = learning_rate/(1 - beta1_pow)
learning_rate_t = learning_rate/(1 - beta1_pow_out)
param_out = param - learning_rate_t * moment_out/inf_norm_out param_out = param - learning_rate_t * moment_out/inf_norm_out
The original paper does not have an epsilon attribute. The original paper does not have an epsilon attribute.
......
...@@ -26,12 +26,10 @@ class AdamaxOpKernel : public framework::OpKernel<T> { ...@@ -26,12 +26,10 @@ class AdamaxOpKernel : public framework::OpKernel<T> {
auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut"); auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
auto moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut"); auto moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
auto inf_norm_out_tensor = ctx.Output<framework::Tensor>("InfNormOut"); auto inf_norm_out_tensor = ctx.Output<framework::Tensor>("InfNormOut");
auto beta1_pow_out_tensor = ctx.Output<framework::Tensor>("Beta1PowOut");
param_out_tensor->mutable_data<T>(ctx.GetPlace()); param_out_tensor->mutable_data<T>(ctx.GetPlace());
moment_out_tensor->mutable_data<T>(ctx.GetPlace()); moment_out_tensor->mutable_data<T>(ctx.GetPlace());
inf_norm_out_tensor->mutable_data<T>(ctx.GetPlace()); inf_norm_out_tensor->mutable_data<T>(ctx.GetPlace());
beta1_pow_out_tensor->mutable_data<T>(ctx.GetPlace());
float beta1 = ctx.Attr<float>("beta1"); float beta1 = ctx.Attr<float>("beta1");
float beta2 = ctx.Attr<float>("beta2"); float beta2 = ctx.Attr<float>("beta2");
...@@ -53,15 +51,12 @@ class AdamaxOpKernel : public framework::OpKernel<T> { ...@@ -53,15 +51,12 @@ class AdamaxOpKernel : public framework::OpKernel<T> {
auto moment_out = framework::EigenVector<T>::Flatten(*moment_out_tensor); auto moment_out = framework::EigenVector<T>::Flatten(*moment_out_tensor);
auto inf_norm_out = auto inf_norm_out =
framework::EigenVector<T>::Flatten(*inf_norm_out_tensor); framework::EigenVector<T>::Flatten(*inf_norm_out_tensor);
auto beta1_pow_out =
framework::EigenVector<T>::Flatten(*beta1_pow_out_tensor);
auto place = ctx.GetEigenDevice<Place>(); auto place = ctx.GetEigenDevice<Place>();
moment_out.device(place) = beta1 * moment + (1 - beta1) * grad; moment_out.device(place) = beta1 * moment + (1 - beta1) * grad;
inf_norm_out.device(place) = inf_norm_out.device(place) =
grad.abs().cwiseMax((beta2 * inf_norm) + epsilon); grad.abs().cwiseMax((beta2 * inf_norm) + epsilon);
beta1_pow_out.device(place) = beta1_pow * beta1; auto lr_t = lr / (1 - beta1_pow);
auto lr_t = lr / (1 - beta1_pow_out);
Eigen::DSizes<int, 1> m_dsize(moment_out_tensor->numel()); Eigen::DSizes<int, 1> m_dsize(moment_out_tensor->numel());
param_out.device(place) = param_out.device(place) =
param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out); param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out);
......
...@@ -26,8 +26,9 @@ class FeedOp : public framework::OperatorBase { ...@@ -26,8 +26,9 @@ class FeedOp : public framework::OperatorBase {
: OperatorBase(type, inputs, outputs, attrs) {} : OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope, void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override { const platform::DeviceContext &dev_ctx) const override {
auto feed_var_name = Input("Input"); auto feed_var_name = Input("X");
auto *feed_var = scope.FindVar(feed_var_name); auto *feed_var = scope.FindVar(feed_var_name);
PADDLE_ENFORCE(feed_var != nullptr, PADDLE_ENFORCE(feed_var != nullptr,
"Cannot find feed_var in scope, feed_var_name is %s", "Cannot find feed_var in scope, feed_var_name is %s",
feed_var_name); feed_var_name);
...@@ -40,6 +41,9 @@ class FeedOp : public framework::OperatorBase { ...@@ -40,6 +41,9 @@ class FeedOp : public framework::OperatorBase {
auto col = Attr<int>("col"); auto col = Attr<int>("col");
VLOG(3) << "Feed Var " << feed_var_name << "'s " << col << " column to var"
<< out_name;
auto &feed_list = feed_var->Get<framework::FeedFetchList>(); auto &feed_list = feed_var->Get<framework::FeedFetchList>();
auto &feed_item = feed_list.at(static_cast<size_t>(col)); auto &feed_item = feed_list.at(static_cast<size_t>(col));
auto *out_item = out_var->GetMutable<framework::FeedFetchType>(); auto *out_item = out_var->GetMutable<framework::FeedFetchType>();
...@@ -48,10 +52,21 @@ class FeedOp : public framework::OperatorBase { ...@@ -48,10 +52,21 @@ class FeedOp : public framework::OperatorBase {
} }
}; };
class FeedOpInfoMaker : public framework::OpProtoAndCheckerMaker {
public:
FeedOpInfoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of feed op");
AddOutput("Out", "The output of feed op");
AddComment("feed op, it should not be configured by users directly");
AddAttr<int>("col", "column of feed");
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
// We do not need to register OpInfoMaker,
// since feed operator will not be used by end users directly
REGISTER_OPERATOR(feed, paddle::operators::FeedOp, REGISTER_OPERATOR(feed, paddle::operators::FeedOp,
paddle::framework::EmptyGradOpMaker); paddle::framework::EmptyGradOpMaker,
paddle::operators::FeedOpInfoMaker);
...@@ -27,7 +27,7 @@ class FetchOp : public framework::OperatorBase { ...@@ -27,7 +27,7 @@ class FetchOp : public framework::OperatorBase {
void Run(const framework::Scope &scope, void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override { const platform::DeviceContext &dev_ctx) const override {
auto fetch_var_name = Input("Input"); auto fetch_var_name = Input("X");
auto *fetch_var = scope.FindVar(fetch_var_name); auto *fetch_var = scope.FindVar(fetch_var_name);
PADDLE_ENFORCE(fetch_var != nullptr, PADDLE_ENFORCE(fetch_var != nullptr,
"Cannot find fetch variable in scope, fetch_var_name is %s", "Cannot find fetch variable in scope, fetch_var_name is %s",
...@@ -52,13 +52,25 @@ class FetchOp : public framework::OperatorBase { ...@@ -52,13 +52,25 @@ class FetchOp : public framework::OperatorBase {
// FIXME(yuyang18): Should we assume the fetch operator always generate // FIXME(yuyang18): Should we assume the fetch operator always generate
// CPU outputs? // CPU outputs?
dst_item.CopyFromTensor(src_item, platform::CPUPlace(), dev_ctx); dst_item.CopyFromTensor(src_item, platform::CPUPlace(), dev_ctx);
VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name;
} }
}; };
class FetchOpInfoMaker : public framework::OpProtoAndCheckerMaker {
public:
FetchOpInfoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of fetch op");
AddOutput("Out", "The output of fetch op");
AddComment("fetch op, it should not be configured by users directly");
AddAttr<int>("col", "column of fetch");
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
// We do not need to register OpInfoMaker,
// since fetch operator will not be used by end users directly
REGISTER_OPERATOR(fetch, paddle::operators::FetchOp, REGISTER_OPERATOR(fetch, paddle::operators::FetchOp,
paddle::framework::EmptyGradOpMaker); paddle::framework::EmptyGradOpMaker,
paddle::operators::FetchOpInfoMaker);
...@@ -21,7 +21,7 @@ class SGDOp : public framework::OperatorWithKernel { ...@@ -21,7 +21,7 @@ class SGDOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override { void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Param"), PADDLE_ENFORCE(ctx->HasInput("Param"),
"Input(Param) of SGDOp should not be null."); "Input(Param) of SGDOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Grad"), PADDLE_ENFORCE(ctx->HasInput("Grad"),
...@@ -35,15 +35,15 @@ class SGDOp : public framework::OperatorWithKernel { ...@@ -35,15 +35,15 @@ class SGDOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
"Learning rate should have 1 element"); "Learning rate should have 1 element");
auto param_dim = ctx->GetInputDim("Param"); auto param_dim = ctx->GetInputDim("Param");
PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), // TODO(qijun): check dimensions of Param and Grad at complie
"Two input of SGD Op's dimension must be same."); // and run time.
ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("ParamOut", param_dim);
} }
}; };
class SGDOpMaker : public framework::OpProtoAndCheckerMaker { class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) SGDOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("Param", "Input parameter"); AddInput("Param", "Input parameter");
AddInput("LearningRate", "Learning rate of SGD"); AddInput("LearningRate", "Learning rate of SGD");
...@@ -58,6 +58,38 @@ param_out = param - learning_rate * grad; ...@@ -58,6 +58,38 @@ param_out = param - learning_rate * grad;
)DOC"); )DOC");
} }
}; };
template <typename T>
struct SparseSGDFunctor<platform::CPUPlace, T> {
void operator()(const platform::DeviceContext& context,
const framework::SelectedRows& input,
const framework::Tensor& learning_rate,
framework::Tensor* output) {
auto in_height = input.height();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(in_height, out_dims[0]);
auto& in_value = input.value();
auto& in_rows = input.rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height);
auto* in_data = in_value.data<T>();
auto* out_data = output->data<T>();
auto* lr = learning_rate.data<T>();
for (size_t i = 0; i < in_rows.size(); i++) {
for (int64_t j = 0; j < in_row_numel; j++) {
out_data[in_rows[i] * in_row_numel + j] -=
lr[0] * in_data[i * in_row_numel + j];
}
}
}
};
template struct SparseSGDFunctor<platform::CPUPlace, float>;
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
......
...@@ -14,6 +14,66 @@ ...@@ -14,6 +14,66 @@
#define EIGEN_USE_GPU #define EIGEN_USE_GPU
#include "paddle/operators/sgd_op.h" #include "paddle/operators/sgd_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace {
template <typename T>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate, T* tensor_out,
int64_t row_numel, int block_size) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(
tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SparseSGDFunctor<platform::GPUPlace, T> {
void operator()(const platform::DeviceContext& context,
const framework::SelectedRows& input,
const framework::Tensor& learning_rate,
framework::Tensor* output) {
auto in_height = input.height();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(in_height, out_dims[0]);
auto& in_value = input.value();
auto& in_rows = input.rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height);
auto* in_data = in_value.data<T>();
auto* out_data = output->data<T>();
int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in_rows.size());
SparseSGDFunctorKernel<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(in_data, in_rows.data(), learning_rate.data<T>(),
out_data, in_row_numel, block_size);
}
};
template struct SparseSGDFunctor<platform::GPUPlace, float>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(sgd, REGISTER_OP_GPU_KERNEL(sgd,
......
...@@ -15,20 +15,32 @@ limitations under the License. */ ...@@ -15,20 +15,32 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/framework/eigen.h" #include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/framework/selected_rows.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename Place, typename T>
struct SparseSGDFunctor {
void operator()(const platform::DeviceContext& context,
const framework::SelectedRows& input,
const framework::Tensor& learning_rate,
framework::Tensor* output);
};
template <typename Place, typename T>
class SGDOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* param = ctx.Input<framework::Tensor>("Param");
auto* param_out = ctx.Output<framework::Tensor>("ParamOut");
auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate");
auto learning_rate = ctx.Input<framework::Tensor>("LearningRate");
auto* grad_var = ctx.InputVar("Grad");
// Actually, all tensors are LoDTensor except SelectedRows.
if (grad_var->IsType<framework::LoDTensor>()) {
param_out->mutable_data<T>(ctx.GetPlace());
auto* grad = ctx.Input<framework::Tensor>("Grad");
auto p = framework::EigenVector<T>::Flatten(*param);
auto g = framework::EigenVector<T>::Flatten(*grad);
@@ -38,8 +50,18 @@ class SGDOpKernel : public framework::OpKernel<T> {
Eigen::DSizes<int, 1> grad_dsize(grad->numel());
o.device(place) = p - lr.broadcast(grad_dsize) * g;
} else if (grad_var->IsType<framework::SelectedRows>()) {
// TODO(qijun): In Sparse SGD operator, in-place update is enforced.
// This manual optimization brings difficulty to track data dependency.
// It's better to find a more elegant solution.
PADDLE_ENFORCE_EQ(param, param_out);
auto* grad = ctx.Input<framework::SelectedRows>("Grad");
SparseSGDFunctor<Place, T> functor;
functor(ctx.device_context(), *grad, *learning_rate, param_out);
} else {
PADDLE_THROW("Unsupported Variable Type of Grad");
}
}
};
} // namespace operators
} // namespace paddle
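The rewritten `SGDOpKernel` above dispatches on the runtime type of `Grad`: a dense `LoDTensor` gradient takes the Eigen path, while a `SelectedRows` gradient is forwarded to `SparseSGDFunctor` with the in-place requirement `param == param_out`. For the dense branch the computation reduces to a plain elementwise update; a NumPy sketch (illustrative only, not the actual kernel):

```python
import numpy as np

# Dense branch of the SGD kernel, sketched with NumPy: an elementwise update.
def dense_sgd_update(param, grad, lr):
    return param - lr * grad

param = np.ones((4, 3), dtype="float32")
grad = np.full((4, 3), 0.5, dtype="float32")
print dense_sgd_update(param, grad, lr=0.1)  # every entry becomes 0.95
```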
@@ -265,6 +265,10 @@ public:
addParameterType(PARAMETER_SECOND_MOMENTUM);
}
virtual void startBatch(int64_t numSamplesProcessed) {
learningRate_ = calcLearningRate(numSamplesProcessed, pass_);
}
virtual void finishBatch() { ++step_; }
virtual void update(const VectorPtr vecs[],
......
@@ -101,6 +101,10 @@ using namespace paddle::framework; // NOLINT
void BindProgramDesc(py::module &m) {
py::class_<ProgramDescBind>(m, "ProgramDesc", "")
.def(py::init<>())
.def("__init__",
[](ProgramDescBind &self, const ProgramDescBind &other) {
new (&self) ProgramDescBind(other);
})
.def("append_block", &ProgramDescBind::AppendBlock, .def("append_block", &ProgramDescBind::AppendBlock,
py::return_value_policy::reference) py::return_value_policy::reference)
.def("append_backward", .def("append_backward",
...@@ -202,7 +206,8 @@ void BindVarDsec(py::module &m) { ...@@ -202,7 +206,8 @@ void BindVarDsec(py::module &m) {
.def("set_lod_level", &VarDescBind::SetLoDLevel) .def("set_lod_level", &VarDescBind::SetLoDLevel)
.def("type", &VarDescBind::GetType) .def("type", &VarDescBind::GetType)
.def("set_type", &VarDescBind::SetType) .def("set_type", &VarDescBind::SetType)
.def("serialize_to_string", [](VarDescBind &var_desc) -> py::bytes { .def("serialize_to_string",
[](VarDescBind &var_desc) -> py::bytes {
const VarDesc *desc = var_desc.Proto(); const VarDesc *desc = var_desc.Proto();
PADDLE_ENFORCE(desc->IsInitialized(), PADDLE_ENFORCE(desc->IsInitialized(),
"VarDesc has not been initialized."); "VarDesc has not been initialized.");
...@@ -211,11 +216,15 @@ void BindVarDsec(py::module &m) { ...@@ -211,11 +216,15 @@ void BindVarDsec(py::module &m) {
desc->SerializeToString(&res), desc->SerializeToString(&res),
"Serialize VarDesc Error. This could be a bug of Paddle."); "Serialize VarDesc Error. This could be a bug of Paddle.");
return res; return res;
}); })
.def("persistable", &VarDescBind::Persistable)
.def("set_persistable", &VarDescBind::SetPersistable);
py::enum_<VarDesc::VarType>(var_desc, "VarType", "")
.value("LOD_TENSOR", VarDesc::LOD_TENSOR)
.value("SELECTED_ROWS", VarDesc::SELECTED_ROWS)
.value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH)
.value("FETCH_LIST", VarDesc::FETCH_LIST);
}
void BindOpDesc(py::module &m) {
......
@@ -111,6 +111,7 @@ PYBIND11_PLUGIN(core) {
new (&instance) LoDTensor(new_lod);
#endif
})
.def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
.def("set_lod", .def("set_lod",
[](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) { [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
#ifndef PADDLE_WITH_CUDA #ifndef PADDLE_WITH_CUDA
...@@ -154,7 +155,15 @@ PYBIND11_PLUGIN(core) { ...@@ -154,7 +155,15 @@ PYBIND11_PLUGIN(core) {
py::return_value_policy::reference) py::return_value_policy::reference)
.def("set_height", &SelectedRows::set_height) .def("set_height", &SelectedRows::set_height)
.def("height", &SelectedRows::height) .def("height", &SelectedRows::height)
.def("set_rows", &SelectedRows::set_rows) .def("set_rows",
[](SelectedRows &self, std::vector<int64_t> rows) {
#ifndef PADDLE_WITH_CUDA
self.set_rows(rows);
#else
Vector<int64_t> new_rows(rows);
self.set_rows(new_rows);
#endif
})
.def("rows", [](SelectedRows &self) { .def("rows", [](SelectedRows &self) {
#ifndef PADDLE_WITH_CUDA #ifndef PADDLE_WITH_CUDA
return self.rows(); return self.rows();
...@@ -187,6 +196,11 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -187,6 +196,11 @@ All parameter, weight, gradient are variables in Paddle.
return self.GetMutable<LoDTensor>(); return self.GetMutable<LoDTensor>();
}, },
py::return_value_policy::reference) py::return_value_policy::reference)
.def("get_selected_rows",
[](Variable &self) -> SelectedRows * {
return self.GetMutable<SelectedRows>();
},
py::return_value_policy::reference)
.def("get_net", .def("get_net",
[](Variable &self) -> operators::NetOp * { [](Variable &self) -> operators::NetOp * {
return self.GetMutable<operators::NetOp>(); return self.GetMutable<operators::NetOp>();
...@@ -203,7 +217,8 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -203,7 +217,8 @@ All parameter, weight, gradient are variables in Paddle.
.def(py::init<>()) .def(py::init<>())
.def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); }, .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
py::return_value_policy::reference) py::return_value_policy::reference)
.def("drop_kids", &Scope::DropKids); .def("drop_kids", &Scope::DropKids)
.def_static("global_scope", &GetGlobalScope);
//! @note: Be careful! PyBind will return std::string as an unicode, not
//! Python str. If you want a str object, you should cast them in Python.
@@ -251,6 +266,17 @@ All parameter, weight, gradient are variables in Paddle.
.def(py::init<>())
.def("__str__", string::to_string<const platform::CPUPlace &>);
py::class_<platform::Place>(m, "Place")
.def(py::init<>())
.def("set_place",
[](platform::Place &self, const platform::CPUPlace &cpu_place) {
self = cpu_place;
})
.def("set_place",
[](platform::Place &self, const platform::GPUPlace &gpu_place) {
self = gpu_place;
});
py::class_<OperatorBase>(m, "Operator")
.def_static("create",
[](py::bytes protobin) {
@@ -424,14 +450,15 @@ All parameter, weight, gradient are variables in Paddle.
py::class_<framework::Executor>(m, "Executor")
.def(py::init<std::vector<platform::Place> &>())
.def("run",
[](Executor &self, ProgramDescBind *program_bind, int block_id) {
framework::Scope &global_scope = GetGlobalScope();
self.Run(*program_bind->Proto(), &global_scope, block_id);
});
m.def("unique_integer", UniqueIntegerGenerator);
m.def("is_compile_gpu", IsCompileGPU);
//! FIXME: it is no need to `set_xxx_float/double/int`
m.def("set_feed_variable_float", framework::SetFeedVariable<float>); m.def("set_feed_variable_float", framework::SetFeedVariable<float>);
m.def("set_feed_variable_double", framework::SetFeedVariable<double>); m.def("set_feed_variable_double", framework::SetFeedVariable<double>);
m.def("set_feed_variable_int", framework::SetFeedVariable<int>); m.def("set_feed_variable_int", framework::SetFeedVariable<int>);
......
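The pybind additions above (the generic `Place` with `set_place`, `Variable.get_selected_rows`, `Scope.global_scope`, the `persistable` accessors on `VarDesc`, and `Executor.run` taking a `ProgramDesc`) are what the Python `Executor` wrapper and the sparse SGD unit test later on this page build on. A minimal sketch of how they fit together from Python, assuming the bindings behave as declared above:

```python
import numpy as np
import paddle.v2.framework.core as core

# Wrap a concrete CPUPlace into the new generic Place binding.
place = core.Place()
place.set_place(core.CPUPlace())

# Build a SelectedRows gradient inside a scope, as the sparse SGD test does.
scope = core.Scope()
grad = scope.var('Grad').get_selected_rows()
grad.set_height(10)
grad.set_rows([0, 4, 7])
grad.get_tensor().set(np.ones((3, 12)).astype("float32"), core.CPUPlace())
```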
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HOSTS = [
"root@10.1.9.7",
"root@10.1.18.7",
"root@10.1.32.9",
]
'''
workspace configuration
'''
#root dir for workspace; can be set to any directory with a real user account
ROOT_DIR = "/root"
'''
network configuration
'''
#pserver nics
PADDLE_NIC = "eth0"
#pserver port
PADDLE_PORT = 7164
#pserver ports num
PADDLE_PORTS_NUM = 1
#pserver sparse ports num
PADDLE_PORTS_NUM_FOR_SPARSE = 1
#whether the trainer uses GPU
PADDLE_USE_GPU = "False"
#environment settings for all processes in the cluster job
LD_LIBRARY_PATH = "/usr/local/cuda/lib64:/usr/lib64"
FROM docker.paddlepaddlehub.com/paddle:0.10.0rc2
RUN apt-get update && apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:root' |chpasswd
RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ssh-servers
spec:
replicas: 3
template:
metadata:
labels:
app: ssh-servers
spec:
containers:
- name: ssh-servers
image: docker.paddlepaddlehub.com/paddlessh
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 500m
memory: 1Gi
ports:
- containerPort: 22
#!/bin/bash
python paddle.py \
--job_dispatch_package="/root/wuyi/fabric_submit/workspace" \
--dot_period=10 \
--ports_num_for_sparse=1 \
--log_period=50 \
--num_passes=5 \
--trainer_count=2 \
--saving_period=1 \
--local=0 \
--config=./trainer_config.py \
--save_dir=./output \
--use_gpu=0
# Build this image: docker build -t mpi .
#
FROM paddledev/paddle:0.10.0rc3
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update -y && \
apt-get upgrade -y && \
apt-get install -y openssh-server zip unzip vim sudo \
gcc gfortran openmpi-checkpoint binutils wget curl git openmpi-bin openmpi-common libopenmpi-dev && \
pip install mpi4py numpy virtualenv scipy matplotlib lxml sqlalchemy suds ipython obspy && \
mkdir /var/run/sshd && \
echo 'root:tutorial' | chpasswd && \
sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config && \
# SSH login fix. Otherwise user is kicked off after login
sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd && \
echo "export VISIBLE=now" >> /etc/profile && \
adduser --disabled-password --gecos "" tutorial && \
echo "tutorial ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers && \
mkdir /home/tutorial/.ssh/
ENV HOME /home/tutorial
ENV NOTVISIBLE "in users profile"
# ------------------------------------------------------------
# Set-Up SSH with our Github deploy key
# ------------------------------------------------------------
ADD ssh/config /home/tutorial/.ssh/config
ADD ssh/id_rsa.mpi /home/tutorial/.ssh/id_rsa
ADD ssh/id_rsa.mpi.pub /home/tutorial/.ssh/id_rsa.pub
ADD ssh/id_rsa.mpi.pub /home/tutorial/.ssh/authorized_keys
#---------------------------------------------------------------
#LD_LIBRARY_PATH
#---------------------------------------------------------------
RUN export LD_LIBRARY_PATH=/usr/lib/openmpi/lib/
WORKDIR /home/tutorial
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: mpi-header
labels:
app: mpi-header
spec:
replicas: 1
template:
metadata:
labels:
app: mpi-header
spec:
containers:
- image: typhoon1986/paddle-openmpi
name : mpi-header
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
ports:
- containerPort: 22
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: mpi-nodes
labels:
app: mpi-nodes
spec:
replicas: 3
template:
metadata:
labels:
app: mpi-nodes
spec:
containers:
- image: typhoon1986/paddle-openmpi
name : mpi-nodes
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
ports:
- containerPort: 22
imagePullPolicy: Always
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA7PWLZmgdJ508dD15T6+xqGDvL9Ehzo9SgsnN6xJ+qpUvvOi4
1axW0AqR4MnPTg/uuvk+x4tUpuufOW4w22UTGjsdvmIVWa9ujLtcRiN3YPY+SU+Y
O5FfqKg7r/hBn+/GMcSoffwSs7vVgmhBBnp/mJh2O1cOAFZEe98/47mbg3/kHBAk
36NOQktaU3l48B38EhBTnjWfcEGm1HcTRPFxXV5Wiko6ZhKFEuHcTVKng4ROtUqE
mgHyI0aB7TAxg4na0ejItsYWEPWGeDOw6ms/4MwylxNosWzHFPW9p4zgLCLNr+b6
bDDfYKjXZflAuTQtQhLmJUwD9uuYLAijpSE2fQIDAQABAoIBADgcgRET8Gt0CV/B
OtvKz/f+VEVvcWD3gWNlJDTZIVOFllNWjIZUlA4ZoqenQkbK8Q4nfV1FOht4yjCQ
TlN1oMtiWk297i5Zo4UBzPzy4w774I39oh/g8dT/WXr2/5s+7SDV38xNh6Q2A34o
79T35wUcfUrZ93/O7dKjb/6d8hx2FMha0wVKqY4lmG1lQE3bbx3kakec0PdvU5kO
YHKlpqj3pMR7CpMa+4yL/iXFwWYmnK+uu+zw7JR7PwvH1CzrnvW438wjQ1QmYbSx
mHHOE89X67Lsl5hn81qYWBhpwAlBwi1qscsE0cV9GcFyKqWFqZsj5coM9u3CRfvy
lrWe1OUCgYEA+LBUFEd3Hxs4sFiYElJ8R9SAs1udaqPvAl01hTEijJLfYlMMVs/y
rgNN7j22zjDak2f8QdyMJZX7EZdRmdYcHO0csYOwbYvalzcnwk+U3mxmdD3r4xSo
DSvkJ70fogAqUlcVIg2re6fCmZVJQTvMQYTVEM8zQomJRt/Lb2esSfsCgYEA8+zv
44aToe8uqiDs4w8guRW7LCDkTw4z4IVo9JUibIaPjaAs5bZEBXSB43EEywXCR75H
fML0rU1PVvKh1rqcvZdVzm+XMWVr3asPk0sapaiHaTcmyZvJRDxxqbLFp0zRP1T6
cCtXNFdHWU4KiuKrUi6cDyOKchpfkSZa4seiT+cCgYB+n4FgBfdQPlMB70oW4irn
g/q32CjxuGCk6oKqu5bkzo+xB6obtavSEFqouIGQwO056tNVUY+GP7Rjg5GH663K
yKw4cl3tmS0Gm43B8TVSfw03mKO3rrfWZQe5eCFYIg9qd26KNT2gK435FzsCXQkm
PxUhhu6JrW/ZR2/U3Iur6wKBgADrWLAb1ryagSuE+j+U1AO+kDkHWrTtkcZ72jxp
v3p3O11GSEUJXdJDcSXhTCpTuDq6/dv7hB6PFwh126RKicKxKlKf2wsFndV1Cpb8
hnovW2tLGOtTmfuW2rrQAKyzvmolsNfxYd/BoHQ2thV16z1hDZeFA8WQUeHjKh6G
sBbrAoGATdtQlaUxx4izua6k02ihkxx/cRYwDl2N8UDvDBHokS7vJFMX8b8NpsGg
zMElnqSpu/pe/0UG7N2MtPF6uyMcX8AZzzcsRkiMkDvWJzYt8Jpf+Eyd/uryF+Yv
yrXaOEY83tm6x/fny5ZaZmk8lNth7bfWywuTMkZLX3fYpWtIeE4=
-----END RSA PRIVATE KEY-----
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDs9YtmaB0nnTx0PXlPr7GoYO8v0SHOj1KCyc3rEn6qlS+86LjVrFbQCpHgyc9OD+66+T7Hi1Sm6585bjDbZRMaOx2+YhVZr26Mu1xGI3dg9j5JT5g7kV+oqDuv+EGf78YxxKh9/BKzu9WCaEEGen+YmHY7Vw4AVkR73z/juZuDf+QcECTfo05CS1pTeXjwHfwSEFOeNZ9wQabUdxNE8XFdXlaKSjpmEoUS4dxNUqeDhE61SoSaAfIjRoHtMDGDidrR6Mi2xhYQ9YZ4M7Dqaz/gzDKXE2ixbMcU9b2njOAsIs2v5vpsMN9gqNdl+UC5NC1CEuYlTAP265gsCKOlITZ9 oweidner@peahi
#!/bin/bash
# General training configurations
NICS=eth0
PADDLE_INIT_PORT=7164
PADDLE_INIT_PORTS_NUM=1
PADDLE_INIT_PORTS_NUM_FOR_SPARSE=1
PADDLE_INIT_PSERVERS=$(cat machines | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/,/g')
PADDLE_INIT_USE_GPU=False
PADDLE_INIT_NUM_GRADIENT_SERVERS=${OMPI_COMM_WORLD_SIZE}
PADDLE_INIT_TRAINER_ID=${OMPI_COMM_WORLD_RANK}
PADDLE_CLUSTER_TRAIN=True
env
# start pserver
stdbuf -oL nohup paddle pserver --port=$PADDLE_INIT_PORT --ports_num=$PADDLE_INIT_PORTS_NUM \
--ports_num_for_sparse=$PADDLE_INIT_PORTS_NUM_FOR_SPARSE --nics=$NICS \
--comment=paddle_cluster_pserver \
--num_gradient_servers=$PADDLE_INIT_NUM_GRADIENT_SERVERS &> logs/pserver.log &
# start trainer
# NOTE: train.py will use the above environment variables as configuration
python train.py &> logs/train.log
# kill background pservers when train finishes
ps -ef | grep pserver | awk '{print $2}' | xargs kill
import paddle.v2.framework.core as core
from paddle.v2.framework.framework import Block, Program
class Executor(object):
def __init__(self, places):
if not isinstance(places, list) and not isinstance(places, tuple):
places = [places]
act_places = []
for each in places:
p = core.Place()
p.set_place(each)
act_places.append(p)
self.executor = core.Executor(act_places)
def run(self,
program,
feed,
fetch_list,
feed_var_name='feed',
fetch_var_name='fetch'):
if not isinstance(program, Program):
raise TypeError()
program = program.clone()
global_block = program.global_block()
feed_var = global_block.create_var(
name=feed_var_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True)
for i, name in enumerate(feed):
out = global_block.var(name)
global_block.prepend_op(
'feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i})
# FIXME
core.set_feed_variable_float(feed[name], feed_var.name, i)
fetch_var = global_block.create_var(
name=fetch_var_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True)
for i, var in enumerate(fetch_list):
global_block.append_op(
type='fetch',
inputs={'X': [var]},
outputs={'Out': [fetch_var]},
attrs={'col': i})
self.executor.run(program.desc, 0)
return [
core.get_fetch_variable(fetch_var_name, i)
for i in xrange(len(fetch_list))
]
@@ -15,6 +15,7 @@ class Variable(object):
shape=None,
dtype=None,
lod_level=None,
persistable=False,
**kwargs):
self.block = block
@@ -70,6 +71,17 @@ class Variable(object):
"lod_level is {2}. They are not "
"matched".format(self.name, self.lod_level,
lod_level))
if persistable is not None:
if is_new_var:
self.desc.set_persistable(persistable)
else:
if persistable != self.persistable:
raise ValueError(
"Variable {0} has been created before."
"The previous persistable is {1}; the new "
"persistable is {2}. They are not matched".format(
self.name, self.persistable, persistable))
self.block.vars[name] = self
self.op = None
@@ -80,6 +92,10 @@ class Variable(object):
__repr__ = __str__
@property
def persistable(self):
return self.desc.persistable()
@property
def name(self):
return self.desc.name()
@@ -240,6 +256,7 @@ class Operator(object):
self.desc.set_block_attr(attr_name, attrs[attr_name].desc)
self.desc.check_attrs()
if type not in {'feed', 'fetch'}:
self.desc.infer_shape(self.block.desc)
def __str__(self):
@@ -307,9 +324,12 @@ class Block(object):
return self.desc.id
def var(self, name):
if not isinstance(name, basestring):
raise TypeError()
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name) raise ValueError("var %s not in this block" % name)
return self.vars[name] return v
def all_parameters(self): def all_parameters(self):
return {v for k, v in self.vars.iteritems() if isinstance(v, Parameter)} return {v for k, v in self.vars.iteritems() if isinstance(v, Parameter)}
...@@ -348,6 +368,7 @@ class Block(object): ...@@ -348,6 +368,7 @@ class Block(object):
for op_idx in range(0, self.desc.op_size()): for op_idx in range(0, self.desc.op_size()):
ops_in_cpp.append(self.desc.op(op_idx)) ops_in_cpp.append(self.desc.op(op_idx))
if len(self.ops) != 0:
first_op_in_python = self.ops[0].desc
last_op_in_python = self.ops[len(self.ops) - 1].desc
start_index = None
@@ -360,6 +381,9 @@ class Block(object):
assert start_index is not None
assert end_index is not None
assert start_index <= end_index
else:
start_index = 0
end_index = -1
# sync ops append to the head of cpp_ops
for index in range((start_index - 1 - 1), -1, -1):
@@ -379,14 +403,6 @@ class Block(object):
class Program(object):
@classmethod
def instance(cls):
# From https://stackoverflow.com/questions/8212053
# Making Program as a Singleton class.
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def __init__(self):
self.desc = core.ProgramDesc()
self.blocks = [Block(self, 0)]
@@ -397,7 +413,15 @@ class Program(object):
proto = framework_pb2.ProgramDesc.FromString(str(protostr))
return proto.__str__()
def clone(self):
p = Program()
p.desc = core.ProgramDesc(self.desc)
p.blocks = [Block(p, i) for i in xrange(self.desc.num_blocks())]
p.sync_with_cpp()
return p
def __repr__(self):
return str(self)
def global_block(self):
return self.blocks[0]
@@ -408,11 +432,13 @@ class Program(object):
def current_block(self):
return self.blocks[self.current_block_idx]
def append_backward(self, target, no_grad_set=None):
"""
return map(param_name -> (grad_name, block_index, op_index))
"""
assert isinstance(target, Variable)
if no_grad_set is None:
no_grad_set = set()
param_to_grad_info = self.desc.append_backward(target.desc, no_grad_set)
self.sync_with_cpp()
return param_to_grad_info
@@ -445,7 +471,9 @@ class Parameter(Variable):
if each < 0:
raise ValueError("Parameter shape should not be related with "
"batch-size")
Variable.__init__(self, block, shape=shape, dtype=dtype, **kwargs)
Variable.__init__(
self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
self.trainable = kwargs.get('trainable', True)
self.init_attr = kwargs.get('initialize_attr', {
'type': 'uniform_random',
@@ -470,4 +498,4 @@ class Parameter(Variable):
# program is a global instance.
g_program = Program()
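Two of the `framework.py` changes above are worth noting: variables can now be created with `persistable=True` (the executor marks its feed/fetch variables this way), and `Program.clone()` copies the underlying `ProgramDesc`, so feed/fetch operators can be inserted into a copy without mutating the user's program. A small sketch with illustrative names, assuming `Block.create_var` forwards the new keyword through to `Variable`:

```python
from paddle.v2.framework.framework import g_program

block = g_program.global_block()

# The new persistable flag is stored on the VarDesc and exposed as a property.
w = block.create_var(name='w', shape=[784, 100], dtype='float32', persistable=True)
print w.persistable  # True

# clone() builds an independent Program backed by a copied ProgramDesc.
program_copy = g_program.clone()
print str(program_copy)
```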
@@ -3,7 +3,7 @@ import paddle.v2.framework.core as core
from paddle.v2.framework.framework import OpProtoHolder, Variable
import re
__all__ = ['fc', 'data', 'cross_entropy', 'conv2d', 'pool2d']
def fc(input,
@@ -35,7 +35,10 @@ def fc(input,
"Y": w,
},
outputs={"Out": tmp},
attrs={
'x_num_col_dims': num_flatten_dims,
'y_num_col_dims': len(input_shape) - num_flatten_dims
})
mul_results.append(tmp)
# sum
@@ -55,8 +58,10 @@ def data(name,
shape,
data_type='float32',
type=core.VarDesc.VarType.LOD_TENSOR,
append_batch_size=True,
program=None):
helper = LayerHelper('data', **locals())
if append_batch_size:
shape = [-1] + shape # append batch size as -1
return helper.create_global_variable(
name=name, shape=shape, dtype=data_type, type=type)
@@ -112,7 +117,7 @@ def _create_op_func_(op_type):
_create_op_func_('mean')
_create_op_func_('mul')
def cross_entropy(input, label, **kwargs):
@@ -167,6 +172,13 @@ def conv2d(input,
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels / groups
if isinstance(filter_size, int):
filter_size = [filter_size, filter_size]
if isinstance(stride, int):
stride = [stride, stride]
if isinstance(padding, int):
padding = [padding, padding]
input_shape = input.shape
filter_shape = [num_filters, num_filter_channels] + filter_size
filter = helper.create_parameter(
@@ -187,3 +199,40 @@ def conv2d(input,
pre_act = helper.append_bias_op(pre_bias)
return helper.append_activation(pre_act)
def pool2d(input,
pool_size,
pool_type,
pool_stride=[1, 1],
pool_padding=[0, 0],
global_pooling=False,
program=None):
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
str(pool_type))
if isinstance(pool_size, int):
pool_size = [pool_size, pool_size]
if isinstance(pool_stride, int):
pool_stride = [pool_stride, pool_stride]
if isinstance(pool_padding, int):
pool_padding = [pool_padding, pool_padding]
helper = LayerHelper('conv2d', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_tmp_variable(dtype)
helper.append_op(
type="pool2d",
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding
})
return pool_out
import paddle.v2.framework.layers as layers
def simple_img_conv_pool(input,
filter_size,
num_filters,
pool_size,
pool_stride,
act,
program=None):
conv_out = layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
act=act,
program=program)
pool_out = layers.pool2d(
input=conv_out,
pool_size=pool_size,
pool_type='max',
pool_stride=pool_stride,
program=program)
return pool_out
@@ -33,14 +33,12 @@ class TestAdamOp1(OpTest):
self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
param_out, moment1_out, \
moment2_out = adam_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'Beta1PowOut': beta1_pow_out,
'Beta2PowOut': beta2_pow_out,
'ParamOut': param_out
}
@@ -78,14 +76,12 @@ class TestAdamOp2(OpTest):
attributes = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
param_out, moment1_out, \
moment2_out = adam_step(self.inputs, attributes)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'Beta1PowOut': beta1_pow_out,
'Beta2PowOut': beta2_pow_out,
'ParamOut': param_out
}
@@ -127,14 +123,12 @@ class TestAdamOpMultipleSteps(OpTest):
def test_check_output(self):
for _ in range(self.num_steps):
param_out, moment1_out, \
moment2_out = adam_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'Beta1PowOut': beta1_pow_out,
'Beta2PowOut': beta2_pow_out,
'ParamOut': param_out
}
@@ -145,8 +139,10 @@ class TestAdamOpMultipleSteps(OpTest):
self.inputs['Param'] = param_out
self.inputs['Moment1'] = moment1_out
self.inputs['Moment2'] = moment2_out
self.inputs['Beta1Pow'] = beta1_pow_out
# Update powers of Beta1 and Beta2 for next time step
self.inputs['Beta1Pow'] *= self.attrs['beta1']
self.inputs['Beta2Pow'] *= self.attrs['beta1']
# Randomize gradient for next step
self.inputs['Grad'] = np.random.uniform(
@@ -175,11 +171,9 @@ def adam_step(inputs, attributes):
moment1_out = beta1 * moment1 + (1 - beta1) * grad
moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
beta2_pow_out = beta2_pow * beta2
lr_t = lr * np.sqrt(1 - beta2_pow_out) / (1 - beta1_pow_out)
param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
return param_out, moment1_out, moment2_out
if __name__ == "__main__":
......
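For reference, the trimmed-down `adam_step` above (which the C++ kernel is checked against) now reads `Beta1Pow` and `Beta2Pow` as inputs only; the update it computes is the usual Adam rule:

```latex
m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t, \qquad
v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2, \qquad
\hat{\alpha}_t = \alpha \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}, \qquad
\theta_t = \theta_{t-1} - \hat{\alpha}_t \frac{m_t}{\sqrt{v_t} + \epsilon}
```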
@@ -31,14 +31,13 @@ class TestAdamaxOp1(OpTest):
self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
param_out, moment_out, inf_norm_out = adamax_step(self.inputs,
self.attrs)
self.outputs = {
'ParamOut': param_out,
'MomentOut': moment_out,
'InfNormOut': inf_norm_out
'Beta1PowOut': beta1_pow_out
}
def test_check_output(self):
@@ -73,14 +72,12 @@ class TestAdamaxOp2(OpTest):
}
attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
param_out, moment_out, inf_norm_out = adamax_step(self.inputs, attrs)
self.inputs, attrs)
self.outputs = {
'ParamOut': param_out,
'MomentOut': moment_out,
'InfNormOut': inf_norm_out
'Beta1PowOut': beta1_pow_out
}
def test_check_output(self):
@@ -117,19 +114,15 @@ class TestAdamaxOpMultipleSteps(OpTest):
self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
self.inputs, self.attrs)
def test_check_output(self):
for _ in range(self.num_steps):
param_out, moment_out, inf_norm_out = adamax_step(self.inputs,
self.attrs)
self.outputs = {
'ParamOut': param_out,
'MomentOut': moment_out,
'InfNormOut': inf_norm_out
'Beta1PowOut': beta1_pow_out
}
# Verify output for this step
@@ -139,7 +132,9 @@ class TestAdamaxOpMultipleSteps(OpTest):
self.inputs['Param'] = param_out
self.inputs['Moment'] = moment_out
self.inputs['InfNorm'] = inf_norm_out
self.inputs['Beta1Pow'] = beta1_pow_out
# Update Beta1 Power accumulator for next step
self.inputs['Beta1Pow'] *= self.attrs['beta1']
# Randomize gradient for next step
self.inputs['Grad'] = np.random.uniform(
@@ -167,11 +162,10 @@ def adamax_step(inputs, attributes):
moment_out = beta1 * moment + (1 - beta1) * grad
inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
lr_t = (lr / (1 - beta1_pow))
lr_t = (lr / (1 - beta1_pow_out))
param_out = param - lr_t * np.divide(moment_out, inf_norm_out)
return param_out, moment_out, inf_norm_out
if __name__ == "__main__":
......
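Similarly, `adamax_step` above now treats `Beta1Pow` as an input-only accumulator; the update it implements (note the epsilon folded into the max, matching the code) is:

```latex
m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t, \qquad
u_t = \max(\beta_2 u_{t-1} + \epsilon,\ |g_t|), \qquad
\hat{\alpha}_t = \frac{\alpha}{1 - \beta_1^t}, \qquad
\theta_t = \theta_{t-1} - \hat{\alpha}_t \frac{m_t}{u_t}
```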
import unittest
from paddle.v2.framework.layers import mul, data
import paddle.v2.framework.core as core
from paddle.v2.framework.executor import Executor
from paddle.v2.framework.framework import g_program
import numpy
class TestExecutor(unittest.TestCase):
def test_mul(self):
a = data(name='a', shape=[784], data_type='float32')
b = data(
name='b',
shape=[784, 100],
data_type='float32',
append_batch_size=False)
out = mul(x=a, y=b)
place = core.CPUPlace()
a_np = numpy.random.random((100, 784)).astype('float32')
tensor_a = core.LoDTensor()
tensor_a.set(a_np, place)
b_np = numpy.random.random((784, 100)).astype('float32')
tensor_b = core.LoDTensor()
tensor_b.set(b_np, place)
exe = Executor(place)
outs = exe.run(g_program,
feed={'a': tensor_a,
'b': tensor_b},
fetch_list=[out])
out = numpy.array(outs[0])
self.assertEqual((100, 100), out.shape)
self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
if __name__ == '__main__':
unittest.main()
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
from paddle.v2.framework.framework import Program, g_program
import paddle.v2.framework.core as core
import unittest
@@ -18,7 +19,7 @@ class TestBook(unittest.TestCase):
avg_cost = layers.mean(x=cost, program=program)
self.assertIsNotNone(avg_cost)
program.append_backward(avg_cost)
print str(program)
def test_recognize_digits_mlp(self):
@@ -38,24 +39,52 @@ class TestBook(unittest.TestCase):
cost = layers.cross_entropy(input=predict, label=label, program=program)
avg_cost = layers.mean(x=cost, program=program)
self.assertIsNotNone(avg_cost)
print str(program)
def test_simple_conv2d(self):
program = Program()
images = layers.data(
images = data_layer(
name='pixel', shape=[3, 48, 48], data_type='int32', program=program)
layers.conv2d(
input=images, num_filters=3, filter_size=[4, 4], program=program)
print str(program)
def test_recognize_digits_conv(self):
program = Program()
images = layers.data(
name='pixel',
shape=[1, 28, 28],
data_type='float32',
program=program)
label = layers.data(
name='label', shape=[1], data_type='int32', program=program)
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
num_filters=2,
pool_size=2,
pool_stride=2,
act="relu",
program=program)
conv_pool_2 = nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=4,
pool_size=2,
pool_stride=2,
act="relu",
program=program)
predict = layers.fc(input=conv_pool_2,
size=10,
act="softmax",
program=program)
cost = layers.cross_entropy(input=predict, label=label, program=program)
avg_cost = layers.mean(x=cost, program=program)
program.append_backward(avg_cost)
print str(program)
......
@@ -34,8 +34,26 @@ class TestProgram(unittest.TestCase):
self.assertEqual(1, b.idx)
self.assertEqual(0, b.parent_idx)
def test_program_clone(self):
prog = Program()
x = prog.global_block().create_var(
name='X', shape=[1000, 784], dtype='float32')
y = prog.global_block().create_var(
name='Y', shape=[784, 100], dtype='float32')
out = prog.global_block().create_var(name='Out', dtype='float32')
prog.global_block().append_op(
type="mul", inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
# FIXME(yuyang18): We manual compare the output string, since the order
# of variable could be changed.
print prog
print prog.clone()
def test_append_backward(self):
prog = Program()
block = prog.global_block()
mul_x = block.create_var(
......
@@ -8,29 +8,30 @@ class TestSelectedRows(unittest.TestCase):
place = core.CPUPlace()
height = 10
rows = [0, 4, 7]
row_numel = 12
selected_rows = core.SelectedRows(rows, height)
np_array = np.ones((len(rows), row_numel)).astype("float32")
np_array[0, 0] = 2.0
np_array[2, 8] = 4.0
tensor = selected_rows.get_tensor()
tensor.set(np_array, place)
# compare rows
self.assertEqual(0, selected_rows.rows()[0])
self.assertEqual(4, selected_rows.rows()[1])
self.assertEqual(7, selected_rows.rows()[2])
# compare height
self.assertEqual(10, selected_rows.height())
# compare tensor
self.assertAlmostEqual(2.0,
selected_rows.get_tensor().get_float_element(0))
self.assertAlmostEqual(1.0,
selected_rows.get_tensor().get_float_element(1))
self.assertAlmostEqual(
4.0,
selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
if __name__ == "__main__":
......
import unittest
import numpy as np
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
from op_test import OpTest
@@ -17,5 +19,70 @@ class TestSGDOp(OpTest):
self.check_output()
class TestSparseSGDOp(unittest.TestCase):
def check_with_place(self, place):
scope = core.Scope()
# create and initialize Grad Variable
height = 10
rows = [0, 4, 7]
row_numel = 12
grad_selected_rows = scope.var('Grad').get_selected_rows()
grad_selected_rows.set_height(height)
grad_selected_rows.set_rows(rows)
np_array = np.ones((len(rows), row_numel)).astype("float32")
np_array[0, 0] = 2.0
np_array[2, 8] = 4.0
grad_tensor = grad_selected_rows.get_tensor()
grad_tensor.set(np_array, place)
# create and initialize Param Variable
param = scope.var('Param').get_tensor()
param_array = np.full((height, row_numel), 5.0).astype("float32")
param.set(param_array, place)
# create and initialize LearningRate Variable
lr = scope.var('LearningRate').get_tensor()
lr_array = np.full((1), 2.0).astype("float32")
lr.set(lr_array, place)
# create and run sgd operator
sgd_op = Operator(
"sgd",
Param='Param',
Grad='Grad',
ParamOut='Param',
LearningRate='LearningRate')
ctx = core.DeviceContext.create(place)
sgd_op.run(scope, ctx)
# get and compare result
result_array = np.array(param)
# rows[0] = 0, 5.0 - 2.0 * 2.0
self.assertAlmostEqual(1.0, result_array[rows[0], 0])
# rows[0] = 0, 5.0 - 2.0 * 1.0
self.assertAlmostEqual(3.0, result_array[rows[0], 2])
# 5.0 - 2.0 * 0.0
self.assertAlmostEqual(5.0, result_array[1, 0])
# rows[1] = 4, 5.0 - 2.0 * 1.0
self.assertAlmostEqual(3.0, result_array[rows[1], 10])
# 5.0 - 2.0 * 0.0
self.assertAlmostEqual(5.0, result_array[5, 8])
# rows[2] = 7, 5.0 - 2.0 * 1.0
self.assertAlmostEqual(3.0, result_array[rows[2], 1])
# rows[2] = 7, 5.0 - 2.0 * 4.0
self.assertAlmostEqual(-3.0, result_array[rows[2], 8])
def test_sparse_sgd(self):
places = [core.CPUPlace()]
if core.is_compile_gpu():
places.append(core.GPUPlace(0))
for place in places:
self.check_with_place(place)
if __name__ == "__main__":
unittest.main()