Commit d21f279a authored by rensilin, committed by iCode

Merge changes Iecc2518d,I69ec0b9d,Ie5622d75

* changes:
  loss_function(*output)
  fix
  update_create_programs
@@ -12,87 +12,140 @@ def print_help(this_name):
"""Print help
"""
dirname = os.path.dirname(this_name)
print("Usage: {} <network building filename> [model_dir]\n".format(this_name))
print(" example: {} {}".format(this_name, os.path.join(dirname, 'example.py')))
print('Usage: {} <network building filename> [model_dir]\n'.format(this_name))
print(' example: {} {}'.format(this_name, os.path.join(dirname, 'example.py')))
class ModelBuilder:
"""
Attributes:
_save_path: Save path of programs
def _inference():
Build the inference network (without loss and optimizer)
**This function is declared in the network_desc_path file, and will be set in initialize()**
Returns:
list<Variable>: inputs
and
list<Variable>: outputs
pass
def _loss_function(*outputs):
**This function is declared in the network_desc_path file, and will be set in initialize()**
Args:
*outputs: the output Variables returned by inference() (its second return value)
Returns:
Variable: loss
and
list<Variable>: labels
pass
"""
def initialize(self, network_desc_path, save_path=None):
"""compile the network description module
Args:
network_desc_path: path
save_path: model save path, default is ./model/<network_desc_path without .py>/
Returns:
bool: True if succeed else False
"""
if not isinstance(network_desc_path, str):
print('network_desc_path must be str')
return False
if not network_desc_path.endswith('.py'):
print('network_desc_path must end with .py')
return False
if not os.path.exists(network_desc_path):
print('file does not exist:', network_desc_path)
return False
scope = dict()
with open(network_desc_path, 'r') as f:
code = f.read()
compiled = compile(code, network_desc_path, 'exec')
exec(compiled, scope)
if 'inference' not in scope:
print('inference not defined')
return False
if 'loss_function' not in scope:
print('loss_function not defined')
return False
if save_path is None:
# example /a/b/c.d -> ./model/c
save_path = os.path.join('./model', os.path.splitext(os.path.split(network_desc_path)[1])[0])
print('save in the default path:', save_path)
self._save_path = save_path
self._inference = scope['inference']
self._loss_function = scope['loss_function']
return True
def build_and_save(self):
"""Build programs and save to _save_path
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
inputs, outputs = self._inference()
test_program = main_program.clone(for_test=True)
loss, labels = self._loss_function(*outputs)
optimizer = fluid.optimizer.SGD(learning_rate=1.0)
params_grads = optimizer.backward(loss)
if not os.path.exists(self._save_path):
os.makedirs(self._save_path)
programs = {
'startup_program': startup_program,
'main_program': main_program,
'test_program': test_program,
}
for name, program in programs.items():
with open(os.path.join(self._save_path, name), 'w') as f:
f.write(program.desc.serialize_to_string())
model_desc_path = os.path.join(self._save_path, 'model.yaml')
model_desc = {
'inputs': [{"name": var.name, "shape": var.shape} for var in inputs],
'outputs': [{"name": var.name, "shape": var.shape} for var in outputs],
'labels': [{"name": var.name, "shape": var.shape} for var in labels],
'loss': loss.name,
}
with open(model_desc_path, 'w') as f:
yaml.safe_dump(model_desc, f, encoding='utf-8', allow_unicode=True)
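# A hedged sketch of the model.yaml written above for the bundled example.py network
# (illustrative only; the exact output variable name is an assumption, since fluid derives
# it from the 'ctr' fc layer, while cvm_input, label_ctr and loss_ctr match the names used
# in example.py):
#
#   inputs:
#   - {name: cvm_input, shape: [-1, 4488]}
#   outputs:
#   - {name: <ctr fc output var>, shape: [-1, 1]}
#   labels:
#   - {name: label_ctr, shape: [-1, 1]}
#   loss: loss_ctr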
def inference_wrapper(filename):
"""Build the inference network (without loss and optimizer)
Args:
filename: path of file which defined real inference function
Returns:
list<Variable>: inputs
and
list<Variable>: outputs
"""
with open(filename, 'r') as f:
code = f.read()
compiled = compile(code, filename, 'exec')
scope = dict()
exec(compiled, scope)
return scope['inference']()
def main(argv):
"""Create programs
Args:
argv: argument list; length should be 2 or 3 (script name, network description file[, save path])
"""
if len(argv) < 2 or not os.path.exists(argv[1]):
if len(argv) < 2:
print_help(argv[0])
exit(1)
network_build_file = argv[1]
network_desc_path = argv[1]
if len(argv) > 2:
model_dir = argv[2]
save_path = argv[2]
else:
model_dir = './model'
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
inputs, outputs = inference_wrapper(network_build_file)
test_program = main_program.clone(for_test=True)
labels = list()
losses = list()
for output in outputs:
label = fluid.layers.data(name='label_' + output.name, shape=output.shape, dtype='float32')
loss = fluid.layers.square_error_cost(input=output, label=label)
loss = fluid.layers.mean(loss, name='loss_' + output.name)
labels.append(label)
losses.append(loss)
loss_all = fluid.layers.sum(losses)
optimizer = fluid.optimizer.SGD(learning_rate=1.0)
params_grads = optimizer.backward(loss_all)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
programs = {
'startup_program': startup_program,
'main_program': main_program,
'test_program': test_program,
}
for save_path, program in programs.items():
with open(os.path.join(model_dir, save_path), 'w') as f:
f.write(program.desc.serialize_to_string())
model_desc_path = os.path.join(model_dir, 'model.yaml')
model_desc = {
'inputs': [{"name": var.name, "shape": var.shape} for var in inputs],
'outputs': [{"name": var.name, "shape": var.shape, "label_name": label.name, "loss_name": loss.name} for var, label, loss in zip(outputs, labels, losses)],
'loss_all': loss_all.name,
}
with open(model_desc_path, 'w') as f:
yaml.safe_dump(model_desc, f, encoding='utf-8', allow_unicode=True)
save_path = None
builder = ModelBuilder()
if not builder.initialize(network_desc_path, save_path):
print_help(argv[0])
exit(1)
builder.build_and_save()
if __name__ == "__main__":
main(sys.argv)
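# Hedged usage sketch (illustrative; the script filename below is assumed, mirroring the
# print_help() text and the default save-path logic in ModelBuilder.initialize):
#
#   python create_programs.py example.py               # programs + model.yaml land in ./model/example/
#   python create_programs.py example.py ./model/demo  # explicit save path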
@@ -21,13 +21,31 @@ def inference():
cvm_input = fluid.layers.data(name='cvm_input', shape=[4488], dtype='float32')
net = cvm_input
net = fluid.layers.fc(net, 512, act='relu')
net = fluid.layers.fc(net, 256, act='relu')
net = fluid.layers.fc(net, 256, act='relu')
net = fluid.layers.fc(net, 128, act='relu')
net = fluid.layers.fc(net, 128, act='relu')
net = fluid.layers.fc(net, 128, act='relu')
net = fluid.layers.fc(net, 128, act='relu')
net = fluid.layers.fc(net, 512, act='relu', name='fc_1')
net = fluid.layers.fc(net, 256, act='relu', name='fc_2')
net = fluid.layers.fc(net, 256, act='relu', name='fc_3')
net = fluid.layers.fc(net, 128, act='relu', name='fc_4')
net = fluid.layers.fc(net, 128, act='relu', name='fc_5')
net = fluid.layers.fc(net, 128, act='relu', name='fc_6')
net = fluid.layers.fc(net, 128, act='relu', name='fc_7')
ctr_output = fluid.layers.fc(net, 1, act='sigmoid', name='ctr')
return [cvm_input], [ctr_output]
def loss_function(ctr_output):
"""
Args:
ctr_output: the output Variable returned by inference()
Returns:
Variable: loss
and
list<Variable>: labels
"""
# TODO: calc loss here
label = fluid.layers.data(name='label_ctr', shape=ctr_output.shape, dtype='float32')
loss = fluid.layers.square_error_cost(input=ctr_output, label=label)
loss = fluid.layers.mean(loss, name='loss_ctr')
return loss, [label]
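# Hedged numeric sketch (illustrative only): square_error_cost followed by mean over a
# single-example batch reduces to the squared difference, which is what the C++ test
# further down in this commit checks with ASSERT_NEAR(loss, pow(output - label, 2), 1e-8).
# For instance:
#
#   ctr_output = 0.7, label_ctr = 0.4  ->  loss_ctr = (0.7 - 0.4) ** 2 = 0.09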
#include <iostream>
#include <fstream>
#include <gtest/gtest.h>
#include <cstdlib>
#include <cmath>
#include "paddle/fluid/train/custom_trainer/feed/executor/executor.h"
#include "paddle/fluid/framework/tensor_util.h"
@@ -50,6 +52,11 @@ public:
context_ptr = nullptr;
}
float random(float min_x = 0.0, float max_x = 1.0) {
float r = static_cast<float>(rand()) / RAND_MAX;
return min_x + (max_x - min_x) * r;
}
std::shared_ptr<TrainerContext> context_ptr;
};
@@ -60,38 +67,49 @@ TEST_F(CreateProgramsTest, example_network) {
auto config = YAML::Load(string::format_string("{thread_num: 2, startup_program: %s, main_program: %s}", startup_program_path, main_program_path));
auto model_desc = YAML::LoadFile(model_desc_path);
ASSERT_EQ(0, executor->initialize(config, context_ptr));
std::string input_name = "cvm_input";
std::string loss_name = "loss_ctr";
std::string label_name = "label_ctr";
// loss
ASSERT_TRUE(model_desc["loss"]);
ASSERT_EQ(loss_name, model_desc["loss"].as<std::string>());
// input
ASSERT_TRUE(model_desc["inputs"]);
ASSERT_EQ(1, model_desc["inputs"].size());
ASSERT_TRUE(model_desc["inputs"][0]["name"]);
ASSERT_TRUE(model_desc["inputs"][0]["shape"]);
ASSERT_EQ(input_name, model_desc["inputs"][0]["name"].as<std::string>());
std::vector<int> input_shape = model_desc["inputs"][0]["shape"].as<std::vector<int>>(std::vector<int>());
auto input_shape = model_desc["inputs"][0]["shape"].as<std::vector<int>>(std::vector<int>());
ASSERT_EQ(2, input_shape.size());
ASSERT_EQ(-1, input_shape[0]);
ASSERT_EQ(4488, input_shape[1]);
ASSERT_TRUE(model_desc["loss_all"]);
auto loss_all_name = model_desc["loss_all"].as<std::string>();
// label
ASSERT_TRUE(model_desc["labels"]);
ASSERT_EQ(1, model_desc["labels"].size());
ASSERT_TRUE(model_desc["labels"][0]["name"]);
ASSERT_TRUE(model_desc["labels"][0]["shape"]);
ASSERT_EQ(label_name, model_desc["labels"][0]["name"].as<std::string>());
auto label_shape = model_desc["labels"][0]["shape"].as<std::vector<int>>(std::vector<int>());
ASSERT_EQ(2, label_shape.size());
ASSERT_EQ(-1, label_shape[0]);
ASSERT_EQ(1, label_shape[1]);
ASSERT_TRUE(model_desc["outputs"]);
ASSERT_EQ(1, model_desc["outputs"].size());
ASSERT_TRUE(model_desc["outputs"][0]["name"]);
ASSERT_TRUE(model_desc["outputs"][0]["shape"]);
ASSERT_TRUE(model_desc["outputs"][0]["label_name"]);
ASSERT_TRUE(model_desc["outputs"][0]["loss_name"]);
auto ctr_output_label_name = model_desc["outputs"][0]["label_name"].as<std::string>();
auto ctr_output_loss_name = model_desc["outputs"][0]["loss_name"].as<std::string>();
auto ctr_output_name = model_desc["outputs"][0]["name"].as<std::string>();
std::vector<int> output_shape = model_desc["outputs"][0]["shape"].as<std::vector<int>>(std::vector<int>());
auto output_name = model_desc["outputs"][0]["name"].as<std::string>();
auto output_shape = model_desc["outputs"][0]["shape"].as<std::vector<int>>(std::vector<int>());
ASSERT_EQ(2, output_shape.size());
ASSERT_EQ(-1, output_shape[0]);
ASSERT_EQ(1, output_shape[1]);
auto input_var = executor->mutable_var<::paddle::framework::LoDTensor>(input_name);
auto label_var = executor->mutable_var<::paddle::framework::LoDTensor>(ctr_output_label_name);
auto label_var = executor->mutable_var<::paddle::framework::LoDTensor>(label_name);
ASSERT_NE(nullptr, input_var);
ASSERT_NE(nullptr, label_var);
@@ -99,28 +117,26 @@ TEST_F(CreateProgramsTest, example_network) {
auto input_data = input_var->mutable_data<float>(context_ptr->cpu_place);
ASSERT_NE(nullptr, input_data);
for (int i = 0; i < input_shape[1]; ++i) {
input_data[i] = 0.1;
input_data[i] = random();
}
label_var->Resize({1, 1});
auto label_data = label_var->mutable_data<float>(context_ptr->cpu_place);
ASSERT_NE(nullptr, label_data);
label_data[0] = 0.5;
label_data[0] = random();
ASSERT_EQ(0, executor->run());
auto loss_var = executor->var<::paddle::framework::LoDTensor>(ctr_output_loss_name);
auto loss_var = executor->var<::paddle::framework::LoDTensor>(loss_name);
auto loss = loss_var.data<float>()[0];
auto loss_all_var = executor->var<::paddle::framework::LoDTensor>(loss_all_name);
auto loss_all = loss_all_var.data<float>()[0];
auto ctr_output_var = executor->var<::paddle::framework::LoDTensor>(ctr_output_name);
auto ctr_output = ctr_output_var.data<float>()[0];
auto output_var = executor->var<::paddle::framework::LoDTensor>(output_name);
auto output = output_var.data<float>()[0];
std::cout << "loss: " << loss << std::endl;
std::cout << "ctr_output: " << ctr_output << std::endl;
ASSERT_NEAR(loss, loss_all, 1e-9);
VLOG(3) << "loss: " << loss << std::endl;
VLOG(3) << "label: " << label_data[0] << std::endl;
VLOG(3) << "output: " << output << std::endl;
ASSERT_NEAR(loss, pow(output - label_data[0], 2), 1e-8);
}
} // namespace feed
@@ -91,7 +91,7 @@ TEST_F(SimpleExecutorTest, run) {
auto config = YAML::Load(string::format_string("{thread_num: 2, startup_program: %s, main_program: %s}", startup_program_path, main_program_path));
ASSERT_EQ(0, executor->initialize(config, context_ptr));
auto x_var = executor->mutable_var<::paddle::framework::LoDTensor>("x");
ASSERT_NE(nullptr, x_var);