Commit 0c61d9e3 authored by wangliu

refine unit test
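
Summary of the changes shown in the diff below:

* drop the verbose LOG statements from OpRegistry and the DLOG tracing from ConvKernel<CPU, float>::Compute and the sigmoid test
* remove the old Executor::predict(framework::Tensor &t) overload that wrote to the hard-coded "pixel" / "conv2d_0.tmp_0" variables
* get feed/output variables as framework::LoDTensor instead of framework::Tensor in Executor::InitMemory and Executor4Test (the retained predict path already feeds a LoDTensor)
* include the file name in the ReadBuffer open-failure message and relax the Loader dim-size check from > 1 to > 0
* load the test model from ../models/mobilenet instead of models/mobilenet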

Parent aaa7bc2b
@@ -90,14 +90,6 @@ class OpRegistry {
const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const AttributeMap attrs,
std::shared_ptr<paddle_mobile::framework::Scope> scope) {
LOG(paddle_mobile::kLOG_DEBUG1) << " type: " << type;
LOG(paddle_mobile::kLOG_DEBUG1) << " input size: " << inputs.size();
LOG(paddle_mobile::kLOG_DEBUG1) << " output size: " << outputs.size();
LOG(paddle_mobile::kLOG_DEBUG1) << " attr size: " << attrs.size();
LOG(paddle_mobile::kLOG_DEBUG1)
<< " OpInfoMap size: " << OpInfoMap<Dtype>::Instance()->map().size();
LOG(paddle_mobile::kLOG_DEBUG1) << " has type: " << type << " "
<< OpInfoMap<Dtype>::Instance()->Has(type);
auto& info = OpInfoMap<Dtype>::Instance()->Get(type);
auto op = info.Creator()(type, inputs, outputs, attrs, scope);
return std::shared_ptr<OperatorBase<Dtype>>(op);
......
@@ -45,7 +45,7 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
printf("%s \n", file_name);
FILE *fp;
fp = fopen(file_name, "rb");
PADDLE_MOBILE_ENFORCE(fp != NULL, "open failed !");
PADDLE_MOBILE_ENFORCE(fp != NULL, " %s open failed !", file_name);
fseek(fp, 0, SEEK_END);
size_t size = ftell(fp);
@@ -210,7 +210,7 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
tensor->Resize(framework::make_ddim(dim));
} else {
auto dim = var_desc->Tensor_desc().Dims();
- PADDLE_MOBILE_ENFORCE(dim.size() > 1, "dim size is 0");
+ PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
dim[0] = 1;
auto tensor = var->GetMutable<framework::LoDTensor>();
tensor->Resize(framework::make_ddim(dim));
@@ -380,7 +380,8 @@ void Executor<Dtype, P>::InitMemory() {
program_.model_path + "/" + var_desc->Name());
} else {
if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
- auto tensor = var->template GetMutable<framework::Tensor>();
+ auto tensor = var->template GetMutable<framework::LoDTensor>();
+ tensor->template mutable_data<Ptype>();
}
}
@@ -388,39 +389,12 @@ void Executor<Dtype, P>::InitMemory() {
}
}
- template <typename Dtype, Precision P>
- std::shared_ptr<framework::Tensor> Executor<Dtype, P>::predict(
-     framework::Tensor &t) {
-   // feed
-   auto scope = program_.scope;
-   framework::Variable *g_feed_value = scope->Var("pixel");
-   auto tensor = g_feed_value->GetMutable<framework::Tensor>();
-   tensor->ShareDataWith(t);
-   framework::Variable *con_output = scope->Var("conv2d_0.tmp_0");
-   framework::Tensor *output_tensor =
-       con_output->GetMutable<framework::Tensor>();
-   output_tensor->mutable_data<float>({1, 16, 32, 32});
-   // std::cout << typeid(output_tensor).name() << std::endl;
-   // std::cout << "output_tensor dims: " << output_tensor->dims() <<
-   // std::endl;
-   std::shared_ptr<framework::Tensor> out_tensor =
-       std::make_shared<framework::LoDTensor>();
-   out_tensor.reset(output_tensor);
-   predict(t, 0);
-   return out_tensor;
- }
template <typename Dtype, Precision P>
void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
auto feed_tensor = g_feed_value->GetMutable<framework::LoDTensor>();
feed_tensor->Resize(t.dims());
feed_tensor->ShareDataWith(t);
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
......
@@ -44,13 +44,13 @@ void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
std::vector<int> paddings = param.Paddings();
std::vector<int> dilations = param.Dilations();
DLOG << " compute end get Attrs " << strides[0];
// DLOG << " compute end get Attrs " << strides[0];
const int batch_size = static_cast<int>(input->dims()[0]);
std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
- std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));
+ std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));
size_t data_dim = filter_shape_vec.size() - 2;
std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
col_shape_vec[0] = input->dims()[1] / groups;
@@ -71,8 +71,6 @@ void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
col_matrix.ShareDataWith(col);
col_matrix.Resize(col_matrix_shape);
}
DLOG << " col_shape = " << col_shape;
DLOG << " col_matrix_shape = " << col_matrix_shape;
framework::DDim input_shape = framework::slice_ddim(
input->dims(), 1, static_cast<int>(input->dims().size()));
@@ -80,8 +78,6 @@ void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
framework::DDim filter_matrix_shape = {filter.dims()[0],
filter.numel() / filter.dims()[0]};
filter.Resize(filter_matrix_shape);
DLOG << " filter.deims() = " << filter.dims();
framework::DDim output_matrix_shape = {
output->dims()[1],
output->numel() / (output->dims()[0] * output->dims()[1])};
@@ -118,9 +114,6 @@ void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
// gemm
Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
DLOG << " out_slice " << out_slice.dims();
DLOG << " filter_slice " << filter_slice.dims();
DLOG << " col_matrix " << col_matrix.dims();
math::matmul<float>(filter_slice, false, col_matrix, false,
static_cast<float>(1), &out_slice,
static_cast<float>(0));
......
@@ -77,13 +77,13 @@ class Executor4Test : public Executor<DeviceType> {
const DDim &dDim) {
auto scope = this->program_.scope;
Variable *g_feed_value = scope->Var(input);
- auto tensor = g_feed_value->GetMutable<Tensor>();
+ auto tensor = g_feed_value->GetMutable<LoDTensor>();
tensor->ShareDataWith(t);
Variable *con_output = scope->Var(output);
- auto *output_tensor = con_output->GetMutable<Tensor>();
+ auto *output_tensor = con_output->GetMutable<LoDTensor>();
output_tensor->mutable_data<float>(dDim);
- std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
+ std::shared_ptr<LoDTensor> out_tensor = std::make_shared<LoDTensor>();
out_tensor.reset(output_tensor);
std::shared_ptr<paddle_mobile::framework::BlockDesc> to_predict_block =
......
@@ -19,16 +19,12 @@ limitations under the License. */
int main() {
paddle_mobile::framework::Tensor input;
paddle_mobile::framework::Tensor output;
- DLOG << 1;
SetupTensor<float>(&input, {1, 4, 60, 60}, static_cast<float>(0),
static_cast<float>(1));
- DLOG << 2;
auto out_ddim = paddle_mobile::framework::make_ddim({1, 4, 60, 60});
output.Resize(out_ddim);
- DLOG << 3;
paddle_mobile::operators::sigmoid(&input, &output);
- DLOG << 4;
auto *output_ptr = output.data<float>();
for (int j = 0; j < output.numel(); ++j) {
DLOG << " value of output: " << output_ptr[j];
......
@@ -18,7 +18,7 @@ limitations under the License. */
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("models/mobilenet"));
auto program = loader.Load(std::string("../models/mobilenet"));
if (program.originProgram == nullptr) {
DLOG << "program read file";
}
......