Commit a66e0e35 authored by: liuqi

Finish gcn converter and validation.

Parent 8d834826
......@@ -129,15 +129,21 @@ int main(int argc, char **argv) {
// save output
const Tensor *output = ws.GetTensor(output_node + ":0");
Tensor::MappingGuard output_guard(output);
ofstream out_file(output_file, ios::binary);
out_file.write((const char *)(output->data<float>()),
output->size() * sizeof(float));
out_file.flush();
out_file.close();
VLOG(0) << "Output shape: ["
<< output->dim(0) << ", "
<< output->dim(1) << ", "
<< output->dim(2) << ", "
<< output->dim(3) << "]";
std::remove(output_file.c_str());
if (output != nullptr) {
Tensor::MappingGuard output_guard(output);
ofstream out_file(output_file, ios::binary);
out_file.write((const char *)(output->data<float>()),
output->size() * sizeof(float));
out_file.flush();
out_file.close();
stringstream ss;
ss << "Output shape: [";
for (int i = 0; i < output->dim_size(); ++i) {
ss << output->dim(i) << ", ";
}
ss << "]";
VLOG(0) << ss.str();
}
}
\ No newline at end of file
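The hunk above dumps the output tensor as raw float32 bytes into output_file and logs its shape. A minimal host-side sketch for reading such a dump back with NumPy, assuming the shape is taken from the logged dimensions (the 1,512,512,2 shape below is only the one used by the GCN validation later in this commit):

import numpy as np

# Read the raw float32 dump written by mace_run and restore its shape.
# The default shape is an assumption borrowed from the GCN validation
# script in this commit; substitute the shape printed in the log.
def read_mace_output(path, shape=(1, 512, 512, 2)):
    data = np.fromfile(path, dtype=np.float32)
    return data.reshape(shape)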
......@@ -408,3 +408,81 @@ TEST_F(FusedConv2dOpTest, OPENCLHalfAlignedConvNxNS12) {
TestHalfComplexConvNxNS12<DeviceType::OPENCL>({32, 32, 32, 64});
}
template<DeviceType D, typename T>
static void TestGeneralConvNxNS12(const std::vector<index_t> &image_shape,
const std::vector<index_t> &filter_shape) {
testing::internal::LogToStderr();
auto func = [&](int stride_h, int stride_w, Padding type) {
srand(time(NULL));
// generate random input
index_t batch = 1;
index_t height = image_shape[0];
index_t width = image_shape[1];
index_t input_channels = filter_shape[2];
index_t output_channels = filter_shape[3];
index_t kernel_h = filter_shape[0];
index_t kernel_w = filter_shape[1];
// Construct graph
OpsTestNet net;
OpDefBuilder("FusedConv2D", "FusedConv2dTest")
.Input("Input")
.Input("Filter")
.Input("Bias")
.Output("Output")
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
// Add input data
net.AddRandomInput<D, T>("Input", {batch, height, width, input_channels});
net.AddRandomInput<D, T>(
"Filter", {kernel_h, kernel_w, input_channels, output_channels});
net.AddRandomInput<D, T>("Bias", {output_channels});
// run on cpu
net.RunOp();
// Check
Tensor expected;
expected.Copy(*net.GetOutput("Output"));
// run on gpu
BufferToImage<D, T>(net, "Input", "InputImage", kernels::BufferType::IN_OUT);
BufferToImage<D, T>(net, "Filter", "FilterImage", kernels::BufferType::FILTER);
BufferToImage<D, T>(net, "Bias", "BiasImage", kernels::BufferType::ARGUMENT);
OpDefBuilder("FusedConv2D", "FusedConv2dTest")
.Input("InputImage")
.Input("FilterImage")
.Input("BiasImage")
.Output("OutputImage")
.AddIntsArg("strides", {stride_h, stride_w})
.AddIntArg("padding", type)
.AddIntsArg("dilations", {1, 1})
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
// Run on device
net.RunOp(D);
ImageToBuffer<D, T>(net, "OutputImage", "OPENCLOutput", kernels::BufferType::IN_OUT);
ExpectTensorNear<float>(expected, *net.GetOutput("OPENCLOutput"), 0.001);
};
for (int stride : {1, 2}) {
func(stride, stride, VALID);
func(stride, stride, SAME);
}
}
TEST_F(FusedConv2dOpTest, OPENCL7X7ConvNxNS12) {
TestGeneralConvNxNS12<DeviceType::OPENCL, float>({32, 32},
{7, 7, 3, 64});
}
TEST_F(FusedConv2dOpTest, OPENCL15X1ConvNxNS12) {
TestGeneralConvNxNS12<DeviceType::OPENCL, float>({40, 40},
{15, 1, 32, 64});
}
......@@ -24,7 +24,7 @@ def main(unused_args):
input_graph_def, FLAGS.input_node, FLAGS.output_node, FLAGS.prequantize)
else:
output_graph_def = tf_converter_lib.convert_to_mace_pb(
input_graph_def, FLAGS.input_node, FLAGS.output_node, FLAGS.runtime)
input_graph_def, FLAGS.input_node, FLAGS.output_node, FLAGS.data_type, FLAGS.runtime)
with gfile.GFile(FLAGS.output, "wb") as f:
f.write(output_graph_def.SerializeToString())
......@@ -67,6 +67,11 @@ def parse_args():
type=bool,
default=False,
help="e.g., False")
parser.add_argument(
"--data_type",
type=str,
default='DT_FLOAT',
help="e.g., DT_HALF/DT_FLOAT")
return parser.parse_known_args()
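A small, hypothetical guard (not part of this commit) that could run before convert_to_mace_pb is called, rejecting unsupported --data_type values early; the accepted values simply mirror the help text above:

# Hypothetical guard, not in this commit: fail fast on an unsupported
# --data_type instead of passing it through to the converter.
SUPPORTED_DATA_TYPES = ('DT_FLOAT', 'DT_HALF')

def check_data_type(data_type):
    if data_type not in SUPPORTED_DATA_TYPES:
        raise ValueError('Unsupported data_type: %s, expected one of %s'
                         % (data_type, SUPPORTED_DATA_TYPES))
    return data_type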
......
import argparse
import sys
import os
import os.path
import tensorflow as tf
import numpy as np
......@@ -25,13 +27,23 @@ def generate_data(shape):
print "Generate input file done."
def load_data(file):
return np.fromfile(file=file, dtype=np.float32)
if os.path.isfile(file):
return np.fromfile(file=file, dtype=np.float32)
else:
return np.empty([0])
def valid_output(out_shape, mace_out_file, tf_out_value):
mace_out_value = load_data(mace_out_file)
mace_out_value = mace_out_value.reshape(out_shape)
res = np.allclose(tf_out_value, mace_out_value, rtol=0, atol=1e-5)
print 'Passed! Haha' if res else 'Failed! Oops'
if mace_out_value.size != 0:
mace_out_value = mace_out_value.reshape(out_shape)
np.testing.assert_allclose(tf_out_value, mace_out_value, rtol=0, atol=1e-3)
res = np.allclose(tf_out_value, mace_out_value, rtol=0, atol=1e-3)
if res:
print '=======================Passed! Haha======================'
else:
print '=======================Failed! Oops======================'
else:
print '=======================Skip empty node==================='
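A standalone sketch of the comparison that valid_output performs above, assuming the same raw-float32 dump format and the same absolute tolerance; it can be handy when checking a pulled gcn.out by hand:

import numpy as np

# Sketch of the valid_output check: compare a MACE output dump against a
# TensorFlow reference array with rtol=0, atol=1e-3.
def compare_outputs(mace_out_file, tf_out_value, out_shape, atol=1e-3):
    mace_out_value = np.fromfile(mace_out_file, dtype=np.float32)
    if mace_out_value.size == 0:
        return None  # empty node, nothing to compare
    mace_out_value = mace_out_value.reshape(out_shape)
    return np.allclose(tf_out_value, mace_out_value, rtol=0, atol=atol)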
def run_model(input_shape):
......@@ -55,6 +67,7 @@ def run_model(input_shape):
input_value = input_value.reshape(input_shape)
output_value = session.run(output_node, feed_dict={input_node: [input_value]})
# output_value.astype(np.float32).tofile( os.path.dirname(FLAGS.input_file) + '/tf_weight')
return output_value
def main(unused_args):
......
#!/bin/bash
# Must run at root dir of mace project.
set -e
Usage() {
echo 'Usage: bash tools/validate_gcn.sh tf_model_file'
......@@ -16,23 +15,26 @@ MODEL_DIR=$(dirname ${TF_MODEL_FILE_PATH})
MACE_MODEL_NAME='mace_model.pb'
INPUT_FILE_NAME='model_input'
OUTPUT_FILE_NAME='gcn.out'
OUTPUT_LIST_FILE='gcn.list'
PHONE_DATA_DIR="/data/local/tmp/${MACE_MODEL_NAME}"
KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
# Step 1: convert tf model to mace model
echo "Step 1: convert tf model to mace model"
# Step 1: Generate input data
echo "Step 1: Generate input data"
python tools/validate.py --generate_data true --random_seed 1 \
--input_file=${MODEL_DIR}/${INPUT_FILE_NAME} \
--input_shape=512,512,3
# Step 2: convert tf model to mace model
echo "Step 2: convert tf model to mace model"
bazel build //mace/python/tools:tf_converter
bazel-bin/mace/python/tools/tf_converter --input=${TF_MODEL_FILE_PATH} \
--output=${MODEL_DIR}/${MACE_MODEL_NAME} \
--input_node=input \
--output_node=GCN/br_result_2/fcn_br \
--data_type=DT_FLOAT \
--runtime=gpu
# Step 2: Generate input data
echo "Step 2: Generate input data"
python tools/validate.py --generate_data true --random_seed 1 \
--input_file=${MODEL_DIR}/${INPUT_FILE_NAME} \
--input_shape=512,512,3
# Step 3: Run model on the phone
echo "Step 3: Run model on the phone"
......@@ -50,28 +52,29 @@ adb push bazel-bin/mace/examples/mace_run ${PHONE_DATA_DIR}
num_threads=${1:-1}
adb shell MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \
MACE_KERNEL_PATH=$KERNEL_DIR \
OMP_NUM_THREADS=$num_threads \
${PHONE_DATA_DIR}/mace_run \
--model=${PHONE_DATA_DIR}/${MACE_MODEL_NAME} \
--input=mace_input_node \
--output=mace_output_node \
--input_shape=1,512,512,3\
--input_file=${PHONE_DATA_DIR}/${MACE_INPUT_FILE_NAME} \
--output_file=${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} \
--device=OPENCL
adb </dev/null shell MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \
MACE_KERNEL_PATH=$KERNEL_DIR \
OMP_NUM_THREADS=$num_threads \
${PHONE_DATA_DIR}/mace_run \
--model=${PHONE_DATA_DIR}/${MACE_MODEL_NAME} \
--input=mace_input_node \
--output=mace_output_node \
--input_shape=1,512,512,3 \
--input_file=${PHONE_DATA_DIR}/${INPUT_FILE_NAME} \
--output_file=${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} \
--device=OPENCL
# Step 4: pull the mace run result.
echo "Step 4: pull the mace run result."
adb pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_DIR}
rm -rf ${MODEL_DIR}/${OUTPUT_FILE_NAME}
adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_DIR}
# Step 5: validate the result
echo "Step 5: validate the result"
python tools/validate.py --model_file ${TF_MODEL_FILE_PATH} \
--input_file ${MODEL_DIR}/${INPUT_FILE_NAME} \
--mace_out_file ${MODEL_DIR}/${OUTPUT_FILE_NAME} \
--input_node input \
--output_node GCN/br_result_2/fcn_br
--input_file ${MODEL_DIR}/${INPUT_FILE_NAME} \
--mace_out_file ${MODEL_DIR}/${OUTPUT_FILE_NAME} \
--input_node input \
--output_node GCN/br_result_2/fcn_br \
--output_shape 1,512,512,2
import argparse
import sys
import tensorflow as tf
import numpy as np
from tensorflow import gfile
# Validation Flow:
# 1. Generate input data
# python validate_icnet.py --generate_data 1 \
# --random_seed 1
# 2. Use mace_run to run icnet on phone.
# 3. adb pull the result.
# 4. Compare output data of mace and tf
# python validate_icnet.py --model_file opt_icnet.pb \
# --tf_input_file input_file \
# --mace_out_file icnet.out
def generate_data(shape):
np.random.seed(FLAGS.random_seed)
data = np.random.random(shape)
print FLAGS.tf_input_file
data.astype(np.float32).tofile(FLAGS.tf_input_file)
mace_data = np.transpose(data, axes=(2, 0, 1))
mace_data.astype(np.float32).tofile(FLAGS.mace_input_file)
print "Generate input file done."
def load_data(file):
return np.fromfile(file=file, dtype=np.float32)
def valid_output(out_shape, mace_out_file, tf_out_value):
mace_out_value = load_data(mace_out_file)
mace_out_value = mace_out_value.reshape(out_shape)
tf_out_data_t = np.transpose(tf_out_value, axes=(0, 3, 1, 2))
res = np.allclose(mace_out_value, tf_out_data_t, rtol=0, atol=1e-5)
print 'Passed! Haha' if res else 'Failed! Oops'
def run_model(input_shape):
if not gfile.Exists(FLAGS.model_file):
print("Input graph file '" + FLAGS.model_file + "' does not exist!")
return -1
input_graph_def = tf.GraphDef()
with gfile.Open(FLAGS.model_file, "rb") as f:
data = f.read()
input_graph_def.ParseFromString(data)
tf.import_graph_def(input_graph_def, name="")
with tf.Session() as session:
with session.graph.as_default() as graph:
tf.import_graph_def(input_graph_def, name="")
input_node = graph.get_tensor_by_name('input_node:0')
output_node = graph.get_tensor_by_name('output_node:0')
input_value = load_data(FLAGS.tf_input_file)
input_value = input_value.reshape(input_shape)
output_value = session.run(output_node, feed_dict={input_node: [input_value]})
return output_value
def main(unused_args):
input_shape = [int(x) for x in FLAGS.input_shape.split(',')]
output_shape = [int(x) for x in FLAGS.output_shape.split(',')]
if FLAGS.generate_data:
generate_data(input_shape)
else:
output_value = run_model(input_shape)
valid_output(output_shape, FLAGS.mace_out_file, output_value)
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_file",
type=str,
default="",
help="TensorFlow \'GraphDef\' file to load.")
parser.add_argument(
"--tf_input_file",
type=str,
default="",
help="tensorflow input data to load.")
parser.add_argument(
"--mace_input_file",
type=str,
default="",
help="mace input data to load.")
parser.add_argument(
"--mace_out_file",
type=str,
default="",
help="mace output file to load.")
parser.add_argument(
"--input_shape",
type=str,
default="480,480,3",
help="input shape.")
parser.add_argument(
"--output_shape",
type=str,
default="1,2,480,480",
help="output shape.")
parser.add_argument(
"--generate_data",
type='bool',
default="false",
help="Random seed for generate test case.")
parser.add_argument(
"--random_seed",
type=int,
default="0",
help="Random seed for generate test case.")
return parser.parse_known_args()
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
main(unused_args=[sys.argv[0]] + unparsed)