提交 c158bf6e 编写于 作者: 叶剑武

Merge branch 'norm_benchmark_half' into 'master'

Add fp16 test cases for MVNorm and LpNorm

See merge request !1225
......@@ -121,6 +121,63 @@ TEST_F(LpNormOpTest, SimpleTestSquareOpenCL2) {
0.680451, 0.732793, 0.683941, 0.729537});
}
namespace {
template <DeviceType D, typename T>
void TestLpNormRandom(const std::vector<index_t> &input_shape,
const int p,
const int axis) {
// Construct graph
OpsTestNet net;
// Add input data
net.AddRandomInput<D, float>("Input", input_shape);
net.TransformDataFormat<DeviceType::CPU, float>(
"Input", DataFormat::NHWC, "InputNCHW", DataFormat::NCHW);
OpDefBuilder("LpNorm", "LpNormTest")
.Input("InputNCHW")
.Output("OutputNCHW")
.AddIntArg("p", p)
.AddIntArg("axis", axis)
.Finalize(net.NewOperatorDef());
// run on cpu
net.RunOp();
net.TransformDataFormat<DeviceType::CPU, float>(
"OutputNCHW", DataFormat::NCHW, "Output", DataFormat::NHWC);
auto expected = net.CreateTensor<float>();
expected->Copy(*net.GetOutput("Output"));
OpDefBuilder("LpNorm", "LpNormTest")
.Input("Input")
.Output("Output")
.AddIntArg("p", p)
.AddIntArg("axis", axis)
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
net.RunOp(D);
if (DataTypeToEnum<T>::value == DT_HALF) {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-2, 1e-3);
} else {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
}
} // namespace
// fp16 (half) GPU path vs fp32 CPU reference: L2 norm (p = 2) along axis 1.
TEST_F(LpNormOpTest, SimpleTestSquareHalfOpenCL) {
TestLpNormRandom<DeviceType::GPU, half>({1, 8, 1, 2}, 2, 1);
}
// fp16 (half) GPU path vs fp32 CPU reference: L2 norm (p = 2) along axis 2.
TEST_F(LpNormOpTest, SimpleTestSquareHalfOpenCL2) {
TestLpNormRandom<DeviceType::GPU, half>({1, 8, 1, 2}, 2, 2);
}
} // namespace test
} // namespace ops
} // namespace mace
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "mace/core/types.h"
#include "mace/ops/ops_test_util.h"
namespace mace {
......@@ -161,6 +162,61 @@ TEST_F(MVNormOpTest, SimpleTestVarianceOpenCL) {
1.23241, 1.45648, 1.68056, 1.90463, 2.1287});
}
namespace {
template <DeviceType D, typename T>
void TestMVNormRandom(const std::vector<index_t> &input_shape,
bool normalize_variance,
bool across_channels) {
// Construct graph
OpsTestNet net;
// Add input data
net.AddRandomInput<D, float>("Input", input_shape);
net.TransformDataFormat<DeviceType::CPU, float>(
"Input", DataFormat::NHWC, "InputNCHW", DataFormat::NCHW);
OpDefBuilder("MVNorm", "MVNormTest")
.Input("InputNCHW")
.Output("OutputNCHW")
.AddIntArg("normalize_variance", normalize_variance)
.AddIntArg("across_channels", across_channels)
.Finalize(net.NewOperatorDef());
// run on cpu
net.RunOp();
net.TransformDataFormat<DeviceType::CPU, float>(
"OutputNCHW", DataFormat::NCHW, "Output", DataFormat::NHWC);
auto expected = net.CreateTensor<float>();
expected->Copy(*net.GetOutput("Output"));
OpDefBuilder("MVNorm", "MVNormTest")
.Input("Input")
.Output("Output")
.AddIntArg("normalize_variance", normalize_variance)
.AddIntArg("across_channels", across_channels)
.AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
.Finalize(net.NewOperatorDef());
net.RunOp(D);
if (DataTypeToEnum<T>::value == DT_HALF) {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-2, 1e-3);
} else {
ExpectTensorNear<float>(*expected, *net.GetOutput("Output"), 1e-5);
}
}
} // namespace
// fp16 (half) GPU path vs fp32 CPU reference: mean subtraction only
// (normalize_variance = false), across channels.
TEST_F(MVNormOpTest, SimpleTestMeanHalfOpenCL) {
TestMVNormRandom<DeviceType::GPU, half>({1, 1, 5, 12}, false, true);
}
// fp16 (half) GPU path vs fp32 CPU reference: full mean-variance
// normalization (normalize_variance = true), across channels.
TEST_F(MVNormOpTest, SimpleTestVarianceHalfOpenCL) {
TestMVNormRandom<DeviceType::GPU, half>({1, 1, 5, 12}, true, true);
}
} // namespace test
} // namespace ops
} // namespace mace
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册