未验证 提交 c8e26fea 编写于 作者: A Allen Guo 提交者: GitHub

[IPU] add custom-op UTs 0/N (#44328)

* add custom-op UTs 0

* add authors
Co-authored-by: Allen Guo <alleng@graphcore.ai>
Co-authored-by: Zhixin Yao <zhixiny@graphcore.ai>
Co-authored-by: Zhaorui Chen <zhaoruic@graphcore.ai>
上级 9181a99b
......@@ -4,7 +4,6 @@ if(WITH_IPU)
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
"test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
# set all UTs timeout to 200s
......@@ -15,4 +14,7 @@ if(WITH_IPU)
set_tests_properties(test_elemetwise_x_op_ipu PROPERTIES TIMEOUT 300)
set_tests_properties(test_reduce_x_op_ipu PROPERTIES TIMEOUT 600)
set_tests_properties(test_save_load_ipu PROPERTIES TIMEOUT 600)
add_subdirectory(custom_ops)
endif()
if(WITH_IPU)
# Collect every custom-op test script (test_*.py) in this directory.
file(
GLOB CUSTOM_OP_TESTS
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
"test_*.py")
# Strip the .py suffix so each glob entry becomes a valid test target name.
string(REPLACE ".py" "" CUSTOM_OP_TESTS "${CUSTOM_OP_TESTS}")
# Register one python test per discovered script.
foreach(CUSTOM_OP_TEST ${CUSTOM_OP_TESTS})
py_test(${CUSTOM_OP_TEST} SRCS ${CUSTOM_OP_TEST}.py)
endforeach()
# Deprecated custom-op tests live in their own subdirectory.
add_subdirectory(deprecated)
endif()
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
namespace {
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> x_shape) {
return {x_shape};
}
std::vector<paddle::DataType> InferDtype(paddle::DataType x_dtype) {
return {x_dtype};
}
std::vector<paddle::Tensor> OpForward(const paddle::Tensor &x) { return {x}; }
std::vector<paddle::Tensor> OpBackward(const paddle::Tensor &x) { return {x}; }
} // namespace
// https://github.com/graphcore/popart/blob/sdk-release-2.5/willow/src/builder_impl.cpp#L1458
// only support one input
// Forward op registration: one input X, one output Out; shape and dtype of
// Out are copied from X, and the kernel passes the tensor straight through.
PD_BUILD_OP(checkpointoutput)
.Inputs({"X"})
.Outputs({"Out"})
.SetInferShapeFn(PD_INFER_SHAPE(InferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(InferDtype))
.SetKernelFn(PD_KERNEL(OpForward));
// Backward op: grad(Out) is forwarded unchanged as grad(X).
PD_BUILD_GRAD_OP(checkpointoutput)
.Inputs({paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(OpBackward));
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
namespace {
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> x_shape) {
return {x_shape};
}
std::vector<paddle::DataType> InferDtype(paddle::DataType x_dtype) {
return {x_dtype};
}
std::vector<paddle::Tensor> OpForward(const paddle::Tensor &x) { return {x}; }
std::vector<paddle::Tensor> OpBackward(const paddle::Tensor &x) { return {x}; }
} // namespace
// https://github.com/graphcore/popart/blob/sdk-release-2.5/willow/src/builder.cpp#L502
// Forward op registration: one input X, one output Out; shape and dtype of
// Out are copied from X, and the kernel passes the tensor straight through.
PD_BUILD_OP(custom_detach)
.Inputs({"X"})
.Outputs({"Out"})
.SetInferShapeFn(PD_INFER_SHAPE(InferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(InferDtype))
.SetKernelFn(PD_KERNEL(OpForward));
// Backward op: grad(Out) is forwarded unchanged as grad(X).
PD_BUILD_GRAD_OP(custom_detach)
.Inputs({paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(OpBackward));
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
namespace {
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> x_shape) {
return {x_shape};
}
std::vector<paddle::DataType> InferDtype(paddle::DataType x_dtype) {
return {x_dtype};
}
std::vector<paddle::Tensor> OpForward(const paddle::Tensor &x) { return {x}; }
std::vector<paddle::Tensor> OpBackward(const paddle::Tensor &x) { return {x}; }
} // namespace
// https://github.com/graphcore/popart/blob/sdk-release-2.5/willow/src/builder.gen.cpp#L620
// Forward op registration: one input X, one output Out; shape and dtype of
// Out are copied from X, and the kernel passes the tensor straight through.
PD_BUILD_OP(custom_identity)
.Inputs({"X"})
.Outputs({"Out"})
.SetInferShapeFn(PD_INFER_SHAPE(InferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(InferDtype))
.SetKernelFn(PD_KERNEL(OpForward));
// Backward op: grad(Out) is forwarded unchanged as grad(X).
PD_BUILD_GRAD_OP(custom_identity)
.Inputs({paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(OpBackward));
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
namespace {
std::vector<std::vector<int64_t>> InferShape(
std::vector<int64_t> x_shape,
std::vector<int64_t> y_shape,
const std::string &reduction,
const int &ignoreIndex,
const bool &inputIsLogProbability) {
// reduction type: Sum, Mean, None
if (reduction == "None") {
return {y_shape};
} else {
return {{1}};
}
}
std::vector<paddle::DataType> InferDtype(paddle::DataType x_dtype,
paddle::DataType y_dtype) {
return {x_dtype};
}
std::vector<paddle::Tensor> OpForward(const paddle::Tensor &x,
const paddle::Tensor &y) {
return {x};
}
std::vector<paddle::Tensor> OpBackward(const paddle::Tensor &x) { return {x}; }
} // namespace
// https://github.com/graphcore/popart/blob/sdk-release-2.5/willow/src/builder.cpp#L775
// type of `reduction` is std::string
// `ignoreIndex` is optional, if no need, need to remove it manually(which is a
// new custom op in paddle)
// Forward op registration: two inputs X and Y, one output Out, plus the three
// attributes consumed by InferShape above.
PD_BUILD_OP(custom_nll)
.Inputs({"X", "Y"})
.Outputs({"Out"})
.Attrs({"reduction: std::string",
"ignoreIndex: int",
"inputIsLogProbability: bool"})
.SetInferShapeFn(PD_INFER_SHAPE(InferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(InferDtype))
.SetKernelFn(PD_KERNEL(OpForward));
// Backward op: grad(Out) is forwarded unchanged as grad(X).
PD_BUILD_GRAD_OP(custom_nll)
.Inputs({paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(OpBackward));
if(WITH_IPU)
# Deprecated-style custom-op test; registered only for IPU builds.
py_test(test_custom_nllloss_ipu SRCS test_custom_nllloss_ipu.py)
endif()
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
// Kernels are empty stubs on the Paddle side (names are referenced by the
// registration macros below, so they must stay unchanged).
std::vector<paddle::Tensor> Kernel_Function() { return {}; }
std::vector<paddle::Tensor> Kernel_Function_Grad() { return {}; }
// nllloss
// Output shape: reduction 2 ("none") keeps the per-element loss with the
// label shape; 0 ("sum") and 1 ("mean") collapse it to a single scalar.
std::vector<std::vector<int64_t>> InferShape_NllLoss(
    std::vector<int64_t> x_shape,
    std::vector<int64_t> y_shape,
    const int& reduction,
    const std::string& ignoreIndex,
    const bool& inputIsLogProbability) {
  // 0: sum, 1: mean, 2: none
  constexpr int kReductionNone = 2;
  return reduction == kReductionNone
             ? std::vector<std::vector<int64_t>>{y_shape}
             : std::vector<std::vector<int64_t>>{{1}};
}
// The loss dtype follows the prediction input (x), not the label (y).
std::vector<paddle::DataType> InferDtype_NllLoss(paddle::DataType x_dtype,
                                                 paddle::DataType y_dtype) {
  return {x_dtype};
}
// Forward op registration: two inputs X and Y, one output Out. Unlike
// custom_nll.cc, `reduction` is an int (0 sum / 1 mean / 2 none) and
// `ignoreIndex` is a string.
PD_BUILD_OP(custom_nll_loss)
.Inputs({"X", "Y"})
.Outputs({"Out"})
.Attrs({"reduction: int",
"ignoreIndex: std::string",
"inputIsLogProbability: bool"})
.SetKernelFn(PD_KERNEL(Kernel_Function))
.SetInferShapeFn(PD_INFER_SHAPE(InferShape_NllLoss))
.SetInferDtypeFn(PD_INFER_DTYPE(InferDtype_NllLoss));
// Backward op: stub kernel; declares grad(X) produced from grad(Out).
PD_BUILD_GRAD_OP(custom_nll_loss)
.Inputs({paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(Kernel_Function_Grad));
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sys
import numpy as np
import paddle
import paddle.static
from paddle.utils.cpp_extension import load
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from op_test_ipu import IPUOpTest
def load_custom_ops():
    """JIT-compile and load the custom_nll_loss op from its C++ source."""
    here = os.path.dirname(os.path.realpath(__file__))
    return load(
        name="custom_nll_loss",
        sources=[f"{here}/custom_nllloss.cc"],
        extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'],
        extra_ldflags=['-lpopfloat'],
    )
class TestBase(IPUOpTest):
    """Runs the custom_nll_loss op on IPU (FP32 only) and prints the result."""

    def setUp(self):
        # Load the custom op first: set_test_op below reads self.custom_ops.
        self.load_custom_ops()
        self.set_atol()
        self.set_test_op()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    @property
    def fp16_enabled(self):
        # The FP16 execution path is not exercised for this custom op.
        return False

    def load_custom_ops(self):
        # JIT-compiles custom_nllloss.cc via paddle's cpp_extension loader.
        self.custom_ops = load_custom_ops()

    def set_data_feed(self):
        # x: float32 scores of shape (16, 20, 256);
        # label: int32 values drawn from [0, 256) with shape (16, 20).
        x = np.random.rand(16, 20, 256).astype('float32')
        label = np.random.uniform(0, 256, size=[16, 20]).astype('int32')
        self.feed_fp32 = {
            'x': x,
            'label': label,
        }

    def set_test_op(self):
        self.op = self.custom_ops.custom_nll_loss
        # reduction 0 == sum (see the "0: sum, 1: mean, 2: none" comment in
        # custom_nllloss.cc); ignoreindex is declared as a string attribute.
        self.op_attrs = {
            "reduction": 0,
            "ignoreindex": "0",
            "inputislogprobability": False,
        }

    def set_feed_attr(self):
        # Derive placeholder shapes/names from the feed dict.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(name=self.feed_list[0],
                               shape=self.feed_shape[0],
                               dtype='float32')
        label = paddle.static.data(name=self.feed_list[1],
                                   shape=self.feed_shape[1],
                                   dtype='int32')
        out = self.op(x, label, **self.op_attrs)
        # Reduce to a scalar so a single value is fetched.
        out = paddle.mean(out)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        self.build_model()
        # only test IPU_FP32
        self.run_model(IPUOpTest.ExecutionMode.IPU_FP32)
        print(self.output_dict)
class TestCase1(TestBase):
    """Variant of TestBase with ignoreindex set to the literal string "None"."""

    def set_test_op(self):
        self.op = self.custom_ops.custom_nll_loss
        self.op_attrs = dict(reduction=0,
                             ignoreindex="None",
                             inputislogprobability=False)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sys
import numpy as np
import paddle
import paddle.static
from paddle.utils.cpp_extension import load
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from op_test_ipu import IPUOpTest
def load_custom_ops():
    """JIT-compile and load the checkpointoutput op from its C++ source."""
    here = os.path.dirname(os.path.realpath(__file__))
    return load(
        name="checkpointoutput",
        sources=[f"{here}/custom_checkpointoutput.cc"],
        extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'],
    )
class TestCheckpointoutput(IPUOpTest):
    """Runs the checkpointoutput custom op on IPU (FP32 only)."""

    def setUp(self):
        # Load the custom op first: set_test_op below reads self.custom_ops.
        self.load_custom_ops()
        self.set_atol()
        self.set_test_op()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    @property
    def fp16_enabled(self):
        # The FP16 execution path is not exercised for this custom op.
        return False

    def load_custom_ops(self):
        self.custom_ops = load_custom_ops()

    def set_test_op(self):
        # checkpointoutput takes no attributes.
        self.op = self.custom_ops.checkpointoutput
        self.op_attrs = {}

    def set_data_feed(self):
        # Single float32 input of shape (1, 3, 10, 10).
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}

    def set_feed_attr(self):
        # Derive placeholder shapes/names from the feed dict.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(name=self.feed_list[0],
                               shape=self.feed_shape[0],
                               dtype='float32')
        # Sandwich the custom op between regular ops so the graph is non-trivial.
        x = paddle.add(x, x)
        x = self.op(x, **self.op_attrs)
        x = paddle.mean(x)
        self.fetch_list = [x.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        self.build_model()
        # only test IPU_FP32
        self.run_model(IPUOpTest.ExecutionMode.IPU_FP32)
        print(self.output_dict)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sys
import numpy as np
import paddle
import paddle.static
from paddle.utils.cpp_extension import load
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from op_test_ipu import IPUOpTest
# just load one custom-op for the data race issue under parallel mode
def load_custom_detach():
    """JIT-compile and load the custom_detach op from its C++ source.

    Only one custom op is loaded per helper to avoid the data race under
    parallel test execution noted in the module comment above.
    """
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    # f-prefix dropped from the name: it is a plain literal with no placeholders.
    custom_ops = load(name="custom_detach",
                      sources=[
                          f"{cur_dir}/custom_detach.cc",
                      ],
                      extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'],
                      extra_ldflags=['-lpopfloat'])
    return custom_ops
def load_custom_identity():
    """JIT-compile and load the custom_identity op from its C++ source.

    Only one custom op is loaded per helper to avoid the data race under
    parallel test execution noted in the module comment above.
    """
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    # f-prefix dropped from the name: it is a plain literal with no placeholders.
    custom_ops = load(name="custom_identity",
                      sources=[
                          f"{cur_dir}/custom_identity.cc",
                      ],
                      extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'],
                      extra_ldflags=['-lpopfloat'])
    return custom_ops
def load_custom_nll():
    """JIT-compile and load the custom_nll op from its C++ source.

    Only one custom op is loaded per helper to avoid the data race under
    parallel test execution noted in the module comment above.
    """
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    # f-prefix dropped from the name: it is a plain literal with no placeholders.
    custom_ops = load(name="custom_nll",
                      sources=[
                          f"{cur_dir}/custom_nll.cc",
                      ],
                      extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'],
                      extra_ldflags=['-lpopfloat'])
    return custom_ops
def build_ipu_strategy():
    """Create an IpuStrategy with every paddle custom op mapped to its
    popart implementation (op name, domain, opset version)."""
    op_mappings = [
        ("custom_detach", "Detach", "ai.graphcore", 1),
        ("custom_identity", "Identity", "ai.onnx", 11),
        ("custom_nll", "Nll", "ai.graphcore", 1),
    ]
    ipu_strategy = paddle.static.IpuStrategy()
    for paddle_op, popart_op, domain, version in op_mappings:
        ipu_strategy.add_custom_op(paddle_op=paddle_op,
                                   popart_op=popart_op,
                                   domain=domain,
                                   version=version)
    return ipu_strategy
class TestBase(IPUOpTest):
    """Runs the custom_detach op on IPU via a paddle->popart op mapping."""

    def setUp(self):
        # Load the custom op first: set_test_op below reads self.custom_ops.
        self.load_custom_ops()
        self.set_atol()
        self.set_test_op()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    @property
    def fp16_enabled(self):
        # The FP16 execution path is not exercised for these custom ops.
        return False

    def load_custom_ops(self):
        self.custom_ops = load_custom_detach()

    def set_test_op(self):
        # custom_detach takes no attributes.
        self.op = self.custom_ops.custom_detach
        self.op_attrs = {}

    def set_data_feed(self):
        # Single float32 input of shape (1, 3, 10, 10).
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}

    def set_feed_attr(self):
        # Derive placeholder shapes/names from the feed dict.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(name=self.feed_list[0],
                               shape=self.feed_shape[0],
                               dtype='float32')
        out = self.op(x, **self.op_attrs)
        out = paddle.mean(out)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # Register the paddle->popart custom-op mappings before executing.
        ipu_strategy = build_ipu_strategy()
        ipu_strategy.set_graph_config(is_training=self.is_training)
        self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)

    def test(self):
        self.build_model()
        # only test IPU_FP32
        self.run_model(IPUOpTest.ExecutionMode.IPU_FP32)
        print(self.output_dict)
class TestIdentity(TestBase):
    """Exercises custom_identity, mapped to the onnx Identity op."""

    def load_custom_ops(self):
        self.custom_ops = load_custom_identity()

    def set_test_op(self):
        self.op_attrs = {}
        self.op = self.custom_ops.custom_identity
class TestNll(TestBase):
    """Exercises custom_nll (popart Nll) with score and label inputs."""

    def load_custom_ops(self):
        self.custom_ops = load_custom_nll()

    def set_data_feed(self):
        # x: float32 scores of shape (16, 20, 256);
        # label: int32 values drawn from [0, 256) with shape (16, 20).
        x = np.random.rand(16, 20, 256).astype('float32')
        label = np.random.uniform(0, 256, size=[16, 20]).astype('int32')
        self.feed_fp32 = {
            'x': x,
            'label': label,
        }

    def set_test_op(self):
        self.op = self.custom_ops.custom_nll
        # NOTE(review): keys here are lower-cased versions of the C++ attrs
        # (reduction, ignoreIndex, inputIsLogProbability) — presumably paddle
        # lower-cases custom-op attribute names; confirm against the framework.
        self.op_attrs = {
            "reduction": "Sum",
            "ignoreindex": 0,
            "inputislogprobability": False,
        }

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(name=self.feed_list[0],
                               shape=self.feed_shape[0],
                               dtype='float32')
        label = paddle.static.data(name=self.feed_list[1],
                                   shape=self.feed_shape[1],
                                   dtype='int32')
        out = self.op(x, label, **self.op_attrs)
        out = paddle.mean(out)
        self.fetch_list = [out.name]
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册