Unverified commit 9ed5db28, authored by ronnywang, committed by GitHub

[NPU] add batch_norm_op_npu and test (#34056)

* add batch_norm_op_npu and tests

* remove skip.If

* fix bug
Parent 3f011d82
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
namespace paddle {
namespace operators {
template <typename T>
class NPUBatchNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto &dev_ctx = ctx.template device_context<platform::NPUDeviceContext>();
const float epsilon = ctx.Attr<float>("epsilon");
float momentum = ctx.Attr<float>("momentum");
const bool is_test = ctx.Attr<bool>("is_test");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool trainable_stats = ctx.Attr<bool>("trainable_statistics");
const bool test_mode = is_test && (!trainable_stats);
const std::string data_layout = ctx.Attr<std::string>("data_layout");
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE_EQ(x_dims.size(), 4,
platform::errors::InvalidArgument(
"The input tensor X's dimension must equal to 4. But "
"received X's shape = [%s], X's dimension = [%d].",
x_dims, x_dims.size()));
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
    Tensor x_tensor, y_tensor;
    x_tensor.ShareDataWith(*x);
    y_tensor.ShareDataWith(*y);
    if (data_layout == "NHWC") {
      x_tensor.set_layout(DataLayout::kNHWC);
      y_tensor.set_layout(DataLayout::kNHWC);
}
bool training = !test_mode && !use_global_stats;
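    // Inference path (or use_global_stats): normalize with the running
    // mean/variance. Training path: compute batch statistics and update the
    // running statistics afterwards.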
if (!training) {
const auto *est_mean = ctx.Input<Tensor>("Mean");
const auto *est_var = ctx.Input<Tensor>("Variance");
framework::Tensor reserve_space1, reserve_space2;
reserve_space1.mutable_data<float>(est_mean->dims(), ctx.GetPlace());
reserve_space2.mutable_data<float>(est_var->dims(), ctx.GetPlace());
const auto &runner = NpuOpRunner(
"BatchNorm", {x_tensor, *scale, *bias, *est_mean, *est_var},
          {y_tensor, reserve_space1, reserve_space2, reserve_space1,
reserve_space2},
{{"epsilon", epsilon},
{"is_training", training},
{"data_format", data_layout}});
auto stream = dev_ctx.stream();
runner.Run(stream);
} else {
      // If MomentumTensor is provided, its value overrides the momentum
      // attribute; momentum is only used in this training branch.
if (ctx.HasInput("MomentumTensor")) {
const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor");
Tensor mom_cpu;
TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu);
momentum = mom_cpu.data<float>()[0];
}
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
mean_out->mutable_data<T>(ctx.GetPlace());
variance_out->mutable_data<T>(ctx.GetPlace());
saved_mean->mutable_data<T>(ctx.GetPlace());
saved_variance->mutable_data<T>(ctx.GetPlace());
framework::Tensor mean_tmp, variance_tmp;
mean_tmp.mutable_data<float>(mean_out->dims(), ctx.GetPlace());
variance_tmp.mutable_data<float>(variance_out->dims(), ctx.GetPlace());
const auto &runner = NpuOpRunner(
"BatchNorm", {x_tensor, *scale, *bias},
          {y_tensor, mean_tmp, variance_tmp, *saved_mean, *saved_variance},
{{"epsilon", epsilon},
{"is_training", training},
{"data_format", data_layout}});
auto stream = dev_ctx.stream();
runner.Run(stream);
// Ascend can't output the estimated mean and variance
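      // so the running statistics are updated explicitly: the two
      // AddMatMatElements calls below apply the moving-average update
      //   mean_out     = momentum * mean_out     + (1 - momentum) * saved_mean
      //   variance_out = momentum * variance_out + (1 - momentum) * saved_variance
      // which matches the reference update used in the Python test.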
framework::Tensor this_factor_tensor;
this_factor_tensor.mutable_data<float>(framework::make_ddim({1}),
ctx.GetPlace());
framework::TensorFromVector<float>({static_cast<float>(1. - momentum)},
dev_ctx, &this_factor_tensor);
framework::Tensor momentum_tensor;
momentum_tensor.mutable_data<float>(framework::make_ddim({1}),
ctx.GetPlace());
framework::TensorFromVector<float>({static_cast<float>(momentum)},
dev_ctx, &momentum_tensor);
framework::Tensor ones_tensor;
ones_tensor.mutable_data<float>(mean_out->dims(), ctx.GetPlace());
framework::TensorFromVector<float>(
std::vector<float>(framework::product(mean_out->dims()), 1.0f),
dev_ctx, &ones_tensor);
const auto &runner1 = NpuOpRunner("AddMatMatElements",
{*mean_out, *saved_mean, ones_tensor,
momentum_tensor, this_factor_tensor},
{*mean_out}, {});
runner1.Run(stream);
const auto &runner2 = NpuOpRunner(
"AddMatMatElements", {*variance_out, *saved_variance, ones_tensor,
momentum_tensor, this_factor_tensor},
{*variance_out}, {});
runner2.Run(stream);
}
}
};
template <typename T>
class NPUBatchNormGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto &dev_ctx = ctx.template device_context<platform::NPUDeviceContext>();
const float epsilon = ctx.Attr<float>("epsilon");
const std::string data_layout = ctx.Attr<std::string>("data_layout");
bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const auto *y_grad = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *saved_mean = ctx.Input<Tensor>("SavedMean");
auto *saved_variance = ctx.Input<Tensor>("SavedVariance");
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *scale_grad = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *bias_grad = ctx.Output<Tensor>(framework::GradVarName("Bias"));
const bool is_test = ctx.Attr<bool>("is_test");
use_global_stats = is_test || use_global_stats;
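    // In test mode the running statistics were used in the forward pass, so
    // the backward pass must use them as well.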
const Tensor *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE_EQ(x_dims.size(), 4,
platform::errors::InvalidArgument(
"The input tensor X's dimension must equal to 4. But "
"received X's shape = [%s], X's dimension = [%d].",
x_dims, x_dims.size()));
// init output
Tensor scale_grad_tmp, bias_grad_tmp, x_grad_tmp;
if (scale_grad && bias_grad) {
scale_grad->mutable_data<T>(ctx.GetPlace());
bias_grad->mutable_data<T>(ctx.GetPlace());
scale_grad_tmp.ShareDataWith(*scale_grad);
bias_grad_tmp.ShareDataWith(*bias_grad);
} else {
scale_grad_tmp.mutable_data<T>(scale->dims(), ctx.GetPlace());
bias_grad_tmp.mutable_data<T>(bias->dims(), ctx.GetPlace());
}
Tensor x_tensor, y_grad_tensor, x_grad_tensor;
x_tensor.ShareDataWith(*x);
y_grad_tensor.ShareDataWith(*y_grad);
if (x_grad) {
x_grad->mutable_data<T>(ctx.GetPlace());
x_grad_tensor.ShareDataWith(*x_grad);
} else {
x_grad_tensor.mutable_data<T>(x->dims(), ctx.GetPlace());
}
if (data_layout == "NHWC") {
x_tensor.set_layout(DataLayout::kNHWC);
y_grad_tensor.set_layout(DataLayout::kNHWC);
x_grad_tensor.set_layout(DataLayout::kNHWC);
}
if (!use_global_stats) {
const auto &runner = NpuOpRunner(
"BatchNormGrad",
{y_grad_tensor, x_tensor, *scale, *saved_mean, *saved_variance},
{x_grad_tensor, scale_grad_tmp, bias_grad_tmp, *saved_mean,
           *saved_variance},  // a segmentation fault occurs if reserve_space_3
                              // and reserve_space_4 are not provided
{{"epsilon", epsilon},
{"is_training", true},
{"data_format", data_layout}});
auto stream = dev_ctx.stream();
runner.Run(stream);
} else {
const auto *running_mean = ctx.Input<Tensor>("Mean");
const auto *running_var = ctx.Input<Tensor>("Variance");
const auto &runner = NpuOpRunner(
"BatchNormGrad",
{y_grad_tensor, x_tensor, *scale, *running_mean, *running_var},
{x_grad_tensor, scale_grad_tmp, bias_grad_tmp, *running_mean,
           *running_var},  // a segmentation fault occurs if reserve_space_3
                           // and reserve_space_4 are not provided
{{"epsilon", epsilon},
{"is_training", true},
{"data_format", data_layout}});
auto stream = dev_ctx.stream();
runner.Run(stream);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_NPU_KERNEL(batch_norm, ops::NPUBatchNormOpKernel<float>,
ops::NPUBatchNormOpKernel<plat::float16>);
REGISTER_OP_NPU_KERNEL(batch_norm_grad, ops::NPUBatchNormGradOpKernel<float>,
ops::NPUBatchNormGradOpKernel<plat::float16>);
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
from op_test import OpTest, _set_use_system_allocator
from paddle.fluid import Program, program_guard
from test_batch_norm_op import _reference_testing, _cal_mean_variance, _reference_training, _reference_grad
_set_use_system_allocator(False)
paddle.enable_static()
class TestBatchNormOpInference(unittest.TestCase):
def setUp(self):
self.dtype = np.float32
self.init_kernel_type()
self.data_formats = ["NCHW", "NHWC"]
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
def check_with_place(self, place, data_layout, dtype, shape):
        epsilon = 0.00001
if len(shape) == 2:
x_shape = shape
c = x_shape[1]
else:
n, h, w, c = shape[0], shape[1], shape[2], shape[3]
if data_layout == "NHWC":
x_shape = [n, h, w, c]
elif data_layout == "NCHW":
x_shape = [n, c, h, w]
else:
raise ValueError("Unknown data layout.")
scale_shape = [c]
x = np.random.random_sample(x_shape).astype(dtype)
x = x - 0.5
scale = np.random.random_sample(scale_shape).astype(np.float32)
bias = np.random.random_sample(scale_shape).astype(np.float32)
mean = np.zeros(scale_shape).astype(np.float32)
variance = np.ones(scale_shape).astype(np.float32)
y = _reference_testing(x, scale, bias, mean, variance, epsilon,
data_layout).astype(dtype)
var_dict = locals()
var_names = ["x", "scale", "bias", "mean", "variance", "y"]
ground_truth = {name: var_dict[name] for name in var_names}
ground_truth["saved_mean"] = mean
ground_truth["saved_variance"] = variance
program = fluid.Program()
with fluid.program_guard(program):
block = program.global_block()
for name in ground_truth:
block.create_var(
name=name, dtype="float32", shape=ground_truth[name].shape)
inputs = {
"X": block.var("x"),
"Scale": block.var("scale"),
"Bias": block.var("bias"),
"Mean": block.var("mean"),
"Variance": block.var("variance")
}
attrs = {
"epsilon": epsilon,
"is_test": True,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
}
outputs = {
"Y": block.var("y"),
"MeanOut": block.var("mean"), # share memory
"VarianceOut": block.var("variance"), # share memory
"SavedMean": block.var("saved_mean"),
"SavedVariance": block.var("saved_variance")
}
block.create_var(name="reserve_space", dtype='float32')
outputs["ReserveSpace"] = block.var('reserve_space')
bn_op = block.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
program._sync_with_cpp()
exe = fluid.Executor(place)
out = exe.run(
program,
feed={
name: ground_truth[name]
for name in ["x", "scale", "bias", "mean", "variance"]
},
fetch_list=["y"])
self.__assert_close(var_dict["y"], out[0], "y", atol=1e-3)
def test_check_output(self):
place = core.NPUPlace(0)
for data_format in self.data_formats:
self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5])
def init_kernel_type(self):
pass
class TestFP16BatchNormOpInference(TestBatchNormOpInference):
def setUp(self):
self.dtype = np.float16
self.init_kernel_type()
self.data_formats = ["NCHW", "NHWC"]
class TestBatchNormOpTraining(unittest.TestCase):
def set_npu(self):
self.__class__.use_npu = True
def setUp(self):
self.set_npu()
self.use_mkldnn = False
self.fuse_with_relu = False
self.data_formats = ["NCHW", "NHWC"]
self.momentum = 0.9
self.use_momentum_variable = False
self.epsilon = 0.00001
self.init_kernel_type()
self.init_test_case()
def init_test_case(self):
self.use_global_stats = False
self.no_grad_set = set()
self.fetch_list = [
"y", 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD',
'scale@GRAD', 'bias@GRAD'
]
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
np.allclose(np.array(tensor), np_array, atol=atol)
def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance,
epsilon, momentum, shape, data_layout):
# run forward
y, saved_mean, var_ref = _reference_training(x, scale, bias, epsilon,
data_layout)
mean_out = saved_mean * (1. - momentum) + momentum * mean
variance_out = var_ref * (1. - momentum) + momentum * variance
saved_variance = 1. / np.sqrt(var_ref + epsilon)
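        # Note that SavedVariance stores the inverse standard deviation,
        # 1 / sqrt(var + eps), rather than the raw batch variance.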
# run backward
x_grad, scale_grad, bias_grad = _reference_grad(
x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout)
return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad
def set_mean_variance(self, scale_shape, x, data_layout):
mean, variance = _cal_mean_variance(x, self.epsilon, data_layout)
mean_pre = np.zeros(scale_shape).astype(np.float32)
variance_pre = np.ones(scale_shape).astype(np.float32)
        # compute the running mean/variance after one moving-average step
if self.use_global_stats:
mom = self.momentum
mean = mean * (1. - mom) + mom * mean_pre
variance = variance * (1. - mom) + mom * variance_pre
return mean, variance
def test_forward_backward(self):
def test_with_place(place, data_layout, shape):
# attr
epsilon = self.epsilon
momentum = self.momentum
if data_layout == "NCHW":
n, c, h, w = shape[0], shape[1], shape[2], shape[3]
else:
n, h, w, c = shape[0], shape[1], shape[2], shape[3]
scale_shape = [c]
np.random.seed(123)
x = np.random.random_sample(shape).astype(np.float32)
scale = np.random.random_sample(scale_shape).astype(np.float32)
bias = np.random.random_sample(scale_shape).astype(np.float32)
mean, variance = self.set_mean_variance(scale_shape, x, data_layout)
y_grad = np.random.random_sample(shape).astype(np.float32)
momentum_var = np.array([momentum]).astype(np.float32)
y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward(
x, y_grad, scale, bias, mean, variance, epsilon, momentum,
shape, data_layout)
var_dict = locals()
var_dict['y@GRAD'] = y_grad
var_dict['x@GRAD'] = x_grad
var_dict['scale@GRAD'] = scale_grad
var_dict['bias@GRAD'] = bias_grad
var_names = [
'x', 'scale', 'bias', 'mean', 'variance', "y", 'saved_mean',
'saved_variance', 'momentum_var'
]
ground_truth = {name: var_dict[name] for name in var_names}
program = fluid.Program()
with fluid.program_guard(program):
block = program.global_block()
for name in ground_truth:
block.create_var(
name=name,
dtype='float32',
shape=ground_truth[name].shape)
inputs = {
"X": block.var('x'),
"Scale": block.var('scale'),
"Bias": block.var('bias'),
"Mean": block.var('mean'),
"Variance": block.var('variance')
}
attrs = {
"epsilon": epsilon,
"is_test": False,
"data_layout": data_layout,
"use_mkldnn": self.use_mkldnn,
"fuse_with_relu": self.fuse_with_relu,
"use_global_stats": self.use_global_stats
}
if self.use_momentum_variable:
inputs['MomentumTensor'] = block.var('momentum_var')
else:
attrs['momentum'] = momentum
outputs = {
"Y": block.var("y"),
"MeanOut": block.var('mean'), # share memory
"VarianceOut": block.var('variance'), # share memory
"SavedMean": block.var('saved_mean'),
"SavedVariance": block.var('saved_variance')
}
block.create_var(name="reserve_space", dtype='float32')
outputs["ReserveSpace"] = block.var('reserve_space')
bn_op = block.append_op(
type="batch_norm",
inputs=inputs,
outputs=outputs,
attrs=attrs)
block.create_var(name='y@GRAD', dtype='float32', shape=y.shape)
# generate backward op_desc
grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
bn_op.desc, self.no_grad_set, [])
grad_op_desc = grad_op_desc_list[0]
new_op_desc = block.desc.append_op()
new_op_desc.copy_from(grad_op_desc)
for var_name in grad_op_desc.output_arg_names():
block.desc.var(var_name.encode("ascii"))
grad_op_desc.infer_var_type(block.desc)
grad_op_desc.infer_shape(block.desc)
for arg in grad_op_desc.output_arg_names():
grad_var = block.desc.find_var(arg.encode("ascii"))
grad_var.set_dtype(core.VarDesc.VarType.FP32)
program._sync_with_cpp()
exe = fluid.Executor(place)
out = exe.run(program,
feed={
name: var_dict[name]
for name in [
'x', 'scale', 'bias', 'mean', 'variance',
'y@GRAD', 'momentum_var'
]
},
fetch_list=self.fetch_list)
            for i, name in enumerate(self.fetch_list):
                if name == 'variance':
                    self.__assert_close(
                        var_dict[name], out[i], name, atol=1e-3)
                    continue
                self.__assert_close(var_dict[name], out[i], name)
print("op test forward passed: ", str(place), data_layout)
for data_format in self.data_formats:
test_with_place(core.NPUPlace(0), data_format, [2, 3, 4, 5])
def init_kernel_type(self):
pass
class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining):
def init_test_case(self):
self.use_global_stats = False
self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
class TestBatchNormOpTrainingMomentumVariable(TestBatchNormOpTraining):
def init_test_case(self):
self.use_momentum_variable = True
self.use_global_stats = False
self.no_grad_set = set()
self.fetch_list = [
'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD',
'scale@GRAD', 'bias@GRAD'
]
class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining):
def init_test_case(self):
self.use_global_stats = True
self.no_grad_set = set()
self.fetch_list = [
'y', 'mean', 'variance', 'x@GRAD', 'scale@GRAD', 'bias@GRAD'
]
def reference_grad(self, x, y_grad, scale, mean, var, epsilon, data_format):
if data_format == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
y_grad = np.transpose(y_grad, (0, 2, 3, 1))
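        # With frozen (global) statistics, mean and var are constants, so the
        # gradients simplify to the expressions below.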
x_grad = scale * y_grad / np.sqrt(var + epsilon)
grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon),
axis=(0, 1, 2))
grad_offset = np.sum(y_grad, axis=(0, 1, 2))
# transfer back to N, C, H, W
if data_format == "NCHW":
x_grad = np.transpose(x_grad, (0, 3, 1, 2))
x = np.transpose(x, (0, 3, 1, 2))
y_grad = np.transpose(y_grad, (0, 3, 1, 2))
return x_grad, grad_scale, grad_offset
def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance,
epsilon, momentum, shape, data_layout):
if data_layout != "NCHW" and data_layout != "NHWC":
raise ValueError("Unknown data order.")
if data_layout == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
        # run normalization
normalized = (x - mean) / np.sqrt(variance + epsilon)
y = normalized * scale + bias
# transfer back to N, C, H, W
if data_layout == "NCHW":
x = np.transpose(x, (0, 3, 1, 2))
y = np.transpose(y, (0, 3, 1, 2))
mean_out = mean
variance_out = variance
saved_variance = 1. / np.sqrt(variance + epsilon)
# run backward
x_grad, scale_grad, bias_grad = self.reference_grad(
x, y_grad, scale, mean, variance, epsilon, data_layout)
return y, mean_out, variance_out, mean, saved_variance, x_grad, scale_grad, bias_grad
class TestBatchNormOpFreezeStatsAndScaleBiasTraining(
TestBatchNormOpFreezeStatsTraining):
def init_test_case(self):
self.use_global_stats = True
self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
class TestDygraphBatchNormTrainableStats(unittest.TestCase):
def test_dygraph(self):
places = [fluid.NPUPlace(0)]
for p in places:
shape = [4, 10, 4, 4]
def compute(x, is_test, trainable_statistics):
with fluid.dygraph.guard(p):
bn = fluid.dygraph.BatchNorm(
shape[1],
is_test=is_test,
trainable_statistics=trainable_statistics)
y = bn(fluid.dygraph.to_variable(x))
return y.numpy()
x = np.random.randn(*shape).astype("float32")
y1 = compute(x, False, False)
y2 = compute(x, True, True)
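            # trainable_statistics=True makes the layer compute batch
            # statistics even with is_test=True, so the two results should match.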
self.assertTrue(np.allclose(y1, y2))
def test_static(self):
places = [fluid.NPUPlace(0)]
for p in places:
exe = fluid.Executor(p)
shape = [4, 10, 16, 16]
def compute(x_np, is_test, trainable_statistics):
with program_guard(Program(), Program()):
bn = fluid.dygraph.BatchNorm(
shape[1],
is_test=is_test,
trainable_statistics=trainable_statistics)
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
return r
x = np.random.randn(*shape).astype("float32")
y1 = compute(x, False, False)
y2 = compute(x, True, True)
self.assertTrue(np.allclose(y1, y2))
if __name__ == "__main__":
unittest.main()