Unverified commit 8307b0cb, authored by W wangxinxin08, committed by GitHub

add conv op check for illegal input or attributes (#35337)

* add conv op check for illegal input or attributes
Parent eae4bf5b
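To make the intent of the new checks concrete, here is a minimal sketch (not part of the commit) of the behavior they enforce at the Python API level, assuming Paddle 2.x eager mode and the paddle.nn.functional API: a non-positive groups value or an input tensor of the wrong rank is rejected with a ValueError before the kernel is ever dispatched.

# Illustrative only; assumes a Paddle 2.x build that includes these checks.
import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.random.randn(1, 3, 8, 8).astype("float32"))  # NCHW, 4-D input
w = paddle.to_tensor(np.random.randn(3, 3, 1, 1).astype("float32"))  # OIHW filter

try:
    F.conv2d(x, w, groups=0)  # groups must be > 0; now rejected up front
except ValueError as e:
    print("rejected groups=0:", e)

try:
    x3d = paddle.to_tensor(np.random.randn(3, 8, 8).astype("float32"))
    F.conv2d(x3d, w)  # conv2d expects a 4-D tensor; a 3-D input is rejected
except ValueError as e:
    print("rejected 3-D input:", e)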
...@@ -116,6 +116,10 @@ std::vector<int64_t> ConvOp::ComputeOutputShape(
"the output channels is %d, the filter's shape is [%s], "
"the groups is %d.",
filter_dims[0], filter_dims, groups));
PADDLE_ENFORCE_GT(
filter_dims[0], 0,
platform::errors::InvalidArgument(
"the size of filter at axis 0 should be greater than 0"));
framework::DDim in_data_dims;
if (channel_last) {
...
...@@ -1816,6 +1816,10 @@ def conv3d(input,
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NDHWC")
if len(input.shape) != 5:
raise ValueError(
"Input should be 5D tensor, but received input with the shape of {}".
format(input.shape))
num_channels = input.shape[4] if channel_last else input.shape[1]
if num_channels < 0:
raise ValueError(
...@@ -1824,6 +1828,10 @@ def conv3d(input,
if groups is None:
num_filter_channels = num_channels
elif groups <= 0:
raise ValueError(
"the groups of conv3d should be greater than 0. Received groups: {}".
format(groups))
else:
if num_channels % groups != 0:
raise ValueError(
...@@ -4244,10 +4252,15 @@ def conv3d_transpose(input,
raise ValueError(
"Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
+ data_format + " but only NCDHW or NDHWC supported.")
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Variable")
if len(input.shape) != 5:
raise ValueError(
"Input should be 5D tensor, but received input with the shape of {}".
format(input.shape))
input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
-1]
...@@ -4339,6 +4352,15 @@ def conv3d_transpose(input,
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
if groups <= 0:
raise ValueError(
"the groups of conv3d_transpose should be greater than 0. Received groups: {}".
format(groups))
if num_filters % groups != 0:
raise ValueError("Attr(num_filters) must be divisible by groups,"
"Received: Attr(num_filters) is {}, the groups is {}".
format(num_filters, groups))
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
...
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestFunctionalConv1DError(TestCase):
def setUp(self):
self.input = []
self.filter = []
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCL"
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv1d(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
class TestFunctionalConv1DErrorCase1(TestFunctionalConv1DError):
def setUp(self):
self.input = np.random.randn(1, 3, 3)
self.filter = np.random.randn(3, 3, 1)
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCL"
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestFunctionalConv1DError(TestCase):
def setUp(self):
self.input = []
self.filter = []
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCL"
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv1d_transpose(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
class TestFunctionalConv1DErrorCase1(TestFunctionalConv1DError):
def setUp(self):
self.input = np.random.randn(1, 3, 3)
self.filter = np.random.randn(3, 3, 1)
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCL"
if __name__ == "__main__":
unittest.main()
...@@ -457,5 +457,81 @@ class TestFunctionalConv2DErrorCase11(TestFunctionalConv2DError):
self.data_format = "NHCW"
class TestFunctionalConv2DErrorCase12(TestCase):
def setUp(self):
self.input = np.array([])
self.filter = np.array([])
self.num_filters = 0
self.filter_size = 0
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCHW"
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data("input", self.input.shape, dtype=paddle.float32)
y = fluid.layers.conv2d(
x,
self.num_filters,
self.filter_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.filter),
bias_attr=False if self.bias is None else
I.NumpyArrayInitializer(self.bias),
act=None,
data_format=self.data_format)
exe = fluid.Executor()
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv2d(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_dygraph_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
class TestFunctionalConv2DErrorCase13(TestFunctionalConv2DErrorCase12):
def setUp(self):
self.input = np.random.randn(1, 3, 3, 3)
self.filter = np.random.randn(3, 3, 1, 1)
self.num_filters = 3
self.filter_size = 1
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCHW"
if __name__ == "__main__":
unittest.main()
...@@ -463,5 +463,81 @@ class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError):
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase10(TestCase):
def setUp(self):
self.input = np.array([])
self.filter = np.array([])
self.num_filters = 0
self.filter_size = 0
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCHW"
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data("input", self.input.shape, dtype=paddle.float32)
y = fluid.layers.conv2d(
x,
self.num_filters,
self.filter_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.filter),
bias_attr=False if self.bias is None else
I.NumpyArrayInitializer(self.bias),
act=None,
data_format=self.data_format)
exe = fluid.Executor()
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv2d_transpose(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_dygraph_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
class TestFunctionalConv2DErrorCase11(TestFunctionalConv2DErrorCase10):
def setUp(self):
self.input = np.random.randn(1, 3, 3, 3)
self.filter = np.random.randn(3, 3, 1, 1)
self.num_filters = 3
self.filter_size = 1
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCHW"
if __name__ == "__main__":
unittest.main()
...@@ -432,5 +432,81 @@ class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError):
self.data_format = "NDHWC"
class TestFunctionalConv3DErrorCase11(TestCase):
def setUp(self):
self.input = np.array([])
self.filter = np.array([])
self.num_filters = 0
self.filter_size = 0
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCDHW"
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data("input", self.input.shape, dtype=paddle.float32)
y = fluid.layers.conv3d(
x,
self.num_filters,
self.filter_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.filter),
bias_attr=False if self.bias is None else
I.NumpyArrayInitializer(self.bias),
act=None,
data_format=self.data_format)
exe = fluid.Executor()
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv3d(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_dygraph_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
class TestFunctionalConv3DErrorCase12(TestFunctionalConv3DErrorCase11):
def setUp(self):
self.input = np.random.randn(1, 3, 3, 3, 3)
self.filter = np.random.randn(3, 3, 1, 1, 1)
self.num_filters = 3
self.filter_size = 1
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCDHW"
if __name__ == "__main__":
unittest.main()
...@@ -483,5 +483,82 @@ class TestFunctionalConv3DTransposeErrorCase9(
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase10(TestCase):
def setUp(self):
self.input = np.array([])
self.filter = np.array([])
self.num_filters = 0
self.filter_size = 0
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCDHW"
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data("input", self.input.shape, dtype=paddle.float32)
y = fluid.layers.conv3d_transpose(
x,
self.num_filters,
self.filter_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.filter),
bias_attr=False if self.bias is None else
I.NumpyArrayInitializer(self.bias),
act=None,
data_format=self.data_format)
exe = fluid.Executor()
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv3d_transpose(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_dygraph_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
class TestFunctionalConv3DTransposeErrorCase11(
TestFunctionalConv3DTransposeErrorCase10):
def setUp(self):
self.input = np.random.randn(1, 3, 3, 3, 3)
self.filter = np.random.randn(3, 3, 1, 1, 1)
self.num_filters = 3
self.filter_size = 1
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCDHW"
if __name__ == "__main__":
unittest.main()
...@@ -299,12 +299,20 @@ def conv1d(x,
channel_last = (data_format == "NLC")
channel_dim = -1 if channel_last else 1
conv2d_data_format = "NHWC" if channel_last else "NCHW"
if len(x.shape) != 3:
raise ValueError(
"Input x should be 3D tensor, but received x with the shape of {}".
format(x.shape))
num_channels = x.shape[channel_dim]
num_filters = weight.shape[0]
if num_channels < 0:
raise ValueError("The channel dimension of the input({}) "
"should be defined. Received: {}.".format(
x.shape, num_channels))
if groups <= 0:
raise ValueError(
"The groups of conv1d should be greater than 0. Received groups: {}".
format(groups))
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
...@@ -508,12 +516,20 @@ def conv2d(x,
channel_last = (data_format == "NHWC")
channel_dim = -1 if channel_last else 1
if len(x.shape) != 4:
raise ValueError(
"Input x should be 4D tensor, but received x with the shape of {}".
format(x.shape))
num_channels = x.shape[channel_dim]
num_filters = weight.shape[0]
if num_channels < 0:
raise ValueError("The channel dimension of the input({}) "
"should be defined. Received: {}.".format(
x.shape, num_channels))
if groups <= 0:
raise ValueError(
"The groups of conv2d should be greater than 0. Received groups: {}".
format(groups))
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
...@@ -710,12 +726,20 @@ def conv1d_transpose(x,
data_format))
channel_last = (data_format == "NLC")
channel_dim = -1 if channel_last else 1
if len(x.shape) != 3:
raise ValueError(
"Input x should be 3D tensor, but received x with the shape of {}".
format(x.shape))
num_channels = x.shape[channel_dim]
if num_channels < 0:
raise ValueError("The channel dimension of the input({}) "
"should be defined. Received: {}.".format(
x.shape, num_channels))
if groups <= 0:
raise ValueError(
"The groups of conv1d_transpose should be greater than 0. Received groups: {}".
format(groups))
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
...@@ -964,11 +988,19 @@ def conv2d_transpose(x,
data_format))
channel_last = (data_format == "NHWC")
channel_dim = -1 if channel_last else 1
if len(x.shape) != 4:
raise ValueError(
"Input x should be 4D tensor, but received x with the shape of {}".
format(x.shape))
num_channels = x.shape[channel_dim]
if num_channels < 0:
raise ValueError("The channel dimension of the input({}) "
"should be defined. Received: {}.".format(
x.shape, num_channels))
if groups <= 0:
raise ValueError(
"The groups of conv2d_transpose should be greater than 0. Received groups: {}".
format(groups))
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
...@@ -1167,12 +1199,20 @@ def conv3d(x,
channel_last = (data_format == "NDHWC")
channel_dim = -1 if channel_last else 1
if len(x.shape) != 5:
raise ValueError(
"Input x should be 5D tensor, but received x with the shape of {}".
format(x.shape))
num_channels = x.shape[channel_dim]
num_filters = weight.shape[0]
if num_channels < 0:
raise ValueError(
"The channel dimension of the input({}) should be defined. "
"Received: {}.".format(x.shape, num_channels))
if groups <= 0:
raise ValueError(
"The groups of conv3d should be greater than 0. Received groups: {}".
format(groups))
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
...@@ -1358,12 +1398,20 @@ def conv3d_transpose(x,
channel_last = (data_format == "NDHWC")
channel_dim = -1 if channel_last else 1
if len(x.shape) != 5:
raise ValueError(
"Input x should be 5D tensor, but received x with the shape of {}".
format(x.shape))
num_channels = x.shape[channel_dim]
num_filters = weight.shape[1]
if num_channels < 0:
raise ValueError(
"The channel dimension of the input({}) should be defined. "
"Received: {}.".format(x.shape, num_channels))
if groups <= 0:
raise ValueError(
"The groups of conv3d_transpose should be greater than 0. Received groups: {}".
format(groups))
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
...