Unverified commit 87443831, authored by zhangkaihuo, committed by GitHub

Standard sparse conv name (#44353)

Parent: 0dafbb03
@@ -84,7 +84,7 @@
   args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
   output : Tensor(out), Tensor(rulebook)
   kernel :
-    func : sparse_conv3d{sparse_coo, dense -> sparse_coo, dense}
+    func : conv3d_coo{sparse_coo, dense -> sparse_coo, dense}
     layout : x
   intermediate : rulebook
   backward : conv3d_grad
...
@@ -76,7 +76,7 @@
   args : (Tensor x, Tensor kernel, Tensor rulebook, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
   output : Tensor(x_grad), Tensor(kernel_grad)
   kernel :
-    func : sparse_conv3d_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}
+    func : conv3d_coo_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}

 - backward_api : coo_to_dense_grad
   forward : coo_to_dense(Tensor x) -> Tensor(out)
...
@@ -17,13 +17,12 @@ limitations under the License. */
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/kernels/empty_kernel.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"

 namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void Conv3dGradKernel(const Context& dev_ctx,
+void Conv3dCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,
                          const DenseTensor& kernel,
                          const DenseTensor& rulebook,
@@ -37,7 +36,7 @@ void Conv3dGradKernel(const Context& dev_ctx,
                          DenseTensor* kernel_grad);

 template <typename T, typename Context>
-std::tuple<SparseCooTensor, DenseTensor> Conv3dGrad(
+std::tuple<SparseCooTensor, DenseTensor> Conv3dCooGrad(
     const Context& dev_ctx,
     const SparseCooTensor& x,
     const DenseTensor& kernel,
@@ -52,7 +51,7 @@ std::tuple<SparseCooTensor, DenseTensor> Conv3dGrad(
   DenseTensor kernel_grad;

   // TODO(zhangkaihuo): call InferMeta func here
-  Conv3dGradKernel<T, Context>(dev_ctx,
+  Conv3dCooGradKernel<T, Context>(dev_ctx,
                                   x,
                                   kernel,
                                   rulebook,
...
@@ -23,7 +23,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void Conv3dKernel(const Context& dev_ctx,
+void Conv3dCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,
                      const DenseTensor& kernel,
                      const std::vector<int>& paddings,
@@ -35,7 +35,7 @@ void Conv3dKernel(const Context& dev_ctx,
                      DenseTensor* rulebook);

 template <typename T, typename Context>
-SparseCooTensor Conv3d(const Context& dev_ctx,
+SparseCooTensor Conv3dCoo(const Context& dev_ctx,
                           const SparseCooTensor& x,
                           const DenseTensor kernel,
                           const std::vector<int>& paddings,
@@ -45,7 +45,7 @@ SparseCooTensor Conv3d(const Context& dev_ctx,
                           const bool subm,
                           DenseTensor* rulebook) {
   SparseCooTensor coo;
-  Conv3dKernel<T, Context>(dev_ctx,
+  Conv3dCooKernel<T, Context>(dev_ctx,
                               x,
                               kernel,
                               paddings,
...
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"

 #include "paddle/phi/core/visit_type.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
@@ -31,7 +31,7 @@ namespace sparse {
 // x_grad = out_grad * transpose(kenrel)
 // kernel_grad = transpose(x) * out_grad
 template <typename T, typename IntT = int>
-void Conv3dGradCPUKernel(const CPUContext& dev_ctx,
+void Conv3dCooGradCPUKernel(const CPUContext& dev_ctx,
                             const SparseCooTensor& x,
                             const DenseTensor& kernel,
                             const DenseTensor& rulebook,
@@ -178,7 +178,7 @@ void Conv3dGradCPUKernel(const CPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void Conv3dGradKernel(const Context& dev_ctx,
+void Conv3dCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,
                          const DenseTensor& kernel,
                          const DenseTensor& rulebook,
@@ -191,8 +191,8 @@ void Conv3dGradKernel(const Context& dev_ctx,
                          SparseCooTensor* x_grad,
                          DenseTensor* kernel_grad) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dGradCPUKernel", ([&] {
-        Conv3dGradCPUKernel<T, data_t>(dev_ctx,
+      x.non_zero_indices().dtype(), "Conv3dCooGradCPUKernel", ([&] {
+        Conv3dCooGradCPUKernel<T, data_t>(dev_ctx,
                                           x,
                                           kernel,
                                           rulebook,
@@ -210,10 +210,10 @@ void Conv3dGradKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(sparse_conv3d_grad,
+PD_REGISTER_KERNEL(conv3d_coo_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::Conv3dGradKernel,
+                   phi::sparse::Conv3dCooGradKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
...
@@ -27,7 +27,7 @@ namespace sparse {
  * out: (N, D, H, W, OC)
 **/
 template <typename T, typename IntT = int>
-void Conv3dCPUKernel(const CPUContext& dev_ctx,
+void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
                         const SparseCooTensor& x,
                         const DenseTensor& kernel,
                         const std::vector<int>& paddings,
@@ -151,7 +151,7 @@ void Conv3dCPUKernel(const CPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void Conv3dKernel(const Context& dev_ctx,
+void Conv3dCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,
                      const DenseTensor& kernel,
                      const std::vector<int>& paddings,
@@ -162,8 +162,8 @@ void Conv3dKernel(const Context& dev_ctx,
                      SparseCooTensor* out,
                      DenseTensor* rulebook) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dCPUKernel", ([&] {
-        Conv3dCPUKernel<T, data_t>(dev_ctx,
+      x.non_zero_indices().dtype(), "Conv3dCooCPUKernel", ([&] {
+        Conv3dCooCPUKernel<T, data_t>(dev_ctx,
                                       x,
                                       kernel,
                                       paddings,
@@ -180,6 +180,6 @@ void Conv3dKernel(const Context& dev_ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    sparse_conv3d, CPU, ALL_LAYOUT, phi::sparse::Conv3dKernel, float, double) {
+    conv3d_coo, CPU, ALL_LAYOUT, phi::sparse::Conv3dCooKernel, float, double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
...
@@ -21,7 +21,7 @@ limitations under the License. */
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/tensor_meta.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 namespace phi {
 namespace sparse {
...
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"

 #include "glog/logging.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
@@ -39,7 +39,7 @@ namespace sparse {
 // x_grad = out_grad * transpose(kenrel)
 // kernel_grad = transpose(x) * out_grad
 template <typename T, typename IntT>
-void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
+void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
                             const SparseCooTensor& x,
                             const DenseTensor& kernel,
                             const DenseTensor& rulebook,
@@ -220,7 +220,7 @@ void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void Conv3dGradKernel(const Context& dev_ctx,
+void Conv3dCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,
                          const DenseTensor& kernel,
                          const DenseTensor& rulebook,
@@ -233,8 +233,8 @@ void Conv3dGradKernel(const Context& dev_ctx,
                          SparseCooTensor* x_grad,
                          DenseTensor* kernel_grad) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dGradGPUKernel", ([&] {
-        Conv3dGradGPUKernel<T, data_t>(dev_ctx,
+      x.non_zero_indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
+        Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
                                           x,
                                           kernel,
                                           rulebook,
@@ -252,10 +252,10 @@ void Conv3dGradKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(sparse_conv3d_grad,
+PD_REGISTER_KERNEL(conv3d_coo_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::Conv3dGradKernel,
+                   phi::sparse::Conv3dCooGradKernel,
                    float,
                    double,
                    phi::dtype::float16) {
...
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
@@ -27,7 +27,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename IntT>
-void Conv3dGPUKernel(const GPUContext& dev_ctx,
+void Conv3dCooGPUKernel(const GPUContext& dev_ctx,
                         const SparseCooTensor& x,
                         const DenseTensor& kernel,
                         const std::vector<int>& paddings,
@@ -190,7 +190,7 @@ void Conv3dGPUKernel(const GPUContext& dev_ctx,
  * out: (N, D, H, W, OC)
 **/
 template <typename T, typename Context>
-void Conv3dKernel(const Context& dev_ctx,
+void Conv3dCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,
                      const DenseTensor& kernel,
                      const std::vector<int>& paddings,
@@ -201,8 +201,8 @@ void Conv3dKernel(const Context& dev_ctx,
                      SparseCooTensor* out,
                      DenseTensor* rulebook) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] {
-        Conv3dGPUKernel<T, data_t>(dev_ctx,
+      x.non_zero_indices().dtype(), "Conv3dCooGPUKernel", ([&] {
+        Conv3dCooGPUKernel<T, data_t>(dev_ctx,
                                       x,
                                       kernel,
                                       paddings,
@@ -218,10 +218,10 @@ void Conv3dKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(sparse_conv3d,
+PD_REGISTER_KERNEL(conv3d_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::Conv3dKernel,
+                   phi::sparse::Conv3dCooKernel,
                    float,
                    double,
                    phi::dtype::float16) {
...
@@ -28,7 +28,7 @@ limitations under the License. */
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/sparse/utils.cu.h"
 #include "paddle/phi/kernels/primitive/compute_primitives.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 namespace phi {
 namespace sparse {
...
@@ -23,7 +23,7 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"

-PD_DECLARE_KERNEL(sparse_conv3d, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(conv3d_coo, CPU, ALL_LAYOUT);

 template <typename T>
 void TestConv3dBase(const std::vector<int>& indices,
...
@@ -23,8 +23,8 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/sparse/coalesce_kernel.h"
-#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 namespace phi {
 namespace tests {
@@ -114,7 +114,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
   if (!std::is_same<T, phi::dtype::float16>::value) {
     DenseTensor rulebook = phi::Empty(
         dev_ctx_cpu, DenseTensorMeta(indices_dtype, {1}, DataLayout::NCHW));
-    SparseCooTensor out = sparse::Conv3d<T>(dev_ctx_cpu,
+    SparseCooTensor out = sparse::Conv3dCoo<T>(dev_ctx_cpu,
                                                x_tensor,
                                                kernel_tensor,
                                                paddings,
@@ -139,7 +139,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,

     if (backward) {
       std::tuple<SparseCooTensor, DenseTensor> grads =
-          sparse::Conv3dGrad<T>(dev_ctx_cpu,
+          sparse::Conv3dCooGrad<T>(dev_ctx_cpu,
                                    x_tensor,
                                    kernel_tensor,
                                    rulebook,
@@ -198,7 +198,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,

     DenseTensor d_rulebook = phi::Empty(
         dev_ctx_gpu, DenseTensorMeta(indices_dtype, {1}, DataLayout::NCHW));
-    SparseCooTensor d_out = sparse::Conv3d<T>(dev_ctx_gpu,
+    SparseCooTensor d_out = sparse::Conv3dCoo<T>(dev_ctx_gpu,
                                                  d_x_tensor,
                                                  d_kernel_tensor,
                                                  paddings,
@@ -242,7 +242,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,

     if (backward) {
       std::tuple<SparseCooTensor, DenseTensor> grads =
-          sparse::Conv3dGrad<T>(dev_ctx_gpu,
+          sparse::Conv3dCooGrad<T>(dev_ctx_gpu,
                                    d_x_tensor,
                                    d_kernel_tensor,
                                    d_rulebook,
...
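For reference, below is a minimal, illustrative sketch (not part of this commit) of how the renamed entry points fit together, mirroring the unit test above. The helper name RunRenamedSparseConv3d is hypothetical, and the argument order between paddings and subm (dilations, strides, groups) is assumed from the sparse API yaml declarations shown earlier.

// Illustrative sketch only; assumes the argument order declared in the sparse
// API yaml above (paddings, dilations, strides, groups, subm).
#include <tuple>
#include <vector>

#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"

template <typename T, typename Context>
void RunRenamedSparseConv3d(const Context& dev_ctx,
                            const phi::SparseCooTensor& x,
                            const phi::DenseTensor& kernel,
                            const std::vector<int>& paddings,
                            const std::vector<int>& dilations,
                            const std::vector<int>& strides) {
  phi::DenseTensor rulebook;  // produced by the forward kernel
  // Forward: previously sparse::Conv3d, now sparse::Conv3dCoo.
  phi::SparseCooTensor out = phi::sparse::Conv3dCoo<T>(
      dev_ctx, x, kernel, paddings, dilations, strides,
      /*groups=*/1, /*subm=*/false, &rulebook);
  // Backward: previously sparse::Conv3dGrad, now sparse::Conv3dCooGrad.
  // As in the test, the forward output stands in for out_grad.
  std::tuple<phi::SparseCooTensor, phi::DenseTensor> grads =
      phi::sparse::Conv3dCooGrad<T>(
          dev_ctx, x, kernel, rulebook, out, paddings, dilations, strides,
          /*groups=*/1, /*subm=*/false);
  (void)grads;  // first element: x_grad, second element: kernel_grad
}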