diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc index 29c7f5d0ce73cbf1af18e6f5869d59d2200917ad..f57674d5601813cbc4f10b7ad74d18b00622a0bb 100644 --- a/paddle/fluid/framework/infershape_utils.cc +++ b/paddle/fluid/framework/infershape_utils.cc @@ -249,13 +249,13 @@ class CompatMetaTensor : public phi::MetaTensor { } void share_meta(const MetaTensor& meta_tensor) override { + share_dims(meta_tensor); set_dtype(meta_tensor.dtype()); // VarDesc doesn't contains layout, so we cannot share layout // set_layout(meta_tensor.layout()); - // special case 1: share lod of LoDTensor + // special case: share lod of LoDTensor share_lod(meta_tensor); - share_dims(meta_tensor); } private: diff --git a/paddle/fluid/operators/gather_nd_op.cc b/paddle/fluid/operators/gather_nd_op.cc index e5ca15a39ef51f7807246c2ee1d473a0499b6463..7d7d6ae81a0935402f94cbc16e31fbba8009ce9c 100644 --- a/paddle/fluid/operators/gather_nd_op.cc +++ b/paddle/fluid/operators/gather_nd_op.cc @@ -16,7 +16,6 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/infermeta/backward.h" #include "paddle/phi/infermeta/binary.h" -#include "paddle/phi/infermeta/ternary.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index af90baf27d3f5bd79faf143319bfaf361992e649..3840b99dd176d5b348533f3e50f7f90fc3250ea1 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -215,7 +215,7 @@ REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, ops::SoftmaxOpGradMaker, ops::SoftmaxOpGradMaker, ops::SoftmaxInplaceInferer, SoftmaxInferShapeFunctor); -DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradnferShapeFunctor, +DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradInferShapeFunctor, PD_INFER_META(phi::GeneralUnaryGradInferMeta)); REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad, - SoftmaxGradnferShapeFunctor); + SoftmaxGradInferShapeFunctor); diff --git a/paddle/phi/core/meta_tensor.cc b/paddle/phi/core/meta_tensor.cc index eb114304f53ea08b05d36792330cf5bd3ebbee5d..38a6e09a61ef83aa313a67a5de1ee21ce16038eb 100644 --- a/paddle/phi/core/meta_tensor.cc +++ b/paddle/phi/core/meta_tensor.cc @@ -110,7 +110,7 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) { } } -TensorBase* MetaTensor::get_tensor() const { return tensor_; } +TensorBase* MetaTensor::tensor() const { return tensor_; } void MetaTensor::share_dims(const MetaTensor& meta_tensor) { bool is_dense_tensor = phi::DenseTensor::classof(tensor_); @@ -118,7 +118,7 @@ void MetaTensor::share_dims(const MetaTensor& meta_tensor) { if (is_dense_tensor || is_selected_rows) { set_dims(meta_tensor.dims()); if (is_selected_rows) { - const auto in_tensor_base = meta_tensor.get_tensor(); + const auto in_tensor_base = meta_tensor.tensor(); PADDLE_ENFORCE_EQ( phi::SelectedRows::classof(in_tensor_base), true, diff --git a/paddle/phi/core/meta_tensor.h b/paddle/phi/core/meta_tensor.h index 3971a9f7e99e0282cae5e4d1e61ee6eb28c4b9a7..79f8d1c057e85b11a46a90652c769459db178e14 100644 --- a/paddle/phi/core/meta_tensor.h +++ b/paddle/phi/core/meta_tensor.h @@ -66,7 +66,7 @@ class MetaTensor { // Because the lod in compiletime and runtime is different, // so `LoD` cannot in public methods const LoD& lod() const; - TensorBase* get_tensor() const; + TensorBase* tensor() const; TensorBase* tensor_; }; diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc 
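The softmax change above also shows the registration pattern this series standardizes on. A minimal sketch of that pattern, with hypothetical operator and class names (only the two macros and phi::GeneralUnaryGradInferMeta come from the diff itself):

```cpp
// DECLARE_INFER_SHAPE_FUNCTOR generates a functor that adapts a phi
// InferMeta function (wrapped by PD_INFER_META) to fluid's InferShape
// interface; passing that functor to REGISTER_OPERATOR lets the fluid
// operator reuse phi's shape/dtype inference instead of a hand-written
// InferShape method.
DECLARE_INFER_SHAPE_FUNCTOR(my_op_grad, MyOpGradInferShapeFunctor,
                            PD_INFER_META(phi::GeneralUnaryGradInferMeta));
REGISTER_OPERATOR(my_op_grad, ops::MyOpGrad, MyOpGradInferShapeFunctor);
```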
index 801bd98b504c54d1e523b2c6a00f8baa43ea3efd..a2bdf6b963bd1960ea048e21f5219a2d3127a1ee 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x, } } -void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) { - if (dx) { - dx->share_meta(x); - } +void GatherNdGradInferMeta(const MetaTensor& x, + const MetaTensor& index, + const MetaTensor& out_grad, + MetaTensor* x_grad) { + const auto& dtype = out_grad.dtype(); + x_grad->set_dims(x.dims()); + x_grad->share_lod(x); + x_grad->set_dtype(dtype); } void GeneralBinaryGradInferMeta(const MetaTensor& x, @@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x, } } +void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) { + if (dx) { + dx->share_meta(x); + } +} + void GumbelSoftmaxGradInferMeta(const MetaTensor& out, const MetaTensor& dout, int axis, @@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out, dout.dims(), errors::InvalidArgument( "Input(Out) and its gradients should have the same shape.")); - dx->share_meta(dout); -} -void GatherNdGradInferMeta(const MetaTensor& x, - const MetaTensor& index, - const MetaTensor& out_grad, - MetaTensor* x_grad) { - const auto& dtype = out_grad.dtype(); - x_grad->set_dims(x.dims()); - x_grad->share_lod(x); - x_grad->set_dtype(dtype); + dx->share_meta(dout); } void PsroiPoolGradInferMeta(const MetaTensor& x, diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index 9ed24ef8646f52d38b88e206fb45aaa5f1bd6ebd..921df460118e6916a0a81ae0027f53d0ff201833 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x, MetaTensor* dweight, MetaTensor* dbias); -void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx); +void GatherNdGradInferMeta(const MetaTensor& x, + const MetaTensor& index, + const MetaTensor& out_grad, + MetaTensor* x_grad); void GeneralBinaryGradInferMeta(const MetaTensor& x, const MetaTensor& y, @@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x, MetaTensor* dy, MetaTensor* dz); +void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx); + void GumbelSoftmaxGradInferMeta(const MetaTensor& out, const MetaTensor& dout, int axis, diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index 641956c4d9de796bed166e1f6238ff6988601bec..b9d432244568365ff29dea43c465952bbfab1174 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -22,6 +22,153 @@ limitations under the License. */ namespace phi { +void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { + out->share_meta(x); +} + +void BCELossInferMeta(const MetaTensor& input, + const MetaTensor& label, + MetaTensor* out, + MetaConfig config) { + auto input_dims = input.dims(); + auto label_dims = label.dims(); + + int rank = input_dims.size(); + PADDLE_ENFORCE_EQ(rank, + label_dims.size(), + phi::errors::InvalidArgument( + "Input(X) and Input(Label) shall have the same rank." 
+ "But received: the rank of Input(X) is [%d], " + "the rank of Input(Label) is [%d].", + rank, + label_dims.size())); + + bool check = true; + if ((!config.is_runtime) && + (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) { + check = false; + } + + if (check) { + PADDLE_ENFORCE_EQ(input_dims, + label_dims, + phi::errors::InvalidArgument( + "Input(X) and Input(Label) shall have the same " + "shape. But received: the shape of Input(X) is " + "[%s], the shape of Input(Label) is [%s].", + input_dims, + label_dims)); + } + + out->set_dims(input_dims); + out->set_dtype(input.dtype()); + out->share_lod(input); +} + +void BincountInferMeta(const MetaTensor& x, + const paddle::optional weights, + int minlength, + MetaTensor* out) { + auto input_dim = x.dims(); + + PADDLE_ENFORCE_GE(minlength, + 0, + phi::errors::InvalidArgument( + "The minlength should be greater than or equal to 0." + "But received minlength is %d", + minlength)); + + PADDLE_ENFORCE_EQ( + input_dim.size(), + 1, + phi::errors::InvalidArgument("The 'shape' of Input(X) must be 1-D tensor." + "But the dimension of Input(X) is [%d]", + input_dim.size())); + + if (weights.is_initialized()) { + auto weights_dim = weights->dims(); + PADDLE_ENFORCE_EQ(weights_dim.size(), + 1, + phi::errors::InvalidArgument( + "The 'shape' of Input(Weights) must be 1-D tensor." + "But the dimension of Input(Weights) is [%d]", + weights_dim.size())); + + PADDLE_ENFORCE_EQ( + weights_dim[0], + input_dim[0], + phi::errors::InvalidArgument( + "The 'shape' of Input(Weights) must be equal to the 'shape' of " + "Input(X)." + "But received: the 'shape' of Input(Weights) is [%s]," + "the 'shape' of Input(X) is [%s]", + weights_dim, + input_dim)); + } + out->set_dims(phi::make_ddim({-1})); + if (weights.is_initialized()) { + out->set_dtype(weights->dtype()); + } else { + out->set_dtype(x.dtype()); + } + + out->share_lod(x); +} + +void CholeskySolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + + auto x_dims_n = x_dims.size(); + auto y_dims_n = y_dims.size(); + + PADDLE_ENFORCE_GE(x_dims_n, + 2, + phi::errors::InvalidArgument( + "the rank of input Y must greater or equal to 2")); + PADDLE_ENFORCE_GE(y_dims_n, + 2, + phi::errors::InvalidArgument( + "the rank of input X must greater or equal to 2")); + PADDLE_ENFORCE_EQ( + y_dims[y_dims_n - 1], + y_dims[y_dims_n - 2], + phi::errors::InvalidArgument("input Matrix Y should be square matrix," + "But Got last shape of %ld x %ld", + y_dims[y_dims_n - 1], + y_dims[y_dims_n - 2])); + PADDLE_ENFORCE_EQ( + x_dims[x_dims_n - 2], + y_dims[y_dims_n - 2], + phi::errors::InvalidArgument("the first dim of Matrix X must be equal to " + "the fisrt dim of Matrix Y," + "But Got %ld and %ld", + x_dims[x_dims_n - 2], + y_dims[y_dims_n - 2])); + + std::vector x_dims_vec = phi::vectorize(x_dims); + std::vector y_dims_vec = phi::vectorize(y_dims); + + std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); + std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); + + std::vector expand_batch_portion = + funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); + + std::vector x_broadcast_dims({expand_batch_portion}); + x_broadcast_dims.insert(x_broadcast_dims.end(), + {x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]}); + + // dim of 'out' is the same with 'X' after broadcast + out->set_dims(phi::make_ddim(x_broadcast_dims)); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + 
out->share_lod(x); +} + void CompareInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, @@ -67,6 +214,74 @@ void CompareAllInferMeta(const MetaTensor& x, out->set_dtype(DataType::BOOL); } +void CrossInferMeta(const MetaTensor& x, + const MetaTensor& y, + int axis, + MetaTensor* out) { + auto x_dim = x.dims(); + auto y_dim = y.dims(); + auto dim = axis; + + bool dims_match = phi::funcs::CheckDims(x_dim, y_dim); + PADDLE_ENFORCE_EQ( + dims_match, + true, + phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to " + "the 'shape' of Input(Y). But received " + "Input(X).dimensions = [%s], " + "Input(Y).dimensions = [%s]", + x_dim, + y_dim)); + + if (dim != DDim::kMaxRank) { + PADDLE_ENFORCE_EQ( + dim < x_dim.size() && dim >= (0 - x_dim.size()), + true, + phi::errors::OutOfRange( + "Attr(dim) is out of range, It's expected " + "to be in range of [-%d, %d]. But received Attr(dim) = %d.", + x_dim.size(), + x_dim.size() - 1, + dim)); + if (dim < 0) { + dim += x_dim.size(); + } + PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, + true, + phi::errors::InvalidArgument( + "Input(X/Y).dims()[dim] should be equal to 3." + "But received Input(X/Y).dims()[dim] = %d.", + x_dim[dim])); + } + out->set_dims(x_dim); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + out->share_lod(x); +} + +void DistInferMeta(const MetaTensor& x, + const MetaTensor& y, + float p, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + + PADDLE_ENFORCE_NE(phi::product(x_dims), + 0, + phi::errors::InvalidArgument( + "The Input(X) has not been initialized properly. The " + "shape of Input(X) = [%s].", + x_dims)); + PADDLE_ENFORCE_NE(phi::product(y_dims), + 0, + phi::errors::InvalidArgument( + "The Input(Y) has not been initialized properly. The " + "shape of Input(Y) = [%s].", + y_dims)); + out->set_dims({1}); + out->set_dtype(x.dtype()); +} + void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { auto x_dims = x.dims(); auto x_rank = static_cast(x_dims.size()); @@ -109,84 +324,11 @@ void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { out->set_layout(x.layout()); } -void MatmulInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool trans_x, - bool trans_y, - MetaTensor* out) { - std::vector dims_x = phi::vectorize(x.dims()); - std::vector dims_y = phi::vectorize(y.dims()); - auto ndims_x = dims_x.size(); - auto ndims_y = dims_y.size(); - PADDLE_ENFORCE_GT(ndims_x, - 0UL, - phi::errors::InvalidArgument( - "The Input(x) dims size must be greater than 0," - " but reviced dims size is 0. ")); - PADDLE_ENFORCE_GT(ndims_y, - 0UL, - phi::errors::InvalidArgument( - "The Input(y) dims size must be greater than 0," - " but reviced dims size is 0. 
")); - - bool x_broadcasted = false, y_broadcasted = false; - if (ndims_x == 1) { - dims_x.insert(dims_x.begin(), 1); - ndims_x = 2; - x_broadcasted = true; - } - - if (ndims_y == 1) { - dims_y.push_back(1); - ndims_y = 2; - y_broadcasted = true; - } - - size_t M, N; - if (trans_x) { - M = dims_x[ndims_x - 1]; - } else { - M = dims_x[ndims_x - 2]; - } - if (trans_y) { - N = dims_y[ndims_y - 2]; - } else { - N = dims_y[ndims_y - 1]; - } - - std::vector new_dims; - if (ndims_x > ndims_y) { - new_dims.assign(dims_x.begin(), dims_x.end() - 2); - } else if (ndims_x < ndims_y) { - new_dims.assign(dims_y.begin(), dims_y.end() - 2); - } else { - new_dims.reserve(ndims_x); - for (size_t i = 0; i < ndims_x - 2; ++i) { - new_dims.push_back(std::max(dims_x[i], dims_y[i])); - } - } - if (!x_broadcasted) { - new_dims.push_back(M); - } - if (!y_broadcasted) { - new_dims.push_back(N); - } - if (x_broadcasted && y_broadcasted) { - new_dims.push_back(1); - } - - auto ddim_out = phi::make_ddim(new_dims); - - out->set_dims(ddim_out); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); -} - -void ElementwiseInferMeta(const MetaTensor& x, - const MetaTensor& y, - MetaTensor* out) { - return ElementwiseRawInferMeta(x, y, -1, std::move(out)); -} +void ElementwiseInferMeta(const MetaTensor& x, + const MetaTensor& y, + MetaTensor* out) { + return ElementwiseRawInferMeta(x, y, -1, std::move(out)); +} void ElementwiseRawInferMeta(const MetaTensor& x, const MetaTensor& y, @@ -223,383 +365,19 @@ void ElementwiseRawInferMeta(const MetaTensor& x, funcs::GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(), - y_dims_array.data(), - out_dims_array.data(), - max_dim, - axis); - auto out_dims = phi::make_ddim(out_dims_array); - out->set_dims(out_dims); - } else { - out->set_dims(x.dims()); - } - - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - -void HuberLossInferMeta(const MetaTensor& input, - const MetaTensor& label, - float delta, - MetaTensor* out, - MetaTensor* residual, - MetaConfig config) { - auto input_dims = input.dims(); - auto label_dims = label.dims(); - - PADDLE_ENFORCE_EQ(input_dims.size(), - label_dims.size(), - phi::errors::InvalidArgument( - "Input(input) rank and Input(label) rank should be " - "same, but received input rank(%d) != label rank(%d)", - input_dims.size(), - label_dims.size())); - - bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) || - phi::contain_unknown_dim(label_dims); - if (config.is_runtime || !contain_unknown_dim) { - PADDLE_ENFORCE_EQ( - input_dims, - label_dims, - phi::errors::InvalidArgument( - "The Input(input) and Input(label) should have the same " - "shape, but received input shape [%s] != label shape [%s]", - input_dims, - label_dims)); - } - - auto out_dims = label_dims; - residual->set_dims(out_dims); - out->set_dims(out_dims); - out->share_lod(input); -} - -void CholeskySolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - - auto x_dims_n = x_dims.size(); - auto y_dims_n = y_dims.size(); - - PADDLE_ENFORCE_GE(x_dims_n, - 2, - phi::errors::InvalidArgument( - "the rank of input Y must greater or equal to 2")); - PADDLE_ENFORCE_GE(y_dims_n, - 2, - phi::errors::InvalidArgument( - "the rank of input X must greater or equal to 2")); - PADDLE_ENFORCE_EQ( - y_dims[y_dims_n - 1], - y_dims[y_dims_n - 2], - phi::errors::InvalidArgument("input Matrix Y should be square matrix," - "But Got last shape of %ld x %ld", - 
y_dims[y_dims_n - 1], - y_dims[y_dims_n - 2])); - PADDLE_ENFORCE_EQ( - x_dims[x_dims_n - 2], - y_dims[y_dims_n - 2], - phi::errors::InvalidArgument("the first dim of Matrix X must be equal to " - "the fisrt dim of Matrix Y," - "But Got %ld and %ld", - x_dims[x_dims_n - 2], - y_dims[y_dims_n - 2])); - - std::vector x_dims_vec = phi::vectorize(x_dims); - std::vector y_dims_vec = phi::vectorize(y_dims); - - std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); - std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); - - std::vector expand_batch_portion = - funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); - - std::vector x_broadcast_dims({expand_batch_portion}); - x_broadcast_dims.insert(x_broadcast_dims.end(), - {x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]}); - - // dim of 'out' is the same with 'X' after broadcast - out->set_dims(phi::make_ddim(x_broadcast_dims)); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - -void TriangularSolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - bool transpose, - bool unitriangular, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - - auto x_dims_n = x_dims.size(); - auto y_dims_n = y_dims.size(); - - PADDLE_ENFORCE_GE(x_dims_n, - 2, - phi::errors::InvalidArgument( - "The input tensor X's dimensions of TriangularSolveOp " - "should be >= 2. But received X's " - "dimensions = %d, X's shape = [%s]", - x_dims.size(), - x_dims)); - - PADDLE_ENFORCE_GE(y_dims_n, - 2, - phi::errors::InvalidArgument( - "The input tensor Y's dimensions of TriangularSolveOp " - "should be >=2. But received Y's " - "dimensions = %d, Y's shape = [%s]", - y_dims.size(), - y_dims)); - - PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], - x_dims[x_dims_n - 1], - phi::errors::InvalidArgument( - "The inner-most 2 dimensions of Input(X) all should " - "be square matrices " - "But received X's shape[-2] = %d and shape[-1] = %d.", - x_dims[x_dims_n - 2], - x_dims[x_dims_n - 1])); - - std::vector x_dims_vec = phi::vectorize(x_dims); - std::vector y_dims_vec = phi::vectorize(y_dims); - - std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); - std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); - - std::vector expand_batch_portion = - funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); - - std::vector y_broadcast_dims({expand_batch_portion}); - y_broadcast_dims.insert(y_broadcast_dims.end(), - {y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]}); - - // dim of 'out' is the same with 'Y' after broadcast - out->set_dims(phi::make_ddim(y_broadcast_dims)); - out->set_dtype(y.dtype()); - out->set_layout(y.layout()); - out->share_lod(y); -} - -void IndexSampleInferMeta(const MetaTensor& x, - const MetaTensor& y, - MetaTensor* out, - MetaConfig config) { - auto input_dims = x.dims(); - PADDLE_ENFORCE_EQ(input_dims.size(), - 2, - errors::InvalidArgument( - "Inputs(X) shape of IndexSample op should be 2-D, but " - "got X's shape = [%s], please check X shape.", - input_dims)); - - auto index_dims = y.dims(); - PADDLE_ENFORCE_EQ( - index_dims.size(), - 2, - errors::InvalidArgument( - "Inputs(Index) shape of IndexSample op should be 2-D, but " - "got Index's shape [%s] , please check index shape.", - input_dims)); - if (config.is_runtime) { - PADDLE_ENFORCE_EQ(input_dims[0], - index_dims[0], - errors::InvalidArgument( - "Inputs(X)'s value of dimension 0 must same with " - "Inputs(Index)'s value of dimension 0, but " - "got 
%d of Inputs(X), and got %d of Inputs(Index), " - "please check Inputs shape.", - input_dims[0], - index_dims[0])); - } - out->set_dtype(x.dtype()); - out->set_dims(index_dims); - out->share_lod(y); -} -void CrossInferMeta(const MetaTensor& x, - const MetaTensor& y, - int axis, - MetaTensor* out) { - auto x_dim = x.dims(); - auto y_dim = y.dims(); - auto dim = axis; - - bool dims_match = phi::funcs::CheckDims(x_dim, y_dim); - PADDLE_ENFORCE_EQ( - dims_match, - true, - phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to " - "the 'shape' of Input(Y). But received " - "Input(X).dimensions = [%s], " - "Input(Y).dimensions = [%s]", - x_dim, - y_dim)); - - if (dim != DDim::kMaxRank) { - PADDLE_ENFORCE_EQ( - dim < x_dim.size() && dim >= (0 - x_dim.size()), - true, - phi::errors::OutOfRange( - "Attr(dim) is out of range, It's expected " - "to be in range of [-%d, %d]. But received Attr(dim) = %d.", - x_dim.size(), - x_dim.size() - 1, - dim)); - if (dim < 0) { - dim += x_dim.size(); - } - PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, - true, - phi::errors::InvalidArgument( - "Input(X/Y).dims()[dim] should be equal to 3." - "But received Input(X/Y).dims()[dim] = %d.", - x_dim[dim])); - } - out->set_dims(x_dim); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - -void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { - out->share_meta(x); -} - -void SegmentPoolInferMeta(const MetaTensor& x, - const MetaTensor& segment_ids, - const std::string& pooltype, - MetaTensor* out, - MetaTensor* summed_ids, - MetaConfig config) { - auto dims = x.dims(); - dims[0] = -1; - out->set_dims(dims); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - - if (pooltype == "MEAN") { - summed_ids->set_dims({-1, 1}); - summed_ids->set_dtype(x.dtype()); - summed_ids->set_layout(x.layout()); - } -} - -void BCELossInferMeta(const MetaTensor& input, - const MetaTensor& label, - MetaTensor* out, - MetaConfig config) { - auto input_dims = input.dims(); - auto label_dims = label.dims(); - - int rank = input_dims.size(); - PADDLE_ENFORCE_EQ(rank, - label_dims.size(), - phi::errors::InvalidArgument( - "Input(X) and Input(Label) shall have the same rank." - "But received: the rank of Input(X) is [%d], " - "the rank of Input(Label) is [%d].", - rank, - label_dims.size())); - - bool check = true; - if ((!config.is_runtime) && - (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) { - check = false; - } - - if (check) { - PADDLE_ENFORCE_EQ(input_dims, - label_dims, - phi::errors::InvalidArgument( - "Input(X) and Input(Label) shall have the same " - "shape. But received: the shape of Input(X) is " - "[%s], the shape of Input(Label) is [%s].", - input_dims, - label_dims)); - } - - out->set_dims(input_dims); - out->set_dtype(input.dtype()); - out->share_lod(input); -} - -void BincountInferMeta(const MetaTensor& x, - const paddle::optional weights, - int minlength, - MetaTensor* out) { - auto input_dim = x.dims(); - - PADDLE_ENFORCE_GE(minlength, - 0, - phi::errors::InvalidArgument( - "The minlength should be greater than or equal to 0." - "But received minlength is %d", - minlength)); - - PADDLE_ENFORCE_EQ( - input_dim.size(), - 1, - phi::errors::InvalidArgument("The 'shape' of Input(X) must be 1-D tensor." 
- "But the dimension of Input(X) is [%d]", - input_dim.size())); - - if (weights.is_initialized()) { - auto weights_dim = weights->dims(); - PADDLE_ENFORCE_EQ(weights_dim.size(), - 1, - phi::errors::InvalidArgument( - "The 'shape' of Input(Weights) must be 1-D tensor." - "But the dimension of Input(Weights) is [%d]", - weights_dim.size())); - - PADDLE_ENFORCE_EQ( - weights_dim[0], - input_dim[0], - phi::errors::InvalidArgument( - "The 'shape' of Input(Weights) must be equal to the 'shape' of " - "Input(X)." - "But received: the 'shape' of Input(Weights) is [%s]," - "the 'shape' of Input(X) is [%s]", - weights_dim, - input_dim)); - } - out->set_dims(phi::make_ddim({-1})); - if (weights.is_initialized()) { - out->set_dtype(weights->dtype()); + y_dims_array.data(), + out_dims_array.data(), + max_dim, + axis); + auto out_dims = phi::make_ddim(out_dims_array); + out->set_dims(out_dims); } else { - out->set_dtype(x.dtype()); + out->set_dims(x.dims()); } - out->share_lod(x); -} - -void DistInferMeta(const MetaTensor& x, - const MetaTensor& y, - float p, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - - PADDLE_ENFORCE_NE(phi::product(x_dims), - 0, - phi::errors::InvalidArgument( - "The Input(X) has not been initialized properly. The " - "shape of Input(X) = [%s].", - x_dims)); - PADDLE_ENFORCE_NE(phi::product(y_dims), - 0, - phi::errors::InvalidArgument( - "The Input(Y) has not been initialized properly. The " - "shape of Input(Y) = [%s].", - y_dims)); - out->set_dims({1}); out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + out->share_lod(x); } void GatherNdInferMeta(const MetaTensor& x, @@ -648,6 +426,78 @@ void GatherTreeMeta(const MetaTensor& ids, out->set_dims(ids_dims); } +void HuberLossInferMeta(const MetaTensor& input, + const MetaTensor& label, + float delta, + MetaTensor* out, + MetaTensor* residual, + MetaConfig config) { + auto input_dims = input.dims(); + auto label_dims = label.dims(); + + PADDLE_ENFORCE_EQ(input_dims.size(), + label_dims.size(), + phi::errors::InvalidArgument( + "Input(input) rank and Input(label) rank should be " + "same, but received input rank(%d) != label rank(%d)", + input_dims.size(), + label_dims.size())); + + bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) || + phi::contain_unknown_dim(label_dims); + if (config.is_runtime || !contain_unknown_dim) { + PADDLE_ENFORCE_EQ( + input_dims, + label_dims, + phi::errors::InvalidArgument( + "The Input(input) and Input(label) should have the same " + "shape, but received input shape [%s] != label shape [%s]", + input_dims, + label_dims)); + } + + auto out_dims = label_dims; + residual->set_dims(out_dims); + out->set_dims(out_dims); + out->share_lod(input); +} + +void IndexSampleInferMeta(const MetaTensor& x, + const MetaTensor& y, + MetaTensor* out, + MetaConfig config) { + auto input_dims = x.dims(); + PADDLE_ENFORCE_EQ(input_dims.size(), + 2, + errors::InvalidArgument( + "Inputs(X) shape of IndexSample op should be 2-D, but " + "got X's shape = [%s], please check X shape.", + input_dims)); + + auto index_dims = y.dims(); + PADDLE_ENFORCE_EQ( + index_dims.size(), + 2, + errors::InvalidArgument( + "Inputs(Index) shape of IndexSample op should be 2-D, but " + "got Index's shape [%s] , please check index shape.", + input_dims)); + if (config.is_runtime) { + PADDLE_ENFORCE_EQ(input_dims[0], + index_dims[0], + errors::InvalidArgument( + "Inputs(X)'s value of dimension 0 must same with " + "Inputs(Index)'s value of dimension 0, but " + "got %d of Inputs(X), and got %d of 
Inputs(Index), " + "please check Inputs shape.", + input_dims[0], + index_dims[0])); + } + out->set_dtype(x.dtype()); + out->set_dims(index_dims); + out->share_lod(y); +} + void LogLossInferMeta(const MetaTensor& input, const MetaTensor& label, float epsilon, @@ -690,6 +540,79 @@ void LogLossInferMeta(const MetaTensor& input, out->share_lod(input); } +void MatmulInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool trans_x, + bool trans_y, + MetaTensor* out) { + std::vector dims_x = phi::vectorize(x.dims()); + std::vector dims_y = phi::vectorize(y.dims()); + auto ndims_x = dims_x.size(); + auto ndims_y = dims_y.size(); + PADDLE_ENFORCE_GT(ndims_x, + 0UL, + phi::errors::InvalidArgument( + "The Input(x) dims size must be greater than 0," + " but reviced dims size is 0. ")); + PADDLE_ENFORCE_GT(ndims_y, + 0UL, + phi::errors::InvalidArgument( + "The Input(y) dims size must be greater than 0," + " but reviced dims size is 0. ")); + + bool x_broadcasted = false, y_broadcasted = false; + if (ndims_x == 1) { + dims_x.insert(dims_x.begin(), 1); + ndims_x = 2; + x_broadcasted = true; + } + + if (ndims_y == 1) { + dims_y.push_back(1); + ndims_y = 2; + y_broadcasted = true; + } + + size_t M, N; + if (trans_x) { + M = dims_x[ndims_x - 1]; + } else { + M = dims_x[ndims_x - 2]; + } + if (trans_y) { + N = dims_y[ndims_y - 2]; + } else { + N = dims_y[ndims_y - 1]; + } + + std::vector new_dims; + if (ndims_x > ndims_y) { + new_dims.assign(dims_x.begin(), dims_x.end() - 2); + } else if (ndims_x < ndims_y) { + new_dims.assign(dims_y.begin(), dims_y.end() - 2); + } else { + new_dims.reserve(ndims_x); + for (size_t i = 0; i < ndims_x - 2; ++i) { + new_dims.push_back(std::max(dims_x[i], dims_y[i])); + } + } + if (!x_broadcasted) { + new_dims.push_back(M); + } + if (!y_broadcasted) { + new_dims.push_back(N); + } + if (x_broadcasted && y_broadcasted) { + new_dims.push_back(1); + } + + auto ddim_out = phi::make_ddim(new_dims); + + out->set_dims(ddim_out); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); +} + void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) { auto dim_x = x.dims(); auto dim_vec = vec.dims(); @@ -720,6 +643,25 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) { out->share_lod(x); } +void SegmentPoolInferMeta(const MetaTensor& x, + const MetaTensor& segment_ids, + const std::string& pooltype, + MetaTensor* out, + MetaTensor* summed_ids, + MetaConfig config) { + auto dims = x.dims(); + dims[0] = -1; + out->set_dims(dims); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + + if (pooltype == "MEAN") { + summed_ids->set_dims({-1, 1}); + summed_ids->set_dtype(x.dtype()); + summed_ids->set_layout(x.layout()); + } +} + void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, const MetaTensor& label, bool normalize, @@ -761,4 +703,63 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, out->share_lod(x); } +void TriangularSolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + bool transpose, + bool unitriangular, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + + auto x_dims_n = x_dims.size(); + auto y_dims_n = y_dims.size(); + + PADDLE_ENFORCE_GE(x_dims_n, + 2, + phi::errors::InvalidArgument( + "The input tensor X's dimensions of TriangularSolveOp " + "should be >= 2. 
But received X's " + "dimensions = %d, X's shape = [%s]", + x_dims.size(), + x_dims)); + + PADDLE_ENFORCE_GE(y_dims_n, + 2, + phi::errors::InvalidArgument( + "The input tensor Y's dimensions of TriangularSolveOp " + "should be >=2. But received Y's " + "dimensions = %d, Y's shape = [%s]", + y_dims.size(), + y_dims)); + + PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], + x_dims[x_dims_n - 1], + phi::errors::InvalidArgument( + "The inner-most 2 dimensions of Input(X) all should " + "be square matrices " + "But received X's shape[-2] = %d and shape[-1] = %d.", + x_dims[x_dims_n - 2], + x_dims[x_dims_n - 1])); + + std::vector x_dims_vec = phi::vectorize(x_dims); + std::vector y_dims_vec = phi::vectorize(y_dims); + + std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); + std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); + + std::vector expand_batch_portion = + funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); + + std::vector y_broadcast_dims({expand_batch_portion}); + y_broadcast_dims.insert(y_broadcast_dims.end(), + {y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]}); + + // dim of 'out' is the same with 'Y' after broadcast + out->set_dims(phi::make_ddim(y_broadcast_dims)); + out->set_dtype(y.dtype()); + out->set_layout(y.layout()); + out->share_lod(y); +} + } // namespace phi diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h index d2b16e557b06dc94107788995f0c26f1e27e1761..307ecc29cac7548a811b9bc0ae9a693c7062a354 100644 --- a/paddle/phi/infermeta/binary.h +++ b/paddle/phi/infermeta/binary.h @@ -29,22 +29,43 @@ namespace phi { // Because functions in this file not only can infer shape, but also need // infer lod or other useful data. +void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); + +void BCELossInferMeta(const MetaTensor& input, + const MetaTensor& label, + MetaTensor* out, + MetaConfig config = MetaConfig()); + +void BincountInferMeta(const MetaTensor& x, + const paddle::optional weights, + int minlength, + MetaTensor* out); + +void CholeskySolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + MetaTensor* out); + +void CompareAllInferMeta(const MetaTensor& x, + const MetaTensor& y, + MetaTensor* out); + void CompareInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, MetaTensor* out); -void CompareAllInferMeta(const MetaTensor& x, - const MetaTensor& y, - MetaTensor* out); +void CrossInferMeta(const MetaTensor& x, + const MetaTensor& y, + int axis, + MetaTensor* out); -void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); +void DistInferMeta(const MetaTensor& x, + const MetaTensor& y, + float p, + MetaTensor* out); -void MatmulInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool trans_x, - bool trans_y, - MetaTensor* out); +void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); void ElementwiseInferMeta(const MetaTensor& x, const MetaTensor& y, @@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta, int axis, MetaTensor* out); +void GatherNdInferMeta(const MetaTensor& x, + const MetaTensor& index, + MetaTensor* out); + +void GatherTreeMeta(const MetaTensor& ids, + const MetaTensor& parents, + MetaTensor* out); + void HuberLossInferMeta(const MetaTensor& input_meta, const MetaTensor& label_meta, float delta, @@ -62,29 +91,24 @@ void HuberLossInferMeta(const MetaTensor& input_meta, MetaTensor* residual, MetaConfig config = MetaConfig()); -void CholeskySolveInferMeta(const MetaTensor& 
x, - const MetaTensor& y, - bool upper, - MetaTensor* out); - -void TriangularSolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - bool transpose, - bool unitriangular, - MetaTensor* out); - void IndexSampleInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out, MetaConfig config = MetaConfig()); -void CrossInferMeta(const MetaTensor& x, - const MetaTensor& y, - int axis, - MetaTensor* out); +void LogLossInferMeta(const MetaTensor& input, + const MetaTensor& label, + float epsilon, + MetaTensor* out, + MetaConfig config = MetaConfig()); -void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); +void MatmulInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool trans_x, + bool trans_y, + MetaTensor* out); + +void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out); void SegmentPoolInferMeta(const MetaTensor& x, const MetaTensor& segment_ids, @@ -93,37 +117,6 @@ void SegmentPoolInferMeta(const MetaTensor& x, MetaTensor* summed_ids, MetaConfig config = MetaConfig()); -void BCELossInferMeta(const MetaTensor& input, - const MetaTensor& label, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void BincountInferMeta(const MetaTensor& x, - const paddle::optional weights, - int minlength, - MetaTensor* out); - -void DistInferMeta(const MetaTensor& x, - const MetaTensor& y, - float p, - MetaTensor* out); - -void GatherNdInferMeta(const MetaTensor& x, - const MetaTensor& index, - MetaTensor* out); - -void GatherTreeMeta(const MetaTensor& ids, - const MetaTensor& parents, - MetaTensor* out); - -void LogLossInferMeta(const MetaTensor& input, - const MetaTensor& label, - float epsilon, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out); - void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, const MetaTensor& label, bool normalize, @@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); +void TriangularSolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + bool transpose, + bool unitriangular, + MetaTensor* out); + } // namespace phi diff --git a/paddle/phi/infermeta/nullary.cc b/paddle/phi/infermeta/nullary.cc index 506d3fd14ea3fd568ce2f77d7ce30408062279e9..081084567e840f287bb113ee567888f4032f5638 100644 --- a/paddle/phi/infermeta/nullary.cc +++ b/paddle/phi/infermeta/nullary.cc @@ -16,6 +16,12 @@ limitations under the License. 
*/ namespace phi { +void CreateInferMeta(const ScalarArray& shape, + DataType dtype, + MetaTensor* out) { + CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out); +} + void CreateInferMetaBase(const std::vector& shape, DataType dtype, DataLayout layout, @@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector& shape, out->set_layout(layout); } -void CreateInferMeta(const ScalarArray& shape, - DataType dtype, - MetaTensor* out) { - CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out); -} - void EyeInferMeta(int64_t num_rows, int64_t num_columns, DataType dtype, @@ -41,18 +41,6 @@ void EyeInferMeta(int64_t num_rows, out->set_dtype(dtype); } -void TruncatedGaussianRandomInferMeta(const std::vector& shape, - float mean, - float std, - int seed, - DataType dtype, - MetaTensor* out) { - auto out_dims = phi::make_ddim(shape); - out->set_dims(out_dims); - out->set_dtype(dtype); - out->set_layout(DataLayout::NCHW); -} - void GaussianRandomInferMeta(const ScalarArray& shape, float mean, float std, @@ -65,4 +53,16 @@ void GaussianRandomInferMeta(const ScalarArray& shape, out->set_layout(DataLayout::NCHW); } +void TruncatedGaussianRandomInferMeta(const std::vector& shape, + float mean, + float std, + int seed, + DataType dtype, + MetaTensor* out) { + auto out_dims = phi::make_ddim(shape); + out->set_dims(out_dims); + out->set_dtype(dtype); + out->set_layout(DataLayout::NCHW); +} + } // namespace phi diff --git a/paddle/phi/infermeta/nullary.h b/paddle/phi/infermeta/nullary.h index bd0567486e4d62a9f6fe9adfa02727bfe79937e1..38eaa636f8c8779c5a1f597b8cfb23ce6efc5edc 100644 --- a/paddle/phi/infermeta/nullary.h +++ b/paddle/phi/infermeta/nullary.h @@ -28,25 +28,18 @@ namespace phi { // Because functions in this file not only can infer shape, but also need // infer lod or other useful data. +void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out); + void CreateInferMetaBase(const std::vector& shape, DataType dtype, DataLayout layout, MetaTensor* out); -void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out); - void EyeInferMeta(int64_t num_rows, int64_t num_columns, DataType dtype, MetaTensor* out); -void TruncatedGaussianRandomInferMeta(const std::vector& shape, - float mean, - float std, - int seed, - DataType dtype, - MetaTensor* out); - void GaussianRandomInferMeta(const ScalarArray& shape, float mean, float std, @@ -54,4 +47,11 @@ void GaussianRandomInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out); +void TruncatedGaussianRandomInferMeta(const std::vector& shape, + float mean, + float std, + int seed, + DataType dtype, + MetaTensor* out); + } // namespace phi diff --git a/paddle/phi/infermeta/ternary.cc b/paddle/phi/infermeta/ternary.cc index 88ac2cb0f8d1b01ade0e58bc8f1253c67ad05981..235cfe368c1921eac546b670470963fb49100290 100644 --- a/paddle/phi/infermeta/ternary.cc +++ b/paddle/phi/infermeta/ternary.cc @@ -18,6 +18,58 @@ limitations under the License. */ namespace phi { +void AccuracyInferMeta(const MetaTensor& out, + const MetaTensor& indice, + const MetaTensor& label, + MetaTensor* accuracy, + MetaTensor* correct, + MetaTensor* total, + MetaConfig config) { + auto inference_dim = out.dims(); + auto label_dim = label.dims(); + // Assume indices has same shape as inference, because + // it's the output of topk. + PADDLE_ENFORCE_EQ( + label_dim.size(), + 2, + phi::errors::InvalidArgument( + "ShapeError: label's dimensions of AccuracyOp must be 2. 
" + "But received label's dimensions = %d, label's shape = [%s]", + label_dim.size(), + label_dim)); + if (config.is_runtime) { + PADDLE_ENFORCE_EQ(label_dim[1], + 1, + phi::errors::InvalidArgument( + "ShapeError: label's second dimension of " + "AccuracyOp must be 1. But received label's " + "second dimension is = %d, label's shape = [%s]", + label_dim[1], + label_dim)); + PADDLE_ENFORCE_EQ( + inference_dim[0], + label_dim[0], + phi::errors::InvalidArgument( + "ShapeError: the output's num_rows of AccuracyOp must be" + " the same as label's num_rows. But received output's " + "shape = [%s], label's shape = [%s], output's num_rows = %d, " + "label's " + "num_rows = %d", + inference_dim, + label_dim, + inference_dim[0], + label_dim[0])); + } + + accuracy->set_dims({1}); + accuracy->set_dtype(out.dtype()); + correct->set_dims({1}); + correct->set_dtype(out.dtype()); + total->set_dims({1}); + total->set_dtype(out.dtype()); + accuracy->share_lod(out); +} + void AddmmInferMeta(const MetaTensor& input, const MetaTensor& x, const MetaTensor& y, @@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input, out->set_dtype(input.dtype()); } +void GraphSendRecvInferMeta(const MetaTensor& x, + const MetaTensor& src_index, + const MetaTensor& dst_index, + const std::string& pool_type, + MetaTensor* out, + MetaTensor* dst_count) { + auto src_index_dims = src_index.dims(); + if (src_index_dims.size() == 2) { + PADDLE_ENFORCE_EQ(src_index_dims[1], + 1, + phi::errors::InvalidArgument( + "The last dim of Src_index should be 1 when it " + "is 2D, but we get %d", + src_index_dims[1])); + } else { + PADDLE_ENFORCE_EQ( + src_index_dims.size(), + 1, + phi::errors::InvalidArgument( + "The Src_index should be 1D, when it is not 2D, but we get %d", + src_index_dims.size())); + } + + auto dst_index_dims = dst_index.dims(); + if (dst_index_dims.size() == 2) { + PADDLE_ENFORCE_EQ(dst_index_dims[1], + 1, + phi::errors::InvalidArgument( + "The last dim of Dst_index should be 1 when it " + "is 2D, but we get %d", + dst_index_dims[1])); + } else { + PADDLE_ENFORCE_EQ( + dst_index_dims.size(), + 1, + phi::errors::InvalidArgument("The Dst_index should be 1D, " + "when it is not 2D, but we get %d", + dst_index_dims.size())); + } + + PADDLE_ENFORCE_EQ(src_index_dims[0], + dst_index_dims[0], + phi::errors::InvalidArgument( + "Src_index and Dst_index should have the same shape.")); + + auto dims = x.dims(); + out->set_dims(dims); + out->set_dtype(x.dtype()); + + if (pool_type == "MEAN") { + dst_count->set_dims({dims[0]}); + dst_count->set_dtype(DataType::INT32); + } +} + +void LerpInferMeta(const MetaTensor& x, + const MetaTensor& y, + const MetaTensor& weight, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + auto w_dims = weight.dims(); + DDim out_dims; + out_dims = funcs::GetOutputDims(x_dims, y_dims); + if (w_dims.size() > 1 || w_dims[0] != 1) { + out_dims = funcs::GetOutputDims(out_dims, w_dims); + } + out->set_dims(out_dims); + out->set_dtype(x.dtype()); + out->share_lod(x); +} + +void LinspaceInferMeta(const MetaTensor& start, + const MetaTensor& stop, + const MetaTensor& number, + MetaTensor* out) { + auto s_dims = start.dims(); + PADDLE_ENFORCE_EQ( + (s_dims.size() == 1) && (s_dims[0] == 1), + true, + phi::errors::InvalidArgument("The shape of Input(Start) must be [1]," + "but received input shape is [%s].", + s_dims)); + auto e_dims = stop.dims(); + PADDLE_ENFORCE_EQ( + (e_dims.size() == 1) && (e_dims[0] == 1), + true, + phi::errors::InvalidArgument("The shape of Input(Stop) must be [1]," 
+ "but received input shape is [%s].", + e_dims)); + auto step_dims = number.dims(); + PADDLE_ENFORCE_EQ( + (step_dims.size() == 1) && (step_dims[0] == 1), + true, + phi::errors::InvalidArgument("The shape of Input(Num) must be [1]," + "but received input shape is [%s].", + step_dims)); + out->set_dims(phi::make_ddim({-1})); + out->set_dtype(start.dtype()); +} + void NllLossRawInferMeta(const MetaTensor& input, const MetaTensor& label, paddle::optional weight, @@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input, scores->set_dtype(length.dtype()); } -void LerpInferMeta(const MetaTensor& x, - const MetaTensor& y, - const MetaTensor& weight, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - auto w_dims = weight.dims(); - DDim out_dims; - out_dims = funcs::GetOutputDims(x_dims, y_dims); - if (w_dims.size() > 1 || w_dims[0] != 1) { - out_dims = funcs::GetOutputDims(out_dims, w_dims); - } - out->set_dims(out_dims); - out->set_dtype(x.dtype()); - out->share_lod(x); -} - -void LinspaceInferMeta(const MetaTensor& start, - const MetaTensor& stop, - const MetaTensor& number, - MetaTensor* out) { - auto s_dims = start.dims(); - PADDLE_ENFORCE_EQ( - (s_dims.size() == 1) && (s_dims[0] == 1), - true, - phi::errors::InvalidArgument("The shape of Input(Start) must be [1]," - "but received input shape is [%s].", - s_dims)); - auto e_dims = stop.dims(); - PADDLE_ENFORCE_EQ( - (e_dims.size() == 1) && (e_dims[0] == 1), - true, - phi::errors::InvalidArgument("The shape of Input(Stop) must be [1]," - "but received input shape is [%s].", - e_dims)); - auto step_dims = number.dims(); - PADDLE_ENFORCE_EQ( - (step_dims.size() == 1) && (step_dims[0] == 1), - true, - phi::errors::InvalidArgument("The shape of Input(Num) must be [1]," - "but received input shape is [%s].", - step_dims)); - out->set_dims(phi::make_ddim({-1})); - out->set_dtype(start.dtype()); -} - -void AccuracyInferMeta(const MetaTensor& out, - const MetaTensor& indice, - const MetaTensor& label, - MetaTensor* accuracy, - MetaTensor* correct, - MetaTensor* total, - MetaConfig config) { - auto inference_dim = out.dims(); - auto label_dim = label.dims(); - // Assume indices has same shape as inference, because - // it's the output of topk. - PADDLE_ENFORCE_EQ( - label_dim.size(), - 2, - phi::errors::InvalidArgument( - "ShapeError: label's dimensions of AccuracyOp must be 2. " - "But received label's dimensions = %d, label's shape = [%s]", - label_dim.size(), - label_dim)); - if (config.is_runtime) { - PADDLE_ENFORCE_EQ(label_dim[1], - 1, - phi::errors::InvalidArgument( - "ShapeError: label's second dimension of " - "AccuracyOp must be 1. But received label's " - "second dimension is = %d, label's shape = [%s]", - label_dim[1], - label_dim)); - PADDLE_ENFORCE_EQ( - inference_dim[0], - label_dim[0], - phi::errors::InvalidArgument( - "ShapeError: the output's num_rows of AccuracyOp must be" - " the same as label's num_rows. 
But received output's " - "shape = [%s], label's shape = [%s], output's num_rows = %d, " - "label's " - "num_rows = %d", - inference_dim, - label_dim, - inference_dim[0], - label_dim[0])); - } - - accuracy->set_dims({1}); - accuracy->set_dtype(out.dtype()); - correct->set_dims({1}); - correct->set_dtype(out.dtype()); - total->set_dims({1}); - total->set_dtype(out.dtype()); - accuracy->share_lod(out); -} - -void GraphSendRecvInferMeta(const MetaTensor& x, - const MetaTensor& src_index, - const MetaTensor& dst_index, - const std::string& pool_type, - MetaTensor* out, - MetaTensor* dst_count) { - auto src_index_dims = src_index.dims(); - if (src_index_dims.size() == 2) { - PADDLE_ENFORCE_EQ(src_index_dims[1], - 1, - phi::errors::InvalidArgument( - "The last dim of Src_index should be 1 when it " - "is 2D, but we get %d", - src_index_dims[1])); - } else { - PADDLE_ENFORCE_EQ( - src_index_dims.size(), - 1, - phi::errors::InvalidArgument( - "The Src_index should be 1D, when it is not 2D, but we get %d", - src_index_dims.size())); - } - - auto dst_index_dims = dst_index.dims(); - if (dst_index_dims.size() == 2) { - PADDLE_ENFORCE_EQ(dst_index_dims[1], - 1, - phi::errors::InvalidArgument( - "The last dim of Dst_index should be 1 when it " - "is 2D, but we get %d", - dst_index_dims[1])); - } else { - PADDLE_ENFORCE_EQ( - dst_index_dims.size(), - 1, - phi::errors::InvalidArgument("The Dst_index should be 1D, " - "when it is not 2D, but we get %d", - dst_index_dims.size())); - } - - PADDLE_ENFORCE_EQ(src_index_dims[0], - dst_index_dims[0], - phi::errors::InvalidArgument( - "Src_index and Dst_index should have the same shape.")); - - auto dims = x.dims(); - out->set_dims(dims); - out->set_dtype(x.dtype()); - - if (pool_type == "MEAN") { - dst_count->set_dims({dims[0]}); - dst_count->set_dtype(DataType::INT32); - } -} } // namespace phi diff --git a/paddle/phi/infermeta/ternary.h b/paddle/phi/infermeta/ternary.h index c9a7e78db752f95c7e38857e3f1075a0d672246b..209a07db18b5c7a87ba094c5839149533757220d 100644 --- a/paddle/phi/infermeta/ternary.h +++ b/paddle/phi/infermeta/ternary.h @@ -45,16 +45,22 @@ void AddmmInferMeta(const MetaTensor& input, float beta, MetaTensor* out); -void GatherNdGradInferMeta(const MetaTensor& x, - const MetaTensor& index, - const MetaTensor& out_grad, - MetaTensor* x_grad); +void GraphSendRecvInferMeta(const MetaTensor& x, + const MetaTensor& src_index, + const MetaTensor& dst_index, + const std::string& pool_type, + MetaTensor* out, + MetaTensor* dst_count); -void ScatterInferMeta(const MetaTensor& x, - const MetaTensor& index, - const MetaTensor& updates, - bool overwrite, - MetaTensor* out); +void LerpInferMeta(const MetaTensor& x, + const MetaTensor& y, + const MetaTensor& weight, + MetaTensor* out); + +void LinspaceInferMeta(const MetaTensor& start, + const MetaTensor& stop, + const MetaTensor& number, + MetaTensor* out); void NllLossRawInferMeta(const MetaTensor& input, const MetaTensor& label, @@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input, MetaTensor* total_weight, MetaConfig config = MetaConfig()); +void ScatterInferMeta(const MetaTensor& x, + const MetaTensor& index, + const MetaTensor& updates, + bool overwrite, + MetaTensor* out); + void ScatterNdAddInferMeta(const MetaTensor& x, const MetaTensor& index, const MetaTensor& updates, @@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input, MetaTensor* path, MetaConfig config = MetaConfig()); -void LerpInferMeta(const MetaTensor& x, - const MetaTensor& y, - const MetaTensor& weight, - 
MetaTensor* out); - -void LinspaceInferMeta(const MetaTensor& start, - const MetaTensor& stop, - const MetaTensor& number, - MetaTensor* out); - -void GraphSendRecvInferMeta(const MetaTensor& x, - const MetaTensor& src_index, - const MetaTensor& dst_index, - const std::string& pool_type, - MetaTensor* out, - MetaTensor* dst_count); } // namespace phi diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 1b820510470238202acbb36566ec2b93c5ff4520..f7693c2f90ac942b9b732038a9a1dfdbb47e4d97 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -26,6 +26,82 @@ limitations under the License. */ namespace phi { +void ArgMinMaxInferMeta(const MetaTensor& x, + int64_t axis, + bool keepdims, + bool flatten, + int dtype, + MetaTensor* out, + MetaConfig config) { + const auto& x_dims = x.dims(); + + PADDLE_ENFORCE_GE( + axis, + -x_dims.size(), + phi::errors::InvalidArgument("'axis'(%d) must be greater than or equal to" + " -Rank(X)(%d).", + axis, + -x_dims.size())); + PADDLE_ENFORCE_LT(axis, + x_dims.size(), + phi::errors::InvalidArgument( + "'axis'(%d) must be less than Rank(X)(%d) of Input(X).", + axis, + x_dims.size())); + + PADDLE_ENFORCE_EQ( + (dtype < 0 || dtype == 2 || dtype == 3), + true, + phi::errors::InvalidArgument( + "The attribute of dtype in argmin/argmax must be [%s] or [%s], but " + "received [%s]", + paddle::framework::DataTypeToString( + paddle::framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + paddle::framework::proto::VarType::INT64), + paddle::framework::DataTypeToString( + static_cast(dtype)))); + + auto x_rank = x_dims.size(); + if (axis < 0) axis += x_rank; + if (config.is_runtime) { + if (dtype == paddle::framework::proto::VarType::INT32) { + int64_t all_element_num = 0; + if (flatten) { + all_element_num = phi::product(x_dims); + + } else { + all_element_num = x_dims[axis]; + } + PADDLE_ENFORCE_LE( + all_element_num, + INT_MAX, + phi::errors::InvalidArgument( + "The element num of the argmin/argmax input at axis is " + "%d, is larger than int32 maximum value:%d, you must " + "set the dtype of argmin/argmax to 'int64'.", + all_element_num, + INT_MAX)); + } + } + std::vector vec; + if (flatten) { + vec.emplace_back(static_cast(1)); + } else { + for (int64_t i = 0; i < axis; i++) vec.emplace_back(x_dims[i]); + if (keepdims) { + vec.emplace_back(static_cast(1)); + } + for (int64_t i = axis + 1; i < x_rank; i++) vec.emplace_back(x_dims[i]); + } + out->set_dims(phi::make_ddim(vec)); + if (dtype == 2) { + out->set_dtype(DataType::INT32); + } else if (dtype == 3) { + out->set_dtype(DataType::INT64); + } +} + void ArgsortInferMeta(const MetaTensor& input, int axis, bool descending, @@ -54,96 +130,6 @@ void ArgsortInferMeta(const MetaTensor& input, indices->share_lod(input); } -void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) { - out->share_meta(x); -} - -// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1] -void UnchangedInferMetaCheckAxis(const MetaTensor& x, - int axis, - MetaTensor* out) { - auto rank = x.dims().size(); - PADDLE_ENFORCE_GE( - axis, - -rank, - errors::InvalidArgument( - "Attr(axis) value should be in range [-R, R-1], " - "R is the rank of Input(X). But received axis: %d, R: %d.", - axis, - rank)); - PADDLE_ENFORCE_LT( - axis, - rank, - phi::errors::InvalidArgument( - "Attr(axis) value should be in range [-R, R-1], " - "R is the rank of Input(X). 
But received axis: %d, R: %d.", - axis, - rank)); - out->share_meta(x); -} - -void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) { - out->set_dims(x.dims()); - out->set_dtype(dtype::ToReal(x.dtype())); - out->set_layout(x.layout()); -} - -void FlattenInferMeta(const MetaTensor& x, - int start_axis, - int stop_axis, - MetaTensor* out) { - auto x_dims = x.dims(); - int in_dims_size = x_dims.size(); - if (start_axis < 0) { - start_axis = start_axis + in_dims_size; - } - if (stop_axis < 0) { - stop_axis = stop_axis + in_dims_size; - } - PADDLE_ENFORCE_GE( - stop_axis, - start_axis, - phi::errors::InvalidArgument("The stop_axis should be greater" - "than or equal to start_axis.")); - - int64_t outer = 1; - std::vector out_shape; - out_shape.reserve(in_dims_size - stop_axis + start_axis); - - for (int i = 0; i < start_axis; ++i) { - out_shape.push_back(x_dims[i]); - } - for (int i = start_axis; i <= stop_axis; i++) { - if (x_dims[i] == -1 || outer == -1) { - outer = -1; - } else { - outer *= x_dims[i]; - } - } - out_shape.push_back(outer); - for (int i = stop_axis + 1; i < in_dims_size; i++) { - out_shape.push_back(x_dims[i]); - } - const auto& out_dims = phi::make_ddim(out_shape); - out->set_dims(out_dims); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - - if (x_dims[0] == out_dims[0]) { - // Only pass LoD when the first dimension of output and Input(X) - // are the same. - out->share_lod(x); - } -} - -void GumbelSoftmaxInferMeta(const MetaTensor& x, - float temperature, - bool hard, - int axis, - MetaTensor* out) { - UnchangedInferMetaCheckAxis(x, axis, out); -} - void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out) { out->set_dims(x.dims()); out->set_dtype(out_dtype); @@ -203,73 +189,275 @@ void CumsumInferMeta(const MetaTensor& x, out->share_lod(x); } -void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) { - PADDLE_ENFORCE_EQ( - product(x.dims()), - 1UL, - errors::InvalidArgument("The number of elements in Input(X) should be 1." - "Now the number is %d.", - product(x.dims()))); - out->set_dims(x.dims()); - out->share_lod(x); - out->set_dtype(x.dtype()); -} - -static phi::DDim ValidateShape(const std::vector shape, - const phi::DDim& in_dims) { - const int64_t in_size = phi::product(in_dims); - auto in_dims_vec = phi::vectorize(in_dims); - bool all_positive = std::all_of(in_dims_vec.cbegin(), - in_dims_vec.cend(), - [](int64_t i) { return i > 0; }); - // only one dimension can be set to -1, whose size will be automatically - // infered. - const int64_t unk_dim_val = -1; - const int64_t copy_dim_val = 0; +void DiagInferMeta(const MetaTensor& x, + int offset, + float padding_value, + MetaTensor* out) { + auto x_dims = x.dims(); - std::vector output_shape(shape.size(), 0); - int64_t capacity = 1; - int unk_dim_idx = -1; - for (size_t i = 0; i < shape.size(); ++i) { - if (shape[i] == unk_dim_val) { - PADDLE_ENFORCE_EQ( - unk_dim_idx, - -1, - phi::errors::InvalidArgument( - "Only one dimension value of 'shape' in ReshapeOp can " - "be -1. But received shape = [%s], shape[%d] is also -1.", - phi::make_ddim(shape), - i)); - unk_dim_idx = i; - } else if (shape[i] == copy_dim_val) { - PADDLE_ENFORCE_LT( - static_cast(i), - in_dims.size(), - phi::errors::InvalidArgument( - "The index of 0 in `shape` must be less than " - "the input tensor X's dimensions. 
" - "But received shape = [%s], shape[%d] = 0, X's shape = [%s], " - "X's dimensions = %d.", - phi::make_ddim(shape), - i, - in_dims, - in_dims.size())); + if (x_dims.size() == 1UL) { + int64_t size_ = x_dims[0] + std::abs(offset); + out->set_dims({size_, size_}); + out->set_dtype(x.dtype()); + } else if (x_dims.size() == 2UL) { + int64_t size_ = 0; + if (offset >= 0) { + // Note(LutaoChu): Do not use std::min here, otherwise the calculation + // of `size_` will have unexpected result on Windows Python3.8 + if (x_dims[0] < x_dims[1] - offset) { + size_ = x_dims[0]; + } else { + size_ = x_dims[1] - offset; + } } else { - PADDLE_ENFORCE_GT( - shape[i], - 0, - phi::errors::InvalidArgument( - "Each dimension value of 'shape' in ReshapeOp must not " - "be negative except one unknown dimension. " - "But received shape = [%s], shape[%d] = %d.", - phi::make_ddim(shape), - i, - shape[i])); + // Note(LutaoChu): Do not use std::min here, otherwise the calculation + // of `size_` will have unexpected result on Windows Python3.8 + if (x_dims[0] + offset < x_dims[1]) { + size_ = x_dims[0] + offset; + } else { + size_ = x_dims[1]; + } } - - // NOTE all non-zero values will be converted to True (include negative - // value) - capacity *= (shape[i] ? shape[i] : in_dims[i]); + out->set_dims({size_}); + out->set_dtype(x.dtype()); + } else { + PADDLE_THROW(phi::errors::InvalidArgument( + "The input tensor X's dimensions of DiagV2Op should be either 1 or " + "2, but received %d.", + x_dims.size())); + } +} + +void DiagonalInferMeta(const MetaTensor& input, + int offset, + int axis1, + int axis2, + MetaTensor* out) { + auto x_dims = input.dims(); + int offset_ = offset; + int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1; + int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2; + + PADDLE_ENFORCE_GE( + x_dims.size(), + 2, + phi::errors::OutOfRange("Input's dim is out of range (expected at " + "least 2 dimensions, but got %ld).", + x_dims.size())); + PADDLE_ENFORCE_LT( + axis1_, + x_dims.size(), + phi::errors::OutOfRange( + "Attr(axis1) is out of range (expected to be in range of [%ld, " + "%ld], but got %ld).", + -(x_dims.size()), + (x_dims.size() - 1), + axis1)); + PADDLE_ENFORCE_LT( + axis2_, + x_dims.size(), + phi::errors::OutOfRange( + "Attr(axis2) is out of range (expected to be in range of [%ld, " + "%ld], but got %ld).", + -(x_dims.size()), + (x_dims.size() - 1), + axis2)); + PADDLE_ENFORCE_NE( + axis1_, + axis2_, + phi::errors::InvalidArgument("The dimensions should not be identical " + "%d vs %d.", + axis1, + axis2)); + + auto out_dims = vectorize(x_dims); + // from out_dims get the dim size of axis1_. + auto axis1_size = out_dims[axis1_]; + auto axis2_size = out_dims[axis2_]; + // delete two dims by attr axis1 and axis2 from out_dims. + /* example: + out_dim = [2, 3, 4]; + axis1 = 0; + axis2 = 1; + according to the attr of axis1 and axis2, we get: + out_dim = [4]. 
+ */ + out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_)); + out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_)); + + if (offset_ == 0) { + out_dims.push_back(std::min(axis1_size, axis2_size)); + } else if (offset_ > 0) { + if ((axis2_size - offset_) > 0) { + out_dims.push_back(std::min(axis1_size, axis2_size - offset_)); + } else { + out_dims.push_back(0); + } + } else { + if ((axis1_size + offset_) > 0) { + out_dims.push_back(std::min(axis1_size + offset_, axis2_size)); + } else { + out_dims.push_back(0); + } + } + out->set_dims(phi::make_ddim(out_dims)); +} + +void EighInferMeta(const MetaTensor& x, + const std::string& uplo, + MetaTensor* out_w, + MetaTensor* out_v) { + auto input_dim = x.dims(); + auto rank = input_dim.size(); + + PADDLE_ENFORCE_GE(rank, + 2, + phi::errors::InvalidArgument( + "The Input(X) should have at least 2 dimensions." + "But received a %d dimension tensor.", + rank)); + PADDLE_ENFORCE_EQ( + input_dim[rank - 2], + input_dim[rank - 1], + phi::errors::InvalidArgument( + "Eigh op is designed for square matrix, consequently" + "inner-most 2 dimensions of Input(X) should be symmetric." + "But received X's shape[-2] = %d and shape[-1] = %d.", + input_dim[rank - 2], + input_dim[rank - 1])); + + std::vector values_dim; + + for (auto i = 0; i < rank - 1; i++) { + values_dim.emplace_back(input_dim[i]); + } + out_w->set_dims(phi::make_ddim(values_dim)); + out_v->set_dims(input_dim); +} + +void FlattenInferMeta(const MetaTensor& x, + int start_axis, + int stop_axis, + MetaTensor* out) { + auto x_dims = x.dims(); + int in_dims_size = x_dims.size(); + if (start_axis < 0) { + start_axis = start_axis + in_dims_size; + } + if (stop_axis < 0) { + stop_axis = stop_axis + in_dims_size; + } + PADDLE_ENFORCE_GE( + stop_axis, + start_axis, + phi::errors::InvalidArgument("The stop_axis should be greater" + "than or equal to start_axis.")); + + int64_t outer = 1; + std::vector out_shape; + out_shape.reserve(in_dims_size - stop_axis + start_axis); + + for (int i = 0; i < start_axis; ++i) { + out_shape.push_back(x_dims[i]); + } + for (int i = start_axis; i <= stop_axis; i++) { + if (x_dims[i] == -1 || outer == -1) { + outer = -1; + } else { + outer *= x_dims[i]; + } + } + out_shape.push_back(outer); + for (int i = stop_axis + 1; i < in_dims_size; i++) { + out_shape.push_back(x_dims[i]); + } + const auto& out_dims = phi::make_ddim(out_shape); + out->set_dims(out_dims); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + + if (x_dims[0] == out_dims[0]) { + // Only pass LoD when the first dimension of output and Input(X) + // are the same. + out->share_lod(x); + } +} + +void GumbelSoftmaxInferMeta(const MetaTensor& x, + float temperature, + bool hard, + int axis, + MetaTensor* out) { + UnchangedInferMetaCheckAxis(x, axis, out); +} + +void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) { + PADDLE_ENFORCE_EQ( + product(x.dims()), + 1UL, + errors::InvalidArgument("The number of elements in Input(X) should be 1." + "Now the number is %d.", + product(x.dims()))); + out->set_dims(x.dims()); + out->share_lod(x); + out->set_dtype(x.dtype()); +} + +static phi::DDim ValidateShape(const std::vector shape, + const phi::DDim& in_dims) { + const int64_t in_size = phi::product(in_dims); + auto in_dims_vec = phi::vectorize(in_dims); + bool all_positive = std::all_of(in_dims_vec.cbegin(), + in_dims_vec.cend(), + [](int64_t i) { return i > 0; }); + // only one dimension can be set to -1, whose size will be automatically + // infered. 
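+  // For example, with in_dims = [2, 3, 4] and shape = [0, -1] (illustrative
+  // values), the leading 0 copies the input's first dimension and the -1 is
+  // inferred from the remaining element count, giving an output shape of
+  // [2, 12].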
+ const int64_t unk_dim_val = -1; + const int64_t copy_dim_val = 0; + + std::vector output_shape(shape.size(), 0); + int64_t capacity = 1; + int unk_dim_idx = -1; + for (size_t i = 0; i < shape.size(); ++i) { + if (shape[i] == unk_dim_val) { + PADDLE_ENFORCE_EQ( + unk_dim_idx, + -1, + phi::errors::InvalidArgument( + "Only one dimension value of 'shape' in ReshapeOp can " + "be -1. But received shape = [%s], shape[%d] is also -1.", + phi::make_ddim(shape), + i)); + unk_dim_idx = i; + } else if (shape[i] == copy_dim_val) { + PADDLE_ENFORCE_LT( + static_cast(i), + in_dims.size(), + phi::errors::InvalidArgument( + "The index of 0 in `shape` must be less than " + "the input tensor X's dimensions. " + "But received shape = [%s], shape[%d] = 0, X's shape = [%s], " + "X's dimensions = %d.", + phi::make_ddim(shape), + i, + in_dims, + in_dims.size())); + } else { + PADDLE_ENFORCE_GT( + shape[i], + 0, + phi::errors::InvalidArgument( + "Each dimension value of 'shape' in ReshapeOp must not " + "be negative except one unknown dimension. " + "But received shape = [%s], shape[%d] = %d.", + phi::make_ddim(shape), + i, + shape[i])); + } + + // NOTE all non-zero values will be converted to True (include negative + // value) + capacity *= (shape[i] ? shape[i] : in_dims[i]); output_shape[i] = (shape[i] ? static_cast(shape[i]) : in_dims[i]); } @@ -360,6 +548,11 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out) { out->set_dtype(DataType::BOOL); } +void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) { + out->set_dims(x.dims()); + out->set_dtype(DataType::BOOL); +} + void MultinomialInferMeta(const MetaTensor& x, int num_samples, bool replacement, @@ -395,124 +588,97 @@ void MultinomialInferMeta(const MetaTensor& x, out->set_dtype(DataType::INT64); } -void TileInferMeta(const MetaTensor& x, - const ScalarArray& repeat_times, - MetaTensor* out, - MetaConfig config) { -#define MAX_RANK_SUPPORTED 6 - - auto repeat_times_data = repeat_times.GetData(); - auto x_dims = x.dims(); - if (repeat_times_data.size() == 0) { - repeat_times_data = std::vector(x_dims.size(), -1); - } - - PADDLE_ENFORCE_LE( - x_dims.size(), - MAX_RANK_SUPPORTED, - errors::InvalidArgument( - "The rank of the input 'x' for tile op " - "must not be greater than %d, but the value received is %d.", - MAX_RANK_SUPPORTED, - x_dims.size())); - PADDLE_ENFORCE_LE( - repeat_times_data.size(), - MAX_RANK_SUPPORTED, - errors::InvalidArgument( - "The size of the shape of input 'repeat_times' for tile op " - "must not be greater than %d, but the value received is %d.", - MAX_RANK_SUPPORTED, - repeat_times_data.size())); - PADDLE_ENFORCE_GE( - repeat_times_data.size(), - 1, - errors::InvalidArgument( - "The size of the shape of input 'repeat_times' for tile op " - "must be positive integers, but the value received is %d.", - repeat_times_data.size())); - - auto out_rank = - std::max(static_cast(x_dims.size()), repeat_times_data.size()); - std::vector out_shape(out_rank); - auto x_dim_vec = phi::vectorize(x_dims); - if (x_dim_vec.size() > repeat_times_data.size()) { - auto diff = x_dim_vec.size() - repeat_times_data.size(); - repeat_times_data.insert(repeat_times_data.begin(), diff, -1); - } else { - auto diff = repeat_times_data.size() - x_dim_vec.size(); - x_dim_vec.insert(x_dim_vec.begin(), diff, -1); +void PadInferMeta(const MetaTensor& input, + const std::vector& paddings, + float pad_value, + MetaTensor* out, + MetaConfig config) { + auto x_dim = input.dims(); + PADDLE_ENFORCE_EQ( + static_cast(paddings.size()), + x_dim.size() * 2, + 
phi::errors::InvalidArgument( + "Size of 'paddings' dimension should be equal to 2 * size of " + "Input(X)'s dimension, but received (size of 'paddings' dimension " + "is) %d vs (2 * size of Input(X)'s dimension is) %d.", + static_cast(paddings.size()), + x_dim.size() * 2)); + for (size_t i = 0; i < paddings.size(); ++i) { + PADDLE_ENFORCE_GE(paddings[i], + 0, + phi::errors::InvalidArgument( + "The element of 'paddings' should >= 0, but " + "received %d for index %d.", + paddings[i], + static_cast(i))); } - for (size_t i = 0; i < repeat_times_data.size(); ++i) { - if (x_dim_vec[i] == -1 || repeat_times_data[i] == -1) { - out_shape[i] = -1; + std::vector out_dims(x_dim.size()); + for (int i = 0; i < x_dim.size(); ++i) { + if ((!config.is_runtime) && (x_dim[i] == -1)) { + out_dims[i] = -1; } else { - PADDLE_ENFORCE_GT( - repeat_times_data[i], - 0, - errors::InvalidArgument( - "Every element of the input 'repeat_times' for tile op must be " - "greater than 0, but the value given is %d.", - repeat_times_data[i])); - out_shape[i] = x_dim_vec[i] * repeat_times_data[i]; + out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; } } - - out->set_dims(phi::make_ddim(out_shape)); - if (out_shape[0] == x_dims[0]) { - out->share_lod(x); + out->set_dims(phi::make_ddim(out_dims)); + if (out_dims[0] == x_dim[0]) { + // Only pass LoD when the first dimension is equal between + // output and input. + out->share_lod(input); } + out->set_dtype(input.dtype()); } -void ReshapeInferMeta(const MetaTensor& x, - const ScalarArray& shape, - MetaTensor* out, - MetaConfig config) { - auto& shape_data = shape.GetData(); - PADDLE_ENFORCE_NOT_NULL(out, - phi::errors::InvalidArgument( - "Output(Out) of ReshapeOp should not be null.")); - if (!config.is_runtime && shape.FromTensor()) { - out->set_dims(phi::make_ddim(shape_data)); - out->share_lod(x); - return; - } - PADDLE_ENFORCE_GT(shape_data.size(), - 0, +void PixelShuffleInferMeta(const MetaTensor& x, + int upscale_factor, + const std::string& data_format, + MetaTensor* out) { + auto input_dims = x.dims(); + PADDLE_ENFORCE_EQ(input_dims.size(), + 4, phi::errors::InvalidArgument( - "The shape's size in ReshapeOp can't be zero.")); - InferMetaFromVecValue(x, shape_data, out); -} + "Input should be a 4-D tensor of format [N, C, H, W] " + "or [N, H, W, C], but got %u.", + input_dims.size())); -void ReshapeWithXShapeInferMeta(const MetaTensor& x, - const ScalarArray& shape, - MetaTensor* xshape, - MetaTensor* out, - MetaConfig config) { - PADDLE_ENFORCE_NOT_NULL( - xshape, - phi::errors::InvalidArgument( - "Output(XShape) of ReshapeOp should not be null.")); - const auto& x_dims = x.dims(); - std::vector xshape_dims(x_dims.size() + 1); - xshape_dims[0] = 0; - for (int i = 0; i < x_dims.size(); ++i) { - xshape_dims[i + 1] = x_dims[i]; + const bool channel_last = (data_format == "NHWC"); + + if (!channel_last) { + PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor), + 0, + phi::errors::InvalidArgument( + "The square of upscale_factor[%u] should divide the " + "number of channel[%u]", + upscale_factor * upscale_factor, + input_dims[1])); + } else { + PADDLE_ENFORCE_EQ(input_dims[3] % (upscale_factor * upscale_factor), + 0, + phi::errors::InvalidArgument( + "The square of upscale_factor[%u] should divide the " + "number of channel[%u]", + upscale_factor * upscale_factor, + input_dims[3])); } - xshape->set_dims(phi::make_ddim(xshape_dims)); - xshape->share_lod(x); - ReshapeInferMeta(x, shape, out, config); + auto output_dims = input_dims; + output_dims[0] 
= input_dims[0]; + if (!channel_last) { + output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor); + output_dims[2] = input_dims[2] * upscale_factor; + output_dims[3] = input_dims[3] * upscale_factor; + } else { + output_dims[1] = input_dims[1] * upscale_factor; + output_dims[2] = input_dims[2] * upscale_factor; + output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor); + } + out->set_dtype(x.dtype()); + out->set_dims(output_dims); } -/* Why not use SumRawInferMeta directly? - Because we need make InferMetaFunction's args follow the design of api.yaml -*/ -void SumInferMeta(const MetaTensor& x, - const std::vector& axis, - DataType dtype, - bool keep_dim, - MetaTensor* out) { - bool reduce_all = false; - SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out); +void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) { + out->set_dims(x.dims()); + out->set_dtype(dtype::ToReal(x.dtype())); + out->set_layout(x.layout()); } DDim ReduceInferDim(const MetaTensor& x, @@ -584,29 +750,12 @@ DDim ReduceInferDim(const MetaTensor& x, return out_dim; } -void SumRawInferMeta(const MetaTensor& x, +void ReduceInferMeta(const MetaTensor& x, const std::vector& axis, bool keep_dim, - bool reduce_all, - DataType dtype, MetaTensor* out) { - DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all); - - DataType out_dtype; - if (dtype != DataType::UNDEFINED) { - out_dtype = dtype; - } else { - if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 || - x.dtype() == DataType::INT64) { - out_dtype = DataType::INT64; - } else { - out_dtype = x.dtype(); - } - } - - out->set_dims(out_dim); - out->set_dtype(out_dtype); - out->set_layout(x.layout()); + bool reduce_all = false; + ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out); } void ReduceInferMetaBase(const MetaTensor& x, @@ -620,33 +769,109 @@ void ReduceInferMetaBase(const MetaTensor& x, out->set_layout(x.layout()); } -void ReduceInferMeta(const MetaTensor& x, - const std::vector& axis, - bool keep_dim, - MetaTensor* out) { - bool reduce_all = false; - ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out); +void ReshapeInferMeta(const MetaTensor& x, + const ScalarArray& shape, + MetaTensor* out, + MetaConfig config) { + auto& shape_data = shape.GetData(); + PADDLE_ENFORCE_NOT_NULL(out, + phi::errors::InvalidArgument( + "Output(Out) of ReshapeOp should not be null.")); + if (!config.is_runtime && shape.FromTensor()) { + out->set_dims(phi::make_ddim(shape_data)); + out->share_lod(x); + return; + } + PADDLE_ENFORCE_GT(shape_data.size(), + 0, + phi::errors::InvalidArgument( + "The shape's size in ReshapeOp can't be zero.")); + InferMetaFromVecValue(x, shape_data, out); } -void TransferLayoutInferMeta(const MetaTensor& x, - DataLayout layout, - MetaTensor* out) { - out->set_dims(x.dims()); - out->set_dtype(x.dtype()); - out->set_layout(layout); +void ReshapeWithXShapeInferMeta(const MetaTensor& x, + const ScalarArray& shape, + MetaTensor* xshape, + MetaTensor* out, + MetaConfig config) { + PADDLE_ENFORCE_NOT_NULL( + xshape, + phi::errors::InvalidArgument( + "Output(XShape) of ReshapeOp should not be null.")); + const auto& x_dims = x.dims(); + std::vector xshape_dims(x_dims.size() + 1); + xshape_dims[0] = 0; + for (int i = 0; i < x_dims.size(); ++i) { + xshape_dims[i + 1] = x_dims[i]; + } + xshape->set_dims(phi::make_ddim(xshape_dims)); + xshape->share_lod(x); + ReshapeInferMeta(x, shape, out, config); } -void SplitInferMeta(const MetaTensor& x, - const ScalarArray& num_or_sections, - const Scalar& axis, - 
std::vector out, - MetaConfig config) { - int axis_value = axis.to(); - int rank = x.dims().size(); - PADDLE_ENFORCE_EQ( - axis_value >= -rank && axis_value < rank, - true, - phi::errors::InvalidArgument( +void ShardIndexInferMeta(const MetaTensor& in, + int index_num, + int nshards, + int shard_id, + int ignore_value, + MetaTensor* out, + MetaConfig config) { + auto x_dims = in.dims(); + PADDLE_ENFORCE_GE( + x_dims.size(), + 2, + phi::errors::InvalidArgument("Rank of Input(X) should be at least 2, " + "but the value given is %d.", + x_dims.size())); + if (config.is_runtime || x_dims[x_dims.size() - 1] > 0) { + PADDLE_ENFORCE_EQ(x_dims[x_dims.size() - 1], + 1U, + phi::errors::InvalidArgument( + "The last dimension of Input(X) should be 1, " + "but the value given is %d.", + x_dims[x_dims.size() - 1])); + } + + out->set_dims(x_dims); + out->share_lod(in); + out->set_dtype(in.dtype()); +} + +void SizeInferMeta(const MetaTensor& input, MetaTensor* out) { + out->set_dtype(DataType::INT64); + out->set_dims({1}); +} + +void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) { + auto dim_x = x.dims(); + auto rank_x = dim_x.size(); + PADDLE_ENFORCE_GE(axis, + -rank_x, + phi::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(X).")); + PADDLE_ENFORCE_LT(axis, + rank_x, + phi::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(X).")); + + out->set_dims(x.dims()); + out->set_dtype(x.dtype()); + out->share_lod(x); +} + +void SplitInferMeta(const MetaTensor& x, + const ScalarArray& num_or_sections, + const Scalar& axis, + std::vector out, + MetaConfig config) { + int axis_value = axis.to(); + int rank = x.dims().size(); + PADDLE_ENFORCE_EQ( + axis_value >= -rank && axis_value < rank, + true, + phi::errors::InvalidArgument( "The axis is expected to be in range of [%d, %d), but got %d", -rank, rank, @@ -767,22 +992,108 @@ void SplitInferMeta(const MetaTensor& x, } } -void UnbindInferMeta(const MetaTensor& x, - int axis, - std::vector* outs) { - auto in_dims = x.dims(); - std::vector out_dim; - axis = axis < 0 ? in_dims.size() + axis : axis; - for (int i = 0; i < in_dims.size(); ++i) { - if (i != axis) out_dim.push_back(in_dims[i]); +/* Why not use SumRawInferMeta directly? 
+ Because we need make InferMetaFunction's args follow the design of api.yaml +*/ +void SumInferMeta(const MetaTensor& x, + const std::vector& axis, + DataType dtype, + bool keep_dim, + MetaTensor* out) { + bool reduce_all = false; + SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out); +} + +void SumRawInferMeta(const MetaTensor& x, + const std::vector& axis, + bool keep_dim, + bool reduce_all, + DataType dtype, + MetaTensor* out) { + DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all); + + DataType out_dtype; + if (dtype != DataType::UNDEFINED) { + out_dtype = dtype; + } else { + if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 || + x.dtype() == DataType::INT64) { + out_dtype = DataType::INT64; + } else { + out_dtype = x.dtype(); + } } - auto out_dims = phi::make_ddim(out_dim); - for (size_t i = 0; i < outs->size(); ++i) { - (*outs)[i].set_dtype(x.dtype()); - (*outs)[i].set_dims(out_dims); - (*outs)[i].set_layout(x.layout()); - (*outs)[i].share_lod(x); + out->set_dims(out_dim); + out->set_dtype(out_dtype); + out->set_layout(x.layout()); +} + +void TileInferMeta(const MetaTensor& x, + const ScalarArray& repeat_times, + MetaTensor* out, + MetaConfig config) { +#define MAX_RANK_SUPPORTED 6 + + auto repeat_times_data = repeat_times.GetData(); + auto x_dims = x.dims(); + if (repeat_times_data.size() == 0) { + repeat_times_data = std::vector(x_dims.size(), -1); + } + + PADDLE_ENFORCE_LE( + x_dims.size(), + MAX_RANK_SUPPORTED, + errors::InvalidArgument( + "The rank of the input 'x' for tile op " + "must not be greater than %d, but the value received is %d.", + MAX_RANK_SUPPORTED, + x_dims.size())); + PADDLE_ENFORCE_LE( + repeat_times_data.size(), + MAX_RANK_SUPPORTED, + errors::InvalidArgument( + "The size of the shape of input 'repeat_times' for tile op " + "must not be greater than %d, but the value received is %d.", + MAX_RANK_SUPPORTED, + repeat_times_data.size())); + PADDLE_ENFORCE_GE( + repeat_times_data.size(), + 1, + errors::InvalidArgument( + "The size of the shape of input 'repeat_times' for tile op " + "must be positive integers, but the value received is %d.", + repeat_times_data.size())); + + auto out_rank = + std::max(static_cast(x_dims.size()), repeat_times_data.size()); + std::vector out_shape(out_rank); + auto x_dim_vec = phi::vectorize(x_dims); + if (x_dim_vec.size() > repeat_times_data.size()) { + auto diff = x_dim_vec.size() - repeat_times_data.size(); + repeat_times_data.insert(repeat_times_data.begin(), diff, -1); + } else { + auto diff = repeat_times_data.size() - x_dim_vec.size(); + x_dim_vec.insert(x_dim_vec.begin(), diff, -1); + } + for (size_t i = 0; i < repeat_times_data.size(); ++i) { + if (x_dim_vec[i] == -1 || repeat_times_data[i] == -1) { + out_shape[i] = -1; + } else { + PADDLE_ENFORCE_GT( + repeat_times_data[i], + 0, + errors::InvalidArgument( + "Every element of the input 'repeat_times' for tile op must be " + "greater than 0, but the value given is %d.", + repeat_times_data[i])); + out_shape[i] = x_dim_vec[i] * repeat_times_data[i]; + } + } + + out->set_dims(phi::make_ddim(out_shape)); + if (out_shape[0] == x_dims[0]) { + out->share_lod(x); } } @@ -840,79 +1151,112 @@ void TraceInferMeta( out->set_dtype(x.dtype()); } -void DiagonalInferMeta(const MetaTensor& input, - int offset, - int axis1, - int axis2, - MetaTensor* out) { - auto x_dims = input.dims(); - int offset_ = offset; - int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1; - int axis2_ = axis2 < 0 ? 
x_dims.size() + axis2 : axis2; +void TransferLayoutInferMeta(const MetaTensor& x, + DataLayout layout, + MetaTensor* out) { + out->set_dims(x.dims()); + out->set_dtype(x.dtype()); + out->set_layout(layout); +} - PADDLE_ENFORCE_GE( - x_dims.size(), - 2, - phi::errors::OutOfRange("Input's dim is out of range (expected at " - "least 2 dimensions, but got %ld).", - x_dims.size())); - PADDLE_ENFORCE_LT( - axis1_, - x_dims.size(), - phi::errors::OutOfRange( - "Attr(axis1) is out of range (expected to be in range of [%ld, " - "%ld], but got %ld).", - -(x_dims.size()), - (x_dims.size() - 1), - axis1)); - PADDLE_ENFORCE_LT( - axis2_, - x_dims.size(), - phi::errors::OutOfRange( - "Attr(axis2) is out of range (expected to be in range of [%ld, " - "%ld], but got %ld).", - -(x_dims.size()), - (x_dims.size() - 1), - axis2)); - PADDLE_ENFORCE_NE( - axis1_, - axis2_, - phi::errors::InvalidArgument("The dimensions should not be identical " - "%d vs %d.", - axis1, - axis2)); +void TransposeInferMeta(const MetaTensor& x, + const std::vector& axis, + MetaTensor* out) { + auto x_dims = x.dims(); + size_t x_rank = x_dims.size(); + size_t axis_size = axis.size(); - auto out_dims = vectorize(x_dims); - // from out_dims get the dim size of axis1_. - auto axis1_size = out_dims[axis1_]; - auto axis2_size = out_dims[axis2_]; - // delete two dims by attr axis1 and axis2 from out_dims. - /* example: - out_dim = [2, 3, 4]; - axis1 = 0; - axis2 = 1; - according to the attr of axis1 and axis2, we get: - out_dim = [4]. - */ - out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_)); - out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_)); + PADDLE_ENFORCE_EQ( + x_rank, + axis_size, + errors::InvalidArgument("The input tensor's dimension " + "should be equal to the axis's size. " + "But received input tensor's dimension is %d, " + "axis's size is %d", + x_rank, + axis_size)); - if (offset_ == 0) { - out_dims.push_back(std::min(axis1_size, axis2_size)); - } else if (offset_ > 0) { - if ((axis2_size - offset_) > 0) { - out_dims.push_back(std::min(axis1_size, axis2_size - offset_)); - } else { - out_dims.push_back(0); - } - } else { - if ((axis1_size + offset_) > 0) { - out_dims.push_back(std::min(axis1_size + offset_, axis2_size)); - } else { - out_dims.push_back(0); - } + std::vector count(axis_size, 0); + for (size_t i = 0; i < axis_size; i++) { + PADDLE_ENFORCE_GE( + axis[i], + 0, + errors::InvalidArgument("The axis should be greater than or equal to 0." + "But received %d of axis[%d]", + axis[i], + i)); + + PADDLE_ENFORCE_EQ( + axis[i] < static_cast(axis_size) && ++count[axis[i]] == 1, + true, + errors::InvalidArgument( + "Each element of Attribute axis should " + "be a unique value range from 0 to (dims - 1), " + "where the dims is the axis's size, " + "unique value means this axis value can appear only once. " + "But received axis[%d] is %d, axis_size is %d, " + "count[axis[%d]] is %d", + i, + axis[i], + axis_size, + i, + count[axis[i]])); } - out->set_dims(phi::make_ddim(out_dims)); + + phi::DDim out_dims(x_dims); + for (size_t i = 0; i < axis_size; ++i) { + out_dims[i] = x_dims[axis[i]]; + } + + out->set_dims(out_dims); + out->set_dtype(x.dtype()); +} + +void UnbindInferMeta(const MetaTensor& x, + int axis, + std::vector* outs) { + auto in_dims = x.dims(); + std::vector out_dim; + axis = axis < 0 ? 
in_dims.size() + axis : axis; + for (int i = 0; i < in_dims.size(); ++i) { + if (i != axis) out_dim.push_back(in_dims[i]); + } + auto out_dims = phi::make_ddim(out_dim); + + for (size_t i = 0; i < outs->size(); ++i) { + (*outs)[i].set_dtype(x.dtype()); + (*outs)[i].set_dims(out_dims); + (*outs)[i].set_layout(x.layout()); + (*outs)[i].share_lod(x); + } +} + +void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) { + out->share_meta(x); +} + +// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1] +void UnchangedInferMetaCheckAxis(const MetaTensor& x, + int axis, + MetaTensor* out) { + auto rank = x.dims().size(); + PADDLE_ENFORCE_GE( + axis, + -rank, + errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(X). But received axis: %d, R: %d.", + axis, + rank)); + PADDLE_ENFORCE_LT( + axis, + rank, + phi::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(X). But received axis: %d, R: %d.", + axis, + rank)); + out->share_meta(x); } void UnfoldInferMeta(const MetaTensor& x, @@ -1073,303 +1417,6 @@ void UnfoldInferMeta(const MetaTensor& x, out->set_dims(phi::make_ddim(out_dims)); } -void DiagInferMeta(const MetaTensor& x, - int offset, - float padding_value, - MetaTensor* out) { - auto x_dims = x.dims(); - - if (x_dims.size() == 1UL) { - int64_t size_ = x_dims[0] + std::abs(offset); - out->set_dims({size_, size_}); - out->set_dtype(x.dtype()); - } else if (x_dims.size() == 2UL) { - int64_t size_ = 0; - if (offset >= 0) { - // Note(LutaoChu): Do not use std::min here, otherwise the calculation - // of `size_` will have unexpected result on Windows Python3.8 - if (x_dims[0] < x_dims[1] - offset) { - size_ = x_dims[0]; - } else { - size_ = x_dims[1] - offset; - } - } else { - // Note(LutaoChu): Do not use std::min here, otherwise the calculation - // of `size_` will have unexpected result on Windows Python3.8 - if (x_dims[0] + offset < x_dims[1]) { - size_ = x_dims[0] + offset; - } else { - size_ = x_dims[1]; - } - } - out->set_dims({size_}); - out->set_dtype(x.dtype()); - } else { - PADDLE_THROW(phi::errors::InvalidArgument( - "The input tensor X's dimensions of DiagV2Op should be either 1 or " - "2, but received %d.", - x_dims.size())); - } -} - -void ArgMinMaxInferMeta(const MetaTensor& x, - int64_t axis, - bool keepdims, - bool flatten, - int dtype, - MetaTensor* out, - MetaConfig config) { - const auto& x_dims = x.dims(); - - PADDLE_ENFORCE_GE( - axis, - -x_dims.size(), - phi::errors::InvalidArgument("'axis'(%d) must be greater than or equal to" - " -Rank(X)(%d).", - axis, - -x_dims.size())); - PADDLE_ENFORCE_LT(axis, - x_dims.size(), - phi::errors::InvalidArgument( - "'axis'(%d) must be less than Rank(X)(%d) of Input(X).", - axis, - x_dims.size())); - - PADDLE_ENFORCE_EQ( - (dtype < 0 || dtype == 2 || dtype == 3), - true, - phi::errors::InvalidArgument( - "The attribute of dtype in argmin/argmax must be [%s] or [%s], but " - "received [%s]", - paddle::framework::DataTypeToString( - paddle::framework::proto::VarType::INT32), - paddle::framework::DataTypeToString( - paddle::framework::proto::VarType::INT64), - paddle::framework::DataTypeToString( - static_cast(dtype)))); - - auto x_rank = x_dims.size(); - if (axis < 0) axis += x_rank; - if (config.is_runtime) { - if (dtype == paddle::framework::proto::VarType::INT32) { - int64_t all_element_num = 0; - if (flatten) { - all_element_num = phi::product(x_dims); - - } else { - all_element_num = x_dims[axis]; - } - 
PADDLE_ENFORCE_LE( - all_element_num, - INT_MAX, - phi::errors::InvalidArgument( - "The element num of the argmin/argmax input at axis is " - "%d, is larger than int32 maximum value:%d, you must " - "set the dtype of argmin/argmax to 'int64'.", - all_element_num, - INT_MAX)); - } - } - std::vector vec; - if (flatten) { - vec.emplace_back(static_cast(1)); - } else { - for (int64_t i = 0; i < axis; i++) vec.emplace_back(x_dims[i]); - if (keepdims) { - vec.emplace_back(static_cast(1)); - } - for (int64_t i = axis + 1; i < x_rank; i++) vec.emplace_back(x_dims[i]); - } - out->set_dims(phi::make_ddim(vec)); - if (dtype == 2) { - out->set_dtype(DataType::INT32); - } else if (dtype == 3) { - out->set_dtype(DataType::INT64); - } -} - -void SizeInferMeta(const MetaTensor& input, MetaTensor* out) { - out->set_dtype(DataType::INT64); - out->set_dims({1}); -} - -void PadInferMeta(const MetaTensor& input, - const std::vector& paddings, - float pad_value, - MetaTensor* out, - MetaConfig config) { - auto x_dim = input.dims(); - PADDLE_ENFORCE_EQ( - static_cast(paddings.size()), - x_dim.size() * 2, - phi::errors::InvalidArgument( - "Size of 'paddings' dimension should be equal to 2 * size of " - "Input(X)'s dimension, but received (size of 'paddings' dimension " - "is) %d vs (2 * size of Input(X)'s dimension is) %d.", - static_cast(paddings.size()), - x_dim.size() * 2)); - for (size_t i = 0; i < paddings.size(); ++i) { - PADDLE_ENFORCE_GE(paddings[i], - 0, - phi::errors::InvalidArgument( - "The element of 'paddings' should >= 0, but " - "received %d for index %d.", - paddings[i], - static_cast(i))); - } - std::vector out_dims(x_dim.size()); - for (int i = 0; i < x_dim.size(); ++i) { - if ((!config.is_runtime) && (x_dim[i] == -1)) { - out_dims[i] = -1; - } else { - out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; - } - } - out->set_dims(phi::make_ddim(out_dims)); - if (out_dims[0] == x_dim[0]) { - // Only pass LoD when the first dimension is equal between - // output and input. 
- out->share_lod(input); - } - out->set_dtype(input.dtype()); -} - -void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) { - out->set_dims(x.dims()); - out->set_dtype(DataType::BOOL); -} - -void PixelShuffleInferMeta(const MetaTensor& x, - int upscale_factor, - const std::string& data_format, - MetaTensor* out) { - auto input_dims = x.dims(); - PADDLE_ENFORCE_EQ(input_dims.size(), - 4, - phi::errors::InvalidArgument( - "Input should be a 4-D tensor of format [N, C, H, W] " - "or [N, H, W, C], but got %u.", - input_dims.size())); - - const bool channel_last = (data_format == "NHWC"); - - if (!channel_last) { - PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor), - 0, - phi::errors::InvalidArgument( - "The square of upscale_factor[%u] should divide the " - "number of channel[%u]", - upscale_factor * upscale_factor, - input_dims[1])); - } else { - PADDLE_ENFORCE_EQ(input_dims[3] % (upscale_factor * upscale_factor), - 0, - phi::errors::InvalidArgument( - "The square of upscale_factor[%u] should divide the " - "number of channel[%u]", - upscale_factor * upscale_factor, - input_dims[3])); - } - auto output_dims = input_dims; - output_dims[0] = input_dims[0]; - if (!channel_last) { - output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor); - output_dims[2] = input_dims[2] * upscale_factor; - output_dims[3] = input_dims[3] * upscale_factor; - } else { - output_dims[1] = input_dims[1] * upscale_factor; - output_dims[2] = input_dims[2] * upscale_factor; - output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor); - } - out->set_dtype(x.dtype()); - out->set_dims(output_dims); -} - -void TransposeInferMeta(const MetaTensor& x, - const std::vector& axis, - MetaTensor* out) { - auto x_dims = x.dims(); - size_t x_rank = x_dims.size(); - size_t axis_size = axis.size(); - - PADDLE_ENFORCE_EQ( - x_rank, - axis_size, - errors::InvalidArgument("The input tensor's dimension " - "should be equal to the axis's size. " - "But received input tensor's dimension is %d, " - "axis's size is %d", - x_rank, - axis_size)); - - std::vector count(axis_size, 0); - for (size_t i = 0; i < axis_size; i++) { - PADDLE_ENFORCE_GE( - axis[i], - 0, - errors::InvalidArgument("The axis should be greater than or equal to 0." - "But received %d of axis[%d]", - axis[i], - i)); - - PADDLE_ENFORCE_EQ( - axis[i] < static_cast(axis_size) && ++count[axis[i]] == 1, - true, - errors::InvalidArgument( - "Each element of Attribute axis should " - "be a unique value range from 0 to (dims - 1), " - "where the dims is the axis's size, " - "unique value means this axis value can appear only once. " - "But received axis[%d] is %d, axis_size is %d, " - "count[axis[%d]] is %d", - i, - axis[i], - axis_size, - i, - count[axis[i]])); - } - - phi::DDim out_dims(x_dims); - for (size_t i = 0; i < axis_size; ++i) { - out_dims[i] = x_dims[axis[i]]; - } - - out->set_dims(out_dims); - out->set_dtype(x.dtype()); -} - -void EighInferMeta(const MetaTensor& x, - const std::string& uplo, - MetaTensor* out_w, - MetaTensor* out_v) { - auto input_dim = x.dims(); - auto rank = input_dim.size(); - - PADDLE_ENFORCE_GE(rank, - 2, - phi::errors::InvalidArgument( - "The Input(X) should have at least 2 dimensions." - "But received a %d dimension tensor.", - rank)); - PADDLE_ENFORCE_EQ( - input_dim[rank - 2], - input_dim[rank - 1], - phi::errors::InvalidArgument( - "Eigh op is designed for square matrix, consequently" - "inner-most 2 dimensions of Input(X) should be symmetric." 
- "But received X's shape[-2] = %d and shape[-1] = %d.", - input_dim[rank - 2], - input_dim[rank - 1])); - - std::vector values_dim; - - for (auto i = 0; i < rank - 1; i++) { - values_dim.emplace_back(input_dim[i]); - } - out_w->set_dims(phi::make_ddim(values_dim)); - out_v->set_dims(input_dim); -} - void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) { auto rank = condition.dims().size(); PADDLE_ENFORCE_GE( @@ -1381,53 +1428,6 @@ void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) { out->set_dtype(DataType::INT64); } -void ShardIndexInferMeta(const MetaTensor& in, - int index_num, - int nshards, - int shard_id, - int ignore_value, - MetaTensor* out, - MetaConfig config) { - auto x_dims = in.dims(); - PADDLE_ENFORCE_GE( - x_dims.size(), - 2, - phi::errors::InvalidArgument("Rank of Input(X) should be at least 2, " - "but the value given is %d.", - x_dims.size())); - if (config.is_runtime || x_dims[x_dims.size() - 1] > 0) { - PADDLE_ENFORCE_EQ(x_dims[x_dims.size() - 1], - 1U, - phi::errors::InvalidArgument( - "The last dimension of Input(X) should be 1, " - "but the value given is %d.", - x_dims[x_dims.size() - 1])); - } - - out->set_dims(x_dims); - out->share_lod(in); - out->set_dtype(in.dtype()); -} - -void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) { - auto dim_x = x.dims(); - auto rank_x = dim_x.size(); - PADDLE_ENFORCE_GE(axis, - -rank_x, - phi::errors::InvalidArgument( - "Attr(axis) value should be in range [-R, R-1], " - "R is the rank of Input(X).")); - PADDLE_ENFORCE_LT(axis, - rank_x, - phi::errors::InvalidArgument( - "Attr(axis) value should be in range [-R, R-1], " - "R is the rank of Input(X).")); - - out->set_dims(x.dims()); - out->set_dtype(x.dtype()); - out->share_lod(x); -} - } // namespace phi PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta); diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h index c7b7f8e3c1397b0bbae67a77ffada3b85535b5ba..539b6dcba42bc5a5d1a201d67e2d1f6d0664ff7b 100644 --- a/paddle/phi/infermeta/unary.h +++ b/paddle/phi/infermeta/unary.h @@ -32,32 +32,20 @@ class MetaConfig; // Because functions in this file not only can infer shape, but also need // infer lod or other useful data. 
+void ArgMinMaxInferMeta(const MetaTensor& x, + int64_t axis, + bool keepdims, + bool flatten, + int dtype, + MetaTensor* out, + MetaConfig config = MetaConfig()); + void ArgsortInferMeta(const MetaTensor& input, int axis, bool descending, MetaTensor* output, MetaTensor* indices); -void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out); - -// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1] -void UnchangedInferMetaCheckAxis(const MetaTensor& x, - int axis, - MetaTensor* out); - -void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out); - -void FlattenInferMeta(const MetaTensor& x, - int start_axis, - int stop_axis, - MetaTensor* out); - -void GumbelSoftmaxInferMeta(const MetaTensor& x, - float temperature, - bool hard, - int axis, - MetaTensor* out); - void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out); void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out); @@ -76,6 +64,30 @@ void CumsumInferMeta(const MetaTensor& x, bool reverse, MetaTensor* out); +void DiagInferMeta(const MetaTensor& x, + int offset, + float padding_value, + MetaTensor* out); + +void DiagonalInferMeta( + const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out); + +void EighInferMeta(const MetaTensor& x, + const std::string& uplo, + MetaTensor* out_w, + MetaTensor* out_v); + +void FlattenInferMeta(const MetaTensor& x, + int start_axis, + int stop_axis, + MetaTensor* out); + +void GumbelSoftmaxInferMeta(const MetaTensor& x, + float temperature, + bool hard, + int axis, + MetaTensor* out); + void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out); void InferMetaFromVecValue(const MetaTensor& x, @@ -84,11 +96,37 @@ void InferMetaFromVecValue(const MetaTensor& x, void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out); +void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out); + void MultinomialInferMeta(const MetaTensor& x, int num_samples, bool replacement, MetaTensor* out); +void PadInferMeta(const MetaTensor& input, + const std::vector& paddings, + float pad_value, + MetaTensor* out, + MetaConfig config = MetaConfig()); + +void PixelShuffleInferMeta(const MetaTensor& x, + int upscale_factor, + const std::string& data_format, + MetaTensor* out); + +void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out); + +void ReduceInferMeta(const MetaTensor& x, + const std::vector& axis, + bool keep_dim, + MetaTensor* out); + +void ReduceInferMetaBase(const MetaTensor& x, + const std::vector& axis, + bool keep_dim, + bool reduce_all, + MetaTensor* out); + void ReshapeInferMeta(const MetaTensor& x, const ScalarArray& shape, MetaTensor* out, @@ -100,28 +138,23 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); -void TileInferMeta(const MetaTensor& x, - const ScalarArray& repeat_times, - MetaTensor* out, - MetaConfig config = MetaConfig()); +void ShardIndexInferMeta(const MetaTensor& in, + int index_num, + int nshards, + int shard_id, + int ignore_value, + MetaTensor* out, + MetaConfig config = MetaConfig()); -void SumRawInferMeta(const MetaTensor& x, - const std::vector& axis, - bool keep_dim, - bool reduce_all, - DataType dtype, - MetaTensor* out); +void SizeInferMeta(const MetaTensor& input, MetaTensor* out); -void ReduceInferMetaBase(const MetaTensor& x, - const std::vector& axis, - bool keep_dim, - bool reduce_all, - MetaTensor* out); +void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out); -void ReduceInferMeta(const 
MetaTensor& x, - const std::vector& axis, - bool keep_dim, - MetaTensor* out); +void SplitInferMeta(const MetaTensor& x_meta, + const ScalarArray& num_or_sections, + const Scalar& axis, + std::vector out, + MetaConfig config = MetaConfig()); void SumInferMeta(const MetaTensor& x, const std::vector& axis, @@ -129,21 +162,39 @@ void SumInferMeta(const MetaTensor& x, bool keep_dim, MetaTensor* out); +void SumRawInferMeta(const MetaTensor& x, + const std::vector& axis, + bool keep_dim, + bool reduce_all, + DataType dtype, + MetaTensor* out); + +void TileInferMeta(const MetaTensor& x, + const ScalarArray& repeat_times, + MetaTensor* out, + MetaConfig config = MetaConfig()); + +void TraceInferMeta( + const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out); + void TransferLayoutInferMeta(const MetaTensor& x, DataLayout layout, MetaTensor* out); -void SplitInferMeta(const MetaTensor& x_meta, - const ScalarArray& num_or_sections, - const Scalar& axis, - std::vector out, - MetaConfig config = MetaConfig()); +void TransposeInferMeta(const MetaTensor& x, + const std::vector& axis, + MetaTensor* out); void UnbindInferMeta(const MetaTensor& x, int axis, std::vector* outs); -void TraceInferMeta( - const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out); + +void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out); + +// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1] +void UnchangedInferMetaCheckAxis(const MetaTensor& x, + int axis, + MetaTensor* out); void UnfoldInferMeta(const MetaTensor& x, const std::vector& kernel_sizes, @@ -153,56 +204,6 @@ void UnfoldInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); -void DiagInferMeta(const MetaTensor& x, - int offset, - float padding_value, - MetaTensor* out); - -void ArgMinMaxInferMeta(const MetaTensor& x, - int64_t axis, - bool keepdims, - bool flatten, - int dtype, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void SizeInferMeta(const MetaTensor& input, MetaTensor* out); - -void PadInferMeta(const MetaTensor& input, - const std::vector& paddings, - float pad_value, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void DiagonalInferMeta( - const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out); - -void PixelShuffleInferMeta(const MetaTensor& x, - int upscale_factor, - const std::string& data_format, - MetaTensor* out); - -void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out); - -void TransposeInferMeta(const MetaTensor& x, - const std::vector& axis, - MetaTensor* out); - -void EighInferMeta(const MetaTensor& x, - const std::string& uplo, - MetaTensor* out_w, - MetaTensor* out_v); - void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out); -void ShardIndexInferMeta(const MetaTensor& in, - int index_num, - int nshards, - int shard_id, - int ignore_value, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out); - } // namespace phi diff --git a/paddle/phi/kernels/funcs/matrix_inverse.h b/paddle/phi/kernels/funcs/matrix_inverse.h index c5b04a8106561962b6916907d86450a63c763830..1c6756f1720a23ada5bb4ff2fdb4f4840660ed58 100644 --- a/paddle/phi/kernels/funcs/matrix_inverse.h +++ b/paddle/phi/kernels/funcs/matrix_inverse.h @@ -39,7 +39,7 @@ void ComputeInverseEigen(const Context& dev_ctx, int batch_size = rank > 2 ? 
a.numel() / (n * n) : 1;

   const T* a_ptr = a.data<T>();
-  T* a_inv_ptr = a_inv->mutable_data<T>(dev_ctx.GetPlace());
+  T* a_inv_ptr = dev_ctx.template Alloc<T>(a_inv);

   for (int i = 0; i < batch_size; ++i) {
     ConstEigenMatrixMap mat(a_ptr + i * n * n, n, n);
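     // Each iteration maps the i-th n x n matrix of the batch read-only; the
     // typed output buffer comes from dev_ctx.template Alloc<T>(a_inv) above,
     // the phi-style replacement for mutable_data<T>(place).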