diff --git a/paddle/fluid/operators/gather_nd_op.cc b/paddle/fluid/operators/gather_nd_op.cc index e5ca15a39ef51f7807246c2ee1d473a0499b6463..7d7d6ae81a0935402f94cbc16e31fbba8009ce9c 100644 --- a/paddle/fluid/operators/gather_nd_op.cc +++ b/paddle/fluid/operators/gather_nd_op.cc @@ -16,7 +16,6 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/infermeta/backward.h" #include "paddle/phi/infermeta/binary.h" -#include "paddle/phi/infermeta/ternary.h" namespace paddle { namespace operators { diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 801bd98b504c54d1e523b2c6a00f8baa43ea3efd..a2bdf6b963bd1960ea048e21f5219a2d3127a1ee 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x, } } -void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) { - if (dx) { - dx->share_meta(x); - } +void GatherNdGradInferMeta(const MetaTensor& x, + const MetaTensor& index, + const MetaTensor& out_grad, + MetaTensor* x_grad) { + const auto& dtype = out_grad.dtype(); + x_grad->set_dims(x.dims()); + x_grad->share_lod(x); + x_grad->set_dtype(dtype); } void GeneralBinaryGradInferMeta(const MetaTensor& x, @@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x, } } +void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) { + if (dx) { + dx->share_meta(x); + } +} + void GumbelSoftmaxGradInferMeta(const MetaTensor& out, const MetaTensor& dout, int axis, @@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out, dout.dims(), errors::InvalidArgument( "Input(Out) and its gradients should have the same shape.")); - dx->share_meta(dout); -} -void GatherNdGradInferMeta(const MetaTensor& x, - const MetaTensor& index, - const MetaTensor& out_grad, - MetaTensor* x_grad) { - const auto& dtype = out_grad.dtype(); - x_grad->set_dims(x.dims()); - x_grad->share_lod(x); - x_grad->set_dtype(dtype); + dx->share_meta(dout); } void PsroiPoolGradInferMeta(const MetaTensor& x, diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index 9ed24ef8646f52d38b88e206fb45aaa5f1bd6ebd..921df460118e6916a0a81ae0027f53d0ff201833 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x, MetaTensor* dweight, MetaTensor* dbias); -void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx); +void GatherNdGradInferMeta(const MetaTensor& x, + const MetaTensor& index, + const MetaTensor& out_grad, + MetaTensor* x_grad); void GeneralBinaryGradInferMeta(const MetaTensor& x, const MetaTensor& y, @@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x, MetaTensor* dy, MetaTensor* dz); +void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx); + void GumbelSoftmaxGradInferMeta(const MetaTensor& out, const MetaTensor& dout, int axis, diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index 641956c4d9de796bed166e1f6238ff6988601bec..b9d432244568365ff29dea43c465952bbfab1174 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -22,6 +22,153 @@ limitations under the License. 
*/ namespace phi { +void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { + out->share_meta(x); +} + +void BCELossInferMeta(const MetaTensor& input, + const MetaTensor& label, + MetaTensor* out, + MetaConfig config) { + auto input_dims = input.dims(); + auto label_dims = label.dims(); + + int rank = input_dims.size(); + PADDLE_ENFORCE_EQ(rank, + label_dims.size(), + phi::errors::InvalidArgument( + "Input(X) and Input(Label) shall have the same rank. " + "But received: the rank of Input(X) is [%d], " + "the rank of Input(Label) is [%d].", + rank, + label_dims.size())); + + bool check = true; + if ((!config.is_runtime) && + (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) { + check = false; + } + + if (check) { + PADDLE_ENFORCE_EQ(input_dims, + label_dims, + phi::errors::InvalidArgument( + "Input(X) and Input(Label) shall have the same " + "shape. But received: the shape of Input(X) is " + "[%s], the shape of Input(Label) is [%s].", + input_dims, + label_dims)); + } + + out->set_dims(input_dims); + out->set_dtype(input.dtype()); + out->share_lod(input); +} + +void BincountInferMeta(const MetaTensor& x, + const paddle::optional<const DenseTensor&> weights, + int minlength, + MetaTensor* out) { + auto input_dim = x.dims(); + + PADDLE_ENFORCE_GE(minlength, + 0, + phi::errors::InvalidArgument( + "The minlength should be greater than or equal to 0. " + "But received minlength is %d", + minlength)); + + PADDLE_ENFORCE_EQ( + input_dim.size(), + 1, + phi::errors::InvalidArgument("The 'shape' of Input(X) must be a 1-D tensor. " + "But the dimension of Input(X) is [%d]", + input_dim.size())); + + if (weights.is_initialized()) { + auto weights_dim = weights->dims(); + PADDLE_ENFORCE_EQ(weights_dim.size(), + 1, + phi::errors::InvalidArgument( + "The 'shape' of Input(Weights) must be a 1-D tensor. " + "But the dimension of Input(Weights) is [%d]", + weights_dim.size())); + + PADDLE_ENFORCE_EQ( + weights_dim[0], + input_dim[0], + phi::errors::InvalidArgument( + "The 'shape' of Input(Weights) must be equal to the 'shape' of " + "Input(X). " + "But received: the 'shape' of Input(Weights) is [%s], " + "the 'shape' of Input(X) is [%s]", + weights_dim, + input_dim)); + } + out->set_dims(phi::make_ddim({-1})); + if (weights.is_initialized()) { + out->set_dtype(weights->dtype()); + } else { + out->set_dtype(x.dtype()); + } + + out->share_lod(x); +} + +void CholeskySolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + + auto x_dims_n = x_dims.size(); + auto y_dims_n = y_dims.size(); + + PADDLE_ENFORCE_GE(x_dims_n, + 2, + phi::errors::InvalidArgument( + "the rank of input X must be greater than or equal to 2")); + PADDLE_ENFORCE_GE(y_dims_n, + 2, + phi::errors::InvalidArgument( + "the rank of input Y must be greater than or equal to 2")); + PADDLE_ENFORCE_EQ( + y_dims[y_dims_n - 1], + y_dims[y_dims_n - 2], + phi::errors::InvalidArgument("input Matrix Y should be a square matrix, " + "But Got last shape of %ld x %ld", + y_dims[y_dims_n - 1], + y_dims[y_dims_n - 2])); + PADDLE_ENFORCE_EQ( + x_dims[x_dims_n - 2], + y_dims[y_dims_n - 2], + phi::errors::InvalidArgument("the first dim of Matrix X must be equal to " + "the first dim of Matrix Y, " + "But Got %ld and %ld", + x_dims[x_dims_n - 2], + y_dims[y_dims_n - 2])); + + std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims); + std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims); + + std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); + std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); + + std::vector<int64_t> expand_batch_portion = + funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); + + std::vector<int64_t> x_broadcast_dims({expand_batch_portion}); + x_broadcast_dims.insert(x_broadcast_dims.end(), + {x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]}); + + // dim of 'out' is the same as 'X' after broadcast + out->set_dims(phi::make_ddim(x_broadcast_dims)); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + out->share_lod(x); +} + void CompareInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, @@ -67,6 +214,74 @@ void CompareAllInferMeta(const MetaTensor& x, out->set_dtype(DataType::BOOL); } +void CrossInferMeta(const MetaTensor& x, + const MetaTensor& y, + int axis, + MetaTensor* out) { + auto x_dim = x.dims(); + auto y_dim = y.dims(); + auto dim = axis; + + bool dims_match = phi::funcs::CheckDims(x_dim, y_dim); + PADDLE_ENFORCE_EQ( + dims_match, + true, + phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to " + "the 'shape' of Input(Y). But received " + "Input(X).dimensions = [%s], " + "Input(Y).dimensions = [%s]", + x_dim, + y_dim)); + + if (dim != DDim::kMaxRank) { + PADDLE_ENFORCE_EQ( + dim < x_dim.size() && dim >= (0 - x_dim.size()), + true, + phi::errors::OutOfRange( + "Attr(dim) is out of range. It's expected " + "to be in range of [-%d, %d]. But received Attr(dim) = %d.", + x_dim.size(), + x_dim.size() - 1, + dim)); + if (dim < 0) { + dim += x_dim.size(); + } + PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, + true, + phi::errors::InvalidArgument( + "Input(X/Y).dims()[dim] should be equal to 3. "
+ "But received Input(X/Y).dims()[dim] = %d.", + x_dim[dim])); + } + out->set_dims(x_dim); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + out->share_lod(x); +} + +void DistInferMeta(const MetaTensor& x, + const MetaTensor& y, + float p, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + + PADDLE_ENFORCE_NE(phi::product(x_dims), + 0, + phi::errors::InvalidArgument( + "The Input(X) has not been initialized properly. The " + "shape of Input(X) = [%s].", + x_dims)); + PADDLE_ENFORCE_NE(phi::product(y_dims), + 0, + phi::errors::InvalidArgument( + "The Input(Y) has not been initialized properly. The " + "shape of Input(Y) = [%s].", + y_dims)); + out->set_dims({1}); + out->set_dtype(x.dtype()); +} + void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { auto x_dims = x.dims(); auto x_rank = static_cast(x_dims.size()); @@ -109,84 +324,11 @@ void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { out->set_layout(x.layout()); } -void MatmulInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool trans_x, - bool trans_y, - MetaTensor* out) { - std::vector dims_x = phi::vectorize(x.dims()); - std::vector dims_y = phi::vectorize(y.dims()); - auto ndims_x = dims_x.size(); - auto ndims_y = dims_y.size(); - PADDLE_ENFORCE_GT(ndims_x, - 0UL, - phi::errors::InvalidArgument( - "The Input(x) dims size must be greater than 0," - " but reviced dims size is 0. ")); - PADDLE_ENFORCE_GT(ndims_y, - 0UL, - phi::errors::InvalidArgument( - "The Input(y) dims size must be greater than 0," - " but reviced dims size is 0. ")); - - bool x_broadcasted = false, y_broadcasted = false; - if (ndims_x == 1) { - dims_x.insert(dims_x.begin(), 1); - ndims_x = 2; - x_broadcasted = true; - } - - if (ndims_y == 1) { - dims_y.push_back(1); - ndims_y = 2; - y_broadcasted = true; - } - - size_t M, N; - if (trans_x) { - M = dims_x[ndims_x - 1]; - } else { - M = dims_x[ndims_x - 2]; - } - if (trans_y) { - N = dims_y[ndims_y - 2]; - } else { - N = dims_y[ndims_y - 1]; - } - - std::vector new_dims; - if (ndims_x > ndims_y) { - new_dims.assign(dims_x.begin(), dims_x.end() - 2); - } else if (ndims_x < ndims_y) { - new_dims.assign(dims_y.begin(), dims_y.end() - 2); - } else { - new_dims.reserve(ndims_x); - for (size_t i = 0; i < ndims_x - 2; ++i) { - new_dims.push_back(std::max(dims_x[i], dims_y[i])); - } - } - if (!x_broadcasted) { - new_dims.push_back(M); - } - if (!y_broadcasted) { - new_dims.push_back(N); - } - if (x_broadcasted && y_broadcasted) { - new_dims.push_back(1); - } - - auto ddim_out = phi::make_ddim(new_dims); - - out->set_dims(ddim_out); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); -} - -void ElementwiseInferMeta(const MetaTensor& x, - const MetaTensor& y, - MetaTensor* out) { - return ElementwiseRawInferMeta(x, y, -1, std::move(out)); -} +void ElementwiseInferMeta(const MetaTensor& x, + const MetaTensor& y, + MetaTensor* out) { + return ElementwiseRawInferMeta(x, y, -1, std::move(out)); +} void ElementwiseRawInferMeta(const MetaTensor& x, const MetaTensor& y, @@ -223,383 +365,19 @@ void ElementwiseRawInferMeta(const MetaTensor& x, funcs::GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(), - y_dims_array.data(), - out_dims_array.data(), - max_dim, - axis); - auto out_dims = phi::make_ddim(out_dims_array); - out->set_dims(out_dims); - } else { - out->set_dims(x.dims()); - } - - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - -void HuberLossInferMeta(const MetaTensor& input, - const 
MetaTensor& label, - float delta, - MetaTensor* out, - MetaTensor* residual, - MetaConfig config) { - auto input_dims = input.dims(); - auto label_dims = label.dims(); - - PADDLE_ENFORCE_EQ(input_dims.size(), - label_dims.size(), - phi::errors::InvalidArgument( - "Input(input) rank and Input(label) rank should be " - "same, but received input rank(%d) != label rank(%d)", - input_dims.size(), - label_dims.size())); - - bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) || - phi::contain_unknown_dim(label_dims); - if (config.is_runtime || !contain_unknown_dim) { - PADDLE_ENFORCE_EQ( - input_dims, - label_dims, - phi::errors::InvalidArgument( - "The Input(input) and Input(label) should have the same " - "shape, but received input shape [%s] != label shape [%s]", - input_dims, - label_dims)); - } - - auto out_dims = label_dims; - residual->set_dims(out_dims); - out->set_dims(out_dims); - out->share_lod(input); -} - -void CholeskySolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - - auto x_dims_n = x_dims.size(); - auto y_dims_n = y_dims.size(); - - PADDLE_ENFORCE_GE(x_dims_n, - 2, - phi::errors::InvalidArgument( - "the rank of input Y must greater or equal to 2")); - PADDLE_ENFORCE_GE(y_dims_n, - 2, - phi::errors::InvalidArgument( - "the rank of input X must greater or equal to 2")); - PADDLE_ENFORCE_EQ( - y_dims[y_dims_n - 1], - y_dims[y_dims_n - 2], - phi::errors::InvalidArgument("input Matrix Y should be square matrix," - "But Got last shape of %ld x %ld", - y_dims[y_dims_n - 1], - y_dims[y_dims_n - 2])); - PADDLE_ENFORCE_EQ( - x_dims[x_dims_n - 2], - y_dims[y_dims_n - 2], - phi::errors::InvalidArgument("the first dim of Matrix X must be equal to " - "the fisrt dim of Matrix Y," - "But Got %ld and %ld", - x_dims[x_dims_n - 2], - y_dims[y_dims_n - 2])); - - std::vector x_dims_vec = phi::vectorize(x_dims); - std::vector y_dims_vec = phi::vectorize(y_dims); - - std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); - std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); - - std::vector expand_batch_portion = - funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); - - std::vector x_broadcast_dims({expand_batch_portion}); - x_broadcast_dims.insert(x_broadcast_dims.end(), - {x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]}); - - // dim of 'out' is the same with 'X' after broadcast - out->set_dims(phi::make_ddim(x_broadcast_dims)); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - -void TriangularSolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - bool transpose, - bool unitriangular, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - - auto x_dims_n = x_dims.size(); - auto y_dims_n = y_dims.size(); - - PADDLE_ENFORCE_GE(x_dims_n, - 2, - phi::errors::InvalidArgument( - "The input tensor X's dimensions of TriangularSolveOp " - "should be >= 2. But received X's " - "dimensions = %d, X's shape = [%s]", - x_dims.size(), - x_dims)); - - PADDLE_ENFORCE_GE(y_dims_n, - 2, - phi::errors::InvalidArgument( - "The input tensor Y's dimensions of TriangularSolveOp " - "should be >=2. 
But received Y's " - "dimensions = %d, Y's shape = [%s]", - y_dims.size(), - y_dims)); - - PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], - x_dims[x_dims_n - 1], - phi::errors::InvalidArgument( - "The inner-most 2 dimensions of Input(X) all should " - "be square matrices " - "But received X's shape[-2] = %d and shape[-1] = %d.", - x_dims[x_dims_n - 2], - x_dims[x_dims_n - 1])); - - std::vector x_dims_vec = phi::vectorize(x_dims); - std::vector y_dims_vec = phi::vectorize(y_dims); - - std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); - std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); - - std::vector expand_batch_portion = - funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); - - std::vector y_broadcast_dims({expand_batch_portion}); - y_broadcast_dims.insert(y_broadcast_dims.end(), - {y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]}); - - // dim of 'out' is the same with 'Y' after broadcast - out->set_dims(phi::make_ddim(y_broadcast_dims)); - out->set_dtype(y.dtype()); - out->set_layout(y.layout()); - out->share_lod(y); -} - -void IndexSampleInferMeta(const MetaTensor& x, - const MetaTensor& y, - MetaTensor* out, - MetaConfig config) { - auto input_dims = x.dims(); - PADDLE_ENFORCE_EQ(input_dims.size(), - 2, - errors::InvalidArgument( - "Inputs(X) shape of IndexSample op should be 2-D, but " - "got X's shape = [%s], please check X shape.", - input_dims)); - - auto index_dims = y.dims(); - PADDLE_ENFORCE_EQ( - index_dims.size(), - 2, - errors::InvalidArgument( - "Inputs(Index) shape of IndexSample op should be 2-D, but " - "got Index's shape [%s] , please check index shape.", - input_dims)); - if (config.is_runtime) { - PADDLE_ENFORCE_EQ(input_dims[0], - index_dims[0], - errors::InvalidArgument( - "Inputs(X)'s value of dimension 0 must same with " - "Inputs(Index)'s value of dimension 0, but " - "got %d of Inputs(X), and got %d of Inputs(Index), " - "please check Inputs shape.", - input_dims[0], - index_dims[0])); - } - out->set_dtype(x.dtype()); - out->set_dims(index_dims); - out->share_lod(y); -} -void CrossInferMeta(const MetaTensor& x, - const MetaTensor& y, - int axis, - MetaTensor* out) { - auto x_dim = x.dims(); - auto y_dim = y.dims(); - auto dim = axis; - - bool dims_match = phi::funcs::CheckDims(x_dim, y_dim); - PADDLE_ENFORCE_EQ( - dims_match, - true, - phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to " - "the 'shape' of Input(Y). But received " - "Input(X).dimensions = [%s], " - "Input(Y).dimensions = [%s]", - x_dim, - y_dim)); - - if (dim != DDim::kMaxRank) { - PADDLE_ENFORCE_EQ( - dim < x_dim.size() && dim >= (0 - x_dim.size()), - true, - phi::errors::OutOfRange( - "Attr(dim) is out of range, It's expected " - "to be in range of [-%d, %d]. But received Attr(dim) = %d.", - x_dim.size(), - x_dim.size() - 1, - dim)); - if (dim < 0) { - dim += x_dim.size(); - } - PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, - true, - phi::errors::InvalidArgument( - "Input(X/Y).dims()[dim] should be equal to 3." 
- "But received Input(X/Y).dims()[dim] = %d.", - x_dim[dim])); - } - out->set_dims(x_dim); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - -void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) { - out->share_meta(x); -} - -void SegmentPoolInferMeta(const MetaTensor& x, - const MetaTensor& segment_ids, - const std::string& pooltype, - MetaTensor* out, - MetaTensor* summed_ids, - MetaConfig config) { - auto dims = x.dims(); - dims[0] = -1; - out->set_dims(dims); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - - if (pooltype == "MEAN") { - summed_ids->set_dims({-1, 1}); - summed_ids->set_dtype(x.dtype()); - summed_ids->set_layout(x.layout()); - } -} - -void BCELossInferMeta(const MetaTensor& input, - const MetaTensor& label, - MetaTensor* out, - MetaConfig config) { - auto input_dims = input.dims(); - auto label_dims = label.dims(); - - int rank = input_dims.size(); - PADDLE_ENFORCE_EQ(rank, - label_dims.size(), - phi::errors::InvalidArgument( - "Input(X) and Input(Label) shall have the same rank." - "But received: the rank of Input(X) is [%d], " - "the rank of Input(Label) is [%d].", - rank, - label_dims.size())); - - bool check = true; - if ((!config.is_runtime) && - (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) { - check = false; - } - - if (check) { - PADDLE_ENFORCE_EQ(input_dims, - label_dims, - phi::errors::InvalidArgument( - "Input(X) and Input(Label) shall have the same " - "shape. But received: the shape of Input(X) is " - "[%s], the shape of Input(Label) is [%s].", - input_dims, - label_dims)); - } - - out->set_dims(input_dims); - out->set_dtype(input.dtype()); - out->share_lod(input); -} - -void BincountInferMeta(const MetaTensor& x, - const paddle::optional weights, - int minlength, - MetaTensor* out) { - auto input_dim = x.dims(); - - PADDLE_ENFORCE_GE(minlength, - 0, - phi::errors::InvalidArgument( - "The minlength should be greater than or equal to 0." - "But received minlength is %d", - minlength)); - - PADDLE_ENFORCE_EQ( - input_dim.size(), - 1, - phi::errors::InvalidArgument("The 'shape' of Input(X) must be 1-D tensor." - "But the dimension of Input(X) is [%d]", - input_dim.size())); - - if (weights.is_initialized()) { - auto weights_dim = weights->dims(); - PADDLE_ENFORCE_EQ(weights_dim.size(), - 1, - phi::errors::InvalidArgument( - "The 'shape' of Input(Weights) must be 1-D tensor." - "But the dimension of Input(Weights) is [%d]", - weights_dim.size())); - - PADDLE_ENFORCE_EQ( - weights_dim[0], - input_dim[0], - phi::errors::InvalidArgument( - "The 'shape' of Input(Weights) must be equal to the 'shape' of " - "Input(X)." - "But received: the 'shape' of Input(Weights) is [%s]," - "the 'shape' of Input(X) is [%s]", - weights_dim, - input_dim)); - } - out->set_dims(phi::make_ddim({-1})); - if (weights.is_initialized()) { - out->set_dtype(weights->dtype()); + y_dims_array.data(), + out_dims_array.data(), + max_dim, + axis); + auto out_dims = phi::make_ddim(out_dims_array); + out->set_dims(out_dims); } else { - out->set_dtype(x.dtype()); + out->set_dims(x.dims()); } - out->share_lod(x); -} - -void DistInferMeta(const MetaTensor& x, - const MetaTensor& y, - float p, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - - PADDLE_ENFORCE_NE(phi::product(x_dims), - 0, - phi::errors::InvalidArgument( - "The Input(X) has not been initialized properly. 
The " - "shape of Input(X) = [%s].", - x_dims)); - PADDLE_ENFORCE_NE(phi::product(y_dims), - 0, - phi::errors::InvalidArgument( - "The Input(Y) has not been initialized properly. The " - "shape of Input(Y) = [%s].", - y_dims)); - out->set_dims({1}); out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + out->share_lod(x); } void GatherNdInferMeta(const MetaTensor& x, @@ -648,6 +426,78 @@ void GatherTreeMeta(const MetaTensor& ids, out->set_dims(ids_dims); } +void HuberLossInferMeta(const MetaTensor& input, + const MetaTensor& label, + float delta, + MetaTensor* out, + MetaTensor* residual, + MetaConfig config) { + auto input_dims = input.dims(); + auto label_dims = label.dims(); + + PADDLE_ENFORCE_EQ(input_dims.size(), + label_dims.size(), + phi::errors::InvalidArgument( + "Input(input) rank and Input(label) rank should be " + "same, but received input rank(%d) != label rank(%d)", + input_dims.size(), + label_dims.size())); + + bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) || + phi::contain_unknown_dim(label_dims); + if (config.is_runtime || !contain_unknown_dim) { + PADDLE_ENFORCE_EQ( + input_dims, + label_dims, + phi::errors::InvalidArgument( + "The Input(input) and Input(label) should have the same " + "shape, but received input shape [%s] != label shape [%s]", + input_dims, + label_dims)); + } + + auto out_dims = label_dims; + residual->set_dims(out_dims); + out->set_dims(out_dims); + out->share_lod(input); +} + +void IndexSampleInferMeta(const MetaTensor& x, + const MetaTensor& y, + MetaTensor* out, + MetaConfig config) { + auto input_dims = x.dims(); + PADDLE_ENFORCE_EQ(input_dims.size(), + 2, + errors::InvalidArgument( + "Inputs(X) shape of IndexSample op should be 2-D, but " + "got X's shape = [%s], please check X shape.", + input_dims)); + + auto index_dims = y.dims(); + PADDLE_ENFORCE_EQ( + index_dims.size(), + 2, + errors::InvalidArgument( + "Inputs(Index) shape of IndexSample op should be 2-D, but " + "got Index's shape [%s] , please check index shape.", + input_dims)); + if (config.is_runtime) { + PADDLE_ENFORCE_EQ(input_dims[0], + index_dims[0], + errors::InvalidArgument( + "Inputs(X)'s value of dimension 0 must same with " + "Inputs(Index)'s value of dimension 0, but " + "got %d of Inputs(X), and got %d of Inputs(Index), " + "please check Inputs shape.", + input_dims[0], + index_dims[0])); + } + out->set_dtype(x.dtype()); + out->set_dims(index_dims); + out->share_lod(y); +} + void LogLossInferMeta(const MetaTensor& input, const MetaTensor& label, float epsilon, @@ -690,6 +540,79 @@ void LogLossInferMeta(const MetaTensor& input, out->share_lod(input); } +void MatmulInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool trans_x, + bool trans_y, + MetaTensor* out) { + std::vector dims_x = phi::vectorize(x.dims()); + std::vector dims_y = phi::vectorize(y.dims()); + auto ndims_x = dims_x.size(); + auto ndims_y = dims_y.size(); + PADDLE_ENFORCE_GT(ndims_x, + 0UL, + phi::errors::InvalidArgument( + "The Input(x) dims size must be greater than 0," + " but reviced dims size is 0. ")); + PADDLE_ENFORCE_GT(ndims_y, + 0UL, + phi::errors::InvalidArgument( + "The Input(y) dims size must be greater than 0," + " but reviced dims size is 0. 
")); + + bool x_broadcasted = false, y_broadcasted = false; + if (ndims_x == 1) { + dims_x.insert(dims_x.begin(), 1); + ndims_x = 2; + x_broadcasted = true; + } + + if (ndims_y == 1) { + dims_y.push_back(1); + ndims_y = 2; + y_broadcasted = true; + } + + size_t M, N; + if (trans_x) { + M = dims_x[ndims_x - 1]; + } else { + M = dims_x[ndims_x - 2]; + } + if (trans_y) { + N = dims_y[ndims_y - 2]; + } else { + N = dims_y[ndims_y - 1]; + } + + std::vector new_dims; + if (ndims_x > ndims_y) { + new_dims.assign(dims_x.begin(), dims_x.end() - 2); + } else if (ndims_x < ndims_y) { + new_dims.assign(dims_y.begin(), dims_y.end() - 2); + } else { + new_dims.reserve(ndims_x); + for (size_t i = 0; i < ndims_x - 2; ++i) { + new_dims.push_back(std::max(dims_x[i], dims_y[i])); + } + } + if (!x_broadcasted) { + new_dims.push_back(M); + } + if (!y_broadcasted) { + new_dims.push_back(N); + } + if (x_broadcasted && y_broadcasted) { + new_dims.push_back(1); + } + + auto ddim_out = phi::make_ddim(new_dims); + + out->set_dims(ddim_out); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); +} + void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) { auto dim_x = x.dims(); auto dim_vec = vec.dims(); @@ -720,6 +643,25 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) { out->share_lod(x); } +void SegmentPoolInferMeta(const MetaTensor& x, + const MetaTensor& segment_ids, + const std::string& pooltype, + MetaTensor* out, + MetaTensor* summed_ids, + MetaConfig config) { + auto dims = x.dims(); + dims[0] = -1; + out->set_dims(dims); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + + if (pooltype == "MEAN") { + summed_ids->set_dims({-1, 1}); + summed_ids->set_dtype(x.dtype()); + summed_ids->set_layout(x.layout()); + } +} + void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, const MetaTensor& label, bool normalize, @@ -761,4 +703,63 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, out->share_lod(x); } +void TriangularSolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + bool transpose, + bool unitriangular, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + + auto x_dims_n = x_dims.size(); + auto y_dims_n = y_dims.size(); + + PADDLE_ENFORCE_GE(x_dims_n, + 2, + phi::errors::InvalidArgument( + "The input tensor X's dimensions of TriangularSolveOp " + "should be >= 2. But received X's " + "dimensions = %d, X's shape = [%s]", + x_dims.size(), + x_dims)); + + PADDLE_ENFORCE_GE(y_dims_n, + 2, + phi::errors::InvalidArgument( + "The input tensor Y's dimensions of TriangularSolveOp " + "should be >=2. 
But received Y's " + "dimensions = %d, Y's shape = [%s]", + y_dims.size(), + y_dims)); + + PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], + x_dims[x_dims_n - 1], + phi::errors::InvalidArgument( + "The inner-most 2 dimensions of Input(X) all should " + "be square matrices " + "But received X's shape[-2] = %d and shape[-1] = %d.", + x_dims[x_dims_n - 2], + x_dims[x_dims_n - 1])); + + std::vector x_dims_vec = phi::vectorize(x_dims); + std::vector y_dims_vec = phi::vectorize(y_dims); + + std::vector x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2); + std::vector y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2); + + std::vector expand_batch_portion = + funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut); + + std::vector y_broadcast_dims({expand_batch_portion}); + y_broadcast_dims.insert(y_broadcast_dims.end(), + {y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]}); + + // dim of 'out' is the same with 'Y' after broadcast + out->set_dims(phi::make_ddim(y_broadcast_dims)); + out->set_dtype(y.dtype()); + out->set_layout(y.layout()); + out->share_lod(y); +} + } // namespace phi diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h index d2b16e557b06dc94107788995f0c26f1e27e1761..307ecc29cac7548a811b9bc0ae9a693c7062a354 100644 --- a/paddle/phi/infermeta/binary.h +++ b/paddle/phi/infermeta/binary.h @@ -29,22 +29,43 @@ namespace phi { // Because functions in this file not only can infer shape, but also need // infer lod or other useful data. +void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); + +void BCELossInferMeta(const MetaTensor& input, + const MetaTensor& label, + MetaTensor* out, + MetaConfig config = MetaConfig()); + +void BincountInferMeta(const MetaTensor& x, + const paddle::optional weights, + int minlength, + MetaTensor* out); + +void CholeskySolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + MetaTensor* out); + +void CompareAllInferMeta(const MetaTensor& x, + const MetaTensor& y, + MetaTensor* out); + void CompareInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, MetaTensor* out); -void CompareAllInferMeta(const MetaTensor& x, - const MetaTensor& y, - MetaTensor* out); +void CrossInferMeta(const MetaTensor& x, + const MetaTensor& y, + int axis, + MetaTensor* out); -void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); +void DistInferMeta(const MetaTensor& x, + const MetaTensor& y, + float p, + MetaTensor* out); -void MatmulInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool trans_x, - bool trans_y, - MetaTensor* out); +void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); void ElementwiseInferMeta(const MetaTensor& x, const MetaTensor& y, @@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta, int axis, MetaTensor* out); +void GatherNdInferMeta(const MetaTensor& x, + const MetaTensor& index, + MetaTensor* out); + +void GatherTreeMeta(const MetaTensor& ids, + const MetaTensor& parents, + MetaTensor* out); + void HuberLossInferMeta(const MetaTensor& input_meta, const MetaTensor& label_meta, float delta, @@ -62,29 +91,24 @@ void HuberLossInferMeta(const MetaTensor& input_meta, MetaTensor* residual, MetaConfig config = MetaConfig()); -void CholeskySolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - MetaTensor* out); - -void TriangularSolveInferMeta(const MetaTensor& x, - const MetaTensor& y, - bool upper, - bool transpose, - bool unitriangular, - MetaTensor* out); - void 
IndexSampleInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out, MetaConfig config = MetaConfig()); -void CrossInferMeta(const MetaTensor& x, - const MetaTensor& y, - int axis, - MetaTensor* out); +void LogLossInferMeta(const MetaTensor& input, + const MetaTensor& label, + float epsilon, + MetaTensor* out, + MetaConfig config = MetaConfig()); -void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); +void MatmulInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool trans_x, + bool trans_y, + MetaTensor* out); + +void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out); void SegmentPoolInferMeta(const MetaTensor& x, const MetaTensor& segment_ids, @@ -93,37 +117,6 @@ void SegmentPoolInferMeta(const MetaTensor& x, MetaTensor* summed_ids, MetaConfig config = MetaConfig()); -void BCELossInferMeta(const MetaTensor& input, - const MetaTensor& label, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void BincountInferMeta(const MetaTensor& x, - const paddle::optional weights, - int minlength, - MetaTensor* out); - -void DistInferMeta(const MetaTensor& x, - const MetaTensor& y, - float p, - MetaTensor* out); - -void GatherNdInferMeta(const MetaTensor& x, - const MetaTensor& index, - MetaTensor* out); - -void GatherTreeMeta(const MetaTensor& ids, - const MetaTensor& parents, - MetaTensor* out); - -void LogLossInferMeta(const MetaTensor& input, - const MetaTensor& label, - float epsilon, - MetaTensor* out, - MetaConfig config = MetaConfig()); - -void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out); - void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, const MetaTensor& label, bool normalize, @@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); +void TriangularSolveInferMeta(const MetaTensor& x, + const MetaTensor& y, + bool upper, + bool transpose, + bool unitriangular, + MetaTensor* out); + } // namespace phi diff --git a/paddle/phi/infermeta/nullary.cc b/paddle/phi/infermeta/nullary.cc index 506d3fd14ea3fd568ce2f77d7ce30408062279e9..081084567e840f287bb113ee567888f4032f5638 100644 --- a/paddle/phi/infermeta/nullary.cc +++ b/paddle/phi/infermeta/nullary.cc @@ -16,6 +16,12 @@ limitations under the License. 
*/ namespace phi { +void CreateInferMeta(const ScalarArray& shape, + DataType dtype, + MetaTensor* out) { + CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out); +} + void CreateInferMetaBase(const std::vector<int64_t>& shape, DataType dtype, DataLayout layout, @@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector<int64_t>& shape, out->set_layout(layout); } -void CreateInferMeta(const ScalarArray& shape, - DataType dtype, - MetaTensor* out) { - CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out); -} - void EyeInferMeta(int64_t num_rows, int64_t num_columns, DataType dtype, @@ -41,18 +41,6 @@ void EyeInferMeta(int64_t num_rows, out->set_dtype(dtype); } -void TruncatedGaussianRandomInferMeta(const std::vector<int64_t>& shape, - float mean, - float std, - int seed, - DataType dtype, - MetaTensor* out) { - auto out_dims = phi::make_ddim(shape); - out->set_dims(out_dims); - out->set_dtype(dtype); - out->set_layout(DataLayout::NCHW); -} - void GaussianRandomInferMeta(const ScalarArray& shape, float mean, float std, @@ -65,4 +53,16 @@ void GaussianRandomInferMeta(const ScalarArray& shape, out->set_layout(DataLayout::NCHW); } +void TruncatedGaussianRandomInferMeta(const std::vector<int64_t>& shape, + float mean, + float std, + int seed, + DataType dtype, + MetaTensor* out) { + auto out_dims = phi::make_ddim(shape); + out->set_dims(out_dims); + out->set_dtype(dtype); + out->set_layout(DataLayout::NCHW); +} + } // namespace phi diff --git a/paddle/phi/infermeta/nullary.h b/paddle/phi/infermeta/nullary.h index bd0567486e4d62a9f6fe9adfa02727bfe79937e1..38eaa636f8c8779c5a1f597b8cfb23ce6efc5edc 100644 --- a/paddle/phi/infermeta/nullary.h +++ b/paddle/phi/infermeta/nullary.h @@ -28,25 +28,18 @@ namespace phi { // Because functions in this file not only can infer shape, but also need // infer lod or other useful data. +void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out); + void CreateInferMetaBase(const std::vector<int64_t>& shape, DataType dtype, DataLayout layout, MetaTensor* out); -void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out); - void EyeInferMeta(int64_t num_rows, int64_t num_columns, DataType dtype, MetaTensor* out); -void TruncatedGaussianRandomInferMeta(const std::vector<int64_t>& shape, - float mean, - float std, - int seed, - DataType dtype, - MetaTensor* out); - void GaussianRandomInferMeta(const ScalarArray& shape, float mean, float std, @@ -54,4 +47,11 @@ void GaussianRandomInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out); +void TruncatedGaussianRandomInferMeta(const std::vector<int64_t>& shape, + float mean, + float std, + int seed, + DataType dtype, + MetaTensor* out); + } // namespace phi diff --git a/paddle/phi/infermeta/ternary.cc b/paddle/phi/infermeta/ternary.cc index 88ac2cb0f8d1b01ade0e58bc8f1253c67ad05981..235cfe368c1921eac546b670470963fb49100290 100644 --- a/paddle/phi/infermeta/ternary.cc +++ b/paddle/phi/infermeta/ternary.cc @@ -18,6 +18,58 @@ limitations under the License. */ namespace phi { +void AccuracyInferMeta(const MetaTensor& out, + const MetaTensor& indice, + const MetaTensor& label, + MetaTensor* accuracy, + MetaTensor* correct, + MetaTensor* total, + MetaConfig config) { + auto inference_dim = out.dims(); + auto label_dim = label.dims(); + // Assume indices has the same shape as inference, because + // it's the output of topk. + PADDLE_ENFORCE_EQ( + label_dim.size(), + 2, + phi::errors::InvalidArgument( + "ShapeError: label's dimensions of AccuracyOp must be 2. 
" + "But received label's dimensions = %d, label's shape = [%s]", + label_dim.size(), + label_dim)); + if (config.is_runtime) { + PADDLE_ENFORCE_EQ(label_dim[1], + 1, + phi::errors::InvalidArgument( + "ShapeError: label's second dimension of " + "AccuracyOp must be 1. But received label's " + "second dimension is = %d, label's shape = [%s]", + label_dim[1], + label_dim)); + PADDLE_ENFORCE_EQ( + inference_dim[0], + label_dim[0], + phi::errors::InvalidArgument( + "ShapeError: the output's num_rows of AccuracyOp must be" + " the same as label's num_rows. But received output's " + "shape = [%s], label's shape = [%s], output's num_rows = %d, " + "label's " + "num_rows = %d", + inference_dim, + label_dim, + inference_dim[0], + label_dim[0])); + } + + accuracy->set_dims({1}); + accuracy->set_dtype(out.dtype()); + correct->set_dims({1}); + correct->set_dtype(out.dtype()); + total->set_dims({1}); + total->set_dtype(out.dtype()); + accuracy->share_lod(out); +} + void AddmmInferMeta(const MetaTensor& input, const MetaTensor& x, const MetaTensor& y, @@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input, out->set_dtype(input.dtype()); } +void GraphSendRecvInferMeta(const MetaTensor& x, + const MetaTensor& src_index, + const MetaTensor& dst_index, + const std::string& pool_type, + MetaTensor* out, + MetaTensor* dst_count) { + auto src_index_dims = src_index.dims(); + if (src_index_dims.size() == 2) { + PADDLE_ENFORCE_EQ(src_index_dims[1], + 1, + phi::errors::InvalidArgument( + "The last dim of Src_index should be 1 when it " + "is 2D, but we get %d", + src_index_dims[1])); + } else { + PADDLE_ENFORCE_EQ( + src_index_dims.size(), + 1, + phi::errors::InvalidArgument( + "The Src_index should be 1D, when it is not 2D, but we get %d", + src_index_dims.size())); + } + + auto dst_index_dims = dst_index.dims(); + if (dst_index_dims.size() == 2) { + PADDLE_ENFORCE_EQ(dst_index_dims[1], + 1, + phi::errors::InvalidArgument( + "The last dim of Dst_index should be 1 when it " + "is 2D, but we get %d", + dst_index_dims[1])); + } else { + PADDLE_ENFORCE_EQ( + dst_index_dims.size(), + 1, + phi::errors::InvalidArgument("The Dst_index should be 1D, " + "when it is not 2D, but we get %d", + dst_index_dims.size())); + } + + PADDLE_ENFORCE_EQ(src_index_dims[0], + dst_index_dims[0], + phi::errors::InvalidArgument( + "Src_index and Dst_index should have the same shape.")); + + auto dims = x.dims(); + out->set_dims(dims); + out->set_dtype(x.dtype()); + + if (pool_type == "MEAN") { + dst_count->set_dims({dims[0]}); + dst_count->set_dtype(DataType::INT32); + } +} + +void LerpInferMeta(const MetaTensor& x, + const MetaTensor& y, + const MetaTensor& weight, + MetaTensor* out) { + auto x_dims = x.dims(); + auto y_dims = y.dims(); + auto w_dims = weight.dims(); + DDim out_dims; + out_dims = funcs::GetOutputDims(x_dims, y_dims); + if (w_dims.size() > 1 || w_dims[0] != 1) { + out_dims = funcs::GetOutputDims(out_dims, w_dims); + } + out->set_dims(out_dims); + out->set_dtype(x.dtype()); + out->share_lod(x); +} + +void LinspaceInferMeta(const MetaTensor& start, + const MetaTensor& stop, + const MetaTensor& number, + MetaTensor* out) { + auto s_dims = start.dims(); + PADDLE_ENFORCE_EQ( + (s_dims.size() == 1) && (s_dims[0] == 1), + true, + phi::errors::InvalidArgument("The shape of Input(Start) must be [1]," + "but received input shape is [%s].", + s_dims)); + auto e_dims = stop.dims(); + PADDLE_ENFORCE_EQ( + (e_dims.size() == 1) && (e_dims[0] == 1), + true, + phi::errors::InvalidArgument("The shape of Input(Stop) must be [1]," 
+ "but received input shape is [%s].", + e_dims)); + auto step_dims = number.dims(); + PADDLE_ENFORCE_EQ( + (step_dims.size() == 1) && (step_dims[0] == 1), + true, + phi::errors::InvalidArgument("The shape of Input(Num) must be [1]," + "but received input shape is [%s].", + step_dims)); + out->set_dims(phi::make_ddim({-1})); + out->set_dtype(start.dtype()); +} + void NllLossRawInferMeta(const MetaTensor& input, const MetaTensor& label, paddle::optional weight, @@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input, scores->set_dtype(length.dtype()); } -void LerpInferMeta(const MetaTensor& x, - const MetaTensor& y, - const MetaTensor& weight, - MetaTensor* out) { - auto x_dims = x.dims(); - auto y_dims = y.dims(); - auto w_dims = weight.dims(); - DDim out_dims; - out_dims = funcs::GetOutputDims(x_dims, y_dims); - if (w_dims.size() > 1 || w_dims[0] != 1) { - out_dims = funcs::GetOutputDims(out_dims, w_dims); - } - out->set_dims(out_dims); - out->set_dtype(x.dtype()); - out->share_lod(x); -} - -void LinspaceInferMeta(const MetaTensor& start, - const MetaTensor& stop, - const MetaTensor& number, - MetaTensor* out) { - auto s_dims = start.dims(); - PADDLE_ENFORCE_EQ( - (s_dims.size() == 1) && (s_dims[0] == 1), - true, - phi::errors::InvalidArgument("The shape of Input(Start) must be [1]," - "but received input shape is [%s].", - s_dims)); - auto e_dims = stop.dims(); - PADDLE_ENFORCE_EQ( - (e_dims.size() == 1) && (e_dims[0] == 1), - true, - phi::errors::InvalidArgument("The shape of Input(Stop) must be [1]," - "but received input shape is [%s].", - e_dims)); - auto step_dims = number.dims(); - PADDLE_ENFORCE_EQ( - (step_dims.size() == 1) && (step_dims[0] == 1), - true, - phi::errors::InvalidArgument("The shape of Input(Num) must be [1]," - "but received input shape is [%s].", - step_dims)); - out->set_dims(phi::make_ddim({-1})); - out->set_dtype(start.dtype()); -} - -void AccuracyInferMeta(const MetaTensor& out, - const MetaTensor& indice, - const MetaTensor& label, - MetaTensor* accuracy, - MetaTensor* correct, - MetaTensor* total, - MetaConfig config) { - auto inference_dim = out.dims(); - auto label_dim = label.dims(); - // Assume indices has same shape as inference, because - // it's the output of topk. - PADDLE_ENFORCE_EQ( - label_dim.size(), - 2, - phi::errors::InvalidArgument( - "ShapeError: label's dimensions of AccuracyOp must be 2. " - "But received label's dimensions = %d, label's shape = [%s]", - label_dim.size(), - label_dim)); - if (config.is_runtime) { - PADDLE_ENFORCE_EQ(label_dim[1], - 1, - phi::errors::InvalidArgument( - "ShapeError: label's second dimension of " - "AccuracyOp must be 1. But received label's " - "second dimension is = %d, label's shape = [%s]", - label_dim[1], - label_dim)); - PADDLE_ENFORCE_EQ( - inference_dim[0], - label_dim[0], - phi::errors::InvalidArgument( - "ShapeError: the output's num_rows of AccuracyOp must be" - " the same as label's num_rows. 
But received output's " - "shape = [%s], label's shape = [%s], output's num_rows = %d, " - "label's " - "num_rows = %d", - inference_dim, - label_dim, - inference_dim[0], - label_dim[0])); - } - - accuracy->set_dims({1}); - accuracy->set_dtype(out.dtype()); - correct->set_dims({1}); - correct->set_dtype(out.dtype()); - total->set_dims({1}); - total->set_dtype(out.dtype()); - accuracy->share_lod(out); -} - -void GraphSendRecvInferMeta(const MetaTensor& x, - const MetaTensor& src_index, - const MetaTensor& dst_index, - const std::string& pool_type, - MetaTensor* out, - MetaTensor* dst_count) { - auto src_index_dims = src_index.dims(); - if (src_index_dims.size() == 2) { - PADDLE_ENFORCE_EQ(src_index_dims[1], - 1, - phi::errors::InvalidArgument( - "The last dim of Src_index should be 1 when it " - "is 2D, but we get %d", - src_index_dims[1])); - } else { - PADDLE_ENFORCE_EQ( - src_index_dims.size(), - 1, - phi::errors::InvalidArgument( - "The Src_index should be 1D, when it is not 2D, but we get %d", - src_index_dims.size())); - } - - auto dst_index_dims = dst_index.dims(); - if (dst_index_dims.size() == 2) { - PADDLE_ENFORCE_EQ(dst_index_dims[1], - 1, - phi::errors::InvalidArgument( - "The last dim of Dst_index should be 1 when it " - "is 2D, but we get %d", - dst_index_dims[1])); - } else { - PADDLE_ENFORCE_EQ( - dst_index_dims.size(), - 1, - phi::errors::InvalidArgument("The Dst_index should be 1D, " - "when it is not 2D, but we get %d", - dst_index_dims.size())); - } - - PADDLE_ENFORCE_EQ(src_index_dims[0], - dst_index_dims[0], - phi::errors::InvalidArgument( - "Src_index and Dst_index should have the same shape.")); - - auto dims = x.dims(); - out->set_dims(dims); - out->set_dtype(x.dtype()); - - if (pool_type == "MEAN") { - dst_count->set_dims({dims[0]}); - dst_count->set_dtype(DataType::INT32); - } -} } // namespace phi diff --git a/paddle/phi/infermeta/ternary.h b/paddle/phi/infermeta/ternary.h index c9a7e78db752f95c7e38857e3f1075a0d672246b..209a07db18b5c7a87ba094c5839149533757220d 100644 --- a/paddle/phi/infermeta/ternary.h +++ b/paddle/phi/infermeta/ternary.h @@ -45,16 +45,22 @@ void AddmmInferMeta(const MetaTensor& input, float beta, MetaTensor* out); -void GatherNdGradInferMeta(const MetaTensor& x, - const MetaTensor& index, - const MetaTensor& out_grad, - MetaTensor* x_grad); +void GraphSendRecvInferMeta(const MetaTensor& x, + const MetaTensor& src_index, + const MetaTensor& dst_index, + const std::string& pool_type, + MetaTensor* out, + MetaTensor* dst_count); -void ScatterInferMeta(const MetaTensor& x, - const MetaTensor& index, - const MetaTensor& updates, - bool overwrite, - MetaTensor* out); +void LerpInferMeta(const MetaTensor& x, + const MetaTensor& y, + const MetaTensor& weight, + MetaTensor* out); + +void LinspaceInferMeta(const MetaTensor& start, + const MetaTensor& stop, + const MetaTensor& number, + MetaTensor* out); void NllLossRawInferMeta(const MetaTensor& input, const MetaTensor& label, @@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input, MetaTensor* total_weight, MetaConfig config = MetaConfig()); +void ScatterInferMeta(const MetaTensor& x, + const MetaTensor& index, + const MetaTensor& updates, + bool overwrite, + MetaTensor* out); + void ScatterNdAddInferMeta(const MetaTensor& x, const MetaTensor& index, const MetaTensor& updates, @@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input, MetaTensor* path, MetaConfig config = MetaConfig()); -void LerpInferMeta(const MetaTensor& x, - const MetaTensor& y, - const MetaTensor& weight, - 
MetaTensor* out); - -void LinspaceInferMeta(const MetaTensor& start, - const MetaTensor& stop, - const MetaTensor& number, - MetaTensor* out); - -void GraphSendRecvInferMeta(const MetaTensor& x, - const MetaTensor& src_index, - const MetaTensor& dst_index, - const std::string& pool_type, - MetaTensor* out, - MetaTensor* dst_count); } // namespace phi
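
Editor's note: the patch above is a pure alphabetical reordering of InferMeta declarations and definitions (plus moving GatherNdGradInferMeta from the ternary header into backward.h, next to the other grad InferMeta functions); no inference logic changes. Since the moved MatmulInferMeta body is hard to read inside the diff, here is a minimal standalone sketch of the shape rule it implements. The helper name InferMatmulShape and the driver are illustrative only, not part of Paddle:

```cpp
// Standalone sketch of the shape rule in MatmulInferMeta (not Paddle code):
// 1-D operands are temporarily promoted to 2-D, batch dimensions are taken
// from the higher-rank operand (or broadcast elementwise at equal rank),
// and the promoted axes are dropped again at the end.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> InferMatmulShape(std::vector<int64_t> x,
                                      std::vector<int64_t> y,
                                      bool trans_x, bool trans_y) {
  bool x_bc = false, y_bc = false;
  if (x.size() == 1) { x.insert(x.begin(), 1); x_bc = true; }  // [K] -> [1, K]
  if (y.size() == 1) { y.push_back(1); y_bc = true; }          // [K] -> [K, 1]
  const int64_t M = trans_x ? x.back() : x[x.size() - 2];
  const int64_t N = trans_y ? y[y.size() - 2] : y.back();
  std::vector<int64_t> out;
  if (x.size() > y.size()) {
    out.assign(x.begin(), x.end() - 2);         // batch dims come from x
  } else if (x.size() < y.size()) {
    out.assign(y.begin(), y.end() - 2);         // batch dims come from y
  } else {
    for (size_t i = 0; i + 2 < x.size(); ++i)
      out.push_back(std::max(x[i], y[i]));      // elementwise broadcast
  }
  if (!x_bc) out.push_back(M);
  if (!y_bc) out.push_back(N);
  if (x_bc && y_bc) out.push_back(1);           // vec . vec -> [1]
  return out;
}

int main() {
  // [2, 3, 4] x [4, 5] -> [2, 3, 5]
  for (int64_t d : InferMatmulShape({2, 3, 4}, {4, 5}, false, false))
    std::cout << d << ' ';
  std::cout << '\n';
}
```

For example, a 1-D x 1-D dot product infers the shape [1], which is exactly the `x_broadcasted && y_broadcasted` branch in the moved code.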
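Similarly, CholeskySolveInferMeta and TriangularSolveInferMeta both strip the trailing two matrix dimensions and delegate batch broadcasting to funcs::MatrixGetBroadcastBatchPortion, whose source is not shown in this diff. Assuming it follows the usual right-aligned broadcast rule (an assumption, not confirmed by the patch), a sketch:

```cpp
// Assumed behavior of MatrixGetBroadcastBatchPortion (not Paddle code):
// right-align the two batch-dim lists and take the elementwise max,
// so size-1 dims stretch. Assumes dims are equal or one of them is 1.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> BroadcastBatchPortion(const std::vector<int64_t>& a,
                                           const std::vector<int64_t>& b) {
  const size_t n = std::max(a.size(), b.size());
  std::vector<int64_t> out(n, 1);
  for (size_t i = 0; i < n; ++i) {
    const int64_t da = i < n - a.size() ? 1 : a[i - (n - a.size())];
    const int64_t db = i < n - b.size() ? 1 : b[i - (n - b.size())];
    out[i] = std::max(da, db);
  }
  return out;
}

int main() {
  // batch portions {5, 1} and {3} broadcast to {5, 3}
  for (int64_t d : BroadcastBatchPortion({5, 1}, {3})) std::cout << d << ' ';
  std::cout << '\n';
}
```

Under that assumption, the output of cholesky_solve keeps X's matrix dims on top of the broadcast batch portion, while triangular_solve keeps Y's, matching the two `// dim of 'out' ...` comments in the moved code.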