Unverified · Commit f3f27d25 authored by zyfncg, committed by GitHub

[PHI] Refactor infermeta files (Part2) (#40367)

* refactor infermeta files

* update
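For context: an InferMeta function computes only a kernel output's metadata (dims, dtype, layout, LoD) from its inputs' metadata, without allocating tensor data, and this commit just regroups those functions alphabetically across the backward/binary/nullary/ternary infermeta files; the dropped ternary.h include in the first hunk reflects GatherNdGradInferMeta moving from the ternary header to the backward header. A minimal sketch of the recurring pattern, using a simplified stand-in for phi::MetaTensor rather than Paddle's real class:

#include <cstdint>
#include <vector>

// Illustrative stand-in for phi::MetaTensor: metadata only, no data buffer.
struct MetaLike {
  std::vector<int64_t> dims;
  int dtype = 0;  // stands in for phi::DataType
};

// The pattern repeated throughout this diff: read input metadata, then set
// the output's dims/dtype. Compare GatherNdGradInferMeta below, which takes
// dims from x but dtype from out_grad.
void GradLikeInferMeta(const MetaLike& x,
                       const MetaLike& out_grad,
                       MetaLike* x_grad) {
  x_grad->dims = x.dims;           // gradient shape mirrors the forward input
  x_grad->dtype = out_grad.dtype;  // dtype propagates from the output grad
}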
Parent 080024f0
@@ -16,7 +16,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
-#include "paddle/phi/infermeta/ternary.h"

 namespace paddle {
 namespace operators {
...
@@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
   }
 }

-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
-  if (dx) {
-    dx->share_meta(x);
-  }
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad) {
+  const auto& dtype = out_grad.dtype();
+  x_grad->set_dims(x.dims());
+  x_grad->share_lod(x);
+  x_grad->set_dtype(dtype);
 }

 void GeneralBinaryGradInferMeta(const MetaTensor& x,
@@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
   }
 }

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
+  if (dx) {
+    dx->share_meta(x);
+  }
+}
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
@@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
       dout.dims(),
       errors::InvalidArgument(
           "Input(Out) and its gradients should have the same shape."));
   dx->share_meta(dout);
 }
-
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad) {
-  const auto& dtype = out_grad.dtype();
-  x_grad->set_dims(x.dims());
-  x_grad->share_lod(x);
-  x_grad->set_dtype(dtype);
-}

 void PsroiPoolGradInferMeta(const MetaTensor& x,
...
@@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                         MetaTensor* dweight,
                                         MetaTensor* dbias);

-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad);

 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
@@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
                                  MetaTensor* dy,
                                  MetaTensor* dz);

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);

 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
...
@@ -22,6 +22,153 @@ limitations under the License. */

 namespace phi {
void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
out->share_meta(x);
}
void BCELossInferMeta(const MetaTensor& input,
const MetaTensor& label,
MetaTensor* out,
MetaConfig config) {
auto input_dims = input.dims();
auto label_dims = label.dims();
int rank = input_dims.size();
PADDLE_ENFORCE_EQ(rank,
label_dims.size(),
phi::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same rank."
"But received: the rank of Input(X) is [%d], "
"the rank of Input(Label) is [%d].",
rank,
label_dims.size()));
bool check = true;
if ((!config.is_runtime) &&
(phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) {
check = false;
}
if (check) {
PADDLE_ENFORCE_EQ(input_dims,
label_dims,
phi::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same "
"shape. But received: the shape of Input(X) is "
"[%s], the shape of Input(Label) is [%s].",
input_dims,
label_dims));
}
out->set_dims(input_dims);
out->set_dtype(input.dtype());
out->share_lod(input);
}
void BincountInferMeta(const MetaTensor& x,
const paddle::optional<const MetaTensor&> weights,
int minlength,
MetaTensor* out) {
auto input_dim = x.dims();
PADDLE_ENFORCE_GE(minlength,
0,
phi::errors::InvalidArgument(
"The minlength should be greater than or equal to 0."
"But received minlength is %d",
minlength));
PADDLE_ENFORCE_EQ(
input_dim.size(),
1,
phi::errors::InvalidArgument("The 'shape' of Input(X) must be 1-D tensor."
"But the dimension of Input(X) is [%d]",
input_dim.size()));
if (weights.is_initialized()) {
auto weights_dim = weights->dims();
PADDLE_ENFORCE_EQ(weights_dim.size(),
1,
phi::errors::InvalidArgument(
"The 'shape' of Input(Weights) must be 1-D tensor."
"But the dimension of Input(Weights) is [%d]",
weights_dim.size()));
PADDLE_ENFORCE_EQ(
weights_dim[0],
input_dim[0],
phi::errors::InvalidArgument(
"The 'shape' of Input(Weights) must be equal to the 'shape' of "
"Input(X)."
"But received: the 'shape' of Input(Weights) is [%s],"
"the 'shape' of Input(X) is [%s]",
weights_dim,
input_dim));
}
out->set_dims(phi::make_ddim({-1}));
if (weights.is_initialized()) {
out->set_dtype(weights->dtype());
} else {
out->set_dtype(x.dtype());
}
out->share_lod(x);
}
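// Editorial note (reading of the code above, not in the original source):
// bincount's output length depends on the values in x (max element vs.
// minlength), not just on shapes, so dims are set to {-1}, i.e. unknown
// until run time, and the dtype follows Weights when weights are given.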
void CholeskySolveInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool upper,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto x_dims_n = x_dims.size();
auto y_dims_n = y_dims.size();
  PADDLE_ENFORCE_GE(x_dims_n,
                    2,
                    phi::errors::InvalidArgument(
                        "the rank of input X must be greater than or equal to 2"));
  PADDLE_ENFORCE_GE(y_dims_n,
                    2,
                    phi::errors::InvalidArgument(
                        "the rank of input Y must be greater than or equal to 2"));
PADDLE_ENFORCE_EQ(
y_dims[y_dims_n - 1],
y_dims[y_dims_n - 2],
phi::errors::InvalidArgument("input Matrix Y should be square matrix,"
"But Got last shape of %ld x %ld",
y_dims[y_dims_n - 1],
y_dims[y_dims_n - 2]));
PADDLE_ENFORCE_EQ(
x_dims[x_dims_n - 2],
y_dims[y_dims_n - 2],
phi::errors::InvalidArgument("the first dim of Matrix X must be equal to "
"the fisrt dim of Matrix Y,"
"But Got %ld and %ld",
x_dims[x_dims_n - 2],
y_dims[y_dims_n - 2]));
std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
std::vector<int64_t> expand_batch_portion =
funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
std::vector<int64_t> x_broadcast_dims({expand_batch_portion});
x_broadcast_dims.insert(x_broadcast_dims.end(),
{x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]});
// dim of 'out' is the same with 'X' after broadcast
out->set_dims(phi::make_ddim(x_broadcast_dims));
out->set_dtype(x.dtype());
out->set_layout(x.layout());
out->share_lod(x);
}
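// Editorial note (worked example, not in the original source): the batch
// portion is broadcast numpy-style. Assuming x = [3, 1, 4, 2] and
// y = [5, 2, 2] (matrix parts [4, 2] and [2, 2], batch parts [3, 1] and
// [5]), MatrixGetBroadcastBatchPortion would give [3, 5], so out gets dims
// [3, 5, 4, 2]: the broadcast batch dims followed by x's matrix part.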
void CompareInferMeta(const MetaTensor& x,
                      const MetaTensor& y,
                      int axis,
...
@@ -67,6 +214,74 @@ void CompareAllInferMeta(const MetaTensor& x,
  out->set_dtype(DataType::BOOL);
}
void CrossInferMeta(const MetaTensor& x,
const MetaTensor& y,
int axis,
MetaTensor* out) {
auto x_dim = x.dims();
auto y_dim = y.dims();
auto dim = axis;
bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
PADDLE_ENFORCE_EQ(
dims_match,
true,
phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to "
"the 'shape' of Input(Y). But received "
"Input(X).dimensions = [%s], "
"Input(Y).dimensions = [%s]",
x_dim,
y_dim));
if (dim != DDim::kMaxRank) {
PADDLE_ENFORCE_EQ(
dim < x_dim.size() && dim >= (0 - x_dim.size()),
true,
phi::errors::OutOfRange(
"Attr(dim) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(dim) = %d.",
x_dim.size(),
x_dim.size() - 1,
dim));
if (dim < 0) {
dim += x_dim.size();
}
PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3,
true,
phi::errors::InvalidArgument(
"Input(X/Y).dims()[dim] should be equal to 3."
"But received Input(X/Y).dims()[dim] = %d.",
x_dim[dim]));
}
out->set_dims(x_dim);
out->set_dtype(x.dtype());
out->set_layout(x.layout());
out->share_lod(x);
}
void DistInferMeta(const MetaTensor& x,
const MetaTensor& y,
float p,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
PADDLE_ENFORCE_NE(phi::product(x_dims),
0,
phi::errors::InvalidArgument(
"The Input(X) has not been initialized properly. The "
"shape of Input(X) = [%s].",
x_dims));
PADDLE_ENFORCE_NE(phi::product(y_dims),
0,
phi::errors::InvalidArgument(
"The Input(Y) has not been initialized properly. The "
"shape of Input(Y) = [%s].",
y_dims));
out->set_dims({1});
out->set_dtype(x.dtype());
}
 void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
   auto x_dims = x.dims();
   auto x_rank = static_cast<size_t>(x_dims.size());
...
@@ -109,84 +324,11 @@ void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
   out->set_layout(x.layout());
 }

-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out) {
-  std::vector<int64_t> dims_x = phi::vectorize(x.dims());
-  std::vector<int64_t> dims_y = phi::vectorize(y.dims());
-  auto ndims_x = dims_x.size();
-  auto ndims_y = dims_y.size();
-  PADDLE_ENFORCE_GT(ndims_x,
-                    0UL,
-                    phi::errors::InvalidArgument(
-                        "The Input(x) dims size must be greater than 0,"
-                        " but received dims size is 0. "));
-  PADDLE_ENFORCE_GT(ndims_y,
-                    0UL,
-                    phi::errors::InvalidArgument(
-                        "The Input(y) dims size must be greater than 0,"
-                        " but received dims size is 0. "));
-
-  bool x_broadcasted = false, y_broadcasted = false;
-  if (ndims_x == 1) {
-    dims_x.insert(dims_x.begin(), 1);
-    ndims_x = 2;
-    x_broadcasted = true;
-  }
-
-  if (ndims_y == 1) {
-    dims_y.push_back(1);
-    ndims_y = 2;
-    y_broadcasted = true;
-  }
-
-  size_t M, N;
-  if (trans_x) {
-    M = dims_x[ndims_x - 1];
-  } else {
-    M = dims_x[ndims_x - 2];
-  }
-  if (trans_y) {
-    N = dims_y[ndims_y - 2];
-  } else {
-    N = dims_y[ndims_y - 1];
-  }
-
-  std::vector<int64_t> new_dims;
-  if (ndims_x > ndims_y) {
-    new_dims.assign(dims_x.begin(), dims_x.end() - 2);
-  } else if (ndims_x < ndims_y) {
-    new_dims.assign(dims_y.begin(), dims_y.end() - 2);
-  } else {
-    new_dims.reserve(ndims_x);
-    for (size_t i = 0; i < ndims_x - 2; ++i) {
-      new_dims.push_back(std::max(dims_x[i], dims_y[i]));
-    }
-  }
-  if (!x_broadcasted) {
-    new_dims.push_back(M);
-  }
-  if (!y_broadcasted) {
-    new_dims.push_back(N);
-  }
-  if (x_broadcasted && y_broadcasted) {
-    new_dims.push_back(1);
-  }
-
-  auto ddim_out = phi::make_ddim(new_dims);
-
-  out->set_dims(ddim_out);
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-}

 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out) {
   return ElementwiseRawInferMeta(x, y, -1, std::move(out));
 }
void ElementwiseRawInferMeta(const MetaTensor& x,
                             const MetaTensor& y,
...
@@ -223,383 +365,19 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
    funcs::GetBroadcastDimsArrays(x_dims,
                                  y_dims,
                                  x_dims_array.data(),
                                  y_dims_array.data(),
                                  out_dims_array.data(),
                                  max_dim,
                                  axis);
    auto out_dims = phi::make_ddim(out_dims_array);
    out->set_dims(out_dims);
  } else {
    out->set_dims(x.dims());
  }
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());
  out->share_lod(x);
}
void HuberLossInferMeta(const MetaTensor& input,
const MetaTensor& label,
float delta,
MetaTensor* out,
MetaTensor* residual,
MetaConfig config) {
auto input_dims = input.dims();
auto label_dims = label.dims();
PADDLE_ENFORCE_EQ(input_dims.size(),
label_dims.size(),
phi::errors::InvalidArgument(
"Input(input) rank and Input(label) rank should be "
"same, but received input rank(%d) != label rank(%d)",
input_dims.size(),
label_dims.size()));
bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) ||
phi::contain_unknown_dim(label_dims);
if (config.is_runtime || !contain_unknown_dim) {
PADDLE_ENFORCE_EQ(
input_dims,
label_dims,
phi::errors::InvalidArgument(
"The Input(input) and Input(label) should have the same "
"shape, but received input shape [%s] != label shape [%s]",
input_dims,
label_dims));
}
auto out_dims = label_dims;
residual->set_dims(out_dims);
out->set_dims(out_dims);
out->share_lod(input);
}
void CholeskySolveInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool upper,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto x_dims_n = x_dims.size();
auto y_dims_n = y_dims.size();
  PADDLE_ENFORCE_GE(x_dims_n,
                    2,
                    phi::errors::InvalidArgument(
                        "the rank of input X must be greater than or equal to 2"));
  PADDLE_ENFORCE_GE(y_dims_n,
                    2,
                    phi::errors::InvalidArgument(
                        "the rank of input Y must be greater than or equal to 2"));
PADDLE_ENFORCE_EQ(
y_dims[y_dims_n - 1],
y_dims[y_dims_n - 2],
phi::errors::InvalidArgument("input Matrix Y should be square matrix,"
"But Got last shape of %ld x %ld",
y_dims[y_dims_n - 1],
y_dims[y_dims_n - 2]));
PADDLE_ENFORCE_EQ(
x_dims[x_dims_n - 2],
y_dims[y_dims_n - 2],
phi::errors::InvalidArgument("the first dim of Matrix X must be equal to "
"the fisrt dim of Matrix Y,"
"But Got %ld and %ld",
x_dims[x_dims_n - 2],
y_dims[y_dims_n - 2]));
std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
std::vector<int64_t> expand_batch_portion =
funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
std::vector<int64_t> x_broadcast_dims({expand_batch_portion});
x_broadcast_dims.insert(x_broadcast_dims.end(),
{x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]});
// dim of 'out' is the same with 'X' after broadcast
out->set_dims(phi::make_ddim(x_broadcast_dims));
out->set_dtype(x.dtype());
out->set_layout(x.layout());
out->share_lod(x);
}
void TriangularSolveInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool upper,
bool transpose,
bool unitriangular,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto x_dims_n = x_dims.size();
auto y_dims_n = y_dims.size();
PADDLE_ENFORCE_GE(x_dims_n,
2,
phi::errors::InvalidArgument(
"The input tensor X's dimensions of TriangularSolveOp "
"should be >= 2. But received X's "
"dimensions = %d, X's shape = [%s]",
x_dims.size(),
x_dims));
PADDLE_ENFORCE_GE(y_dims_n,
2,
phi::errors::InvalidArgument(
"The input tensor Y's dimensions of TriangularSolveOp "
"should be >=2. But received Y's "
"dimensions = %d, Y's shape = [%s]",
y_dims.size(),
y_dims));
PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2],
x_dims[x_dims_n - 1],
phi::errors::InvalidArgument(
"The inner-most 2 dimensions of Input(X) all should "
"be square matrices "
"But received X's shape[-2] = %d and shape[-1] = %d.",
x_dims[x_dims_n - 2],
x_dims[x_dims_n - 1]));
std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
std::vector<int64_t> expand_batch_portion =
funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
std::vector<int64_t> y_broadcast_dims({expand_batch_portion});
y_broadcast_dims.insert(y_broadcast_dims.end(),
{y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]});
// dim of 'out' is the same with 'Y' after broadcast
out->set_dims(phi::make_ddim(y_broadcast_dims));
out->set_dtype(y.dtype());
out->set_layout(y.layout());
out->share_lod(y);
}
void IndexSampleInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out,
MetaConfig config) {
auto input_dims = x.dims();
PADDLE_ENFORCE_EQ(input_dims.size(),
2,
errors::InvalidArgument(
"Inputs(X) shape of IndexSample op should be 2-D, but "
"got X's shape = [%s], please check X shape.",
input_dims));
auto index_dims = y.dims();
PADDLE_ENFORCE_EQ(
index_dims.size(),
2,
errors::InvalidArgument(
"Inputs(Index) shape of IndexSample op should be 2-D, but "
"got Index's shape [%s] , please check index shape.",
input_dims));
if (config.is_runtime) {
PADDLE_ENFORCE_EQ(input_dims[0],
index_dims[0],
errors::InvalidArgument(
"Inputs(X)'s value of dimension 0 must same with "
"Inputs(Index)'s value of dimension 0, but "
"got %d of Inputs(X), and got %d of Inputs(Index), "
"please check Inputs shape.",
input_dims[0],
index_dims[0]));
}
out->set_dtype(x.dtype());
out->set_dims(index_dims);
out->share_lod(y);
}
void CrossInferMeta(const MetaTensor& x,
const MetaTensor& y,
int axis,
MetaTensor* out) {
auto x_dim = x.dims();
auto y_dim = y.dims();
auto dim = axis;
bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
PADDLE_ENFORCE_EQ(
dims_match,
true,
phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to "
"the 'shape' of Input(Y). But received "
"Input(X).dimensions = [%s], "
"Input(Y).dimensions = [%s]",
x_dim,
y_dim));
if (dim != DDim::kMaxRank) {
PADDLE_ENFORCE_EQ(
dim < x_dim.size() && dim >= (0 - x_dim.size()),
true,
phi::errors::OutOfRange(
"Attr(dim) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(dim) = %d.",
x_dim.size(),
x_dim.size() - 1,
dim));
if (dim < 0) {
dim += x_dim.size();
}
PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3,
true,
phi::errors::InvalidArgument(
"Input(X/Y).dims()[dim] should be equal to 3."
"But received Input(X/Y).dims()[dim] = %d.",
x_dim[dim]));
}
out->set_dims(x_dim);
out->set_dtype(x.dtype());
out->set_layout(x.layout());
out->share_lod(x);
}
void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
out->share_meta(x);
}
void SegmentPoolInferMeta(const MetaTensor& x,
const MetaTensor& segment_ids,
const std::string& pooltype,
MetaTensor* out,
MetaTensor* summed_ids,
MetaConfig config) {
auto dims = x.dims();
dims[0] = -1;
out->set_dims(dims);
out->set_dtype(x.dtype());
out->set_layout(x.layout());
if (pooltype == "MEAN") {
summed_ids->set_dims({-1, 1});
summed_ids->set_dtype(x.dtype());
summed_ids->set_layout(x.layout());
}
}
void BCELossInferMeta(const MetaTensor& input,
const MetaTensor& label,
MetaTensor* out,
MetaConfig config) {
auto input_dims = input.dims();
auto label_dims = label.dims();
int rank = input_dims.size();
PADDLE_ENFORCE_EQ(rank,
label_dims.size(),
phi::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same rank."
"But received: the rank of Input(X) is [%d], "
"the rank of Input(Label) is [%d].",
rank,
label_dims.size()));
bool check = true;
if ((!config.is_runtime) &&
(phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) {
check = false;
}
if (check) {
PADDLE_ENFORCE_EQ(input_dims,
label_dims,
phi::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same "
"shape. But received: the shape of Input(X) is "
"[%s], the shape of Input(Label) is [%s].",
input_dims,
label_dims));
}
out->set_dims(input_dims);
out->set_dtype(input.dtype());
out->share_lod(input);
}
void BincountInferMeta(const MetaTensor& x,
const paddle::optional<const MetaTensor&> weights,
int minlength,
MetaTensor* out) {
auto input_dim = x.dims();
PADDLE_ENFORCE_GE(minlength,
0,
phi::errors::InvalidArgument(
"The minlength should be greater than or equal to 0."
"But received minlength is %d",
minlength));
PADDLE_ENFORCE_EQ(
input_dim.size(),
1,
phi::errors::InvalidArgument("The 'shape' of Input(X) must be 1-D tensor."
"But the dimension of Input(X) is [%d]",
input_dim.size()));
if (weights.is_initialized()) {
auto weights_dim = weights->dims();
PADDLE_ENFORCE_EQ(weights_dim.size(),
1,
phi::errors::InvalidArgument(
"The 'shape' of Input(Weights) must be 1-D tensor."
"But the dimension of Input(Weights) is [%d]",
weights_dim.size()));
PADDLE_ENFORCE_EQ(
weights_dim[0],
input_dim[0],
phi::errors::InvalidArgument(
"The 'shape' of Input(Weights) must be equal to the 'shape' of "
"Input(X)."
"But received: the 'shape' of Input(Weights) is [%s],"
"the 'shape' of Input(X) is [%s]",
weights_dim,
input_dim));
}
out->set_dims(phi::make_ddim({-1}));
if (weights.is_initialized()) {
out->set_dtype(weights->dtype());
  } else {
    out->set_dtype(x.dtype());
  }
  out->share_lod(x);
}
void DistInferMeta(const MetaTensor& x,
const MetaTensor& y,
float p,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
PADDLE_ENFORCE_NE(phi::product(x_dims),
0,
phi::errors::InvalidArgument(
"The Input(X) has not been initialized properly. The "
"shape of Input(X) = [%s].",
x_dims));
PADDLE_ENFORCE_NE(phi::product(y_dims),
0,
phi::errors::InvalidArgument(
"The Input(Y) has not been initialized properly. The "
"shape of Input(Y) = [%s].",
y_dims));
out->set_dims({1});
  out->set_dtype(x.dtype());
}
void GatherNdInferMeta(const MetaTensor& x,
...
@@ -648,6 +426,78 @@ void GatherTreeMeta(const MetaTensor& ids,
  out->set_dims(ids_dims);
}
void HuberLossInferMeta(const MetaTensor& input,
const MetaTensor& label,
float delta,
MetaTensor* out,
MetaTensor* residual,
MetaConfig config) {
auto input_dims = input.dims();
auto label_dims = label.dims();
PADDLE_ENFORCE_EQ(input_dims.size(),
label_dims.size(),
phi::errors::InvalidArgument(
"Input(input) rank and Input(label) rank should be "
"same, but received input rank(%d) != label rank(%d)",
input_dims.size(),
label_dims.size()));
bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) ||
phi::contain_unknown_dim(label_dims);
if (config.is_runtime || !contain_unknown_dim) {
PADDLE_ENFORCE_EQ(
input_dims,
label_dims,
phi::errors::InvalidArgument(
"The Input(input) and Input(label) should have the same "
"shape, but received input shape [%s] != label shape [%s]",
input_dims,
label_dims));
}
auto out_dims = label_dims;
residual->set_dims(out_dims);
out->set_dims(out_dims);
out->share_lod(input);
}
void IndexSampleInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out,
MetaConfig config) {
auto input_dims = x.dims();
PADDLE_ENFORCE_EQ(input_dims.size(),
2,
errors::InvalidArgument(
"Inputs(X) shape of IndexSample op should be 2-D, but "
"got X's shape = [%s], please check X shape.",
input_dims));
auto index_dims = y.dims();
PADDLE_ENFORCE_EQ(
index_dims.size(),
2,
errors::InvalidArgument(
"Inputs(Index) shape of IndexSample op should be 2-D, but "
"got Index's shape [%s] , please check index shape.",
input_dims));
if (config.is_runtime) {
PADDLE_ENFORCE_EQ(input_dims[0],
index_dims[0],
errors::InvalidArgument(
"Inputs(X)'s value of dimension 0 must same with "
"Inputs(Index)'s value of dimension 0, but "
"got %d of Inputs(X), and got %d of Inputs(Index), "
"please check Inputs shape.",
input_dims[0],
index_dims[0]));
}
out->set_dtype(x.dtype());
out->set_dims(index_dims);
out->share_lod(y);
}
void LogLossInferMeta(const MetaTensor& input,
                      const MetaTensor& label,
                      float epsilon,
...
@@ -690,6 +540,79 @@ void LogLossInferMeta(const MetaTensor& input,
  out->share_lod(input);
}
void MatmulInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool trans_x,
bool trans_y,
MetaTensor* out) {
std::vector<int64_t> dims_x = phi::vectorize(x.dims());
std::vector<int64_t> dims_y = phi::vectorize(y.dims());
auto ndims_x = dims_x.size();
auto ndims_y = dims_y.size();
PADDLE_ENFORCE_GT(ndims_x,
0UL,
phi::errors::InvalidArgument(
"The Input(x) dims size must be greater than 0,"
" but reviced dims size is 0. "));
PADDLE_ENFORCE_GT(ndims_y,
0UL,
phi::errors::InvalidArgument(
"The Input(y) dims size must be greater than 0,"
" but reviced dims size is 0. "));
bool x_broadcasted = false, y_broadcasted = false;
if (ndims_x == 1) {
dims_x.insert(dims_x.begin(), 1);
ndims_x = 2;
x_broadcasted = true;
}
if (ndims_y == 1) {
dims_y.push_back(1);
ndims_y = 2;
y_broadcasted = true;
}
size_t M, N;
if (trans_x) {
M = dims_x[ndims_x - 1];
} else {
M = dims_x[ndims_x - 2];
}
if (trans_y) {
N = dims_y[ndims_y - 2];
} else {
N = dims_y[ndims_y - 1];
}
std::vector<int64_t> new_dims;
if (ndims_x > ndims_y) {
new_dims.assign(dims_x.begin(), dims_x.end() - 2);
} else if (ndims_x < ndims_y) {
new_dims.assign(dims_y.begin(), dims_y.end() - 2);
} else {
new_dims.reserve(ndims_x);
for (size_t i = 0; i < ndims_x - 2; ++i) {
new_dims.push_back(std::max(dims_x[i], dims_y[i]));
}
}
if (!x_broadcasted) {
new_dims.push_back(M);
}
if (!y_broadcasted) {
new_dims.push_back(N);
}
if (x_broadcasted && y_broadcasted) {
new_dims.push_back(1);
}
auto ddim_out = phi::make_ddim(new_dims);
out->set_dims(ddim_out);
out->set_dtype(x.dtype());
out->set_layout(x.layout());
}
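// Editorial note (worked example, not in the original source): with
// trans_x = trans_y = false, x = [2, 3, 4] and y = [4, 5] give M = 3 and
// N = 5, the batch dims [2] come from the higher-rank side, and out is
// [2, 3, 5]. For two 1-D inputs x = [4] and y = [4], both are promoted and
// flagged as broadcast, M and N are dropped again, and out is [1].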
void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
  auto dim_x = x.dims();
  auto dim_vec = vec.dims();
...
@@ -720,6 +643,25 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
  out->share_lod(x);
}
void SegmentPoolInferMeta(const MetaTensor& x,
const MetaTensor& segment_ids,
const std::string& pooltype,
MetaTensor* out,
MetaTensor* summed_ids,
MetaConfig config) {
auto dims = x.dims();
dims[0] = -1;
out->set_dims(dims);
out->set_dtype(x.dtype());
out->set_layout(x.layout());
if (pooltype == "MEAN") {
summed_ids->set_dims({-1, 1});
summed_ids->set_dtype(x.dtype());
summed_ids->set_layout(x.layout());
}
}
void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                            const MetaTensor& label,
                                            bool normalize,
...
@@ -761,4 +703,63 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
  out->share_lod(x);
}
void TriangularSolveInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool upper,
bool transpose,
bool unitriangular,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto x_dims_n = x_dims.size();
auto y_dims_n = y_dims.size();
PADDLE_ENFORCE_GE(x_dims_n,
2,
phi::errors::InvalidArgument(
"The input tensor X's dimensions of TriangularSolveOp "
"should be >= 2. But received X's "
"dimensions = %d, X's shape = [%s]",
x_dims.size(),
x_dims));
PADDLE_ENFORCE_GE(y_dims_n,
2,
phi::errors::InvalidArgument(
"The input tensor Y's dimensions of TriangularSolveOp "
"should be >=2. But received Y's "
"dimensions = %d, Y's shape = [%s]",
y_dims.size(),
y_dims));
PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2],
x_dims[x_dims_n - 1],
phi::errors::InvalidArgument(
"The inner-most 2 dimensions of Input(X) all should "
"be square matrices "
"But received X's shape[-2] = %d and shape[-1] = %d.",
x_dims[x_dims_n - 2],
x_dims[x_dims_n - 1]));
std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
std::vector<int64_t> expand_batch_portion =
funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
std::vector<int64_t> y_broadcast_dims({expand_batch_portion});
y_broadcast_dims.insert(y_broadcast_dims.end(),
{y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]});
// dim of 'out' is the same with 'Y' after broadcast
out->set_dims(phi::make_ddim(y_broadcast_dims));
out->set_dtype(y.dtype());
out->set_layout(y.layout());
out->share_lod(y);
}
}  // namespace phi
@@ -29,22 +29,43 @@ namespace phi {
 // Because functions in this file can not only infer shape, but also need
 // to infer lod or other useful data.
void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
void BCELossInferMeta(const MetaTensor& input,
const MetaTensor& label,
MetaTensor* out,
MetaConfig config = MetaConfig());
void BincountInferMeta(const MetaTensor& x,
const paddle::optional<const MetaTensor&> weights,
int minlength,
MetaTensor* out);
void CholeskySolveInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool upper,
MetaTensor* out);
void CompareAllInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out);
 void CompareInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
                       int axis,
                       MetaTensor* out);

-void CompareAllInferMeta(const MetaTensor& x,
-                         const MetaTensor& y,
-                         MetaTensor* out);
+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out);

+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out);

 void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);

-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out);

 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
@@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
                             int axis,
                             MetaTensor* out);
void GatherNdInferMeta(const MetaTensor& x,
const MetaTensor& index,
MetaTensor* out);
void GatherTreeMeta(const MetaTensor& ids,
const MetaTensor& parents,
MetaTensor* out);
void HuberLossInferMeta(const MetaTensor& input_meta,
                        const MetaTensor& label_meta,
                        float delta,
...
@@ -62,29 +91,24 @@ void HuberLossInferMeta(const MetaTensor& input_meta,
                        MetaTensor* residual,
                        MetaConfig config = MetaConfig());
-void CholeskySolveInferMeta(const MetaTensor& x,
-                            const MetaTensor& y,
-                            bool upper,
-                            MetaTensor* out);
-
-void TriangularSolveInferMeta(const MetaTensor& x,
-                              const MetaTensor& y,
-                              bool upper,
-                              bool transpose,
-                              bool unitriangular,
-                              MetaTensor* out);

 void IndexSampleInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out,
                           MetaConfig config = MetaConfig());

-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out);
-
-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void LogLossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      float epsilon,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());

+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out);

+void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);

 void SegmentPoolInferMeta(const MetaTensor& x,
                           const MetaTensor& segment_ids,
@@ -93,37 +117,6 @@ void SegmentPoolInferMeta(const MetaTensor& x,
                           MetaTensor* summed_ids,
                           MetaConfig config = MetaConfig());
void BCELossInferMeta(const MetaTensor& input,
const MetaTensor& label,
MetaTensor* out,
MetaConfig config = MetaConfig());
void BincountInferMeta(const MetaTensor& x,
const paddle::optional<const MetaTensor&> weights,
int minlength,
MetaTensor* out);
void DistInferMeta(const MetaTensor& x,
const MetaTensor& y,
float p,
MetaTensor* out);
void GatherNdInferMeta(const MetaTensor& x,
const MetaTensor& index,
MetaTensor* out);
void GatherTreeMeta(const MetaTensor& ids,
const MetaTensor& parents,
MetaTensor* out);
void LogLossInferMeta(const MetaTensor& input,
const MetaTensor& label,
float epsilon,
MetaTensor* out,
MetaConfig config = MetaConfig());
void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                            const MetaTensor& label,
                                            bool normalize,
...
@@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                            MetaTensor* out,
                                            MetaConfig config = MetaConfig());
void TriangularSolveInferMeta(const MetaTensor& x,
const MetaTensor& y,
bool upper,
bool transpose,
bool unitriangular,
MetaTensor* out);
}  // namespace phi
@@ -16,6 +16,12 @@ limitations under the License. */

 namespace phi {
void CreateInferMeta(const ScalarArray& shape,
DataType dtype,
MetaTensor* out) {
CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
}
void CreateInferMetaBase(const std::vector<int64_t>& shape,
                         DataType dtype,
                         DataLayout layout,
...
@@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector<int64_t>& shape,
  out->set_layout(layout);
}
void CreateInferMeta(const ScalarArray& shape,
DataType dtype,
MetaTensor* out) {
CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
}
void EyeInferMeta(int64_t num_rows,
                  int64_t num_columns,
                  DataType dtype,
...
@@ -41,18 +41,6 @@ void EyeInferMeta(int64_t num_rows,
  out->set_dtype(dtype);
}
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
int seed,
DataType dtype,
MetaTensor* out) {
auto out_dims = phi::make_ddim(shape);
out->set_dims(out_dims);
out->set_dtype(dtype);
out->set_layout(DataLayout::NCHW);
}
void GaussianRandomInferMeta(const ScalarArray& shape,
                             float mean,
                             float std,
...
@@ -65,4 +53,16 @@ void GaussianRandomInferMeta(const ScalarArray& shape,
  out->set_layout(DataLayout::NCHW);
}
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
int seed,
DataType dtype,
MetaTensor* out) {
auto out_dims = phi::make_ddim(shape);
out->set_dims(out_dims);
out->set_dtype(dtype);
out->set_layout(DataLayout::NCHW);
}
}  // namespace phi
@@ -28,25 +28,18 @@ namespace phi {
 // Because functions in this file can not only infer shape, but also need
 // to infer lod or other useful data.

+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);

 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
                          MetaTensor* out);

-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
void EyeInferMeta(int64_t num_rows,
                  int64_t num_columns,
                  DataType dtype,
                  MetaTensor* out);
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
int seed,
DataType dtype,
MetaTensor* out);
void GaussianRandomInferMeta(const ScalarArray& shape,
                             float mean,
                             float std,
...
@@ -54,4 +47,11 @@ void GaussianRandomInferMeta(const ScalarArray& shape,
                             DataType dtype,
                             MetaTensor* out);
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
int seed,
DataType dtype,
MetaTensor* out);
}  // namespace phi
@@ -18,6 +18,58 @@ limitations under the License. */

 namespace phi {
void AccuracyInferMeta(const MetaTensor& out,
const MetaTensor& indice,
const MetaTensor& label,
MetaTensor* accuracy,
MetaTensor* correct,
MetaTensor* total,
MetaConfig config) {
auto inference_dim = out.dims();
auto label_dim = label.dims();
// Assume indices has same shape as inference, because
// it's the output of topk.
PADDLE_ENFORCE_EQ(
label_dim.size(),
2,
phi::errors::InvalidArgument(
"ShapeError: label's dimensions of AccuracyOp must be 2. "
"But received label's dimensions = %d, label's shape = [%s]",
label_dim.size(),
label_dim));
if (config.is_runtime) {
PADDLE_ENFORCE_EQ(label_dim[1],
1,
phi::errors::InvalidArgument(
"ShapeError: label's second dimension of "
"AccuracyOp must be 1. But received label's "
"second dimension is = %d, label's shape = [%s]",
label_dim[1],
label_dim));
PADDLE_ENFORCE_EQ(
inference_dim[0],
label_dim[0],
phi::errors::InvalidArgument(
"ShapeError: the output's num_rows of AccuracyOp must be"
" the same as label's num_rows. But received output's "
"shape = [%s], label's shape = [%s], output's num_rows = %d, "
"label's "
"num_rows = %d",
inference_dim,
label_dim,
inference_dim[0],
label_dim[0]));
}
accuracy->set_dims({1});
accuracy->set_dtype(out.dtype());
correct->set_dims({1});
correct->set_dtype(out.dtype());
total->set_dims({1});
total->set_dtype(out.dtype());
accuracy->share_lod(out);
}
void AddmmInferMeta(const MetaTensor& input,
                    const MetaTensor& x,
                    const MetaTensor& y,
...
@@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input,
  out->set_dtype(input.dtype());
}
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& pool_type,
MetaTensor* out,
MetaTensor* dst_count) {
auto src_index_dims = src_index.dims();
if (src_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(src_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d",
src_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
src_index_dims.size(),
1,
phi::errors::InvalidArgument(
"The Src_index should be 1D, when it is not 2D, but we get %d",
src_index_dims.size()));
}
auto dst_index_dims = dst_index.dims();
if (dst_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(dst_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d",
dst_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
dst_index_dims.size(),
1,
phi::errors::InvalidArgument("The Dst_index should be 1D, "
"when it is not 2D, but we get %d",
dst_index_dims.size()));
}
PADDLE_ENFORCE_EQ(src_index_dims[0],
dst_index_dims[0],
phi::errors::InvalidArgument(
"Src_index and Dst_index should have the same shape."));
auto dims = x.dims();
out->set_dims(dims);
out->set_dtype(x.dtype());
if (pool_type == "MEAN") {
dst_count->set_dims({dims[0]});
dst_count->set_dtype(DataType::INT32);
}
}
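// Editorial note (reading of the code above): dst_count only gets a shape
// and dtype for "MEAN" pooling, presumably because the kernel then needs
// per-destination counts to average the gathered features; the other pool
// types leave it unset.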
void LerpInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto w_dims = weight.dims();
DDim out_dims;
out_dims = funcs::GetOutputDims(x_dims, y_dims);
if (w_dims.size() > 1 || w_dims[0] != 1) {
out_dims = funcs::GetOutputDims(out_dims, w_dims);
}
out->set_dims(out_dims);
out->set_dtype(x.dtype());
out->share_lod(x);
}
void LinspaceInferMeta(const MetaTensor& start,
const MetaTensor& stop,
const MetaTensor& number,
MetaTensor* out) {
auto s_dims = start.dims();
PADDLE_ENFORCE_EQ(
(s_dims.size() == 1) && (s_dims[0] == 1),
true,
phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
"but received input shape is [%s].",
s_dims));
auto e_dims = stop.dims();
PADDLE_ENFORCE_EQ(
(e_dims.size() == 1) && (e_dims[0] == 1),
true,
phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
"but received input shape is [%s].",
e_dims));
auto step_dims = number.dims();
PADDLE_ENFORCE_EQ(
(step_dims.size() == 1) && (step_dims[0] == 1),
true,
phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
"but received input shape is [%s].",
step_dims));
out->set_dims(phi::make_ddim({-1}));
out->set_dtype(start.dtype());
}
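// Editorial note (assumption): the {-1} output dim set above follows
// Paddle's convention that -1 marks a size only known at run time; here the
// element count is the value of the Num tensor, which static shape
// inference cannot read.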
void NllLossRawInferMeta(const MetaTensor& input,
                         const MetaTensor& label,
                         paddle::optional<const MetaTensor&> weight,
...
@@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
  scores->set_dtype(length.dtype());
}
void LerpInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
MetaTensor* out) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto w_dims = weight.dims();
DDim out_dims;
out_dims = funcs::GetOutputDims(x_dims, y_dims);
if (w_dims.size() > 1 || w_dims[0] != 1) {
out_dims = funcs::GetOutputDims(out_dims, w_dims);
}
out->set_dims(out_dims);
out->set_dtype(x.dtype());
out->share_lod(x);
}
void LinspaceInferMeta(const MetaTensor& start,
const MetaTensor& stop,
const MetaTensor& number,
MetaTensor* out) {
auto s_dims = start.dims();
PADDLE_ENFORCE_EQ(
(s_dims.size() == 1) && (s_dims[0] == 1),
true,
phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
"but received input shape is [%s].",
s_dims));
auto e_dims = stop.dims();
PADDLE_ENFORCE_EQ(
(e_dims.size() == 1) && (e_dims[0] == 1),
true,
phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
"but received input shape is [%s].",
e_dims));
auto step_dims = number.dims();
PADDLE_ENFORCE_EQ(
(step_dims.size() == 1) && (step_dims[0] == 1),
true,
phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
"but received input shape is [%s].",
step_dims));
out->set_dims(phi::make_ddim({-1}));
out->set_dtype(start.dtype());
}
void AccuracyInferMeta(const MetaTensor& out,
const MetaTensor& indice,
const MetaTensor& label,
MetaTensor* accuracy,
MetaTensor* correct,
MetaTensor* total,
MetaConfig config) {
auto inference_dim = out.dims();
auto label_dim = label.dims();
// Assume indices has same shape as inference, because
// it's the output of topk.
PADDLE_ENFORCE_EQ(
label_dim.size(),
2,
phi::errors::InvalidArgument(
"ShapeError: label's dimensions of AccuracyOp must be 2. "
"But received label's dimensions = %d, label's shape = [%s]",
label_dim.size(),
label_dim));
if (config.is_runtime) {
PADDLE_ENFORCE_EQ(label_dim[1],
1,
phi::errors::InvalidArgument(
"ShapeError: label's second dimension of "
"AccuracyOp must be 1. But received label's "
"second dimension is = %d, label's shape = [%s]",
label_dim[1],
label_dim));
PADDLE_ENFORCE_EQ(
inference_dim[0],
label_dim[0],
phi::errors::InvalidArgument(
"ShapeError: the output's num_rows of AccuracyOp must be"
" the same as label's num_rows. But received output's "
"shape = [%s], label's shape = [%s], output's num_rows = %d, "
"label's "
"num_rows = %d",
inference_dim,
label_dim,
inference_dim[0],
label_dim[0]));
}
accuracy->set_dims({1});
accuracy->set_dtype(out.dtype());
correct->set_dims({1});
correct->set_dtype(out.dtype());
total->set_dims({1});
total->set_dtype(out.dtype());
accuracy->share_lod(out);
}
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& pool_type,
MetaTensor* out,
MetaTensor* dst_count) {
auto src_index_dims = src_index.dims();
if (src_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(src_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d",
src_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
src_index_dims.size(),
1,
phi::errors::InvalidArgument(
"The Src_index should be 1D, when it is not 2D, but we get %d",
src_index_dims.size()));
}
auto dst_index_dims = dst_index.dims();
if (dst_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(dst_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d",
dst_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
dst_index_dims.size(),
1,
phi::errors::InvalidArgument("The Dst_index should be 1D, "
"when it is not 2D, but we get %d",
dst_index_dims.size()));
}
PADDLE_ENFORCE_EQ(src_index_dims[0],
dst_index_dims[0],
phi::errors::InvalidArgument(
"Src_index and Dst_index should have the same shape."));
auto dims = x.dims();
out->set_dims(dims);
out->set_dtype(x.dtype());
if (pool_type == "MEAN") {
dst_count->set_dims({dims[0]});
dst_count->set_dtype(DataType::INT32);
}
}
}  // namespace phi
@@ -45,16 +45,22 @@ void AddmmInferMeta(const MetaTensor& input,
                     float beta,
                     MetaTensor* out);
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad);
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count);

-void ScatterInferMeta(const MetaTensor& x,
-                      const MetaTensor& index,
-                      const MetaTensor& updates,
-                      bool overwrite,
-                      MetaTensor* out);
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out);

+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out);
void NllLossRawInferMeta(const MetaTensor& input,
                         const MetaTensor& label,
...
@@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input,
                         MetaTensor* total_weight,
                         MetaConfig config = MetaConfig());
void ScatterInferMeta(const MetaTensor& x,
const MetaTensor& index,
const MetaTensor& updates,
bool overwrite,
MetaTensor* out);
void ScatterNdAddInferMeta(const MetaTensor& x,
                           const MetaTensor& index,
                           const MetaTensor& updates,
...
@@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
                            MetaTensor* path,
                            MetaConfig config = MetaConfig());
void LerpInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
MetaTensor* out);
void LinspaceInferMeta(const MetaTensor& start,
const MetaTensor& stop,
const MetaTensor& number,
MetaTensor* out);
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& pool_type,
MetaTensor* out,
MetaTensor* dst_count);
}  // namespace phi