Unverified commit 99c78b77, authored by Kaipeng Deng, committed by GitHub

fix softmax axis!=-1. test=develop (#19800)

Parent 6a1db204
@@ -41,6 +41,7 @@ void SoftmaxEigen(const DeviceContext& context, const int axis_dim,
                   const framework::Tensor* X, framework::Tensor* Y) {
   constexpr int kBatchDim = 0;
   constexpr int kClassDim = 1;
+  constexpr int kAxisDim = 1;
 
   auto logits = EigenMatrix<T>::From(*X);
   auto softmax = EigenMatrix<T>::From(*Y);
@@ -49,26 +50,28 @@ void SoftmaxEigen(const DeviceContext& context, const int axis_dim,
   const int num_classes = logits.dimension(kClassDim);
   const int num_remain = num_classes / axis_dim;
 
-  Eigen::DSizes<int, 1> along_class(kClassDim);
-  Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
-  Eigen::DSizes<int, 2> one_by_class(1, num_classes);
+  Eigen::DSizes<int, 1> along_axis(kAxisDim);
+  Eigen::DSizes<int, 2> batch_classes(batch_size, num_classes);
+  Eigen::DSizes<int, 3> batch_one_remain(batch_size, 1, num_remain);
+  Eigen::DSizes<int, 3> one_axis_one(1, axis_dim, 1);
   Eigen::DSizes<int, 3> batch_axis_remain(batch_size, axis_dim, num_remain);
-  Eigen::DSizes<int, 2> one_axis(1, axis_dim);
 
-  auto shifted_logits = (logits -
-                         logits.maximum(along_class)
+  auto logits_reshape = logits.reshape(batch_axis_remain);
+  auto shifted_logits = (logits_reshape -
+                         logits_reshape.maximum(along_axis)
                              .eval()
-                             .reshape(batch_by_one)
-                             .broadcast(one_by_class))
+                             .reshape(batch_one_remain)
+                             .broadcast(one_axis_one))
                             .unaryExpr(ValueClip<T>());
 
-  softmax.device(*context.eigen_device()) = shifted_logits.exp();
-  softmax.device(*context.eigen_device()) = (softmax *
-                                             softmax.reshape(batch_axis_remain)
-                                                 .sum(along_class)
+  auto exp = shifted_logits.exp();
+  softmax.device(*context.eigen_device()) = (exp *
+                                             exp.sum(along_axis)
                                                  .inverse()
                                                  .eval()
-                                                 .broadcast(one_axis));
+                                                 .reshape(batch_one_remain)
+                                                 .broadcast(one_axis_one))
+                                                .reshape(batch_classes);
 }
 
 template <typename DeviceContext, typename T, bool is_test, typename Enable>
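Why the change was needed: the old kernel computed the shifting maximum and the normalizing sum along the class dimension (along_class), which is only correct when the softmax axis is the last dimension (axis == -1). The patched code reshapes the logits to (batch_size, axis_dim, num_remain) and reduces along the middle (axis) dimension, so normalization is done per (batch, remain) slice, then reshapes the result back to (batch_size, num_classes).

For reference, below is a minimal standalone Eigen sketch, not part of the commit and not Paddle code, that illustrates the same reshape-and-reduce pattern. The dimensions, variable names, and main() driver are illustrative assumptions only.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // Input viewed as (batch, axis_dim, remain): softmax runs over axis_dim
  // (dimension 1) independently for every (batch, remain) pair.
  const int batch = 2, axis_dim = 3, remain = 4;
  Eigen::Tensor<float, 3> x(batch, axis_dim, remain);
  x.setRandom();

  Eigen::DSizes<int, 1> along_axis(1);
  Eigen::DSizes<int, 3> batch_one_remain(batch, 1, remain);
  Eigen::DSizes<int, 3> one_axis_one(1, axis_dim, 1);  // broadcast factors

  // Shift by the per-(batch, remain) maximum for numerical stability,
  // exponentiate, then normalize by the per-(batch, remain) sum.
  Eigen::Tensor<float, 3> shifted =
      x - x.maximum(along_axis).eval().reshape(batch_one_remain).broadcast(one_axis_one);
  Eigen::Tensor<float, 3> e = shifted.exp();
  Eigen::Tensor<float, 3> y =
      e / e.sum(along_axis).eval().reshape(batch_one_remain).broadcast(one_axis_one);

  // Sanity check: every slice along dimension 1 now sums to 1.
  Eigen::Tensor<float, 2> slice_sums = y.sum(along_axis);
  std::cout << slice_sums << std::endl;  // a batch x remain block of ones
  return 0;
}

Compiled against Eigen's unsupported Tensor module, this reproduces the shape bookkeeping that batch_axis_remain, batch_one_remain, and one_axis_one perform in the patched kernel.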