Unverified commit 3f021781, authored by Kaipeng Deng, committed by GitHub

fix softmax CE time limit check failed (#19846)

* fix softmax ce time limit check failed. test=develop

* refine softmax calc. test=develop
Parent a4919d36
@@ -52,26 +52,45 @@ void SoftmaxEigen(const DeviceContext& context, const int axis_dim,
   Eigen::DSizes<int, 1> along_axis(kAxisDim);
   Eigen::DSizes<int, 2> batch_classes(batch_size, num_classes);
+  Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
+  Eigen::DSizes<int, 2> one_by_class(1, num_classes);
   Eigen::DSizes<int, 3> batch_one_remain(batch_size, 1, num_remain);
   Eigen::DSizes<int, 3> one_axis_one(1, axis_dim, 1);
+  Eigen::DSizes<int, 2> one_axis(1, axis_dim);
   Eigen::DSizes<int, 3> batch_axis_remain(batch_size, axis_dim, num_remain);
 
-  auto logits_reshape = logits.reshape(batch_axis_remain);
-  auto shifted_logits = (logits_reshape -
-                         logits_reshape.maximum(along_axis)
-                             .eval()
-                             .reshape(batch_one_remain)
-                             .broadcast(one_axis_one))
-                            .unaryExpr(ValueClip<T>());
-
-  auto exp = shifted_logits.exp();
-  softmax.device(*context.eigen_device()) = (exp *
-                                             exp.sum(along_axis)
-                                                 .inverse()
-                                                 .eval()
-                                                 .reshape(batch_one_remain)
-                                                 .broadcast(one_axis_one))
-                                                .reshape(batch_classes);
+  // For numerical stability, logits should be shifted by maximum number along
+  // axis, calculate shifted_logits into softmax tensor for memory reuse.
+  if (num_remain == 1) {
+    // axis == -1, axis and class in same dimension, calculate along
+    // class dimension directly for higher performance
+    softmax.device(*context.eigen_device()) = (logits -
+                                               logits.maximum(along_axis)
+                                                   .eval()
+                                                   .reshape(batch_by_one)
+                                                   .broadcast(one_by_class))
+                                                  .unaryExpr(ValueClip<T>());
+  } else {
+    // axis != -1, class dimension split into (axis, remain), max and sum
+    // should be calculated along axis dimension
+    softmax.device(*context.eigen_device()) =
+        (logits.reshape(batch_axis_remain) -
+         logits.reshape(batch_axis_remain)
+             .maximum(along_axis)
+             .eval()
+             .reshape(batch_one_remain)
+             .broadcast(one_axis_one)
+             .reshape(batch_classes))
+            .unaryExpr(ValueClip<T>());
+  }
+
+  softmax.device(*context.eigen_device()) = softmax.exp();
+  softmax.device(*context.eigen_device()) = (softmax *
+                                             softmax.reshape(batch_axis_remain)
+                                                 .sum(along_axis)
+                                                 .inverse()
+                                                 .eval()
+                                                 .broadcast(one_axis));
 }
 
 template <typename DeviceContext, typename T, bool is_test, typename Enable>
...
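For reference, the Eigen expressions above implement the standard numerically stable softmax: each slice along the softmax axis is shifted by its maximum before exponentiation (so exp cannot overflow), exponentiated, and then normalized by the slice sum. Below is a minimal scalar sketch of the axis == -1 branch, assuming a row-major [batch, classes] layout; it is illustrative plain C++ with made-up names, not the PaddlePaddle kernel.

#include <algorithm>
#include <cmath>
#include <vector>

// Illustrative scalar version of the axis == -1 branch: for every row,
// subtract the row maximum (numerical stability), exponentiate, then
// divide by the row sum so each row sums to 1.
void SoftmaxLastAxis(const std::vector<float>& logits, int batch, int classes,
                     std::vector<float>* out) {
  out->resize(logits.size());
  for (int b = 0; b < batch; ++b) {
    const float* row = logits.data() + b * classes;
    float* dst = out->data() + b * classes;
    const float max_val = *std::max_element(row, row + classes);
    float sum = 0.f;
    for (int c = 0; c < classes; ++c) {
      dst[c] = std::exp(row[c] - max_val);  // shifted logits cannot overflow
      sum += dst[c];
    }
    for (int c = 0; c < classes; ++c) {
      dst[c] /= sum;  // normalize the row
    }
  }
}

The num_remain != 1 branch of the patch performs the same shift, exp, and normalize steps, but takes the maximum and sum over the axis slices of the (batch, axis, remain) view instead of over whole rows.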