Commit cf924728 authored by dongzhihong

clang format

Parent 7945572c
@@ -45,7 +45,8 @@ class SoftmaxKernel : public framework::OpKernel {
     Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
     Eigen::DSizes<int, 2> one_by_class(1, num_classes);
-    auto shifted_logits = (logits - logits.maximum(along_class)
+    auto shifted_logits = (logits -
+                           logits.maximum(along_class)
                                .eval()
                                .reshape(batch_by_one)
                                .broadcast(one_by_class));
@@ -53,7 +54,8 @@ class SoftmaxKernel : public framework::OpKernel {
     softmax.device(context.GetEigenDevice<Place>()) = shifted_logits.exp();
     softmax.device(context.GetEigenDevice<Place>()) =
-        (softmax * softmax.sum(along_class)
+        (softmax *
+         softmax.sum(along_class)
              .inverse()
              .eval()
              .reshape(batch_by_one)
...
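For reference (not part of the commit itself), the kernel lines touched by this reformatting compute the standard numerically stable softmax: the per-row maximum is subtracted from the logits before exponentiation, and the exponentials are then normalized by their row sum. In LaTeX notation, a restatement of what the code above evaluates:

\mathrm{softmax}(x)_i = \frac{\exp\!\left(x_i - \max_j x_j\right)}{\sum_k \exp\!\left(x_k - \max_j x_j\right)}

Subtracting the maximum leaves the result unchanged but keeps the exponentials in a safe numeric range.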