Commit 672c9681 authored by tensor-tang

remove gpu code when backward mkldnn_softmax

Parent 2c6ac629
@@ -189,27 +189,19 @@ Error __must_check MKLDNNSoftmaxActivation::forward(Argument& act) {
 Error __must_check MKLDNNSoftmaxActivation::backward(Argument& act) {
   MatrixPtr outputV = act.value;
   MatrixPtr outputG = act.grad;
-
-  if (outputG->useGpu()) {
-    outputG->softmaxBackward(*outputV);
-  } else {
-    SetDevice device(act.deviceId);
-    Matrix::resizeOrCreate(sftMaxDot_,
-                           outputG->getHeight(),
-                           outputG->getWidth(),
-                           /* trans */ false,
-                           useGpu(act.deviceId));
-    Matrix::resizeOrCreate(sftMaxSum_,
-                           outputG->getHeight(),
-                           1,
-                           /* trans */ false,
-                           useGpu(act.deviceId));
-    sftMaxDot_->dotMul(*outputG, *outputV);
-    sftMaxSum_->colMerge(*sftMaxDot_);
-    act.grad->softmaxDerivative(*act.value, *sftMaxSum_);
-  }
+  Matrix::resizeOrCreate(sftMaxDot_,
+                         outputG->getHeight(),
+                         outputG->getWidth(),
+                         /* trans */ false,
+                         /* useGpu */ false);
+  Matrix::resizeOrCreate(sftMaxSum_,
+                         outputG->getHeight(),
+                         1,
+                         /* trans */ false,
+                         /* useGpu */ false);
+  sftMaxDot_->dotMul(*outputG, *outputV);
+  sftMaxSum_->colMerge(*sftMaxDot_);
+  act.grad->softmaxDerivative(*act.value, *sftMaxSum_);
   return Error();
 }
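For context: the retained CPU path computes the standard softmax gradient. With y the softmax output (act.value) and dy the incoming gradient (act.grad), it forms dx = y .* (dy - rowsum(dy .* y)), where dotMul is the elementwise product, colMerge the per-row sum, and softmaxDerivative the final combination. Hard-coding the useGpu flags to false is consistent with MKL-DNN being a CPU-only backend. Below is a minimal standalone sketch of that math in plain C++; it uses raw vectors rather than Paddle's Matrix API, and the function name and row-major height-by-width layout are assumptions for illustration, not part of the commit.

#include <cstddef>
#include <vector>

// Sketch of the softmax backward math the CPU path implements.
// For each row: dx = y .* (dy - sum(dy .* y))
//   dotMul            ~ elementwise product dy .* y
//   colMerge          ~ per-row sum of that product
//   softmaxDerivative ~ dx = y .* (dy - rowSum)
void softmaxBackwardCpu(const std::vector<float>& y,  // softmax output, height*width
                        std::vector<float>& dy,       // gradient, updated in place
                        size_t height,
                        size_t width) {
  for (size_t i = 0; i < height; ++i) {
    const float* yr = &y[i * width];
    float* gr = &dy[i * width];
    float rowSum = 0.f;  // colMerge of dotMul(dy, y) for this row
    for (size_t j = 0; j < width; ++j) rowSum += gr[j] * yr[j];
    // Overwrite dy with the input gradient, as softmaxDerivative does on act.grad.
    for (size_t j = 0; j < width; ++j) gr[j] = yr[j] * (gr[j] - rowSum);
  }
}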