Commit 672c9681 authored by tensor-tang

remove gpu code when backward mkldnn_softmax

Parent 2c6ac629
...
@@ -189,27 +189,19 @@ Error __must_check MKLDNNSoftmaxActivation::forward(Argument& act) {
 Error __must_check MKLDNNSoftmaxActivation::backward(Argument& act) {
   MatrixPtr outputV = act.value;
   MatrixPtr outputG = act.grad;
-  if (outputG->useGpu()) {
-    outputG->softmaxBackward(*outputV);
-  } else {
-    SetDevice device(act.deviceId);
-    Matrix::resizeOrCreate(sftMaxDot_,
-                           outputG->getHeight(),
-                           outputG->getWidth(),
-                           /* trans */ false,
-                           useGpu(act.deviceId));
-    Matrix::resizeOrCreate(sftMaxSum_,
-                           outputG->getHeight(),
-                           1,
-                           /* trans */ false,
-                           useGpu(act.deviceId));
-    sftMaxDot_->dotMul(*outputG, *outputV);
-    sftMaxSum_->colMerge(*sftMaxDot_);
-    act.grad->softmaxDerivative(*act.value, *sftMaxSum_);
-  }
+  Matrix::resizeOrCreate(sftMaxDot_,
+                         outputG->getHeight(),
+                         outputG->getWidth(),
+                         /* trans */ false,
+                         /* useGpu */ false);
+  Matrix::resizeOrCreate(sftMaxSum_,
+                         outputG->getHeight(),
+                         1,
+                         /* trans */ false,
+                         /* useGpu */ false);
+  sftMaxDot_->dotMul(*outputG, *outputV);
+  sftMaxSum_->colMerge(*sftMaxDot_);
+  act.grad->softmaxDerivative(*act.value, *sftMaxSum_);
   return Error();
 }
...
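For reference, the surviving CPU path computes the standard softmax Jacobian-vector product: for a row of softmax output y and upstream gradient g, dL/dx = (g - sum_j g_j * y_j) * y elementwise, which is what the dotMul (elementwise product), colMerge (row-wise sum into sftMaxSum_), and softmaxDerivative calls implement. The GPU branch and SetDevice call become dead weight once this activation only runs on CPU, which matches the commit title. Below is a minimal sketch of that math in plain C++; softmaxBackwardCpu is a hypothetical standalone function for illustration, not part of Paddle's Matrix API.

// Hypothetical sketch of the softmax backward math done above,
// using std::vector instead of Paddle's Matrix class.
#include <cstddef>
#include <vector>

// In-place softmax backward for a height x width row-major batch.
// For each row i:
//   sum_i        = sum_j grad[i][j] * output[i][j]        (dotMul + colMerge)
//   grad[i][j]   = (grad[i][j] - sum_i) * output[i][j]    (softmaxDerivative)
void softmaxBackwardCpu(std::vector<float>& grad,
                        const std::vector<float>& output,
                        size_t height, size_t width) {
  for (size_t i = 0; i < height; ++i) {
    // Row-wise dot product of upstream gradient and softmax output.
    float sum = 0.0f;
    for (size_t j = 0; j < width; ++j) {
      sum += grad[i * width + j] * output[i * width + j];
    }
    // Subtract the row sum, then scale by the softmax output.
    for (size_t j = 0; j < width; ++j) {
      grad[i * width + j] =
          (grad[i * width + j] - sum) * output[i * width + j];
    }
  }
}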