Commit 706c5724 authored by xutianbing

Matrix API refactor: when passing parameters, convert shared_ptr (MatrixPtr)
to a reference or raw pointer (Matrix& or Matrix*). Affected functions:

- contextProjectionForward
- contextProjectionBackward
- contextProjectionBackwardData
- contextProjectionBackwardWeight
- classificationError

The mul functions will be updated later.
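A minimal, self-contained sketch of the convention this commit adopts (the Matrix stub and the forwardOld/forwardNew names below are illustrative stand-ins, not code from this patch): a required argument becomes a Matrix&, an optional argument becomes a nullable Matrix*, and call sites that hold a MatrixPtr (a std::shared_ptr<Matrix>) pass *ptr or ptr.get() while ownership stays with the caller.

#include <memory>

// Illustrative stand-in for the real Matrix class.
struct Matrix {};
using MatrixPtr = std::shared_ptr<Matrix>;

// Old style: shared_ptr parameters, even though the callee neither stores
// the pointer nor shares ownership.
void forwardOld(MatrixPtr input, MatrixPtr weight) { /* ... */ }

// New style: the required input is a reference; the optional weight is a
// raw pointer that may be nullptr. Ownership stays with the caller.
void forwardNew(Matrix& input, Matrix* weight) { /* ... */ }

int main() {
  MatrixPtr in = std::make_shared<Matrix>();
  MatrixPtr w;  // left empty: the weight is optional
  forwardOld(in, w);
  // Dereference for the reference parameter; use .get() (or nullptr)
  // for the optional pointer parameter.
  forwardNew(*in, w ? w.get() : nullptr);
  return 0;
}

The hunks below apply the same pattern at each call site, e.g. *(in_->value) for a required matrix and weight_->getW().get() or nullptr for an optional one.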
Parent 80b45ad1
@@ -78,7 +78,7 @@ public:
                            useGpu(arguments[0].deviceId));
     errorMat->zeroMem();
     if (label != nullptr) {
-      errorMat->classificationError(output, label);
+      errorMat->classificationError(*output, *label);
     } else if (dynamic_cast<CpuSparseMatrix*>(multiBinaryLabel.get()) ||
                dynamic_cast<GpuSparseMatrix*>(multiBinaryLabel.get())) {
       errorMat->classificationErrorMulti(
...
@@ -90,8 +90,8 @@ void ContextProjection::forward() {
   REGISTER_TIMER_INFO("ContextProjectionForward", getName().c_str());
   bool isPadding = config_.trainable_padding();
   out_->value->contextProjectionForward(
-      in_->value,
-      state_ ? state_ : isPadding ? weight_->getW() : nullptr,
+      *(in_->value),
+      state_ ? state_.get() : isPadding ? weight_->getW().get() : nullptr,
       *startPositions,
       config_.context_length(),
       config_.context_start(),
@@ -128,8 +128,8 @@ void ContextProjection::backward(const UpdateCallback& callback) {
   bool isPadding = config_.trainable_padding();
   if (!out_->grad->useGpu()) {
     out_->grad->contextProjectionBackward(
-        in_->grad,
-        isPadding ? weight_->getWGrad() : nullptr,
+        in_->grad.get(),
+        isPadding ? weight_->getWGrad().get() : nullptr,
         *startPositions,
         config_.context_length(),
         config_.context_start(),
@@ -137,7 +137,7 @@ void ContextProjection::backward(const UpdateCallback& callback) {
         isPadding);
   } else {
     if (in_->grad) {
-      out_->grad->contextProjectionBackwardData(in_->grad,
+      out_->grad->contextProjectionBackwardData(*(in_->grad),
                                                 *startPositions,
                                                 config_.context_length(),
                                                 config_.context_start());
@@ -145,7 +145,7 @@ void ContextProjection::backward(const UpdateCallback& callback) {
     if (isPadding && weight_->getWGrad()) {
       out_->grad->contextProjectionBackwardWeight(
-          weight_->getWGrad(),
+          *(weight_->getWGrad()),
           *startPositions,
           config_.context_length(),
           config_.context_start(),
...
@@ -766,20 +766,19 @@ void GpuMatrix::maxoutBackward(Matrix& a,
 }

 /* calculate the error of classification */
-void GpuMatrix::classificationError(MatrixPtr output, IVectorPtr label) {
-  GpuMatrixPtr output_ptr = std::dynamic_pointer_cast<GpuMatrix>(output);
-  GpuIVectorPtr label_ptr = std::dynamic_pointer_cast<GpuIVector>(label);
+void GpuMatrix::classificationError(Matrix& output, IVector& label) {
+  auto output_ptr = dynamic_cast<const GpuMatrix*>(&output);
+  auto label_ptr = dynamic_cast<const GpuIVector*>(&label);
   CHECK(output_ptr && label_ptr) << "Invalid argument pointer";
   CHECK(height_ == output_ptr->height_ && width_ == 1)
       << "Matrix dimensions are not equal";

-  real* output_d = output_ptr->data_;
-  real* recResult_d = data_;
-  int* label_d = label_ptr->getData();
-
-  hl_matrix_classification_error(
-      output_d, label_d, recResult_d, height_, output_ptr->width_);
+  hl_matrix_classification_error((real*)output_ptr->data_,
+                                 (int*)label_ptr->getData(),
+                                 data_,
+                                 height_,
+                                 output_ptr->width_);
 }

 /* copy -log(output[i * width + label]) to this->data[i] */
@@ -1370,86 +1369,62 @@ void GpuMatrix::maxSequenceBackward(Matrix& outputGrad,
   hl_max_sequence_backward(outGrad, maxIndex, inputGrad, numSequences, dim);
 }

-void GpuMatrix::contextProjectionForward(MatrixPtr input,
-                                         MatrixPtr weight,
+void GpuMatrix::contextProjectionForward(Matrix& input,
+                                         Matrix* weight,
                                          const IVector& sequence,
                                          int contextLength,
                                          int contextStart,
                                          size_t beginPad,
                                          bool isPadding) {
-  CHECK(dynamic_cast<GpuMatrix*>(input.get()));
+  CHECK(dynamic_cast<GpuMatrix*>(&input));
   CHECK(dynamic_cast<const GpuIVector*>(&sequence));
-  if (weight) CHECK(dynamic_cast<GpuMatrix*>(weight.get()));
-
-  size_t numSequences = sequence.getSize() - 1;
-  int64_t inputDim = input->getWidth();
-  int64_t dim = getWidth();
-  CHECK_EQ(dim, inputDim * contextLength);
-
-  real* outData = getData();
-  real* inputData = input->getData();
-  const int* starts = sequence.getData();
-
-  hl_context_projection_forward(inputData,
-                                starts,
+  if (weight) CHECK(dynamic_cast<GpuMatrix*>(weight));
+  CHECK_EQ(getWidth(), input.getWidth() * contextLength);
+
+  hl_context_projection_forward(input.getData(),
+                                sequence.getData(),
                                 isPadding ? weight->getData() : NULL,
-                                outData,
-                                numSequences,
-                                inputDim,
+                                getData(),
+                                sequence.getSize() - 1,
+                                input.getWidth(),
                                 contextLength,
                                 contextStart,
                                 beginPad,
                                 isPadding);
 }

-void GpuMatrix::contextProjectionBackwardData(MatrixPtr inputGrad,
+void GpuMatrix::contextProjectionBackwardData(Matrix& inputGrad,
                                               const IVector& sequence,
                                               int contextLength,
                                               int contextStart) {
-  CHECK(dynamic_cast<GpuMatrix*>(inputGrad.get()));
+  CHECK(dynamic_cast<GpuMatrix*>(&inputGrad));
   CHECK(dynamic_cast<const GpuIVector*>(&sequence));
+  CHECK_EQ(getWidth(), inputGrad.getWidth() * contextLength);

-  size_t numSequences = sequence.getSize() - 1;
-  int64_t inputDim = inputGrad->getWidth();
-  int64_t dim = getWidth();
-  CHECK_EQ(dim, inputDim * contextLength);
-
-  real* outGrad = getData();
-  real* inGrad = inputGrad->getData();
-  const int* starts = sequence.getData();
-
-  hl_context_projection_backward_data(outGrad,
-                                      starts,
-                                      inGrad,
-                                      numSequences,
-                                      inputDim,
+  hl_context_projection_backward_data(getData(),
+                                      sequence.getData(),
+                                      inputGrad.getData(),
+                                      sequence.getSize() - 1,
+                                      inputGrad.getWidth(),
                                       contextLength,
                                       contextStart);
 }

-void GpuMatrix::contextProjectionBackwardWeight(MatrixPtr weightGrad,
+void GpuMatrix::contextProjectionBackwardWeight(Matrix& weightGrad,
                                                 const IVector& sequence,
                                                 int contextLength,
                                                 int contextStart,
                                                 int totalPad,
                                                 size_t beginPad) {
-  CHECK(dynamic_cast<GpuMatrix*>(weightGrad.get()));
+  CHECK(dynamic_cast<GpuMatrix*>(&weightGrad));
   CHECK(dynamic_cast<const GpuIVector*>(&sequence));
+  CHECK_EQ(getWidth(), weightGrad.getWidth() * contextLength);

-  size_t numSequences = sequence.getSize() - 1;
-  int64_t weightDim = weightGrad->getWidth();
-  int64_t dim = getWidth();
-  CHECK_EQ(dim, weightDim * contextLength);
-
-  real* outGrad = getData();
-  real* wtGrad = weightGrad->getData();
-  const int* starts = sequence.getData();
-
-  hl_context_projection_backward_weight(outGrad,
-                                        starts,
-                                        wtGrad,
-                                        numSequences,
-                                        weightDim,
+  hl_context_projection_backward_weight(getData(),
+                                        sequence.getData(),
+                                        weightGrad.getData(),
+                                        sequence.getSize() - 1,
+                                        weightGrad.getWidth(),
                                         totalPad,
                                         contextLength,
                                         contextStart,
@@ -2371,23 +2346,21 @@ void CpuMatrix::maxSequenceBackward(Matrix& outputGrad,
   }
 }

-void CpuMatrix::contextProjectionForward(MatrixPtr input,
-                                         MatrixPtr weight,
+void CpuMatrix::contextProjectionForward(Matrix& input,
+                                         Matrix* weight,
                                          const IVector& sequence,
                                          int contextLength,
                                          int contextStart,
                                          size_t beginPad,
                                          bool isPadding) {
-  CHECK(dynamic_cast<CpuMatrix*>(input.get()));
-  CHECK(dynamic_cast<const CpuIVector*>(&sequence));
-  if (weight) CHECK(dynamic_cast<CpuMatrix*>(weight.get()));
-
-  size_t numSequences = sequence.getSize() - 1;
-  int64_t inputDim = input->getWidth();
-  int64_t dim = getWidth();
-  CHECK_EQ(dim, inputDim * contextLength);
-  const int* starts = sequence.getData();
+  auto input_ptr = dynamic_cast<CpuMatrix*>(&input);
+  auto seq_ptr = dynamic_cast<const CpuIVector*>(&sequence);
+  CHECK(input_ptr && seq_ptr);
+  if (weight) CHECK(dynamic_cast<CpuMatrix*>(weight));
+  CHECK_EQ(getWidth(), input_ptr->getWidth() * contextLength);
+
+  const int* starts = seq_ptr->getData();
+  size_t numSequences = seq_ptr->getSize() - 1;
   for (size_t i = 0; i < numSequences; ++i) {
     for (int j = 0; j < contextLength; ++j) {
       int begin = starts[i] + contextStart + j;
@@ -2400,7 +2373,7 @@ void CpuMatrix::contextProjectionForward(MatrixPtr input,
         MatrixPtr mat = this->subMatrix(starts[i], padSize);
         if (isPadding) {
           MatrixPtr sub = weight->subMatrix(j, padSize);
-          mat->addAtOffset(*sub, j * inputDim);
+          mat->addAtOffset(*sub, j * input_ptr->getWidth());
         }
         dstBegin = starts[i] + padSize;
         begin = starts[i];
@@ -2412,41 +2385,36 @@ void CpuMatrix::contextProjectionForward(MatrixPtr input,
         if (isPadding) {
           MatrixPtr sub =
               weight->subMatrix(beginPad + contextStart + j - padSize, padSize);
-          mat->addAtOffset(*sub, j * inputDim);
+          mat->addAtOffset(*sub, j * input_ptr->getWidth());
         }
         dstEnd = starts[i + 1] - padSize;
         end = starts[i + 1];
       }

       if (end <= begin) continue;
-      MatrixPtr src = input->subMatrix(begin, end - begin);
+      MatrixPtr src = input_ptr->subMatrix(begin, end - begin);
       MatrixPtr dst = this->subMatrix(dstBegin, dstEnd - dstBegin);
-      dst->addAtOffset(*src, j * inputDim);
+      dst->addAtOffset(*src, j * input_ptr->getWidth());
     }
   }
 }

-void CpuMatrix::contextProjectionBackward(MatrixPtr inputGrad,
-                                          MatrixPtr weightGrad,
+void CpuMatrix::contextProjectionBackward(Matrix* inputGrad,
+                                          Matrix* weightGrad,
                                           const IVector& sequence,
                                           int contextLength,
                                           int contextStart,
                                           size_t beginPad,
                                           bool isPadding) {
-  if (inputGrad) CHECK(dynamic_cast<CpuMatrix*>(inputGrad.get()));
-  if (weightGrad) CHECK(dynamic_cast<CpuMatrix*>(weightGrad.get()));
+  if (inputGrad) CHECK(dynamic_cast<CpuMatrix*>(inputGrad));
+  if (weightGrad) CHECK(dynamic_cast<CpuMatrix*>(weightGrad));
   CHECK(dynamic_cast<const CpuIVector*>(&sequence));

-  int64_t inputDim = 0;
-  int64_t dim = getWidth();
-  size_t numSequences = sequence.getSize() - 1;
-  const int* starts = sequence.getData();
-  if (inputGrad) {
-    inputDim = inputGrad->getWidth();
-  } else {
-    inputDim = weightGrad->getWidth();
-  }
-  CHECK_EQ(dim, inputDim * contextLength);
+  int64_t inputDim = inputGrad ? inputGrad->getWidth()
                               : weightGrad ? weightGrad->getWidth() : 0;
+  CHECK_EQ(getWidth(), inputDim * contextLength);

+  const int* starts = sequence.getData();
+  size_t numSequences = sequence.getSize() - 1;
   for (size_t i = 0; i < numSequences; ++i) {
     for (int j = 0; j < contextLength; ++j) {
       int begin = starts[i] + contextStart + j;
@@ -3544,21 +3512,20 @@ void CpuMatrix::rowNormalizeL1(Matrix& out) {
 }

 /* calculate classification error */
-void CpuMatrix::classificationError(MatrixPtr output, IVectorPtr label) {
-  CHECK(dynamic_cast<CpuMatrix*>(output.get()));
-  CHECK(dynamic_cast<CpuIVector*>(label.get()));
-
-  size_t numSamples = getHeight();
-  size_t dim = output->getWidth();
-  CHECK_EQ(label->getSize(), numSamples);
-  CHECK_EQ(output->getHeight(), numSamples);
+void CpuMatrix::classificationError(Matrix& output, IVector& label) {
+  CHECK(dynamic_cast<const CpuMatrix*>(&output));
+  CHECK(dynamic_cast<const CpuIVector*>(&label));
+
   CHECK_EQ(getWidth(), (size_t)1);
+  size_t numSamples = getHeight();
+  CHECK_EQ(label.getSize(), numSamples);
+  CHECK_EQ(output.getHeight(), numSamples);

-  real* out = output->getData();
-  real* result = getData();
-  int* lbl = label->getData();
-  real maxData;
-  int maxIndex;
+  size_t dim = output.getWidth();
+  real* out = output.getData();
+  int* lbl = label.getData();
+  real maxData = 0.0;
+  int maxIndex = -1;
   for (size_t i = 0; i < numSamples; ++i) {
     CHECK_GE(lbl[i], 0);
     CHECK_LT((size_t)lbl[i], dim);
@@ -3570,7 +3537,7 @@ void CpuMatrix::classificationError(MatrixPtr output, IVectorPtr label) {
         maxData = out[i * dim + j];
       }
     }
-    result[i] = (maxIndex != lbl[i]);
+    getData()[i] = (maxIndex != lbl[i]);
   }
 }
...
@@ -835,7 +835,7 @@ public:
    *
    * output[i] = 0 if row i is correct.
    */
-  virtual void classificationError(MatrixPtr output, IVectorPtr label) {
+  virtual void classificationError(Matrix& output, IVector& label) {
     LOG(FATAL) << "Not implemented";
   }
@@ -997,8 +997,8 @@ public:
     LOG(FATAL) << "Not implemented";
   }

-  virtual void contextProjectionForward(MatrixPtr input,
-                                        MatrixPtr weight,
+  virtual void contextProjectionForward(Matrix& input,
+                                        Matrix* weight,
                                         const IVector& sequence,
                                         int contextLength,
                                         int contextStart,
@@ -1007,8 +1007,8 @@ public:
     LOG(FATAL) << "Not implemented";
   }

-  virtual void contextProjectionBackward(MatrixPtr inputGrad,
-                                         MatrixPtr weightGrad,
+  virtual void contextProjectionBackward(Matrix* inputGrad,
+                                         Matrix* weightGrad,
                                          const IVector& sequence,
                                          int contextLength,
                                          int contextStart,
@@ -1017,14 +1017,14 @@ public:
     LOG(FATAL) << "Not implemented";
   }

-  virtual void contextProjectionBackwardData(MatrixPtr inputGrad,
+  virtual void contextProjectionBackwardData(Matrix& inputGrad,
                                              const IVector& sequence,
                                              int contextLength,
                                              int contextStart) {
     LOG(FATAL) << "Not implemented";
   }

-  virtual void contextProjectionBackwardWeight(MatrixPtr weightGrad,
+  virtual void contextProjectionBackwardWeight(Matrix& weightGrad,
                                                const IVector& sequence,
                                                int contextLength,
                                                int contextStart,
@@ -1373,7 +1373,7 @@ public:
   void check(std::ostream& os, Matrix& refMat, bool printDiff = true);
   void randomizeUniform();

-  void classificationError(MatrixPtr output, IVectorPtr label);
+  void classificationError(Matrix& output, IVector& label);

   void convExpand(Matrix& feature,
                   int feaImgHeight,
@@ -1487,20 +1487,20 @@ public:
                        const IVector& sequence,
                        IVector& index);

-  void contextProjectionForward(MatrixPtr input,
-                                MatrixPtr weight,
+  void contextProjectionForward(Matrix& input,
+                                Matrix* weight,
                                 const IVector& sequence,
                                 int contextLength,
                                 int contextStart,
                                 size_t beginPad,
                                 bool isPadding);

-  void contextProjectionBackwardData(MatrixPtr inputGrad,
+  void contextProjectionBackwardData(Matrix& inputGrad,
                                      const IVector& sequence,
                                      int contextLength,
                                      int contextStart);

-  void contextProjectionBackwardWeight(MatrixPtr weightGrad,
+  void contextProjectionBackwardWeight(Matrix& weightGrad,
                                        const IVector& sequence,
                                        int contextLength,
                                        int contextStart,
@@ -1713,16 +1713,16 @@ public:
                        const IVector& sequence,
                        IVector& index);

-  void contextProjectionForward(MatrixPtr input,
-                                MatrixPtr weight,
+  void contextProjectionForward(Matrix& input,
+                                Matrix* weight,
                                 const IVector& sequence,
                                 int contextLength,
                                 int contextStart,
                                 size_t beginPad,
                                 bool isPadding);

-  void contextProjectionBackward(MatrixPtr inputGrad,
-                                 MatrixPtr weightGrad,
+  void contextProjectionBackward(Matrix* inputGrad,
+                                 Matrix* weightGrad,
                                  const IVector& sequence,
                                  int contextLength,
                                  int contextStart,
@@ -1881,7 +1881,7 @@ public:
   void randomizeUniform();

-  void classificationError(MatrixPtr output, IVectorPtr label);
+  void classificationError(Matrix& output, IVector& label);

   void addByBitCode(size_t numClasses, const IVector& codes, const Matrix& vec);
...
@@ -65,16 +65,16 @@ void testMatrixProjectionForward(int contextStart,
   // calculate
   int beginPad = std::max(0, -contextStart);
-  cpuOutput->contextProjectionForward(cpuInput,
-                                      cpuWeight,
+  cpuOutput->contextProjectionForward(*cpuInput,
+                                      cpuWeight.get(),
                                       *cpuSequence,
                                       contextLength,
                                       contextStart,
                                       beginPad,
                                       padding);
-  gpuOutput->contextProjectionForward(gpuInput,
-                                      gpuWeight,
+  gpuOutput->contextProjectionForward(*gpuInput,
+                                      gpuWeight.get(),
                                       *gpuSequence,
                                       contextLength,
                                       contextStart,
@@ -120,17 +120,17 @@ void testMatrixProjectionBackward(int contextStart,
   // calculate
   int beginPad = std::max(0, -contextStart);
-  cpuOutputGrad->contextProjectionBackward(cpuInputGrad,
-                                           cpuWeightGrad,
+  cpuOutputGrad->contextProjectionBackward(cpuInputGrad.get(),
+                                           cpuWeightGrad.get(),
                                            *cpuSequence,
                                            contextLength,
                                            contextStart,
                                            beginPad,
                                            padding);
   gpuOutputGrad->contextProjectionBackwardData(
-      gpuInputGrad, *gpuSequence, contextLength, contextStart);
+      *gpuInputGrad, *gpuSequence, contextLength, contextStart);
   if (padding) {
-    gpuOutputGrad->contextProjectionBackwardWeight(gpuWeightGrad,
+    gpuOutputGrad->contextProjectionBackwardWeight(*gpuWeightGrad,
                                                    *gpuSequence,
                                                    contextLength,
                                                    contextStart,
@@ -939,8 +939,8 @@ void testClassificationError(int numSamples, int dim) {
   gpuOutput->copyFrom(*cpuOutput);
   gpuLabel->copyFrom(*cpuLabel);

-  cpuError->classificationError(cpuOutput, cpuLabel);
-  gpuError->classificationError(gpuOutput, gpuLabel);
+  cpuError->classificationError(*cpuOutput, *cpuLabel);
+  gpuError->classificationError(*gpuOutput, *gpuLabel);

   TensorCheckEqual(*cpuError, *gpuError);
 }
...