Commit 7e0b51f2 authored by hedaoyuan

some bugs fix

Parent a7855d3e
-/**
- * hl_tensor_ops.h
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-06
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- *
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #ifndef HL_TENSOR_OPS_H_
 #define HL_TENSOR_OPS_H_
......
-/**
- * TensorApply.h
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-06
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- *
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
......
-/**
- * TensorEvaluate.h
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-06
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- *
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
......
-/**
- * TensorExpression.h
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-06
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- *
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
 #include <cstddef>
......
-/**
- * TrainingAlgorithmOp.cu
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-29
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- *
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include "paddle/utils/Logging.h"
 #include "BaseMatrix.h"
......
-/**
- * TrainingAlgorithmOp.h
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-29
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- *
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
......
-/**
- * OriginalOptimizerApi.h
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-29
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
......
@@ -9,6 +9,7 @@
 #include <gtest/gtest.h>
 #include "paddle/math/Matrix.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
@@ -105,50 +106,9 @@ void TensorCheckEqual(const GpuVectorT<T>& vector1,
   TensorCheckEqual(cpu1, cpu2);
 }
-int VectorCheckErr(const Vector& vector1, const Vector& vector2) {
-  CHECK(vector1.getSize() == vector2.getSize());
-  const real* data1 = vector1.getData();
-  const real* data2 = vector2.getData();
-  size_t size = vector1.getSize();
-  int count = 0;
-  for (size_t i = 0; i < size; i++) {
-    real a = data1[i];
-    real b = data2[i];
-    if (fabs(a - b) > FLAGS_max_diff) {
-      if ((fabsf(a - b) / fabsf(a)) > (FLAGS_max_diff / 10.0f)) {
-        count++;
-      }
-    }
-  }
-  return count;
-}
-#define INIT_UNARY(A1, A2) \
-  Tensor A1(height, width); \
-  Tensor A2(height, width); \
-  A1.randomizeUniform(); \
-  A2.copyFrom(A1)
-#define INIT_BINARY(A1, A2, B) \
-  INIT_UNARY(A1, A2); \
-  Tensor B(height, width); \
-  B.randomizeUniform()
-#define INIT_TERNARY(A1, A2, B, C) \
-  INIT_BINARY(A1, A2, B); \
-  Tensor C(height, width); \
-  C.randomizeUniform()
-#define INIT_QUATERNARY(A1, A2, B, C, D) \
-  INIT_TERNARY(A1, A2, B, C); \
-  Tensor D(height, width); \
-  D.randomizeUniform()
 // Performance Check
 #ifdef PADDLE_DISABLE_TIMER
-#define CHECK_VECTORPTR(vector1, vector2) \
-  EXPECT_EQ(VectorCheckErr(vector1, vector2), 0)
 #define EXPRESSION_PERFORMANCE(expression) \
   expression;
@@ -156,8 +116,6 @@ int VectorCheckErr(const Vector& vector1, const Vector& vector2) {
 #include "paddle/utils/Stat.h"
-#define CHECK_VECTORPTR(vector1, vector2)
 #define EXPRESSION_PERFORMANCE(expression) \
   do {\
     char expr[30];\
......
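The VectorCheckErr removed here (and re-added to test_TrainingAlgorithm.cpp further down in this commit) only counts an element as a mismatch when it exceeds the absolute threshold FLAGS_max_diff and its relative error also exceeds FLAGS_max_diff / 10. A minimal standalone restatement of that predicate, with a hard-coded threshold standing in for the FLAGS_max_diff gflag:

#include <cmath>
#include <cstddef>

// Sketch only: maxDiff stands in for the FLAGS_max_diff gflag used in the tests.
int countLargeDiffs(const float* data1, const float* data2, size_t size,
                    float maxDiff = 1e-5f) {
  int count = 0;
  for (size_t i = 0; i < size; i++) {
    float a = data1[i];
    float b = data2[i];
    // An element is only an error if it fails the absolute check AND the
    // relative check against maxDiff / 10.
    if (std::fabs(a - b) > maxDiff &&
        (std::fabs(a - b) / std::fabs(a)) > (maxDiff / 10.0f)) {
      count++;
    }
  }
  return count;
}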
-/**
- * test_Tensor.cpp
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-06
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- */
-#include <gtest/gtest.h>
-#include "paddle/math/Matrix.h"
-using namespace paddle;  // NOLINT
-using namespace std;     // NOLINT
-template<typename Tensor>
-extern void TensorCheckEqual(const Tensor& tensor1, const Tensor& tensor2);
-void TensorCheckEqual(const CpuMatrix& matrix1, const CpuMatrix& matrix2) {
-  CHECK(matrix1.getHeight() == matrix2.getHeight());
-  CHECK(matrix1.getWidth() == matrix2.getWidth());
-  int height = matrix1.getHeight();
-  int width = matrix1.getWidth();
-  const real* data1 = matrix1.getData();
-  const real* data2 = matrix2.getData();
-  int count = 0;
-  for (int i = 0; i < height; i++) {
-    for (int j = 0; j < width; j++) {
-      if (data1[i * width + j] != data2[i * width + j]) {
-        count++;
-      }
-    }
-  }
-  EXPECT_EQ(count, 0) << "There are " << count << " different element.";
-}
-void TensorCheckEqual(const GpuMatrix& matrix1, const GpuMatrix& matrix2) {
-  CpuMatrix cpu1(matrix1.getHeight(), matrix1.getWidth());
-  CpuMatrix cpu2(matrix2.getHeight(), matrix2.getWidth());
-  cpu1.copyFrom(matrix1);
-  cpu2.copyFrom(matrix2);
-  TensorCheckEqual(cpu1, cpu2);
-}
-void TensorCheckErr(const CpuMatrix& matrix1, const CpuMatrix& matrix2) {
-  CHECK(matrix1.getHeight() == matrix2.getHeight());
-  CHECK(matrix1.getWidth() == matrix2.getWidth());
-#ifndef PADDLE_TYPE_DOUBLE
-  real err = 1e-5;
-#else
-  real err = 1e-10;
-#endif
-  int height = matrix1.getHeight();
-  int width = matrix1.getWidth();
-  const real* data1 = matrix1.getData();
-  const real* data2 = matrix2.getData();
-  int count = 0;
-  for (int i = 0; i < height; i++) {
-    for (int j = 0; j < width; j++) {
-      real a = data1[i * width + j];
-      real b = data2[i * width + j];
-      if (fabs(a - b) > err) {
-        if ((fabsf(a - b) / fabsf(a)) > (err / 10.0f)) {
-          count++;
-        }
-      }
-    }
-  }
-  EXPECT_EQ(count, 0) << "There are " << count << " different element.";
-}
-void TensorCheckErr(const GpuMatrix& matrix1, const GpuMatrix& matrix2) {
-  CpuMatrix cpu1(matrix1.getHeight(), matrix1.getWidth());
-  CpuMatrix cpu2(matrix2.getHeight(), matrix2.getWidth());
-  cpu1.copyFrom(matrix1);
-  cpu2.copyFrom(matrix2);
-  TensorCheckErr(cpu1, cpu2);
-}
-template<class T>
-void TensorCheckEqual(const CpuVectorT<T>& vector1,
-                      const CpuVectorT<T>& vector2) {
-  CHECK(vector1.getSize() == vector2.getSize());
-  const T* data1 = vector1.getData();
-  const T* data2 = vector2.getData();
-  size_t size = vector1.getSize();
-  int count = 0;
-  for (size_t i = 0; i < size; i++) {
-    if (data1[i] != data2[i]) {
-      count++;
-    }
-  }
-  EXPECT_EQ(count, 0) << "There are " << count << " different element.";
-}
-template<class T>
-void TensorCheckEqual(const GpuVectorT<T>& vector1,
-                      const GpuVectorT<T>& vector2) {
-  CpuVectorT<T> cpu1(vector1.getSize());
-  CpuVectorT<T> cpu2(vector2.getSize());
-  cpu1.copyFrom(vector1);
-  cpu2.copyFrom(vector2);
-  TensorCheckEqual(cpu1, cpu2);
-}
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#include <gtest/gtest.h>
+#include "paddle/math/Matrix.h"
+#include "TensorCheck.h"
+
+using namespace paddle;  // NOLINT
+using namespace std;     // NOLINT
 #define INIT_UNARY(A1, A2) \
   Tensor A1(height, width); \
......
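With its local comparison helpers deleted, test_Tensor.cpp now pulls them from the shared TensorCheck.h header. A minimal usage sketch, assuming TensorCheck.h exposes the same TensorCheckEqual/TensorCheckErr overloads that were removed from this file (the test name is hypothetical):

#include <gtest/gtest.h>
#include "paddle/math/Matrix.h"
#include "TensorCheck.h"

using namespace paddle;  // NOLINT

TEST(TensorCheck, equalAfterCopy) {
  const size_t height = 100;
  const size_t width = 200;
  CpuMatrix a(height, width);
  CpuMatrix b(height, width);
  a.randomizeUniform();
  b.copyFrom(a);
  // Exact element-wise comparison; TensorCheckErr would instead allow the
  // small absolute/relative tolerance shown in the deleted code above.
  TensorCheckEqual(a, b);
}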
-/**
- * test_TrainingAlgorithm.cpp
- *
- * Author: hedaoyuan (hedaoyuan@baidu.com)
- * Created on: 2016-06-29
- *
- * Copyright (c) Baidu.com, Inc. All Rights Reserved
- */
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include <gtest/gtest.h>
 #include "paddle/utils/Util.h"
@@ -44,6 +49,26 @@ private:
   }\
 } while (0)
+int VectorCheckErr(const Vector& vector1, const Vector& vector2) {
+  CHECK(vector1.getSize() == vector2.getSize());
+  const real* data1 = vector1.getData();
+  const real* data2 = vector2.getData();
+  size_t size = vector1.getSize();
+  int count = 0;
+  for (size_t i = 0; i < size; i++) {
+    real a = data1[i];
+    real b = data2[i];
+    if (fabs(a - b) > FLAGS_max_diff) {
+      if ((fabsf(a - b) / fabsf(a)) > (FLAGS_max_diff / 10.0f)) {
+        count++;
+      }
+    }
+  }
+  return count;
+}
 int VectorCheckErr(const VectorPtr& vector1, const VectorPtr& vector2) {
   VectorPtr tmp1;
   VectorPtr tmp2;
@@ -52,6 +77,17 @@ int VectorCheckErr(const VectorPtr& vector1, const VectorPtr& vector2) {
   return VectorCheckErr(*tmp1, *tmp2);
 }
+#ifdef PADDLE_DISABLE_TIMER
+#define CHECK_VECTORPTR(vector1, vector2) \
+  EXPECT_EQ(VectorCheckErr(vector1, vector2), 0)
+#else
+#define CHECK_VECTORPTR(vector1, vector2)
+#endif
 typedef std::function<void(size_t size, bool useGpu)> testMatrixFunc;
 void testCase(testMatrixFunc matrixFunc) {
......
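The helpers added above gate the correctness check on PADDLE_DISABLE_TIMER, so the element-wise comparison only runs in non-timing builds. A hedged sketch of the intended call pattern inside test_TrainingAlgorithm.cpp, assuming the file's existing includes; updateBaseline and updateOptimized are hypothetical stand-ins for the two optimizer implementations a test compares:

void checkOneUpdate(const VectorPtr& valueBaseline,
                    const VectorPtr& valueOptimized) {
  updateBaseline(valueBaseline);
  // Runs the expression bare when PADDLE_DISABLE_TIMER is defined, or wraps
  // it with the timing helpers from paddle/utils/Stat.h otherwise.
  EXPRESSION_PERFORMANCE(updateOptimized(valueOptimized));
  // Expands to EXPECT_EQ(VectorCheckErr(...), 0) when PADDLE_DISABLE_TIMER is
  // defined, and to nothing otherwise, so timing runs are not perturbed.
  CHECK_VECTORPTR(valueBaseline, valueOptimized);
}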
@@ -27,7 +27,16 @@ void testMatrixCase(testMatrixFunc matrixFunc) {
 template<typename Tensor>
 void testLazyAssign(int height, int width) {
-  INIT_QUATERNARY(A1, A2, B, C, D);
+  Tensor A1(height, width);
+  Tensor A2(height, width);
+  Tensor B(height, width);
+  Tensor C(height, width);
+  Tensor D(height, width);
+  A1.randomizeUniform();
+  B.randomizeUniform();
+  C.randomizeUniform();
+  D.randomizeUniform();
+  A2.copyFrom(A1);
   EXPRESSION_PERFORMANCE(A1 = B + C; A1 = A1 * D;);
......
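For reference, the removed INIT_QUATERNARY macro (whose definition is deleted earlier in this commit) produced essentially the same setup; the rewrite only defers A2.copyFrom(A1) until after B, C and D are initialized, which does not affect the test. A sketch of the old expansion, written out as a self-contained template (the function name is illustrative only):

// Illustration only: what INIT_QUATERNARY(A1, A2, B, C, D) expanded to,
// based on the macro definitions removed in this commit.
template <typename Tensor>
void initQuaternaryExpansion(int height, int width) {
  Tensor A1(height, width);  // INIT_UNARY(A1, A2)
  Tensor A2(height, width);
  A1.randomizeUniform();
  A2.copyFrom(A1);
  Tensor B(height, width);   // INIT_BINARY adds B
  B.randomizeUniform();
  Tensor C(height, width);   // INIT_TERNARY adds C
  C.randomizeUniform();
  Tensor D(height, width);   // INIT_QUATERNARY adds D
  D.randomizeUniform();
}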