BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit c13bdb15
Authored Oct 15, 2016 by gangliao; committed by Yu Yang on Oct 15, 2016

remove redundant HPPL_TYPE_DOUBLE (#200)

Parent: 91df6062

Showing 12 changed files with 20 additions and 20 deletions (+20, -20)
CMakeLists.txt                                 +1 -1
paddle/cuda/include/hl_base.h                  +1 -1
paddle/cuda/include/hl_cpu_gru.cuh             +1 -1
paddle/cuda/include/hl_gpu_functions.cuh       +2 -2
paddle/cuda/include/hl_matrix_base.cuh         +1 -1
paddle/cuda/include/hl_matrix_type.cuh         +2 -2
paddle/cuda/include/hl_sse_matrix_kernel.cuh   +2 -2
paddle/cuda/src/hl_cuda_cublas.cc              +1 -1
paddle/cuda/src/hl_cuda_cudnn.cc               +5 -5
paddle/cuda/src/hl_cuda_device.cc              +1 -1
paddle/cuda/src/hl_cuda_matrix.cu              +2 -2
paddle/cuda/src/hl_cuda_sparse.cuh             +1 -1
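The change is mechanical: the build previously defined both -DPADDLE_TYPE_DOUBLE and -DHPPL_TYPE_DOUBLE when WITH_DOUBLE was enabled, while the CUDA sources guarded their double-precision paths with the HPPL variant. This commit drops the redundant definition and switches every guard to PADDLE_TYPE_DOUBLE. As a minimal sketch of the pattern these guards drive (the typedef below is illustrative, in the spirit of the "real (float or double)" comment in hl_base.h, not the verbatim header):

/* Minimal sketch, assuming PADDLE_TYPE_DOUBLE is the single switch
 * defined by CMake when WITH_DOUBLE=ON.  The typedef mirrors the
 * hl_base.h comment but is not copied from the actual header. */
#ifndef PADDLE_TYPE_DOUBLE
typedef float real;   /* default single-precision build */
#else
typedef double real;  /* WITH_DOUBLE build */
#endif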
CMakeLists.txt

@@ -104,7 +104,7 @@ else()
 endif(NOT WITH_GPU)
 
 if(WITH_DOUBLE)
-    add_definitions(-DPADDLE_TYPE_DOUBLE -DHPPL_TYPE_DOUBLE)
+    add_definitions(-DPADDLE_TYPE_DOUBLE)
     set(ACCURACY double)
 else(WITH_DOUBLE)
     set(ACCURACY float)
paddle/cuda/include/hl_base.h

@@ -185,7 +185,7 @@ typedef struct {
   size_t    nnz;
 } _hl_sparse_matrix_s, *hl_sparse_matrix_s;
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 /**
  * HPPL data type: real (float or double)
  *
paddle/cuda/include/hl_cpu_gru.cuh

@@ -20,7 +20,7 @@ limitations under the License. */
 
 #include "paddle/math/MathFunctions.h"
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 #define CBLAS_GEMM paddle::gemm<float>
 #else
 #define CBLAS_GEMM paddle::gemm<double>
paddle/cuda/include/hl_gpu_functions.cuh

@@ -28,7 +28,7 @@ namespace hppl {
     const real min = SIGMOID_THRESHOLD_MIN;
     const real max = SIGMOID_THRESHOLD_MAX;
     real tmp = (a < min) ? min : ((a > max) ? max : a);
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
     return __fdividef(1.0f, 1.0f + __expf(-tmp));
 #else
     return 1.0 / (1.0 + exp(-tmp));

@@ -36,7 +36,7 @@ namespace hppl {
   }
 
   __device__ static real tanh(const real a) {
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
     return __fdividef(2.0f, (1.0f + __expf(-2.0f * a))) - 1.0f;
 #else
     return (2.0 / (1.0 + exp(-2.0 * a))) - 1.0;
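For context, a minimal sketch of the kind of device function the hunks above guard: the single-precision build uses the fast CUDA intrinsics __fdividef/__expf, while the double build falls back to plain exp(). The function name, the clamp constants, and the gated typedef are assumptions for illustration; the two return expressions come from the diff itself.

#include <math.h>

#ifndef PADDLE_TYPE_DOUBLE
typedef float real;
#else
typedef double real;
#endif

/* Hypothetical stand-ins for SIGMOID_THRESHOLD_MIN / _MAX. */
#define SIGMOID_MIN -40.0
#define SIGMOID_MAX 13.0

__device__ static real sigmoid_sketch(const real a) {
  const real min = SIGMOID_MIN;
  const real max = SIGMOID_MAX;
  real tmp = (a < min) ? min : ((a > max) ? max : a);
#ifndef PADDLE_TYPE_DOUBLE
  /* fast single-precision intrinsics */
  return __fdividef(1.0f, 1.0f + __expf(-tmp));
#else
  /* standard double-precision math */
  return 1.0 / (1.0 + exp(-tmp));
#endif
}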
paddle/cuda/include/hl_matrix_base.cuh

@@ -30,7 +30,7 @@ limitations under the License. */
 #define INLINE inline
 #endif
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 #define DEVICE_FMAX fmaxf
 #define DEVICE_FMIN fminf
 #else
paddle/cuda/include/hl_matrix_type.cuh

@@ -21,7 +21,7 @@ limitations under the License. */
 #ifdef __CUDA_ARCH__
 // typedef void* vecType;
 #include <vector_types.h>
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 typedef float4 vecType;
 #else
 typedef double2 vecType;

@@ -30,7 +30,7 @@ typedef double2 vecType;
 #include <mmintrin.h>
 #include <xmmintrin.h>
 #include <emmintrin.h>
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 typedef __m128 vecType;
 #else
 typedef __m128d vecType;
paddle/cuda/include/hl_sse_matrix_kernel.cuh

@@ -20,7 +20,7 @@ limitations under the License. */
 
 #define VECTOR_SIZE 16
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 /* number of float in vector */
 #define VECTOR_LEN 4
 #define VECTOR_SET _mm_set_ps1

@@ -41,7 +41,7 @@ inline bool hl_check_align(void *ptr) {
   return hl_check_align(reinterpret_cast<size_t>(ptr));
 }
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 template <class Agg>
 inline real hl_agg_op(Agg agg, vecType mm) {
   __m128 lo = _mm_unpacklo_ps(mm, mm);
paddle/cuda/src/hl_cuda_cublas.cc

@@ -84,7 +84,7 @@ CUBLAS_BLAS_ROUTINE_EACH(DYNAMIC_LOAD_CUBLAS_V2_WRAP)
 } /* namespace dynload */
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 #define CUBLAS_GEAM dynload::cublasSgeam
 #define CUBLAS_GEMV dynload::cublasSgemv
 #define CUBLAS_GEMM dynload::cublasSgemm
paddle/cuda/src/hl_cuda_cudnn.cc

@@ -340,7 +340,7 @@ void hl_create_tensor_descriptor(hl_tensor_descriptor* image_desc,
       (cudnn_tensor_descriptor)malloc(sizeof(_cudnn_tensor_descriptor));
   CHECK_NOTNULL(hl_desc);
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
   cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
 #else
   cudnnDataType_t data_type = CUDNN_DATA_DOUBLE;

@@ -373,7 +373,7 @@ void hl_create_tensor_descriptor(hl_tensor_descriptor* image_desc) {
       (cudnn_tensor_descriptor)malloc(sizeof(_cudnn_tensor_descriptor));
   CHECK_NOTNULL(hl_desc);
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
   cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
 #else
   cudnnDataType_t data_type = CUDNN_DATA_DOUBLE;

@@ -611,7 +611,7 @@ void hl_create_filter_descriptor(hl_filter_descriptor* filter,
   CHECK_CUDNN(dynload::cudnnCreateFilterDescriptor(&hl_filter->desc));
 
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
   cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
 #else
   cudnnDataType_t data_type = CUDNN_DATA_DOUBLE;

@@ -921,7 +921,7 @@ void hl_softmax_forward(real *input,
                         int height,
                         int width) {
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
   cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
 #else
   cudnnDataType_t data_type = CUDNN_DATA_DOUBLE;

@@ -955,7 +955,7 @@ void hl_softmax_backward(real *output_value,
                          int height,
                          int width) {
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
   cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
 #else
   cudnnDataType_t data_type = CUDNN_DATA_DOUBLE;
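The cuDNN hunks all repeat one idiom: pick CUDNN_DATA_FLOAT or CUDNN_DATA_DOUBLE once from the precision macro, then feed it into descriptor setup. Below is a self-contained sketch of that idiom using the public cuDNN API directly rather than Paddle's dynload wrappers; the helper name and the NCHW format choice are assumptions, not Paddle code.

#include <cudnn.h>

/* Hypothetical helper: create a 4-D tensor descriptor whose element
 * type follows the build-time precision macro. */
static cudnnStatus_t make_tensor_desc(cudnnTensorDescriptor_t* desc,
                                      int n, int c, int h, int w) {
#ifndef PADDLE_TYPE_DOUBLE
  const cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
#else
  const cudnnDataType_t data_type = CUDNN_DATA_DOUBLE;
#endif
  cudnnStatus_t status = cudnnCreateTensorDescriptor(desc);
  if (status != CUDNN_STATUS_SUCCESS) return status;
  return cudnnSetTensor4dDescriptor(*desc, CUDNN_TENSOR_NCHW, data_type,
                                    n, c, h, w);
}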
paddle/cuda/src/hl_cuda_device.cc

@@ -626,7 +626,7 @@ void hl_specify_devices_start(int* device, int number) {
 void hl_rand(real *dest_d, size_t num) {
   pthread_mutex_lock(t_resource.gen_mutex);
   CHECK_EQ(
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
       dynload::curandGenerateUniform(t_resource.gen, dest_d, num),
 #else
       dynload::curandGenerateUniformDouble(t_resource.gen, dest_d, num),
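Similarly, the hl_rand hunk simply selects between the float and double cuRAND generators. A minimal sketch against the public cuRAND API follows; the helper name and gated typedef are assumptions, and the real code goes through Paddle's dynload layer and thread-local resources.

#include <curand.h>

#ifndef PADDLE_TYPE_DOUBLE
typedef float real;
#else
typedef double real;
#endif

/* Hypothetical helper: fill dest_d with uniform random numbers of the
 * configured precision, given an already-initialized generator. */
static curandStatus_t fill_uniform(curandGenerator_t gen,
                                   real* dest_d, size_t num) {
#ifndef PADDLE_TYPE_DOUBLE
  return curandGenerateUniform(gen, dest_d, num);
#else
  return curandGenerateUniformDouble(gen, dest_d, num);
#endif
}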
paddle/cuda/src/hl_cuda_matrix.cu

@@ -47,7 +47,7 @@ void hl_matrix_add(real *A_d,
   CHECK_SYNC("hl_matrix_add failed");
 }
 
-#ifdef HPPL_TYPE_DOUBLE
+#ifdef PADDLE_TYPE_DOUBLE
 #define THRESHOLD 128
 #else
 #define THRESHOLD 64

@@ -102,7 +102,7 @@ void subMaxAndExp(real* I,
       val = -THRESHOLD;
     }
     I[nextIdx] = val;
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
     O[nextIdx] = __expf(val);
 #else
     O[nextIdx] = exp(val);
paddle/cuda/src/hl_cuda_sparse.cuh

@@ -355,7 +355,7 @@ __global__ void KeSMatrixCscMulDense(real *C_d,
 }
 
 /* best perf */
-#ifndef HPPL_TYPE_DOUBLE
+#ifndef PADDLE_TYPE_DOUBLE
 #define CU_CSCMM_THREAD_M_BEST 9
 #else
 #define CU_CSCMM_THREAD_M_BEST 4