机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 00441a90
Authored May 17, 2017 by liaogang

fix conficts

Parents: af065196, 0a4b540a

Showing 7 changed files with 302 additions and 100 deletions (+302 -100), inline view
RELEASE.cn.md                                 +0    -0
cmake/generic.cmake                           +99   -0
demo/semantic_role_labeling/api_train_v2.py   +137  -50
paddle/majel/CMakeLists.txt                   +1    -40
paddle/majel/test/CMakeLists.txt              +6    -9
paddle/majel/test/cuda_test.cu                +59   -0
paddle/scripts/travis/docs.sh                 +0    -1
RELEASE.cn.md 100755 → 100644

File mode changed from 100755 to 100644 (no content changes)
cmake/generic.cmake
...
@@ -28,6 +28,105 @@
# cmake_parse_arguments can help us to achieve this goal.
# https://cmake.org/cmake/help/v3.0/module/CMakeParseArguments.html
# cc_library parses tensor.cc and figures out that target also depend on tensor.h.
# cc_library(tensor
#   SRCS
#   tensor.cc
#   DEPS
#   variant)
function(cc_library TARGET_NAME)
  set(options OPTIONAL)
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS)
  cmake_parse_arguments(cc_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  if (${cc_library_OPTIONAL} STREQUAL "SHARED")
    add_library(${TARGET_NAME} SHARED ${cc_library_SRCS})
  else()
    add_library(${TARGET_NAME} STATIC ${cc_library_SRCS})
  endif()
  add_dependencies(${TARGET_NAME} ${cc_library_DEPS} ${external_project_dependencies})
endfunction(cc_library)

# cc_binary parses tensor.cc and figures out that target also depend on tensor.h.
# cc_binary(tensor
#   SRCS
#   tensor.cc)
function(cc_binary TARGET_NAME)
  set(options OPTIONAL)
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS)
  cmake_parse_arguments(cc_binary "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  add_executable(${TARGET_NAME} ${cc_binary_SRCS})
  add_dependencies(${TARGET_NAME} ${cc_binary_DEPS} ${external_project_dependencies})
  target_link_libraries(${TARGET_NAME} ${cc_binary_DEPS})
endfunction(cc_binary)

# The dependency to target tensor implies that if any of
# tensor{.h,.cc,_test.cc} is changed, tensor_test need to be re-built.
# cc_test(tensor_test
#   SRCS
#   tensor_test.cc
#   DEPS
#   tensor)
function(cc_test TARGET_NAME)
  set(options "")
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS)
  cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  add_executable(${TARGET_NAME} ${cc_test_SRCS})
  add_dependencies(${TARGET_NAME} ${cc_test_DEPS} ${external_project_dependencies})
  target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES})
  add_test(${TARGET_NAME} ${TARGET_NAME})
endfunction(cc_test)

# Suppose that ops.cu includes global functions that take Tensor as
# their parameters, so ops depend on tensor. This implies that if
# any of tensor.{h.cc}, ops.{h,cu} is changed, ops need to be re-built.
# nv_library(ops
#   SRCS
#   ops.cu
#   DEPS
#   tensor)
function(nv_library TARGET_NAME)
  set(options OPTIONAL)
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS)
  cmake_parse_arguments(nv_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  if (${nv_library_OPTIONAL} STREQUAL "SHARED")
    cuda_add_library(${TARGET_NAME} SHARED ${nv_library_SRCS})
  else()
    cuda_add_library(${TARGET_NAME} STATIC ${nv_library_SRCS})
  endif()
  add_dependencies(${TARGET_NAME} ${nv_library_DEPS} ${external_project_dependencies})
endfunction(nv_library)

function(nv_binary TARGET_NAME)
  set(options "")
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS)
  cmake_parse_arguments(nv_binary "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  cuda_add_executable(${TARGET_NAME} ${nv_binary_SRCS})
  add_dependencies(${TARGET_NAME} ${nv_binary_DEPS} ${external_project_dependencies})
  target_link_libraries(${TARGET_NAME} ${nv_binary_DEPS})
endfunction(nv_binary)

# The dependency to target tensor implies that if any of
# ops{.h,.cu,_test.cu} is changed, ops_test need to be re-built.
# nv_test(ops_test
#   SRCS
#   ops_test.cu
#   DEPS
#   ops)
function(nv_test TARGET_NAME)
  set(options "")
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS)
  cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS})
  add_dependencies(${TARGET_NAME} ${nv_test_DEPS} ${external_project_dependencies})
  target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES})
  add_test(${TARGET_NAME} ${TARGET_NAME})
endfunction(nv_test)

set(GOPATH "${CMAKE_CURRENT_BINARY_DIR}/go")
file(MAKE_DIRECTORY ${GOPATH})
...
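Read together, these helpers give C++ and CUDA targets one uniform interface (SRCS and DEPS arguments) and wire every target to ${external_project_dependencies}. The sketch below is illustrative usage only, not part of this diff: the majel, place_test, and cuda_test calls mirror the paddle/majel CMake files changed later in this commit, while the nv_library call is a hypothetical example.

# Illustrative usage of the new helpers (not part of this commit's diff).
cc_library(majel
    SRCS place.cc)            # static library by default

cc_test(place_test
    SRCS place_test.cc
    DEPS majel)               # links gtest and registers the binary with CTest

if(WITH_GPU)
  nv_library(majel_gpu        # hypothetical CUDA library target, same interface
      SRCS place.cu
      DEPS majel)
  nv_test(cuda_test
      SRCS cuda_test.cu)
endif()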
demo/semantic_role_labeling/api_train_v2.py
import sys
import math
import numpy as np
import paddle.v2 as paddle
import gzip
import logging
import paddle.v2.dataset.conll05 as conll05
import paddle.v2.evaluator as evaluator
import paddle.v2 as paddle

logger = logging.getLogger('paddle')

def db_lstm():
    word_dict, verb_dict, label_dict = conll05.get_dict()
    word_dict_len = len(word_dict)
    label_dict_len = len(label_dict)
    pred_len = len(verb_dict)

word_dict, verb_dict, label_dict = conll05.get_dict()
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
pred_len = len(verb_dict)

    mark_dict_len = 2
    word_dim = 32
    mark_dim = 5
    hidden_dim = 512
    depth = 8

mark_dict_len = 2
word_dim = 32
mark_dim = 5
hidden_dim = 512
depth = 8
default_std = 1 / math.sqrt(hidden_dim) / 3.0
mix_hidden_lr = 1e-3

    #8 features
    def d_type(size):

def d_type(size):
    return paddle.data_type.integer_value_sequence(size)

def db_lstm():
    #8 features
    word = paddle.layer.data(name='word_data', type=d_type(word_dict_len))
    predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len))
...
@@ -31,11 +38,7 @@ def db_lstm():
    ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len))
    mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len))

    target = paddle.layer.data(name='target', type=d_type(label_dict_len))

    default_std = 1 / math.sqrt(hidden_dim) / 3.0
    emb_para = paddle.attr.Param(name='emb', initial_std=0., learning_rate=0.)
    emb_para = paddle.attr.Param(name='emb', initial_std=0., is_static=True)
    std_0 = paddle.attr.Param(initial_std=0.)
    std_default = paddle.attr.Param(initial_std=default_std)
...
@@ -63,7 +66,6 @@ def db_lstm():
                input=emb, param_attr=std_default) for emb in emb_layers
        ])

    mix_hidden_lr = 1e-3
    lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0)
    hidden_para_attr = paddle.attr.Param(
        initial_std=default_std, learning_rate=mix_hidden_lr)
...
@@ -111,6 +113,21 @@ def db_lstm():
                input=input_tmp[1], param_attr=lstm_para_attr)
        ], )

    return feature_out

def load_parameter(file_name, h, w):
    with open(file_name, 'rb') as f:
        f.read(16)  # skip header.
        return np.fromfile(f, dtype=np.float32).reshape(h, w)

def train():
    paddle.init(use_gpu=False, trainer_count=1)

    # define network topology
    feature_out = db_lstm()
    target = paddle.layer.data(name='target', type=d_type(label_dict_len))
    crf_cost = paddle.layer.crf(
        size=label_dict_len,
        input=feature_out,
        label=target,
...
@@ -120,29 +137,15 @@ def db_lstm():
            learning_rate=mix_hidden_lr))

    crf_dec = paddle.layer.crf_decoding(
        name='crf_dec_l',
        size=label_dict_len,
        input=feature_out,
        label=target,
        param_attr=paddle.attr.Param(name='crfw'))
    return crf_cost, crf_dec

def load_parameter(file_name, h, w):
    with open(file_name, 'rb') as f:
        f.read(16)  # skip header.
        return np.fromfile(f, dtype=np.float32).reshape(h, w)

def main():
    paddle.init(use_gpu=False, trainer_count=1)

    # define network topology
    crf_cost, crf_dec = db_lstm()
    evaluator.sum(input=crf_dec)

    # create parameters
    parameters = paddle.parameters.create([crf_cost, crf_dec])
    parameters = paddle.parameters.create(crf_cost)
    parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32))

    # create optimizer
    optimizer = paddle.optimizer.Momentum(
...
@@ -152,18 +155,12 @@ def main():
        model_average=paddle.optimizer.ModelAverage(
            average_window=0.5, max_average_window=10000), )

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "Pass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)

    trainer = paddle.trainer.SGD(cost=crf_cost,
                                 parameters=parameters,
                                 update_equation=optimizer)

    parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32))
                                 update_equation=optimizer,
                                 extra_layers=crf_dec)

    trn_reader = paddle.batch(
    reader = paddle.batch(
        paddle.reader.shuffle(
            conll05.test(), buf_size=8192), batch_size=10)
...
@@ -179,12 +176,102 @@ def main():
        'target': 8
    }

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                logger.info("Pass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics))
            if event.batch_id and event.batch_id % 1000 == 0:
                result = trainer.test(reader=reader, feeding=feeding)
                logger.info("\nTest with Pass %d, Batch %d, %s" % (
                    event.pass_id, event.batch_id, result.metrics))

        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with gzip.open('params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
                parameters.to_tar(f)

            result = trainer.test(reader=reader, feeding=feeding)
            logger.info("\nTest with Pass %d, %s" % (event.pass_id, result.metrics))

    trainer.train(
        reader=trn_reader,
        reader=reader,
        event_handler=event_handler,
        num_passes=10000,
        num_passes=10,
        feeding=feeding)

def infer_a_batch(inferer, test_data, word_dict, pred_dict, label_dict):
    probs = inferer.infer(input=test_data, field='id')
    assert len(probs) == sum(len(x[0]) for x in test_data)

    for idx, test_sample in enumerate(test_data):
        start_id = 0
        pred_str = "%s\t" % (pred_dict[test_sample[6][0]])

        for w, tag in zip(test_sample[0],
                          probs[start_id:start_id + len(test_sample[0])]):
            pred_str += "%s[%s] " % (word_dict[w], label_dict[tag])
        print(pred_str.strip())
        start_id += len(test_sample[0])

def infer():
    label_dict_reverse = dict(
        (value, key) for key, value in label_dict.iteritems())
    word_dict_reverse = dict(
        (value, key) for key, value in word_dict.iteritems())
    pred_dict_reverse = dict(
        (value, key) for key, value in verb_dict.iteritems())

    test_creator = paddle.dataset.conll05.test()

    paddle.init(use_gpu=False, trainer_count=1)

    # define network topology
    feature_out = db_lstm()
    predict = paddle.layer.crf_decoding(
        size=label_dict_len,
        input=feature_out,
        param_attr=paddle.attr.Param(name='crfw'))

    test_pass = 0
    with gzip.open('params_pass_%d.tar.gz' % (test_pass)) as f:
        parameters = paddle.parameters.Parameters.from_tar(f)
    inferer = paddle.inference.Inference(
        output_layer=predict, parameters=parameters)

    # prepare test data
    test_data = []
    test_batch_size = 50
    for idx, item in enumerate(test_creator()):
        test_data.append(item[0:8])
        if idx and (not idx % test_batch_size):
            infer_a_batch(
                inferer,
                test_data,
                word_dict_reverse,
                pred_dict_reverse,
                label_dict_reverse, )
            test_data = []
    infer_a_batch(
        inferer,
        test_data,
        word_dict_reverse,
        pred_dict_reverse,
        label_dict_reverse, )
    test_data = []

def main(is_inferring=False):
    if is_inferring:
        infer()
    else:
        train()

if __name__ == '__main__':
    main()
    main(is_inferring=False)
paddle/majel/CMakeLists.txt
cmake_minimum_required(VERSION 3.0)

if(${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_SOURCE_DIR})
  # find #include <majel/xx.h>
  get_filename_component(PARENT_DIR ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY)
  include_directories(${PARENT_DIR})

  # find cmake directory modules
  get_filename_component(PARENT_DIR ${PARENT_DIR} DIRECTORY)
  set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PARENT_DIR}/cmake")

  # enable boost
  find_package(Boost REQUIRED)
  if(NOT Boost_FOUND)
    message(FATAL "Cannot find Boost library.")
  endif()
  include_directories(${Boost_INCLUDE_DIRS})

  # enable c++11
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

  # enable gtest
  set(THIRD_PARTY_PATH ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
  set(WITH_TESTING ON)
  include(external/gtest)
else()
  message("-- Found gtest (include: ${GTEST_INCLUDE_DIR}, library: ${GTEST_LIBRARIES})")
endif()

########################### Build Majel #############################
set(MAJEL_CXX_FILES place.cc)
set(MAJEL_CUDA_FILES "")

if(CUDA_FOUND)
  cuda_add_library(majel ${MAJEL_CUDA_FILES} ${MAJEL_CXX_FILES})
else()
  add_library(majel ${MAJEL_CXX_FILES})
endif()

add_dependencies(majel ${external_project_dependencies})
#####################################################################
cc_library(majel SRCS place.cc)

if(WITH_TESTING)
  add_subdirectory(test)
...
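Read against the cc_library helper added in cmake/generic.cmake above, the single cc_library(majel SRCS place.cc) call stands in for the removed hand-written library rules. A minimal sketch of what it expands to here, assuming no DEPS are passed:

# Approximate expansion of cc_library(majel SRCS place.cc); illustrative only.
add_library(majel STATIC place.cc)
add_dependencies(majel ${external_project_dependencies})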
paddle/majel/test/CMakeLists.txt
file(GLOB_RECURSE ALL_TEST_FILES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.cc")

cc_test(place_test
        SRCS place_test.cc
        DEPS majel)

add_executable(majel_tests ${ALL_TEST_FILES})
add_dependencies(majel_tests majel)
target_link_libraries(majel_tests ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES} majel)
add_test(majel_tests majel_tests)

if(WITH_GPU)
  nv_test(cuda_test SRCS cuda_test.cu)
endif()
paddle/majel/test/cuda_test.cu
0 → 100644
#include <cuda_runtime.h>
#include <stdio.h>
#include "gtest/gtest.h"
#define CHECK_ERR(x)                 \
  if (x != cudaSuccess) {            \
    fprintf(stderr,                  \
            "%s in %s at line %d\n", \
            cudaGetErrorString(err), \
            __FILE__,                \
            __LINE__);               \
    exit(-1);                        \
  }

__global__ void vecAdd(float* d_A, float* d_B, float* d_C, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) {
    d_C[i] = d_A[i] + d_B[i];
  }
}

TEST(Cuda, Equality) {
  int n = 10;
  // Memory allocation for h_A, h_B and h_C (in the host)
  float h_A[10] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0};
  float h_B[10] = {0.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0};
  float h_C[10];
  float *d_A, *d_B, *d_C;
  cudaError_t err;

  // Memory allocation for d_A, d_B and d_C (in the device)
  err = cudaMalloc((void**)&d_A, sizeof(float) * n);
  CHECK_ERR(err);

  err = cudaMalloc((void**)&d_B, sizeof(float) * n);
  CHECK_ERR(err);

  err = cudaMalloc((void**)&d_C, sizeof(float) * n);
  CHECK_ERR(err);

  // Copying memory to device
  err = cudaMemcpy(d_A, h_A, sizeof(float) * n, cudaMemcpyHostToDevice);
  CHECK_ERR(err);

  err = cudaMemcpy(d_B, h_B, sizeof(float) * n, cudaMemcpyHostToDevice);
  CHECK_ERR(err);

  // Calling the kernel
  vecAdd<<<ceil(n / 256.0), 256>>>(d_A, d_B, d_C, n);

  // Copying results back to host
  err = cudaMemcpy(h_C, d_C, sizeof(float) * n, cudaMemcpyDeviceToHost);
  CHECK_ERR(err);

  EXPECT_EQ(h_C[0], 1.0);
  for (int i = 1; i < n - 1; ++i) {
    EXPECT_EQ(h_C[i], 11.0);
  }
  EXPECT_EQ(h_C[9], 1.0);
}
paddle/scripts/travis/docs.sh
...
@@ -60,7 +60,6 @@ function deploy_docs() {
deploy_docs "master" "."
deploy_docs "develop" "./develop/"
deploy_docs "release/0.10.0" "./release/0.10.0/"

# Check is there anything changed.
set +e
...