Commit b6d89204, authored by peizhilin

Merge branch 'windows/build' into windows/online

@@ -130,6 +130,11 @@ if (APPLE OR WIN32)
         "Disable MKL for building on mac and windows" FORCE)
 endif()
 
+if (WIN32)
+    set(WITH_AVX OFF CACHE STRING
+        "Disable AVX when compiling for Windows" FORCE)
+endif()
+
 set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
     "A path setting third party libraries download & build directories.")
...
@@ -85,9 +85,7 @@ function(op_library TARGET)
   if (WIN32)
     # remove windows unsupported op, because windows has no nccl, no warpctc such ops.
     foreach(windows_unsupport_op "nccl_op" "gen_nccl_id_op" "warpctc_op"
-        # "hierarchical_sigmoid_op" "cumsum_op" "channel_send_op" "channel_create_op" "channel_close_op" "channel_recv_op")
-        # "crf_decoding_op" "select_op" "lstmp_op" "gru_op" "fusion_gru_op" "lstm_op" "fusion_lstm_op"
+        "fusion_seqconv_eltadd_relu_op" "channel_send_op" "channel_create_op" "channel_close_op" "channel_recv_op")
       if ("${TARGET}" STREQUAL "${windows_unsupport_op}")
         return()
       endif()
...
@@ -70,17 +70,20 @@ int main()
   return 0;
 }" AVX_FOUND)
 
-# Check AVX 2
-set(CMAKE_REQUIRED_FLAGS ${AVX2_FLAG})
-set(AVX2_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <immintrin.h>
-int main()
-{
-  __m256i a = _mm256_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4);
-  __m256i result = _mm256_abs_epi32 (a);
-  return 0;
-}" AVX2_FOUND)
+# disable AVX2 by default on windows
+if(NOT WIN32)
+  # Check AVX 2
+  set(CMAKE_REQUIRED_FLAGS ${AVX2_FLAG})
+  set(AVX2_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
+  CHECK_CXX_SOURCE_RUNS("
+  #include <immintrin.h>
+  int main()
+  {
+    __m256i a = _mm256_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4);
+    __m256i result = _mm256_abs_epi32 (a);
+    return 0;
+  }" AVX2_FOUND)
+endif(NOT WIN32)
 
 # Check AVX512F
 set(CMAKE_REQUIRED_FLAGS ${AVX512F_FLAG})
...
@@ -48,9 +48,9 @@ endif()
 set(COMMON_OP_DEPS "")
 set(COMMON_OP_DEPS ${COMMON_OP_DEPS} xxhash selected_rows_functor selected_rows lod_tensor maxouting unpooling pooling lod_rank_table context_project sequence_pooling executor sequence_padding sequence_scale cos_sim_functor memory concat_and_split cross_entropy softmax vol2col im2col sampler)
-set(COMMON_OP_DEPS ${COMMON_OP_DEPS} lstm_compute matrix_bit_code gru_compute activation_functions jit_kernel)
+set(COMMON_OP_DEPS ${COMMON_OP_DEPS} lstm_compute matrix_bit_code sequence2batch gru_compute activation_functions jit_kernel)
 if (NOT WIN32)
-    set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence2batch dynload_warpctc)
+    set(COMMON_OP_DEPS ${COMMON_OP_DEPS} dynload_warpctc)
 endif()
 if (WITH_GPU)
     set(COMMON_OP_DEPS ${COMMON_OP_DEPS} depthwise_conv cub)
...
@@ -111,7 +111,7 @@ class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
     auto pre_out_mat = EigenMatrix<T>::From(*pre_out);
     auto pre_out_grad_mat = EigenMatrix<T>::From(pre_out_grad);
     auto out_grad_mat = EigenMatrix<T>::From(*out_grad);
-    Eigen::array<int, 2> bcast({{1, static_cast<int>(pre_out_grad.dims()[1])}});
+    Eigen::array<int, 2> bcast{1, static_cast<int>(pre_out_grad.dims()[1])};
     // softrelu derivative
     pre_out_grad_mat.device(place) =
...
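The `bcast` change above only swaps double-brace construction for plain brace initialization (friendlier to MSVC); the array still describes a broadcast that repeats each sample's output gradient across the code-length dimension. A minimal NumPy sketch of that broadcast, with array names and shapes chosen for illustration rather than taken from the patch:

    import numpy as np

    # Illustrative shapes: 4 samples, code length 3.
    pre_out_grad = np.ones((4, 3))
    out_grad = np.array([[0.5], [1.0], [2.0], [4.0]])  # one value per sample

    # bcast{1, dims[1]} tiles the (N, 1) gradient across all 3 columns,
    # which is ordinary broadcasting in NumPy:
    result = pre_out_grad * out_grad
    # equivalently: pre_out_grad * np.tile(out_grad, (1, pre_out_grad.shape[1]))
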
@@ -81,4 +81,3 @@ if(WITH_XBYAK)
 endif()
 cc_library(jit_kernel SRCS ${JIT_KERNEL_SRCS} DEPS ${JIT_KERNEL_DEPS})
 cc_test(jit_kernel_test SRCS jit_kernel_test.cc DEPS jit_kernel)
@@ -67,7 +67,7 @@ inline constexpr size_t FindLastSet(size_t x) {
                : (std::is_same<size_t, unsigned long>::value  // NOLINT
                       ? (x ? 8 * sizeof(x) - __builtin_clzl(x) : 0)
                       : (x ? 8 * sizeof(x) - __builtin_clzll(x) : 0));
+}
 #else
 // windows don't have built-in clz, ctz function
 template <typename T>
@@ -92,7 +92,6 @@ inline int clz(const T& value) {
 inline size_t FindLastSet(size_t x) { return sizeof(size_t) * 8 - clz(x); }
 #endif  // !_WIN32
-}
 struct SimpleCode {
   SimpleCode(size_t code, size_t num_classes) : c_(code + num_classes) {}
...
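The two hunks above move the closing brace of the non-Windows `FindLastSet` to before the `#else`, instead of a stray `}` after the `#endif`. Both preprocessor branches implement the same function: the 1-based position of the highest set bit, via `__builtin_clz*` on GCC/Clang and a hand-rolled `clz` on Windows. A short Python reference for the intended semantics (reference only, not part of the patch):

    # Python's int.bit_length() matches 8 * sizeof(x) - clz(x) for x != 0,
    # and returns 0 for x == 0, just like FindLastSet.
    def find_last_set(x: int) -> int:
        return x.bit_length()

    assert find_last_set(0) == 0
    assert find_last_set(1) == 1   # 0b1
    assert find_last_set(10) == 4  # 0b1010
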
@@ -170,12 +170,6 @@ __all__ = [
     'bilinear_tensor_product',
 ]
 
-# To avoid the api checker complains
-if os.name == 'nt':
-    __all__.remove('dynamic_lstm')
-    __all__.remove('crf_decoding')
-    __all__.remove('roi_pool')
-
 
 def fc(input,
        size,
@@ -349,10 +343,8 @@ def embedding(input,
     return tmp
 
-if os.name != 'nt':
-
-    @templatedoc(op_type="lstm")
-    def dynamic_lstm(input,
+@templatedoc(op_type="lstm")
+def dynamic_lstm(input,
                  size,
                  h_0=None,
                  c_0=None,
@@ -969,10 +961,8 @@ def linear_chain_crf(input, label, param_attr=None):
     return log_likelihood
 
-if os.name != 'nt':
-
-    @templatedoc()
-    def crf_decoding(input, param_attr, label=None):
+@templatedoc()
+def crf_decoding(input, param_attr, label=None):
     """
     ${comment}
@@ -998,11 +988,9 @@ if os.name != 'nt':
         dtype=helper.input_dtype())
     helper.append_op(
         type='crf_decoding',
-        inputs={
-            "Emission": [input],
-            "Transition": transition,
-            "Label": label
-        },
+        inputs={"Emission": [input],
+                "Transition": transition,
+                "Label": label},
         outputs={"ViterbiPath": [viterbi_path]})
 
     return viterbi_path
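With the `os.name != 'nt'` guard gone, `crf_decoding` is defined and exported the same way on every platform. A hedged usage sketch; the layer shapes and the shared `crfw` parameter name are chosen for illustration, not taken from this patch:

    import paddle.fluid as fluid

    # Emission scores from the network and the gold label sequence.
    emission = fluid.layers.data(name='emission', shape=[10], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    # The training op and the decoding op share the transition parameter.
    crf_cost = fluid.layers.linear_chain_crf(
        input=emission, label=label, param_attr=fluid.ParamAttr(name='crfw'))
    viterbi_path = fluid.layers.crf_decoding(
        input=emission, param_attr=fluid.ParamAttr(name='crfw'))
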
@@ -5599,14 +5587,8 @@ def label_smooth(label,
     return smooth_label
 
-if os.name != 'nt':
-
-    @templatedoc()
-    def roi_pool(input,
-                 rois,
-                 pooled_height=1,
-                 pooled_width=1,
-                 spatial_scale=1.0):
+@templatedoc()
+def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
     """
     ${comment}
...
@@ -100,12 +100,12 @@ Examples:
     >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
 """
 
-if os.name != 'nt':
-    __all__ += ['cumsum']
+__all__ += ['cumsum']
 
-    _cum_sum_ = generate_layer_fn('cumsum')
+_cum_sum_ = generate_layer_fn('cumsum')
 
-    def cumsum(x, axis=None, exclusive=None, reverse=None):
+
+def cumsum(x, axis=None, exclusive=None, reverse=None):
     locals_var = locals().keys()
     kwargs = dict()
     for name in locals_var:
@@ -114,12 +114,13 @@ if os.name != 'nt':
             kwargs[name] = val
     return _cum_sum_(**kwargs)
 
-    cumsum.__doc__ = _cum_sum_.__doc__ + """
-    Examples:
+
+cumsum.__doc__ = _cum_sum_.__doc__ + """
+Examples:
 
     >>> data = fluid.layers.data(name="input", shape=[32, 784])
     >>> result = fluid.layers.cumsum(data, axis=0)
 """
 
 __all__ += ['thresholded_relu']
...
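`cumsum` likewise becomes available on Windows now that its op is no longer excluded from the build. For readers unfamiliar with the wrapper's flags, a NumPy sketch of the scan semantics it forwards to the C++ kernel; the `exclusive`/`reverse` behavior shown here is the conventional definition and is assumed, not taken from this patch:

    import numpy as np

    x = np.array([1, 2, 3, 4])
    np.cumsum(x)                              # inclusive scan: [1, 3, 6, 10]
    np.cumsum(x[::-1])[::-1]                  # reverse=True:   [10, 9, 7, 4]
    np.concatenate(([0], np.cumsum(x)[:-1]))  # exclusive=True: [0, 1, 3, 6]
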
@@ -34,6 +34,7 @@ def wait_server_ready(endpoints):
     """
     while True:
         all_ok = True
+        not_ready_endpoints = []
         for ep in endpoints:
             ip_port = ep.split(":")
             with closing(socket.socket(socket.AF_INET,
@@ -42,8 +43,11 @@ def wait_server_ready(endpoints):
                 result = sock.connect_ex((ip_port[0], int(ip_port[1])))
                 if result != 0:
                     all_ok = False
+                    not_ready_endpoints.append(ep)
         if not all_ok:
             sys.stderr.write("pserver not ready, wait 3 sec to retry...\n")
+            sys.stderr.write("not ready endpoints:" + str(not_ready_endpoints) +
+                             "\n")
             sys.stderr.flush()
             time.sleep(3)
         else:
...
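The added lines collect every endpoint whose `connect_ex` probe fails, so the retry message names the pservers that are still down instead of only saying "not ready". A self-contained sketch of the patched loop; the imports, docstring omission, and socket timeout are reconstructed for illustration, and only the `not_ready_endpoints` lines come from the diff:

    import socket
    import sys
    import time
    from contextlib import closing

    def wait_server_ready(endpoints):
        while True:
            all_ok = True
            not_ready_endpoints = []
            for ep in endpoints:
                ip_port = ep.split(":")
                with closing(socket.socket(socket.AF_INET,
                                           socket.SOCK_STREAM)) as sock:
                    sock.settimeout(2)  # timeout chosen for illustration
                    result = sock.connect_ex((ip_port[0], int(ip_port[1])))
                    if result != 0:
                        all_ok = False
                        not_ready_endpoints.append(ep)
            if not all_ok:
                sys.stderr.write("pserver not ready, wait 3 sec to retry...\n")
                sys.stderr.write("not ready endpoints:" +
                                 str(not_ready_endpoints) + "\n")
                sys.stderr.flush()
                time.sleep(3)
            else:
                break
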