Unverified commit 2241c370 authored by 走神的阿圆 and committed by GitHub

Update backend to v2.0.0. (#617)

Parent 9ab97a4d
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.2)
project(VisualDL)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
endif(CCACHE_FOUND)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "-fPIC")
set(VISUALDL_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
    "Path under which third-party libraries are downloaded and built.")
################################ Configurations #######################################
option(WITH_DOC "Compile VisualDL with documentation" OFF)
option(WITH_TESTING "Compile VisualDL with unit testing" ON)
option(WITH_PYTHON3 "Compile VisualDL with Python 3" OFF)
option(WITH_JULIA "Compile VisualDL with Julia" OFF)
option(ON_RELEASE "RELEASE mode" ON)
include(external/zlib) # download, build, install zlib
if (NOT ON_RELEASE)
include(external/gflags) # download, build, install gflags
include(external/glog) # download, build, install glog
endif(NOT ON_RELEASE)
include(external/gtest) # download, build, install gtest
include(external/eigen) # download eigen
include(external/pybind11) # download pybind11
include(external/protobuf) # download, build, install protobuf
include(external/python) # find python and set path
if (NOT ON_RELEASE)
message(STATUS "building in debug mode")
add_definitions(-DVISUALDL_WITH_GLOG)
endif(NOT ON_RELEASE)
if (MSVC)
add_definitions(-DPROTOBUF_USE_DLLS)
add_definitions(-DNOMINMAX) # No min/max macros
endif(MSVC)
include_directories(${PROJECT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/visualdl/storage)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/visualdl/logic)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/visualdl/python)
if(WITH_JULIA)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/visualdl/julia)
endif()
# compile tests only in debug (non-release) mode
if (NOT ON_RELEASE)
add_executable(vl_test
${PROJECT_SOURCE_DIR}/visualdl/test.cc
${PROJECT_SOURCE_DIR}/visualdl/logic/sdk_test.cc
${PROJECT_SOURCE_DIR}/visualdl/logic/histogram_test.cc
${PROJECT_SOURCE_DIR}/visualdl/storage/storage_test.cc
${PROJECT_SOURCE_DIR}/visualdl/storage/test_binary_record.cc
${PROJECT_SOURCE_DIR}/visualdl/utils/test_concurrency.cc
${PROJECT_SOURCE_DIR}/visualdl/utils/test_image.cc
${PROJECT_SOURCE_DIR}/visualdl/utils/concurrency.h
${PROJECT_SOURCE_DIR}/visualdl/utils/filesystem.h
)
target_link_libraries(vl_test sdk storage binary_record entry tablet im gtest glog protobuf gflags pthread eigen3)
enable_testing()
add_test(NAME vstest COMMAND ./vl_test)
endif(NOT ON_RELEASE)
if(WITH_DOC)
add_subdirectory(docs)
endif()
# - This module looks for Sphinx
# Find the Sphinx documentation generator
#
# This module defines
# SPHINX_EXECUTABLE
# SPHINX_FOUND
find_program(SPHINX_EXECUTABLE
NAMES sphinx-build
PATHS
/usr/bin
/usr/local/bin
/opt/local/bin
DOC "Sphinx documentation generator"
)
if( NOT SPHINX_EXECUTABLE )
set(_Python_VERSIONS
2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0 1.6 1.5
)
foreach( _version ${_Python_VERSIONS} )
set( _sphinx_NAMES sphinx-build-${_version} )
find_program( SPHINX_EXECUTABLE
NAMES ${_sphinx_NAMES}
PATHS
/usr/bin
/usr/local/bin
    /opt/local/bin
DOC "Sphinx documentation generator"
)
endforeach()
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Sphinx DEFAULT_MSG
SPHINX_EXECUTABLE
)
option( SPHINX_HTML_OUTPUT "Build a single HTML with the whole content." ON )
option( SPHINX_DIRHTML_OUTPUT "Build HTML pages, but with a single directory per document." OFF )
option( SPHINX_HTMLHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in htmlhelp." OFF )
option( SPHINX_QTHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in qthelp." OFF )
option( SPHINX_DEVHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in devhelp." OFF )
option( SPHINX_EPUB_OUTPUT "Build HTML pages with additional information for building a documentation collection in epub." OFF )
option( SPHINX_LATEX_OUTPUT "Build LaTeX sources that can be compiled to a PDF document using pdflatex." OFF )
option( SPHINX_MAN_OUTPUT "Build manual pages in groff format for UNIX systems." OFF )
option( SPHINX_TEXT_OUTPUT "Build plain text files." OFF )
mark_as_advanced(
SPHINX_EXECUTABLE
SPHINX_HTML_OUTPUT
SPHINX_DIRHTML_OUTPUT
SPHINX_HTMLHELP_OUTPUT
SPHINX_QTHELP_OUTPUT
SPHINX_DEVHELP_OUTPUT
SPHINX_EPUB_OUTPUT
SPHINX_LATEX_OUTPUT
SPHINX_MAN_OUTPUT
SPHINX_TEXT_OUTPUT
)
function( Sphinx_add_target target_name builder conf cache source destination )
add_custom_target( ${target_name} ALL
COMMAND ${SPHINX_EXECUTABLE} -b ${builder}
-d ${cache}
-c ${conf}
${source}
${destination}
COMMENT "Generating sphinx documentation: ${builder}"
COMMAND cd ${destination} && ln -sf ./index_*.html index.html
)
set_property(
DIRECTORY APPEND PROPERTY
ADDITIONAL_MAKE_CLEAN_FILES
${destination}
)
endfunction()
# Target dependencies can be optionally listed at the end.
function( Sphinx_add_targets target_base_name conf source base_destination )
set( _dependencies )
foreach( arg IN LISTS ARGN )
set( _dependencies ${_dependencies} ${arg} )
endforeach()
if( ${SPHINX_HTML_OUTPUT} )
Sphinx_add_target( ${target_base_name}_html html ${conf} ${source} ${base_destination}/html )
add_dependencies( ${target_base_name}_html ${_dependencies} )
endif()
if( ${SPHINX_DIRHTML_OUTPUT} )
Sphinx_add_target( ${target_base_name}_dirhtml dirhtml ${conf} ${source} ${base_destination}/dirhtml )
add_dependencies( ${target_base_name}_dirhtml ${_dependencies} )
endif()
if( ${SPHINX_QTHELP_OUTPUT} )
Sphinx_add_target( ${target_base_name}_qthelp qthelp ${conf} ${source} ${base_destination}/qthelp )
add_dependencies( ${target_base_name}_qthelp ${_dependencies} )
endif()
if( ${SPHINX_DEVHELP_OUTPUT} )
Sphinx_add_target( ${target_base_name}_devhelp devhelp ${conf} ${source} ${base_destination}/devhelp )
add_dependencies( ${target_base_name}_devhelp ${_dependencies} )
endif()
if( ${SPHINX_EPUB_OUTPUT} )
Sphinx_add_target( ${target_base_name}_epub epub ${conf} ${source} ${base_destination}/epub )
add_dependencies( ${target_base_name}_epub ${_dependencies} )
endif()
if( ${SPHINX_LATEX_OUTPUT} )
Sphinx_add_target( ${target_base_name}_latex latex ${conf} ${source} ${base_destination}/latex )
add_dependencies( ${target_base_name}_latex ${_dependencies} )
endif()
if( ${SPHINX_MAN_OUTPUT} )
Sphinx_add_target( ${target_base_name}_man man ${conf} ${source} ${base_destination}/man )
add_dependencies( ${target_base_name}_man ${_dependencies} )
endif()
if( ${SPHINX_TEXT_OUTPUT} )
Sphinx_add_target( ${target_base_name}_text text ${conf} ${source} ${base_destination}/text )
add_dependencies( ${target_base_name}_text ${_dependencies} )
endif()
if( ${BUILD_TESTING} )
Sphinx_add_target( ${target_base_name}_linkcheck linkcheck ${conf} ${source} ${base_destination}/linkcheck )
add_dependencies( ${target_base_name}_linkcheck ${_dependencies} )
endif()
endfunction()
INCLUDE(ExternalProject)
SET(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3)
INCLUDE_DIRECTORIES(${EIGEN_SOURCE_DIR}/src/extern_eigen3)
ExternalProject_Add(
extern_eigen3
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/libigl/eigen.git"
GIT_TAG "master"
PREFIX ${EIGEN_SOURCE_DIR}
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
if (${CMAKE_VERSION} VERSION_LESS "3.3.0")
set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/eigen3_dummy.c)
file(WRITE ${dummyfile} "const char * dummy_eigen3 = \"${dummyfile}\";")
add_library(eigen3 STATIC ${dummyfile})
else()
add_library(eigen3 INTERFACE)
endif()
add_dependencies(eigen3 extern_eigen3)
LIST(APPEND external_project_dependencies eigen3)
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags)
SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
IF(WIN32)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ELSE(WIN32)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
ExternalProject_Add(
extern_gflags
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/gflags/gflags.git"
GIT_TAG 77592648e3f3be87d6c7123eb81cbad75f9aef5a
PREFIX ${GFLAGS_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DBUILD_TESTING=OFF
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
ADD_DEPENDENCIES(gflags extern_gflags)
LIST(APPEND external_project_dependencies gflags)
IF(WITH_C_API)
INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags)
IF(ANDROID)
INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib)
ENDIF()
ENDIF()
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
SET(GLOG_SOURCES_DIR ${THIRD_PARTY_PATH}/glog)
SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog)
SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
IF(WIN32)
SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
ELSE(WIN32)
SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
ExternalProject_Add(
extern_glog
${EXTERNAL_PROJECT_LOG_ARGS}
DEPENDS gflags
GIT_REPOSITORY "https://github.com/google/glog.git"
GIT_TAG v0.3.5
PREFIX ${GLOG_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=${GLOG_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DWITH_GFLAGS=ON
-Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
-DBUILD_TESTING=OFF
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GLOG_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR:PATH=${GLOG_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
ADD_LIBRARY(glog STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARIES})
ADD_DEPENDENCIES(glog extern_glog gflags)
LINK_LIBRARIES(glog gflags)
LIST(APPEND external_project_dependencies glog)
IF(WITH_C_API)
INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog)
IF(ANDROID)
INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib)
ENDIF()
ENDIF()
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IF(WITH_TESTING)
ENABLE_TESTING()
INCLUDE(ExternalProject)
SET(GTEST_SOURCES_DIR ${THIRD_PARTY_PATH}/gtest)
SET(GTEST_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gtest)
SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE)
INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})
IF(WIN32)
set(GTEST_LIBRARIES
"${GTEST_INSTALL_DIR}/lib/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
set(GTEST_MAIN_LIBRARIES
"${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
ELSE(WIN32)
set(GTEST_LIBRARIES
"${GTEST_INSTALL_DIR}/lib/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE)
set(GTEST_MAIN_LIBRARIES
"${GTEST_INSTALL_DIR}/lib/libgtest_main.a" CACHE FILEPATH "gtest main libraries." FORCE)
ENDIF(WIN32)
IF(WITH_MKLML)
# wait for the mklml download to complete
SET(GTEST_DEPENDS ${MKLML_PROJECT})
ENDIF()
ExternalProject_Add(
extern_gtest
${EXTERNAL_PROJECT_LOG_ARGS}
DEPENDS ${GTEST_DEPENDS}
GIT_REPOSITORY "https://github.com/google/googletest.git"
GIT_TAG "release-1.8.0"
PREFIX ${GTEST_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DBUILD_GMOCK=ON
-Dgtest_disable_pthreads=ON
-Dgtest_force_shared_crt=ON
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
ADD_LIBRARY(gtest STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET gtest PROPERTY IMPORTED_LOCATION ${GTEST_LIBRARIES})
ADD_DEPENDENCIES(gtest extern_gtest)
ADD_LIBRARY(gtest_main STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET gtest_main PROPERTY IMPORTED_LOCATION ${GTEST_MAIN_LIBRARIES})
ADD_DEPENDENCIES(gtest_main extern_gtest)
LIST(APPEND external_project_dependencies gtest gtest_main)
ENDIF(WITH_TESTING)
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
# Always invoke `FIND_PACKAGE(Protobuf)` to import the function protobuf_generate_cpp
FIND_PACKAGE(Protobuf QUIET)
SET(PROTOBUF_FOUND "OFF")
if(NOT COMMAND protobuf_generate_python) # before CMake 3.4, protobuf_generate_python is not defined.
function(protobuf_generate_python SRCS)
# shameless copy from https://github.com/Kitware/CMake/blob/master/Modules/FindProtobuf.cmake
if(NOT ARGN)
message(SEND_ERROR "Error: PROTOBUF_GENERATE_PYTHON() called without any proto files")
return()
endif()
if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
# Create an include path for each file specified
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(ABS_PATH ${ABS_FIL} PATH)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
else()
set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
endif()
if(DEFINED Protobuf_IMPORT_DIRS)
foreach(DIR ${Protobuf_IMPORT_DIRS})
get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protobuf_include_path -I ${ABS_PATH})
endif()
endforeach()
endif()
set(${SRCS})
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(FIL_WE ${FIL} NAME_WE)
if(NOT PROTOBUF_GENERATE_CPP_APPEND_PATH)
get_filename_component(FIL_DIR ${FIL} DIRECTORY)
if(FIL_DIR)
set(FIL_WE "${FIL_DIR}/${FIL_WE}")
endif()
endif()
list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py")
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py"
COMMAND ${Protobuf_PROTOC_EXECUTABLE} --python_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
COMMENT "Running Python protocol buffer compiler on ${FIL}"
VERBATIM )
endforeach()
set(${SRCS} ${${SRCS}} PARENT_SCOPE)
endfunction()
endif()
# Print and set the protobuf library information, then
# return from the current listfile.
macro(PROMPT_PROTOBUF_LIB)
SET(protobuf_DEPS ${ARGN})
MESSAGE(STATUS "Protobuf protoc executable: ${PROTOBUF_PROTOC_EXECUTABLE}")
MESSAGE(STATUS "Protobuf library: ${PROTOBUF_LIBRARY}")
MESSAGE(STATUS "Protobuf version: ${PROTOBUF_VERSION}")
INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR})
# Assuming that all the protobuf libraries are of the same type.
IF(${PROTOBUF_LIBRARY} MATCHES "${CMAKE_STATIC_LIBRARY_SUFFIX}$")
SET(protobuf_LIBTYPE STATIC)
ELSEIF(${PROTOBUF_LIBRARY} MATCHES "${CMAKE_SHARED_LIBRARY_SUFFIX}$")
SET(protobuf_LIBTYPE SHARED)
ELSE()
MESSAGE(FATAL_ERROR "Unknown library type: ${PROTOBUF_LIBRARY}")
ENDIF()
ADD_LIBRARY(protobuf ${protobuf_LIBTYPE} IMPORTED GLOBAL)
SET_PROPERTY(TARGET protobuf PROPERTY IMPORTED_LOCATION ${PROTOBUF_LIBRARY})
ADD_LIBRARY(protobuf_lite ${protobuf_LIBTYPE} IMPORTED GLOBAL)
SET_PROPERTY(TARGET protobuf_lite PROPERTY IMPORTED_LOCATION ${PROTOBUF_LITE_LIBRARY})
ADD_LIBRARY(libprotoc ${protobuf_LIBTYPE} IMPORTED GLOBAL)
SET_PROPERTY(TARGET libprotoc PROPERTY IMPORTED_LOCATION ${PROTOC_LIBRARY})
ADD_EXECUTABLE(protoc IMPORTED GLOBAL)
SET_PROPERTY(TARGET protoc PROPERTY IMPORTED_LOCATION ${PROTOBUF_PROTOC_EXECUTABLE})
# FindProtobuf.cmake uses `Protobuf_PROTOC_EXECUTABLE`;
# set it to make `protobuf_generate_cpp` happy.
SET(Protobuf_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
FOREACH(dep ${protobuf_DEPS})
ADD_DEPENDENCIES(protobuf ${dep})
ADD_DEPENDENCIES(protobuf_lite ${dep})
ADD_DEPENDENCIES(libprotoc ${dep})
ADD_DEPENDENCIES(protoc ${dep})
ENDFOREACH()
LIST(APPEND external_project_dependencies protobuf)
RETURN()
endmacro()
macro(SET_PROTOBUF_VERSION)
EXEC_PROGRAM(${PROTOBUF_PROTOC_EXECUTABLE} ARGS --version OUTPUT_VARIABLE PROTOBUF_VERSION)
STRING(REGEX MATCH "[0-9]+\\.[0-9]+" PROTOBUF_VERSION "${PROTOBUF_VERSION}")
endmacro()
set(PROTOBUF_ROOT "" CACHE PATH "Folder contains protobuf")
if (NOT "${PROTOBUF_ROOT}" STREQUAL "")
find_path(PROTOBUF_INCLUDE_DIR google/protobuf/message.h PATHS ${PROTOBUF_ROOT}/include)
find_library(PROTOBUF_LIBRARY protobuf PATHS ${PROTOBUF_ROOT}/lib)
find_library(PROTOBUF_LITE_LIBRARY protobuf-lite PATHS ${PROTOBUF_ROOT}/lib)
find_library(PROTOBUF_PROTOC_LIBRARY protoc PATHS ${PROTOBUF_ROOT}/lib)
find_program(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS ${PROTOBUF_ROOT}/bin)
if (PROTOBUF_INCLUDE_DIR AND PROTOBUF_LIBRARY AND PROTOBUF_LITE_LIBRARY AND PROTOBUF_PROTOC_LIBRARY AND PROTOBUF_PROTOC_EXECUTABLE)
message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.")
SET_PROTOBUF_VERSION()
PROMPT_PROTOBUF_LIB()
else()
message(WARNING "Cannot find protobuf library in ${PROTOBUF_ROOT}.")
endif()
endif()
FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST)
STRING(REPLACE "extern_" "" TARGET_DIR_NAME "${TARGET_NAME}")
SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/${TARGET_DIR_NAME})
SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/${TARGET_DIR_NAME})
SET(${TARGET_NAME}_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" PARENT_SCOPE)
SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" PARENT_SCOPE)
SET(${TARGET_NAME}_LITE_LIBRARY
"${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite${CMAKE_STATIC_LIBRARY_SUFFIX}"
PARENT_SCOPE)
SET(${TARGET_NAME}_LIBRARY
"${PROTOBUF_INSTALL_DIR}/lib/libprotobuf${CMAKE_STATIC_LIBRARY_SUFFIX}"
PARENT_SCOPE)
SET(${TARGET_NAME}_PROTOC_LIBRARY
"${PROTOBUF_INSTALL_DIR}/lib/libprotoc${CMAKE_STATIC_LIBRARY_SUFFIX}"
PARENT_SCOPE)
SET(${TARGET_NAME}_PROTOC_EXECUTABLE
"${PROTOBUF_INSTALL_DIR}/bin/protoc${CMAKE_EXECUTABLE_SUFFIX}"
PARENT_SCOPE)
SET(OPTIONAL_CACHE_ARGS "")
SET(OPTIONAL_ARGS "")
IF(BUILD_FOR_HOST)
SET(OPTIONAL_ARGS "-Dprotobuf_WITH_ZLIB=OFF")
ELSE()
SET(OPTIONAL_ARGS
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
"-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
"-Dprotobuf_WITH_ZLIB=ON"
"-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}"
${EXTERNAL_OPTIONAL_ARGS})
SET(OPTIONAL_CACHE_ARGS "-DZLIB_ROOT:STRING=${ZLIB_ROOT}")
ENDIF()
IF(MSVC)
ExternalProject_Add(
${TARGET_NAME}
${EXTERNAL_PROJECT_LOG_ARGS}
PREFIX ${PROTOBUF_SOURCES_DIR}
UPDATE_COMMAND ""
GIT_REPOSITORY "https://github.com/google/protobuf.git"
GIT_TAG "9f69353562fe1fbb3fbd11345ea3676b0eb267cd"
CONFIGURE_COMMAND
${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake
${OPTIONAL_ARGS}
-G ${CMAKE_GENERATOR}
-Dprotobuf_BUILD_SHARED_LIBS=ON
-DCMAKE_CONFIGURATION_TYPES=${CMAKE_BUILD_TYPE}
-Dprotobuf_BUILD_TESTS=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=lib
CMAKE_CACHE_ARGS
-DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
${OPTIONAL_CACHE_ARGS}
)
ELSE()
ExternalProject_Add(
${TARGET_NAME}
${EXTERNAL_PROJECT_LOG_ARGS}
PREFIX ${PROTOBUF_SOURCES_DIR}
UPDATE_COMMAND ""
DEPENDS zlib
URL https://github.com/google/protobuf/archive/v3.1.0.zip
CONFIGURE_COMMAND
${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake
${OPTIONAL_ARGS}
-Dprotobuf_BUILD_TESTS=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=lib
CMAKE_CACHE_ARGS
-DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
${OPTIONAL_CACHE_ARGS}
)
ENDIF(MSVC)
ENDFUNCTION()
SET(PROTOBUF_VERSION 3.1)
IF(CMAKE_CROSSCOMPILING)
build_protobuf(protobuf_host TRUE)
LIST(APPEND external_project_dependencies protobuf_host)
SET(PROTOBUF_PROTOC_EXECUTABLE ${protobuf_host_PROTOC_EXECUTABLE}
CACHE FILEPATH "protobuf executable." FORCE)
ENDIF()
IF(NOT PROTOBUF_FOUND)
build_protobuf(extern_protobuf FALSE)
SET(PROTOBUF_INCLUDE_DIR ${extern_protobuf_INCLUDE_DIR}
CACHE PATH "protobuf include directory." FORCE)
SET(PROTOBUF_LITE_LIBRARY ${extern_protobuf_LITE_LIBRARY}
CACHE FILEPATH "protobuf lite library." FORCE)
SET(PROTOBUF_LIBRARY ${extern_protobuf_LIBRARY}
CACHE FILEPATH "protobuf library." FORCE)
SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY}
CACHE FILEPATH "protoc library." FORCE)
IF(WITH_C_API)
INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf)
IF(ANDROID)
INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib)
ENDIF()
ENDIF()
IF(CMAKE_CROSSCOMPILING)
PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf)
ELSE()
SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE}
CACHE FILEPATH "protobuf executable." FORCE)
PROMPT_PROTOBUF_LIB(extern_protobuf)
ENDIF()
ENDIF(NOT PROTOBUF_FOUND)
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(ExternalProject)
set(PYBIND_SOURCE_DIR ${THIRD_PARTY_PATH}/pybind)
include_directories(${PYBIND_SOURCE_DIR}/src/extern_pybind/include)
ExternalProject_Add(
extern_pybind
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/pybind/pybind11.git"
GIT_TAG "v2.2.2"
PREFIX ${PYBIND_SOURCE_DIR}
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
if(${CMAKE_VERSION} VERSION_LESS "3.3.0")
set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/pybind_dummy.c)
file(WRITE ${dummyfile} "const char * dummy_pybind = \"${dummyfile}\";")
add_library(pybind STATIC ${dummyfile})
else()
add_library(pybind INTERFACE)
endif()
add_dependencies(pybind extern_pybind)
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
SET(ZLIB_SOURCES_DIR ${THIRD_PARTY_PATH}/zlib)
SET(ZLIB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/zlib)
SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE)
SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE)
IF(WIN32)
SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/zlibstatic.lib" CACHE FILEPATH "zlib library." FORCE)
ELSE(WIN32)
SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR})
ExternalProject_Add(
zlib
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/madler/zlib.git"
GIT_TAG "v1.2.8"
PREFIX ${ZLIB_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
-DBUILD_SHARED_LIBS=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_MACOSX_RPATH=ON
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ZLIB_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
LIST(APPEND external_project_dependencies zlib)
IF(WITH_C_API)
INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib)
IF(ANDROID)
INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib)
ENDIF()
ENDIF()
# Find if a Python module is installed
# Found at http://www.cmake.org/pipermail/cmake/2011-January/041666.html
# Usage: find_python_module(PyQt4 REQUIRED)
function(find_python_module module)
string(TOUPPER ${module} module_upper)
if(NOT PY_${module_upper})
if(ARGC GREATER 1 AND ARGV1 STREQUAL "REQUIRED")
set(${module}_FIND_REQUIRED TRUE)
else()
set(${module}_FIND_REQUIRED FALSE)
endif()
# A module's location is usually a directory, but for binary modules
# it's a .so file.
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
"import re, ${module}; print(re.compile('/__init__.py.*').sub('',${module}.__file__))"
RESULT_VARIABLE _${module}_status
OUTPUT_VARIABLE _${module}_location
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT _${module}_status)
set(PY_${module_upper} ${_${module}_location} CACHE STRING
"Location of Python module ${module}")
endif(NOT _${module}_status)
endif(NOT PY_${module_upper})
find_package_handle_standard_args(PY_${module} DEFAULT_MSG PY_${module_upper})
if(NOT PY_${module_upper}_FOUND AND ${module}_FIND_REQUIRED)
message(FATAL_ERROR "python module ${module} is not found")
endif()
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
"import sys, ${module}; sys.stdout.write(${module}.__version__)"
OUTPUT_VARIABLE _${module}_version
RESULT_VARIABLE _${module}_status
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT _${module}_status)
set(PY_${module_upper}_VERSION ${_${module}_version} CACHE STRING
"Version of Python module ${module}")
endif(NOT _${module}_status)
set(PY_${module_upper}_FOUND ${PY_${module_upper}_FOUND} PARENT_SCOPE)
set(PY_${module_upper}_VERSION ${PY_${module_upper}_VERSION} PARENT_SCOPE)
endfunction(find_python_module)
# VisualDL demos
VisualDL supports Python- and C++-based DL frameworks;
below are several demos for different platforms.
## PaddlePaddle
Located in `./paddle`.
This demo visualizes a `vgg` model on the `cifar10` dataset; we visualize the CONV parameters,
which show some interesting patterns.
## PyTorch GAN
Located in `./pytorch-CycleGAN-and-pix2pix`.
This submodule is forked from [pytorch-CycleGAN-and-pix2pix](
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix);
it is a great model, and the generated fake images are quite entertaining.
This demo only works in CycleGAN mode; read the [CycleGAN train doc](https://github.com/Superjomn/pytorch-CycleGAN-and-pix2pix#cyclegan-traintest) and the [changes to the original code](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/compare/master...Superjomn:master) for more information.
## MXNet MNIST
Located in `./mxnet_demo`.
By adding VisualDL callbacks to `model.fit`,
we can use the Python SDK with MXNet.
Note that the calling program can only retrieve parameters in epoch callbacks,
which limits the number of steps available for visualization; a minimal callback sketch follows.
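As an illustration, here is a minimal sketch of such an epoch callback under the VisualDL 1.x API used throughout these demos; the parameter name `fc_weight`, the log directory, and the commented `model.fit` call are hypothetical stand-ins, not code from this demo.

```python
from visualdl import LogWriter

logger = LogWriter("./mxnet_log", sync_cycle=100)
with logger.mode("train"):
    fc_histogram = logger.histogram("histogram/fc_weight", num_buckets=50)


def visualdl_epoch_callback(epoch, symbol, arg_params, aux_params):
    # MXNet's Module API passes the parameter dict to epoch callbacks;
    # "fc_weight" is a hypothetical parameter name used for illustration.
    weights = arg_params["fc_weight"].asnumpy().flatten()
    fc_histogram.add_record(epoch, weights)


# model.fit(train_iter, num_epoch=10,
#           epoch_end_callback=visualdl_epoch_callback)
```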
## PyTorch CNN
Located in `./pytorch`.
This demo shows how to use VisualDL with PyTorch for a CNN on the `cifar10` dataset. We visualize the loss in Scalar,
two convolutional layers in Image, the trend of the conv1 weights in Histogram, and the final model graph
in Graph; a scalar-logging sketch follows.
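For instance, recording the training loss only takes a scalar component. Below is a minimal, self-contained sketch under the same VisualDL 1.x API; the tiny linear model and random batches are hypothetical stand-ins for the demo's CNN and `cifar10` loader.

```python
import torch
import torch.nn as nn
from visualdl import LogWriter

logger = LogWriter("./pytorch_log", sync_cycle=100)
with logger.mode("train"):
    train_loss = logger.scalar("scalars/train_loss")

# hypothetical stand-ins for the demo's CNN and cifar10 data loader
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for step in range(100):
    inputs = torch.randn(8, 3, 32, 32)
    labels = torch.randint(0, 10, (8,))
    optimizer.zero_grad()
    loss = criterion(model(inputs), labels)
    loss.backward()
    optimizer.step()
    # record the loss once per batch so VisualDL can plot the curve
    train_loss.add_record(step, loss.item())
```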
## Caffe2 MNIST
Located in `./caffe2`.
This demo shows how to use VisualDL with Caffe2 for a LeNet model on the `mnist` dataset. We visualize the loss and accuracy in Scalar
and the CONV parameters in Histogram.
# How to use VisualDL in Caffe2
Here we will show you how to use VisualDL with Caffe2 so that you can visualize the Caffe2 training process.
As an example, we train a Caffe2 convolutional neural network on the handwritten-digit [MNIST](http://yann.lecun.com/exdb/mnist/) dataset.
This example is a simplification of the Caffe2 MNIST tutorial
[Example](https://github.com/caffe2/tutorials/blob/master/MNIST.ipynb), with a VisualDL log writer added.
The full demonstration code can be downloaded from [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/caffe2/caffe2_mnist_demo.py).
Make sure you have a working Caffe2 environment before trying the following code. Note that VisualDL requires protobuf 3.5+ to run.
First we initialize loggers for the different types of records as follows:
```python
from visualdl import LogWriter
# create VisualDL logger and directory
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# create 'train' run
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_caffe2_mnist_train_loss = logger.scalar("scalars/scalar_caffe2_mnist_train_loss")
scalar_caffe2_mnist_train_accuracy = logger.scalar("scalars/scalar_caffe2_mnist_train_accuracy")
histogram0 = logger.histogram("histogram/histogram0", num_buckets=50)
histogram1 = logger.histogram("histogram/histogram1", num_buckets=50)
```
For our model, we construct LeNet with the sigmoid activations replaced by ReLUs.
The following shows how Caffe2 defines the input, the operators, and the model.
```python
def AddInput(model, batch_size, db, db_type):
data_uint8, label = model.TensorProtosDBInput(
[], ["data_uint8", "label"], batch_size=batch_size,
db=db, db_type=db_type)
# cast the data to float
data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
# scale data from [0,255] down to [0,1]
data = model.Scale(data, data, scale=float(1. / 256))
# don't need the gradient for the backward pass
data = model.StopGradient(data, data)
return data, label
def AddLeNetModel(model, data):
# Image size: 28 x 28 -> 24 x 24
conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
# Image size: 24 x 24 -> 12 x 12
pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
# Image size: 12 x 12 -> 8 x 8
conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
# Image size: 8 x 8 -> 4 x 4
pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
# 50 * 4 * 4 stands for dim_out from previous layer multiplied by the image size
# Here, the data is flattened from a tensor of dimension 50x4x4 to a vector of length 50*4*4
fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
relu3 = brew.relu(model, fc3, 'relu3')
# Last FC Layer
pred = brew.fc(model, relu3, 'pred', dim_in=500, dim_out=10)
# Softmax Layer
softmax = brew.softmax(model, pred, 'softmax')
return softmax
def AddAccuracy(model, softmax, label):
"""Adds an accuracy op to the model"""
accuracy = brew.accuracy(model, [softmax, label], "accuracy")
return accuracy
def AddTrainingOperators(model, softmax, label):
"""Adds training operators to the model."""
# Compute cross entropy between softmax scores and labels
xent = model.LabelCrossEntropy([softmax, label], 'xent')
# Compute the expected loss
loss = model.AveragedLoss(xent, "loss")
# Track the accuracy of the model
AddAccuracy(model, softmax, label)
# Use the average loss we just computed to add gradient operators to the model
model.AddGradientOperators([loss])
# Specify the optimization algorithm
optimizer.build_sgd(
model,
base_learning_rate=0.1,
policy="step",
stepsize=1,
gamma=0.999,
)
```
Use the Caffe2 model helper to construct the model from the definitions above and prepare for training.
```python
arg_scope = {"order": "NCHW"}
# Create the model helper for the train model
train_model = model_helper.ModelHelper(name="mnist_train", arg_scope=arg_scope)
# Specify the input is from the train lmdb
data, label = AddInput(
train_model, batch_size=64,
db=os.path.join(data_folder, 'mnist-train-nchw-lmdb'),
db_type='lmdb')
# Add the model definition (fc layers, conv layers, softmax, etc.)
softmax = AddLeNetModel(train_model, data)
# Add training operators, specify loss function and optimization algorithm
AddTrainingOperators(train_model, softmax, label)
```
Then we start training and use VisualDL to record scalar and histogram data at the same time.
Here we record the accuracy and loss as scalars and the weights as histograms.
```python
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
total_iters = 200
accuracy = np.zeros(total_iters)
loss = np.zeros(total_iters)
# MAIN TRAINING LOOP!
# Now, we will manually run the network for 200 iterations.
for i in range(total_iters):
workspace.RunNet(train_model.net)
accuracy[i] = workspace.blobs['accuracy']
loss[i] = workspace.blobs['loss']
scalar_caffe2_mnist_train_loss.add_record(i, loss[i])
scalar_caffe2_mnist_train_accuracy.add_record(i, accuracy[i])
conv1_w = workspace.FetchBlob("conv1_w")
conv2_w = workspace.FetchBlob("conv2_w")
histogram0.add_record(i, conv1_w[0].flatten())
histogram1.add_record(i, conv2_w[0].flatten())
# Check the accuracy and loss every so often
if i % 25 == 0:
print("Iter: {}, Loss: {}, Accuracy: {}".format(i, loss[i], accuracy[i]))
```
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import shutil
from caffe2.python import (
brew,
core,
model_helper,
optimizer,
workspace, )
# Here we import LogWriter so that we can write log data while Caffe2 is training
from visualdl import LogWriter
# If you would like to see some really detailed initializations,
# you can change --caffe2_log_level=0 to --caffe2_log_level=-1
core.GlobalInit(['caffe2', '--caffe2_log_level=0'])
print("Necessities imported!")
# This section preps your image and test set in an lmdb database
def DownloadResource(url, path):
    '''Downloads resources from S3 by URL and unzips them to the provided path'''
    import requests
    import zipfile
    import io
print("Downloading... {} to {}".format(url, path))
r = requests.get(url, stream=True)
    z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path)
print("Completed download and extraction.")
# Setup the paths for the necessary directories
current_folder = os.path.join(os.path.expanduser('~'), 'caffe2_notebooks')
data_folder = os.path.join(current_folder, 'tutorial_data', 'mnist')
root_folder = os.path.join(current_folder, 'tutorial_files', 'tutorial_mnist')
db_missing = False
# Check if the data folder already exists
if not os.path.exists(data_folder):
os.makedirs(data_folder)
print("Your data folder was not found!! This was generated: {}".format(
data_folder))
# Check if the training lmdb exists in the data folder
if os.path.exists(os.path.join(data_folder, "mnist-train-nchw-lmdb")):
print("lmdb train db found!")
else:
db_missing = True
# Attempt the download of the db if either was missing
if db_missing:
print("one or both of the MNIST lmbd dbs not found!!")
db_url = "http://download.caffe2.ai/databases/mnist-lmdb.zip"
try:
DownloadResource(db_url, data_folder)
except Exception as ex:
print(
"Failed to download dataset. Please download it manually from {}".
format(db_url))
print("Unzip it and place the two database folders here: {}".format(
data_folder))
raise ex
# Clean up statistics from any old runs
if os.path.exists(root_folder):
print(
"Looks like you ran this before, so we need to cleanup those old files..."
)
shutil.rmtree(root_folder)
os.makedirs(root_folder)
workspace.ResetWorkspace(root_folder)
print("training data folder:" + data_folder)
print("workspace root folder:" + root_folder)
def AddInput(model, batch_size, db, db_type):
data_uint8, label = model.TensorProtosDBInput(
[], ["data_uint8", "label"],
batch_size=batch_size,
db=db,
db_type=db_type)
# cast the data to float
data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
# scale data from [0,255] down to [0,1]
data = model.Scale(data, data, scale=float(1. / 256))
# don't need the gradient for the backward pass
data = model.StopGradient(data, data)
return data, label
def AddLeNetModel(model, data):
# Image size: 28 x 28 -> 24 x 24
conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
# Image size: 24 x 24 -> 12 x 12
pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
# Image size: 12 x 12 -> 8 x 8
conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
# Image size: 8 x 8 -> 4 x 4
pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
# 50 * 4 * 4 stands for dim_out from previous layer multiplied by the image size
# Here, the data is flattened from a tensor of dimension 50x4x4 to a vector of length 50*4*4
fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
relu3 = brew.relu(model, fc3, 'relu3')
# Last FC Layer
pred = brew.fc(model, relu3, 'pred', dim_in=500, dim_out=10)
# Softmax Layer
softmax = brew.softmax(model, pred, 'softmax')
return softmax
def AddAccuracy(model, softmax, label):
"""Adds an accuracy op to the model"""
accuracy = brew.accuracy(model, [softmax, label], "accuracy")
return accuracy
def AddTrainingOperators(model, softmax, label):
"""Adds training operators to the model."""
# Compute cross entropy between softmax scores and labels
xent = model.LabelCrossEntropy([softmax, label], 'xent')
# Compute the expected loss
loss = model.AveragedLoss(xent, "loss")
# Track the accuracy of the model
AddAccuracy(model, softmax, label)
# Use the average loss we just computed to add gradient operators to the model
model.AddGradientOperators([loss])
# Specify the optimization algorithm
optimizer.build_sgd(
model,
base_learning_rate=0.1,
policy="step",
stepsize=1,
gamma=0.999, )
# create VisualDL logger
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_caffe2_mnist_train_loss = logger.scalar(
"scalars/scalar_caffe2_mnist_train_loss")
scalar_caffe2_mnist_train_accuracy = logger.scalar(
"scalars/scalar_caffe2_mnist_train_accuracy")
histogram0 = logger.histogram("histogram/histogram0", num_buckets=50)
histogram1 = logger.histogram("histogram/histogram1", num_buckets=50)
# Specify the data will be input in NCHW order
# (i.e. [batch_size, num_channels, height, width])
arg_scope = {"order": "NCHW"}
# Create the model helper for the train model
train_model = model_helper.ModelHelper(name="mnist_train", arg_scope=arg_scope)
# Specify the input is from the train lmdb
data, label = AddInput(
train_model,
batch_size=64,
db=os.path.join(data_folder, 'mnist-train-nchw-lmdb'),
db_type='lmdb')
# Add the model definition (fc layers, conv layers, softmax, etc.)
softmax = AddLeNetModel(train_model, data)
# Add training operators, specify loss function and optimization algorithm
AddTrainingOperators(train_model, softmax, label)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
total_iters = 200
accuracy = np.zeros(total_iters)
loss = np.zeros(total_iters)
# MAIN TRAINING LOOP!
# Now, we will manually run the network for 200 iterations.
for i in range(total_iters):
workspace.RunNet(train_model.net)
accuracy[i] = workspace.blobs['accuracy']
loss[i] = workspace.blobs['loss']
scalar_caffe2_mnist_train_loss.add_record(i, loss[i])
scalar_caffe2_mnist_train_accuracy.add_record(i, accuracy[i])
conv1_w = workspace.FetchBlob("conv1_w")
conv2_w = workspace.FetchBlob("conv2_w")
histogram0.add_record(i, conv1_w[0].flatten())
histogram1.add_record(i, conv2_w[0].flatten())
# Check the accuracy and loss every so often
if i % 25 == 0:
print(
"Iter: {}, Loss: {}, Accuracy: {}".format(i, loss[i], accuracy[i]))
# coding=utf-8
import numpy as np
import wave
from visualdl import LogWriter


def read_audio_data(audio_path):
    """
    Read audio data from a wav file.
    """
    CHUNK = 4096
    f = wave.open(audio_path, "rb")
    wavdata = []
    chunk = f.readframes(CHUNK)
    while chunk:
        data = np.frombuffer(chunk, dtype='uint8')
        wavdata.extend(data)
        chunk = f.readframes(CHUNK)
    # 8k sample rate, 16bit frame, 1 channel
    shape = [8000, 2, 1]
    return shape, wavdata


# create a LogWriter object
log_writter = LogWriter("./log", sync_cycle=10)

# create an audio component in train mode
ns = 2
with log_writter.mode("train") as logger:
    input_audio = logger.audio(tag="test", num_samples=ns)

# keep a counter audio_sample_num to track how many audio samples
# have been taken in the current sampling pass
audio_sample_num = 0
for step in range(9):
    # when the condition holds, call start_sampling() to begin a pass
    if audio_sample_num == 0:
        input_audio.start_sampling()
    # fetch the sample index
    idx = input_audio.is_sample_taken()
    # if idx != -1, take a sample; otherwise skip this step
    if idx != -1:
        # read the data; the audio file can be .wav, .mp3, etc.
        audio_path = "test.wav"
        audio_shape, audio_data = read_audio_data(audio_path)
        # add the data with set_sample()
        input_audio.set_sample(idx, audio_shape, audio_data)
        audio_sample_num += 1
    # once the current sampling pass is complete, call finish_sampling()
    if audio_sample_num % ns == 0:
        input_audio.finish_sampling()
        audio_sample_num = 0
# coding=utf-8
import numpy as np
from visualdl import LogWriter

# create a LogWriter object
log_writer = LogWriter("./log", sync_cycle=10)

# create a high dimensional component in train mode
with log_writer.mode("train") as logger:
    train_embedding = logger.embedding()

# the first argument is the data, of type List[List[float]]
hot_vectors = np.random.uniform(1, 2, size=(10, 3))
# the second argument is a dictionary of type Dict[str, int],
# where the key is the label of a data point and the value is
# the row index of that point in the original data
word_dict = {
    "label_1": 5,
    "label_2": 4,
    "label_3": 3,
    "label_4": 2,
    "label_5": 1,
}
# add the data with add_embeddings_with_word_dict(data, Dict)
train_embedding.add_embeddings_with_word_dict(hot_vectors, word_dict)
# coding=utf-8
import paddle.fluid as fluid


# define the network structure
def lenet_5(img):
    conv1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv1_bn = fluid.layers.batch_norm(input=conv1)
    conv2 = fluid.nets.simple_img_conv_pool(
        input=conv1_bn,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    prediction = fluid.layers.fc(input=conv2, size=10, act="softmax")
    return prediction


# set up the input variable and run the startup program
image = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32")
prediction = lenet_5(image)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())

# save the inference model to ./paddle_lenet_5_model
fluid.io.save_inference_model(
    "./paddle_lenet_5_model",
    feeded_var_names=[image.name],
    target_vars=[prediction],
    executor=exe)
# coding=utf-8
import numpy as np
from visualdl import LogWriter

# create a LogWriter object
log_writer = LogWriter('./log', sync_cycle=10)

# create a histogram component in train mode
with log_writer.mode("train") as logger:
    param1_histogram = logger.histogram("param1", num_buckets=100)

# set the number of steps to 1 - 100
for step in range(1, 101):
    # the data is uniformly distributed over an interval that shrinks each step
    interval_start = 1 + 2 * step / 100.0
    interval_end = 6 - 2 * step / 100.0
    data = np.random.uniform(interval_start, interval_end, size=(10000))
    # add the data with add_record()
    param1_histogram.add_record(step, data)
# coding=utf-8
import numpy as np
from visualdl import LogWriter
from PIL import Image


def random_crop(img):
    '''
    Return a random 100x100 crop of the given image.
    '''
    img = Image.open(img)
    w, h = img.size
    random_w = np.random.randint(0, w - 100)
    random_h = np.random.randint(0, h - 100)
    return img.crop((random_w, random_h, random_w + 100, random_h + 100))


# create a LogWriter object
log_writer = LogWriter("./log", sync_cycle=10)

# create an image component in train mode, with the sample count set to ns
ns = 2
with log_writer.mode("train") as logger:
    input_image = logger.image(tag="test", num_samples=ns)

# keep a counter sample_num to track how many image samples
# have been taken in the current sampling pass
sample_num = 0
for step in range(6):
    # when the condition holds, call start_sampling() to begin a pass
    if sample_num == 0:
        input_image.start_sampling()
    # fetch the sample index
    idx = input_image.is_sample_taken()
    # if idx != -1, take a sample; otherwise skip this step
    if idx != -1:
        # fetch the image data
        image_path = "test.jpg"
        image_data = np.array(random_crop(image_path))
        # add the data with set_sample();
        # flatten() turns the ndarray from a matrix into a row vector
        input_image.set_sample(idx, image_data.shape, image_data.flatten())
        sample_num += 1
    # once the current sampling pass is complete, call finish_sampling()
    if sample_num % ns == 0:
        input_image.finish_sampling()
        sample_num = 0
# coding=utf-8
from visualdl import LogWriter

# create a LogWriter object
log_writer = LogWriter("./log", sync_cycle=20)

# create scalar components in train mode
with log_writer.mode("train") as logger:
    train_acc = logger.scalar("acc")
    train_loss = logger.scalar("loss")

# create a scalar component in test mode, with tag set to acc
with log_writer.mode("test") as logger:
    test_acc = logger.scalar("acc")

value = [i / 1000.0 for i in range(1000)]
for step in range(1000):
    # add train-mode data to the chart named acc
    train_acc.add_record(step, value[step])
    # add train-mode data to the chart named loss
    train_loss.add_record(step, 1 / (value[step] + 1))
    # add test-mode data to the chart named acc
    test_acc.add_record(step, 1 - value[step])
# coding=utf-8
from visualdl import LogWriter

# create a LogWriter object
log_writter = LogWriter("./log", sync_cycle=10)

# create a text component in train mode, with tag set to test
with log_writter.mode("train") as logger:
    vdl_text_comp = logger.text(tag="test")

# add the data with add_record()
for i in range(1, 6):
    vdl_text_comp.add_record(i, "这是第 %d 个 Step 的数据。" % i)
    vdl_text_comp.add_record(i, "This is data %d." % i)
Examples
========
VisualDL supports Python- and C++-based DL frameworks;
below are some examples for different platforms.
.. toctree::
   :maxdepth: 1
   paddle/TUTORIAL_CN.md
   keras/TUTORIAL_CN.md
   mxnet/TUTORIAL_CN.md
   pytorch/TUTORIAL_CN.md
Examples
========
VisualDL supports Python- and C++-based DL frameworks;
here are some examples for different platforms.
.. toctree::
:maxdepth: 1
paddle/TUTORIAL_EN.md
keras/TUTORIAL_EN.md
mxnet/TUTORIAL_EN.md
pytorch/TUTORIAL_EN.md
caffe2/TUTORIAL_EN.md
# How to use VisualDL in Keras
Below we demonstrate how to use VisualDL with Keras so that the Keras training process can be visualized. As an example, we train a convolutional neural network (CNN) in Keras on the
[MNIST](http://yann.lecun.com/exdb/mnist/) dataset.
The main body of the program comes from the official Keras GitHub [Example](https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py).
We only need to create the VisualDL data collection loggers in the code:
```python
# create VisualDL logger
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_keras_train_loss = logger.scalar(
"scalars/scalar_keras_train_loss")
image_input = logger.image("images/input", 1)
image0 = logger.image("images/image0", 1)
image1 = logger.image("images/image1", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=50)
histogram1 = logger.histogram("histogram/histogram1", num_buckets=50)
```
Then we simply insert our data collection code into the callbacks provided by Keras.
```python
train_step = 0
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
global train_step
# Scalar
scalar_keras_train_loss.add_record(train_step, logs.get('loss'))
# get weights for 2 layers
W0 = model.layers[0].get_weights()[0] # 3 x 3 x 1 x 32
W1 = model.layers[1].get_weights()[0] # 3 x 3 x 32 x 64
weight_array0 = W0.flatten()
weight_array1 = W1.flatten()
# histogram
histogram0.add_record(train_step, weight_array0)
histogram1.add_record(train_step, weight_array1)
# image
image_input.start_sampling()
image_input.add_sample([28, 28], x_train[0].flatten())
image_input.finish_sampling()
image0.start_sampling()
image0.add_sample([9, 32], weight_array0)
image0.finish_sampling()
image1.start_sampling()
image1.add_sample([288, 64], weight_array1)
image1.finish_sampling()
train_step += 1
self.losses.append(logs.get('loss'))
```
After training, the visualization results of each component are as follows:
The scalar chart of the loss is as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/keras_demo_figs/keras_scalar.png?raw=true" />
</p>
The input image and the first- and second-layer convolution weights after training are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/keras_demo_figs/keras_image.png?raw=true" />
</p>
The histograms of the training parameters are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/keras_demo_figs/keras_histogram.png?raw=true" />
</p>
The full demo program can be downloaded from [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/keras/keras_mnist_demo.py).
# How to use VisualDL in Keras
Here we will show you how to use VisualDL with Keras so that you can visualize the Keras training process.
We will use a Keras convolutional neural network trained on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset as an example.
The training program comes from the official Keras GitHub [Example](https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py).
We just need to create the VisualDL data collection loggers in the code:
```python
# create VisualDL logger
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_keras_train_loss = logger.scalar(
"scalars/scalar_keras_train_loss")
image_input = logger.image("images/input", 1)
image0 = logger.image("images/image0", 1)
image1 = logger.image("images/image1", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=50)
histogram1 = logger.histogram("histogram/histogram1", num_buckets=50)
```
Then we can insert our data loggers in the callback function provided by Keras.
```python
train_step = 0
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
global train_step
# Scalar
scalar_keras_train_loss.add_record(train_step, logs.get('loss'))
# get weights for 2 layers
W0 = model.layers[0].get_weights()[0] # 3 x 3 x 1 x 32
W1 = model.layers[1].get_weights()[0] # 3 x 3 x 32 x 64
weight_array0 = W0.flatten()
weight_array1 = W1.flatten()
# histogram
histogram0.add_record(train_step, weight_array0)
histogram1.add_record(train_step, weight_array1)
# image
image_input.start_sampling()
image_input.add_sample([28, 28], x_train[0].flatten())
image_input.finish_sampling()
image0.start_sampling()
image0.add_sample([9, 32], weight_array0)
image0.finish_sampling()
image1.start_sampling()
image1.add_sample([288, 64], weight_array1)
image1.finish_sampling()
train_step += 1
self.losses.append(logs.get('loss'))
```
After training, the visualization results of each component are as follows:
The scalar chart of the loss is as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/keras_demo_figs/keras_scalar.png?raw=true" />
</p>
The input image and the first- and second-layer convolution weights after training are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/keras_demo_figs/keras_image.png?raw=true" />
</p>
The histograms of the training parameters are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/keras_demo_figs/keras_histogram.png?raw=true" />
</p>
The full demonstration code can be downloaded from [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/keras/keras_mnist_demo.py).
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from visualdl import LogWriter
batch_size = 2000
num_classes = 10
epochs = 10
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# create VisualDL logger
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_keras_train_loss = logger.scalar(
"scalars/scalar_keras_mnist_train_loss")
image_input = logger.image("images/input", 1)
image0 = logger.image("images/image0", 1)
image1 = logger.image("images/image1", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=50)
histogram1 = logger.histogram("histogram/histogram1", num_buckets=50)
train_step = 0
class LossHistory(keras.callbacks.Callback):
def on_batch_end(self, batch, logs={}):
global train_step
# Scalar
scalar_keras_train_loss.add_record(train_step, logs.get('loss'))
# get weights for 2 layers
W0 = model.layers[0].get_weights()[0] # 3 x 3 x 1 x 32
W1 = model.layers[1].get_weights()[0] # 3 x 3 x 32 x 64
weight_array0 = W0.flatten()
weight_array1 = W1.flatten()
# histogram
histogram0.add_record(train_step, weight_array0)
histogram1.add_record(train_step, weight_array1)
# image
image_input.start_sampling()
image_input.add_sample([28, 28], x_train[0].flatten())
image_input.finish_sampling()
image0.start_sampling()
image0.add_sample([9, 32], weight_array0)
image0.finish_sampling()
image1.start_sampling()
image1.add_sample([288, 64], weight_array1)
image1.finish_sampling()
train_step += 1
history = LossHistory()
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[history])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Using VisualDL in MXNet
Below we demonstrate how to use VisualDL in MXNet so that the MXNet training process and the final model can be visualized. As an example, we will use MXNet to train a Convolutional Neural Network (CNN) on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset.
- [Install MXNet](#install-mxnet)
- [Install VisualDL](#install-visualdl)
- [Write the MNIST training program](#write-the-mnist-training-program)
- [Display the model graph with VisualDL](#display-the-model-graph-with-visualdl)
## Install MXNet
Please install MXNet by following its [official website](https://mxnet.incubator.apache.org/install/index.html), and verify that the installation succeeded:
```
>>> import mxnet as mx
>>> a = mx.nd.ones((2, 3))
>>> b = a * 2 + 1
>>> b.asnumpy()
array([[ 3.,  3.,  3.],
       [ 3.,  3.,  3.]], dtype=float32)
```
## Install VisualDL
Installing VisualDL is simple. Please follow VisualDL's [official website](https://github.com/PaddlePaddle/VisualDL); only two steps are required:
```
python setup.py bdist_wheel
pip install --upgrade dist/visualdl-*.whl
```
## Write the MNIST training program
We provide a demo program, [mxnet_demo.py](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/mxnet_demo.py). It shows how to download the MNIST dataset and write an MXNet program for CNN training; the MXNet part is based on MXNet's [official tutorial](https://mxnet.incubator.apache.org/tutorials/python/mnist.html).
To instrument the MXNet training with VisualDL, we need to declare a LogWriter instance:
```python
logger = LogWriter(logdir, sync_cycle=30)
```
The logger instance contains three VisualDL modules: Scalar, Image, and Histogram. Here we use the Scalar module:
```python
scalar0 = logger.scalar("scalars/scalar0")
```
A tag may contain '/' so that separate namespaces can be created for complex models.
MXNet's fit function exposes many callback [APIs](https://mxnet.incubator.apache.org/api/python/index.html). We plug our ready-made callback function add_scalar into the corresponding hook:
```python
lenet_model.fit(train_iter,
eval_data=val_iter,
optimizer='sgd',
optimizer_params={'learning_rate':0.1},
eval_metric='acc',
                # embed our custom callback function here
batch_end_callback=[add_scalar()],
num_epoch=2)
```
That's it. During MXNet training, our callback function is invoked at the end of every batch to record the accuracy. As you would expect, the accuracy keeps rising as training proceeds, eventually exceeding 95%. Here is the accuracy curve after two epochs of training:
<p align=center><img width="50%" src="https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/epoch2_small.png?raw=true" /></p>
## Display the model graph with VisualDL
One strength of VisualDL is that it can visualize deep learning models, helping users understand a model's composition more intuitively: which operations it contains, which inputs it takes, and so on. VisualDL's model graph supports the native PaddlePaddle format as well as the widely adopted ONNX format. Users can train a model in MXNet, convert it to ONNX with the [ONNX-MXNet](https://github.com/onnx/onnx-mxnet) tool, and then visualize it.
Here we use a ready-made [Super_Resolution model](https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx) that has already been converted from MXNet to ONNX.
Using VisualDL is simple: after installation, just pass the model file (in protobuf format) to VisualDL via the -m parameter.
```bash
visualdl --logdir=/workspace -m /workspace/super_resolution_mnist.onnx --port=8888
```
The model graph looks like this:
<p align=center><img width="70%" src="https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/mxnet_graph.gif?raw=true" /></p>
The full rendered graph can be downloaded [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/super_resolution_graph.png).
# How to use VisualDL in MXNet
Here we will show you how to use VisualDL in MXNet so that you can visualize the MXNet training process.
We will use a Convolutional Neural Network (CNN) in MXNet to train on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset as an example.
## Install MXNet
Please install MXNet according to MXNet's [official website](https://mxnet.incubator.apache.org/install/index.html)
and verify that the installation is successful:
```
>>> import mxnet as mx
>>> a = mx.nd.ones((2, 3))
>>> b = a * 2 + 1
>>> b.asnumpy()
array([[ 3.,  3.,  3.],
       [ 3.,  3.,  3.]], dtype=float32)
```
## Install VisualDL
The installation of VisualDL is simple. Please install it according to VisualDL's [official website](https://github.com/PaddlePaddle/VisualDL).
Only two steps are required:
```
python setup.py bdist_wheel
pip install --upgrade dist/visualdl-*.whl
```
## Start writing the program for training MNIST
We have provided you with a demonstration program, [mxnet_demo.py](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/mxnet_demo.py).
It shows how to download the MNIST dataset and write an MXNet program for CNN training.
The training program is based on the [MXNet tutorial](https://mxnet.incubator.apache.org/tutorials/python/mnist.html).
We need to create a VisualDL LogWriter instance to record MXNet training:
```python
logger = LogWriter(logdir, sync_cycle=30)
```
The logger instance contains three modules: Scalar, Image, and Histogram. Here we use the Scalar module:
```python
scalar0 = logger.scalar("scalars/scalar0")
```
A tag can contain '/' to create separate namespaces for complex models.
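For example, scalars whose tags share a prefix are grouped under the same namespace in the UI (the tag names below are hypothetical):
```python
# hypothetical tags: both scalars fall under the 'metrics/' namespace
train_acc = logger.scalar("metrics/train_acc")
val_acc = logger.scalar("metrics/val_acc")
```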
MXNet's fit function accepts many callback [APIs](https://mxnet.incubator.apache.org/api/python/index.html).
We insert our callback function `add_scalar` into the corresponding hook:
```python
lenet_model.fit(train_iter,
eval_data=val_iter,
optimizer='sgd',
optimizer_params={'learning_rate':0.1},
eval_metric='acc',
# Here we embed our custom callback function
batch_end_callback=[add_scalar()],
num_epoch=2)
```
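For reference, here is how the demo script defines `add_scalar`: it returns a standard MXNet batch-end callback that reads the running metric and records it with the `scalar0` component created above:
```python
cnt_step = 0  # global step counter for the VisualDL records

def add_scalar():
    def _callback(param):
        with logger.mode("train"):
            global cnt_step
            # param.eval_metric holds the running metric, e.g. ('accuracy', 0.93)
            for name, value in param.eval_metric.get_name_value():
                scalar0.add_record(cnt_step, value)
            cnt_step += 1
    return _callback
```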
That's all. During MXNet training, our callback function is called at the end of each training batch to record the accuracy.
The accuracy keeps rising as training proceeds, eventually exceeding 95%.
The following is the accuracy curve over two epochs:
<p align=center><img width="50%" src="https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/epoch2_small.png?raw=true" /></p>
## Display the model graph with VisualDL
VisualDL helps users understand the composition of a model more intuitively by visualizing deep learning models.
VisualDL can visualize models in the widely supported ONNX format.
Users may use MXNet to train a model, convert it into ONNX format with the [ONNX-MXNet](https://github.com/onnx/onnx-mxnet) tool, and then visualize it.
Here we use an existing model that has already been converted from MXNet to ONNX, the [Super_Resolution model](https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx).
To display the model graph via VisualDL, pass the model file path to VisualDL with the -m parameter:
```bash
visualdl --logdir=/workspace -m /workspace/super_resolution_mnist.onnx --port=8888
```
The model graph is as follows:
<p align=center><img width="70%" src="https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/mxnet_graph.gif?raw=true" /></p>
You can download the full size image [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/mxnet/super_resolution_graph.png).
# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import logging
import mxnet as mx
# Here we import LogWriter so that we can write log data while MXNet is training
from visualdl import LogWriter
# Download MNIST data
mnist = mx.test_utils.get_mnist()
batch_size = 100
# Provide a folder to store data for log, model, image, etc. VisualDL's visualization will be
# based on this folder.
logdir = "./tmp"
# Initialize a logger instance. The 'sync_cycle' parameter specifies how many write
# operations to buffer in memory before syncing records to disk.
logger = LogWriter(logdir, sync_cycle=10)
# mark the components with 'train' label.
with logger.mode("train"):
# scalar0 is used to record scalar metrics while MXNet is training. We will record accuracy.
# In the visualization, we can see the accuracy is increasing as more training steps happen.
scalar0 = logger.scalar("scalars/scalar0")
image0 = logger.image("images/image0", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=100)
# Record training steps
cnt_step = 0
# MXNet provides many callback interfaces. Here we define our own callback method, which is
# called after every batch.
# https://mxnet.incubator.apache.org/api/python/callback/callback.html
def add_scalar():
def _callback(param):
with logger.mode("train"):
global cnt_step
# Here the value is the accuracy we want to record
# https://mxnet.incubator.apache.org/_modules/mxnet/callback.html
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
scalar0.add_record(cnt_step, value)
cnt_step += 1
return _callback
def add_image_histogram():
def _callback(iter_no, sym, arg, aux):
image0.start_sampling()
weight = arg['fullyconnected1_weight'].asnumpy()
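        # fullyconnected1 is the final FC layer: its 10 x 500 = 5000 weights
        # are displayed below as a 100 x 50 grid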
shape = [100, 50]
data = weight.flatten()
image0.add_sample(shape, list(data))
histogram0.add_record(iter_no, list(data))
image0.finish_sampling()
return _callback
# Start to build CNN in MXNet, train MNIST dataset. For more info, check MXNet's official website:
# https://mxnet.incubator.apache.org/tutorials/python/mnist.html
logging.getLogger().setLevel(logging.DEBUG) # logging to stdout
train_iter = mx.io.NDArrayIter(
mnist['train_data'], mnist['train_label'], batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'],
batch_size)
data = mx.sym.var('data')
# first conv layer
conv1 = mx.sym.Convolution(data=data, kernel=(5, 5), num_filter=20)
tanh1 = mx.sym.Activation(data=conv1, act_type="tanh")
pool1 = mx.sym.Pooling(
data=tanh1, pool_type="max", kernel=(2, 2), stride=(2, 2))
# second conv layer
conv2 = mx.sym.Convolution(data=pool1, kernel=(5, 5), num_filter=50)
tanh2 = mx.sym.Activation(data=conv2, act_type="tanh")
pool2 = mx.sym.Pooling(
data=tanh2, pool_type="max", kernel=(2, 2), stride=(2, 2))
# first fullc layer
flatten = mx.sym.flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.sym.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=10)
# softmax loss
lenet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
# create a trainable module on CPU
lenet_model = mx.mod.Module(symbol=lenet, context=mx.cpu())
# train the model
lenet_model.fit(
train_iter,
eval_data=val_iter,
optimizer='sgd',
optimizer_params={'learning_rate': 0.1},
eval_metric='acc',
# integrate our customized callback method
batch_end_callback=[add_scalar()],
epoch_end_callback=[add_image_histogram()],
num_epoch=5)
test_iter = mx.io.NDArrayIter(mnist['test_data'], None, batch_size)
prob = lenet_model.predict(test_iter)
test_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'],
batch_size)
# predict accuracy for lenet
acc = mx.metric.Accuracy()
lenet_model.score(test_iter, acc)
print(acc)
# How to use VisualDL in PaddlePaddle
Below we demonstrate how to use VisualDL in PaddlePaddle so that the PaddlePaddle training process can be visualized. As an example, we will use PaddlePaddle to train a Convolutional Neural Network (CNN) on the
[Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.
The following example is adapted from the official Paddle Book
[Example](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification) using PaddlePaddle's Fluid API.
The full demo program can be downloaded [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/paddle/paddle_cifar10.py).
The program was developed against Paddle v2 0.11. It can be installed with ```pip install paddlepaddle``` or ```docker pull paddlepaddle/paddle:0.11.0```. Note that Paddle does not yet support Python 3, and protobuf 3.5+ is required.
If you see `TypeError: __init__() got an unexpected keyword argument 'file'`, your protobuf version is below 3.5; running `pip install --upgrade protobuf` resolves it.
For detailed installation instructions, see [here](http://paddlepaddle.org/docs/0.11.0/documentation/cn/getstarted/build_and_install/index_en.html).
First we create loggers to record different kinds of data:
```python
# create VisualDL logger and directory
logdir = "./tmp"
logwriter = LogWriter(logdir, sync_cycle=10)
# create 'train' run
with logwriter.mode("train") as writer:
# create 'loss' scalar tag to keep track of loss function
loss_scalar = writer.scalar("loss")
with logwriter.mode("train") as writer:
acc_scalar = writer.scalar("acc")
num_samples = 4
with logwriter.mode("train") as writer:
conv_image = writer.image("conv_image", num_samples, 1) #show 4 samples for every 1 step
input_image = writer.image("input_image", num_samples, 1)
with logwriter.mode("train") as writer:
    param1_histogram = writer.histogram("param1", 100) # 100 buckets, i.e. 100 bins in the histogram
```
Next we use the Paddle v2 Fluid APIs to build the VGG CNN model:
```python
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=512, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
return fc2, conv1
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
net, conv1 = vgg16_bn_drop(images)
predict = fluid.layers.fc(
input=net,
size=classdim,
act='softmax',
param_attr=ParamAttr(name="param1", initializer=NormalInitializer()))
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
BATCH_SIZE = 16
PASS_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())
```
Next we start training and use VisualDL to collect data at the same time:
```python
for pass_id in range(PASS_NUM):
accuracy.reset(exe)
for data in train_reader():
loss, conv1_out, param1, acc = exe.run(
fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost, conv1, param1_var] + accuracy.metrics)
pass_acc = accuracy.eval(exe)
# all code below is for VisualDL
# start picking sample from beginning
if sample_num == 0:
input_image.start_sampling()
conv_image.start_sampling()
idx1 = input_image.is_sample_taken()
idx2 = conv_image.is_sample_taken()
assert idx1 == idx2
idx = idx1
if idx != -1:
image_data = data[0][0]
# reshape the image to 32x32 and 3 channels
input_image_data = np.transpose(
image_data.reshape(data_shape), axes=[1, 2, 0])
# add sample to VisualDL Image Writer to view input image
input_image.set_sample(idx, input_image_data.shape,
input_image_data.flatten())
conv_image_data = conv1_out[0][0]
# add sample to view conv image
conv_image.set_sample(idx, conv_image_data.shape,
conv_image_data.flatten())
sample_num += 1
# when we have enough samples, call finish sampling()
if sample_num % num_samples == 0:
input_image.finish_sampling()
conv_image.finish_sampling()
sample_num = 0
# add record for loss and accuracy to scalar
loss_scalar.add_record(step, loss)
acc_scalar.add_record(step, acc)
        param1_histogram.add_record(step, param1.flatten())
print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
pass_acc))
step += 1
```
After training, the visualization results of each component are as follows.
The scalar charts of accuracy and loss look like this:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/paddle_demo_figs/paddle_scalar.png?raw=true" />
</p>
Four samples each of the input images and the convolution-weight images after training are shown below:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/paddle_demo_figs/paddle_image.png?raw=true" />
</p>
# How to use VisualDL in PaddlePaddle
Here we will show you how to use VisualDL with PaddlePaddle so that you can visualize the training process of PaddlePaddle.
We will use a Paddle Convolutional Neural Network to train on the [Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset as an example.
This example is adapted from the official Paddle Book
[Example](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification) using PaddlePaddle's Fluid API.
The full demonstration code can be downloaded [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/paddle/paddle_cifar10.py).
The script is based on Paddle v2 0.11. You can install it with ```pip install paddlepaddle``` or ```docker pull paddlepaddle/paddle:0.11.0```. Note that Paddle does not support Python 3 yet and the protobuf version needs to be 3.5+.
If you encounter the error `TypeError: __init__() got an unexpected keyword argument 'file'`, your protobuf version is below 3.5; simply running `pip install --upgrade protobuf` will fix the issue.
For details, please follow Paddle's installation guide [here](http://paddlepaddle.org/docs/0.11.0/documentation/en/getstarted/build_and_install/index_en.html).
First we initialize loggers for the different types of records as follows:
```python
# create VisualDL logger and directory
logdir = "./tmp"
logwriter = LogWriter(logdir, sync_cycle=10)
# create 'train' run
with logwriter.mode("train") as writer:
# create 'loss' scalar tag to keep track of loss function
loss_scalar = writer.scalar("loss")
with logwriter.mode("train") as writer:
acc_scalar = writer.scalar("acc")
num_samples = 4
with logwriter.mode("train") as writer:
conv_image = writer.image("conv_image", num_samples, 1) #show 4 samples for every 1 step
input_image = writer.image("input_image", num_samples, 1)
with logwriter.mode("train") as writer:
    param1_histogram = writer.histogram("param1", 100) # 100 buckets, i.e. 100 bins in the histogram
```
We use Paddle v2 Fluid APIs to define our VGG CNN model as follows:
```python
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=512, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
return fc2, conv1
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
net, conv1 = vgg16_bn_drop(images)
predict = fluid.layers.fc(
input=net,
size=classdim,
act='softmax',
param_attr=ParamAttr(name="param1", initializer=NormalInitializer()))
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
BATCH_SIZE = 16
PASS_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())
```
Then we start to train and use VisualDL to record data at the same time.
```python
for pass_id in range(PASS_NUM):
accuracy.reset(exe)
for data in train_reader():
loss, conv1_out, param1, acc = exe.run(
fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost, conv1, param1_var] + accuracy.metrics)
pass_acc = accuracy.eval(exe)
# all code below is for VisualDL
# start picking sample from beginning
if sample_num == 0:
input_image.start_sampling()
conv_image.start_sampling()
idx1 = input_image.is_sample_taken()
idx2 = conv_image.is_sample_taken()
assert idx1 == idx2
idx = idx1
if idx != -1:
image_data = data[0][0]
# reshape the image to 32x32 and 3 channels
input_image_data = np.transpose(
image_data.reshape(data_shape), axes=[1, 2, 0])
# add sample to VisualDL Image Writer to view input image
input_image.set_sample(idx, input_image_data.shape,
input_image_data.flatten())
conv_image_data = conv1_out[0][0]
# add sample to view conv image
conv_image.set_sample(idx, conv_image_data.shape,
conv_image_data.flatten())
sample_num += 1
# when we have enough samples, call finish sampling()
if sample_num % num_samples == 0:
input_image.finish_sampling()
conv_image.finish_sampling()
sample_num = 0
# add record for loss and accuracy to scalar
loss_scalar.add_record(step, loss)
acc_scalar.add_record(step, acc)
        param1_histogram.add_record(step, param1.flatten())
print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
pass_acc))
step += 1
```
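The loop above also relies on a few variables initialized earlier in the full demo script; for completeness, here is that setup, copied from the demo:
```python
# initialization used by the training loop (from the full demo script)
step = 0        # global VisualDL step counter
sample_num = 0  # image samples collected in the current sampling round
start_up_program = framework.default_startup_program()
param1_var = start_up_program.global_block().var("param1")
```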
After the training, launch VisualDL; here are the results.
The scalar diagrams of accuracy and loss are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/paddle_demo_figs/paddle_scalar.png?raw=true" />
</p>
Four samples each of the input images and the convolution-layer images after the training are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/paddle_demo_figs/paddle_image.png?raw=true" />
</p>
# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
from __future__ import print_function
import numpy as np
from visualdl import LogWriter
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework
from paddle.v2.fluid.initializer import NormalInitializer
from paddle.v2.fluid.param_attr import ParamAttr
# create VisualDL logger and directory
logdir = "./tmp"
logwriter = LogWriter(logdir, sync_cycle=10)
# create 'train' run
with logwriter.mode("train") as writer:
# create 'loss' scalar tag to keep track of loss function
loss_scalar = writer.scalar("loss")
with logwriter.mode("train") as writer:
acc_scalar = writer.scalar("acc")
num_samples = 4
with logwriter.mode("train") as writer:
conv_image = writer.image("conv_image", num_samples,
1) # show 4 samples for every 1 step
input_image = writer.image("input_image", num_samples, 1)
with logwriter.mode("train") as writer:
    param1_histogram = writer.histogram(
        "param1", 100)  # 100 buckets, i.e. 100 bins in the histogram
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=512, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
return fc2, conv1
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
net, conv1 = vgg16_bn_drop(images)
predict = fluid.layers.fc(
input=net,
size=classdim,
act='softmax',
param_attr=ParamAttr(name="param1", initializer=NormalInitializer()))
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
BATCH_SIZE = 16
PASS_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())
step = 0
sample_num = 0
start_up_program = framework.default_startup_program()
param1_var = start_up_program.global_block().var("param1")
for pass_id in range(PASS_NUM):
accuracy.reset(exe)
for data in train_reader():
loss, conv1_out, param1, acc = exe.run(
fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost, conv1, param1_var] + accuracy.metrics)
pass_acc = accuracy.eval(exe)
# all code below is for VisualDL
# start picking sample from beginning
if sample_num == 0:
input_image.start_sampling()
conv_image.start_sampling()
idx1 = input_image.is_sample_taken()
idx2 = conv_image.is_sample_taken()
assert idx1 == idx2
idx = idx1
if idx != -1:
image_data = data[0][0]
# reshape the image to 32x32 and 3 channels
input_image_data = np.transpose(
image_data.reshape(data_shape), axes=[1, 2, 0])
# add sample to VisualDL Image Writer to view input image
input_image.set_sample(idx, input_image_data.shape,
input_image_data.flatten())
conv_image_data = conv1_out[0][0]
# add sample to view conv image
conv_image.set_sample(idx, conv_image_data.shape,
conv_image_data.flatten())
sample_num += 1
# when we have enough samples, call finish sampling()
if sample_num % num_samples == 0:
input_image.finish_sampling()
conv_image.finish_sampling()
sample_num = 0
# add record for loss and accuracy to scalar
loss_scalar.add_record(step, loss)
acc_scalar.add_record(step, acc)
        param1_histogram.add_record(step, param1.flatten())
print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
pass_acc))
step += 1
# this model is slow, so if it can train for two mini-batches we consider it to be working properly.
# exit(0)
exit(1)
# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import paddle
import paddle.fluid as fluid
from visualdl import LogWriter
# define a LeNet-5 nn
def lenet_5(img, label):
conv1 = fluid.nets.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu")
conv1_bn = fluid.layers.batch_norm(input=conv1)
conv2 = fluid.nets.simple_img_conv_pool(
input=conv1_bn,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu")
    prediction = fluid.layers.fc(input=conv2, size=10, act="softmax")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)
    acc = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, acc
# train the nn
def train():
img = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32")
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
avg_cost, acc = lenet_5(img, label)
# get the mnist dataset
train_reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=64)
    # define the optimizer
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimizer.minimize(avg_cost)
# running on cpu
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
exe = fluid.Executor(place)
    log_writer = LogWriter("./vdl_log", sync_cycle=10)
    with log_writer.mode("train") as logger:
scalar_loss = logger.scalar(tag="loss")
scalar_accuracy = logger.scalar(tag="accuracy")
num_samples = 10
image_input = logger.image(tag="input", num_samples=num_samples)
histogram = logger.histogram(tag="histogram", num_buckets=50)
# init all param
exe.run(fluid.default_startup_program())
step = 0
sample_num = 0
epochs = 5
param_name = fluid.default_startup_program().global_block().all_parameters(
)[0].name
# start to train
for i in range(epochs):
for batch in train_reader():
cost, accuracy, input, param = exe.run(
feed=feeder.feed(batch),
fetch_list=[avg_cost.name, acc.name, img.name, param_name])
step += 1
# record the loss and accuracy
scalar_loss.add_record(step, cost)
scalar_accuracy.add_record(step, accuracy)
if sample_num % num_samples == 0:
image_input.start_sampling()
idx = image_input.is_sample_taken()
if idx != -1:
# the first image in the batch data
image_data = input[0]
                # the image shape recorded in VDL is H * W * C
image_data = image_data.reshape([28, 28, 1])
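                # scale by 100, presumably so the normalized pixel values are visible in the UI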
image_input.set_sample(idx, image_data.shape,
100 * image_data.flatten())
sample_num += 1
if sample_num % num_samples == 0:
image_input.finish_sampling()
sample_num = 0
# record the parameter trend
histogram.add_record(step, param.flatten())
if __name__ == "__main__":
train()
Subproject commit 1f8ed8767802e1aea667c4a760a77be7146b916f
# How to use VisualDL in PyTorch
Below we demonstrate how to use VisualDL in PyTorch so that the PyTorch training process and the final model can be visualized. As an example, we will use PyTorch to train a Convolutional Neural Network (CNN) on the
[Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.
The main body of the program comes from the PyTorch [Tutorial](http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html).
We also provide an interactive Jupyter Notebook version; see pytorch_cifar10.ipynb in this folder.
```python
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib
matplotlib.use('Agg')
from visualdl import LogWriter
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=500,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=500,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
fig, ax = plt.subplots()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# we can either show the image or save it locally
# plt.show()
fig.savefig('out' + str(np.random.randint(0, 10000)) + '.pdf')
```
We can preview the Cifar10 images to be analyzed:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/pytorch_cifar10_show_image.png?raw=true" />
</p>
Then we create the VisualDL data collection loggers:
```python
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_pytorch_train_loss = logger.scalar("scalars/scalar_pytorch_train_loss")
image1 = logger.image("images/image1", 1)
image2 = logger.image("images/image2", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=100)
```
Cifar10 contains 50000 training images and 10000 test images. We use a batch size of 500 and also sample 500 images at a time. Each batch has the dimensions:
500 x 3 x 32 x 32
Next we build the CNN model:
```python
# get some random training images
dataiter = iter(trainloader)
images, labels = dataiter.next()
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# Define a Convolution Neural Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
```
Next we start training and use VisualDL to collect data at the same time:
```python
# Train the network
for epoch in range(5): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# use VisualDL to retrieve metrics
# scalar
scalar_pytorch_train_loss.add_record(train_step, float(loss))
# histogram
weight_list = net.conv1.weight.view(6*3*5*5, -1)
histogram0.add_record(train_step, weight_list)
# image
image1.start_sampling()
image1.add_sample([96, 25], net.conv2.weight.view(16*6*5*5, -1))
image1.finish_sampling()
image2.start_sampling()
image2.add_sample([18, 25], net.conv1.weight.view(6*3*5*5, -1))
image2.finish_sampling()
train_step += 1
# print statistics
running_loss += loss.data[0]
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
```
Finally, because PyTorch uses dynamic computation graphs, we run the model once with a dummy input to produce the graph:
```python
import torch.onnx
dummy_input = Variable(torch.randn(4, 3, 32, 32))
torch.onnx.export(net, dummy_input, "pytorch_cifar10.onnx")
print('Done')
```
After training, the visualization results of each component are as follows.
The scalar chart of the loss looks like this:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_scalar.png?raw=true" />
</p>
The first- and second-layer convolution weight images after training look like this:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_image.png?raw=true" />
</p>
The histograms of the training parameters look like this:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_hist.png?raw=true" />
</p>
The model graph looks like this:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_graph.png?raw=true" />
</p>
The full rendered graph can be downloaded [here](https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/graph.png?raw=true).
# How to use VisualDL in PyTorch
Here we will show you how to use VisualDL in PyTorch so that you can visualize the PyTorch training process.
We will use a PyTorch Convolutional Neural Network to train on the [Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset as an example.
The training program comes from the [PyTorch Tutorial](http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html).
```python
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib
matplotlib.use('Agg')
from visualdl import LogWriter
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=500,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=500,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
fig, ax = plt.subplots()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# we can either show the image or save it locally
# plt.show()
fig.savefig('out' + str(np.random.randint(0, 10000)) + '.pdf')
```
We can preview the Cifar10 picture set to be analyzed:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/pytorch_cifar10_show_image.png?raw=true" />
</p>
We just need to create the VisualDL data collection loggers in the code:
```python
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
# create a scalar component called 'scalars/'
scalar_pytorch_train_loss = logger.scalar("scalars/scalar_pytorch_train_loss")
image1 = logger.image("images/image1", 1)
image2 = logger.image("images/image2", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=100)
```
There are 50000 training images and 10000 test images in Cifar10. We use a batch size of 500
and also sample 500 images at a time. Each batch has the dimensions:
500 x 3 x 32 x 32
Then we start to build the CNN model:
```python
# get some random training images
dataiter = iter(trainloader)
images, labels = dataiter.next()
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# Define a Convolution Neural Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
```
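The training loop below also uses `criterion`, `optimizer`, and `train_step`, which this excerpt does not define. A minimal sketch, assuming the cross-entropy loss and SGD settings of the PyTorch tutorial this demo is based on:
```python
# assumed setup, following the referenced PyTorch CIFAR-10 tutorial
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
train_step = 0  # global step counter for the VisualDL records
```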
Then we start to train and use VisualDL to collect data at the same time:
```python
# Train the network
for epoch in range(5): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# use VisualDL to retrieve metrics
# scalar
scalar_pytorch_train_loss.add_record(train_step, float(loss))
# histogram
weight_list = net.conv1.weight.view(6*3*5*5, -1)
histogram0.add_record(train_step, weight_list)
# image
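        # conv2 weight has 16*6*5*5 = 2400 values, displayed as a 96 x 25 grid;
        # conv1 weight has 6*3*5*5 = 450 values, displayed as an 18 x 25 grid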
image1.start_sampling()
image1.add_sample([96, 25], net.conv2.weight.view(16*6*5*5, -1))
image1.finish_sampling()
image2.start_sampling()
image2.add_sample([18, 25], net.conv1.weight.view(6*3*5*5, -1))
image2.finish_sampling()
train_step += 1
# print statistics
running_loss += loss.data[0]
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
```
PyTorch supports the ONNX standard and can export its models to ONNX.
Because PyTorch builds the graph dynamically, it runs a single round of inference to trace it. We use a dummy input to run the model and produce the ONNX model:
```python
import torch.onnx
dummy_input = Variable(torch.randn(4, 3, 32, 32))
torch.onnx.export(net, dummy_input, "pytorch_cifar10.onnx")
print('Done')
```
After the training, the visualization results of each component are as follows.
The scalar diagram of the loss is as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_scalar.png?raw=true" />
</p>
The images of the first- and second-layer convolution weights after the training are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_image.png?raw=true" />
</p>
The histograms of the training parameters are as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_hist.png?raw=true" />
</p>
The model graph is as follows:
<p align=center>
<img width="70%" src="https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/sc_graph.png?raw=true" />
</p>
You can download the full size image [here](https://github.com/daming-lu/large_files/blob/master/pytorch_demo_figs/graph.png?Raw=true).
# How to visualize embeddings with VisualDL
Here we show you how to visualize embeddings with VisualDL in PyTorch.
Embeddings are commonly used in natural language processing; they represent semantic meaning with high-dimensional vectors.
Embedding visualization helps verify the training algorithm: it projects the high-dimensional vectors into 2D/3D space,
and the closer two words are, the more semantic meaning they share.
We use the PyTorch [embedding example](http://pytorch.org/tutorials/beginner/nlp/word_embeddings_tutorial.html) as the base.
Below is the complete embedding Python script;
you can test it directly in your Python environment.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
for i in range(len(test_sentence) - 2)]
# print the first 3, just so you can see what they look like
print(trigrams[:3])
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
class NGramLanguageModeler(nn.Module):
def __init__(self, vocab_size, embedding_dim, context_size):
super(NGramLanguageModeler, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear1 = nn.Linear(context_size * embedding_dim, 128)
self.linear2 = nn.Linear(128, vocab_size)
def forward(self, inputs):
embeds = self.embeddings(inputs).view((1, -1))
out = F.relu(self.linear1(embeds))
out = self.linear2(out)
log_probs = F.log_softmax(out, dim=1)
return log_probs
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
total_loss = torch.Tensor([0])
for context, target in trigrams:
        # Step 1. Prepare the inputs to be passed to the model (i.e., turn the words
# into integer indices and wrap them in variables)
context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
# Step 2. Recall that torch *accumulates* gradients. Before passing in a
# new instance, you need to zero out the gradients from the old
# instance
model.zero_grad()
# Step 3. Run the forward pass, getting log probabilities over next
# words
log_probs = model(context_idxs)
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a variable)
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
# Step 5. Do the backward pass and update the gradient
loss.backward()
optimizer.step()
# Get the Python number from a 1-element Tensor by calling tensor.item()
total_loss += loss.item()
losses.append(total_loss)
print(losses) # The loss decreased every iteration over the training data!
```
That is all the code needed to generate your first embedding.
Now let's add a small piece of code to store the embedding in the VisualDL log, so that we can visualize it with VisualDL later.
```
# Import VisualDL
from visualdl import LogWriter
# VisualDL setup
logw = LogWriter("./embedding_log", sync_cycle=10000)
with logw.mode('train') as logger:
embedding = logger.embedding()
embeddings_list = model.embeddings.weight.data.numpy() # convert to numpy array
# VisualDL embedding log writer takes two parameters
# The first parameter is embedding list. The type is list[list[float]]
# The second parameter is word_dict. The type is dictionary<string, int>.
embedding.add_embeddings_with_word_dict(embeddings_list, word_to_ix)
```
Embed the code above into your embedding training program;
this saves the embeddings and the word_dict to the `./embedding_log` folder.
Now we can run VisualDL with `visualdl --logdir=./embedding_log`,
navigate the browser to `localhost:8080`, and switch to the `High Dimensional` tab.
You can download the tutorial code [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/pytorch/pytorch_word2vec.py).
# How to visualize embedding with VisualDL
Here we would like to show you how to visualize embeddings with
VisualDL in PyTorch.
Embeddings are often used in NLP (Natural Language Processing); they can represent
semantic meaning with high-dimensional vectors.
Embedding visualization is useful for verifying the training algorithm,
as visualization reduces the high-dimensional vectors to 2D/3D space.
The closer two words are, the more semantic meaning they share.
We use the PyTorch [embedding example](http://pytorch.org/tutorials/beginner/nlp/word_embeddings_tutorial.html) as
the base. Here is the whole embedding program. The following block is a working Python script.
Feel free to test it in your Python environment.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
for i in range(len(test_sentence) - 2)]
# print the first 3, just so you can see what they look like
print(trigrams[:3])
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
class NGramLanguageModeler(nn.Module):
def __init__(self, vocab_size, embedding_dim, context_size):
super(NGramLanguageModeler, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear1 = nn.Linear(context_size * embedding_dim, 128)
self.linear2 = nn.Linear(128, vocab_size)
def forward(self, inputs):
embeds = self.embeddings(inputs).view((1, -1))
out = F.relu(self.linear1(embeds))
out = self.linear2(out)
log_probs = F.log_softmax(out, dim=1)
return log_probs
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
total_loss = torch.Tensor([0])
for context, target in trigrams:
        # Step 1. Prepare the inputs to be passed to the model (i.e., turn the words
# into integer indices and wrap them in variables)
context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
# Step 2. Recall that torch *accumulates* gradients. Before passing in a
# new instance, you need to zero out the gradients from the old
# instance
model.zero_grad()
# Step 3. Run the forward pass, getting log probabilities over next
# words
log_probs = model(context_idxs)
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a variable)
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
# Step 5. Do the backward pass and update the gradient
loss.backward()
optimizer.step()
# Get the Python number from a 1-element Tensor by calling tensor.item()
total_loss += loss.item()
losses.append(total_loss)
print(losses) # The loss decreased every iteration over the training data!
```
That's all the code you need to generate your first embedding.
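As a quick sanity check before visualizing, you can also compare two learned word vectors directly. Below is a small sketch using the `model` and `word_to_ix` defined above; the word pair is arbitrary, and cosine similarity is just one common measure:
```
import torch.nn.functional as F  # already imported in the script above

def cosine(w1, w2):
    # look up the learned embedding vectors and compare their directions
    v1 = model.embeddings.weight[word_to_ix[w1]].unsqueeze(0)
    v2 = model.embeddings.weight[word_to_ix[w2]].unsqueeze(0)
    return F.cosine_similarity(v1, v2).item()

print(cosine("beauty", "praise"))
```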
Now, let us add a little bit of code to store the embedding in the VisualDL log
so we can visualize it later.
```
# Import VisualDL
from visualdl import LogWriter
# VisualDL setup
logw = LogWriter("./embedding_log", sync_cycle=10000)
with logw.mode('train') as logger:
embedding = logger.embedding()
embeddings_list = model.embeddings.weight.data.numpy() # convert to numpy array
# VisualDL embedding log writer takes two parameters
# The first parameter is embedding list. The type is list[list[float]]
# The second parameter is word_dict. The type is dictionary<string, int>.
embedding.add_embeddings_with_word_dict(embeddings_list, word_to_ix)
```
Insert the above code snippet into your embedding training program.
This will save the embeddings and the word dictionary to the `./embedding_log` folder.
We can now start VisualDL by running `visualdl --logdir=./embedding_log`.
Use your browser to navigate to `localhost:8080` and switch the tab to `High Dimensional`.
You can download the tutorial code [here](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/pytorch/pytorch_word2vec.py).
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"如何在PyTorch中使用VisualDL\n",
"=====================\n",
"\n",
"下面我们演示一下如何在PyTorch中使用VisualDL,从而可以把PyTorch的训练过程以及最后的模型可视化出来。我们将以PyTorch用卷积神经网络(CNN, Convolutional Neural Network)来训练 [Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) 数据集作为例子。\n",
"\n",
"程序的主体来自PyTorch的 [Tutorial](http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import torch\n",
"import torchvision\n",
"import torchvision.transforms as transforms\n",
"from torch.autograd import Variable\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
"\n",
"import matplotlib\n",
"matplotlib.use('Agg')\n",
"\n",
"from visualdl import LogWriter\n",
"\n",
"\n",
"transform = transforms.Compose(\n",
" [transforms.ToTensor(),\n",
" transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n",
"\n",
"trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n",
" download=True, transform=transform)\n",
"trainloader = torch.utils.data.DataLoader(trainset, batch_size=500,\n",
" shuffle=True, num_workers=2)\n",
"\n",
"testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n",
" download=True, transform=transform)\n",
"testloader = torch.utils.data.DataLoader(testset, batch_size=500,\n",
" shuffle=False, num_workers=2)\n",
"\n",
"classes = ('plane', 'car', 'bird', 'cat',\n",
" 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n",
"\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"\n",
"# functions to show an image\n",
"def imshow(img):\n",
" img = img / 2 + 0.5 # unnormalize\n",
" npimg = img.numpy()\n",
" fig, ax = plt.subplots()\n",
" plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
" # we can either show the image or save it locally\n",
" # plt.show()\n",
" fig.savefig('out' + str(np.random.randint(0, 10000)) + '.pdf')\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"然后我们开始创建 VisualDL 的数据采集 loggers\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"logdir = \"/workspace\"\n",
"logger = LogWriter(logdir, sync_cycle=100)\n",
"\n",
"# mark the components with 'train' label.\n",
"with logger.mode(\"train\"):\n",
" # create a scalar component called 'scalars/'\n",
" scalar_pytorch_train_loss = logger.scalar(\"scalars/scalar_pytorch_train_loss\")\n",
" image1 = logger.image(\"images/image1\", 1)\n",
" image2 = logger.image(\"images/image2\", 1)\n",
" histogram0 = logger.histogram(\"histogram/histogram0\", num_buckets=100)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Cifar10 中有 50000 个训练图像和 10000 个测试图像。我们每 500 个作为一个训练集,图片采样也选 500 。 每个训练集 (batch) 是如下的维度:\n",
"\n",
"500 x 3 x 32 x 32\n",
"\n",
"接下来我们开始创建 CNN 模型\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# get some random training images\n",
"dataiter = iter(trainloader)\n",
"images, labels = dataiter.next()\n",
"\n",
"# show images\n",
"imshow(torchvision.utils.make_grid(images))\n",
"# print labels\n",
"print(' '.join('%5s' % classes[labels[j]] for j in range(4)))\n",
"\n",
"# Define a Convolution Neural Network\n",
"class Net(nn.Module):\n",
" def __init__(self):\n",
" super(Net, self).__init__()\n",
" self.conv1 = nn.Conv2d(3, 6, 5)\n",
" self.pool = nn.MaxPool2d(2, 2)\n",
" self.conv2 = nn.Conv2d(6, 16, 5)\n",
" self.fc1 = nn.Linear(16 * 5 * 5, 120)\n",
" self.fc2 = nn.Linear(120, 84)\n",
" self.fc3 = nn.Linear(84, 10)\n",
"\n",
" def forward(self, x):\n",
" x = self.pool(F.relu(self.conv1(x)))\n",
" x = self.pool(F.relu(self.conv2(x)))\n",
" x = x.view(-1, 16 * 5 * 5)\n",
" x = F.relu(self.fc1(x))\n",
" x = F.relu(self.fc2(x))\n",
" x = self.fc3(x)\n",
" return x\n",
"\n",
"\n",
"net = Net()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"接下来我们开始训练并且同时用 VisualDL 来采集相关数据\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Train the network\n",
"for epoch in range(5): # loop over the dataset multiple times\n",
" running_loss = 0.0\n",
" for i, data in enumerate(trainloader, 0):\n",
" # get the inputs\n",
" inputs, labels = data\n",
"\n",
" # wrap them in Variable\n",
" inputs, labels = Variable(inputs), Variable(labels)\n",
"\n",
" # zero the parameter gradients\n",
" optimizer.zero_grad()\n",
"\n",
" # forward + backward + optimize\n",
" outputs = net(inputs)\n",
" loss = criterion(outputs, labels)\n",
"\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" # use VisualDL to retrieve metrics\n",
" # scalar\n",
" scalar_pytorch_train_loss.add_record(train_step, float(loss))\n",
"\n",
" # histogram\n",
" weight_list = net.conv1.weight.view(6*3*5*5, -1)\n",
" histogram0.add_record(train_step, weight_list)\n",
"\n",
" # image\n",
" image1.start_sampling()\n",
" image1.add_sample([96, 25], net.conv2.weight.view(16*6*5*5, -1))\n",
" image1.finish_sampling()\n",
"\n",
" image2.start_sampling()\n",
" image2.add_sample([18, 25], net.conv1.weight.view(6*3*5*5, -1))\n",
" image2.finish_sampling()\n",
"\n",
"\n",
" train_step += 1\n",
"\n",
" # print statistics\n",
" running_loss += loss.data[0]\n",
" if i % 2000 == 1999: # print every 2000 mini-batches\n",
" print('[%d, %5d] loss: %.3f' %\n",
" (epoch + 1, i + 1, running_loss / 2000))\n",
" running_loss = 0.0\n",
"\n",
"print('Finished Training')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"最后,因为 PyTorch 采用 Dynamic Computation Graphs,我们用一个 dummy 输入来空跑一下模型,以便产生图"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import torch.onnx\n",
"dummy_input = Variable(torch.randn(4, 3, 32, 32))\n",
"torch.onnx.export(net, dummy_input, \"pytorch_cifar10.onnx\")\n",
"\n",
"print('Done')"
]
}
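,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As an optional check, we can evaluate the trained network on the 10,000 test images. This is a minimal sketch following the same PyTorch tutorial; it does not log anything to VisualDL.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# count correct predictions over the whole test set\n",
    "correct = 0\n",
    "total = 0\n",
    "for data in testloader:\n",
    "    images, labels = data\n",
    "    outputs = net(Variable(images))\n",
    "    # the index of the max logit is the predicted class\n",
    "    _, predicted = torch.max(outputs.data, 1)\n",
    "    total += labels.size(0)\n",
    "    correct += (predicted == labels).sum()\n",
    "\n",
    "print('Accuracy of the network on the 10000 test images: %d %%' % (\n",
    "    100 * correct / total))"
   ]
  }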
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.14"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
# Copyright (c) 2017 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.onnx
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
from visualdl import LogWriter
transform = transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=500, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(
testset, batch_size=500, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck')
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
fig, ax = plt.subplots()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# we can either show the image or save it locally
# plt.show()
fig.savefig('out' + str(np.random.randint(0, 10000)) + '.pdf')
logdir = "./workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
    # create a scalar component under the 'scalars/' namespace
scalar_pytorch_train_loss = logger.scalar(
"scalars/scalar_pytorch_train_loss")
image1 = logger.image("images/image1", 1)
image2 = logger.image("images/image2", 1)
histogram0 = logger.histogram("histogram/histogram0", num_buckets=100)
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)  # next() works on both Python 2 and 3
# show images
imshow(torchvision.utils.make_grid(images))
# print the labels of the first 4 images
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# Define a Convolutional Neural Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
# Define a Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
train_step = 0
# Train the network
for epoch in range(5): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
        # use VisualDL to record metrics
# scalar
scalar_pytorch_train_loss.add_record(train_step, float(loss))
# histogram
weight_list = net.conv1.weight.view(6 * 3 * 5 * 5, -1)
histogram0.add_record(train_step, weight_list)
# image
image1.start_sampling()
image1.add_sample([96, 25], net.conv2.weight.view(16 * 6 * 5 * 5, -1))
image1.finish_sampling()
image2.start_sampling()
image2.add_sample([18, 25], net.conv1.weight.view(6 * 3 * 5 * 5, -1))
image2.finish_sampling()
train_step += 1
# print statistics
        running_loss += float(loss)
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
dummy_input = Variable(torch.randn(4, 3, 32, 32))
torch.onnx.export(net, dummy_input, "pytorch_cifar10.onnx")
print('Done')
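
# Optional sanity check (a sketch, not part of the original demo): assuming the
# `onnx` package is installed, the exported model can be loaded back to verify
# that the produced graph is well formed before visualizing it.
import onnx

onnx_model = onnx.load("pytorch_cifar10.onnx")
onnx.checker.check_model(onnx_model)  # raises an exception if the model is invalid
print(onnx.helper.printable_graph(onnx_model.graph))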