Commit 02e4dad0 authored by Dong Daxiang, committed by GitHub

Merge pull request #124 from PaddlePaddle/merge_longteng

 add server python API
@@ -46,11 +46,11 @@ set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
 set(THIRD_PARTY_BUILD_TYPE Release)
-option(WITH_AVX "Compile Paddle Serving with AVX intrinsics" ${AVX_FOUND})
+option(WITH_AVX "Compile Paddle Serving with AVX intrinsics" OFF)
-option(WITH_MKL "Compile Paddle Serving with MKL support." ${AVX_FOUND})
+option(WITH_MKL "Compile Paddle Serving with MKL support." OFF)
-option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU" ${CUDA_FOUND})
+option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU" OFF)
-option(CLIENT_ONLY "Compile client libraries and demos only" FALSE)
+option(CLIENT_ONLY "Compile client libraries and demos only" OFF)
-option(WITH_ELASTIC_CTR "Compile ELASTIC-CTR solution" FALSE)
+option(WITH_ELASTIC_CTR "Compile ELASTIC-CTR solution" OFF)
 set(WITH_MKLML ${WITH_MKL})
 if (NOT DEFINED WITH_MKLDNN)
@@ -128,14 +128,10 @@ if (NOT CLIENT_ONLY)
 endif()
 add_subdirectory(core)
 if(NOT CLIENT_ONLY)
 add_subdirectory(paddle_inference)
 endif()
-if(CLIENT_ONLY)
 add_subdirectory(python)
-set(PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE})
-set(PYTHON_LIBRARIES ${PYTHON_LIB})
-endif()
-add_subdirectory(examples)
+#add_subdirectory(examples)
@@ -36,7 +36,7 @@ SET(PADDLE_VERSION "latest")
 if (WITH_GPU)
 SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}-gpu-cuda${CUDA_VERSION_MAJOR}-cudnn7-avx-mkl")
 else()
-if (AVX_FOUND)
+if (WITH_AVX)
 if (WITH_MKLML)
 SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}-cpu-avx-mkl")
 else()
@@ -62,10 +62,13 @@ ExternalProject_Add(
 INSTALL_COMMAND
 ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include &&
 ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib &&
-${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party &&
-${CMAKE_COMMAND} -E copy ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so
+${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party
 )
+if (WITH_MKLML)
+file(COPY ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 DESTINATION ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so FOLLOW_SYMLINK_CHAIN)
+endif()
 INCLUDE_DIRECTORIES(${PADDLE_INCLUDE_DIR})
 SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${PADDLE_INSTALL_DIR}/third_party/install/mklml/lib")
 LINK_DIRECTORIES(${PADDLE_INSTALL_DIR}/third_party/install/mklml/lib)
@@ -73,6 +76,9 @@ LINK_DIRECTORIES(${PADDLE_INSTALL_DIR}/third_party/install/mklml/lib)
 SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib")
 LINK_DIRECTORIES(${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib)
+ADD_LIBRARY(openblas STATIC IMPORTED GLOBAL)
+SET_PROPERTY(TARGET openblas PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/openblas/lib/libopenblas.a)
 ADD_LIBRARY(paddle_fluid STATIC IMPORTED GLOBAL)
 SET_PROPERTY(TARGET paddle_fluid PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/lib/libpaddle_fluid.a)
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
SET(PADDLE_SOURCES_DIR ${THIRD_PARTY_PATH}/Paddle)
SET(PADDLE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/Paddle/)
SET(PADDLE_INCLUDE_DIR "${PADDLE_INSTALL_DIR}/include" CACHE PATH "PaddlePaddle include directory." FORCE)
SET(PADDLE_LIBRARIES "${PADDLE_INSTALL_DIR}/lib/libpaddle_fluid.a" CACHE FILEPATH "Paddle library." FORCE)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/Paddle/fluid_install_dir)
# Reference https://stackoverflow.com/questions/45414507/pass-a-list-of-prefix-paths-to-externalproject-add-in-cmake-args
set(prefix_path "${THIRD_PARTY_PATH}/install/gflags|${THIRD_PARTY_PATH}/install/leveldb|${THIRD_PARTY_PATH}/install/snappy|${THIRD_PARTY_PATH}/install/gtest|${THIRD_PARTY_PATH}/install/protobuf|${THIRD_PARTY_PATH}/install/zlib|${THIRD_PARTY_PATH}/install/glog")
message( "WITH_GPU = ${WITH_GPU}")
# If a minimal .a is needed, set WITH_DEBUG_SYMBOLS=OFF
ExternalProject_Add(
extern_paddle
${EXTERNAL_PROJECT_LOG_ARGS}
# TODO(wangguibao): switch to the newest repo once it is updated.
GIT_REPOSITORY "https://github.com/PaddlePaddle/Paddle"
GIT_TAG "v1.5.1"
PREFIX ${PADDLE_SOURCES_DIR}
UPDATE_COMMAND ""
BINARY_DIR ${CMAKE_BINARY_DIR}/Paddle
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_INSTALL_PREFIX=${PADDLE_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=${PADDLE_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
-DCMAKE_PREFIX_PATH=${prefix_path}
-DCMAKE_BINARY_DIR=${CMAKE_CURRENT_BINARY_DIR}
-DWITH_SWIG_PY=OFF
-DWITH_PYTHON=OFF
-DWITH_MKL=OFF
-DWITH_AVX=OFF
-DWITH_MKLDNN=OFF
-DWITH_GPU=OFF
-DWITH_FLUID_ONLY=ON
-DWITH_TESTING=OFF
-DWITH_DISTRIBUTE=OFF
-DON_INFER=ON
${EXTERNAL_OPTIONAL_ARGS}
LIST_SEPARATOR |
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PADDLE_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR:PATH=${PADDLE_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
BUILD_COMMAND $(MAKE)
INSTALL_COMMAND $(MAKE) fluid_lib_dist
)
ExternalProject_Get_Property(extern_paddle BINARY_DIR)
ADD_LIBRARY(paddle_fluid STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET paddle_fluid PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/fluid_install_dir/paddle/fluid/inference/libpaddle_fluid.a)
LIST(APPEND external_project_dependencies paddle)
ADD_LIBRARY(snappystream STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET snappystream PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/fluid_install_dir/third_party/install/snappystream/lib/libsnappystream.a)
ADD_LIBRARY(openblas STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET openblas PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/fluid_install_dir/third_party/install/openblas/lib/libopenblas.a)
ADD_LIBRARY(xxhash STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET xxhash PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/fluid_install_dir/third_party/install/xxhash/lib/libxxhash.a)
LIST(APPEND paddle_depend_libs
snappystream
snappy
xxhash
openblas)
@@ -25,4 +25,5 @@ endif()
 if (NOT CLIENT_ONLY)
 add_subdirectory(predictor)
+add_subdirectory(general-server)
 endif()
@@ -29,11 +29,43 @@ FILE(GLOB inc ${CMAKE_CURRENT_BINARY_DIR}/*.pb.h)
 install(FILES ${inc}
 DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/include/configure)
+py_proto_compile(general_model_config_py_proto SRCS proto/general_model_config.proto)
+add_custom_target(general_model_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
+add_dependencies(general_model_config_py_proto general_model_config_py_proto_init)
+if (CLIENT_ONLY)
 py_proto_compile(sdk_configure_py_proto SRCS proto/sdk_configure.proto)
 add_custom_target(sdk_configure_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
 add_dependencies(sdk_configure_py_proto sdk_configure_py_proto_init)
 add_custom_command(TARGET sdk_configure_py_proto POST_BUILD
-COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/proto
-COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/proto
-COMMENT "Copy generated python proto into directory paddle_serving/proto."
+COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
+COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
+COMMENT "Copy generated python proto into directory paddle_serving_client/proto."
 WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+add_custom_command(TARGET general_model_config_py_proto POST_BUILD
+COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
+COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
+COMMENT "Copy generated general_model_config proto file into directory paddle_serving_client/proto."
+WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+endif()
+if (NOT CLIENT_ONLY)
+py_proto_compile(server_config_py_proto SRCS proto/server_configure.proto)
+add_custom_target(server_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
+add_dependencies(server_config_py_proto server_config_py_proto_init)
+add_custom_command(TARGET server_config_py_proto POST_BUILD
+COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
+COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
+COMMENT "Copy generated python proto into directory paddle_serving_server/proto."
+WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+add_custom_command(TARGET general_model_config_py_proto POST_BUILD
+COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
+COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
+COMMENT "Copy generated general_model_config proto file into directory paddle_serving_server/proto."
+WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+endif()
@@ -19,13 +19,17 @@
 namespace baidu {
 namespace paddle_serving {
 namespace configure {
+int read_proto_conf(const std::string &conf_full_path,
+                    google::protobuf::Message *conf);
+
 int read_proto_conf(const std::string &conf_path,
                     const std::string &conf_file,
                     google::protobuf::Message *conf);
 int write_proto_conf(google::protobuf::Message *message,
                      const std::string &output_path,
                      const std::string &output_file);
 }  // namespace configure
 }  // namespace paddle_serving
......
@@ -16,14 +16,16 @@ syntax = "proto2";
 package baidu.paddle_serving.configure;
 message FeedVar {
-required string name = 1;
-required bool is_lod_tensor = 2;
-required int32 feed_type = 3;
-repeated int32 shape = 4;
+optional string name = 1;
+optional string alias_name = 2;
+optional bool is_lod_tensor = 3 [ default = false ];
+optional int32 feed_type = 4 [ default = 0 ];
+repeated int32 shape = 5;
 }
 message FetchVar {
-required string name = 1;
-repeated int32 shape = 2;
+optional string name = 1;
+optional string alias_name = 2;
+repeated int32 shape = 3;
 }
 message GeneralModelConfig {
 repeated FeedVar feed_var = 1;
......
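For reference, a text-format GeneralModelConfig under the updated schema could look like the sketch below; the variable names, types, and shapes are illustrative, not taken from this commit:

feed_var {
  name: "words"
  alias_name: "words"
  is_lod_tensor: true
  feed_type: 0
  shape: -1
}
fetch_var {
  name: "fc_1.tmp_2"
  alias_name: "prediction"
  shape: 2
}

The alias_name is what callers address a variable by, while name stays bound to the variable inside the inference program; the client code further down keys its lookup tables on alias_name.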
@@ -31,6 +31,24 @@ namespace baidu {
 namespace paddle_serving {
 namespace configure {
+int read_proto_conf(const std::string &conf_file_full_path,
+                    google::protobuf::Message *conf) {
+  int fd = open(conf_file_full_path.c_str(), O_RDONLY);
+  if (fd == -1) {
+    LOG(WARNING) << "File not found: " << conf_file_full_path.c_str();
+    return -1;
+  }
+  google::protobuf::io::FileInputStream input(fd);
+  bool success = google::protobuf::TextFormat::Parse(&input, conf);
+  close(fd);
+  if (!success) {
+    return -1;
+  }
+  return 0;
+}
+
 int read_proto_conf(const std::string &conf_path,
                     const std::string &conf_file,
                     google::protobuf::Message *conf) {
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set(SOURCE_FILE cube-agent.go)
add_go_executable(cube-agent ${SOURCE_FILE})
add_dependencies(cube-agent agent-docopt-go)
add_dependencies(cube-agent agent-logex)
add_dependencies(cube-agent agent-pipeline)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"errors"
_ "github.com/Badangel/logex"
"strings"
"sync"
)
var (
Dir string
WorkerNum int
QueueCapacity int32
MasterHost []string
MasterPort []string
TestHostname string
TestIdc string
ShardLock sync.RWMutex
CmdWorkPool *WorkPool
CmdWorkFilter sync.Map
)
type (
Status struct {
Status string `json:"status"`
Version string `json:"version"`
}
MasterResp struct {
Success string `json:"success"`
Message string `json:"message"`
Data string `json:"data"`
}
ShardInfo struct {
DictName string
ShardSeq int
SlotIdList string
DataDir string
Service string `json:"service,omitempty"`
Libcube string `json:"libcube,omitempty"`
}
CubeResp struct {
Status int `json:"status"`
CurVersion string `json:"cur_version"`
BgVersion string `json:"bg_version"`
}
)
var BUILTIN_STATUS = Status{"RUNNING", "3.0.0.1"}
var ShardInfoMap map[string]map[string]*ShardInfo
var disks []string
// GetMaster resolves a master address: a known "host:port" key in
// ShardInfoMap is split into host and port; anything else falls back to the
// first configured master.
func GetMaster(master string) (host, port string, err error) {
if len(ShardInfoMap) < 1 {
return "", "", errors.New("empty master list.")
}
if master == "" {
return MasterHost[0], MasterPort[0], nil
}
if _, ok := ShardInfoMap[master]; ok {
m := strings.Split(master, ":")
if len(m) != 2 {
return MasterHost[0], MasterPort[0], nil
}
return m[0], m[1], nil
} else {
return MasterHost[0], MasterPort[0], nil
}
}
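A minimal usage sketch of GetMaster (not part of the commit): passing an empty or unknown master falls back to the first configured master, and an error is returned only when no shard info has been loaded at all.

package main

import (
	"agent"
	"fmt"
)

func main() {
	// "" is not a key of ShardInfoMap, so this falls back to
	// MasterHost[0] / MasterPort[0].
	host, port, err := agent.GetMaster("")
	if err != nil {
		fmt.Println("no master available:", err)
		return
	}
	fmt.Println("using master", host+":"+port)
}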
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/Badangel/logex"
)
type handlerFunc func(subpath string, m map[string]string, b []byte) (string, string, error)
var ( // key = subpath; eg: path="/checker/job", key="job"
getHandler map[string]handlerFunc
putHandler map[string]handlerFunc
deleteHandler map[string]handlerFunc
postHandler map[string]handlerFunc
)
// StartHttp registers the REST handlers and starts serving on addr.
func StartHttp(addr string) error {
// init handlers:
initGetHandlers()
initPostHandlers()
http.HandleFunc("/agent/", handleRest)
logex.Notice("start http ", addr)
return http.ListenAndServe(addr, nil)
}
func handleRest(w http.ResponseWriter, r *http.Request) {
var (
req_log string
status int32
)
time_begin := time.Now()
cont_type := make([]string, 1, 1)
cont_type[0] = "application/json"
header := w.Header()
header["Content-Type"] = cont_type
w.Header().Add("Access-Control-Allow-Origin", "*")
m := parseHttpKv(r)
b, _ := ioutil.ReadAll(r.Body)
req_log = fmt.Sprintf("handle %v %v %v from %v, len(m)=%v, m=%+v",
r.Method, r.URL.Path, r.URL.RawQuery, r.RemoteAddr, len(m), m)
api := r.URL.Path
var showHandler map[string]handlerFunc
switch r.Method {
case "GET":
showHandler = getHandler
case "POST": // create
showHandler = postHandler
case "PUT": // update
showHandler = putHandler
case "DELETE":
showHandler = deleteHandler
default:
logex.Warningf(`{"error":1, "message":"unsupport method %v"}`, r.Method)
}
handler, ok := showHandler[api]
if !ok {
key_list := make([]string, 0, len(showHandler))
for key := range showHandler {
key_list = append(key_list, key)
}
status = 2
fmt.Fprintf(w, `{"success":"%v", "message":"wrong api", "method":"%s", "api":"%s", "api_list":"%v"}`,
status, r.Method, api, key_list)
logex.Noticef(`%v, time=%v, status=%v`,
req_log, time.Now().Sub(time_begin).Nanoseconds()/1000000, status)
return
}
var s string
rst, handle_log, err := handler(api, m, b)
if err == nil {
status = 0
s = fmt.Sprintf(`{"success":"%v", "message":"query ok", "data":%s}`, status, rst)
} else {
status = 255
s = fmt.Sprintf(`{"success":"%v", "message":%v, "data":%s}`,
status, quote(err.Error()), rst)
}
if isJsonDict(s) {
fmt.Fprintln(w, s)
} else {
logex.Fatalf("invalid json: %v", s)
}
if err == nil {
logex.Noticef(`%v, time=%v, status=%v, handle_log=%v`,
req_log, time.Now().Sub(time_begin).Nanoseconds()/1000000,
status, quote(handle_log))
} else {
logex.Noticef(`%v, time=%v, status=%v, err=%v, handle_log=%v`,
req_log, time.Now().Sub(time_begin).Nanoseconds()/1000000,
status, quote(err.Error()), quote(handle_log))
}
}
func parseHttpKv(r *http.Request) map[string]string {
r.ParseForm()
m := make(map[string]string)
for k, v := range r.Form {
switch k {
case "user": // remove @baidu.com for user
m[k] = strings.Split(v[0], "@")[0]
default:
m[k] = v[0]
}
}
// allow passing hostname for debug
if _, ok := m["hostname"]; !ok {
ip := r.RemoteAddr[:strings.Index(r.RemoteAddr, ":")]
m["hostname"], _ = getHostname(ip)
}
return m
}
// restReq sends a restful request to requrl and returns response body.
func restReq(method, requrl string, timeout int, kv *map[string]string) (string, error) {
logex.Debug("####restReq####")
logex.Debug(*kv)
data := url.Values{}
if kv != nil {
for k, v := range *kv {
logex.Trace("req set:", k, v)
data.Set(k, v)
}
}
if method == "GET" || method == "DELETE" {
requrl = requrl + "?" + data.Encode()
data = url.Values{}
}
logex.Notice(method, requrl)
req, err := http.NewRequest(method, requrl, bytes.NewBufferString(data.Encode()))
if err != nil {
logex.Warning("NewRequest failed:", err)
return "", err
}
if method == "POST" || method == "PUT" {
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
}
client := &http.Client{}
client.Timeout = time.Duration(timeout) * time.Second
resp, err := client.Do(req)
if err != nil {
logex.Warning("Do failed:", err)
return "", err
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
logex.Warning("resp status: " + resp.Status)
return "", errors.New("resp status: " + resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
return string(body), err
}
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/Badangel/logex"
)
// RestReq sends a restful request to requrl and returns the response body.
func RestReq(method, requrl string, timeout int, kv *map[string]string) (string, error) {
data := url.Values{}
if kv != nil {
for k, v := range *kv {
//logex.Trace("req set:", k, v)
data.Set(k, v)
}
}
if method == "GET" || method == "DELETE" {
requrl = requrl + "?" + data.Encode()
data = url.Values{}
}
//logex.Notice(method, requrl)
req, err := http.NewRequest(method, requrl, bytes.NewBufferString(data.Encode()))
if err != nil {
logex.Warning("NewRequest failed:", err)
return "", err
}
if method == "POST" || method == "PUT" {
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
}
client := &http.Client{}
client.Timeout = time.Duration(timeout) * time.Second
resp, err := client.Do(req)
if err != nil {
logex.Warning("Do failed:", err)
return "", err
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
logex.Warning("resp status: " + resp.Status)
return "", errors.New("resp status: " + resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
return string(body), err
}
// JsonReq sends a restful request to requrl and unmarshals the JSON response body into out.
func JsonReq(method, requrl string, timeout int, kv *map[string]string,
out interface{}) error {
s, err := RestReq(method, requrl, timeout, kv)
logex.Debugf("json request method:[%v], requrl:[%s], timeout:[%v], map[%v], out_str:[%s]", method, requrl, timeout, kv, s)
if err != nil {
return err
}
return json.Unmarshal([]byte(s), out)
}
func GetHdfsMeta(src string) (master, ugi, path string, err error) {
//src = "hdfs://root:rootpasst@st1-inf-platform0.st01.baidu.com:54310/user/mis_user/news_dnn_ctr_cube_1/1501836820/news_dnn_ctr_cube_1_part54.tar"
//src = "hdfs://st1-inf-platform0.st01.baidu.com:54310/user/mis_user/news_dnn_ctr_cube_1/1501836820/news_dnn_ctr_cube_1_part54.tar"
ugiBegin := strings.Index(src, "//")
ugiPos := strings.LastIndex(src, "@")
if ugiPos != -1 && ugiBegin != -1 {
ugi = src[ugiBegin+2 : ugiPos]
}
src1 := strings.Replace(strings.Replace(src, "hdfs://", "", 1), ugi, "", 1)
if ugi != "" {
src1 = src1[1:]
}
pos := strings.Index(src1, "/")
if pos != -1 {
master = src1[0:pos]
path = src1[pos:]
} else {
logex.Warningf("failed to get the master or path for (%s)", src)
err = errors.New("invalid master or path found")
}
logex.Debugf("parse the (%s) succ, master is %s, ugi is (%s), path is %s", src, master, ugi, path)
return
}
func getHostIp() (string, error) {
if addrs, err := net.InterfaceAddrs(); err == nil {
for _, addr := range addrs {
ips := addr.String()
logex.Debugf("get host ip: %v", ips)
if strings.HasPrefix(ips, "127") {
continue
} else {
list := strings.Split(ips, "/")
if len(list) != 2 {
continue
}
return list[0], nil
}
}
}
return "unkown ip", errors.New("get host ip failed")
}
func getHostname(ip string) (hostname string, err error) {
if hostnames, err := net.LookupAddr(ip); err != nil {
hostname = ip
//logex.Warningf("cannot find the hostname of ip (%s), error (%v)", ip, err)
} else {
if len(hostnames) > 0 {
hostname = hostnames[0]
} else {
hostname = ip
}
}
return hostname, err
}
func GetLocalHostname() (hostname string, err error) {
if ip, err := getHostIp(); err == nil {
return getHostname(ip)
} else {
return "unkown ip", err
}
}
func GetLocalHostnameCmd() (hostname string, err error) {
cmd := "hostname"
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if stdout != "" && err == nil {
hostname := strings.TrimSpace(stdout)
index := strings.LastIndex(hostname, ".baidu.com")
if index > 0 {
return hostname[:strings.LastIndex(hostname, ".baidu.com")], nil
} else {
return hostname, nil
}
} else {
logex.Debugf("using hostname cmd failed. err:%v", err)
return GetLocalHostname()
}
}
// quote quotes string for json output. eg: s="123", quote(s)=`"123"`
func quote(s string) string {
return fmt.Sprintf("%q", s)
}
// quoteb quotes byte array for json output.
func quoteb(b []byte) string {
return quote(string(b))
}
// quotea quotes string array for json output
func quotea(a []string) string {
b, _ := json.Marshal(a)
return string(b)
}
func isJsonDict(s string) bool {
var js map[string]interface{}
return json.Unmarshal([]byte(s), &js) == nil
}
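As a usage sketch (not part of the commit), a caller could query a master endpoint with JsonReq and decode the reply into MasterResp; the address, path, and form field below are illustrative assumptions:

package main

import (
	"agent"
	"fmt"
)

func main() {
	var resp agent.MasterResp
	kv := map[string]string{"hostname": "cube-node-0"} // illustrative form field
	// JsonReq issues the request via RestReq and json.Unmarshals the body.
	err := agent.JsonReq("POST", "http://10.0.0.2:8001/api/shard", 30, &kv, &resp)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("success:", resp.Success, "data:", resp.Data)
}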
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"github.com/Badangel/logex"
"github.com/Badangel/pipeline"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
)
const (
COMMAND_DOWNLOAD = "download"
COMMAND_RELOAD = "reload"
COMMAND_SWITCH = "enable"
COMMAND_ROLLBACK = "rollback"
COMMAND_CHECK = "check"
COMMAND_CLEAR = "clear"
COMMAND_POP = "pop"
RETRY_TIMES = 3
REQUEST_MASTER_TIMEOUT_SECOND = 60
MAX_DOWN_CO = 7
RELOAD_RETRY_TIMES = 3
RELOAD_RETRY_INTERVAL_SECOND = 10
DOWNLOAD_DONE_MARK_FILE = ".download_done"
)
type VersionInfo struct {
Version string
Depend string
Source string
}
type Work struct {
DictName string `json:"dict_name"`
ShardSeq int `json:"shard_seq"`
DeployPath string `json:"deploy_path"`
Command string `json:"command"`
Version string `json:"version"`
Depend string `json:"depend"`
Source string `json:"source"`
Mode string `json:"mode"`
DictMode string `json:"dict_mode"`
Port string `json:"port"`
bRollback bool `json:"b_rollback"`
RollbackInfo []VersionInfo `json:"rollback_info"`
Status string `json:"status"`
FinishStatus string `json:"finish_status"`
Service string `json:"service,omitempty"`
VersionSign string `json:"version_sign,omitempty"`
MasterAddress string `json:"master_address,omitempty"`
ActiveVersionList string `json:"active_version_list,omitempty"`
}
func (work *Work) Token() string {
return work.DictName + strconv.Itoa(work.ShardSeq) + work.Service
}
func (work *Work) Valid() bool {
if work.Command == "" ||
work.Version == "" ||
work.Depend == "" {
return false
}
return true
}
func (work *Work) DoWork() error {
var err error
if !work.Valid() {
err = errors.New("Work is invalid")
return err
}
switch work.Command {
case COMMAND_DOWNLOAD:
err = work.Download()
case COMMAND_RELOAD:
err = work.Reload()
case COMMAND_SWITCH:
err = work.Enable()
case COMMAND_CHECK:
err = work.Check()
case COMMAND_CLEAR:
err = work.Clear()
case COMMAND_POP:
err = work.Pop()
default:
logex.Debugf("Invalid command %s received", work.Command)
err = errors.New("Invalid command.")
}
return err
}
func GetDownloadDirs(dictName, service, version, depend, deployPath string, shardSeq,
split int) ([]string, error) {
dirs := make([]string, 0, split)
if deployPath == "" {
return dirs, errors.New("Invalid Deploy path")
}
parentDir := getParentDir(version, depend)
if split < 2 {
disk := path.Join(deployPath, "cube_data")
if service == "" {
dirs = append(dirs, path.Join(disk, strconv.Itoa(shardSeq), parentDir))
} else {
dirs = append(dirs, path.Join(disk, strconv.Itoa(shardSeq), parentDir+"-"+dictName))
}
} else {
for i := 0; i < split; i++ {
disk := path.Join(deployPath, "cube_data")
if service == "" {
dirs = append(dirs, path.Join(disk, strconv.Itoa(shardSeq), strconv.Itoa(i), parentDir))
} else {
dirs = append(dirs, path.Join(disk, strconv.Itoa(shardSeq),
parentDir+"-"+dictName))
}
}
}
return dirs, nil
}
func GetDataLinkDirs(dictName, service, version, depend, deployPath string, shardSeq,
split int) []string {
dirs := make([]string, 0, split)
parentDir := getParentDir(version, depend)
if split < 2 {
disk := path.Join(deployPath, "data")
if service == "" {
dirs = append(dirs, path.Join(disk, parentDir))
}
} else {
for i := 0; i < split; i++ {
disk := path.Join(deployPath, "data")
if service == "" {
dirs = append(dirs, path.Join(disk, strconv.Itoa(i), parentDir))
}
}
}
return dirs
}
func (work *Work) Download() (err error) {
err = DoDownload(work.DictName, work.Service, work.Version, work.Depend, work.Mode, work.Source,
work.DeployPath, work.ShardSeq)
if err != nil {
logex.Warningf("download error, failed to download %s, dir is %s, error is (+%v)", work.Source, work.DeployPath, err)
return
}
if work.Service == "" {
err = UnTar(work.DictName, work.Service, work.Version, work.Depend, work.Source,
work.DeployPath, work.ShardSeq)
if err == nil {
dataPath := path.Join(work.DeployPath, "data")
// remove all old links
if work.Mode == "base" || len(work.RollbackInfo) != 0 {
cmd := fmt.Sprintf("ls -l %s | grep -E 'data.|index.' | awk '{print $9}'", dataPath)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err == nil && stdout != "" {
fileNameLi := strings.Split(strings.TrimSpace(stdout), "\n")
for _, file := range fileNameLi {
err = os.Remove(path.Join(dataPath, file))
logex.Debugf("os.Remove(%s) error (%+v) ", path.Join(dataPath, file), err)
}
}
}
// create symbolic link to the version rollbacked
err = CreateSymlink(work.DictName, work.Service, work.Version, work.Depend, dataPath,
work.DeployPath, work.ShardSeq, len(strings.Split(work.Source, ";")))
} else {
logex.Warningf("download error, failed to untar for %s, dir is %s, error is (+%v)", work.Source, work.DeployPath, err)
}
}
if err == nil {
// clear history data
work.clearData()
work.clearLink()
} else {
logex.Warningf("create symlink failed, error is (+%v)", err)
}
return
}
func (work *Work) clearData() (err error) {
split := len(strings.Split(work.Source, ";"))
downloadDirs, err := GetDownloadDirs(work.DictName, work.Service, work.Version, work.Depend,
work.DeployPath, work.ShardSeq, split)
if err != nil {
logex.Warningf("clearData failed, error is (+%v)", err)
return
}
for _, downloadDir := range downloadDirs {
parentDir, _ := filepath.Split(downloadDir)
cmd := fmt.Sprintf("ls -l %s | grep -v %s | awk '{print $9}'", parentDir, work.Depend)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err != nil || stdout == "" || work.Service != "" {
cmd = fmt.Sprintf("find %s -type d -ctime +1 -print | xargs -i rm -rf {}", parentDir)
_, _, err = RetryCmd(cmd, RETRY_TIMES)
} else {
rmList := strings.Split(strings.TrimSpace(stdout), "\n")
for i := 0; i < len(rmList); i++ {
if rmList[i] == "" {
continue
}
cmd = fmt.Sprintf("rm -rf %s/%s*", parentDir, rmList[i])
_, _, err = RetryCmd(cmd, RETRY_TIMES)
}
}
}
return
}
func (work *Work) clearPatchData() (err error) {
if work.Service != "" {
return
}
split := len(strings.Split(work.Source, ";"))
downloadDirs, err := GetDownloadDirs(work.DictName, work.Service, work.Version, work.Depend,
work.DeployPath, work.ShardSeq, split)
if err != nil {
logex.Warningf("clearPatchData failed, error is (+%v)", err)
return
}
for _, downloadDir := range downloadDirs {
parentDir, _ := filepath.Split(downloadDir)
cmd := fmt.Sprintf("ls -l %s | grep %s_ | awk '{print $9}'", parentDir, work.Depend)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err == nil && stdout != "" {
rmList := strings.Split(strings.TrimSpace(stdout), "\n")
for i := 0; i < len(rmList); i++ {
if rmList[i] == "" {
continue
}
cmd = fmt.Sprintf("rm -rf %s/%s*", parentDir, rmList[i])
_, _, err = RetryCmd(cmd, RETRY_TIMES)
}
}
}
return
}
func (work *Work) clearLink() (err error) {
if work.Service != "" {
return
}
split := len(strings.Split(work.Source, ";"))
dataLinkDirs := GetDataLinkDirs(work.DictName, work.Service, work.Version, work.Depend,
work.DeployPath, work.ShardSeq, split)
for _, linkDir := range dataLinkDirs {
parentDir, _ := filepath.Split(linkDir)
cmd := fmt.Sprintf("ls -l %s | grep -v %s | awk '{print $9}'", parentDir, work.Depend)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err != nil || stdout == "" {
cmd = fmt.Sprintf("find %s -type d -ctime +1 -print | xargs -i rm -rf {}", parentDir)
_, _, err = RetryCmd(cmd, RETRY_TIMES)
} else {
rmList := strings.Split(strings.TrimSpace(stdout), "\n")
for i := 0; i < len(rmList); i++ {
if rmList[i] == "" {
continue
}
cmd = fmt.Sprintf("rm -rf %s/%s*", parentDir, rmList[i])
_, _, err = RetryCmd(cmd, RETRY_TIMES)
}
}
}
return
}
func (work *Work) clearPatchLink() (err error) {
if work.Service != "" {
return
}
split := len(strings.Split(work.Source, ";"))
dataLinkDirs := GetDataLinkDirs(work.DictName, work.Service, work.Version, work.Depend,
work.DeployPath, work.ShardSeq, split)
for _, linkDir := range dataLinkDirs {
parentDir, _ := filepath.Split(linkDir)
cmd := fmt.Sprintf("ls -l %s | grep %s_ | awk '{print $9}'", parentDir, work.Depend)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err == nil && stdout != "" {
rmList := strings.Split(strings.TrimSpace(stdout), "\n")
for i := 0; i < len(rmList); i++ {
if rmList[i] == "" {
continue
}
cmd = fmt.Sprintf("rm -rf %s/%s*", parentDir, rmList[i])
_, _, err = RetryCmd(cmd, RETRY_TIMES)
}
}
}
return
}
func UnTar(dictName, service, version, depend, source, deployPath string, shardSeq int) (err error) {
sources := strings.Split(source, ";")
downloadDirs, err := GetDownloadDirs(dictName, service, version, depend, deployPath, shardSeq,
len(sources))
if err != nil {
logex.Warningf("UnTar failed, error is (+%v)", err)
return
}
for i := 0; i < len(sources); i++ {
fileName := GetFileName(sources[i])
untarCmd := fmt.Sprintf("tar xvf %s -C %s", path.Join(downloadDirs[i], fileName), downloadDirs[i])
_, _, err = RetryCmd(untarCmd, RETRY_TIMES)
}
return
}
func CreateSymlink(dictName, service, version, depend, dataPath, deployPath string, shardSeq,
split int) (err error) {
downloadDirs, err := GetDownloadDirs(dictName, service, version, depend, deployPath, shardSeq, split)
if err != nil {
logex.Warningf("CreateSymlink failed, error is (+%v)", err)
}
for i, downloadDir := range downloadDirs {
cmd := fmt.Sprintf("ls -l %s | grep -E 'data.|index.' | awk '{print $NF}'", downloadDir)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err == nil && stdout != "" {
fileNameLi := strings.Split(strings.TrimSpace(stdout), "\n")
versionDir := getParentDir(version, depend)
versionFile := path.Join(dataPath, "VERSION")
dataSubPath := ""
if split > 1 {
dataSubPath = path.Join(dataPath, strconv.Itoa(i), versionDir)
} else {
dataSubPath = path.Join(dataPath, versionDir)
}
if err = os.MkdirAll(dataSubPath, 0755); err != nil {
// return err
logex.Warningf("os.Mkdir %s failed, err:[%v]", dataSubPath, err)
}
if dataSubPath != "" {
cmd = fmt.Sprintf("find %s/.. -type d -ctime +5 -print | xargs -i rm -rf {}", dataSubPath)
_, _, err = RetryCmd(cmd, RETRY_TIMES)
}
for _, file := range fileNameLi {
dataLink := ""
tempDataPath := ""
if split > 1 {
dataLink = path.Join(dataPath, strconv.Itoa(i), file)
tempDataPath = path.Join(dataPath, strconv.Itoa(i))
} else {
dataLink = path.Join(dataPath, file)
tempDataPath = dataPath
}
cmd = fmt.Sprintf("rm -rf %s", dataLink)
_, stderr, _ := RetryCmd(cmd, RETRY_TIMES)
logex.Noticef("rm -rf %s, err:[%s]", dataLink, stderr)
// create new symlink
err = os.Symlink(path.Join(downloadDir, file), dataLink)
logex.Noticef("os.Symlink %s %s return (%+v)", path.Join(downloadDir, file), dataLink, err)
fmt.Println("os.Symlink: ", path.Join(downloadDir, file), dataLink, err)
cmd = fmt.Sprintf("cp -d %s/index.* %s/", tempDataPath, dataSubPath)
_, stderr, _ = RetryCmd(cmd, RETRY_TIMES)
logex.Noticef("cp -d index Symlink to version dir %s, err:[%s]", dataSubPath, stderr)
cmd = fmt.Sprintf("cp -d %s/data.* %s/", tempDataPath, dataSubPath)
_, stderr, _ = RetryCmd(cmd, RETRY_TIMES)
logex.Noticef("cp -d data Symlink to version dir %s, err:[%s]", dataSubPath, stderr)
}
cmd = fmt.Sprintf("echo %s > %s", versionDir, versionFile)
if _, _, err = RetryCmd(cmd, RETRY_TIMES); err != nil {
return err
}
}
}
return
}
func (work *Work) CheckToReload() bool {
statusCmd := fmt.Sprintf("curl -s -d '{\"cmd\":\"status\"}' http://127.0.0.1:%s/ControlService/cmd", work.Port)
stdout, _, _ := RetryCmd(statusCmd, RETRY_TIMES)
var resp CubeResp
json.Unmarshal([]byte(stdout), &resp)
version := getParentDir(work.Version, work.Depend)
if resp.CurVersion == "" && resp.BgVersion == "" {
logex.Noticef("cube version empty")
return true
}
if resp.CurVersion == version || resp.BgVersion == version {
logex.Noticef("cube version has matched. version: %s", version)
return false
}
return true
}
func (work *Work) Reload() (err error) {
if work.Port == "" {
err = errors.New("Reload with invalid port.")
return
}
if !work.CheckToReload() {
work.writeStatus("finish_reload", "succ")
return
}
work.writeStatus("prepare_reload", "")
var stdout string
versionPath := getParentDir(work.Version, work.Depend)
bgLoadCmd := "bg_load_base"
if work.Mode == "delta" {
bgLoadCmd = "bg_load_patch"
}
if work.ActiveVersionList == "" {
work.ActiveVersionList = "[]"
}
for i := 0; i < RELOAD_RETRY_TIMES; i++ {
reloadCmd := fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} -d '{\"cmd\":\"%s\",\"version_path\":\"/%s\"}' http://127.0.0.1:%s/ControlService/cmd", bgLoadCmd, versionPath, work.Port)
fmt.Println("reload: ", reloadCmd)
stdout, _, _ = RetryCmd(reloadCmd, 1)
fmt.Println("reload stdout: ", stdout)
if strings.TrimSpace(stdout) == "200" {
logex.Debugf("bg_load_base return succ")
break
} else {
logex.Warning("bg_load_base return failed")
time.Sleep(RELOAD_RETRY_INTERVAL_SECOND * time.Second)
}
}
if strings.TrimSpace(stdout) == "200" {
work.writeStatus("finish_reload", "succ")
} else {
work.writeStatus("finish_reload", "failed")
err = errors.New("reload failed.")
}
return
}
func (work *Work) Clear() (err error) {
work.Service = ""
var stdout string
var clearCmd string
for i := 0; i < RETRY_TIMES; i++ {
clearCmd = fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} -d '{\"cmd\":\"clear\",\"table_name\":\"%s\"}' http://127.0.0.1:%s/NodeControlService/cmd", work.DictName, work.Port)
fmt.Println("clear: ", clearCmd)
stdout, _, _ = RetryCmd(clearCmd, 1)
fmt.Println("clear stdout: ", stdout)
if strings.TrimSpace(stdout) == "200" {
logex.Debugf("clear return succ")
break
} else {
logex.Warning("clear return failed")
time.Sleep(RELOAD_RETRY_INTERVAL_SECOND * time.Second)
}
}
if strings.TrimSpace(stdout) == "200" {
err = work.writeStatus("succ", "")
} else {
err = work.writeStatus("failed", "")
}
return
}
func (work *Work) Check() (err error) {
if work.Service != "" || work.VersionSign == "" {
return
}
var dataLinkDirs []string
split := len(strings.Split(work.Source, ";"))
dataLinkDirs = GetDataLinkDirs(work.DictName, work.Service, work.Version, work.Depend,
work.DeployPath, work.ShardSeq, split)
if _, t_err := os.Stat(work.DeployPath); os.IsNotExist(t_err) {
logex.Noticef("check DeployPath[%s] not exists.", work.DeployPath)
return
}
check_succ := true
for _, linkDir := range dataLinkDirs {
parentDir, _ := filepath.Split(linkDir)
cmd := fmt.Sprintf("ls -l %s | grep %s | awk '{print $9}' | grep -v data | grep -v index", parentDir, work.Depend)
stdout, _, err := RetryCmd(cmd, RETRY_TIMES)
if err != nil || stdout == "" {
check_succ = false
break
} else {
versionList := strings.Split(strings.TrimSpace(stdout), "\n")
logex.Noticef("calc ver_sign for [%v]", versionList)
var version_sign string
var version string
for i := 0; i < len(versionList); i++ {
split_index := strings.Index(versionList[i], "_")
if split_index > 0 && split_index < len(versionList[i]) {
version = versionList[i][split_index+1:]
} else {
version = versionList[i]
}
if version_sign == "" {
version_sign = fmt.Sprintf("%x", md5.Sum([]byte(version)))
} else {
version_sign = fmt.Sprintf("%x", md5.Sum([]byte(version_sign)))
}
}
if version_sign != work.VersionSign {
logex.Warningf("version_sign check failed. real[%v] expect[%v]", version_sign, work.VersionSign)
check_succ = false
break
}
}
}
if !check_succ {
work.clearPatchData()
work.clearPatchLink()
master_host, master_port, _ := GetMaster(work.MasterAddress)
cmd := fmt.Sprintf("cd %s && export STRATEGY_DIR=%s && ./downloader -h %s -p %s -d %s -s %d",
work.DeployPath, work.DeployPath, master_host, master_port, work.DictName, work.ShardSeq)
_, _, err = RetryCmd(cmd, RETRY_TIMES)
}
return
}
func (work *Work) Enable() (err error) {
if work.Port == "" {
err = errors.New("Enable with invalid port")
return
}
var stdout string
var cmd string
versionPath := getParentDir(work.Version, work.Depend)
for i := 0; i < RELOAD_RETRY_TIMES; i++ {
if work.Service != "" {
cmd = fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} -d '{\"cmd\":\"reload_model\",\"version\":\"%s-%s\",\"dict_name\":\"%s\"}' http://127.0.0.1:%s/ControlService/cmd",
versionPath, work.DictName, work.DictName, work.Port)
} else {
cmd = fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} -d '{\"cmd\":\"enable\",\"version\":\"%s\"}' http://127.0.0.1:%s/ControlService/cmd", versionPath, work.Port)
}
stdout, _, _ = RetryCmd(cmd, 1)
if strings.TrimSpace(stdout) == "200" {
logex.Debugf("enable return succ for %s, work dir is %s", work.Source, work.DeployPath)
break
} else {
logex.Warningf("enable return failed for %s, work dir is %s, error is (%+v)", work.Source, work.DeployPath, err)
time.Sleep(RELOAD_RETRY_INTERVAL_SECOND * time.Second)
}
}
if strings.TrimSpace(stdout) == "200" {
err = work.writeStatus("succ", "")
} else {
err = work.writeStatus("failed", "")
}
if work.Service == "" {
cmd = fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} -d '{\"cmd\":\"bg_unload\"}' http://127.0.0.1:%s/ControlService/cmd", work.Port)
stdout, _, _ = RetryCmd(cmd, RETRY_TIMES)
if strings.TrimSpace(stdout) == "200" {
logex.Debugf("unload return succ")
} else {
logex.Warning("unload return failed")
}
}
RemoveStateFile(work.DictName, work.ShardSeq, work.Service)
return
}
func (work *Work) Pop() (err error) {
var stdout string
var cmd string
if work.ActiveVersionList == "" {
work.ActiveVersionList = "[]"
}
for i := 0; i < RELOAD_RETRY_TIMES; i++ {
cmd = fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} -d '{\"cmd\":\"pop\",\"table_name\":\"%s\",\"active_versions\":%v}' http://127.0.0.1:%s/NodeControlService/cmd", work.DictName, work.ActiveVersionList, work.Port)
fmt.Println("pop: ", cmd)
stdout, _, _ = RetryCmd(cmd, 1)
fmt.Println("pop stdout: ", stdout)
if strings.TrimSpace(stdout) == "200" {
logex.Debugf("pop return succ")
break
} else {
logex.Warning("pop return failed")
time.Sleep(RELOAD_RETRY_INTERVAL_SECOND * time.Second)
}
}
if strings.TrimSpace(stdout) == "200" {
err = work.writeStatus("succ", "")
} else {
err = work.writeStatus("failed", "")
}
RemoveStateFile(work.DictName, work.ShardSeq, work.Service)
return
}
func writeStateFile(dictName string, shardSeq int, service, state string) {
stateFile := fmt.Sprintf(".state_%s_%d", dictName, shardSeq)
if service != "" {
stateFile = stateFile + "_" + service
}
cmd := fmt.Sprintf("echo '%s' > %s/state/%s", state, Dir, stateFile)
if _, _, err := RetryCmd(cmd, RETRY_TIMES); err != nil {
logex.Warningf("%s error (%+v)", cmd, err)
}
}
func RemoveStateFile(dictName string, shardSeq int, service string) {
stateFile := fmt.Sprintf(".state_%s_%d", dictName, shardSeq)
if service != "" {
stateFile = stateFile + "_" + service
}
cmd := fmt.Sprintf("rm -f %s/state/%s", Dir, stateFile)
if _, _, err := RetryCmd(cmd, RETRY_TIMES); err != nil {
logex.Warningf("%s error (%+v)", cmd, err)
}
}
func (work *Work) writeStatus(status string, finishStatus string) (err error) {
work.Status = status
work.FinishStatus = finishStatus
state, _ := json.Marshal(work)
writeStateFile(work.DictName, work.ShardSeq, work.Service, string(state))
return
}
func DoDownloadIndividual(source, downloadDir string, isService bool, timeOut int, ch chan error, wg *sync.WaitGroup) {
err := errors.New("DoDownloadIndividual start")
for i := 0; i < RETRY_TIMES; i++ {
err = FtpDownload(source, downloadDir, timeOut)
if err == nil {
logex.Debugf("download %s to %s succ", source, downloadDir)
if !isService {
err = FtpDownload(source+".md5", downloadDir, timeOut)
}
} else {
logex.Warningf("download error , source %s, downloadDir %s, err (%+v)", source, downloadDir, err)
continue
}
if err == nil && isService {
// touch download_succ file
cmd := fmt.Sprintf("touch %s", path.Join(downloadDir, DOWNLOAD_DONE_MARK_FILE))
RetryCmd(cmd, RETRY_TIMES)
break
}
// download md5 file succ, md5check
if err == nil {
// md5sum -c
fileName := GetFileName(source)
err = checkMd5(path.Join(downloadDir, fileName), path.Join(downloadDir, fileName+".md5"))
logex.Warningf("md5sum check %s %s return (%+v)", path.Join(downloadDir, fileName), path.Join(downloadDir, fileName+".md5"), err)
if err == nil {
// touch download_succ file
cmd := fmt.Sprintf("touch %s", path.Join(downloadDir, DOWNLOAD_DONE_MARK_FILE))
RetryCmd(cmd, RETRY_TIMES)
logex.Debugf("md5sum ok, source is %s, dir is %s", source, downloadDir)
break
} else {
logex.Warningf("md5sum error, source is %s, dir is %s", source, downloadDir)
continue
}
} else {
logex.Warningf("download %s return (%+v)", source+".md5", err)
continue
}
}
ch <- err
wg.Done()
}
func checkSources(source string) ([]string, error) {
sources := strings.Split(source, ";")
for i := 0; i < len(sources); i++ {
if sources[i] == "" || (!strings.HasPrefix(sources[i], "ftp://") && !strings.HasPrefix(sources[i], "http://")) {
return sources, errors.New("Invalid sources")
}
}
return sources, nil
}
// DoDownload fetches every source of a dict shard (at most MAX_DOWN_CO
// concurrent downloads) and stamps the VERSION file when all parts succeed.
func DoDownload(dictName, service, version, depend, mode, source, deployPath string,
shardSeq int) (err error) {
sources, err := checkSources(source)
if err != nil {
logex.Warningf("checkSources %s return (%+v)", source, err)
return
}
downloadDirs, err := GetDownloadDirs(dictName, service, version, depend, deployPath, shardSeq,
len(sources))
if err != nil {
logex.Warningf("GetDownloadDirs %s return (%+v)", source, err)
return
}
version_suffix := ""
if service != "" {
version_suffix = version_suffix + "-" + dictName
}
if !checkToDownload(downloadDirs) {
cmd := fmt.Sprintf("cd %s/cube_data && echo %s > VERSION && cp VERSION VERSION-%s",
deployPath, getParentDir(version, depend)+version_suffix, dictName)
_, _, err = RetryCmd(cmd, RETRY_TIMES)
logex.Debugf("echo VERSION cmd:[%s] err:[%v]", cmd, err)
return
}
ch := make(chan error, len(sources))
wg := sync.WaitGroup{}
j := 0
numCo := 0
for ; j < len(sources); j++ {
if numCo >= MAX_DOWN_CO {
wg.Wait()
logex.Noticef("DoDownload co down.")
numCo = 0
}
numCo += 1
wg.Add(1)
time.Sleep(2000 * time.Millisecond)
timeOut := 900
if mode == "base" {
timeOut = 3600
}
go DoDownloadIndividual(sources[j], downloadDirs[j], (service != ""), timeOut, ch, &wg)
}
wg.Wait()
close(ch)
for err = range ch {
if err != nil {
return
}
}
cmd := fmt.Sprintf("cd %s/cube_data && echo %s > VERSION && cp VERSION VERSION-%s",
deployPath, getParentDir(version, depend)+version_suffix, dictName)
_, _, err = RetryCmd(cmd, RETRY_TIMES)
logex.Debugf("echo VERSION cmd:[%s] err:[%v]", cmd, err)
return
}
func FtpDownload(source string, dest string, timeOut int) (err error) {
dlCmd := fmt.Sprintf("wget --quiet --level=100 -P %s %s --limit-rate=10240k", dest, source)
fmt.Println(dlCmd)
_, _, err = RetryCmdWithSleep(dlCmd, RETRY_TIMES)
return
}
func checkToDownload(downloadDirs []string) bool {
for _, v := range downloadDirs {
if _, err := os.Stat(path.Join(v, DOWNLOAD_DONE_MARK_FILE)); err != nil {
logex.Noticef("check [%v] not exists.", v)
return true
}
}
return false
}
// simple hash
func getDownloadDisk(dictName string, shardSeq int) string {
index := len(dictName) * shardSeq % len(disks)
return disks[index]
}
func getParentDir(version string, depend string) (dir string) {
if version == depend {
dir = depend
} else {
dir = depend + "_" + version
}
return
}
func GetFileName(source string) (fileName string) {
s := strings.Split(source, "/")
fileName = s[len(s)-1]
return
}
func checkMd5(file string, fileMd5 string) (err error) {
cmd := fmt.Sprintf("md5sum %s | awk '{print $1}'", file)
stdout, _, _ := pipeline.Run(exec.Command("/bin/sh", "-c", cmd))
real_md5 := stdout.String()
cmd = fmt.Sprintf("cat %s | awk '{print $1}'", fileMd5)
stdout, _, _ = pipeline.Run(exec.Command("/bin/sh", "-c", cmd))
given_md5 := stdout.String()
if real_md5 != given_md5 {
logex.Warningf("checkMd5 failed real_md5[%s] given_md5[%s]", real_md5, given_md5)
err = errors.New("checkMd5 failed")
}
return
}
func RetryCmd(cmd string, retryTimes int) (stdoutStr string, stderrStr string, err error) {
for i := 0; i < retryTimes; i++ {
stdout, stderr, e := pipeline.Run(exec.Command("/bin/sh", "-c", cmd))
stdoutStr = stdout.String()
stderrStr = stderr.String()
err = e
logex.Debugf("cmd %s, stdout: %s, stderr: %s, err: (%+v)", cmd, stdoutStr, stderrStr, err)
if err == nil {
break
}
}
return
}
func RetryCmdWithSleep(cmd string, retryTimes int) (stdoutStr string, stderrStr string, err error) {
for i := 0; i < retryTimes; i++ {
stdout, stderr, e := pipeline.Run(exec.Command("/bin/sh", "-c", cmd))
stdoutStr = stdout.String()
stderrStr = stderr.String()
err = e
logex.Debugf("cmd %s, stdout: %s, stderr: %s, err: (%+v)", cmd, stdoutStr, stderrStr, err)
if err == nil {
break
}
time.Sleep(10000 * time.Millisecond)
}
return
}
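Work items appear designed to round-trip as JSON whose keys match the struct tags on Work. A sketch of decoding and dispatching one follows; the payload values are illustrative, and running it would really attempt the download:

package main

import (
	"agent"
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"dict_name":"test_dict","shard_seq":0,"deploy_path":"/home/work/cube","command":"download","version":"1570000000","depend":"1560000000","source":"ftp://example.com/cube/base.tar","mode":"base","port":"8027"}`)
	var w agent.Work
	if err := json.Unmarshal(payload, &w); err != nil {
		fmt.Println("bad work item:", err)
		return
	}
	// Valid() requires command, version and depend; DoWork then dispatches
	// on Command, here to Download().
	if err := w.DoWork(); err != nil {
		fmt.Println("work failed:", err)
	}
}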
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"errors"
"fmt"
"sync"
"sync/atomic"
)
type (
workType struct {
poolWorker PoolWorker
resultChannel chan error
}
WorkPool struct {
queueChannel chan workType
workChannel chan PoolWorker
queuedWorkNum int32
activeWorkerNum int32
queueCapacity int32
workFilter sync.Map
}
)
type PoolWorker interface {
Token() string
DoWork()
}
// NewWorkPool creates a pool with workerNum worker goroutines and a bounded
// queue of queueCapacity pending items.
func NewWorkPool(workerNum int, queueCapacity int32) *WorkPool {
workPool := WorkPool{
queueChannel: make(chan workType),
workChannel: make(chan PoolWorker, queueCapacity),
queuedWorkNum: 0,
activeWorkerNum: 0,
queueCapacity: queueCapacity,
}
for i := 0; i < workerNum; i++ {
go workPool.startWorkRoutine()
}
go workPool.startQueueRoutine()
return &workPool
}
func (workPool *WorkPool) startWorkRoutine() {
for {
select {
case work := <-workPool.workChannel:
workPool.doWork(work)
break
}
}
}
func (workPool *WorkPool) startQueueRoutine() {
for {
select {
case queueItem := <-workPool.queueChannel:
if atomic.AddInt32(&workPool.queuedWorkNum, 0) == workPool.queueCapacity {
queueItem.resultChannel <- fmt.Errorf("work pool is full with %v pending works", workPool.queueCapacity)
continue
}
atomic.AddInt32(&workPool.queuedWorkNum, 1)
workPool.workChannel <- queueItem.poolWorker
queueItem.resultChannel <- nil
break
}
}
}
func (workPool *WorkPool) doWork(poolWorker PoolWorker) {
defer atomic.AddInt32(&workPool.activeWorkerNum, -1)
defer workPool.workFilter.Delete(poolWorker.Token())
atomic.AddInt32(&workPool.queuedWorkNum, -1)
atomic.AddInt32(&workPool.activeWorkerNum, 1)
poolWorker.DoWork()
}
func (workPool *WorkPool) PostWorkWithToken(poolWorker PoolWorker) (err error) {
if _, ok := workPool.workFilter.Load(poolWorker.Token()); ok {
return errors.New("another work with same key is doing.")
}
workPool.workFilter.Store(poolWorker.Token(), true)
return workPool.PostWork(poolWorker)
}
func (workPool *WorkPool) PostWork(poolWorker PoolWorker) (err error) {
work := workType{poolWorker, make(chan error)}
defer close(work.resultChannel)
workPool.queueChannel <- work
err = <-work.resultChannel
return err
}
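A usage sketch of the pool (not part of the commit): any type with Token() and a no-return DoWork() can be posted; the agent presumably adapts Work, whose DoWork returns an error, behind such a wrapper. The worker type below is a toy stand-in:

package main

import (
	"agent"
	"fmt"
	"time"
)

// printWorker is a toy PoolWorker; it only needs Token() and DoWork().
type printWorker struct{ id string }

func (w printWorker) Token() string { return w.id }
func (w printWorker) DoWork()       { fmt.Println("processing", w.id) }

func main() {
	pool := agent.NewWorkPool(4, 16)
	// PostWorkWithToken rejects a second item whose Token() is already
	// queued or running.
	if err := pool.PostWorkWithToken(printWorker{id: "dict-0"}); err != nil {
		fmt.Println("rejected:", err)
	}
	time.Sleep(100 * time.Millisecond) // give the async worker time in this toy example
}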
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"agent"
"fmt"
"github.com/Badangel/logex"
"github.com/docopt/docopt-go"
"os"
"path/filepath"
"runtime"
"strconv"
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
agent.Dir, _ = filepath.Abs(filepath.Dir(os.Args[0]))
usage := `Usage: ./cube-agent [options]
Options:
-n WORKERNUM set worker num.
-q QUEUENUM set queue num.
-P LISTEN_PORT agent listen port
Log options:
-l LOG_LEVEL set log level, values: 0,1,2,4,8,16. [default: 16]
--log_dir=DIR set log output dir. [default: ./log]
--log_name=NAME set log name. [default: m_agent]`
opts, err := docopt.Parse(usage, nil, true, "Cube Agent Checker 1.0.0", false)
if err != nil {
fmt.Println("ERROR:", err)
os.Exit(1)
}
log_level, _ := strconv.Atoi(opts["-l"].(string))
log_name := opts["--log_name"].(string)
log_dir := opts["--log_dir"].(string)
logex.SetLevel(getLogLevel(log_level))
if err := logex.SetUpFileLogger(log_dir, log_name, nil); err != nil {
fmt.Println("ERROR:", err)
}
logex.Notice("--- NEW SESSION -------------------------")
logex.Notice(">>> log_level:", log_level)
agent.WorkerNum = 10
if opts["-n"] != nil {
n, err := strconv.Atoi(opts["-n"].(string))
if err == nil {
agent.WorkerNum = n
}
}
agent.QueueCapacity = 20
if opts["-q"] != nil {
q, err := strconv.Atoi(opts["-q"].(string))
if err == nil {
agent.QueueCapacity = int32(q)
}
}
agent.CmdWorkPool = agent.NewWorkPool(agent.WorkerNum, agent.QueueCapacity)
if opts["-P"] == nil {
logex.Fatalf("ERROR: -P LISTEN PORT must be set!")
os.Exit(255)
}
agentPort := opts["-P"].(string)
logex.Notice(">>> starting server...")
addr := ":" + agentPort
if agent.StartHttp(addr) != nil {
logex.Noticef("cant start http(addr=%v). quit.", addr)
os.Exit(0)
}
}
func getLogLevel(log_level int) logex.Level {
switch log_level {
case 16:
return logex.DEBUG
case 8:
return logex.TRACE
case 4:
return logex.NOTICE
case 2:
return logex.WARNING
case 1:
return logex.FATAL
case 0:
return logex.NONE
}
return logex.DEBUG
}
@@ -45,7 +45,7 @@ class PredictorClient {
 PredictorClient() {}
 ~PredictorClient() {}
-void init(const std::string& client_conf);
+int init(const std::string& client_conf);
 void set_predictor_conf(const std::string& conf_path,
                         const std::string& conf_file);
......
@@ -27,45 +27,42 @@ using baidu::paddle_serving::predictor::general_model::FetchInst;
 namespace baidu {
 namespace paddle_serving {
 namespace general_model {
+using configure::GeneralModelConfig;

-void PredictorClient::init(const std::string &conf_file) {
-  _conf_file = conf_file;
-  std::ifstream fin(conf_file);
-  if (!fin) {
-    LOG(ERROR) << "Your inference conf file can not be found";
-    exit(-1);
-  }
-  _feed_name_to_idx.clear();
-  _fetch_name_to_idx.clear();
-  _shape.clear();
-  int feed_var_num = 0;
-  int fetch_var_num = 0;
-  fin >> feed_var_num >> fetch_var_num;
-  std::string name;
-  std::string fetch_var_name;
-  int shape_num = 0;
-  int dim = 0;
-  int type_value = 0;
-  for (int i = 0; i < feed_var_num; ++i) {
-    fin >> name;
-    _feed_name_to_idx[name] = i;
-    fin >> shape_num;
-    std::vector<int> tmp_feed_shape;
-    for (int j = 0; j < shape_num; ++j) {
-      fin >> dim;
-      tmp_feed_shape.push_back(dim);
-    }
-    fin >> type_value;
-    _type.push_back(type_value);
-    _shape.push_back(tmp_feed_shape);
-  }
-  for (int i = 0; i < fetch_var_num; ++i) {
-    fin >> name;
-    fin >> fetch_var_name;
-    _fetch_name_to_idx[name] = i;
-    _fetch_name_to_var_name[name] = fetch_var_name;
-  }
+int PredictorClient::init(const std::string &conf_file) {
+  try {
+    GeneralModelConfig model_config;
+    if (configure::read_proto_conf(conf_file.c_str(), &model_config) != 0) {
+      LOG(ERROR) << "Failed to load general model config"
+                 << ", file path: " << conf_file;
+      return -1;
+    }
+    _feed_name_to_idx.clear();
+    _fetch_name_to_idx.clear();
+    _shape.clear();
+    int feed_var_num = model_config.feed_var_size();
+    int fetch_var_num = model_config.fetch_var_size();
+    for (int i = 0; i < feed_var_num; ++i) {
+      _feed_name_to_idx[model_config.feed_var(i).alias_name()] = i;
+      std::vector<int> tmp_feed_shape;
+      for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
+        tmp_feed_shape.push_back(model_config.feed_var(i).shape(j));
+      }
+      _type.push_back(model_config.feed_var(i).feed_type());
+      _shape.push_back(tmp_feed_shape);
+    }
+    for (int i = 0; i < fetch_var_num; ++i) {
+      _fetch_name_to_idx[model_config.fetch_var(i).alias_name()] = i;
+      _fetch_name_to_var_name[model_config.fetch_var(i).alias_name()] =
+          model_config.fetch_var(i).name();
+    }
+  } catch (std::exception &e) {
+    LOG(ERROR) << "Failed to load general model config: " << e.what();
+    return -1;
+  }
+  return 0;
 }

 void PredictorClient::set_predictor_conf(const std::string &conf_path,
...
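Since init now reports failure through its return value instead of calling exit(-1), callers are expected to check it. A minimal caller-side sketch, assuming an illustrative config path and run_client wrapper (neither is part of the diff):

#include <iostream>

// Hypothetical caller of the new int-returning init(); the config and
// predictor conf paths below are assumptions for illustration only.
int run_client() {
  PredictorClient client;
  if (client.init("./conf/general_model.prototxt") != 0) {
    // init() now returns -1 on a bad or missing config instead of exiting.
    std::cerr << "failed to load general model config" << std::endl;
    return -1;
  }
  client.set_predictor_conf("./conf", "predictor.conf");
  return 0;
}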
@@ -33,7 +33,7 @@ PYBIND11_MODULE(serving_client, m) {
       .def(py::init())
       .def("init",
            [](PredictorClient &self, const std::string &conf) {
-             self.init(conf);
+             return self.init(conf);
           })
       .def("set_predictor_conf",
           [](PredictorClient &self,
...
include_directories(SYSTEM ${CMAKE_CURRENT_LIST_DIR}/../kvdb/include)
include(op/CMakeLists.txt)
include(proto/CMakeLists.txt)
add_executable(serving ${serving_srcs})
add_dependencies(serving pdcodegen fluid_cpu_engine pdserving paddle_fluid
opencv_imgcodecs cube-api)
if (WITH_GPU)
add_dependencies(serving fluid_gpu_engine)
endif()
target_include_directories(serving PUBLIC
${CMAKE_CURRENT_BINARY_DIR}/../../core/predictor
)
if(WITH_GPU)
target_link_libraries(serving -Wl,--whole-archive fluid_gpu_engine
-Wl,--no-whole-archive)
endif()
target_link_libraries(serving -Wl,--whole-archive fluid_cpu_engine
-Wl,--no-whole-archive)
target_link_libraries(serving paddle_fluid ${paddle_depend_libs})
target_link_libraries(serving pdserving)
target_link_libraries(serving cube-api)
target_link_libraries(serving kvdb rocksdb)
if(WITH_GPU)
target_link_libraries(serving ${CUDA_LIBRARIES})
endif()
if(WITH_MKL)
target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
else()
target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
endif()
install(TARGETS serving
RUNTIME DESTINATION
${PADDLE_SERVING_INSTALL_DIR}/demo/serving/bin)
install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/conf DESTINATION
${PADDLE_SERVING_INSTALL_DIR}/demo/serving/)
FILE(GLOB inc ${CMAKE_CURRENT_BINARY_DIR}/*.pb.h)
install(FILES ${inc}
DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/include/serving)
if (${WITH_MKL})
install(FILES
${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mklml/lib/libmklml_intel.so
${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mklml/lib/libiomp5.so
${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mkldnn/lib/libmkldnn.so.0
DESTINATION
${PADDLE_SERVING_INSTALL_DIR}/demo/serving/bin)
endif()
FILE(GLOB op_srcs ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
LIST(APPEND serving_srcs ${op_srcs})
@@ -12,15 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "examples/demo-serving/op/general_infer_op.h"
 #include <algorithm>
 #include <iostream>
 #include <memory>
 #include <sstream>
+#include "core/general-server/op/general_infer_op.h"
+#include "core/general-server/op/general_reader_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
-#include "examples/demo-serving/op/general_reader_op.h"
 namespace baidu {
 namespace paddle_serving {
...
@@ -23,7 +23,7 @@
 #else
 #include "paddle_inference_api.h"  // NOLINT
 #endif
-#include "examples/demo-serving/general_model_service.pb.h"
+#include "core/general-server/general_model_service.pb.h"
 namespace baidu {
 namespace paddle_serving {
...
@@ -12,11 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "examples/demo-serving/op/general_reader_op.h"
 #include <algorithm>
 #include <iostream>
 #include <memory>
 #include <sstream>
+#include "core/general-server/op/general_reader_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -34,6 +34,8 @@ int conf_check(const Request *req,
                const std::shared_ptr<PaddleGeneralModelConfig> &model_config) {
   int var_num = req->insts(0).tensor_array_size();
   if (var_num != model_config->_feed_type.size()) {
+    VLOG(2) << "var num: " << var_num;
+    VLOG(2) << "model config var num: " << model_config->_feed_type.size();
     LOG(ERROR) << "feed var number not match.";
     return -1;
   }
@@ -84,7 +86,7 @@ int GeneralReaderOp::inference() {
   }
   int var_num = req->insts(0).tensor_array_size();
-  VLOG(3) << "var num: " << var_num;
+  VLOG(2) << "var num: " << var_num;
   // read config
   LOG(INFO) << "start to call load general model_conf op";
@@ -112,7 +114,7 @@ int GeneralReaderOp::inference() {
   paddle::PaddleTensor lod_tensor;
   for (int i = 0; i < var_num; ++i) {
     elem_type[i] = req->insts(0).tensor_array(i).elem_type();
-    VLOG(3) << "var[" << i << "] has elem type: " << elem_type[i];
+    VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
     if (elem_type[i] == 0) {  // int64
       elem_size[i] = sizeof(int64_t);
       lod_tensor.dtype = paddle::PaddleDType::INT64;
@@ -124,17 +126,17 @@ int GeneralReaderOp::inference() {
     if (req->insts(0).tensor_array(i).shape(0) == -1) {
       lod_tensor.lod.resize(1);
       lod_tensor.lod[0].push_back(0);
-      VLOG(3) << "var[" << i << "] is lod_tensor";
+      VLOG(2) << "var[" << i << "] is lod_tensor";
     } else {
       lod_tensor.shape.push_back(batch_size);
       capacity[i] = 1;
       for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) {
         int dim = req->insts(0).tensor_array(i).shape(k);
-        VLOG(3) << "shape for var[" << i << "]: " << dim;
+        VLOG(2) << "shape for var[" << i << "]: " << dim;
         capacity[i] *= dim;
         lod_tensor.shape.push_back(dim);
       }
-      VLOG(3) << "var[" << i << "] is tensor, capacity: " << capacity[i];
+      VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i];
     }
     if (i == 0) {
       lod_tensor.name = "words";
@@ -149,19 +151,19 @@ int GeneralReaderOp::inference() {
       for (int j = 0; j < batch_size; ++j) {
         const Tensor &tensor = req->insts(j).tensor_array(i);
         int data_len = tensor.data_size();
-        VLOG(3) << "tensor size for var[" << i << "]: " << tensor.data_size();
+        VLOG(2) << "tensor size for var[" << i << "]: " << tensor.data_size();
         int cur_len = in->at(i).lod[0].back();
-        VLOG(3) << "current len: " << cur_len;
+        VLOG(2) << "current len: " << cur_len;
         in->at(i).lod[0].push_back(cur_len + data_len);
-        VLOG(3) << "new len: " << cur_len + data_len;
+        VLOG(2) << "new len: " << cur_len + data_len;
       }
       in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]);
       in->at(i).shape = {in->at(i).lod[0].back(), 1};
-      VLOG(3) << "var[" << i
-              << "] is lod_tensor and len=" << in->at(i).lod[0].back();
+      VLOG(2) << "var[" << i
+              << "] is lod_tensor and len=" << in->at(i).lod[0].back();
     } else {
       in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]);
-      VLOG(3) << "var[" << i
-              << "] is tensor and capacity=" << batch_size * capacity[i];
+      VLOG(2) << "var[" << i
+              << "] is tensor and capacity=" << batch_size * capacity[i];
     }
   }
@@ -198,14 +200,14 @@ int GeneralReaderOp::inference() {
     }
   }
-  VLOG(3) << "read data from client success";
+  VLOG(2) << "read data from client success";
   // print request
   std::ostringstream oss;
   int64_t *example = reinterpret_cast<int64_t *>((*in)[0].data.data());
   for (int i = 0; i < 10; i++) {
     oss << *(example + i) << " ";
   }
-  VLOG(3) << "head element of first feed var : " << oss.str();
+  VLOG(2) << "head element of first feed var : " << oss.str();
   //
   return 0;
 }
...
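The lod bookkeeping above is the core of the batching logic: every instance in the batch appends its own length to a running offset vector, so lod[0] ends up as {0, n0, n0+n1, ...}. A standalone sketch of just that accumulation:

#include <iostream>
#include <vector>

int main() {
  // Per-instance lengths of one lod feed var across a batch of three.
  std::vector<size_t> data_lens = {3, 5, 2};
  std::vector<size_t> lod = {0};  // offsets start at 0, as in the op
  for (size_t data_len : data_lens) {
    lod.push_back(lod.back() + data_len);  // cur_len + data_len
  }
  for (size_t off : lod) {
    std::cout << off << " ";  // prints: 0 3 8 10
  }
  std::cout << std::endl;
  return 0;
}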
@@ -25,8 +25,8 @@
 #endif
 #include <string>
 #include "core/predictor/framework/resource.h"
-#include "examples/demo-serving/general_model_service.pb.h"
-#include "examples/demo-serving/load_general_model_service.pb.h"
+#include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/load_general_model_service.pb.h"
 namespace baidu {
 namespace paddle_serving {
...
LIST(APPEND protofiles
${CMAKE_CURRENT_LIST_DIR}/load_general_model_service.proto
${CMAKE_CURRENT_LIST_DIR}/general_model_service.proto
)
PROTOBUF_GENERATE_SERVING_CPP(TRUE PROTO_SRCS PROTO_HDRS ${protofiles})
LIST(APPEND serving_srcs ${PROTO_SRCS})
@@ -12,39 +12,37 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package agent
-
-import (
-	"encoding/json"
-	"fmt"
-	"github.com/Badangel/logex"
-)
-
-func initPostHandlers() {
-	postHandler = map[string]handlerFunc{
-		"/agent/cmd": PostCmd,
-	}
-}
-
-func PostCmd(subpath string, m map[string]string, b []byte) (string, string, error) {
-	var work Work
-	err := json.Unmarshal(b, &work)
-	if err != nil {
-		logex.Warningf("Unmarshal from %s error (+%v)", string(b), err)
-		return quote(""), "", fmt.Errorf("Work json unmarshal work failed, %v", err)
-	}
-
-	if _, ok := CmdWorkFilter.Load(work.Token()); ok {
-		logex.Warningf("Another work with same token is doing. Token(%s)", work.Token())
-		return quote(""), "", fmt.Errorf("Another work with same key is doing.", err)
-	}
-
-	CmdWorkFilter.Store(work.Token(), true)
-	err = work.DoWork()
-	CmdWorkFilter.Delete(work.Token())
-	if err != nil {
-		return quote(""), "", fmt.Errorf("Do work failed.", err)
-	}
-
-	return quote(""), "", err
-}
+syntax = "proto2";
+import "pds_option.proto";
+import "builtin_format.proto";
+package baidu.paddle_serving.predictor.general_model;
+
+option cc_generic_services = true;
+
+message Tensor {
+  repeated bytes data = 1;
+  optional int32 elem_type = 2;
+  repeated int32 shape = 3;
+};
+
+message FeedInst {
+  repeated Tensor tensor_array = 1;
+};
+
+message FetchInst {
+  repeated Tensor tensor_array = 1;
+};
+
+message Request {
+  repeated FeedInst insts = 1;
+};
+
+message Response {
+  repeated FetchInst insts = 1;
+};
+
+service GeneralModelService {
+  rpc inference(Request) returns (Response);
+  rpc debug(Request) returns (Response);
+  option (pds.options).generate_impl = true;
+};
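For reference, a request against GeneralModelService can be assembled with the standard protobuf-generated accessors. A minimal sketch only; the one-entry-per-value encoding of the repeated bytes data field is an assumption inferred from how GeneralReaderOp counts data_size():

#include <cstdint>
#include <string>
#include <vector>
#include "general_model_service.pb.h"  // generated from the proto above

using baidu::paddle_serving::predictor::general_model::FeedInst;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::Tensor;

// Build a Request carrying one int64 lod feed var (e.g. word ids).
Request make_request(const std::vector<int64_t> &word_ids) {
  Request req;
  FeedInst *inst = req.add_insts();
  Tensor *tensor = inst->add_tensor_array();
  tensor->set_elem_type(0);  // 0 is treated as int64 by GeneralReaderOp
  tensor->add_shape(-1);     // shape(0) == -1 marks a lod tensor to the reader
  for (int64_t id : word_ids) {
    // Assumption: each repeated `data` entry holds one value's raw bytes.
    tensor->add_data(
        std::string(reinterpret_cast<const char *>(&id), sizeof(id)));
  }
  return req;
}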
@@ -12,24 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package agent
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-func initGetHandlers() {
-	getHandler = map[string]handlerFunc{
-		"/agent/status": GetStatus,
-	}
-}
-
-func GetStatus(subpath string, m map[string]string, b []byte) (string, string, error) {
-	b, err := json.Marshal(BUILTIN_STATUS)
-	if err != nil {
-		return quote(""), "", fmt.Errorf("json marshal failed, %v", err)
-	}
-	return string(b), "", err
-}
+syntax = "proto2";
+import "pds_option.proto";
+package baidu.paddle_serving.predictor.load_general_model_service;
+
+option cc_generic_services = true;
+
+message RequestAndResponse {
+  required int32 a = 1;
+  required float b = 2;
+};
+
+service LoadGeneralModelService {
+  rpc inference(RequestAndResponse) returns (RequestAndResponse);
+  rpc debug(RequestAndResponse) returns (RequestAndResponse);
+  option (pds.options).generate_impl = true;
+};
@@ -38,12 +38,12 @@ DEFINE_int32(
     0,
     "Number of pthreads that server runs on, not change if this value <= 0");
 DEFINE_int32(reload_interval_s, 10, "");
-DEFINE_bool(enable_model_toolkit, false, "enable model toolkit");
+DEFINE_bool(enable_model_toolkit, true, "enable model toolkit");
 DEFINE_string(enable_protocol_list, "baidu_std", "set protocol list");
 DEFINE_bool(enable_cube, false, "enable cube");
 DEFINE_string(general_model_path, "./conf", "");
 DEFINE_string(general_model_file, "general_model.prototxt", "");
-DEFINE_bool(enable_general_model, false, "enable general model");
+DEFINE_bool(enable_general_model, true, "enable general model");
 const char* START_OP_NAME = "startup_op";
 }  // namespace predictor
...
@@ -155,8 +155,11 @@ int Resource::initialize(const std::string& path, const std::string& file) {
 // model config
 int Resource::general_model_initialize(const std::string& path,
                                        const std::string& file) {
+  VLOG(2) << "general model path: " << path;
+  VLOG(2) << "general model file: " << file;
   if (!FLAGS_enable_general_model) {
-    return 0;
+    LOG(ERROR) << "general model is not enabled";
+    return -1;
   }
   ResourceConf resource_conf;
   if (configure::read_proto_conf(path, file, &resource_conf) != 0) {
@@ -183,6 +186,8 @@ int Resource::general_model_initialize(const std::string& path,
   _config.reset(new PaddleGeneralModelConfig());
   int feed_var_num = model_config.feed_var_size();
+  VLOG(2) << "load general model config";
+  VLOG(2) << "feed var num: " << feed_var_num;
   _config->_feed_name.resize(feed_var_num);
   _config->_feed_type.resize(feed_var_num);
   _config->_is_lod_feed.resize(feed_var_num);
@@ -190,15 +195,23 @@ int Resource::general_model_initialize(const std::string& path,
   _config->_feed_shape.resize(feed_var_num);
   for (int i = 0; i < feed_var_num; ++i) {
     _config->_feed_name[i] = model_config.feed_var(i).name();
+    VLOG(2) << "feed var[" << i << "]: " << _config->_feed_name[i];
     _config->_feed_type[i] = model_config.feed_var(i).feed_type();
+    VLOG(2) << "feed type[" << i << "]: " << _config->_feed_type[i];
     if (model_config.feed_var(i).is_lod_tensor()) {
+      VLOG(2) << "var[" << i << "] is lod tensor";
       _config->_feed_shape[i] = {-1};
       _config->_is_lod_feed[i] = true;
     } else {
+      VLOG(2) << "var[" << i << "] is tensor";
       _config->_capacity[i] = 1;
      _config->_is_lod_feed[i] = false;
       for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
         int32_t dim = model_config.feed_var(i).shape(j);
+        VLOG(2) << "var[" << i << "].shape[" << j << "]: " << dim;
         _config->_feed_shape[i].push_back(dim);
         _config->_capacity[i] *= dim;
       }
     }
...
@@ -126,10 +126,6 @@ int main(int argc, char** argv) {
     return 0;
   }
-  if (!FLAGS_g) {
-    google::SetCommandLineOption("flagfile", "conf/gflags.conf");
-  }
   google::ParseCommandLineFlags(&argc, &argv, true);
   g_change_server_port();
...
@@ -55,6 +55,7 @@
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/sdk_configure.pb.h"
+#include "core/configure/general_model_config.pb.h"
 #include "core/sdk-cpp/include/utils.h"
...
@@ -71,8 +71,14 @@ target_link_libraries(serving kvdb rocksdb)
 if(WITH_GPU)
     target_link_libraries(serving ${CUDA_LIBRARIES})
 endif()
-target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread
-                      -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+if(WITH_MKL)
+    message("lalalala: " ${WITH_MKL})
+    target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+else()
+    message("hehehehe: " ${WITH_MKL})
+    target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+endif()

 install(TARGETS serving
         RUNTIME DESTINATION
@@ -85,10 +91,10 @@ install(FILES ${inc}
         DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/include/serving)
 if (${WITH_MKL})
     install(FILES
         ${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mklml/lib/libmklml_intel.so
         ${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mklml/lib/libiomp5.so
         ${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mkldnn/lib/libmkldnn.so.0
         DESTINATION
         ${PADDLE_SERVING_INSTALL_DIR}/demo/serving/bin)
 endif()
-is_lod_feed: true
-is_lod_feed: false
-is_lod_feed: true
-feed_type: 1
-feed_type: 0
-feed_type: 1
-feed_shape {
-  shape: -1
-}
-feed_shape {
-  shape: 1
-  shape: 2
-  shape: 3
-}
-feed_shape {
-  shape: -1
-}
+feed_var {
+  name: "words"
+  is_lod_tensor: true
+  feed_type: 0
+  shape: -1
+}
+feed_var {
+  name: "label"
+  is_lod_tensor: false
+  feed_type: 0
+  shape: 1
+}
+fetch_var {
+  name: "cost"
+  shape: 1
+}
+fetch_var {
+  name: "acc"
+  shape: 1
+}
+fetch_var {
+  name: "prediction"
+  shape: 2
+}
 --enable_model_toolkit
 --enable_cube=false
 --enable_general_model=true
---general_model_path=./conf
---general_model_file=general_model.prototxt
 model_toolkit_path: "./conf/"
 model_toolkit_file: "model_toolkit.prototxt"
 cube_config_file: "./conf/cube.conf"
+general_model_path: "./conf/"
+general_model_file: "general_model.prototxt"
@@ -40,6 +40,6 @@ services {
   workflows: "workflow9"
 }
 services {
-  name: "GeneralModelService"
-  workflows: "workflow11"
+  name: "LoadGeneralModelService"
+  workflows: "workflow10"
 }
\ No newline at end of file
@@ -95,24 +95,7 @@ workflows {
   name: "workflow10"
   workflow_type: "Sequence"
   nodes {
-    name: "general_model_op"
-    type: "GeneralModelOp"
+    name: "load_general_model_conf_op"
+    type: "LoadGeneralModelConfOp"
   }
 }
-workflows {
-  name: "workflow11"
-  workflow_type: "Sequence"
-  nodes {
-    name: "general_reader_op"
-    type: "GeneralReaderOp"
-  }
-  nodes {
-    name: "general_infer_op"
-    type: "GeneralInferOp"
-    dependencies {
-      name: "general_reader_op"
-      mode: "RO"
-    }
-  }
-}
13d73780-de4f-4b8c-9040-34e5adc9f9ae
2020/01/10-09:39:03.152794 7f45d8eaa440 RocksDB version: 6.2.4
2020/01/10-09:39:03.152835 7f45d8eaa440 Git sha rocksdb_build_git_sha:3513d4e93f8530ac44cc0c1efea04be4259c6938
2020/01/10-09:39:03.152838 7f45d8eaa440 Compile date Dec 23 2019
2020/01/10-09:39:03.152842 7f45d8eaa440 DB SUMMARY
2020/01/10-09:39:03.152858 7f45d8eaa440 SST files in kvdb dir, Total Num: 0, files:
2020/01/10-09:39:03.152861 7f45d8eaa440 Write Ahead Log file in kvdb:
2020/01/10-09:39:03.152864 7f45d8eaa440 Options.error_if_exists: 0
2020/01/10-09:39:03.152866 7f45d8eaa440 Options.create_if_missing: 1
2020/01/10-09:39:03.152867 7f45d8eaa440 Options.paranoid_checks: 1
2020/01/10-09:39:03.152868 7f45d8eaa440 Options.env: 0x1bed2ac0
2020/01/10-09:39:03.152870 7f45d8eaa440 Options.info_log: 0x206faac0
2020/01/10-09:39:03.152871 7f45d8eaa440 Options.max_file_opening_threads: 16
2020/01/10-09:39:03.152872 7f45d8eaa440 Options.statistics: (nil)
2020/01/10-09:39:03.152874 7f45d8eaa440 Options.use_fsync: 0
2020/01/10-09:39:03.152875 7f45d8eaa440 Options.max_log_file_size: 0
2020/01/10-09:39:03.152876 7f45d8eaa440 Options.max_manifest_file_size: 1073741824
2020/01/10-09:39:03.152877 7f45d8eaa440 Options.log_file_time_to_roll: 0
2020/01/10-09:39:03.152879 7f45d8eaa440 Options.keep_log_file_num: 1000
2020/01/10-09:39:03.152880 7f45d8eaa440 Options.recycle_log_file_num: 0
2020/01/10-09:39:03.152881 7f45d8eaa440 Options.allow_fallocate: 1
2020/01/10-09:39:03.152882 7f45d8eaa440 Options.allow_mmap_reads: 0
2020/01/10-09:39:03.152883 7f45d8eaa440 Options.allow_mmap_writes: 0
2020/01/10-09:39:03.152884 7f45d8eaa440 Options.use_direct_reads: 0
2020/01/10-09:39:03.152886 7f45d8eaa440 Options.use_direct_io_for_flush_and_compaction: 0
2020/01/10-09:39:03.152887 7f45d8eaa440 Options.create_missing_column_families: 0
2020/01/10-09:39:03.152890 7f45d8eaa440 Options.db_log_dir:
2020/01/10-09:39:03.152891 7f45d8eaa440 Options.wal_dir: kvdb
2020/01/10-09:39:03.152892 7f45d8eaa440 Options.table_cache_numshardbits: 6
2020/01/10-09:39:03.152893 7f45d8eaa440 Options.max_subcompactions: 1
2020/01/10-09:39:03.152894 7f45d8eaa440 Options.max_background_flushes: -1
2020/01/10-09:39:03.152896 7f45d8eaa440 Options.WAL_ttl_seconds: 0
2020/01/10-09:39:03.152897 7f45d8eaa440 Options.WAL_size_limit_MB: 0
2020/01/10-09:39:03.152898 7f45d8eaa440 Options.manifest_preallocation_size: 4194304
2020/01/10-09:39:03.152900 7f45d8eaa440 Options.is_fd_close_on_exec: 1
2020/01/10-09:39:03.152901 7f45d8eaa440 Options.advise_random_on_open: 1
2020/01/10-09:39:03.152902 7f45d8eaa440 Options.db_write_buffer_size: 0
2020/01/10-09:39:03.152903 7f45d8eaa440 Options.write_buffer_manager: 0x206fab20
2020/01/10-09:39:03.152905 7f45d8eaa440 Options.access_hint_on_compaction_start: 1
2020/01/10-09:39:03.152906 7f45d8eaa440 Options.new_table_reader_for_compaction_inputs: 0
2020/01/10-09:39:03.152907 7f45d8eaa440 Options.random_access_max_buffer_size: 1048576
2020/01/10-09:39:03.152908 7f45d8eaa440 Options.use_adaptive_mutex: 0
2020/01/10-09:39:03.152909 7f45d8eaa440 Options.rate_limiter: (nil)
2020/01/10-09:39:03.152911 7f45d8eaa440 Options.sst_file_manager.rate_bytes_per_sec: 0
2020/01/10-09:39:03.152912 7f45d8eaa440 Options.wal_recovery_mode: 2
2020/01/10-09:39:03.152913 7f45d8eaa440 Options.enable_thread_tracking: 0
2020/01/10-09:39:03.152914 7f45d8eaa440 Options.enable_pipelined_write: 0
2020/01/10-09:39:03.152922 7f45d8eaa440 Options.allow_concurrent_memtable_write: 1
2020/01/10-09:39:03.152924 7f45d8eaa440 Options.enable_write_thread_adaptive_yield: 1
2020/01/10-09:39:03.152925 7f45d8eaa440 Options.write_thread_max_yield_usec: 100
2020/01/10-09:39:03.152926 7f45d8eaa440 Options.write_thread_slow_yield_usec: 3
2020/01/10-09:39:03.152927 7f45d8eaa440 Options.row_cache: None
2020/01/10-09:39:03.152928 7f45d8eaa440 Options.wal_filter: None
2020/01/10-09:39:03.152930 7f45d8eaa440 Options.avoid_flush_during_recovery: 0
2020/01/10-09:39:03.152931 7f45d8eaa440 Options.allow_ingest_behind: 0
2020/01/10-09:39:03.152932 7f45d8eaa440 Options.preserve_deletes: 0
2020/01/10-09:39:03.152934 7f45d8eaa440 Options.two_write_queues: 0
2020/01/10-09:39:03.152935 7f45d8eaa440 Options.manual_wal_flush: 0
2020/01/10-09:39:03.152936 7f45d8eaa440 Options.atomic_flush: 0
2020/01/10-09:39:03.152937 7f45d8eaa440 Options.avoid_unnecessary_blocking_io: 0
2020/01/10-09:39:03.152938 7f45d8eaa440 Options.max_background_jobs: 2
2020/01/10-09:39:03.152939 7f45d8eaa440 Options.max_background_compactions: -1
2020/01/10-09:39:03.152941 7f45d8eaa440 Options.avoid_flush_during_shutdown: 0
2020/01/10-09:39:03.152942 7f45d8eaa440 Options.writable_file_max_buffer_size: 1048576
2020/01/10-09:39:03.152943 7f45d8eaa440 Options.delayed_write_rate : 16777216
2020/01/10-09:39:03.152944 7f45d8eaa440 Options.max_total_wal_size: 0
2020/01/10-09:39:03.152945 7f45d8eaa440 Options.delete_obsolete_files_period_micros: 21600000000
2020/01/10-09:39:03.152947 7f45d8eaa440 Options.stats_dump_period_sec: 600
2020/01/10-09:39:03.152948 7f45d8eaa440 Options.stats_persist_period_sec: 600
2020/01/10-09:39:03.152949 7f45d8eaa440 Options.stats_history_buffer_size: 1048576
2020/01/10-09:39:03.152950 7f45d8eaa440 Options.max_open_files: -1
2020/01/10-09:39:03.152952 7f45d8eaa440 Options.bytes_per_sync: 0
2020/01/10-09:39:03.152953 7f45d8eaa440 Options.wal_bytes_per_sync: 0
2020/01/10-09:39:03.152954 7f45d8eaa440 Options.strict_bytes_per_sync: 0
2020/01/10-09:39:03.152955 7f45d8eaa440 Options.compaction_readahead_size: 0
2020/01/10-09:39:03.152956 7f45d8eaa440 Compression algorithms supported:
2020/01/10-09:39:03.152975 7f45d8eaa440 kZSTDNotFinalCompression supported: 0
2020/01/10-09:39:03.152981 7f45d8eaa440 kZSTD supported: 0
2020/01/10-09:39:03.152982 7f45d8eaa440 kXpressCompression supported: 0
2020/01/10-09:39:03.152983 7f45d8eaa440 kLZ4HCCompression supported: 0
2020/01/10-09:39:03.152984 7f45d8eaa440 kLZ4Compression supported: 0
2020/01/10-09:39:03.152986 7f45d8eaa440 kBZip2Compression supported: 1
2020/01/10-09:39:03.152987 7f45d8eaa440 kZlibCompression supported: 1
2020/01/10-09:39:03.152988 7f45d8eaa440 kSnappyCompression supported: 0
2020/01/10-09:39:03.152991 7f45d8eaa440 Fast CRC32 supported: Supported on x86
2020/01/10-09:39:03.153040 7f45d8eaa440 [/db_impl_open.cc:242] Creating manifest 1
2020/01/10-09:39:03.155451 7f45d8eaa440 [/version_set.cc:4053] Recovering from manifest file: kvdb/MANIFEST-000001
2020/01/10-09:39:03.155527 7f45d8eaa440 [/column_family.cc:482] --------------- Options for column family [default]:
2020/01/10-09:39:03.155531 7f45d8eaa440 Options.comparator: leveldb.BytewiseComparator
2020/01/10-09:39:03.155533 7f45d8eaa440 Options.merge_operator: None
2020/01/10-09:39:03.155535 7f45d8eaa440 Options.compaction_filter: None
2020/01/10-09:39:03.155536 7f45d8eaa440 Options.compaction_filter_factory: None
2020/01/10-09:39:03.155537 7f45d8eaa440 Options.memtable_factory: SkipListFactory
2020/01/10-09:39:03.155539 7f45d8eaa440 Options.table_factory: BlockBasedTable
2020/01/10-09:39:03.155577 7f45d8eaa440 table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x1e67c230)
cache_index_and_filter_blocks: 0
cache_index_and_filter_blocks_with_high_priority: 0
pin_l0_filter_and_index_blocks_in_cache: 0
pin_top_level_index_and_filter: 1
index_type: 0
data_block_index_type: 0
index_shortening: 1
data_block_hash_table_util_ratio: 0.750000
hash_index_allow_collision: 1
checksum: 1
no_block_cache: 0
block_cache: 0x206f8040
block_cache_name: LRUCache
block_cache_options:
capacity : 8388608
num_shard_bits : 4
strict_capacity_limit : 0
memory_allocator : None
high_pri_pool_ratio: 0.000
block_cache_compressed: (nil)
persistent_cache: (nil)
block_size: 4096
block_size_deviation: 10
block_restart_interval: 16
index_block_restart_interval: 1
metadata_block_size: 4096
partition_filters: 0
use_delta_encoding: 1
filter_policy: nullptr
whole_key_filtering: 1
verify_compression: 0
read_amp_bytes_per_bit: 0
format_version: 2
enable_index_compression: 1
block_align: 0
2020/01/10-09:39:03.155593 7f45d8eaa440 Options.write_buffer_size: 67108864
2020/01/10-09:39:03.155594 7f45d8eaa440 Options.max_write_buffer_number: 2
2020/01/10-09:39:03.155596 7f45d8eaa440 Options.compression: NoCompression
2020/01/10-09:39:03.155598 7f45d8eaa440 Options.bottommost_compression: Disabled
2020/01/10-09:39:03.155599 7f45d8eaa440 Options.prefix_extractor: nullptr
2020/01/10-09:39:03.155600 7f45d8eaa440 Options.memtable_insert_with_hint_prefix_extractor: nullptr
2020/01/10-09:39:03.155602 7f45d8eaa440 Options.num_levels: 7
2020/01/10-09:39:03.155603 7f45d8eaa440 Options.min_write_buffer_number_to_merge: 1
2020/01/10-09:39:03.155604 7f45d8eaa440 Options.max_write_buffer_number_to_maintain: 0
2020/01/10-09:39:03.155606 7f45d8eaa440 Options.bottommost_compression_opts.window_bits: -14
2020/01/10-09:39:03.155607 7f45d8eaa440 Options.bottommost_compression_opts.level: 32767
2020/01/10-09:39:03.155608 7f45d8eaa440 Options.bottommost_compression_opts.strategy: 0
2020/01/10-09:39:03.155610 7f45d8eaa440 Options.bottommost_compression_opts.max_dict_bytes: 0
2020/01/10-09:39:03.155611 7f45d8eaa440 Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2020/01/10-09:39:03.155612 7f45d8eaa440 Options.bottommost_compression_opts.enabled: false
2020/01/10-09:39:03.155614 7f45d8eaa440 Options.compression_opts.window_bits: -14
2020/01/10-09:39:03.155615 7f45d8eaa440 Options.compression_opts.level: 32767
2020/01/10-09:39:03.155616 7f45d8eaa440 Options.compression_opts.strategy: 0
2020/01/10-09:39:03.155617 7f45d8eaa440 Options.compression_opts.max_dict_bytes: 0
2020/01/10-09:39:03.155619 7f45d8eaa440 Options.compression_opts.zstd_max_train_bytes: 0
2020/01/10-09:39:03.155620 7f45d8eaa440 Options.compression_opts.enabled: false
2020/01/10-09:39:03.155621 7f45d8eaa440 Options.level0_file_num_compaction_trigger: 4
2020/01/10-09:39:03.155622 7f45d8eaa440 Options.level0_slowdown_writes_trigger: 20
2020/01/10-09:39:03.155624 7f45d8eaa440 Options.level0_stop_writes_trigger: 36
2020/01/10-09:39:03.155625 7f45d8eaa440 Options.target_file_size_base: 67108864
2020/01/10-09:39:03.155626 7f45d8eaa440 Options.target_file_size_multiplier: 1
2020/01/10-09:39:03.155627 7f45d8eaa440 Options.max_bytes_for_level_base: 268435456
2020/01/10-09:39:03.155629 7f45d8eaa440 Options.snap_refresh_nanos: 0
2020/01/10-09:39:03.155630 7f45d8eaa440 Options.level_compaction_dynamic_level_bytes: 0
2020/01/10-09:39:03.155631 7f45d8eaa440 Options.max_bytes_for_level_multiplier: 10.000000
2020/01/10-09:39:03.155633 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[0]: 1
2020/01/10-09:39:03.155635 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[1]: 1
2020/01/10-09:39:03.155641 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[2]: 1
2020/01/10-09:39:03.155642 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[3]: 1
2020/01/10-09:39:03.155644 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[4]: 1
2020/01/10-09:39:03.155645 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[5]: 1
2020/01/10-09:39:03.155646 7f45d8eaa440 Options.max_bytes_for_level_multiplier_addtl[6]: 1
2020/01/10-09:39:03.155647 7f45d8eaa440 Options.max_sequential_skip_in_iterations: 8
2020/01/10-09:39:03.155648 7f45d8eaa440 Options.max_compaction_bytes: 1677721600
2020/01/10-09:39:03.155650 7f45d8eaa440 Options.arena_block_size: 8388608
2020/01/10-09:39:03.155651 7f45d8eaa440 Options.soft_pending_compaction_bytes_limit: 68719476736
2020/01/10-09:39:03.155652 7f45d8eaa440 Options.hard_pending_compaction_bytes_limit: 274877906944
2020/01/10-09:39:03.155653 7f45d8eaa440 Options.rate_limit_delay_max_milliseconds: 100
2020/01/10-09:39:03.155655 7f45d8eaa440 Options.disable_auto_compactions: 0
2020/01/10-09:39:03.155656 7f45d8eaa440 Options.compaction_style: kCompactionStyleLevel
2020/01/10-09:39:03.155658 7f45d8eaa440 Options.compaction_pri: kMinOverlappingRatio
2020/01/10-09:39:03.155659 7f45d8eaa440 Options.compaction_options_universal.size_ratio: 1
2020/01/10-09:39:03.155661 7f45d8eaa440 Options.compaction_options_universal.min_merge_width: 2
2020/01/10-09:39:03.155662 7f45d8eaa440 Options.compaction_options_universal.max_merge_width: 4294967295
2020/01/10-09:39:03.155663 7f45d8eaa440 Options.compaction_options_universal.max_size_amplification_percent: 200
2020/01/10-09:39:03.155664 7f45d8eaa440 Options.compaction_options_universal.compression_size_percent: -1
2020/01/10-09:39:03.155666 7f45d8eaa440 Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2020/01/10-09:39:03.155667 7f45d8eaa440 Options.compaction_options_fifo.max_table_files_size: 1073741824
2020/01/10-09:39:03.155668 7f45d8eaa440 Options.compaction_options_fifo.allow_compaction: 0
2020/01/10-09:39:03.155670 7f45d8eaa440 Options.table_properties_collectors:
2020/01/10-09:39:03.155671 7f45d8eaa440 Options.inplace_update_support: 0
2020/01/10-09:39:03.155672 7f45d8eaa440 Options.inplace_update_num_locks: 10000
2020/01/10-09:39:03.155673 7f45d8eaa440 Options.memtable_prefix_bloom_size_ratio: 0.000000
2020/01/10-09:39:03.155675 7f45d8eaa440 Options.memtable_whole_key_filtering: 0
2020/01/10-09:39:03.155676 7f45d8eaa440 Options.memtable_huge_page_size: 0
2020/01/10-09:39:03.155677 7f45d8eaa440 Options.bloom_locality: 0
2020/01/10-09:39:03.155678 7f45d8eaa440 Options.max_successive_merges: 0
2020/01/10-09:39:03.155679 7f45d8eaa440 Options.optimize_filters_for_hits: 0
2020/01/10-09:39:03.155681 7f45d8eaa440 Options.paranoid_file_checks: 0
2020/01/10-09:39:03.155682 7f45d8eaa440 Options.force_consistency_checks: 0
2020/01/10-09:39:03.155683 7f45d8eaa440 Options.report_bg_io_stats: 0
2020/01/10-09:39:03.155684 7f45d8eaa440 Options.ttl: 0
2020/01/10-09:39:03.155685 7f45d8eaa440 Options.periodic_compaction_seconds: 0
2020/01/10-09:39:03.156268 7f45d8eaa440 [/version_set.cc:4267] Recovered from manifest file:kvdb/MANIFEST-000001 succeeded,manifest_file_number is 1, next_file_number is 3, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2020/01/10-09:39:03.156273 7f45d8eaa440 [/version_set.cc:4276] Column family [default] (ID 0), log number is 0
2020/01/10-09:39:03.158204 7f45d8eaa440 DB pointer 0x206f97b0
2020/01/10-09:39:03.158523 7f455a5f6700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/10-09:39:03.158563 7f455a5f6700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 0.0 total, 0.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
2020/01/11-11:16:32.954673 7f422a851440 RocksDB version: 6.2.4
2020/01/11-11:16:32.954711 7f422a851440 Git sha rocksdb_build_git_sha:3513d4e93f8530ac44cc0c1efea04be4259c6938
2020/01/11-11:16:32.954714 7f422a851440 Compile date Dec 23 2019
2020/01/11-11:16:32.954717 7f422a851440 DB SUMMARY
2020/01/11-11:16:32.954744 7f422a851440 CURRENT file: CURRENT
2020/01/11-11:16:32.954746 7f422a851440 IDENTITY file: IDENTITY
2020/01/11-11:16:32.954750 7f422a851440 MANIFEST file: MANIFEST-000001 size: 13 Bytes
2020/01/11-11:16:32.954752 7f422a851440 SST files in kvdb dir, Total Num: 0, files:
2020/01/11-11:16:32.954754 7f422a851440 Write Ahead Log file in kvdb: 000003.log size: 0 ;
2020/01/11-11:16:32.954756 7f422a851440 Options.error_if_exists: 0
2020/01/11-11:16:32.954757 7f422a851440 Options.create_if_missing: 1
2020/01/11-11:16:32.954758 7f422a851440 Options.paranoid_checks: 1
2020/01/11-11:16:32.954760 7f422a851440 Options.env: 0x1bed2ac0
2020/01/11-11:16:32.954761 7f422a851440 Options.info_log: 0x1f4720f0
2020/01/11-11:16:32.954762 7f422a851440 Options.max_file_opening_threads: 16
2020/01/11-11:16:32.954764 7f422a851440 Options.statistics: (nil)
2020/01/11-11:16:32.954765 7f422a851440 Options.use_fsync: 0
2020/01/11-11:16:32.954766 7f422a851440 Options.max_log_file_size: 0
2020/01/11-11:16:32.954768 7f422a851440 Options.max_manifest_file_size: 1073741824
2020/01/11-11:16:32.954769 7f422a851440 Options.log_file_time_to_roll: 0
2020/01/11-11:16:32.954770 7f422a851440 Options.keep_log_file_num: 1000
2020/01/11-11:16:32.954771 7f422a851440 Options.recycle_log_file_num: 0
2020/01/11-11:16:32.954772 7f422a851440 Options.allow_fallocate: 1
2020/01/11-11:16:32.954774 7f422a851440 Options.allow_mmap_reads: 0
2020/01/11-11:16:32.954775 7f422a851440 Options.allow_mmap_writes: 0
2020/01/11-11:16:32.954776 7f422a851440 Options.use_direct_reads: 0
2020/01/11-11:16:32.954777 7f422a851440 Options.use_direct_io_for_flush_and_compaction: 0
2020/01/11-11:16:32.954778 7f422a851440 Options.create_missing_column_families: 0
2020/01/11-11:16:32.954780 7f422a851440 Options.db_log_dir:
2020/01/11-11:16:32.954782 7f422a851440 Options.wal_dir: kvdb
2020/01/11-11:16:32.954783 7f422a851440 Options.table_cache_numshardbits: 6
2020/01/11-11:16:32.954784 7f422a851440 Options.max_subcompactions: 1
2020/01/11-11:16:32.954785 7f422a851440 Options.max_background_flushes: -1
2020/01/11-11:16:32.954787 7f422a851440 Options.WAL_ttl_seconds: 0
2020/01/11-11:16:32.954788 7f422a851440 Options.WAL_size_limit_MB: 0
2020/01/11-11:16:32.954789 7f422a851440 Options.manifest_preallocation_size: 4194304
2020/01/11-11:16:32.954790 7f422a851440 Options.is_fd_close_on_exec: 1
2020/01/11-11:16:32.954791 7f422a851440 Options.advise_random_on_open: 1
2020/01/11-11:16:32.954793 7f422a851440 Options.db_write_buffer_size: 0
2020/01/11-11:16:32.954794 7f422a851440 Options.write_buffer_manager: 0x1f472150
2020/01/11-11:16:32.954795 7f422a851440 Options.access_hint_on_compaction_start: 1
2020/01/11-11:16:32.954796 7f422a851440 Options.new_table_reader_for_compaction_inputs: 0
2020/01/11-11:16:32.954797 7f422a851440 Options.random_access_max_buffer_size: 1048576
2020/01/11-11:16:32.954798 7f422a851440 Options.use_adaptive_mutex: 0
2020/01/11-11:16:32.954799 7f422a851440 Options.rate_limiter: (nil)
2020/01/11-11:16:32.954801 7f422a851440 Options.sst_file_manager.rate_bytes_per_sec: 0
2020/01/11-11:16:32.954802 7f422a851440 Options.wal_recovery_mode: 2
2020/01/11-11:16:32.954809 7f422a851440 Options.enable_thread_tracking: 0
2020/01/11-11:16:32.954810 7f422a851440 Options.enable_pipelined_write: 0
2020/01/11-11:16:32.954811 7f422a851440 Options.allow_concurrent_memtable_write: 1
2020/01/11-11:16:32.954812 7f422a851440 Options.enable_write_thread_adaptive_yield: 1
2020/01/11-11:16:32.954813 7f422a851440 Options.write_thread_max_yield_usec: 100
2020/01/11-11:16:32.954815 7f422a851440 Options.write_thread_slow_yield_usec: 3
2020/01/11-11:16:32.954816 7f422a851440 Options.row_cache: None
2020/01/11-11:16:32.954817 7f422a851440 Options.wal_filter: None
2020/01/11-11:16:32.954818 7f422a851440 Options.avoid_flush_during_recovery: 0
2020/01/11-11:16:32.954819 7f422a851440 Options.allow_ingest_behind: 0
2020/01/11-11:16:32.954820 7f422a851440 Options.preserve_deletes: 0
2020/01/11-11:16:32.954822 7f422a851440 Options.two_write_queues: 0
2020/01/11-11:16:32.954823 7f422a851440 Options.manual_wal_flush: 0
2020/01/11-11:16:32.954824 7f422a851440 Options.atomic_flush: 0
2020/01/11-11:16:32.954825 7f422a851440 Options.avoid_unnecessary_blocking_io: 0
2020/01/11-11:16:32.954826 7f422a851440 Options.max_background_jobs: 2
2020/01/11-11:16:32.954827 7f422a851440 Options.max_background_compactions: -1
2020/01/11-11:16:32.954828 7f422a851440 Options.avoid_flush_during_shutdown: 0
2020/01/11-11:16:32.954830 7f422a851440 Options.writable_file_max_buffer_size: 1048576
2020/01/11-11:16:32.954831 7f422a851440 Options.delayed_write_rate : 16777216
2020/01/11-11:16:32.954832 7f422a851440 Options.max_total_wal_size: 0
2020/01/11-11:16:32.954833 7f422a851440 Options.delete_obsolete_files_period_micros: 21600000000
2020/01/11-11:16:32.954834 7f422a851440 Options.stats_dump_period_sec: 600
2020/01/11-11:16:32.954836 7f422a851440 Options.stats_persist_period_sec: 600
2020/01/11-11:16:32.954837 7f422a851440 Options.stats_history_buffer_size: 1048576
2020/01/11-11:16:32.954838 7f422a851440 Options.max_open_files: -1
2020/01/11-11:16:32.954839 7f422a851440 Options.bytes_per_sync: 0
2020/01/11-11:16:32.954841 7f422a851440 Options.wal_bytes_per_sync: 0
2020/01/11-11:16:32.954842 7f422a851440 Options.strict_bytes_per_sync: 0
2020/01/11-11:16:32.954843 7f422a851440 Options.compaction_readahead_size: 0
2020/01/11-11:16:32.954844 7f422a851440 Compression algorithms supported:
2020/01/11-11:16:32.954856 7f422a851440 kZSTDNotFinalCompression supported: 0
2020/01/11-11:16:32.954862 7f422a851440 kZSTD supported: 0
2020/01/11-11:16:32.954863 7f422a851440 kXpressCompression supported: 0
2020/01/11-11:16:32.954864 7f422a851440 kLZ4HCCompression supported: 0
2020/01/11-11:16:32.954866 7f422a851440 kLZ4Compression supported: 0
2020/01/11-11:16:32.954867 7f422a851440 kBZip2Compression supported: 1
2020/01/11-11:16:32.954868 7f422a851440 kZlibCompression supported: 1
2020/01/11-11:16:32.954869 7f422a851440 kSnappyCompression supported: 0
2020/01/11-11:16:32.954871 7f422a851440 Fast CRC32 supported: Supported on x86
2020/01/11-11:16:32.955074 7f422a851440 [/version_set.cc:4053] Recovering from manifest file: kvdb/MANIFEST-000001
2020/01/11-11:16:32.955134 7f422a851440 [/column_family.cc:482] --------------- Options for column family [default]:
2020/01/11-11:16:32.955138 7f422a851440 Options.comparator: leveldb.BytewiseComparator
2020/01/11-11:16:32.955139 7f422a851440 Options.merge_operator: None
2020/01/11-11:16:32.955141 7f422a851440 Options.compaction_filter: None
2020/01/11-11:16:32.955142 7f422a851440 Options.compaction_filter_factory: None
2020/01/11-11:16:32.955143 7f422a851440 Options.memtable_factory: SkipListFactory
2020/01/11-11:16:32.955155 7f422a851440 Options.table_factory: BlockBasedTable
2020/01/11-11:16:32.955188 7f422a851440 table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x1df01130)
cache_index_and_filter_blocks: 0
cache_index_and_filter_blocks_with_high_priority: 0
pin_l0_filter_and_index_blocks_in_cache: 0
pin_top_level_index_and_filter: 1
index_type: 0
data_block_index_type: 0
index_shortening: 1
data_block_hash_table_util_ratio: 0.750000
hash_index_allow_collision: 1
checksum: 1
no_block_cache: 0
block_cache: 0x1d62c180
block_cache_name: LRUCache
block_cache_options:
capacity : 8388608
num_shard_bits : 4
strict_capacity_limit : 0
memory_allocator : None
high_pri_pool_ratio: 0.000
block_cache_compressed: (nil)
persistent_cache: (nil)
block_size: 4096
block_size_deviation: 10
block_restart_interval: 16
index_block_restart_interval: 1
metadata_block_size: 4096
partition_filters: 0
use_delta_encoding: 1
filter_policy: nullptr
whole_key_filtering: 1
verify_compression: 0
read_amp_bytes_per_bit: 0
format_version: 2
enable_index_compression: 1
block_align: 0
2020/01/11-11:16:32.955193 7f422a851440 Options.write_buffer_size: 67108864
2020/01/11-11:16:32.955194 7f422a851440 Options.max_write_buffer_number: 2
2020/01/11-11:16:32.955196 7f422a851440 Options.compression: NoCompression
2020/01/11-11:16:32.955198 7f422a851440 Options.bottommost_compression: Disabled
2020/01/11-11:16:32.955199 7f422a851440 Options.prefix_extractor: nullptr
2020/01/11-11:16:32.955200 7f422a851440 Options.memtable_insert_with_hint_prefix_extractor: nullptr
2020/01/11-11:16:32.955202 7f422a851440 Options.num_levels: 7
2020/01/11-11:16:32.955203 7f422a851440 Options.min_write_buffer_number_to_merge: 1
2020/01/11-11:16:32.955204 7f422a851440 Options.max_write_buffer_number_to_maintain: 0
2020/01/11-11:16:32.955205 7f422a851440 Options.bottommost_compression_opts.window_bits: -14
2020/01/11-11:16:32.955207 7f422a851440 Options.bottommost_compression_opts.level: 32767
2020/01/11-11:16:32.955208 7f422a851440 Options.bottommost_compression_opts.strategy: 0
2020/01/11-11:16:32.955209 7f422a851440 Options.bottommost_compression_opts.max_dict_bytes: 0
2020/01/11-11:16:32.955210 7f422a851440 Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2020/01/11-11:16:32.955211 7f422a851440 Options.bottommost_compression_opts.enabled: false
2020/01/11-11:16:32.955213 7f422a851440 Options.compression_opts.window_bits: -14
2020/01/11-11:16:32.955214 7f422a851440 Options.compression_opts.level: 32767
2020/01/11-11:16:32.955215 7f422a851440 Options.compression_opts.strategy: 0
2020/01/11-11:16:32.955216 7f422a851440 Options.compression_opts.max_dict_bytes: 0
2020/01/11-11:16:32.955218 7f422a851440 Options.compression_opts.zstd_max_train_bytes: 0
2020/01/11-11:16:32.955219 7f422a851440 Options.compression_opts.enabled: false
2020/01/11-11:16:32.955220 7f422a851440 Options.level0_file_num_compaction_trigger: 4
2020/01/11-11:16:32.955221 7f422a851440 Options.level0_slowdown_writes_trigger: 20
2020/01/11-11:16:32.955223 7f422a851440 Options.level0_stop_writes_trigger: 36
2020/01/11-11:16:32.955224 7f422a851440 Options.target_file_size_base: 67108864
2020/01/11-11:16:32.955225 7f422a851440 Options.target_file_size_multiplier: 1
2020/01/11-11:16:32.955226 7f422a851440 Options.max_bytes_for_level_base: 268435456
2020/01/11-11:16:32.955228 7f422a851440 Options.snap_refresh_nanos: 0
2020/01/11-11:16:32.955229 7f422a851440 Options.level_compaction_dynamic_level_bytes: 0
2020/01/11-11:16:32.955230 7f422a851440 Options.max_bytes_for_level_multiplier: 10.000000
2020/01/11-11:16:32.955232 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[0]: 1
2020/01/11-11:16:32.955238 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[1]: 1
2020/01/11-11:16:32.955240 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[2]: 1
2020/01/11-11:16:32.955241 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[3]: 1
2020/01/11-11:16:32.955242 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[4]: 1
2020/01/11-11:16:32.955243 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[5]: 1
2020/01/11-11:16:32.955244 7f422a851440 Options.max_bytes_for_level_multiplier_addtl[6]: 1
2020/01/11-11:16:32.955245 7f422a851440 Options.max_sequential_skip_in_iterations: 8
2020/01/11-11:16:32.955246 7f422a851440 Options.max_compaction_bytes: 1677721600
2020/01/11-11:16:32.955248 7f422a851440 Options.arena_block_size: 8388608
2020/01/11-11:16:32.955249 7f422a851440 Options.soft_pending_compaction_bytes_limit: 68719476736
2020/01/11-11:16:32.955250 7f422a851440 Options.hard_pending_compaction_bytes_limit: 274877906944
2020/01/11-11:16:32.955251 7f422a851440 Options.rate_limit_delay_max_milliseconds: 100
2020/01/11-11:16:32.955252 7f422a851440 Options.disable_auto_compactions: 0
2020/01/11-11:16:32.955254 7f422a851440 Options.compaction_style: kCompactionStyleLevel
2020/01/11-11:16:32.955256 7f422a851440 Options.compaction_pri: kMinOverlappingRatio
2020/01/11-11:16:32.955257 7f422a851440 Options.compaction_options_universal.size_ratio: 1
2020/01/11-11:16:32.955258 7f422a851440 Options.compaction_options_universal.min_merge_width: 2
2020/01/11-11:16:32.955259 7f422a851440 Options.compaction_options_universal.max_merge_width: 4294967295
2020/01/11-11:16:32.955260 7f422a851440 Options.compaction_options_universal.max_size_amplification_percent: 200
2020/01/11-11:16:32.955262 7f422a851440 Options.compaction_options_universal.compression_size_percent: -1
2020/01/11-11:16:32.955263 7f422a851440 Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2020/01/11-11:16:32.955264 7f422a851440 Options.compaction_options_fifo.max_table_files_size: 1073741824
2020/01/11-11:16:32.955265 7f422a851440 Options.compaction_options_fifo.allow_compaction: 0
2020/01/11-11:16:32.955267 7f422a851440 Options.table_properties_collectors:
2020/01/11-11:16:32.955268 7f422a851440 Options.inplace_update_support: 0
2020/01/11-11:16:32.955269 7f422a851440 Options.inplace_update_num_locks: 10000
2020/01/11-11:16:32.955270 7f422a851440 Options.memtable_prefix_bloom_size_ratio: 0.000000
2020/01/11-11:16:32.955272 7f422a851440 Options.memtable_whole_key_filtering: 0
2020/01/11-11:16:32.955273 7f422a851440 Options.memtable_huge_page_size: 0
2020/01/11-11:16:32.955274 7f422a851440 Options.bloom_locality: 0
2020/01/11-11:16:32.955275 7f422a851440 Options.max_successive_merges: 0
2020/01/11-11:16:32.955276 7f422a851440 Options.optimize_filters_for_hits: 0
2020/01/11-11:16:32.955277 7f422a851440 Options.paranoid_file_checks: 0
2020/01/11-11:16:32.955278 7f422a851440 Options.force_consistency_checks: 0
2020/01/11-11:16:32.955280 7f422a851440 Options.report_bg_io_stats: 0
2020/01/11-11:16:32.955281 7f422a851440 Options.ttl: 0
2020/01/11-11:16:32.955282 7f422a851440 Options.periodic_compaction_seconds: 0
2020/01/11-11:16:32.955848 7f422a851440 [/version_set.cc:4267] Recovered from manifest file: kvdb/MANIFEST-000001 succeeded, manifest_file_number is 1, next_file_number is 3, last_sequence is 0, log_number is 0, prev_log_number is 0, max_column_family is 0, min_log_number_to_keep is 0
2020/01/11-11:16:32.955853 7f422a851440 [/version_set.cc:4276] Column family [default] (ID 0), log number is 0
2020/01/11-11:16:32.955918 7f422a851440 EVENT_LOG_v1 {"time_micros": 1578712592955905, "job": 1, "event": "recovery_started", "log_files": [3]}
2020/01/11-11:16:32.955931 7f422a851440 [/db_impl_open.cc:597] Recovering log #3 mode 2
2020/01/11-11:16:32.955994 7f422a851440 [/version_set.cc:3546] Creating manifest 5
2020/01/11-11:16:32.957004 7f422a851440 EVENT_LOG_v1 {"time_micros": 1578712592956998, "job": 1, "event": "recovery_finished"}
2020/01/11-11:16:32.958911 7f422a851440 DB pointer 0x1f470de0
2020/01/11-11:16:32.959218 7f41ac7f3700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/11-11:16:32.959255 7f41ac7f3700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 0.0 total, 0.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
2020/01/11-11:26:32.959466 7f41ac7f3700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/11-11:26:32.959522 7f41ac7f3700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 600.0 total, 600.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 600.0 total, 600.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 600.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
2020/01/11-12:07:58.727896 7effc50af440 RocksDB version: 6.2.4
2020/01/11-12:07:58.727932 7effc50af440 Git sha rocksdb_build_git_sha:3513d4e93f8530ac44cc0c1efea04be4259c6938
2020/01/11-12:07:58.727935 7effc50af440 Compile date Dec 23 2019
2020/01/11-12:07:58.727938 7effc50af440 DB SUMMARY
2020/01/11-12:07:58.727964 7effc50af440 CURRENT file: CURRENT
2020/01/11-12:07:58.727966 7effc50af440 IDENTITY file: IDENTITY
2020/01/11-12:07:58.727969 7effc50af440 MANIFEST file: MANIFEST-000005 size: 59 Bytes
2020/01/11-12:07:58.727971 7effc50af440 SST files in kvdb dir, Total Num: 0, files:
2020/01/11-12:07:58.727973 7effc50af440 Write Ahead Log file in kvdb: 000006.log size: 0 ;
2020/01/11-12:07:58.728282 7effc50af440 [/version_set.cc:4053] Recovering from manifest file: kvdb/MANIFEST-000005
2020/01/11-12:07:58.729011 7effc50af440 [/version_set.cc:4267] Recovered from manifest file: kvdb/MANIFEST-000005 succeeded, manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 4, prev_log_number is 0, max_column_family is 0, min_log_number_to_keep is 0
2020/01/11-12:07:58.729015 7effc50af440 [/version_set.cc:4276] Column family [default] (ID 0), log number is 4
2020/01/11-12:07:58.729081 7effc50af440 EVENT_LOG_v1 {"time_micros": 1578715678729069, "job": 1, "event": "recovery_started", "log_files": [6]}
2020/01/11-12:07:58.729092 7effc50af440 [/db_impl_open.cc:597] Recovering log #6 mode 2
2020/01/11-12:07:58.729153 7effc50af440 [/version_set.cc:3546] Creating manifest 8
2020/01/11-12:07:58.730101 7effc50af440 EVENT_LOG_v1 {"time_micros": 1578715678730097, "job": 1, "event": "recovery_finished"}
2020/01/11-12:07:58.732193 7effc50af440 DB pointer 0x20e950b0
2020/01/11-12:14:13.197446 7fd2bb712440 RocksDB version: 6.2.4
2020/01/11-12:14:13.197484 7fd2bb712440 Git sha rocksdb_build_git_sha:3513d4e93f8530ac44cc0c1efea04be4259c6938
2020/01/11-12:14:13.197486 7fd2bb712440 Compile date Dec 23 2019
2020/01/11-12:14:13.197489 7fd2bb712440 DB SUMMARY
2020/01/11-12:14:13.197519 7fd2bb712440 CURRENT file: CURRENT
2020/01/11-12:14:13.197521 7fd2bb712440 IDENTITY file: IDENTITY
2020/01/11-12:14:13.197524 7fd2bb712440 MANIFEST file: MANIFEST-000008 size: 59 Bytes
2020/01/11-12:14:13.197527 7fd2bb712440 SST files in kvdb dir, Total Num: 0, files:
2020/01/11-12:14:13.197528 7fd2bb712440 Write Ahead Log file in kvdb: 000009.log size: 0 ;
2020/01/11-12:14:13.197844 7fd2bb712440 [/version_set.cc:4053] Recovering from manifest file: kvdb/MANIFEST-000008
2020/01/11-12:14:13.198592 7fd2bb712440 [/version_set.cc:4267] Recovered from manifest file: kvdb/MANIFEST-000008 succeeded, manifest_file_number is 8, next_file_number is 10, last_sequence is 0, log_number is 7, prev_log_number is 0, max_column_family is 0, min_log_number_to_keep is 0
2020/01/11-12:14:13.198596 7fd2bb712440 [/version_set.cc:4276] Column family [default] (ID 0), log number is 7
2020/01/11-12:14:13.198655 7fd2bb712440 EVENT_LOG_v1 {"time_micros": 1578716053198645, "job": 1, "event": "recovery_started", "log_files": [9]}
2020/01/11-12:14:13.198669 7fd2bb712440 [/db_impl_open.cc:597] Recovering log #9 mode 2
2020/01/11-12:14:13.198730 7fd2bb712440 [/version_set.cc:3546] Creating manifest 11
2020/01/11-12:14:13.199526 7fd2bb712440 EVENT_LOG_v1 {"time_micros": 1578716053199520, "job": 1, "event": "recovery_finished"}
2020/01/11-12:14:13.201585 7fd2bb712440 DB pointer 0x1fd857f0
2020/01/11-12:14:13.201860 7fd23d1f4700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/11-12:14:13.201894 7fd23d1f4700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 0.0 total, 0.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
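Each startup block above records one DB::Open of the kvdb instance: RocksDB recovers from the current MANIFEST, replays the write-ahead log (the recovery_started/recovery_finished events), rolls the manifest forward, and then dumps its effective options and stats into this LOG. A minimal C++ sketch of an open call that would emit a similar log — the "kvdb" path and the option values are taken from the log itself, everything else is an assumption, not code from this PR:

#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;               // Options.create_if_missing: 1
  options.compression = rocksdb::kNoCompression;  // Options.compression: NoCompression
  options.max_open_files = -1;                    // Options.max_open_files: -1

  rocksdb::DB* db = nullptr;
  // Open replays the WAL, creates the next MANIFEST, and writes the option
  // and stats dump seen above into the LOG file.
  rocksdb::Status s = rocksdb::DB::Open(options, "kvdb", &db);
  assert(s.ok());
  delete db;  // close; the next open recovers from the manifest written here
  return 0;
}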
2020/01/11-12:24:05.180155 7fcf91393440 RocksDB version: 6.2.4
2020/01/11-12:24:05.180191 7fcf91393440 Git sha rocksdb_build_git_sha:3513d4e93f8530ac44cc0c1efea04be4259c6938
2020/01/11-12:24:05.180194 7fcf91393440 Compile date Dec 23 2019
2020/01/11-12:24:05.180197 7fcf91393440 DB SUMMARY
2020/01/11-12:24:05.180227 7fcf91393440 CURRENT file: CURRENT
2020/01/11-12:24:05.180229 7fcf91393440 IDENTITY file: IDENTITY
2020/01/11-12:24:05.180232 7fcf91393440 MANIFEST file: MANIFEST-000011 size: 59 Bytes
2020/01/11-12:24:05.180235 7fcf91393440 SST files in kvdb dir, Total Num: 0, files:
2020/01/11-12:24:05.180237 7fcf91393440 Write Ahead Log file in kvdb: 000012.log size: 0 ;
2020/01/11-12:24:05.180239 7fcf91393440 Options.error_if_exists: 0
2020/01/11-12:24:05.180241 7fcf91393440 Options.create_if_missing: 1
2020/01/11-12:24:05.180242 7fcf91393440 Options.paranoid_checks: 1
2020/01/11-12:24:05.180243 7fcf91393440 Options.env: 0x1bed2ac0
2020/01/11-12:24:05.180245 7fcf91393440 Options.info_log: 0x1f6774a0
2020/01/11-12:24:05.180246 7fcf91393440 Options.max_file_opening_threads: 16
2020/01/11-12:24:05.180247 7fcf91393440 Options.statistics: (nil)
2020/01/11-12:24:05.180248 7fcf91393440 Options.use_fsync: 0
2020/01/11-12:24:05.180250 7fcf91393440 Options.max_log_file_size: 0
2020/01/11-12:24:05.180251 7fcf91393440 Options.max_manifest_file_size: 1073741824
2020/01/11-12:24:05.180252 7fcf91393440 Options.log_file_time_to_roll: 0
2020/01/11-12:24:05.180254 7fcf91393440 Options.keep_log_file_num: 1000
2020/01/11-12:24:05.180255 7fcf91393440 Options.recycle_log_file_num: 0
2020/01/11-12:24:05.180256 7fcf91393440 Options.allow_fallocate: 1
2020/01/11-12:24:05.180258 7fcf91393440 Options.allow_mmap_reads: 0
2020/01/11-12:24:05.180259 7fcf91393440 Options.allow_mmap_writes: 0
2020/01/11-12:24:05.180260 7fcf91393440 Options.use_direct_reads: 0
2020/01/11-12:24:05.180261 7fcf91393440 Options.use_direct_io_for_flush_and_compaction: 0
2020/01/11-12:24:05.180262 7fcf91393440 Options.create_missing_column_families: 0
2020/01/11-12:24:05.180265 7fcf91393440 Options.db_log_dir:
2020/01/11-12:24:05.180266 7fcf91393440 Options.wal_dir: kvdb
2020/01/11-12:24:05.180268 7fcf91393440 Options.table_cache_numshardbits: 6
2020/01/11-12:24:05.180269 7fcf91393440 Options.max_subcompactions: 1
2020/01/11-12:24:05.180270 7fcf91393440 Options.max_background_flushes: -1
2020/01/11-12:24:05.180272 7fcf91393440 Options.WAL_ttl_seconds: 0
2020/01/11-12:24:05.180273 7fcf91393440 Options.WAL_size_limit_MB: 0
2020/01/11-12:24:05.180274 7fcf91393440 Options.manifest_preallocation_size: 4194304
2020/01/11-12:24:05.180275 7fcf91393440 Options.is_fd_close_on_exec: 1
2020/01/11-12:24:05.180277 7fcf91393440 Options.advise_random_on_open: 1
2020/01/11-12:24:05.180278 7fcf91393440 Options.db_write_buffer_size: 0
2020/01/11-12:24:05.180279 7fcf91393440 Options.write_buffer_manager: 0x1f677500
2020/01/11-12:24:05.180280 7fcf91393440 Options.access_hint_on_compaction_start: 1
2020/01/11-12:24:05.180281 7fcf91393440 Options.new_table_reader_for_compaction_inputs: 0
2020/01/11-12:24:05.180282 7fcf91393440 Options.random_access_max_buffer_size: 1048576
2020/01/11-12:24:05.180283 7fcf91393440 Options.use_adaptive_mutex: 0
2020/01/11-12:24:05.180285 7fcf91393440 Options.rate_limiter: (nil)
2020/01/11-12:24:05.180286 7fcf91393440 Options.sst_file_manager.rate_bytes_per_sec: 0
2020/01/11-12:24:05.180288 7fcf91393440 Options.wal_recovery_mode: 2
2020/01/11-12:24:05.180297 7fcf91393440 Options.enable_thread_tracking: 0
2020/01/11-12:24:05.180299 7fcf91393440 Options.enable_pipelined_write: 0
2020/01/11-12:24:05.180300 7fcf91393440 Options.allow_concurrent_memtable_write: 1
2020/01/11-12:24:05.180301 7fcf91393440 Options.enable_write_thread_adaptive_yield: 1
2020/01/11-12:24:05.180302 7fcf91393440 Options.write_thread_max_yield_usec: 100
2020/01/11-12:24:05.180303 7fcf91393440 Options.write_thread_slow_yield_usec: 3
2020/01/11-12:24:05.180304 7fcf91393440 Options.row_cache: None
2020/01/11-12:24:05.180306 7fcf91393440 Options.wal_filter: None
2020/01/11-12:24:05.180307 7fcf91393440 Options.avoid_flush_during_recovery: 0
2020/01/11-12:24:05.180308 7fcf91393440 Options.allow_ingest_behind: 0
2020/01/11-12:24:05.180309 7fcf91393440 Options.preserve_deletes: 0
2020/01/11-12:24:05.180310 7fcf91393440 Options.two_write_queues: 0
2020/01/11-12:24:05.180312 7fcf91393440 Options.manual_wal_flush: 0
2020/01/11-12:24:05.180313 7fcf91393440 Options.atomic_flush: 0
2020/01/11-12:24:05.180314 7fcf91393440 Options.avoid_unnecessary_blocking_io: 0
2020/01/11-12:24:05.180315 7fcf91393440 Options.max_background_jobs: 2
2020/01/11-12:24:05.180316 7fcf91393440 Options.max_background_compactions: -1
2020/01/11-12:24:05.180318 7fcf91393440 Options.avoid_flush_during_shutdown: 0
2020/01/11-12:24:05.180319 7fcf91393440 Options.writable_file_max_buffer_size: 1048576
2020/01/11-12:24:05.180320 7fcf91393440 Options.delayed_write_rate : 16777216
2020/01/11-12:24:05.180322 7fcf91393440 Options.max_total_wal_size: 0
2020/01/11-12:24:05.180323 7fcf91393440 Options.delete_obsolete_files_period_micros: 21600000000
2020/01/11-12:24:05.180324 7fcf91393440 Options.stats_dump_period_sec: 600
2020/01/11-12:24:05.180326 7fcf91393440 Options.stats_persist_period_sec: 600
2020/01/11-12:24:05.180327 7fcf91393440 Options.stats_history_buffer_size: 1048576
2020/01/11-12:24:05.180328 7fcf91393440 Options.max_open_files: -1
2020/01/11-12:24:05.180329 7fcf91393440 Options.bytes_per_sync: 0
2020/01/11-12:24:05.180330 7fcf91393440 Options.wal_bytes_per_sync: 0
2020/01/11-12:24:05.180331 7fcf91393440 Options.strict_bytes_per_sync: 0
2020/01/11-12:24:05.180333 7fcf91393440 Options.compaction_readahead_size: 0
2020/01/11-12:24:05.180334 7fcf91393440 Compression algorithms supported:
2020/01/11-12:24:05.180344 7fcf91393440 kZSTDNotFinalCompression supported: 0
2020/01/11-12:24:05.180350 7fcf91393440 kZSTD supported: 0
2020/01/11-12:24:05.180351 7fcf91393440 kXpressCompression supported: 0
2020/01/11-12:24:05.180352 7fcf91393440 kLZ4HCCompression supported: 0
2020/01/11-12:24:05.180354 7fcf91393440 kLZ4Compression supported: 0
2020/01/11-12:24:05.180355 7fcf91393440 kBZip2Compression supported: 1
2020/01/11-12:24:05.180356 7fcf91393440 kZlibCompression supported: 1
2020/01/11-12:24:05.180357 7fcf91393440 kSnappyCompression supported: 0
2020/01/11-12:24:05.180360 7fcf91393440 Fast CRC32 supported: Supported on x86
2020/01/11-12:24:05.180560 7fcf91393440 [/version_set.cc:4053] Recovering from manifest file: kvdb/MANIFEST-000011
2020/01/11-12:24:05.180623 7fcf91393440 [/column_family.cc:482] --------------- Options for column family [default]:
2020/01/11-12:24:05.180627 7fcf91393440 Options.comparator: leveldb.BytewiseComparator
2020/01/11-12:24:05.180629 7fcf91393440 Options.merge_operator: None
2020/01/11-12:24:05.180630 7fcf91393440 Options.compaction_filter: None
2020/01/11-12:24:05.180631 7fcf91393440 Options.compaction_filter_factory: None
2020/01/11-12:24:05.180632 7fcf91393440 Options.memtable_factory: SkipListFactory
2020/01/11-12:24:05.180645 7fcf91393440 Options.table_factory: BlockBasedTable
2020/01/11-12:24:05.180680 7fcf91393440 table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x1e2afd50)
cache_index_and_filter_blocks: 0
cache_index_and_filter_blocks_with_high_priority: 0
pin_l0_filter_and_index_blocks_in_cache: 0
pin_top_level_index_and_filter: 1
index_type: 0
data_block_index_type: 0
index_shortening: 1
data_block_hash_table_util_ratio: 0.750000
hash_index_allow_collision: 1
checksum: 1
no_block_cache: 0
block_cache: 0x1e2bdf80
block_cache_name: LRUCache
block_cache_options:
capacity : 8388608
num_shard_bits : 4
strict_capacity_limit : 0
memory_allocator : None
high_pri_pool_ratio: 0.000
block_cache_compressed: (nil)
persistent_cache: (nil)
block_size: 4096
block_size_deviation: 10
block_restart_interval: 16
index_block_restart_interval: 1
metadata_block_size: 4096
partition_filters: 0
use_delta_encoding: 1
filter_policy: nullptr
whole_key_filtering: 1
verify_compression: 0
read_amp_bytes_per_bit: 0
format_version: 2
enable_index_compression: 1
block_align: 0
2020/01/11-12:24:05.180684 7fcf91393440 Options.write_buffer_size: 67108864
2020/01/11-12:24:05.180686 7fcf91393440 Options.max_write_buffer_number: 2
2020/01/11-12:24:05.180687 7fcf91393440 Options.compression: NoCompression
2020/01/11-12:24:05.180689 7fcf91393440 Options.bottommost_compression: Disabled
2020/01/11-12:24:05.180690 7fcf91393440 Options.prefix_extractor: nullptr
2020/01/11-12:24:05.180692 7fcf91393440 Options.memtable_insert_with_hint_prefix_extractor: nullptr
2020/01/11-12:24:05.180693 7fcf91393440 Options.num_levels: 7
2020/01/11-12:24:05.180694 7fcf91393440 Options.min_write_buffer_number_to_merge: 1
2020/01/11-12:24:05.180696 7fcf91393440 Options.max_write_buffer_number_to_maintain: 0
2020/01/11-12:24:05.180697 7fcf91393440 Options.bottommost_compression_opts.window_bits: -14
2020/01/11-12:24:05.180698 7fcf91393440 Options.bottommost_compression_opts.level: 32767
2020/01/11-12:24:05.180699 7fcf91393440 Options.bottommost_compression_opts.strategy: 0
2020/01/11-12:24:05.180701 7fcf91393440 Options.bottommost_compression_opts.max_dict_bytes: 0
2020/01/11-12:24:05.180702 7fcf91393440 Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2020/01/11-12:24:05.180703 7fcf91393440 Options.bottommost_compression_opts.enabled: false
2020/01/11-12:24:05.180705 7fcf91393440 Options.compression_opts.window_bits: -14
2020/01/11-12:24:05.180706 7fcf91393440 Options.compression_opts.level: 32767
2020/01/11-12:24:05.180707 7fcf91393440 Options.compression_opts.strategy: 0
2020/01/11-12:24:05.180708 7fcf91393440 Options.compression_opts.max_dict_bytes: 0
2020/01/11-12:24:05.180710 7fcf91393440 Options.compression_opts.zstd_max_train_bytes: 0
2020/01/11-12:24:05.180711 7fcf91393440 Options.compression_opts.enabled: false
2020/01/11-12:24:05.180712 7fcf91393440 Options.level0_file_num_compaction_trigger: 4
2020/01/11-12:24:05.180713 7fcf91393440 Options.level0_slowdown_writes_trigger: 20
2020/01/11-12:24:05.180715 7fcf91393440 Options.level0_stop_writes_trigger: 36
2020/01/11-12:24:05.180716 7fcf91393440 Options.target_file_size_base: 67108864
2020/01/11-12:24:05.180717 7fcf91393440 Options.target_file_size_multiplier: 1
2020/01/11-12:24:05.180718 7fcf91393440 Options.max_bytes_for_level_base: 268435456
2020/01/11-12:24:05.180720 7fcf91393440 Options.snap_refresh_nanos: 0
2020/01/11-12:24:05.180721 7fcf91393440 Options.level_compaction_dynamic_level_bytes: 0
2020/01/11-12:24:05.180722 7fcf91393440 Options.max_bytes_for_level_multiplier: 10.000000
2020/01/11-12:24:05.180724 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[0]: 1
2020/01/11-12:24:05.180731 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[1]: 1
2020/01/11-12:24:05.180732 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[2]: 1
2020/01/11-12:24:05.180733 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[3]: 1
2020/01/11-12:24:05.180734 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[4]: 1
2020/01/11-12:24:05.180735 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[5]: 1
2020/01/11-12:24:05.180737 7fcf91393440 Options.max_bytes_for_level_multiplier_addtl[6]: 1
2020/01/11-12:24:05.180738 7fcf91393440 Options.max_sequential_skip_in_iterations: 8
2020/01/11-12:24:05.180739 7fcf91393440 Options.max_compaction_bytes: 1677721600
2020/01/11-12:24:05.180740 7fcf91393440 Options.arena_block_size: 8388608
2020/01/11-12:24:05.180741 7fcf91393440 Options.soft_pending_compaction_bytes_limit: 68719476736
2020/01/11-12:24:05.180743 7fcf91393440 Options.hard_pending_compaction_bytes_limit: 274877906944
2020/01/11-12:24:05.180744 7fcf91393440 Options.rate_limit_delay_max_milliseconds: 100
2020/01/11-12:24:05.180745 7fcf91393440 Options.disable_auto_compactions: 0
2020/01/11-12:24:05.180747 7fcf91393440 Options.compaction_style: kCompactionStyleLevel
2020/01/11-12:24:05.180748 7fcf91393440 Options.compaction_pri: kMinOverlappingRatio
2020/01/11-12:24:05.180749 7fcf91393440 Options.compaction_options_universal.size_ratio: 1
2020/01/11-12:24:05.180751 7fcf91393440 Options.compaction_options_universal.min_merge_width: 2
2020/01/11-12:24:05.180752 7fcf91393440 Options.compaction_options_universal.max_merge_width: 4294967295
2020/01/11-12:24:05.180753 7fcf91393440 Options.compaction_options_universal.max_size_amplification_percent: 200
2020/01/11-12:24:05.180754 7fcf91393440 Options.compaction_options_universal.compression_size_percent: -1
2020/01/11-12:24:05.180756 7fcf91393440 Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2020/01/11-12:24:05.180757 7fcf91393440 Options.compaction_options_fifo.max_table_files_size: 1073741824
2020/01/11-12:24:05.180758 7fcf91393440 Options.compaction_options_fifo.allow_compaction: 0
2020/01/11-12:24:05.180759 7fcf91393440 Options.table_properties_collectors:
2020/01/11-12:24:05.180761 7fcf91393440 Options.inplace_update_support: 0
2020/01/11-12:24:05.180762 7fcf91393440 Options.inplace_update_num_locks: 10000
2020/01/11-12:24:05.180763 7fcf91393440 Options.memtable_prefix_bloom_size_ratio: 0.000000
2020/01/11-12:24:05.180765 7fcf91393440 Options.memtable_whole_key_filtering: 0
2020/01/11-12:24:05.180766 7fcf91393440 Options.memtable_huge_page_size: 0
2020/01/11-12:24:05.180767 7fcf91393440 Options.bloom_locality: 0
2020/01/11-12:24:05.180768 7fcf91393440 Options.max_successive_merges: 0
2020/01/11-12:24:05.180769 7fcf91393440 Options.optimize_filters_for_hits: 0
2020/01/11-12:24:05.180770 7fcf91393440 Options.paranoid_file_checks: 0
2020/01/11-12:24:05.180771 7fcf91393440 Options.force_consistency_checks: 0
2020/01/11-12:24:05.180773 7fcf91393440 Options.report_bg_io_stats: 0
2020/01/11-12:24:05.180774 7fcf91393440 Options.ttl: 0
2020/01/11-12:24:05.180775 7fcf91393440 Options.periodic_compaction_seconds: 0
2020/01/11-12:24:05.181303 7fcf91393440 [/version_set.cc:4267] Recovered from manifest file:kvdb/MANIFEST-000011 succeeded,manifest_file_number is 11, next_file_number is 13, last_sequence is 0, log_number is 10,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2020/01/11-12:24:05.181308 7fcf91393440 [/version_set.cc:4276] Column family [default] (ID 0), log number is 10
2020/01/11-12:24:05.181374 7fcf91393440 EVENT_LOG_v1 {"time_micros": 1578716645181364, "job": 1, "event": "recovery_started", "log_files": [12]}
2020/01/11-12:24:05.181387 7fcf91393440 [/db_impl_open.cc:597] Recovering log #12 mode 2
2020/01/11-12:24:05.181461 7fcf91393440 [/version_set.cc:3546] Creating manifest 14
2020/01/11-12:24:05.182388 7fcf91393440 EVENT_LOG_v1 {"time_micros": 1578716645182384, "job": 1, "event": "recovery_finished"}
2020/01/11-12:24:05.184452 7fcf91393440 DB pointer 0x1f676190
2020/01/11-12:24:05.184787 7fcf12bfd700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/11-12:24:05.184823 7fcf12bfd700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 0.0 total, 0.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
2020/01/11-12:34:05.185022 7fcf12bfd700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/11-12:34:05.185072 7fcf12bfd700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 600.0 total, 600.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 600.0 total, 600.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 600.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
2020/01/11-12:35:32.758840 7f43e73ec440 RocksDB version: 6.2.4
2020/01/11-12:35:32.758875 7f43e73ec440 Git sha rocksdb_build_git_sha:3513d4e93f8530ac44cc0c1efea04be4259c6938
2020/01/11-12:35:32.758878 7f43e73ec440 Compile date Dec 23 2019
2020/01/11-12:35:32.758882 7f43e73ec440 DB SUMMARY
2020/01/11-12:35:32.758911 7f43e73ec440 CURRENT file: CURRENT
2020/01/11-12:35:32.758913 7f43e73ec440 IDENTITY file: IDENTITY
2020/01/11-12:35:32.758916 7f43e73ec440 MANIFEST file: MANIFEST-000014 size: 59 Bytes
2020/01/11-12:35:32.758919 7f43e73ec440 SST files in kvdb dir, Total Num: 0, files:
2020/01/11-12:35:32.758921 7f43e73ec440 Write Ahead Log file in kvdb: 000015.log size: 0 ;
2020/01/11-12:35:32.758923 7f43e73ec440 Options.error_if_exists: 0
2020/01/11-12:35:32.758924 7f43e73ec440 Options.create_if_missing: 1
2020/01/11-12:35:32.758926 7f43e73ec440 Options.paranoid_checks: 1
2020/01/11-12:35:32.758927 7f43e73ec440 Options.env: 0x1bed2ac0
2020/01/11-12:35:32.758928 7f43e73ec440 Options.info_log: 0x207818b0
2020/01/11-12:35:32.758930 7f43e73ec440 Options.max_file_opening_threads: 16
2020/01/11-12:35:32.758931 7f43e73ec440 Options.statistics: (nil)
2020/01/11-12:35:32.758932 7f43e73ec440 Options.use_fsync: 0
2020/01/11-12:35:32.758934 7f43e73ec440 Options.max_log_file_size: 0
2020/01/11-12:35:32.758935 7f43e73ec440 Options.max_manifest_file_size: 1073741824
2020/01/11-12:35:32.758936 7f43e73ec440 Options.log_file_time_to_roll: 0
2020/01/11-12:35:32.758937 7f43e73ec440 Options.keep_log_file_num: 1000
2020/01/11-12:35:32.758938 7f43e73ec440 Options.recycle_log_file_num: 0
2020/01/11-12:35:32.758940 7f43e73ec440 Options.allow_fallocate: 1
2020/01/11-12:35:32.758941 7f43e73ec440 Options.allow_mmap_reads: 0
2020/01/11-12:35:32.758942 7f43e73ec440 Options.allow_mmap_writes: 0
2020/01/11-12:35:32.758943 7f43e73ec440 Options.use_direct_reads: 0
2020/01/11-12:35:32.758944 7f43e73ec440 Options.use_direct_io_for_flush_and_compaction: 0
2020/01/11-12:35:32.758945 7f43e73ec440 Options.create_missing_column_families: 0
2020/01/11-12:35:32.758948 7f43e73ec440 Options.db_log_dir:
2020/01/11-12:35:32.758949 7f43e73ec440 Options.wal_dir: kvdb
2020/01/11-12:35:32.758951 7f43e73ec440 Options.table_cache_numshardbits: 6
2020/01/11-12:35:32.758952 7f43e73ec440 Options.max_subcompactions: 1
2020/01/11-12:35:32.758953 7f43e73ec440 Options.max_background_flushes: -1
2020/01/11-12:35:32.758954 7f43e73ec440 Options.WAL_ttl_seconds: 0
2020/01/11-12:35:32.758955 7f43e73ec440 Options.WAL_size_limit_MB: 0
2020/01/11-12:35:32.758957 7f43e73ec440 Options.manifest_preallocation_size: 4194304
2020/01/11-12:35:32.758958 7f43e73ec440 Options.is_fd_close_on_exec: 1
2020/01/11-12:35:32.758960 7f43e73ec440 Options.advise_random_on_open: 1
2020/01/11-12:35:32.758961 7f43e73ec440 Options.db_write_buffer_size: 0
2020/01/11-12:35:32.758962 7f43e73ec440 Options.write_buffer_manager: 0x1e591160
2020/01/11-12:35:32.758963 7f43e73ec440 Options.access_hint_on_compaction_start: 1
2020/01/11-12:35:32.758964 7f43e73ec440 Options.new_table_reader_for_compaction_inputs: 0
2020/01/11-12:35:32.758965 7f43e73ec440 Options.random_access_max_buffer_size: 1048576
2020/01/11-12:35:32.758966 7f43e73ec440 Options.use_adaptive_mutex: 0
2020/01/11-12:35:32.758967 7f43e73ec440 Options.rate_limiter: (nil)
2020/01/11-12:35:32.758969 7f43e73ec440 Options.sst_file_manager.rate_bytes_per_sec: 0
2020/01/11-12:35:32.758970 7f43e73ec440 Options.wal_recovery_mode: 2
2020/01/11-12:35:32.758977 7f43e73ec440 Options.enable_thread_tracking: 0
2020/01/11-12:35:32.758979 7f43e73ec440 Options.enable_pipelined_write: 0
2020/01/11-12:35:32.758980 7f43e73ec440 Options.allow_concurrent_memtable_write: 1
2020/01/11-12:35:32.758981 7f43e73ec440 Options.enable_write_thread_adaptive_yield: 1
2020/01/11-12:35:32.758982 7f43e73ec440 Options.write_thread_max_yield_usec: 100
2020/01/11-12:35:32.758983 7f43e73ec440 Options.write_thread_slow_yield_usec: 3
2020/01/11-12:35:32.758984 7f43e73ec440 Options.row_cache: None
2020/01/11-12:35:32.758985 7f43e73ec440 Options.wal_filter: None
2020/01/11-12:35:32.758987 7f43e73ec440 Options.avoid_flush_during_recovery: 0
2020/01/11-12:35:32.758988 7f43e73ec440 Options.allow_ingest_behind: 0
2020/01/11-12:35:32.758989 7f43e73ec440 Options.preserve_deletes: 0
2020/01/11-12:35:32.758990 7f43e73ec440 Options.two_write_queues: 0
2020/01/11-12:35:32.758991 7f43e73ec440 Options.manual_wal_flush: 0
2020/01/11-12:35:32.758993 7f43e73ec440 Options.atomic_flush: 0
2020/01/11-12:35:32.758994 7f43e73ec440 Options.avoid_unnecessary_blocking_io: 0
2020/01/11-12:35:32.758995 7f43e73ec440 Options.max_background_jobs: 2
2020/01/11-12:35:32.758996 7f43e73ec440 Options.max_background_compactions: -1
2020/01/11-12:35:32.758997 7f43e73ec440 Options.avoid_flush_during_shutdown: 0
2020/01/11-12:35:32.758998 7f43e73ec440 Options.writable_file_max_buffer_size: 1048576
2020/01/11-12:35:32.758999 7f43e73ec440 Options.delayed_write_rate : 16777216
2020/01/11-12:35:32.759001 7f43e73ec440 Options.max_total_wal_size: 0
2020/01/11-12:35:32.759002 7f43e73ec440 Options.delete_obsolete_files_period_micros: 21600000000
2020/01/11-12:35:32.759003 7f43e73ec440 Options.stats_dump_period_sec: 600
2020/01/11-12:35:32.759004 7f43e73ec440 Options.stats_persist_period_sec: 600
2020/01/11-12:35:32.759005 7f43e73ec440 Options.stats_history_buffer_size: 1048576
2020/01/11-12:35:32.759007 7f43e73ec440 Options.max_open_files: -1
2020/01/11-12:35:32.759008 7f43e73ec440 Options.bytes_per_sync: 0
2020/01/11-12:35:32.759009 7f43e73ec440 Options.wal_bytes_per_sync: 0
2020/01/11-12:35:32.759010 7f43e73ec440 Options.strict_bytes_per_sync: 0
2020/01/11-12:35:32.759011 7f43e73ec440 Options.compaction_readahead_size: 0
2020/01/11-12:35:32.759012 7f43e73ec440 Compression algorithms supported:
2020/01/11-12:35:32.759026 7f43e73ec440 kZSTDNotFinalCompression supported: 0
2020/01/11-12:35:32.759031 7f43e73ec440 kZSTD supported: 0
2020/01/11-12:35:32.759033 7f43e73ec440 kXpressCompression supported: 0
2020/01/11-12:35:32.759034 7f43e73ec440 kLZ4HCCompression supported: 0
2020/01/11-12:35:32.759035 7f43e73ec440 kLZ4Compression supported: 0
2020/01/11-12:35:32.759036 7f43e73ec440 kBZip2Compression supported: 1
2020/01/11-12:35:32.759038 7f43e73ec440 kZlibCompression supported: 1
2020/01/11-12:35:32.759039 7f43e73ec440 kSnappyCompression supported: 0
2020/01/11-12:35:32.759041 7f43e73ec440 Fast CRC32 supported: Supported on x86
2020/01/11-12:35:32.759237 7f43e73ec440 [/version_set.cc:4053] Recovering from manifest file: kvdb/MANIFEST-000014
2020/01/11-12:35:32.759300 7f43e73ec440 [/column_family.cc:482] --------------- Options for column family [default]:
2020/01/11-12:35:32.759304 7f43e73ec440 Options.comparator: leveldb.BytewiseComparator
2020/01/11-12:35:32.759305 7f43e73ec440 Options.merge_operator: None
2020/01/11-12:35:32.759306 7f43e73ec440 Options.compaction_filter: None
2020/01/11-12:35:32.759307 7f43e73ec440 Options.compaction_filter_factory: None
2020/01/11-12:35:32.759309 7f43e73ec440 Options.memtable_factory: SkipListFactory
2020/01/11-12:35:32.759320 7f43e73ec440 Options.table_factory: BlockBasedTable
2020/01/11-12:35:32.759356 7f43e73ec440 table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x1f223340)
cache_index_and_filter_blocks: 0
cache_index_and_filter_blocks_with_high_priority: 0
pin_l0_filter_and_index_blocks_in_cache: 0
pin_top_level_index_and_filter: 1
index_type: 0
data_block_index_type: 0
index_shortening: 1
data_block_hash_table_util_ratio: 0.750000
hash_index_allow_collision: 1
checksum: 1
no_block_cache: 0
block_cache: 0x1f24b760
block_cache_name: LRUCache
block_cache_options:
capacity : 8388608
num_shard_bits : 4
strict_capacity_limit : 0
memory_allocator : None
high_pri_pool_ratio: 0.000
block_cache_compressed: (nil)
persistent_cache: (nil)
block_size: 4096
block_size_deviation: 10
block_restart_interval: 16
index_block_restart_interval: 1
metadata_block_size: 4096
partition_filters: 0
use_delta_encoding: 1
filter_policy: nullptr
whole_key_filtering: 1
verify_compression: 0
read_amp_bytes_per_bit: 0
format_version: 2
enable_index_compression: 1
block_align: 0
2020/01/11-12:35:32.759360 7f43e73ec440 Options.write_buffer_size: 67108864
2020/01/11-12:35:32.759361 7f43e73ec440 Options.max_write_buffer_number: 2
2020/01/11-12:35:32.759363 7f43e73ec440 Options.compression: NoCompression
2020/01/11-12:35:32.759364 7f43e73ec440 Options.bottommost_compression: Disabled
2020/01/11-12:35:32.759366 7f43e73ec440 Options.prefix_extractor: nullptr
2020/01/11-12:35:32.759367 7f43e73ec440 Options.memtable_insert_with_hint_prefix_extractor: nullptr
2020/01/11-12:35:32.759368 7f43e73ec440 Options.num_levels: 7
2020/01/11-12:35:32.759370 7f43e73ec440 Options.min_write_buffer_number_to_merge: 1
2020/01/11-12:35:32.759371 7f43e73ec440 Options.max_write_buffer_number_to_maintain: 0
2020/01/11-12:35:32.759372 7f43e73ec440 Options.bottommost_compression_opts.window_bits: -14
2020/01/11-12:35:32.759373 7f43e73ec440 Options.bottommost_compression_opts.level: 32767
2020/01/11-12:35:32.759374 7f43e73ec440 Options.bottommost_compression_opts.strategy: 0
2020/01/11-12:35:32.759376 7f43e73ec440 Options.bottommost_compression_opts.max_dict_bytes: 0
2020/01/11-12:35:32.759377 7f43e73ec440 Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2020/01/11-12:35:32.759378 7f43e73ec440 Options.bottommost_compression_opts.enabled: false
2020/01/11-12:35:32.759379 7f43e73ec440 Options.compression_opts.window_bits: -14
2020/01/11-12:35:32.759380 7f43e73ec440 Options.compression_opts.level: 32767
2020/01/11-12:35:32.759382 7f43e73ec440 Options.compression_opts.strategy: 0
2020/01/11-12:35:32.759383 7f43e73ec440 Options.compression_opts.max_dict_bytes: 0
2020/01/11-12:35:32.759384 7f43e73ec440 Options.compression_opts.zstd_max_train_bytes: 0
2020/01/11-12:35:32.759386 7f43e73ec440 Options.compression_opts.enabled: false
2020/01/11-12:35:32.759387 7f43e73ec440 Options.level0_file_num_compaction_trigger: 4
2020/01/11-12:35:32.759388 7f43e73ec440 Options.level0_slowdown_writes_trigger: 20
2020/01/11-12:35:32.759389 7f43e73ec440 Options.level0_stop_writes_trigger: 36
2020/01/11-12:35:32.759391 7f43e73ec440 Options.target_file_size_base: 67108864
2020/01/11-12:35:32.759392 7f43e73ec440 Options.target_file_size_multiplier: 1
2020/01/11-12:35:32.759393 7f43e73ec440 Options.max_bytes_for_level_base: 268435456
2020/01/11-12:35:32.759395 7f43e73ec440 Options.snap_refresh_nanos: 0
2020/01/11-12:35:32.759396 7f43e73ec440 Options.level_compaction_dynamic_level_bytes: 0
2020/01/11-12:35:32.759397 7f43e73ec440 Options.max_bytes_for_level_multiplier: 10.000000
2020/01/11-12:35:32.759399 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[0]: 1
2020/01/11-12:35:32.759415 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[1]: 1
2020/01/11-12:35:32.759416 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[2]: 1
2020/01/11-12:35:32.759417 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[3]: 1
2020/01/11-12:35:32.759418 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[4]: 1
2020/01/11-12:35:32.759420 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[5]: 1
2020/01/11-12:35:32.759421 7f43e73ec440 Options.max_bytes_for_level_multiplier_addtl[6]: 1
2020/01/11-12:35:32.759422 7f43e73ec440 Options.max_sequential_skip_in_iterations: 8
2020/01/11-12:35:32.759423 7f43e73ec440 Options.max_compaction_bytes: 1677721600
2020/01/11-12:35:32.759424 7f43e73ec440 Options.arena_block_size: 8388608
2020/01/11-12:35:32.759425 7f43e73ec440 Options.soft_pending_compaction_bytes_limit: 68719476736
2020/01/11-12:35:32.759427 7f43e73ec440 Options.hard_pending_compaction_bytes_limit: 274877906944
2020/01/11-12:35:32.759428 7f43e73ec440 Options.rate_limit_delay_max_milliseconds: 100
2020/01/11-12:35:32.759429 7f43e73ec440 Options.disable_auto_compactions: 0
2020/01/11-12:35:32.759431 7f43e73ec440 Options.compaction_style: kCompactionStyleLevel
2020/01/11-12:35:32.759432 7f43e73ec440 Options.compaction_pri: kMinOverlappingRatio
2020/01/11-12:35:32.759433 7f43e73ec440 Options.compaction_options_universal.size_ratio: 1
2020/01/11-12:35:32.759435 7f43e73ec440 Options.compaction_options_universal.min_merge_width: 2
2020/01/11-12:35:32.759436 7f43e73ec440 Options.compaction_options_universal.max_merge_width: 4294967295
2020/01/11-12:35:32.759437 7f43e73ec440 Options.compaction_options_universal.max_size_amplification_percent: 200
2020/01/11-12:35:32.759438 7f43e73ec440 Options.compaction_options_universal.compression_size_percent: -1
2020/01/11-12:35:32.759440 7f43e73ec440 Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2020/01/11-12:35:32.759441 7f43e73ec440 Options.compaction_options_fifo.max_table_files_size: 1073741824
2020/01/11-12:35:32.759442 7f43e73ec440 Options.compaction_options_fifo.allow_compaction: 0
2020/01/11-12:35:32.759443 7f43e73ec440 Options.table_properties_collectors:
2020/01/11-12:35:32.759445 7f43e73ec440 Options.inplace_update_support: 0
2020/01/11-12:35:32.759446 7f43e73ec440 Options.inplace_update_num_locks: 10000
2020/01/11-12:35:32.759447 7f43e73ec440 Options.memtable_prefix_bloom_size_ratio: 0.000000
2020/01/11-12:35:32.759449 7f43e73ec440 Options.memtable_whole_key_filtering: 0
2020/01/11-12:35:32.759450 7f43e73ec440 Options.memtable_huge_page_size: 0
2020/01/11-12:35:32.759451 7f43e73ec440 Options.bloom_locality: 0
2020/01/11-12:35:32.759453 7f43e73ec440 Options.max_successive_merges: 0
2020/01/11-12:35:32.759454 7f43e73ec440 Options.optimize_filters_for_hits: 0
2020/01/11-12:35:32.759455 7f43e73ec440 Options.paranoid_file_checks: 0
2020/01/11-12:35:32.759456 7f43e73ec440 Options.force_consistency_checks: 0
2020/01/11-12:35:32.759457 7f43e73ec440 Options.report_bg_io_stats: 0
2020/01/11-12:35:32.759458 7f43e73ec440 Options.ttl: 0
2020/01/11-12:35:32.759459 7f43e73ec440 Options.periodic_compaction_seconds: 0
2020/01/11-12:35:32.759987 7f43e73ec440 [/version_set.cc:4267] Recovered from manifest file:kvdb/MANIFEST-000014 succeeded,manifest_file_number is 14, next_file_number is 16, last_sequence is 0, log_number is 13,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2020/01/11-12:35:32.759991 7f43e73ec440 [/version_set.cc:4276] Column family [default] (ID 0), log number is 13
2020/01/11-12:35:32.760049 7f43e73ec440 EVENT_LOG_v1 {"time_micros": 1578717332760044, "job": 1, "event": "recovery_started", "log_files": [15]}
2020/01/11-12:35:32.760062 7f43e73ec440 [/db_impl_open.cc:597] Recovering log #15 mode 2
2020/01/11-12:35:32.760123 7f43e73ec440 [/version_set.cc:3546] Creating manifest 17
2020/01/11-12:35:32.760790 7f43e73ec440 EVENT_LOG_v1 {"time_micros": 1578717332760786, "job": 1, "event": "recovery_finished"}
2020/01/11-12:35:32.762750 7f43e73ec440 DB pointer 0x20780540
2020/01/11-12:35:32.763037 7f43695ee700 [/db_impl.cc:779] ------- DUMPING STATS -------
2020/01/11-12:35:32.763073 7f43695ee700 [/db_impl.cc:780]
** DB Stats **
Uptime(secs): 0.0 total, 0.0 interval
Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s
Interval stall: 00:00:0.000 H:M:S, 0.0 percent
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
** Compaction Stats [default] **
Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
** Compaction Stats [default] **
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Uptime(secs): 0.0 total, 0.0 interval
Flush(GB): cumulative 0.000, interval 0.000
AddFile(GB): cumulative 0.000, interval 0.000
AddFile(Total Files): cumulative 0, interval 0
AddFile(L0 Files): cumulative 0, interval 0
AddFile(Keys): cumulative 0, interval 0
Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
** File Read Latency Histogram By Level [default] **
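The recurring "------- DUMPING STATS -------" blocks are written by a background thread every stats_dump_period_sec (600 s, matching the 12:24:05 → 12:34:05 gap above). The same report can also be pulled on demand; a hedged sketch, assuming an already-open rocksdb::DB* handle named db:

#include <iostream>
#include <string>
#include "rocksdb/db.h"

void dump_stats(rocksdb::DB* db) {
  std::string stats;
  // "rocksdb.stats" returns the same DB Stats / Compaction Stats text
  // that the periodic background dump writes into LOG.
  if (db->GetProperty("rocksdb.stats", &stats)) {
    std::cout << stats << std::endl;
  }
}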
# This is a RocksDB option file.
#
# For detailed file format spec, please refer to the example file
# in examples/rocksdb_option_file_example.ini
#
[Version]
rocksdb_version=6.2.4
options_file_version=1.1
[DBOptions]
avoid_unnecessary_blocking_io=false
allow_mmap_reads=false
allow_fallocate=true
WAL_size_limit_MB=0
writable_file_max_buffer_size=1048576
allow_mmap_writes=false
allow_concurrent_memtable_write=true
use_direct_reads=false
max_open_files=-1
strict_bytes_per_sync=false
db_write_buffer_size=0
max_background_jobs=2
WAL_ttl_seconds=0
enable_thread_tracking=false
error_if_exists=false
is_fd_close_on_exec=true
recycle_log_file_num=0
max_manifest_file_size=1073741824
skip_log_error_on_recovery=false
skip_stats_update_on_db_open=false
max_total_wal_size=0
new_table_reader_for_compaction_inputs=false
manual_wal_flush=false
compaction_readahead_size=0
atomic_flush=false
random_access_max_buffer_size=1048576
create_missing_column_families=false
wal_bytes_per_sync=0
use_adaptive_mutex=false
use_direct_io_for_flush_and_compaction=false
max_background_compactions=-1
advise_random_on_open=true
base_background_compactions=-1
max_background_flushes=-1
two_write_queues=false
table_cache_numshardbits=6
keep_log_file_num=1000
write_thread_slow_yield_usec=3
stats_dump_period_sec=600
avoid_flush_during_recovery=false
log_file_time_to_roll=0
delayed_write_rate=16777216
manifest_preallocation_size=4194304
paranoid_checks=true
max_log_file_size=0
allow_2pc=false
wal_dir=kvdb
db_log_dir=
max_subcompactions=1
create_if_missing=true
enable_pipelined_write=false
bytes_per_sync=0
stats_persist_period_sec=600
stats_history_buffer_size=1048576
fail_if_options_file_error=false
use_fsync=false
wal_recovery_mode=kPointInTimeRecovery
delete_obsolete_files_period_micros=21600000000
enable_write_thread_adaptive_yield=true
avoid_flush_during_shutdown=false
write_thread_max_yield_usec=100
info_log_level=INFO_LEVEL
max_file_opening_threads=16
dump_malloc_stats=false
allow_ingest_behind=false
access_hint_on_compaction_start=NORMAL
preserve_deletes=false
[CFOptions "default"]
sample_for_compression=0
compaction_pri=kMinOverlappingRatio
merge_operator=nullptr
compaction_filter_factory=nullptr
memtable_factory=SkipListFactory
memtable_insert_with_hint_prefix_extractor=nullptr
comparator=leveldb.BytewiseComparator
target_file_size_base=67108864
max_sequential_skip_in_iterations=8
compaction_style=kCompactionStyleLevel
max_bytes_for_level_base=268435456
bloom_locality=0
write_buffer_size=67108864
compression_per_level=
memtable_huge_page_size=0
max_successive_merges=0
arena_block_size=8388608
memtable_whole_key_filtering=false
target_file_size_multiplier=1
max_bytes_for_level_multiplier_additional=1:1:1:1:1:1:1
snap_refresh_nanos=0
num_levels=7
min_write_buffer_number_to_merge=1
max_write_buffer_number_to_maintain=0
max_write_buffer_number=2
compression=kNoCompression
level0_stop_writes_trigger=36
level0_slowdown_writes_trigger=20
compaction_filter=nullptr
level0_file_num_compaction_trigger=4
max_compaction_bytes=1677721600
compaction_options_universal={allow_trivial_move=false;size_ratio=1;min_merge_width=2;max_size_amplification_percent=200;max_merge_width=4294967295;compression_size_percent=-1;stop_style=kCompactionStopStyleTotalSize;}
memtable_prefix_bloom_size_ratio=0.000000
hard_pending_compaction_bytes_limit=274877906944
ttl=0
table_factory=BlockBasedTable
soft_pending_compaction_bytes_limit=68719476736
prefix_extractor=nullptr
bottommost_compression=kDisableCompressionOption
force_consistency_checks=false
paranoid_file_checks=false
compaction_options_fifo={allow_compaction=false;max_table_files_size=1073741824;}
max_bytes_for_level_multiplier=10.000000
optimize_filters_for_hits=false
level_compaction_dynamic_level_bytes=false
inplace_update_num_locks=10000
inplace_update_support=false
periodic_compaction_seconds=0
disable_auto_compactions=false
report_bg_io_stats=false
[TableOptions/BlockBasedTable "default"]
pin_top_level_index_and_filter=true
enable_index_compression=true
read_amp_bytes_per_bit=8589934592
format_version=2
block_align=false
metadata_block_size=4096
block_size_deviation=10
partition_filters=false
block_size=4096
index_block_restart_interval=1
no_block_cache=false
checksum=kCRC32c
whole_key_filtering=true
index_shortening=kShortenSeparators
data_block_index_type=kDataBlockBinarySearch
index_type=kBinarySearch
verify_compression=false
filter_policy=nullptr
data_block_hash_table_util_ratio=0.750000
pin_l0_filter_and_index_blocks_in_cache=false
block_restart_interval=16
cache_index_and_filter_blocks_with_high_priority=false
cache_index_and_filter_blocks=false
hash_index_allow_collision=true
flush_block_policy_factory=FlushBlockBySizePolicyFactory
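The INI-style listing above is the OPTIONS-xxxxxx snapshot that RocksDB persists next to the database on every open; it mirrors the option dump in the LOG. A hedged sketch (not part of this PR) of reading such a snapshot back through RocksDB's options_util helpers:

#include <cassert>
#include <vector>
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/utilities/options_util.h"

int main() {
  rocksdb::DBOptions db_opts;
  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
  // Finds the newest OPTIONS-xxxxxx file under the DB path and parses the
  // [DBOptions] and [CFOptions "default"] sections shown above.
  rocksdb::Status s = rocksdb::LoadLatestOptions(
      "kvdb", rocksdb::Env::Default(), &db_opts, &cf_descs);
  assert(s.ok() && cf_descs.size() == 1);  // single "default" column family
  return 0;
}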
...@@ -51,14 +51,14 @@ int GeneralModelOp::inference() { ...@@ -51,14 +51,14 @@ int GeneralModelOp::inference() {
// infer // infer
if (batch_size > 0) { if (batch_size > 0) {
int var_num = req->insts(0).tensor_array_size(); int var_num = req->insts(0).tensor_array_size();
VLOG(3) << "var num: " << var_num; VLOG(2) << "var num: " << var_num;
elem_type.resize(var_num); elem_type.resize(var_num);
elem_size.resize(var_num); elem_size.resize(var_num);
capacity.resize(var_num); capacity.resize(var_num);
paddle::PaddleTensor lod_tensor; paddle::PaddleTensor lod_tensor;
for (int i = 0; i < var_num; ++i) { for (int i = 0; i < var_num; ++i) {
elem_type[i] = req->insts(0).tensor_array(i).elem_type(); elem_type[i] = req->insts(0).tensor_array(i).elem_type();
VLOG(3) << "var[" << i << "] has elem type: " << elem_type[i]; VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
if (elem_type[i] == 0) { // int64 if (elem_type[i] == 0) { // int64
elem_size[i] = sizeof(int64_t); elem_size[i] = sizeof(int64_t);
lod_tensor.dtype = paddle::PaddleDType::INT64; lod_tensor.dtype = paddle::PaddleDType::INT64;
...@@ -70,17 +70,17 @@ int GeneralModelOp::inference() { ...@@ -70,17 +70,17 @@ int GeneralModelOp::inference() {
if (req->insts(0).tensor_array(i).shape(0) == -1) { if (req->insts(0).tensor_array(i).shape(0) == -1) {
lod_tensor.lod.resize(1); lod_tensor.lod.resize(1);
lod_tensor.lod[0].push_back(0); lod_tensor.lod[0].push_back(0);
VLOG(3) << "var[" << i << "] is lod_tensor"; VLOG(2) << "var[" << i << "] is lod_tensor";
} else { } else {
lod_tensor.shape.push_back(batch_size); lod_tensor.shape.push_back(batch_size);
capacity[i] = 1; capacity[i] = 1;
for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) { for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) {
int dim = req->insts(0).tensor_array(i).shape(k); int dim = req->insts(0).tensor_array(i).shape(k);
VLOG(3) << "shape for var[" << i << "]: " << dim; VLOG(2) << "shape for var[" << i << "]: " << dim;
capacity[i] *= dim; capacity[i] *= dim;
lod_tensor.shape.push_back(dim); lod_tensor.shape.push_back(dim);
} }
VLOG(3) << "var[" << i << "] is tensor, capacity: " << capacity[i]; VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i];
} }
if (i == 0) { if (i == 0) {
lod_tensor.name = "words"; lod_tensor.name = "words";
...@@ -95,19 +95,19 @@ int GeneralModelOp::inference() { ...@@ -95,19 +95,19 @@ int GeneralModelOp::inference() {
for (int j = 0; j < batch_size; ++j) { for (int j = 0; j < batch_size; ++j) {
const Tensor &tensor = req->insts(j).tensor_array(i); const Tensor &tensor = req->insts(j).tensor_array(i);
int data_len = tensor.data_size(); int data_len = tensor.data_size();
VLOG(3) << "tensor size for var[" << i << "]: " << tensor.data_size(); VLOG(2) << "tensor size for var[" << i << "]: " << tensor.data_size();
int cur_len = in->at(i).lod[0].back(); int cur_len = in->at(i).lod[0].back();
VLOG(3) << "current len: " << cur_len; VLOG(2) << "current len: " << cur_len;
in->at(i).lod[0].push_back(cur_len + data_len); in->at(i).lod[0].push_back(cur_len + data_len);
VLOG(3) << "new len: " << cur_len + data_len; VLOG(2) << "new len: " << cur_len + data_len;
} }
in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]); in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]);
in->at(i).shape = {in->at(i).lod[0].back(), 1}; in->at(i).shape = {in->at(i).lod[0].back(), 1};
VLOG(3) << "var[" << i VLOG(2) << "var[" << i
<< "] is lod_tensor and len=" << in->at(i).lod[0].back(); << "] is lod_tensor and len=" << in->at(i).lod[0].back();
} else { } else {
in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]); in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]);
VLOG(3) << "var[" << i VLOG(2) << "var[" << i
<< "] is tensor and capacity=" << batch_size * capacity[i]; << "] is tensor and capacity=" << batch_size * capacity[i];
} }
} }
...@@ -144,7 +144,7 @@ int GeneralModelOp::inference() { ...@@ -144,7 +144,7 @@ int GeneralModelOp::inference() {
} }
} }
VLOG(3) << "going to infer"; VLOG(2) << "going to infer";
TensorVector *out = butil::get_object<TensorVector>(); TensorVector *out = butil::get_object<TensorVector>();
if (!out) { if (!out) {
LOG(ERROR) << "Failed get tls output object"; LOG(ERROR) << "Failed get tls output object";
...@@ -157,7 +157,7 @@ int GeneralModelOp::inference() { ...@@ -157,7 +157,7 @@ int GeneralModelOp::inference() {
for (uint32_t i = 0; i < 10; i++) { for (uint32_t i = 0; i < 10; i++) {
oss << *(example + i) << " "; oss << *(example + i) << " ";
} }
VLOG(3) << "msg: " << oss.str(); VLOG(2) << "msg: " << oss.str();
// infer // infer
if (predictor::InferManager::instance().infer( if (predictor::InferManager::instance().infer(
...@@ -167,7 +167,7 @@ int GeneralModelOp::inference() { ...@@ -167,7 +167,7 @@ int GeneralModelOp::inference() {
} }
// print response // print response
float *example_1 = reinterpret_cast<float *>((*out)[0].data.data()); float *example_1 = reinterpret_cast<float *>((*out)[0].data.data());
VLOG(3) << "result: " << *example_1; VLOG(2) << "result: " << *example_1;
Response *res = mutable_data<Response>(); Response *res = mutable_data<Response>();
......
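The LoD bookkeeping above is easier to follow outside C++; here is a minimal, purely illustrative Python sketch of the same accumulation (toy lengths, not real request data):

# Mirrors the lod[0] accumulation in GeneralModelOp::inference() above.
data_lens = [3, 5, 2]              # per-instance tensor.data_size() for a batch of 3
lod = [0]                          # lod_tensor.lod[0].push_back(0)
for data_len in data_lens:
    lod.append(lod[-1] + data_len)  # cur_len -> cur_len + data_len
# lod == [0, 3, 8, 10]; lod[-1] sizes the flat data buffer (lod[-1] * elem_size)
# and the tensor shape becomes [lod[-1], 1].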
...@@ -3,7 +3,7 @@ add_library(fluid_cpu_engine ${fluid_cpu_engine_srcs}) ...@@ -3,7 +3,7 @@ add_library(fluid_cpu_engine ${fluid_cpu_engine_srcs})
target_include_directories(fluid_cpu_engine PUBLIC target_include_directories(fluid_cpu_engine PUBLIC
${CMAKE_BINARY_DIR}/Paddle/fluid_install_dir/) ${CMAKE_BINARY_DIR}/Paddle/fluid_install_dir/)
add_dependencies(fluid_cpu_engine pdserving extern_paddle configure kvdb) add_dependencies(fluid_cpu_engine pdserving extern_paddle configure kvdb)
target_link_libraries(fluid_cpu_engine pdserving paddle_fluid iomp5 mklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz) target_link_libraries(fluid_cpu_engine pdserving paddle_fluid -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
install(TARGETS fluid_cpu_engine install(TARGETS fluid_cpu_engine
ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib
......
file(GLOB_RECURSE SERVING_CLIENT_PY_FILES serving_client/*.py) if (CLIENT_ONLY)
file(GLOB_RECURSE SERVING_CLIENT_PY_FILES paddle_serving_client/*.py)
set(PY_FILES ${SERVING_CLIENT_PY_FILES}) set(PY_FILES ${SERVING_CLIENT_PY_FILES})
SET(PACKAGE_NAME "serving_client") SET(PACKAGE_NAME "serving_client")
set(SETUP_LOG_FILE "setup.py.log") set(SETUP_LOG_FILE "setup.py.client.log")
endif()
if (NOT CLIENT_ONLY)
file(GLOB_RECURSE SERVING_SERVER_PY_FILES paddle_serving_server/*.py)
set(PY_FILES ${SERVING_SERVER_PY_FILES})
SET(PACKAGE_NAME "serving_server")
set(SETUP_LOG_FILE "setup.py.server.log")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in if (CLIENT_ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.client.in
${CMAKE_CURRENT_BINARY_DIR}/setup.py) ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
endif()
if (NOT CLIENT_ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.server.in
${CMAKE_CURRENT_BINARY_DIR}/setup.py)
endif()
set(SERVING_CLIENT_CORE ${PADDLE_SERVING_BINARY_DIR}/core/general-client/serving_client.so) set(SERVING_CLIENT_CORE ${PADDLE_SERVING_BINARY_DIR}/core/general-client/serving_client.so)
message("python env: " ${py_env}) message("python env: " ${py_env})
if (CLIENT_ONLY)
add_custom_command( add_custom_command(
OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving/ ${PADDLE_SERVING_BINARY_DIR}/python/ COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_client/ ${PADDLE_SERVING_BINARY_DIR}/python/
COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_SERVING_BINARY_DIR}/core/general-client/serving_client.so ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/serving_client/ COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_SERVING_BINARY_DIR}/core/general-client/serving_client.so ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
DEPENDS ${SERVING_CLIENT_CORE} sdk_configure_py_proto ${PY_FILES}) DEPENDS ${SERVING_CLIENT_CORE} sdk_configure_py_proto ${PY_FILES})
add_custom_target(paddle_python ALL DEPENDS serving_client ${PADDLE_SERVING_BINARY_DIR}/.timestamp) add_custom_target(paddle_python ALL DEPENDS serving_client ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
endif()
if (NOT CLIENT_ONLY)
add_custom_command(
OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server/ ${PADDLE_SERVING_BINARY_DIR}/python/
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
endif()
set(SERVING_CLIENT_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) set(SERVING_CLIENT_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
set(SERVING_SERVER_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
if (CLIENT_ONLY)
install(DIRECTORY ${SERVING_CLIENT_PYTHON_PACKAGE_DIR} install(DIRECTORY ${SERVING_CLIENT_PYTHON_PACKAGE_DIR}
DESTINATION opt/serving_client/share/wheels DESTINATION opt/serving_client/share/wheels
) )
endif()
if (NOT CLIENT_ONLY)
install(DIRECTORY ${SERVING_SERVER_PYTHON_PACKAGE_DIR}
DESTINATION opt/serving_server/share/wheels
)
endif()
find_program(PATCHELF_EXECUTABLE patchelf) find_program(PATCHELF_EXECUTABLE patchelf)
if(NOT PATCHELF_EXECUTABLE) if(NOT PATCHELF_EXECUTABLE)
......
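Net effect of this CMakeLists rework: configuring with CLIENT_ONLY=ON packages the serving_client wheel from paddle_serving_client/ (bundling serving_client.so), while the default server build packages paddle_serving_server/; both branches generate the same ${CMAKE_CURRENT_BINARY_DIR}/setup.py, share the .timestamp custom target, and install wheels from dist/.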
rm -rf imdb.vocab kvdb log *.pyc serving_client_conf serving_server_model test_data text_classification_data.tar.gz train_data work_dir1
wget https://fleet.bj.bcebos.com/text_classification_data.tar.gz wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz
tar -zxvf text_classification_data.tar.gz tar -zxvf text_classification_data.tar.gz
#wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imdb-demo%2Fimdb.tar.gz
#tar -xzf imdb-demo%2Fimdb.tar.gz
...@@ -16,7 +16,6 @@ import sys ...@@ -16,7 +16,6 @@ import sys
import paddle import paddle
import logging import logging
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle_serving as serving
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid") logger = logging.getLogger("fluid")
...@@ -42,7 +41,7 @@ if __name__ == "__main__": ...@@ -42,7 +41,7 @@ if __name__ == "__main__":
dataset = fluid.DatasetFactory().create_dataset() dataset = fluid.DatasetFactory().create_dataset()
filelist = ["train_data/%s" % x for x in os.listdir("train_data")] filelist = ["train_data/%s" % x for x in os.listdir("train_data")]
dataset.set_use_var([data, label]) dataset.set_use_var([data, label])
pipe_command = "python imdb_reader.py" pipe_command = "/home/users/dongdaxiang/paddle_whls/custom_op/paddle_release_home/python/bin/python imdb_reader.py"
dataset.set_pipe_command(pipe_command) dataset.set_pipe_command(pipe_command)
dataset.set_batch_size(4) dataset.set_batch_size(4)
dataset.set_filelist(filelist) dataset.set_filelist(filelist)
...@@ -54,15 +53,22 @@ if __name__ == "__main__": ...@@ -54,15 +53,22 @@ if __name__ == "__main__":
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
epochs = 30 epochs = 6
save_dirname = "cnn_model" save_dirname = "cnn_model"
import paddle_serving_client.io as serving_io
for i in range(epochs): for i in range(epochs):
exe.train_from_dataset(program=fluid.default_main_program(), exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset, debug=False) dataset=dataset, debug=False)
logger.info("TRAIN --> pass: {}".format(i)) logger.info("TRAIN --> pass: {}".format(i))
fluid.io.save_inference_model("%s/epoch%d.model" % (save_dirname, i), if i == 5:
[data.name, label.name], [acc], exe) serving_io.save_model("serving_server_model",
serving.save_model("%s/epoch%d.model" % (save_dirname, i), "client_config{}".format(i), "serving_client_conf",
{"words": data, "label": label}, {"words": data, "label": label},
{"acc": acc, "cost": avg_cost, "prediction": prediction}) {"cost": avg_cost, "acc": acc,
"prediction": prediction},
fluid.default_main_program())
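Note the switch from fluid.io.save_inference_model to serving_io.save_model here: as the io.py changes later in this diff show, save_model still calls save_inference_model under the hood, but additionally writes a GeneralModelConfig in text prototxt form to both sides (serving_server_conf.prototxt under serving_server_model, serving_client_conf.prototxt under serving_client_conf) describing the feed and fetch variables.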
from paddle_serving import Client from paddle_serving_client import Client
import sys import sys
client = Client() client = Client()
...@@ -7,7 +7,7 @@ client.connect(["127.0.0.1:9292"]) ...@@ -7,7 +7,7 @@ client.connect(["127.0.0.1:9292"])
for line in sys.stdin: for line in sys.stdin:
group = line.strip().split() group = line.strip().split()
words = [int(x) for x in group[1:int(group[0])]] words = [int(x) for x in group[1:int(group[0]) + 1]]
label = [int(group[-1])] label = [int(group[-1])]
feed = {"words": words, "label": label} feed = {"words": words, "label": label}
fetch = ["acc", "cost", "prediction"] fetch = ["acc", "cost", "prediction"]
......
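Pieced together, the new client workflow looks roughly like the sketch below. The predict() name and signature are assumptions: the Client changes later in this diff only show the method's internals, not its definition.

from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

feed = {"words": [8, 233, 52], "label": [0]}        # toy word ids, not real data
fetch = ["acc", "cost", "prediction"]
fetch_map = client.predict(feed=feed, fetch=fetch)  # assumed method name/signature
print(fetch_map)                                    # dict keyed by the fetch names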
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from paddle_serving import Client from paddle_serving_client import Client
import sys import sys
import subprocess import subprocess
from multiprocessing import Pool from multiprocessing import Pool
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from paddle_serving import Client from paddle_serving_client import Client
import sys import sys
import subprocess import subprocess
from multiprocessing import Pool from multiprocessing import Pool
......
import os
import sys
from paddle_serving_server import OpMaker
from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config(sys.argv[1])
server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
server.run_server()
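For reference, based on the Server implementation added later in this diff, prepare_server() materializes work_dir1 with infer_service.prototxt, workflow.prototxt, resource.prototxt, model_toolkit.prototxt, and general_model.prototxt, and run_server() then launches the serving binary with flags pointing at those files.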
...@@ -13,7 +13,9 @@ ...@@ -13,7 +13,9 @@
# limitations under the License. # limitations under the License.
from .serving_client import PredictorClient from .serving_client import PredictorClient
from ..proto import sdk_configure_pb2 as sdk from .proto import sdk_configure_pb2 as sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
import time import time
int_type = 0 int_type = 0
...@@ -74,34 +76,25 @@ class Client(object): ...@@ -74,34 +76,25 @@ class Client(object):
self.feed_names_to_idx_ = {} self.feed_names_to_idx_ = {}
def load_client_config(self, path): def load_client_config(self, path):
model_conf = m_config.GeneralModelConfig()
f = open(path, 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
# load configuration here # load configuration here
# get feed vars, fetch vars # get feed vars, fetch vars
# get feed shapes, feed types # get feed shapes, feed types
# map feed names to index # map feed names to index
self.client_handle_ = PredictorClient() self.client_handle_ = PredictorClient()
self.client_handle_.init(path) self.client_handle_.init(path)
self.feed_names_ = [] self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
self.fetch_names_ = [] self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
self.feed_shapes_ = [] self.feed_shapes_ = [var.shape for var in model_conf.feed_var]
self.feed_types_ = {}
self.feed_names_to_idx_ = {} self.feed_names_to_idx_ = {}
for i, var in enumerate(model_conf.feed_var):
self.feed_names_to_idx_[var.alias_name] = i
self.feed_types_[var.alias_name] = var.feed_type
with open(path) as fin:
group = fin.readline().strip().split()
feed_num = int(group[0])
fetch_num = int(group[1])
for i in range(feed_num):
group = fin.readline().strip().split()
self.feed_names_.append(group[0])
tmp_shape = []
for s in group[2:-1]:
tmp_shape.append(int(s))
self.feed_shapes_.append(tmp_shape)
self.feed_types_[group[0]] = int(group[-1])
self.feed_names_to_idx_[group[0]] = i
for i in range(fetch_num):
group = fin.readline().strip().split()
self.fetch_names_.append(group[0])
return return
def connect(self, endpoints): def connect(self, endpoints):
...@@ -148,6 +141,9 @@ class Client(object): ...@@ -148,6 +141,9 @@ class Client(object):
result = self.client_handle_.predict( result = self.client_handle_.predict(
float_slot, float_feed_names, int_slot, int_feed_names, fetch_names) float_slot, float_feed_names, int_slot, int_feed_names, fetch_names)
# TODO(guru4elephant): the order of fetch var names should be consistent with
# general_model_config, which is not friendly
# In the future, we need to make the number of fetched variables changeable
result_map = {} result_map = {}
for i, name in enumerate(fetch_names): for i, name in enumerate(fetch_names):
result_map[name] = result[i] result_map[name] = result[i]
......
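Because the client config is now a text-format protobuf rather than the old ad-hoc client.conf, it can also be inspected outside the Client class. A minimal sketch, assuming the generated proto module is importable as packaged below:

import google.protobuf.text_format as text_format
from paddle_serving_client.proto import general_model_config_pb2 as m_config

conf = m_config.GeneralModelConfig()
with open("serving_client_conf/serving_client_conf.prototxt") as f:
    text_format.Merge(f.read(), conf)
for var in conf.feed_var:
    print(var.alias_name, var.feed_type, list(var.shape))  # e.g. words 0 [-1]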
...@@ -12,13 +12,14 @@ ...@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from paddle.fluid import Executor from paddle.fluid import Executor
from paddle.fluid.compiler import CompiledProgram from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.framework import Program from paddle.fluid.framework import core
from paddle.fluid.framework import default_main_program from paddle.fluid.framework import default_main_program
from paddle.fluid.framework import Program
from paddle.fluid import CPUPlace from paddle.fluid import CPUPlace
from paddle.fluid.io import save_persistables from paddle.fluid.io import save_inference_model
from ..proto import general_model_config_pb2 as model_conf
import os import os
def save_model(server_model_folder, def save_model(server_model_folder,
...@@ -26,39 +27,51 @@ def save_model(server_model_folder, ...@@ -26,39 +27,51 @@ def save_model(server_model_folder,
feed_var_dict, feed_var_dict,
fetch_var_dict, fetch_var_dict,
main_program=None): main_program=None):
if main_program is None:
main_program = default_main_program()
elif isinstance(main_program, CompiledProgram):
main_program = main_program._program
if main_program is None:
raise TypeError("program should be as Program type or None")
if not isinstance(main_program, Program):
raise TypeError("program should be as Program type or None")
executor = Executor(place=CPUPlace()) executor = Executor(place=CPUPlace())
save_persistables(executor, server_model_folder, feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]
main_program) target_vars = fetch_var_dict.values()
save_inference_model(server_model_folder, feed_var_names,
target_vars, executor, main_program=main_program)
config = model_conf.GeneralModelConfig()
for key in feed_var_dict:
feed_var = model_conf.FeedVar()
feed_var.alias_name = key
feed_var.name = feed_var_dict[key].name
feed_var.is_lod_tensor = feed_var_dict[key].lod_level == 1
if feed_var_dict[key].dtype == core.VarDesc.VarType.INT32 or \
feed_var_dict[key].dtype == core.VarDesc.VarType.INT64:
feed_var.feed_type = 0
if feed_var_dict[key].dtype == core.VarDesc.VarType.FP32:
feed_var.feed_type = 1
if feed_var.is_lod_tensor:
feed_var.shape.extend([-1])
else:
tmp_shape = []
for v in feed_var_dict[key].shape:
if v >= 0:
tmp_shape.append(v)
feed_var.shape.extend(tmp_shape)
config.feed_var.extend([feed_var])
for key in fetch_var_dict:
fetch_var = model_conf.FetchVar()
fetch_var.alias_name = key
fetch_var.name = fetch_var_dict[key].name
fetch_var.shape.extend(fetch_var_dict[key].shape)
config.fetch_var.extend([fetch_var])
cmd = "mkdir -p {}".format(client_config_folder) cmd = "mkdir -p {}".format(client_config_folder)
os.system(cmd)
with open("{}/client.conf".format(client_config_folder), "w") as fout:
fout.write("{} {}\n".format(len(feed_var_dict), len(fetch_var_dict)))
for key in feed_var_dict:
fout.write("{}".format(key))
if feed_var_dict[key].lod_level == 1:
fout.write(" 1 -1\n")
elif feed_var_dict[key].lod_level == 0:
fout.write(" {}".format(len(feed_var_dict[key].shape)))
for dim in feed_var_dict[key].shape:
fout.write(" {}".format(dim))
fout.write("\n")
for key in fetch_var_dict:
fout.write("{} {}\n".format(key, fetch_var_dict[key].name))
cmd = "cp {}/client.conf {}/server.conf".format(
client_config_folder, server_model_folder)
os.system(cmd) os.system(cmd)
with open("{}/serving_client_conf.prototxt".format(client_config_folder), "w") as fout:
fout.write(str(config))
with open("{}/serving_server_conf.prototxt".format(server_model_folder), "w") as fout:
fout.write(str(config))
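For the IMDB model trained above, str(config) would render roughly as follows; the internal fetch variable name is hypothetical, but 'words' has lod_level 1, so it becomes a LoD feed with feed_type 0 (int64) and shape -1:

feed_var {
  name: "words"
  alias_name: "words"
  is_lod_tensor: true
  feed_type: 0
  shape: -1
}
fetch_var {
  name: "mean_0.tmp_0"   # hypothetical internal variable name
  alias_name: "cost"
  shape: 1
}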
...@@ -13,4 +13,5 @@ ...@@ -13,4 +13,5 @@
# limitations under the License. # limitations under the License.
""" Paddle Serving Client version string """ """ Paddle Serving Client version string """
serving_client_version = "0.1.0" serving_client_version = "0.1.0"
serving_server_version = "0.1.0"
module_proto_version = "0.1.0" module_proto_version = "0.1.0"
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .proto import server_configure_pb2 as server_sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
class OpMaker(object):
def __init__(self):
self.op_dict = {"general_infer":"GeneralInferOp",
"general_reader":"GeneralReaderOp",
"general_single_kv":"GeneralSingleKVOp",
"general_dist_kv":"GeneralDistKVOp"}
# currently, inputs and outputs are not used
# when we have OpGraphMaker, inputs and outputs are necessary
def create(self, name, inputs=[], outputs=[]):
if name not in self.op_dict:
raise Exception("Op name {} is not supported right now".format(name))
node = server_sdk.DAGNode()
node.name = "{}_op".format(name)
node.type = self.op_dict[name]
return node
class OpSeqMaker(object):
def __init__(self):
self.workflow = server_sdk.Workflow()
self.workflow.name = "workflow1"
self.workflow.workflow_type = "Sequence"
def add_op(self, node):
if len(self.workflow.nodes) >= 1:
dep = server_sdk.DAGNodeDependency()
dep.name = self.workflow.nodes[-1].name
dep.mode = "RO"
node.dependencies.extend([dep])
self.workflow.nodes.extend([node])
def get_op_sequence(self):
workflow_conf = server_sdk.WorkflowConf()
workflow_conf.workflows.extend([self.workflow])
return workflow_conf
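Given the reader->infer sequence built in the server script above, str(op_seq_maker.get_op_sequence()) would render roughly as:

workflows {
  name: "workflow1"
  workflow_type: "Sequence"
  nodes {
    name: "general_reader_op"
    type: "GeneralReaderOp"
  }
  nodes {
    name: "general_infer_op"
    type: "GeneralInferOp"
    dependencies {
      name: "general_reader_op"
      mode: "RO"
    }
  }
}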
class Server(object):
def __init__(self):
self.server_handle_ = None
self.infer_service_conf = None
self.model_toolkit_conf = None
self.resource_conf = None
self.engine = None
self.memory_optimization = False
self.model_conf = None
self.workflow_fn = "workflow.prototxt"
self.resource_fn = "resource.prototxt"
self.infer_service_fn = "infer_service.prototxt"
self.model_toolkit_fn = "model_toolkit.prototxt"
self.general_model_config_fn = "general_model.prototxt"
self.workdir = ""
self.max_concurrency = 0
self.num_threads = 0
self.port = 8080
self.reload_interval_s = 10
def set_max_concurrency(self, concurrency):
self.max_concurrency = concurrency
def set_num_threads(self, threads):
self.num_threads = threads
def set_port(self, port):
self.port = port
def set_reload_interval(self, interval):
self.reload_interval_s = interval
def set_op_sequence(self, op_seq):
self.workflow_conf = op_seq
def set_memory_optimize(self, flag=False):
self.memory_optimization = flag
def _prepare_engine(self, model_config_path, device):
if self.model_toolkit_conf is None:
self.model_toolkit_conf = server_sdk.ModelToolkitConf()
if self.engine is None:
self.engine = server_sdk.EngineDesc()
self.model_config_path = model_config_path
self.engine.name = "general_model"
self.engine.reloadable_meta = model_config_path + "/fluid_time_file"
self.engine.reloadable_type = "timestamp_ne"
self.engine.runtime_thread_num = 0
self.engine.batch_infer_size = 0
self.engine.enable_batch_align = 0
self.engine.model_data_path = model_config_path
self.engine.enable_memory_optimization = self.memory_optimization
self.engine.static_optimization = False
self.engine.force_update_static_cache = False
if device == "cpu":
self.engine.type = "FLUID_CPU_ANALYSIS_DIR"
elif device == "gpu":
self.engine.type = "FLUID_GPU_ANALYSIS_DIR"
self.model_toolkit_conf.engines.extend([self.engine])
def _prepare_infer_service(self, port):
if self.infer_service_conf is None:
self.infer_service_conf = server_sdk.InferServiceConf()
self.infer_service_conf.port = port
infer_service = server_sdk.InferService()
infer_service.name = "GeneralModelService"
infer_service.workflows.extend(["workflow1"])
self.infer_service_conf.services.extend([infer_service])
def _prepare_resource(self, workdir):
if self.resource_conf is None:
with open("{}/{}".format(workdir, self.general_model_config_fn), "w") as fout:
fout.write(str(self.model_conf))
self.resource_conf = server_sdk.ResourceConf()
self.resource_conf.model_toolkit_path = workdir
self.resource_conf.model_toolkit_file = self.model_toolkit_fn
self.resource_conf.general_model_path = workdir
self.resource_conf.general_model_file = self.general_model_config_fn
def _write_pb_str(self, filepath, pb_obj):
with open(filepath, "w") as fout:
fout.write(str(pb_obj))
def load_model_config(self, path):
self.model_config_path = path
self.model_conf = m_config.GeneralModelConfig()
f = open("{}/serving_server_conf.prototxt".format(path), 'r')
self.model_conf = google.protobuf.text_format.Merge(
str(f.read()), self.model_conf)
# check config here
# print config here
def prepare_server(self, workdir=None, port=9292, device="cpu"):
if workdir is None:
workdir = "./tmp"
os.system("mkdir {}".format(workdir))
else:
os.system("mkdir {}".format(workdir))
os.system("touch {}/fluid_time_file".format(workdir))
self._prepare_resource(workdir)
self._prepare_engine(self.model_config_path, device)
self._prepare_infer_service(port)
self.workdir = workdir
infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
workflow_fn = "{}/{}".format(workdir, self.workflow_fn)
resource_fn = "{}/{}".format(workdir, self.resource_fn)
model_toolkit_fn = "{}/{}".format(workdir, self.model_toolkit_fn)
self._write_pb_str(infer_service_fn, self.infer_service_conf)
self._write_pb_str(workflow_fn, self.workflow_conf)
self._write_pb_str(resource_fn, self.resource_conf)
self._write_pb_str(model_toolkit_fn, self.model_toolkit_conf)
def run_server(self):
# just run server with system command
# currently we do not load cube
command = "/home/users/dongdaxiang/github_develop/Serving/build_server/core/general-server/serving" \
" -enable_model_toolkit " \
"-inferservice_path {} " \
"-inferservice_file {} " \
"-max_concurrency {} " \
"-num_threads {} " \
"-port {} " \
"-reload_interval_s {} " \
"-resource_path {} " \
"-resource_file {} " \
"-workflow_path {} " \
"-workflow_file {} ".format(
self.workdir,
self.infer_service_fn,
self.max_concurrency,
self.num_threads,
self.port,
self.reload_interval_s,
self.workdir,
self.resource_fn,
self.workdir,
self.workflow_fn)
os.system(command)
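run_server() maps the setters above one-to-one onto serving binary flags; a hedged sketch of tuning a server before prepare_server():

server = Server()
server.set_num_threads(4)           # -num_threads
server.set_max_concurrency(16)      # -max_concurrency
server.set_port(9292)               # -port
server.set_reload_interval(5)       # -reload_interval_s
server.set_memory_optimize(True)    # EngineDesc.enable_memory_optimization
# then set_op_sequence / load_model_config / prepare_server as in the script above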
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
...@@ -11,5 +11,7 @@ ...@@ -11,5 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from .serving_client import Client """ Paddle Serving Client version string """
from .io import save_model serving_client_version = "0.1.0"
serving_server_version = "0.1.0"
module_proto_version = "0.1.0"
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
from setuptools import setup, Distribution, Extension
from setuptools import find_packages
from setuptools import setup
from paddle_serving_client.version import serving_client_version
def python_version():
return [int(v) for v in platform.python_version().split(".")]
max_version, mid_version, min_version = python_version()
REQUIRED_PACKAGES = [
'six >= 1.10.0', 'protobuf >= 3.1.0', 'paddlepaddle'
]
packages=['paddle_serving_client',
'paddle_serving_client.proto',
'paddle_serving_client.io']
package_data={'paddle_serving_client': ['serving_client.so']}
package_dir={'paddle_serving_client':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client',
'paddle_serving_client.proto':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto',
'paddle_serving_client.io':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/io'}
setup(
name='paddle-serving-client',
version=serving_client_version.replace('-', ''),
description=
('Paddle Serving Package for saved model with PaddlePaddle'),
url='https://github.com/PaddlePaddle/Serving',
author='PaddlePaddle Author',
author_email='guru4elephant@gmail.com',
install_requires=REQUIRED_PACKAGES,
packages=packages,
package_data=package_data,
package_dir=package_dir,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords=('paddle-serving serving-client deployment industrial easy-to-use'))
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
from setuptools import setup, Distribution, Extension
from setuptools import find_packages
from setuptools import setup
from paddle_serving_server.version import serving_server_version
def python_version():
return [int(v) for v in platform.python_version().split(".")]
max_version, mid_version, min_version = python_version()
REQUIRED_PACKAGES = [
'six >= 1.10.0', 'protobuf >= 3.1.0', 'paddlepaddle'
]
packages=['paddle_serving_server',
'paddle_serving_server.proto']
package_dir={'paddle_serving_server':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server',
'paddle_serving_server.proto':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto'}
setup(
name='paddle-serving-server',
version=serving_server_version.replace('-', ''),
description=
('Paddle Serving Package for saved model with PaddlePaddle'),
url='https://github.com/PaddlePaddle/Serving',
author='PaddlePaddle Author',
author_email='guru4elephant@gmail.com',
install_requires=REQUIRED_PACKAGES,
packages=packages,
package_dir=package_dir,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords=('paddle-serving serving-server deployment industrial easy-to-use'))