diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6649489b87642eff2cf96787d6a28457c2ac69a2..a51b6e9564e843a15bb29824d81b83e79ddafef2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -3,5 +3,5 @@ image: docker:latest build: script: - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY - - docker build --rm -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA . + - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA -f Docker/Dockerfile . - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA diff --git a/.travis.yml b/.travis.yml index 9215945e205576836b8f84b8ed8a267e10bd7996..ade98d696dc4bc51570fd4a3785c8a436bc6014c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ before_install: - cd $TRAVIS_BUILD_DIR/ext && git clone --depth=1 --single-branch git://github.com/oci-labs/clang-WebAssembly wasm-compiler - cd $TRAVIS_BUILD_DIR script: - - WASM_LLVM_CONFIG=$TRAVIS_BUILD_DIR/ext/wasm-compiler/bin/llvm-config ext/cmake-3.9.0-Linux-x86_64/bin/cmake -G Ninja -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT=$TRAVIS_BUILD_DIR/ext -DSecp256k1_ROOT_DIR=$TRAVIS_BUILD_DIR/ext -DBINARYEN_ROOT=$TRAVIS_BUILD_DIR/ext/wasm-compiler/ + - ext/cmake-3.9.0-Linux-x86_64/bin/cmake -G Ninja -DWASM_LLVM_CONFIG=$TRAVIS_BUILD_DIR/ext/wasm-compiler/bin/llvm-config -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT=$TRAVIS_BUILD_DIR/ext -DSecp256k1_ROOT_DIR=$TRAVIS_BUILD_DIR/ext -DBINARYEN_ROOT=$TRAVIS_BUILD_DIR/ext/wasm-compiler/ - ninja -j4 - tests/eosd_run_test.sh - tests/chain_test diff --git a/Dockerfile b/Docker/Dockerfile similarity index 52% rename from Dockerfile rename to Docker/Dockerfile index dd229e4f6e5195f33245e6d04ea691c976ece1b1..e4e6105f82500fe91ff6f5067824af2c7baa814f 100644 --- a/Dockerfile +++ b/Docker/Dockerfile @@ -1,26 +1,28 @@ FROM ubuntu:16.04 -LABEL authors="xiaobo (peterwillcn@gmail.com), toonsevrin (toonsevrin@gmail.com)" +LABEL maintainer="xiaobo " version="0.1.1" \ + description="This is eosio/eos image" website="https://eos.io" \ + reviewers="toonsevrin (toonsevrin@gmail.com), etc..." 
RUN echo 'APT::Install-Recommends 0;' >> /etc/apt/apt.conf.d/01norecommends \ - && echo 'APT::Install-Suggests 0;' >> /etc/apt/apt.conf.d/01norecommends \ - && apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo wget net-tools ca-certificates unzip + && echo 'APT::Install-Suggests 0;' >> /etc/apt/apt.conf.d/01norecommends \ + && apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo wget net-tools ca-certificates unzip RUN echo "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main" >> /etc/apt/sources.list.d/llvm.list \ - && wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - \ - && apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y git-core automake autoconf libtool build-essential pkg-config libtool \ - mpi-default-dev libicu-dev python-dev python3-dev libbz2-dev zlib1g-dev libssl-dev libgmp-dev \ - clang-4.0 lldb-4.0 lld-4.0 \ - && rm -rf /var/lib/apt/lists/* + && wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - \ + && apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y git-core automake autoconf libtool build-essential pkg-config libtool \ + mpi-default-dev libicu-dev python-dev python3-dev libbz2-dev zlib1g-dev libssl-dev libgmp-dev \ + clang-4.0 lldb-4.0 lld-4.0 \ + && rm -rf /var/lib/apt/lists/* RUN update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-4.0/bin/clang 400 \ && update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-4.0/bin/clang++ 400 RUN cd /tmp && wget https://cmake.org/files/v3.9/cmake-3.9.0-Linux-x86_64.sh \ - && mkdir /opt/cmake && chmod +x /tmp/cmake-3.9.0-Linux-x86_64.sh \ - && sh /tmp/cmake-3.9.0-Linux-x86_64.sh --prefix=/opt/cmake --skip-license \ - && ln -s /opt/cmake/bin/cmake /usr/local/bin + && mkdir /opt/cmake && chmod +x /tmp/cmake-3.9.0-Linux-x86_64.sh \ + && sh /tmp/cmake-3.9.0-Linux-x86_64.sh --prefix=/opt/cmake --skip-license \ + && ln -s /opt/cmake/bin/cmake /usr/local/bin RUN cd /tmp && wget https://dl.bintray.com/boostorg/release/1.64.0/source/boost_1_64_0.tar.gz \ && tar zxf boost_1_64_0.tar.gz \ @@ -42,10 +44,24 @@ RUN cd /tmp && mkdir wasm-compiler && cd wasm-compiler \ && cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/opt/wasm -DLLVM_TARGETS_TO_BUILD= -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly -DCMAKE_BUILD_TYPE=Release ../ \ && make -j$(nproc) install && rm -rf /tmp/wasm-compiler +RUN cd /tmp && wget https://github.com/WebAssembly/binaryen/archive/1.37.21.tar.gz && tar zxf 1.37.21.tar.gz \ + && cd binaryen-1.37.21 && cmake . && make && mkdir /opt/binaryen && mv /tmp/binaryen-1.37.21/bin /opt/binaryen \ + && ln -s /opt/binaryen/bin/* /usr/local && rm -rf /tmp/* + +# ** Following the official master branch code takes a long time to download, depending on the network speed. + +#RUN cd /tmp && git clone https://github.com/EOSIO/eos.git --recursive \ +# && mkdir -p /opt/eos/bin/data-dir && cd eos && mkdir build && cd build \ +# && cmake -DWASM_LLVM_CONFIG=/opt/wasm/bin/llvm-config -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eos ../ \ +# && make -j$(nproc) && make install && mv ../contracts / \ +# && ln -s /opt/eos/bin/eos* /usr/local/bin \ +# && rm -rf /tmp/eos* + RUN mkdir -p /opt/eos/bin/data-dir && mkdir -p /tmp/eos/build/ -COPY . /tmp/eos/ +# ** Using local code saves considerable time, but does't guarantee that your code stays up-to-date +COPY . 
/tmp/eos/ RUN cd /tmp/eos/build && cmake -DWASM_LLVM_CONFIG=/opt/wasm/bin/llvm-config -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eos ../ \ && make -j$(nproc) && make install && mv ../contracts / \ && ln -s /opt/eos/bin/eos* /usr/local/bin \ diff --git a/Docker/README.md b/Docker/README.md index dcc03f9715022f7ab446329b69c36a97682a7fef..1584595d76323f14bf28692a57fadac39cc4067c 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -1,16 +1,17 @@ ### Run in docker -So simple and fast operation EOS: +Simple and fast setup of EOS on Docker is also available. Firstly, install dependencies: - [Docker](https://docs.docker.com) - [Docker-compose](https://github.com/docker/compose) - [Docker-volumes](https://github.com/cpuguy83/docker-volumes) -Build eos images +Build eos image ``` -cd eos/Docker -cp ../genesis.json . -docker build --rm -t eosio/eos . +git clone https://github.com/EOSIO/eos.git --recursive +cd eos +cp genesis.json Docker +docker build -t eosio/eos -f Docker/Dockerfile . ``` Start docker @@ -18,17 +19,21 @@ Start docker ``` sudo rm -rf /data/store/eos # options sudo mkdir -p /data/store/eos -docker-compose -f docker-compose.yml up +docker-compose -f Docker/docker-compose.yml up +``` + +Get chain info + +``` +curl http://127.0.0.1:8888/v1/chain/get_info ``` Run example contracts +You can run the `eosc` commands via `docker exec` command. For example: ``` cd /data/store/eos/contracts/exchange -docker exec docker_eos_1 eosc setcode exchange contracts/exchange/exchange.wast contracts/exchange/exchange.abi - -cd /data/store/eos/contracts/currency -docker exec docker_eos_1 eosc setcode currency contracts/currency/currency.wast contracts/currency/currency.abi +docker exec docker_eos_1 eosc contract exchange contracts/exchange/exchange.wast contracts/exchange/exchange.abi ``` diff --git a/Docker/config.ini b/Docker/config.ini index 515670322be2b53d4a60a99cfcdb20ae210a1f0e..7dc52c21d57c73e8cda5dc035eda7fd49e6285b3 100644 --- a/Docker/config.ini +++ b/Docker/config.ini @@ -18,10 +18,10 @@ shared-file-dir = "blockchain" shared-file-size = 8192 # The local IP and port to listen for incoming http connections. -http-server-endpoint = 127.0.0.1:8888 +http-server-endpoint = 0.0.0.0:8888 # The local IP address and port to listen for incoming connections. -listen-endpoint = 127.0.0.1:9876 +listen-endpoint = 0.0.0.0:9876 # The IP address and port of a remote peer to sync with. # remote-endpoint = diff --git a/Docker/eosc.sh b/Docker/eosc.sh new file mode 100755 index 0000000000000000000000000000000000000000..05f0d63194bc8733bccbfd8b43bb7922112fca78 --- /dev/null +++ b/Docker/eosc.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Usage: +# Go into cmd loop: sudo ./eosc.sh +# Run single cmd: sudo ./eosc.sh + +PREFIX="docker exec docker_eos_1 eosc" +if [ -z $1 ] ; then + while : + do + read -e -p "eosc " cmd + history -s "$cmd" + $PREFIX $cmd + done +else + $PREFIX $@ +fi diff --git a/Doxygen guidelines.md b/Doxygen guidelines.md index 7ad1fc1e6cf221761ed4bffe941e4a7d2ea97923..4939f20231df54edeb5744fecd1624379f3201ef 100644 --- a/Doxygen guidelines.md +++ b/Doxygen guidelines.md @@ -1,4 +1,4 @@ -#Doxygen Guide for EOS +# Doxygen Guide for EOS
## Index diff --git a/README.md b/README.md index 10a42660e4ed7e919f4221ca80628dff5d956e01..76544e4bd0cf27efb665e11c2e8af4c917f3d155 100644 --- a/README.md +++ b/README.md @@ -338,7 +338,7 @@ sudo mkdir -p /data/store/eos docker-compose -f Docker/docker-compose.yml up ``` -Get chain info +Get chain info: ``` curl http://127.0.0.1:8888/v1/chain/get_info diff --git a/circle.yml b/circle.yml index 7efb088f0e3f5f539ac19fd2c8f673ddcedda536..ac5f3cd127a32a2d6a69e8f116ce43b0762260c1 100644 --- a/circle.yml +++ b/circle.yml @@ -4,7 +4,7 @@ machine: dependencies: override: - - cd ~/eos/Docker && docker build -t eosio/eos . + - cd ~/eos && docker build -t eosio/eos -f Docker/Dockerfile . test: pre: diff --git a/contracts/eoslib/db.h b/contracts/eoslib/db.h index aaa477eafe43bba1438d52abd42a39494090a8aa..f2b2196dd7ba62f8729660b3085dae939bb65496 100644 --- a/contracts/eoslib/db.h +++ b/contracts/eoslib/db.h @@ -60,6 +60,55 @@ extern "C" { * * @see Table class in C++ API * + * Example + * @code + * #pragma pack(push, 1) + * struct TestModel { + * AccountName name; + * unsigned char age; + * uint64_t phone; + * }; + * + * TestModel alice{ N(alice), 20, 4234622}; + * TestModel bob { N(bob), 15, 11932435}; + * TestModel carol{ N(carol), 30, 545342453}; + * TestModel dave { N(dave), 46, 6535354}; + * + * int32_t res = store_i64(CurrentCode(), N(test_table), &dave, sizeof(TestModel)); + * res = store_i64(CurrentCode(), N(test_table), &carol, sizeof(TestModel)); + * res = store_i64(CurrentCode(), N(test_table), &bob, sizeof(TestModel)); + * res = store_i64(CurrentCode(), N(test_table), &alice, sizeof(TestModel)); + * TestModel tmp; + * tmp.name = N(alice); + * res = load_i64( currentCode(), currentCode(), N(test_table), &tmp, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && tmp.name == N(alice) && tmp.age == 20 && tmp.phone == 4234622, "load_i64"); + * + * res = front_i64( currentCode(), currentCode(), N(test_table), &tmp, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && tmp.name == N(alice) && tmp.age == 20 && tmp.phone == 4234622, "front_i64 1"); + * + * res = back_i64( currentCode(), currentCode(), N(test_table), &tmp, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && tmp.name == N(dave) && tmp.age == 46 && tmp.phone == 6535354, "back_i64 2"); + * + * res = previous_i64( currentCode(), currentCode(), N(test_table), &tmp, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && tmp.name == N(carol) && tmp.age == 30 && tmp.phone == 545342453, "carol previous"); + * + * res = next_i64( currentCode(), currentCode(), N(test_table), &tmp, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && tmp.name == N(dave) && tmp.age == 46 && tmp.phone == 6535354, "back_i64 2"); + * + * uint64_t key = N(alice); + * res = remove_i64(currentCode(), N(test_table), &key); + * ASSERT(res == 1, "remove alice"); + * + * TestModel lb; + * lb.name = N(bob); + * res = lower_bound_i64( currentCode(), currentCode(), N(test_table), &lb, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && lb.name == N(bob), "lower_bound_i64 bob" ); + * + * TestModel ub; + * ub.name = N(alice); + * res = upper_bound_i64( currentCode(), currentCode(), N(test_table), &ub, sizeof(TestModel) ); + * ASSERT(res == sizeof(TestModel) && ub.age == 15 && ub.name == N(bob), "upper_bound_i64 bob" ); + * @endcode * @{ */ @@ -83,7 +132,21 @@ extern "C" { int32_t store_i64( AccountName scope, TableName table, const void* data, uint32_t datalen ); /** + * @param scope - the account scope that will be read, must
exist in the transaction scopes list + * @param table - the ID/name of the table within the current scope/code context to modify + * * @return 1 if the record was updated, 0 if no record with key was found + * + * @pre datalen >= sizeof(uint64_t) + * @pre data is a valid pointer to a range of memory at least datalen bytes long + * @pre *((uint64_t*)data) stores the primary key + * @pre scope is declared by the current transaction + * @pre this method is being called from an apply context (not validate or precondition) + * + * @post a record is either created or updated with the given scope and table. + * + * @throw if called with an invalid precondition execution will be aborted + * */ int32_t update_i64( AccountName scope, TableName table, const void* data, uint32_t datalen ); @@ -97,14 +160,76 @@ int32_t update_i64( AccountName scope, TableName table, const void* data, uint32 * @return the number of bytes read or -1 if key was not found */ int32_t load_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record + * @param datalen - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t front_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record + * @param datalen - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t back_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record. Should be initialized with the key to get next record. + * @param datalen - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if key was not found + */ int32_t next_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record. Should be initialized with the key to get previous record. 
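To make the update_i64 preconditions above concrete, here is a minimal illustrative sketch in the style of the @code example earlier in this file. It reuses that example's TestModel struct, N() macro, ASSERT and CurrentCode() helpers, and relies on the documented rule that the first 8 bytes of the buffer carry the primary key; the values are made up.

```
// TestModel is packed and begins with a 64-bit AccountName, so passing &rec
// satisfies the precondition *((uint64_t*)data) == primary key.
TestModel rec{ N(alice), 20, 4234622 };
int32_t res = store_i64( CurrentCode(), N(test_table), &rec, sizeof(TestModel) );

rec.age = 21;   // change a non-key field, keep the key untouched
res = update_i64( CurrentCode(), N(test_table), &rec, sizeof(TestModel) );
ASSERT( res == 1, "expected the row keyed by N(alice) to be updated" );

rec.name = N(nobody);   // a key that was never stored
res = update_i64( CurrentCode(), N(test_table), &rec, sizeof(TestModel) );
ASSERT( res == 0, "no record with this key, so nothing to update" );
```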
+ * @param datalen - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if key was not found + */ int32_t previous_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound. Should be initialized with the key to find lower bound of. + * @param datalen - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if key was not found + */ int32_t lower_bound_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound. Should be initialized with the key to find upper bound of. + * @param datalen - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if key was not found + */ int32_t upper_bound_i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t datalen ); /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query * @param data - must point to at lest 8 bytes containing primary key * * @return 1 if a record was removed, and 0 if no record with key was found @@ -145,39 +270,260 @@ int32_t remove_i64( AccountName scope, TableName table, void* data ); * secondary values are allowed so long as there are no duplicates of the combination {primary, secondary}.
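A brief illustrative sketch of that uniqueness rule, borrowing the TestModel128x2 layout from the example just below (the values are made up): rows may share a primary key as long as the {primary, secondary} pair is new, and re-storing an existing pair updates it instead of inserting.

```
// number is the primary key, price the secondary key (see TestModel128x2 below)
TestModel128x2 row_a{ 7, 100, N(first), N(table_name) };
TestModel128x2 row_b{ 7, 200, N(second), N(table_name) };   // same primary, new secondary

int32_t res = store_i128i128( CurrentCode(), N(table_name), &row_a, sizeof(TestModel128x2) );
ASSERT( res == 1, "new {7,100} combination, a row is created" );

res = store_i128i128( CurrentCode(), N(table_name), &row_b, sizeof(TestModel128x2) );
ASSERT( res == 1, "{7,200} is also new, so a second row is created" );

res = store_i128i128( CurrentCode(), N(table_name), &row_a, sizeof(TestModel128x2) );
ASSERT( res == 0, "the {7,100} pair already exists, so the existing row is updated" );
```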
* * @see Table class in C++ API - * + * + * Example + * @code + * struct TestModel128x2 { + * uint128_t number; + * uint128_t price; + * uint64_t extra; + * uint64_t table_name; + * }; + * + * TestModel128x2 alice{0, 500, N(alice), N(table_name)}; + * TestModel128x2 bob{1, 1000, N(bob), N(table_name)}; + * TestModel128x2 carol{2, 1500, N(carol), N(table_name)}; + * TestModel128x2 dave{3, 2000, N(dave), N(table_name)}; + * int32_t res = store_i128i128(CurrentCode(), N(table_name), &alice, sizeof(TestModel128x2)); + * res = store_i128i128(CurrentCode(), N(table_name), &bob, sizeof(TestModel128x2)); + * ASSERT(res == 1, "db store failed"); + * res = store_i128i128(CurrentCode(), N(table_name), &carol, sizeof(TestModel128x2)); + * ASSERT(res == 1, "db store failed"); + * res = store_i128i128(CurrentCode(), N(table_name), &dave, sizeof(TestModel128x2)); + * ASSERT(res == 1, "db store failed"); + * + * TestModel128x2 query; + * query.number = 0; + * res = load_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 0 && query.price == 500 && query.extra == N(alice), "load"); + * + * res = front_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 3 && query.price == 2000 && query.extra == N(dave), "front"); + * + * res = next_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 2 && query.price == 1500 && query.extra == N(carol), "next"); + * + * res = back_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 0 && query.price == 500 && query.extra == N(alice), "back"); + * + * res = previous_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 1 && query.price == 1000 && query.extra == N(bob), "previous"); + * + * query.number = 0; + * res = lower_bound_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 0 && query.price == 500 && query.extra == N(alice), "lower"); + * + * res = upper_bound_primary_i128i128(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 1 && query.price == 1000 && query.extra == N(bob), "upper"); + * + * query.extra = N(bobby); + * res = update_i128i128(CurrentCode(), N(table_name), &query, sizeof(TestModel128x2)); + * ASSERT(res == sizeof(TestModel128x2) && query.number == 1 && query.price == 1000 && query.extra == N(bobby), "update"); + * + * res = remove_i128i128(CurrentCode(), N(table_name), &query); + * ASSERT(res == 1, "remove"); + * @endcode + * + * @{ */ +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - the code which owns the table + * @param table - the ID/name of the table within the current scope/code context to modify + * @param data - location to copy the record, must be initialized with the primary key to load + * @param len - length of record to copy + * @return the number of bytes read, -1 if key was not found + * + * @pre len >= sizeof(uint128_t) + * @pre data is a valid pointer to a
range of memory at least datalen bytes long + * @pre *((uint128_t*)data) stores the primary key + * @pre scope is declared by the current transaction + * @pre this method is being called from an apply context (not validate or precondition) + * + * @post data will be initialized with the len bytes of record matching the key. + * + * @throw if called with an invalid precondition execution will be aborted + * + */ int32_t load_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record of primary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t front_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record of primary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t back_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record of primary key; must be initialized with a key. + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t next_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record of primary key; must be initialized with a key. + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t previous_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound of a primary key; must be initialized with a key. 
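The front/next calls documented here compose into a simple scan over the primary index. A hedged sketch, reusing the TestModel128x2 struct and helpers from the example above; the visiting order is whatever the primary index defines:

```
// Visit every row once: start from the front record of the primary index and
// keep requesting the next one until the call reports -1 (no more records).
TestModel128x2 cursor;
int32_t bytes = front_primary_i128i128( CurrentCode(), CurrentCode(),
                                        N(table_name), &cursor, sizeof(cursor) );
while( bytes == sizeof(cursor) ) {
   // cursor now holds one full row; cursor.number is its primary key
   bytes = next_primary_i128i128( CurrentCode(), CurrentCode(),
                                  N(table_name), &cursor, sizeof(cursor) );
}
```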
+ * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t upper_bound_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound of a primary key; must be initialized with a key. + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t lower_bound_primary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - the code which owns the table + * @param table - the ID/name of the table within the current scope/code context to modify + * @param data - location to copy the record, must be initialized with the secondary key to load + * @param len - length of record to copy + * @return the number of bytes read, -1 if key was not found + * + * @pre len >= sizeof(uint128_t) + * @pre data is a valid pointer to a range of memory at least datalen bytes long + * @pre *((uint128_t*)data) stores the secondary key + * @pre scope is declared by the current transaction + * @pre this method is being called from an apply context (not validate or precondition) + * + * @post data will be initialized with the len bytes of record matching the key. 
+ * + * @throw if called with an invalid precondition execution will be aborted + * + */ int32_t load_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record of secondary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t front_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record of secondary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t back_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record of secondary key; must be initialized with a key. + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t next_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record of secondary key; must be initialized with a key. + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t previous_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound of given secondary key; must be initialized with a key. 
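The secondary-index variants follow the same calling pattern, just ordered by the second uint128_t. A small illustrative sketch (TestModel128x2 as above) that walks the table through the secondary (price) index and keeps the largest price seen:

```
// front_secondary_i128i128 needs no key; every later step is keyed by the
// row currently held in cursor, as the @param notes above require.
TestModel128x2 cursor;
uint128_t max_price = 0;
int32_t bytes = front_secondary_i128i128( CurrentCode(), CurrentCode(),
                                          N(table_name), &cursor, sizeof(cursor) );
while( bytes == sizeof(cursor) ) {
   if( cursor.price > max_price ) max_price = cursor.price;
   bytes = next_secondary_i128i128( CurrentCode(), CurrentCode(),
                                    N(table_name), &cursor, sizeof(cursor) );
}
```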
+ * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t upper_bound_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound of given secondary key; must be initialized with a key. + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t lower_bound_secondary_i128i128( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query * @param data - must point to at lest 32 bytes containing {primary,secondary} * * @return 1 if a record was removed, and 0 if no record with key was found */ int32_t remove_i128i128( AccountName scope, TableName table, const void* data ); /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must point to at least 32 bytes containing (primary, secondary) + * @param len - the length of the data + * @return 1 if a new record was created, 0 if an existing record was updated */ int32_t store_i128i128( AccountName scope, TableName table, const void* data, uint32_t len ); /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must point to at least 32 bytes containing (primary, secondary) + * @param len - the length of the data * @return 1 if the record was updated, 0 if no record with key was found */ int32_t update_i128i128( AccountName scope, TableName table, const void* data, uint32_t len ); @@ -186,7 +532,7 @@ int32_t update_i128i128( AccountName scope, TableName table, const void* data, u /** * @defgroup dbi64i64i64 Triple 64 bit Index Table - * @brief Interface to a database table with 64 bit primary, secondary and tertiary keys and arbitary binary data value. + * @brief Interface to a database table with 64 bit primary, secondary and tertiary keys and arbitrary binary data value. * @ingroup databaseC * * @param scope - the account where table data will be found @@ -210,51 +556,332 @@ int32_t update_i128i128( AccountName scope, TableName table, const void* data, u * * ``` * - * You can iterate over these indicies with primary index sorting records by { primary, secondary, tertiary }, + * You can iterate over these indices with primary index sorting records by { primary, secondary, tertiary }, * the secondary index sorting records by { secondary, tertiary } and the tertiary index sorting records by * { tertiary }.
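To make those three orderings concrete, a short illustrative sketch (the struct shape matches the TestModel3xi64 example that follows; no particular traversal direction is assumed): the same stored rows can be entered through any of the three indices via their respective front calls.

```
// a is the primary key, b the secondary key, c the tertiary key
TestModel3xi64 row{ 10, 20, 30, N(acct) };
int32_t res = store_i64i64i64( CurrentCode(), N(table_name), &row, sizeof(row) );
ASSERT( res == 1, "store" );

// Each index has its own front record: sorted by {primary,secondary,tertiary},
// by {secondary,tertiary}, or by {tertiary} respectively.
TestModel3xi64 first_by_primary, first_by_secondary, first_by_tertiary;
front_primary_i64i64i64(   CurrentCode(), CurrentCode(), N(table_name),
                           &first_by_primary,   sizeof(first_by_primary) );
front_secondary_i64i64i64( CurrentCode(), CurrentCode(), N(table_name),
                           &first_by_secondary, sizeof(first_by_secondary) );
front_tertiary_i64i64i64(  CurrentCode(), CurrentCode(), N(table_name),
                           &first_by_tertiary,  sizeof(first_by_tertiary) );
```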
* * @see Table class in C++ API - * + * + * Example + * @code + * struct TestModel3xi64 { + * uint64_t a; + * uint64_t b; + * uint64_t c; + * uint64_t name; + * }; + * + * TestModel3xi64 alice{ 0, 0, 0, N(alice) }; + * TestModel3xi64 bob{ 1, 1, 1, N(bob) }; + * TestModel3xi64 carol{ 2, 2, 2, N(carol) }; + * TestModel3xi64 dave{ 3, 3, 3, N(dave) }; + * + * int32_t res = store_i64i64i64(CurrentCode(), N(table_name), &alice, sizeof(TestModel3xi64)); + * res = store_i64i64i64(CurrentCode(), N(table_name), &bob, sizeof(TestModel3xi64)); + * res = store_i64i64i64(CurrentCode(), N(table_name), &carol, sizeof(TestModel3xi64)); + * res = store_i64i64i64(CurrentCode(), N(table_name), &dave, sizeof(TestModel3xi64)); + * + * TestModel3xi64 query; + * query.a = 0; + * res = load_primary_i64i64i64(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel3xi64)); + * ASSERT(res == sizeof(TestModel3xi64) && query.name == N(alice), "load"); + * + * res = front_primary_i64i64i64(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel3xi64)); + * ASSERT(res == sizeof(TestModel3xi64) && query.name == N(dave), "front"); + * + * res = back_primary_i64i64i64(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel3xi64)); + * ASSERT(res == sizeof(TestModel3xi64) && query.name == N(alice), "back"); + * + * res = previous_primary_i64i64i64(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel3xi64)); + * ASSERT(res == sizeof(TestModel3xi64) && query.name == N(bob), "previous"); + * + * res = next_primary_i64i64i64(CurrentCode(), CurrentCode(), N(table_name), &query, sizeof(TestModel3xi64)); + * ASSERT(res == sizeof(TestModel3xi64) && query.name == N(alice), "next");* + * + * @endcode * @{ */ +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - the code which owns the table + * @param table - the ID/name of the table within the current scope/code context to modify + * @param data - location to copy the record, must be initialized with the (primary,secondary,tertiary) to load + * @param len - length of record to copy + * @return the number of bytes read, -1 if key was not found + * + * @pre data is a valid pointer to a range of memory at least len bytes long + * @pre *((uint64_t*)data) stores the primary key + * @pre scope is declared by the current transaction + * @pre this method is being called from an apply context (not validate or precondition) + * + * @post data will be initialized with the len bytes of record matching the key. 
+ * + * @throw if called with an invalid precondition execution will be aborted + * + */ int32_t load_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record of primary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t front_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record of primary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t back_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record of primary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t next_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record of primary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t previous_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound of a primary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t upper_bound_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + 
* @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound of primary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t lower_bound_primary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - the code which owns the table + * @param table - the ID/name of the table within the current scope/code context to modify + * @param data - location to copy the record, must be initialized with the (secondary,tertiary) to load + * @param len - length of record to copy + * @return the number of bytes read, -1 if key was not found + * + * @pre data is a valid pointer to a range of memory at least len bytes long + * @pre *((uint64_t*)data) stores the secondary key + * @pre scope is declared by the current transaction + * @pre this method is being called from an apply context (not validate or precondition) + * + * @post data will be initialized with the len bytes of record matching the key. + * + * @throw if called with an invalid precondition execution will be aborted + * + */ int32_t load_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record of a secondary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t front_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record of secondary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t back_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t next_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the 
ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t previous_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound of tertiary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t upper_bound_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound of secondary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t lower_bound_secondary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - the code which owns the table + * @param table - the ID/name of the table within the current scope/code context to modify + * @param data - location to copy the record, must be initialized with the (tertiary) to load + * @param len - length of record to copy + * @return the number of bytes read, -1 if key was not found + * + * @pre data is a valid pointer to a range of memory at least len bytes long + * @pre *((uint64_t*)data) stores the tertiary key + * @pre scope is declared by the current transaction + * @pre this method is being called from an apply context (not validate or precondition) + * + * @post data will be initialized with the len bytes of record matching the key. 
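Taking the precondition above literally (the leading 8 bytes of the buffer carry the tertiary key), a hedged sketch of a lookup through the tertiary index; the field names come from the TestModel3xi64 example earlier in this group and the key value is made up:

```
// The buffer's leading uint64_t must hold the tertiary key being looked up,
// so the key is written into the first field before the call.
TestModel3xi64 q;
q.a = 30;   // 30 = the tertiary key value to find
int32_t res = load_tertiary_i64i64i64( CurrentCode(), CurrentCode(),
                                       N(table_name), &q, sizeof(q) );
ASSERT( res == sizeof(q) && q.c == 30, "expected the row whose tertiary key is 30" );
```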
+ * + * @throw if called with an invalid precondition execution will be aborted + * + */ int32_t load_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record of a tertiary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t front_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record of a tertiary key + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t back_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t next_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t previous_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound of tertiary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t upper_bound_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); + +/** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - 
the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound of tertiary key; must be initialized with a key value + * @param len - the maximum length of data to read, must be greater than sizeof(uint64_t) + * + * @return the number of bytes read or -1 if no record found + */ int32_t lower_bound_tertiary_i64i64i64( AccountName scope, AccountName code, TableName table, void* data, uint32_t len ); /** - * @param data - must point to at lest 24 bytes containing {primary,secondary,tertiary} + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the name of table where record is stored + * @param data - must point to at least 24 bytes containing {primary,secondary,tertiary} * * @return 1 if a record was removed, and 0 if no record with key was found */ int32_t remove_i64i64i64( AccountName scope, TableName table, const void* data ); /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the name of table where record is stored + * @param data - must point to at least 24 bytes containing (primary,secondary,tertiary) + * @param len - length of the data * @return 1 if a new record was created, 0 if an existing record was updated */ int32_t store_i64i64i64( AccountName scope, TableName table, const void* data, uint32_t len ); /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the name of table where record is stored + * @param data - must point to at least 24 bytes containing (primary,secondary,tertiary) + * @param len - length of the data * @return 1 if the record was updated, 0 if no record with key was found */ int32_t update_i64i64i64( AccountName scope, TableName table, const void* data, uint32_t len ); diff --git a/contracts/eoslib/db.hpp b/contracts/eoslib/db.hpp index a66ca6f200b041fe67ed1519d90bb2c1d3de426b..34faa6ea04e403dd4da80bb6971c9936e91662fc 100644 --- a/contracts/eoslib/db.hpp +++ b/contracts/eoslib/db.hpp @@ -6,97 +6,229 @@ /** * @ingroup databaseCpp -struct Db -{ - - template - static bool get( TableName table, uint64_t key, T& value ){ - return get( currentCode(), table, key, value ); - } - - template - static bool get( AccountName scope, TableName table, uint64_t key, T& value ){ - return get( scope, currentCode(), table, key, value ); - } - - template - static bool get( AccountName scope, AccountName code, TableName table, uint64_t key, T& result ) { - auto read = load_i64( scope, code, table, key, &result, sizeof(result) ); - return read > 0; - } - - template - static int32_t store( TableName table, uint64_t key, const T& value ) { - return store( currentCode(), key, value ); - } - - template - static int32_t store( Name scope, TableName table, const T& value ) { - return store_i64( scope, table, &value, sizeof(value) ); - } - - static bool remove( Name scope, TableName table, uint64_t key ) { - return remove_i64( scope, table, key ); - } -}; + * Cpp implementation of database API. It is based on pimpl idiom. 
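As a reading aid for how these specializations are meant to be used, here is a small hedged sketch: generic code can be written purely against the static interface that every table_impl specialization below exposes, and each call then forwards to the matching C function family. TableImpl and find_by_primary are illustrative names for this sketch, not part of eoslib.

```
// Works with any table_impl specialization, since they all expose
// static load_primary( scope, code, table, data, len ).
template<typename TableImpl, typename Record>
bool find_by_primary( uint64_t scope, uint64_t code, uint64_t table, Record& out ) {
   // `out` must already carry the primary key, as load_primary requires
   int32_t bytes = TableImpl::load_primary( scope, code, table, &out, sizeof(out) );
   return bytes == sizeof(out);
}
```

Instantiated with the 128-bit specialization below this forwards straight to load_primary_i128i128, and with the 64-bit one to load_i64.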
+ * @see Table class and table_impl class */ - template struct table_impl{}; template<> struct table_impl { + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record based on primary index. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t front_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return front_primary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record based on primary index. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t back_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return back_primary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the data; must be initialized with the primary key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t load_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return load_primary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the data; must be initialized with the primary key to fetch next record. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t next_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return next_primary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the record; must be initialized with the primary key to fetch previous record. 
+ * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t previous_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return previous_primary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the record. must be initialized with the primary key to fetch upper bound of. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t upper_bound_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return upper_bound_primary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the record. must be initialized with the primary key to fetch lower bound of. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t lower_bound_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return lower_bound_primary_i128i128( scope, code, table, data, len ); } + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record based on secondary index. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t front_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return front_secondary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record based on secondary index. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t back_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return back_secondary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the record based on secondary index; must be initialized with secondary key. 
+ * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t load_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return load_secondary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record based on secondary index; must be initialized with a key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t next_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return next_secondary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record; must be initialized with a key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t previous_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return previous_secondary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound record; must be initialized with a key to find the upper bound. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t upper_bound_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return upper_bound_secondary_i128i128( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound record; must be initialized with a key to find the lower bound. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t lower_bound_secondary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return lower_bound_secondary_i128i128( scope, code, table, data, len ); } + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must be initialized with the key (primary, secondary) of the record to remove. 
+ * + * @return 1 if remove successful else -1 + */ static int32_t remove( uint64_t scope, uint64_t table, const void* data ) { return remove_i128i128( scope, table, data ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must be initialized with the data to be stored. + * @param len - the maximum length of data to be be stored. + * + * @return 1 if store successful else -1 + */ static int32_t store( AccountName scope, TableName table, const void* data, uint32_t len ) { return store_i128i128( scope, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must be initialized with the data to be updated. + * @param len - the maximum length of data to be updated. + * + * @return 1 if update successful else -1 + */ static int32_t update( AccountName scope, TableName table, const void* data, uint32_t len ) { return update_i128i128( scope, table, data, len ); } @@ -104,34 +236,128 @@ struct table_impl { template<> struct table_impl { + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the front record based on primary index (only index). + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t front_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return front_i64( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the back record based on primary index (only index). + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t back_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return back_i64( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the record; must be initialized with a key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t load_primary( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return load_i64( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the next record; must be initialized with a key. 
+ * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t next( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return next_i64( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the previous record; must be initialized with a key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t previous( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return previous_i64( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the lower bound; must be initialized with a key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t lower_bound( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return lower_bound_i64( scope, code, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param code - identifies the code that controls write-access to the data + * @param table - the ID/name of the table within the scope/code context to query + * @param data - location to copy the upper bound; must be initialized with a key. + * @param len - the maximum length of data to read + * + * @return the number of bytes read or -1 if no record found + */ static int32_t upper_bound( uint64_t scope, uint64_t code, uint64_t table, void* data, uint32_t len ) { return upper_bound_i64( scope, code, table, data, len ); } + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must be initialized with the key of record to remove from table. + * + * @return 1 if successful else -1 + */ static int32_t remove( uint64_t scope, uint64_t table, const void* data ) { return remove_i64( scope, table, (uint64_t*)data); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must be initialized with the data to be stored. + * @param len - the maximum length of data + * + * @return 1 if successful else -1 + */ static int32_t store( AccountName scope, TableName table, const void* data, uint32_t len ) { return store_i64( scope, table, data, len ); } + + /** + * @param scope - the account scope that will be read, must exist in the transaction scopes list + * @param table - the ID/name of the table within the scope/code context to query + * @param data - must be initialized with the data to be updated. 
+ * @param len - the maximum length of data + * + * @return 1 if successful else -1 + */ static int32_t update( AccountName scope, TableName table, const void* data, uint32_t len ) { return update_i64( scope, table, data, len ); } @@ -149,8 +375,71 @@ struct table_impl { * @tparam PrimaryType - the type of the first field stored in @ref Record * @tparam SecondaryType - the type of the second field stored in @ref Record * - * The primary and secondary indicies are sorted as N-bit unsigned integers from lowest to highest. + * The primary and secondary indices are sorted as N-bit unsigned integers from lowest to highest. + * + * @code + * struct Model { + * uint64_t primary; + * uint64_t secondary; + * uint64_t value; + * }; + * + * typedef Table MyTable; + * Model a { 1, 11, N(first) }; + * Model b { 2, 22, N(second) }; + * Model c { 3, 33, N(third) }; + * Model d { 4, 44, N(fourth) }; + * + * bool res = MyTable::store(a); + * ASSERT(res, "store"); + * + * res = MyTable::store(b); + * ASSERT(res, "store"); + * + * res = MyTable::store(c); + * ASSERT(res, "store"); + * + * res = MyTable::store(d); + * ASSERT(res, "store"); * + * Model query; + * res = MyTable::PrimaryIndex::get(1, query); + * ASSERT(res && query.primary == 1 && query.value == N(first), "first"); + * + * res = MyTable::PrimaryIndex::front(query); + * ASSERT(res && query.primary == 4 && query.value == N(fourth), "front"); + * + * res = MyTable::PrimaryIndex::back(query); + * ASSERT(res && query.primary == 1 && query.value == N(first), "back"); + * + * res = MyTable::PrimaryIndex::previous(query); + * ASSERT(res && query.primary == 2 && query.value == N(second), "previous"); + * + * res = MyTable::PrimaryIndex::next(query); + * ASSERT(res && query.primary == 1 && query.value == N(first), "first"); + * + * res = MyTable::SecondaryIndex::get(11, query); + * ASSERT(res && query.secondary == 11 && query.value == N(first), "first"); + * + * res = MyTable::SecondaryIndex::front(query); + * ASSERT(res && query.secondary == 44 && query.value == N(fourth), "front"); + * + * res = MyTable::SecondaryIndex::back(query); + * ASSERT(res && query.secondary == 11 && query.value == N(first), "back"); + * + * res = MyTable::SecondaryIndex::previous(query); + * ASSERT(res && query.secondary == 22 && query.value == N(second), "previous"); + * + * res = MyTable::SecondaryIndex::next(query); + * ASSERT(res && query.secondary == 11 && query.value == N(first), "first"); + * + * res = MyTable::remove(query); + * ASSERT(res, "remove"); + * + * res = MyTable::get(query); + * ASSERT(!res, "not found already removed"); + * + * @endcode * @ingroup databaseCpp */ template @@ -163,94 +452,299 @@ struct Table { typedef PrimaryType Primary; typedef SecondaryType Secondary; + /** + * @defgroup defines the primary index + */ struct PrimaryIndex { - + /** + * @param r - reference to a record to store the front record based on primary index. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ static bool front( Record& r, uint64_t s = scope ) { return impl::front_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param r - reference to a record to store the back record based on primary index. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read.
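+ *
+ * Illustrative sketch only (not from the original source); it assumes the
+ * Model struct and MyTable typedef from the class-level example above:
+ * @code
+ * Model rec;
+ * if( MyTable::PrimaryIndex::back( rec ) ) {
+ *    // rec now holds the back record of the primary index
+ * }
+ * @endcode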
+ */ static bool back( Record& r, uint64_t s = scope ) { return impl::back_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param r - reference to a record to store next value; must be initialized with current. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ static bool next( Record& r, uint64_t s = scope ) { return impl::next_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param r - reference to a record to store previous value; must be initialized with current. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ static bool previous( Record& r, uint64_t s = scope ) { return impl::previous_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param p - reference to primary key to load; must be initialized with a value; + * @param r - reference to a record to load the value to. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ static bool get( const PrimaryType& p, Record& r, uint64_t s = scope ) { *reinterpret_cast(&r) = p; return impl::load_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param p - reference to primary key to get the lower bound of; must be initialized with a value; + * @param r - reference to a record to load the value to. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ static bool lower_bound( const PrimaryType& p, Record& r ) { return impl::lower_bound_primary( scope, code, table, &p &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param p - reference to primary key to get the upper bound of; must be initialized with a value; + * @param r - reference to a record to load the value to. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. 
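+ *
+ * A minimal sketch of the intended usage (hypothetical; reuses the Model and
+ * MyTable names from the class-level example, with uint64_t primary keys):
+ * @code
+ * Model rec;
+ * uint64_t key = 2;
+ * if( MyTable::PrimaryIndex::upper_bound( key, rec ) ) {
+ *    // rec receives the upper-bound record for key on the primary index
+ * }
+ * @endcode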
+ */ static bool upper_bound( const PrimaryType& p, Record& r ) { return impl::upper_bound_primary( scope, code, table, &p &r, sizeof(Record) ) == sizeof(Record); } - static bool remove( const Record& r, uint64_t s = scope ) { - return impl::remove( s, table, &r ) != 0; - } - }; - struct SecondaryIndex { - static bool front( Record& r, uint64_t s = scope ) { - return impl::front_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); - } - static bool back( Record& r, uint64_t s = scope ) { - return impl::back_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); - } - static bool next( Record& r, uint64_t s = scope ) { - return impl::next_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); - } - static bool previous( Record& r, uint64_t s = scope ) { - return impl::previous_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); - } - static bool get( const SecondaryType& p, Record& r, uint64_t s = scope ) { - return impl::load_secondary( s, code, table, &p &r, sizeof(Record) ) == sizeof(Record); - } - static bool lower_bound( const SecondaryType& p, Record& r, uint64_t s = scope ) { - return impl::lower_bound_secondary( s, code, table, &p &r, sizeof(Record) ) == sizeof(Record); - } - static bool upper_bound( const SecondaryType& p, Record& r, uint64_t s = scope ) { - return impl::upper_bound_secondary( s, code, table, &p &r, sizeof(Record) ) == sizeof(Record); - } + /** + * @param r - reference to a record to remove from table; + * @param s - account scope. default is current scope of the class + * + * @return true if successfully removed; + */ static bool remove( const Record& r, uint64_t s = scope ) { return impl::remove( s, table, &r ) != 0; } }; + /** + * @defgroup defines the secondary index + */ + struct SecondaryIndex { + /** + * @param r - reference to a record to store the front record based on secondary index. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + static bool front( Record& r, uint64_t s = scope ) { + return impl::front_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param r - reference to a record to store the back record based on secondary index. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + static bool back( Record& r, uint64_t s = scope ) { + return impl::back_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param r - reference to a record to return the next record. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + static bool next( Record& r, uint64_t s = scope ) { + return impl::next_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param r - reference to a record to return the previous record. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + static bool previous( Record& r, uint64_t s = scope ) { + return impl::previous_secondary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param p - reference to secondary index key + * @param r - reference to record to hold the value + * @param s - account scope. default is current scope of the class + * + * @return true if successful read.
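+ *
+ * Hypothetical usage sketch (assumes the Model/MyTable definitions from the
+ * class-level example, where the secondary key type is uint64_t):
+ * @code
+ * Model rec;
+ * if( MyTable::SecondaryIndex::get( 22, rec ) ) {
+ *    // rec now holds the record whose secondary key is 22
+ * }
+ * @endcode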
+ */ + static bool get( const SecondaryType& p, Record& r, uint64_t s = scope ) { + return impl::load_secondary( s, code, table, &p &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param p - reference to secondary key to get the lower bound of; must be initialized with a value; + * @param r - reference to a record to load the value to. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + + static bool lower_bound( const SecondaryType& p, Record& r, uint64_t s = scope ) { + return impl::lower_bound_secondary( s, code, table, &p &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param p - reference to secondary key to get the upper bound of; must be initialized with a value; + * @param r - reference to a record to load the value to. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + static bool upper_bound( const SecondaryType& p, Record& r, uint64_t s = scope ) { + return impl::upper_bound_secondary( s, code, table, &p &r, sizeof(Record) ) == sizeof(Record); + } + + /** + * @param r - reference to a record to be removed. + * @param s - account scope. default is current scope of the class + * + * @return true if successfully removed. + */ + static bool remove( const Record& r, uint64_t s = scope ) { + return impl::remove( s, table, &r ) != 0; + } + }; + + /** + * @param p - reference to primary key to retrieve + * @param r - reference to a record to load the value to. + * @param s - account scope. default is current scope of the class + * + * @return true if successful read. + */ + + static bool get( const PrimaryType& p, Record& r, uint64_t s = scope ) { + *reinterpret_cast(&r) = p; + return impl::load_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); + } - static bool get( const PrimaryType& p, Record& r, uint64_t s = scope ) { - *reinterpret_cast(&r) = p; - return impl::load_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); - } - - static bool store( const Record& r, uint64_t s = scope ) { - assert( impl::store( s, table, &r, sizeof(r) ), "error storing record" ); - return true; - } - static bool update( const Record& r, uint64_t s = scope ) { - assert( impl::update( s, table, &r, sizeof(r) ), "error updating record" ); - return true; - } - static bool remove( const Record& r, uint64_t s = scope ) { - return impl::remove( s, table, &r ) != 0; - } -}; + /** + * @param r - reference to a record to store. + * @param s - account scope. default is current scope of the class + * + * @return true if successful store. + */ + static bool store( const Record& r, uint64_t s = scope ) { + assert( impl::store( s, table, &r, sizeof(r) ), "error storing record" ); + return true; + } + /** + * @param r - reference to a record to update. + * @param s - account scope. default is current scope of the class + * + * @return true if successful update. 
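+ *
+ * Sketch of a typical read-modify-write (illustrative only; Model/MyTable as
+ * in the class-level example):
+ * @code
+ * Model rec;
+ * if( MyTable::PrimaryIndex::get( 1, rec ) ) {
+ *    rec.value = N(updated);   // keys unchanged, payload updated
+ *    MyTable::update( rec );
+ * }
+ * @endcode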
+ */ + static bool update( const Record& r, uint64_t s = scope ) { + assert( impl::update( s, table, &r, sizeof(r) ), "error updating record" ); + return true; + } -/** - * @brief this specialization of Table is for single-index tables - * - * @tparam scope - the default account name scope that this table is located within - * @tparam code - the code account name which has write permission to this table - * @tparam table - a unique identifier (name) for this table - * @tparam Record - the type of data stored in each row - * @tparam PrimaryType - the type of the first field stored in @ref Record - * - * @ingroup databaseCpp - */ -template + /** + * @param r - reference to a record to remove. + * @param s - account scope. default is current scope of the class + * + * @return true if successful remove. + */ + static bool remove( const Record& r, uint64_t s = scope ) { + return impl::remove( s, table, &r ) != 0; + } + }; + + + /** + * @brief this specialization of Table is for single-index tables + * + * @tparam scope - the default account name scope that this table is located within + * @tparam code - the code account name which has write permission to this table + * @tparam table - a unique identifier (name) for this table + * @tparam Record - the type of data stored in each row + * @tparam PrimaryType - the type of the first field stored in @ref Record + * + * Example + * @code + * + * struct MyModel { + * uint128_t number; + * uint64_t name; + * }; + * + * typedef Table MyTable; + * + * MyModel a { 1, N(one) }; + * MyModel b { 2, N(two) }; + * MyModel c { 3, N(three) }; + * MyModel d { 4, N(four) }; + * + * bool res = MyTable::store(a); + * ASSERT(res, "store"); + * res = MyTable::store(b); + * ASSERT(res, "store"); + * + * res = MyTable::store(c); + * ASSERT(res, "store"); + * + * res = MyTable::store(d); + * ASSERT(res, "store"); + * + * MyModel query; + * res = MyTable::front(query); + * ASSERT(res && query.number == 4 && query.name == N(four), "front"); + * + * res = MyTable::back(query); + * ASSERT(res && query.number == 1 && query.name == N(one), "back"); + * + * res = MyTable::PrimaryIndex::previous(query); + * ASSERT(res && query.number == 2 && query.name == N(two), "previous"); + * + * res = MyTable::PrimaryIndex::next(query); + * ASSERT(res && query.number == 1 && query.name == N(one), "next"); + * + * query.number = 4; + * res = MyTable::get(query); + * ASSERT(res && query.number == 4 && query.name == N(four), "get"); + * + * query.name = N(Four); + * res = MyTable::update(query); + * ASSERT(res && query.number == 4 && query.name == N(Four), "update"); + * + * res = MyTable::remove(query); + * ASSERT(res, "remove"); + * + * res = MyTable::get(query); + * ASSERT(!res, "get of removed record"); + * + * @endcode + * @ingroup databaseCpp + */ + + template struct Table { private: typedef table_impl impl; @@ -258,65 +752,152 @@ struct Table { public: typedef PrimaryType Primary; - + /** + * @defgroup Primary Index of table + */ struct PrimaryIndex { + /** + * @param r - reference to a record to store the front. + * + * @return true if successfully retrieved the front of the table. + */ static bool front( Record& r ) { return impl::front_primary( scope, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param r - reference to a record to store the back. + * + * @return true if successfully retrieved the back of the table.
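+ *
+ * Minimal sketch (assumes the MyModel struct and MyTable typedef from the
+ * class-level example above):
+ * @code
+ * MyModel rec;
+ * if( MyTable::PrimaryIndex::back( rec ) ) {
+ *    // rec holds the record at the back of the table
+ * }
+ * @endcode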
+ */ static bool back( Record& r ) { return impl::back_primary( scope, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param r - reference to store the next record. Must be initialized with a key. + * + * @return true if successfully retrieved the next record. + */ static bool next( Record& r ) { return impl::next_primary( scope, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param r - reference to store previous record. Must be initialized with a key. + * + * @return true if successfully retrieved the previous record. + */ static bool previous( Record& r ) { return impl::previous_primary( scope, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param p - reference to the primary key to retrieve the record. + * @param r - reference to hold the result of the query. + * @param s - scope; defaults to scope of the class. + * @return true if successfully retrieved the record. + */ static bool get( const PrimaryType& p, Record& r, uint64_t s = scope ) { *reinterpret_cast(&r) = p; return impl::load_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + + /** + * @param p - reference to the primary key to retrieve the lower bound. + * @param r - reference to hold the result of the query. + * @return true if successfully retrieved the record. + */ static bool lower_bound( const PrimaryType& p, Record& r ) { return impl::lower_bound_primary( scope, code, table, &p &r, sizeof(Record) ) == sizeof(Record); } - static bool upper_bound( const PrimaryType& p, Record& r ) { + + /** + * @param p - reference to the primary key to retrieve the upper bound. + * @param r - reference to hold the result of the query. + * @return true if successfully retrieved the record. + */ + static bool upper_bound( const PrimaryType& p, Record& r ) { return impl::upper_bound_primary( scope, code, table, &p &r, sizeof(Record) ) == sizeof(Record); } - static bool remove( const Record& r ) { + + /** + * @param r - reference to record to be removed. + * @return true if successfully removed. + */ + static bool remove( const Record& r ) { return impl::remove( scope, table, &r ) != 0; } }; - static bool front( Record& r ) { return PrimaryIndex::front(r); } - static bool back( Record& r ) { return PrimaryIndex::back(r); } - + /** + * fetches the front of the table + * @param r - reference to hold the value + * @return true if successfully retrieved the front + */ + static bool front( Record& r ) { return PrimaryIndex::front(r); } + + /** + * fetches the back of the table + * @param r - reference to hold the value + * @return true if successfully retrieved the back + */ + static bool back( Record& r ) { return PrimaryIndex::back(r); } + + /** + * retrieves the record for the specified primary key + * @param p - the primary key of the record to fetch + * @param r - reference of record to hold return value + * @param s - scope; defaults to scope of the class. + * @return true if get succeeds. + */ static bool get( const PrimaryType& p, Record& r, uint64_t s = scope ) { *reinterpret_cast(&r) = p; return impl::load_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + /** + * retrieves a record based on initialized primary key value + * @param r - reference of a record to hold return value; must be initialized to the primary key to be fetched. + * @param s - scope; defaults to scope of the class. + * @return true if get succeeds. 
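+ *
+ * Illustrative sketch only (MyModel/MyTable as in the class-level example);
+ * the primary key field must be set before the call:
+ * @code
+ * MyModel rec;
+ * rec.number = 3;              // primary key to look up
+ * if( MyTable::get( rec ) ) {
+ *    // rec.name now holds the value stored under key 3
+ * }
+ * @endcode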
+ */ static bool get( Record& r, uint64_t s = scope ) { return impl::load_primary( s, code, table, &r, sizeof(Record) ) == sizeof(Record); } + /** + * store a record to the table + * @param r - the record to be stored. + * @param s - scope; defaults to scope of the class. + * @return true if store succeeds. + */ static bool store( const Record& r, uint64_t s = scope ) { return impl::store( s, table, &r, sizeof(r) ) != 0; } + /** + * update a record in the table. + * @param r - the record to be updated including the updated values (cannot update the index). + * @param s - scope; defaults to scope of the class. + * @return true if update succeeds. + */ static bool update( const Record& r, uint64_t s = scope ) { return impl::update( s, table, &r, sizeof(r) ) != 0; } + /** + * remove a record from the table. + * @param r - the record to be removed. + * @param s - scope; defaults to scope of the class. + * @return true if remove succeeds. + */ static bool remove( const Record& r, uint64_t s = scope ) { return impl::remove( s, table, &r ) != 0; } - }; -/** - * @ingroup databaseCpp - */ #define TABLE2(NAME, SCOPE, CODE, TABLE, TYPE, PRIMARY_NAME, PRIMARY_TYPE, SECONDARY_NAME, SECONDARY_TYPE) \ using NAME = Table; \ typedef NAME::PrimaryIndex PRIMARY_NAME; \ diff --git a/contracts/eoslib/mainpage.md b/contracts/eoslib/mainpage.md index beacaac2e894eb1172c129fc68c1efd0555c304f..6d7c803a2b5df0986fab565d546af2d8942056dc 100644 --- a/contracts/eoslib/mainpage.md +++ b/contracts/eoslib/mainpage.md @@ -3,7 +3,7 @@ Welcome to the eos.io Documentation @note this documentation is under development and may be incorrect. -This documentaiton describes the APIs available to eos.io application developers. This is the best place to +This documentation describes the APIs available to eos.io application developers. This is the best place to start your journey into smart contract development. ## Environment diff --git a/contracts/eoslib/math.h b/contracts/eoslib/math.h index dddb3ca0b1d8d5f497c23ad14dcb00680c92d1ac..24752e7fbb4e86e918ed64f908c43e7b577a5ba2 100644 --- a/contracts/eoslib/math.h +++ b/contracts/eoslib/math.h @@ -5,21 +5,180 @@ extern "C" { /** * @defgroup mathcapi Math C API - * @brief defines builtin math functions + * @brief Defines basic mathematical operations for higher abstractions to use. * @ingroup mathapi + * + * @{ */ + + /** + * Multiply two 128 bit unsigned integers and assign the value to the first parameter. + * @brief Multiply two 128 unsigned bit integers. Throws exception if pointers are invalid. + * @param self Pointer to the value to be multiplied. It will be replaced with the result. + * @param other Pointer to the Value to be multiplied. + * + * Example: + * @code + * uint128_t self(100); + * uint128_t other(100); + * multeq_i128(&self, &other); + * printi128(self); // Output: 10000 + * @endcode + */ void multeq_i128( uint128_t* self, const uint128_t* other ); + /** + * Divide two 128 bit unsigned integers and assign the value to the first parameter. + * It will throw an exception if the value of other is zero. + * @brief Divide two 128 unsigned bit integers and throws an exception in case of invalid pointers + * @param self Pointer to numerator. 
It will be replaced with the result + * @param other Pointer to denominator + * Example: + * @code + * uint128_t self(100); + * uint128_t other(100); + * diveq_i128(&self, &other); + * printi128(self); // Output: 1 + * @endcode + */ void diveq_i128 ( uint128_t* self, const uint128_t* other ); + /** + * Get the result of addition between two double interpreted as 64 bit unsigned integer + * This function will first reinterpret_cast both inputs to double (50 decimal digit precision), add them together, and reinterpret_cast the result back to 64 bit unsigned integer. + * @brief Addition between two double + * @param a Value in double interpreted as 64 bit unsigned integer + * @param b Value in double interpreted as 64 bit unsigned integer + * @return Result of addition reinterpret_cast to 64 bit unsigned integers + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(5), i64_to_double(10) ); + * uint64_t b = double_div( i64_to_double(5), i64_to_double(2) ); + * uint64_t res = double_add( a, b ); + * printd(res); // Output: 3 + * @endcode + */ uint64_t double_add(uint64_t a, uint64_t b); + + /** + * Get the result of multiplication between two double interpreted as 64 bit unsigned integer + * This function will first reinterpret_cast both inputs to double (50 decimal digit precision), multiply them together, and reinterpret_cast the result back to 64 bit unsigned integer. + * @brief Multiplication between two double + * @param a Value in double interpreted as 64 bit unsigned integer + * @param b Value in double interpreted as 64 bit unsigned integer + * @return Result of multiplication reinterpret_cast to 64 bit unsigned integers + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(10), i64_to_double(10) ); + * uint64_t b = double_div( i64_to_double(5), i64_to_double(2) ); + * uint64_t res = double_mult( a, b ); + * printd(res); // Output: 2.5 + * @endcode + */ uint64_t double_mult(uint64_t a, uint64_t b); + + /** + * Get the result of division between two double interpreted as 64 bit unsigned integer + * This function will first reinterpret_cast both inputs to double (50 decimal digit precision), divide numerator with denominator, and reinterpret_cast the result back to 64 bit unsigned integer. + * Throws an error if b is zero (after it is reinterpret_cast to double) + * @brief Division between two double + * @param a Numerator in double interpreted as 64 bit unsigned integer + * @param b Denominator in double interpreted as 64 bit unsigned integer + * @return Result of division reinterpret_cast to 64 bit unsigned integers + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(10), i64_to_double(100) ); + * printd(a); // Output: 0.1 + * @endcode + */ uint64_t double_div(uint64_t a, uint64_t b); - + + /** + * Get the result of less than comparison between two double + * This function will first reinterpret_cast both inputs to double (50 decimal digit precision) before doing the less than comparison. 
+ * @brief Less than comparison between two double + * @param a Value in double interpreted as 64 bit unsigned integer + * @param b Value in double interpreted as 64 bit unsigned integer + * @return 1 if first input is smaller than second input, 0 otherwise + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(10), i64_to_double(10) ); + * uint64_t b = double_div( i64_to_double(5), i64_to_double(2) ); + * uint64_t res = double_lt( a, b ); + * printi(res); // Output: 1 + * @endcode + */ uint32_t double_lt(uint64_t a, uint64_t b); + + /** + * Get the result of equality check between two double + * This function will first reinterpret_cast both inputs to double (50 decimal digit precision) before doing equality check. + * @brief Equality check between two double + * @param a Value in double interpreted as 64 bit unsigned integer + * @param b Value in double interpreted as 64 bit unsigned integer + * @return 1 if first input is equal to second input, 0 otherwise + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(10), i64_to_double(10) ); + * uint64_t b = double_div( i64_to_double(5), i64_to_double(2) ); + * uint64_t res = double_eq( a, b ); + * printi(res); // Output: 0 + * @endcode + */ uint32_t double_eq(uint64_t a, uint64_t b); + + /** + * Get the result of greater than comparison between two double + * This function will first reinterpret_cast both inputs to double (50 decimal digit precision) before doing the greater than comparison. + * @brief Greater than comparison between two double + * @param a Value in double interpreted as 64 bit unsigned integer + * @param b Value in double interpreted as 64 bit unsigned integer + * @return 1 if first input is greater than second input, 0 otherwise + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(10), i64_to_double(10) ); + * uint64_t b = double_div( i64_to_double(5), i64_to_double(2) ); + * uint64_t res = double_gt( a, b ); + * printi(res); // Output: 0 + * @endcode + */ uint32_t double_gt(uint64_t a, uint64_t b); + /** + * Convert double (interpreted as 64 bit unsigned integer) to 64 bit unsigned integer. + * This function will first reinterpret_cast the input to double (50 decimal digit precision) then convert it to double, then reinterpret_cast it to 64 bit unsigned integer. + * @brief Convert double to 64 bit unsigned integer + * @param self Value in double interpreted as 64 bit unsigned integer + * @return Result of conversion in 64 bit unsigned integer + * + * Example: + * @code + * uint64_t a = double_div( i64_to_double(5), i64_to_double(2) ); + * uint64_t res = double_to_i64( a ); + * printi(res); // Output: 2 + * @endcode + */ uint64_t double_to_i64(uint64_t a); + + /** + * Convert 64 bit unsigned integer to double (interpreted as 64 bit unsigned integer). + * This function will convert the input to double (50 decimal digit precision) then reinterpret_cast it to 64 bit unsigned integer. 
+ * @brief Convert 64 bit unsigned integer to double (interpreted as 64 bit unsigned integer) + * @param self Value to be converted + * @return Result of conversion in double (interpreted as 64 bit unsigned integer) + * + * Example: + * @code + * uint64_t res = i64_to_double( 3 ); + * printd(res); // Output: 3 + * @endcode + */ uint64_t i64_to_double(uint64_t a); + /// @} } // extern "C" diff --git a/contracts/eoslib/math.hpp b/contracts/eoslib/math.hpp index 60cfab9380f04b0958ed58d6bee8cbe8cd36baa7..c14d8a1f7a59a3f2b2d23a3fa524a8f5f76aabcf 100644 --- a/contracts/eoslib/math.hpp +++ b/contracts/eoslib/math.hpp @@ -5,7 +5,7 @@ namespace eos { /** * @defgroup mathapi Math API - * @brief Defines common math functions + * @brief Defines common math functions * @ingroup contractdev */ @@ -17,18 +17,47 @@ namespace eos { * @{ */ - /** @brief wraps multeq_i128 from @ref mathcapi */ + /** + * Multiply two 128 bit unsigned integers and assign the value to the first parameter. + * This wraps multeq_i128 from @ref mathcapi. + * @brief wraps multeq_i128 from @ref mathcapi + * @param self Value to be multiplied. It will be replaced with the result + * @param other Value integer to be multiplied. + * + * Example: + * @code + * uint128_t self(100); + * uint128_t other(100); + * multeq(self, other); + * std::cout << self; // Output: 10000 + * @endcode + */ inline void multeq( uint128_t& self, const uint128_t& other ) { multeq_i128( &self, &other ); } - /** @brief wraps diveq_i128 from @ref mathcapi */ + /** + * Divide two 128 bit unsigned integers and assign the value to the first parameter. + * It will throw an exception if other is zero. + * This wraps diveq_i128 from @ref mathcapi + * @brief wraps diveq_i128 from @ref mathcapi + * @param self Numerator. It will be replaced with the result + * @param other Denominator + * + * Example: + * @code + * uint128_t self(100); + * uint128_t other(100); + * diveq(self, other); + * std::cout << self; // Output: 1 + * @endcode + */ inline void diveq( uint128_t& self, const uint128_t& other ) { diveq_i128( &self, &other ); } /** - * @brief a struct that wraps uint128 integer and defines common operator overloads + * @brief A struct that wraps uint128 integer and defines common operator overloads */ struct uint128 { public: @@ -50,20 +79,20 @@ namespace eos { return a.value >= b.value; } - uint128& operator *= ( const uint128_t& other ) { + uint128& operator *= ( const uint128_t& other ) { multeq( value, other ); return *this; } - uint128& operator *= ( const uint128& other ) { + uint128& operator *= ( const uint128& other ) { multeq( value, other.value ); return *this; } - uint128& operator /= ( const uint128_t& other ) { + uint128& operator /= ( const uint128_t& other ) { diveq( value, other ); return *this; } - uint128& operator /= ( const uint128& other ) { + uint128& operator /= ( const uint128& other ) { diveq( value, other.value ); return *this; } @@ -78,15 +107,38 @@ namespace eos { }; /** - * Define similar to std::min() + * Get the smaller of the given values + * @brief Defined similar to std::min() + * @param a Value to compare + * @param b Value to compare + * @return The smaller of a and b. If they are equivalent, returns a + * + * Example: + * @code + * uint128_t a(1); + * uint128_t b(2); + * std::cout << min(a, b); // Output: 1 + * @endcode */ template T min( const T& a, const T&b ) { return a < b ? a : b; } + /** - * Define similar to std::max() - */ + * Get the greater of the given values. 
+ * @brief Defined similar to std::max() + * @param a Value to compare + * @param b Value to compare + * @return The greater of a and b. If they are equivalent, returns a + * + * Example: + * @code + * uint128_t a(1); + * uint128_t b(2); + * std::cout << max(a, b); // Output: 2 + * @endcode + */ template T max( const T& a, const T&b ) { return a > b ? a : b; diff --git a/contracts/eoslib/message.h b/contracts/eoslib/message.h index 58bbbcdb25d00a25d0a490f2476354ead9ce94d7..7e97ca67c087fdf37fbc4ec2336d7ee767bd5c50 100644 --- a/contracts/eoslib/message.h +++ b/contracts/eoslib/message.h @@ -7,69 +7,119 @@ extern "C" { * @ingroup contractdev * @brief Define API for querying message properties * + */ + + /** + * @defgroup messagecapi Message C API + * @ingroup messageapi + * @brief Define API for querying message properties + * + * * A EOS.IO message has the following abstract structure: * * ``` * struct Message { - * Name code; ///< primary account whose code defines the action - * Name action; ///< the name of the action. - * Name recipients[]; ///< accounts whose code will be notified (in addition to code) - * Name authorization[]; ///< accounts that have approved this message - * char data[]; + * AccountName code; // the contract defining the primary code to execute for code/type + * FuncName type; // the action to be taken + * AccountPermission[] authorization; // the accounts and permission levels provided + * Bytes data; // opaque data processed by code * }; * ``` - * + * * This API enables your contract to inspect the fields on the current message and act accordingly. - * - */ - - /** - * @defgroup messagecapi Message C API - * @ingroup messageapi - * @brief Define API for querying message properties + * + * Example: + * @code + * // Assume this message is used for the following examples: + * // { + * // "code": "eos", + * // "type": "transfer", + * // "authorization": [{ "account": "inita", "permission": "active" }], + * // "data": { + * // "from": "inita", + * // "to": "initb", + * // "amount": 1000 + * // } + * // } + * + * char buffer[128]; + * uint32_t total = readMessage(buffer, 5); // buffer contains the content of the message up to 5 bytes + * print(total); // Output: 5 + * + * uint32_t msgsize = messageSize(); + * print(msgsize); // Output: size of the above message's data field + * + * requireNotice(N(initc)); // initc account will be notified for this message + * + * requireAuth(N(inita)); // Do nothing since inita exists in the auth list + * requireAuth(N(initb)); // Throws an exception + * + * AccountName code = currentCode(); + * print(Name(code)); // Output: eos + * + * assert(Name(currentCode()) == "eos", "This message expects to be received by eos"); // Do nothing + * assert(Name(currentCode()) == "inita", "This message expects to be received by inita"); // Throws exception and roll back transfer transaction + * + * print(now()); // Output: timestamp of last accepted block + * + * @endcode + * * * @{ */ /** - * @param msg - a pointer where up to @ref len bytes of the current message will be coppied + * Copy up to @ref len bytes of current message to the specified location + * @brief Copy current message to the specified location + * @param msg - a pointer where up to @ref len bytes of the current message will be copied + * @param len - length of the current message to be copied + * @return the number of bytes copied to msg */ uint32_t readMessage( void* msg, uint32_t len ); /** - * This method is useful for dynamicly sized messages - * + * Get the length of the current
message's data field + * This method is useful for dynamically sized messages + * @brief Get the length of current message's data field * @return the length of the current message's data field */ uint32_t messageSize(); /** - * Verifies that @ref name exists in the set of notified accounts on a message. Throws if not found + * Add the specified account to set of accounts to be notified + * @brief Add the specified account to set of accounts to be notified + * @param name - name of the account to be verified */ - void requireNotice( AccountName ); + void requireNotice( AccountName name ); /** * Verifies that @ref name exists in the set of provided auths on a message. Throws if not found + * @brief Verify specified account exists in the set of provided auths + * @param name - name of the account to be verified */ void requireAuth( AccountName name ); /** - * @return the account which specifes the code that is being run + * Get the account which specifies the code that is being run + * @brief Get the account which specifies the code that is being run + * @return the account which specifies the code that is being run */ AccountName currentCode(); /** - * Aborts processing of this message and unwinds all pending changes - * + * Aborts processing of this message and unwinds all pending changes if the test condition is true + * @brief Aborts processing of this message and unwinds all pending changes * @param test - 0 to abort, 1 to ignore * @param cstr - a null terminated message to explain the reason for failure + */ void assert( uint32_t test, const char* cstr ); /** * Returns the time in seconds from 1970 of the last accepted block (not the block including this message) + * @brief Get time of the last accepted block + * @return time in seconds from 1970 of the last accepted block */ Time now(); diff --git a/contracts/eoslib/message.hpp b/contracts/eoslib/message.hpp index b2938a93b29ff4794d7bcf016c57570b3e6a6de1..4062c39eab918c0ce746f83c6ac2e3972ad4a6d6 100644 --- a/contracts/eoslib/message.hpp +++ b/contracts/eoslib/message.hpp @@ -11,12 +11,25 @@ namespace eos { * * @note There are some methods from the @ref messagecapi that can be used directly from C++ * - * @{ + * @{ */ /** + * * This method attempts to reinterpret the message body as type T. This will only work * if the message has no dynamic fields and the struct packing on type T is properly defined. + * + * @brief Interpret the message body as type T + * + * Example: + * @code + * struct dummy_message { + * char a; //1 + * unsigned long long b; //8 + * int c; //4 + * }; + * dummy_message msg = currentMessage(); + * @endcode */ template T currentMessage() { @@ -30,13 +43,19 @@ namespace eos { using ::requireNotice; /** - * All of the listed accounts must be specified on the message notice list or this method will throw - * and end execution of the message. - * - * This helper method enables you to require notice of multiple accounts with a single + * All of the listed accounts will be added to the set of accounts to be notified + * + * This helper method enables you to add multiple accounts to accounts to be notified list with a single * call rather than having to call the similar C API multiple times. * * @note message.code is also considered as part of the set of notified accounts + * + * @brief Verify specified accounts exist in the set of notified accounts + * + * Example: + * @code + * requireNotice(N(Account1), N(Account2), N(Account3)); // throws exception if any of them not in set. 
+ * @endcode */ template void requireNotice( AccountName name, Accounts... accounts ){ diff --git a/contracts/eoslib/print.h b/contracts/eoslib/print.h index 70af9888d111753f62bfbb870d348846b95ff3a5..22d89d60e4ff63777bc0cd4fb21c4f99ac5ac56f 100644 --- a/contracts/eoslib/print.h +++ b/contracts/eoslib/print.h @@ -16,18 +16,64 @@ extern "C" { */ /** + * Prints string + * @brief Prints string * @param cstr - a null terminated string + * + * Example: + * @code + * prints("Hello World!"); // Output: Hello World! + * @endcode */ void prints( const char* cstr ); + /** - * Prints value as an integer + * Prints value as a 64 bit unsigned integer + * @brief Prints value as a 64 bit unsigned integer + * @param Value of 64 bit unsigned integer to be printed + * + * Example: + * @code + * printi(1e+18); // Output: 1000000000000000000 + * @endcode */ void printi( uint64_t value ); + /** + * Prints value as a 128 bit unsigned integer + * @brief Prints value as a 128 bit unsigned integer + * @param value 128 bit integer to be printed + * + * Example: + * @code + * uint128_t large_int(87654323456); + * printi128(large_int); // Output: 87654323456 + * @endcode + */ void printi128( const uint128_t* value ); + + /** + * Prints value as double + * @brief Prints value as double + * @param Value of double (interpreted as 64 bit unsigned integer) to be printed + * + * Example: + * @code + * uint64_t double_value = double_div( i64_to_double(5), i64_to_double(10) ); + * printd(double_value); // Output: 0.5 + * @endcode + */ void printd(uint64_t value); + /** * Prints a 64 bit names as base32 encoded string + * @brief Prints a 64 bit names as base32 encoded string + * @param Value of 64 bit names to be printed + * + * Example: + * @code + * printn(N(abcde)); // Output: abcde + * @endcode */ void printn( uint64_t name ); /// @} diff --git a/contracts/eoslib/print.hpp b/contracts/eoslib/print.hpp index 1c2f01243643e630be4d1c77516009f88cd6025e..2732ca1cf7e249ccfb288ccf8d67f7a5752d533c 100644 --- a/contracts/eoslib/print.hpp +++ b/contracts/eoslib/print.hpp @@ -7,30 +7,74 @@ namespace eos { static_assert( sizeof(long) == sizeof(int), "unexpected size difference" ); + /** + * Prints string + * @brief Prints string + * @param cstr - a null terminated string + */ inline void print( const char* ptr ) { prints(ptr); } + /** + * Prints 64 bit unsigned integer as a 64 bit unsigned integer + * @brief Prints integer 64 bit unsigned integer + * @param Value to be printed + */ inline void print( uint64_t num ) { printi(num); } + + /** + * Prints 32 bit unsigned integer as a 64 bit unsigned integer + * @brief Prints integer 32 bit unsigned integer + * @param Value to be printed + */ inline void print( uint32_t num ) { printi(num); } + + /** + * Prints integer as a 64 bit unsigned integer + * @brief Prints integer + * @param Value to be printed + */ inline void print( int num ) { printi(num); } + + /** + * Prints unsigned integer as a 64 bit unsigned integer + * @brief Prints unsigned integer + * @param Value to be printed + */ inline void print( unsigned int num ) { printi(num); } + /** + * Prints uint128 struct as 128 bit unsigned integer + * @brief Prints uint128 struct + * @param Value to be printed + */ inline void print( uint128 num ) { printi128((uint128_t*)&num); } + + /** + * Prints 128 bit unsigned integer + * @brief Prints 128 bit unsigned integer + * @param Value to be printed + */ inline void print( uint128_t num ) { printi128((uint128_t*)&num); } + /** + * Prints a 64 bit names as base32 encoded string + * @brief 
Prints a 64 bit name as base32 encoded string + * @param Value of 64 bit name to be printed + */ inline void print( Name name ) { printn(name.value); } @@ -63,6 +107,23 @@ namespace eos { * * @{ */ + + /** + * Print out value / list of values (except double) + * @brief Print out value / list of values + * @param a Value to be printed + * @param args Other values to be printed + * + * Example: + * @code + * char *s = "Hello World!"; + * uint64_t unsigned_64_bit_int = 1e+18; + * uint128_t unsigned_128_bit_int (87654323456); + * uint64_t string_as_unsigned_64_bit = N(abcde); + * print(s, unsigned_64_bit_int, unsigned_128_bit_int, string_as_unsigned_64_bit); + * // Output: Hello World!100000000000000000087654323456abcde + * @endcode + */ template void print( Arg a, Args... args ) { print(a); @@ -74,6 +135,22 @@ namespace eos { */ class iostream {}; + /** + * Overload C++ iostream + * @brief Overload C++ iostream + * @param out Output stream + * @param v Value to be printed + * + * Example: + * @code + * char *s = "Hello World!"; + * uint64_t unsigned_64_bit_int = 1e+18; + * uint128_t unsigned_128_bit_int (87654323456); + * uint64_t string_as_unsigned_64_bit = N(abcde); + * std::cout << s << " " << unsigned_64_bit_int << " " << unsigned_128_bit_int << " " << string_as_unsigned_64_bit; + * // Output: Hello World! 1000000000000000000 87654323456 abcde + * @endcode + */ template inline iostream& operator<<( iostream& out, const T& v ) { print( v ); diff --git a/contracts/eoslib/token.hpp b/contracts/eoslib/token.hpp index c9207cf607099c391dcfb6f566b5355141b5be71..44ffc7ae00690c7a2b31e514424d1b16b6e25499 100644 --- a/contracts/eoslib/token.hpp +++ b/contracts/eoslib/token.hpp @@ -12,7 +12,7 @@ * @brief Defines the ABI for interfacing with standard-compatible token messages and database tables. * @ingroup contractdev * - * + * @{ */ @@ -20,62 +20,218 @@ namespace eos { /** + * Base token structure with checks for proper types and over/underflows. + * It supports the following operators: +=, -=, +, -, <=, <, ==, !=, >=, >, bool and also print functionality * @brief a uint64_t wrapper with checks for proper types and over/underflows. - + * @tparam NumberType - numeric type of the token + * @tparam CurrencyType - type of the currency (e.g. eos) represented as an unsigned 64 bit integer * @ingroup tokens + * @code + * typedef eos::token MyToken; + * MyToken a(128); + * a.print(); // Output: 128 MyToken + * MyToken b(64); + * a += b; + * a.print(); // Output: 192 MyToken + * b.print(); // Output: 64 MyToken + * a -= b; + * a.print(); // Output: 128 MyToken + * b.print(); // Output: 64 MyToken + * b -= a; // Throws integer underflow exception + * MyToken c = a + b; + * c.print(); // Output: 192 MyToken + * MyToken d = a - b; + * d.print(); // Output: 64 MyToken + * MyToken maxToken(std::numeric_limits::max()); + * maxToken += b; // Throws integer overflow exception + * std::cout << (maxToken > b); // Output: true + * std::cout << (b > maxToken); // Output: false + * std::cout << (bool)maxToken; // Output: true + * std::cout << (a == b); // Output: false + * std::cout << (a != b); // Output: true + * @endcode */ template struct token { + /** + * Type of the currency (e.g.
eos) represented as an unsigned 64 bit integer + * @brief Type of the currency + */ static const uint64_t currency_type = CurrencyType; + /** + * Default constructor + * @brief Default constructor + */ token(){} + + /** + * Constructor for token given quantity of tokens available + * @brief Constructor for token given quantity of tokens available + * @param v - quantity of tokens available + */ explicit token( NumberType v ):quantity(v){}; + + /** + * Quantity of tokens available + * @brief Quantity of tokens available + */ NumberType quantity = 0; + /** + * Subtracts quantity of token from this object + * Throws an exception if underflow + * @brief Subtracts quantity of token from this object + * @param a token to be subtracted + * @return this token after subtraction + */ token& operator-=( const token& a ) { assert( quantity >= a.quantity, "integer underflow subtracting token balance" ); quantity -= a.quantity; return *this; } + /** + * Adds quantity of token to this object + * Throws an exception if overflow + * @brief Adds quantity of token to this object + * @param a token to be added + * @return this token after addition + */ token& operator+=( const token& a ) { assert( quantity + a.quantity >= a.quantity, "integer overflow adding token balance" ); quantity += a.quantity; return *this; } + /** + * Adds quantity of two tokens and return a new token + * Throws an exception if overflow + * @brief Adds quantity of two tokens and return a new token + * @param a token to be added + * @param b token to be added + * @return result of addition as a new token + */ inline friend token operator+( const token& a, const token& b ) { token result = a; result += b; return result; } + /** + * Subtracts quantity of two tokens and return a new token + * Throws an exception if underflow + * @brief Subtracts quantity of two tokens and return a new token + * @param a token to be subtracted + * @param b token to be subtracted + * @return result of subtraction as a new token + */ inline friend token operator-( const token& a, const token& b ) { token result = a; result -= b; return result; } + /** + * Less than or equal to comparison operator + * @brief Less than or equal to comparison operator + * @param a token to be compared + * @param b token to be compared + * @return true if quantity of a is less than or equal to quantity of b + */ friend bool operator <= ( const token& a, const token& b ) { return a.quantity <= b.quantity; } + + /** + * Less than comparison operator + * @brief Less than comparison operator + * @param a token to be compared + * @param b token to be compared + * @return true if quantity of a is less than quantity of b + */ friend bool operator < ( const token& a, const token& b ) { return a.quantity < b.quantity; } + + /** + * Greater than or equal to comparison operator + * @brief Greater than or equal to comparison operator + * @param a token to be compared + * @param b token to be compared + * @return true if quantity of a is greater than or equal to quantity of b + */ friend bool operator >= ( const token& a, const token& b ) { return a.quantity >= b.quantity; } + + /** + * Greater than comparison operator + * @brief Greater than comparison operator + * @param a token to be compared + * @param b token to be compared + * @return true if quantity of a is greater than quantity of b + */ friend bool operator > ( const token& a, const token& b ) { return a.quantity > b.quantity; } + + /** + * Equality comparison operator + * @brief Equality comparison operator + * @param a token to be 
compared + * @param b token to be compared + * @return true if quantity of a is equal to quantity of b + */ friend bool operator == ( const token& a, const token& b ) { return a.quantity == b.quantity; } + /** + * Inequality comparison operator + * @brief Inequality comparison operator + * @param a token to be compared + * @param b token to be compared + * @return true if quantity of a is not equal to quantity of b + */ friend bool operator != ( const token& a, const token& b ) { return a.quantity != b.quantity; } + /** + * Boolean conversion operator + * @brief Boolean conversion operator + * @return true if quantity is not zero + */ explicit operator bool()const { return quantity != 0; } - inline void print() { + /** + * Print as string representation of the token (e.g. 1 EOS) + * @brief Print as string + */ + inline void print() { eos::print( quantity, " ", Name(CurrencyType) ); - } + } }; /** - * @brief defines a fixed precision price between two tokens. - * - * A price is written as X Base/Quote - * + * Defines a fixed precision price between two tokens. + * A price is written as X Base/Quote, where X is a power of 10, which makes it simpler to just shift the decimal. + * It supports the following operators: /, *, <=, <, ==, !=, >=, > and also print functionality + * @brief Defines a fixed precision price between two tokens. + * @tparam BaseToken - represents the type of the base token + * @tparam QuoteToken - represents the type of the quote token * @ingroup tokens + * @code + * typedef eos::token MyBaseToken; + * typedef eos::token MyQuoteToken; + * typedef price<MyBaseToken, MyQuoteToken> MyBaseToQuotePrice; + * MyBaseToken zeroBaseToken; + * MyQuoteToken zeroQuoteToken; + * MyBaseToQuotePrice zeroBaseToQuote(zeroBaseToken, zeroQuoteToken); // throws invalid price exception + * MyBaseToken baseToken(128); + * MyQuoteToken quoteToken(128); + * MyBaseToQuotePrice aPrice(baseToken, quoteToken); + * aPrice.print(); // Output: 1e+15. MyBaseToken / MyQuoteToken + * MyQuoteToken anotherQuote = baseToken / aPrice; + * std::cout << (anotherQuote == quoteToken); // Output: true + * MyBaseToken anotherBase = quoteToken * aPrice; + * std::cout << (anotherBase == baseToken); // Output: true + * MyBaseToQuotePrice anotherPrice(baseToken, quoteToken); + * std::cout << (aPrice == anotherPrice); // Output: true + * std::cout << (aPrice != anotherPrice); // Output: false + * MyBaseToken base256(256); + * MyBaseToQuotePrice price2(base256, quoteToken); + * std::cout << (price2 > aPrice); // Output: true + * std::cout << (aPrice < price2); // Output: true + * @endcode */ template<typename BaseToken, typename QuoteToken> struct price @@ -90,8 +246,19 @@ namespace eos { */ static const uint64_t precision = 1000ll*1000ll*1000ll*1000ll*1000ll; + /** + * Default constructor. + * Initializes base per quote to 1. + * @brief Default constructor.
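+ * @note Editorial sketch, not part of the original change: base_per_quote (declared below) is a
+ *       fixed-point value scaled by `precision`. Inferred from operator/ and operator* further
+ *       down, the two-argument constructor effectively stores base.quantity * precision / quote.quantity,
+ *       so aPrice(baseToken, quoteToken) in the example above (quantities 128 and 128) holds
+ *       128 * 1e+15 / 128 == 1e+15, which matches its print() output.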
+ */ price():base_per_quote(1ul){} + /** + * Construction for price given the base token and quote token + * @brief Construction for price given the base token and quote token + * @param base - base token + * @param quote - quote token + */ price( BaseToken base, QuoteToken quote ) { assert( base >= BaseToken(1ul), "invalid price" ); assert( quote >= QuoteToken(1ul), "invalid price" ); @@ -101,11 +268,25 @@ namespace eos { base_per_quote /= quote.quantity; } + /** + * Operator returns a quote token given a base token and the conversion price + * @brief Operator returns a quote token given a base token and the conversion price + * @param b - base token + * @param q - price + * @return quote token + */ friend QuoteToken operator / ( BaseToken b, const price& q ) { eos::print( "operator/ ", uint128(b.quantity), " * ", uint128( precision ), " / ", q.base_per_quote, "\n" ); return QuoteToken( uint64_t((uint128(b.quantity) * uint128(precision) / q.base_per_quote)) ); } + /** + * Operator returns a base token given a quote token and the conversion price + * @brief Operator returns a base token given a quote token and the conversion price + * @param b - quote token + * @param q - price + * @return base token + */ friend BaseToken operator * ( const QuoteToken& b, const price& q ) { eos::print( "b: ", b, " \n" ); eos::print( "operator* ", uint128(b.quantity), " * ", uint128( q.base_per_quote ), " / ", precision, "\n" ); @@ -113,36 +294,114 @@ namespace eos { return BaseToken( uint64_t((b.quantity * q.base_per_quote) / precision) ); } + /** + * Less than or equal to comparison operator + * @brief Less than or equal to comparison operator + * @param a price to be compared + * @param b price to be compared + * @return true if base per quote of a is less than or equal to base per quote of b + */ friend bool operator <= ( const price& a, const price& b ) { return a.base_per_quote <= b.base_per_quote; } + + /** + * Less than comparison operator + * @brief Less than comparison operator + * @param a price to be compared + * @param b price to be compared + * @return true if base per quote of a is less than base per quote of b + */ friend bool operator < ( const price& a, const price& b ) { return a.base_per_quote < b.base_per_quote; } + + /** + * Greater than or equal to comparison operator + * @brief Greater than or equal to comparison operator + * @param a price to be compared + * @param b price to be compared + * @return true if base per quote of a is greater than or equal to base per quote of b + */ friend bool operator >= ( const price& a, const price& b ) { return a.base_per_quote >= b.base_per_quote; } + + /** + * Greater than comparison operator + * @brief Greater than comparison operator + * @param a price to be compared + * @param b price to be compared + * @return true if base per quote of a is greater than base per quote of b + */ friend bool operator > ( const price& a, const price& b ) { return a.base_per_quote > b.base_per_quote; } + + /** + * Equality comparison operator + * @brief Equality comparison operator + * @param a price to be compared + * @param b price to be compared + * @return true if base per quote of a is equal to base per quote of b + */ friend bool operator == ( const price& a, const price& b ) { return a.base_per_quote == b.base_per_quote; } + + /** + * Inequality comparison operator + * @brief Inequality comparison operator + * @param a price to be compared + * @param b price to be compared + * @return true if base per quote of a is not equal to base per quote of b + */ 
friend bool operator != ( const price& a, const price& b ) { return a.base_per_quote != b.base_per_quote; } + /** + * Print as string representing the conversion + * @brief Print as string + */ inline void print() { eos::print( base_per_quote, ".", " ", Name(base_token_type::currency_type), "/", Name(quote_token_type::currency_type) ); } private: /** - * represented as number of base tokens to purchase 1 quote token + * Represented as number of base tokens to purchase 1 quote token + * @brief Represented as number of base tokens to purchase 1 quote token */ - eos::uint128 base_per_quote; + eos::uint128 base_per_quote; }; + /** + * Defines an eos tokens + * @brief Defines an eos tokens + */ typedef eos::token Tokens; /** - * @brief the binary structure of the `transfer` message type for the `eos` contract. - * + * The binary structure of the `transfer` message type for the `eos` contract. + * @brief The binary structure of the `transfer` message type for the `eos` contract. * @ingroup tokens + * @code + * Transfer MeToYou; + * MeToYou.from = N(Me); + * MeToYou.to = N(You); + * MeToYou.quantity = Tokens(100); + * @endcode */ struct Transfer { + /** + * Defines transfer action type + * @brief Defines transfer action type + */ static const uint64_t action_type = N(transfer); - + /** + * Name of the account who sends the token + * @brief Name of the account who sends the token + */ AccountName from; + /** + * Name of the account who receives the token + * @brief Name of the account who receives the token + */ AccountName to; + /** + * Quantity of token to be transferred + * @brief Quantity of token to be transferred + */ Tokens quantity; }; + /// @} tokenhppapi } // namespace eos diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index f588105548c9872f4fb8b5f97ff42509fd2bbe7c..3abbc76f714232e458ac7d6e830896219ab3bbc5 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -8,4 +8,3 @@ add_subdirectory( egenesis ) add_subdirectory( utilities ) add_subdirectory( appbase ) add_subdirectory( native_contract ) -add_subdirectory( wallet ) diff --git a/libraries/chain/chain_controller.cpp b/libraries/chain/chain_controller.cpp index 8bc1a74a2360325fbe5dd8562dca7c8730b344d9..2eb11f5459a168c74ada5e8868c6ad291b4bc4f2 100644 --- a/libraries/chain/chain_controller.cpp +++ b/libraries/chain/chain_controller.cpp @@ -718,23 +718,51 @@ void chain_controller::_apply_block(const signed_block& next_block) } FC_CAPTURE_AND_RETHROW( (next_block.block_num()) ) } +namespace { + + auto make_get_permission(const chainbase::database& db) { + return [&db](const types::AccountPermission& permission) { + auto key = boost::make_tuple(permission.account, permission.permission); + return db.get(key); + }; + } + + auto make_authority_checker(const chainbase::database& db, const flat_set& signingKeys) { + auto getPermission = make_get_permission(db); + auto getAuthority = [getPermission](const types::AccountPermission& permission) { + return getPermission(permission).auth; + }; + auto depthLimit = db.get().configuration.authDepthLimit; + return MakeAuthorityChecker(std::move(getAuthority), depthLimit, signingKeys); + } + +} + +flat_set chain_controller::get_required_keys(const SignedTransaction& trx, const flat_set& candidateKeys)const { + auto checker = make_authority_checker(_db, candidateKeys); + + for (const auto& message : trx.messages) { + for (const auto& declaredAuthority : message.authorization) { + if (!checker.satisfied(declaredAuthority)) { + 
EOS_ASSERT(checker.satisfied(declaredAuthority), tx_missing_sigs, + "Transaction declares authority '${auth}', but does not have signatures for it.", + ("auth", declaredAuthority)); + } + } + } + + return checker.used_keys(); +} + void chain_controller::check_transaction_authorization(const SignedTransaction& trx, bool allow_unused_signatures)const { if ((_skip_flags & skip_transaction_signatures) && (_skip_flags & skip_authority_check)) { //ilog("Skipping auth and sigs checks"); return; } - - auto getPermission = [&db=_db](const types::AccountPermission& permission) { - auto key = boost::make_tuple(permission.account, permission.permission); - return db.get(key); - }; - auto getAuthority = [&getPermission](const types::AccountPermission& permission) { - return getPermission(permission).auth; - }; - auto depthLimit = get_global_properties().configuration.authDepthLimit; + auto getPermission = make_get_permission(_db); #warning TODO: Use a real chain_id here (where is this stored? Do we still need it?) - auto checker = MakeAuthorityChecker(std::move(getAuthority), depthLimit, trx.get_signature_keys(chain_id_type{})); + auto checker = make_authority_checker(_db, trx.get_signature_keys(chain_id_type{})); for (const auto& message : trx.messages) for (const auto& declaredAuthority : message.authorization) { diff --git a/libraries/chain/include/eos/chain/chain_controller.hpp b/libraries/chain/include/eos/chain/chain_controller.hpp index 28621d292c124ff05c6266613eb1b9c021098ae8..28a485834640635d2b076213177bbb0eb30f511e 100644 --- a/libraries/chain/include/eos/chain/chain_controller.hpp +++ b/libraries/chain/include/eos/chain/chain_controller.hpp @@ -157,6 +157,15 @@ namespace eos { namespace chain { ProcessedTransaction push_transaction( const SignedTransaction& trx, uint32_t skip = skip_nothing ); ProcessedTransaction _push_transaction( const SignedTransaction& trx ); + /** + * Determine which public keys are needed to sign the given transaction. 
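+ * A minimal caller-side sketch (editorial addition; only get_required_keys itself is part of this
+ * change, and the helper names and the public_key_type element type are assumptions):
+ * @code
+ * // Ask the chain which of the keys we control are actually required, then sign with only those.
+ * flat_set<public_key_type> candidates = wallet_public_keys();            // hypothetical helper
+ * flat_set<public_key_type> required = chain.get_required_keys(trx, candidates);
+ * sign_with_keys(trx, required);                                          // hypothetical helper
+ * @endcode
+ *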
+ * @param trx Transaction that requires signature + * @param candidateKeys Set of public keys to examine for applicability + * @return Subset of candidateKeys whose private keys should be used to sign transaction + * @throws fc::exception if candidateKeys does not contain all required keys + */ + flat_set get_required_keys(const SignedTransaction& trx, const flat_set& candidateKeys)const; + bool _push_block( const signed_block& b ); diff --git a/libraries/fc/src/thread/thread.cpp b/libraries/fc/src/thread/thread.cpp deleted file mode 100644 index f6890d842e7a4009bac75dbb4b8315a2c0424d77..0000000000000000000000000000000000000000 --- a/libraries/fc/src/thread/thread.cpp +++ /dev/null @@ -1,307 +0,0 @@ -#include -#include -#include -#include - -#if defined(__linux__) && !defined(NDEBUG) -# include -static void set_thread_name(const char* threadName) -{ pthread_setname_np(pthread_self(), threadName); } -#elif defined(__APPLE__) && !defined(NDEBUG) -# include -static void set_thread_name(const char* threadName) -{ pthread_setname_np(threadName); } -#else// do nothing in release mode -static void set_thread_name(const char* threadName) -{} -#endif - -namespace fc { - using namespace boost::multi_index; - - struct by_time; - typedef multi_index_container < - std::shared_ptr, - indexed_by< - ordered_non_unique< tag, const_mem_fun< scheduled_task, time_point, &scheduled_task::get_scheduled_time > > - > - > scheduled_task_index; - - thread*& current_thread() { - static __thread thread* t = nullptr; - return t; - } - - - class thread_detail { - public: - thread_detail( fc::thread& t ):fc_thread(t) { - this_id = std::this_thread::get_id(); - current_thread() = &t; - task_in_queue = nullptr; - } - ~thread_detail(){ } - - fc::thread& fc_thread; - std::thread::id this_id; - std::thread* std_thread = nullptr; - boost::fibers::promise exit_promise; - - std::string name; - - boost::fibers::condition_variable task_ready; - boost::fibers::mutex task_ready_mutex; - std::atomic task_in_queue; - - detail::task* _queue = nullptr; - detail::task* _queue_end = nullptr; - bool _running = false; - bool _done = false; - - scheduled_task_index _scheduled; - - - void async_task( detail::task* t ) { - idump((name)); - if( _done ) { - delete t; - throw std::runtime_error( "attempt to async task on thread that has quit" ); - } - - auto stale_head = task_in_queue.load(std::memory_order_relaxed); - do { t->next = stale_head; - }while( !task_in_queue.compare_exchange_weak( stale_head, t, std::memory_order_release ) ); - - if( !stale_head ) - { - dlog( "----grabbing ready mutex" ); - std::unique_lock lock(task_ready_mutex); - dlog("--- got ready mutex, notify one" ); - task_ready.notify_one(); - } - } - - bool exec_next_task() { - auto itr = _scheduled.begin(); - if( _scheduled.end() != itr && (*itr)->get_scheduled_time() < fc::time_point::now() ) { - idump(((*itr)->get_scheduled_time())); - auto tsk = *itr; - _scheduled.erase(itr); - tsk->exec(); - return true; - } - - if( !_queue ) - { - move_newly_scheduled_tasks_to_task_queue(); - if( !_queue ) return false; - } - - auto tmp = _queue; - _queue = _queue->next; - if( !_queue ) - _queue_end = nullptr; - - tmp->exec(); - delete tmp; - - return true; - } - - /** - * Start a new fiber which will process tasks until it - * blocks on some event, at that time it should resume - * the exec() loop which will look for new tasks. If there - * are no new tasks then it will block on a wait condition - * until a new task comes in. 
- * - */ - void exec() { - _running = true; - while( !_done ) { - move_newly_scheduled_tasks_to_task_queue(); - if( _queue || _scheduled.size() ) { - -#if 1 - boost::fibers::async( boost::fibers::launch::dispatch, [this](){ while( exec_next_task() ){} } ); -#else - // elog( "creating new fiber... " ); - static int tmp = 0; - ++tmp; - /** - * First we execute the task, then delete it, and - * finally look for other tasks to execute, and - * exit when there are no more tasks in the queue - */ - boost::fibers::fiber fib( boost::fibers::launch::dispatch, [this,t=tmp](){ - // wlog( "starting new fiber... ${d}", ("d",int64_t(t)) ); - while( exec_next_task() ){} - // dlog( "exit fiber... ${d}", ("d",int64_t(t)) ); - }); - fib.detach(); - #endif - - } else { - //ilog( "grabbing task_read_mutex..." ); - std::unique_lock lock(task_ready_mutex); - move_newly_scheduled_tasks_to_task_queue(); - if( !(_queue || _scheduled.size()) ) { - if( !_scheduled.size() ) { - // wlog( "waiting until next event" ); - task_ready.wait( lock ); - // ilog( "wake up..." ); - } else { - // wlog( "waiting for ${t} or until next event", ("t", (*_scheduled.begin())->get_scheduled_time() - fc::time_point::now() )); - task_ready.wait_until( lock, std::chrono::system_clock::time_point(std::chrono::microseconds( (*_scheduled.begin())->get_scheduled_time().time_since_epoch().count())) ); - // wlog( "done waiting... " ); - } - } - } - } - // ilog( "exec done" ); - _running = false; - } - - void move_newly_scheduled_tasks_to_task_queue() - { - // first, if there are any new tasks on 'task_in_queue', which is tasks that - // have been just been async or scheduled, but we haven't processed them. - // move them into the task_sch_queue or task_pqueue, as appropriate - - //DLN: changed from memory_order_consume for boost 1.55. - //This appears to be safest replacement for now, maybe - //can be changed to relaxed later, but needs analysis. 
- auto pending_list = task_in_queue.exchange(0, std::memory_order_seq_cst); - if( !pending_list ) return; - - /** reverse the list */ - detail::task* cur = pending_list; - detail::task* prev = nullptr; - detail::task* next = nullptr; - detail::task* new_end = cur; - - while( cur != nullptr ) { - next = cur->next; - cur->next = prev; - prev = cur; - cur = next; - } - - /** insert the list to the current queue */ - if( !_queue ) { - _queue = prev; - } else { - _queue_end->next = prev; - } - _queue_end = new_end; - } - }; - - namespace detail { - void thread_detail_cancel( thread& t, scheduled_task* stask ) { - t.async( [tptr=&t,stask](){ - for( auto itr = tptr->my->_scheduled.begin(); itr != tptr->my->_scheduled.end(); ++itr ) { - if( itr->get() == stask ) { - tptr->my->_scheduled.erase(itr); - return; - } - } - }); - } - } - - - thread::thread( const std::string& name ) { - boost::fibers::promise prom; - auto stdt = new std::thread( [this,name,&prom]() { - my = new thread_detail( *this ); - my->name = name; - prom.set_value(); - set_thread_name( name.c_str() ); - this->exec(); - //elog( "exit thread" ); - my->exit_promise.set_value(); - }); - prom.get_future().wait(); - my->std_thread = stdt; - } - thread::thread( thread&& mv ) - :my(mv.my){ - mv.my = nullptr; - } - - thread::thread( thread_detail* d ) { - my = new thread_detail(*this); - } - - thread::~thread() { - delete my->std_thread; - delete my; - } - - const string& thread::name()const { - return my->name; - } - - thread& thread::current() { - auto cur = current_thread(); - if( cur ) return *cur; - return *(new thread( (thread_detail*)nullptr )); - } - - void thread::quit() { - if( !my->_done && my->_running ) - async( [&](){ my->_done = true; }, "thread::quit" ).wait(); - } - - bool thread::is_running()const { - return !my->_done; - } - - bool thread::is_current()const { - return this == ¤t(); - } - - void thread::set_name( const string& n ) { - //this->async( [=]() { - my->name = n; - set_thread_name( my->name.c_str() ); - //}).wait(); - } - void thread::exec() { - if( this != ¤t() ) elog( "exec called from wrong thread" ); - else my->exec(); - } - - void thread::async_task( detail::task* t ) { - my->async_task(t); - if( !my->_running /*&& this == ¤t()*/ ) { - my->_running = true; - boost::fibers::async( boost::fibers::launch::post, [this](){ my->exec(); } ); - /* - my->exec(); - - boost::fibers::fiber fib( boost::fibers::launch::post, [&](){ - elog( "STARTING FIBER to call exec()" ); - exec(); - elog( "EXITING FIBER CALLING EXEC" ); - } ); - fib.detach(); - */ - } - } - void thread::schedule( const std::shared_ptr& stask ) { - async( [=]() { - my->_scheduled.insert( stask ); - }); - } - - void thread::join() { - quit(); - if( my->std_thread ) { - my->exit_promise.get_future().wait(); - my->std_thread->join(); - } - } - -} // namespace fc diff --git a/libraries/net/CMakeLists.txt b/libraries/net/CMakeLists.txt deleted file mode 100644 index 30091e004e0d6f2f44a94942862a4c9c87333bef..0000000000000000000000000000000000000000 --- a/libraries/net/CMakeLists.txt +++ /dev/null @@ -1,36 +0,0 @@ -file(GLOB HEADERS "include/eos/net/*.hpp") - -set(SOURCES node.cpp - stcp_socket.cpp - core_messages.cpp - peer_database.cpp - peer_connection.cpp - message_oriented_connection.cpp) - -add_library( eos_net ${SOURCES} ${HEADERS} ) - -target_link_libraries( eos_net - PUBLIC fc chainbase appbase eos_types) - -target_include_directories( eos_net - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" - PRIVATE "${CMAKE_SOURCE_DIR}/libraries/chain/include" -) - 
-if(MSVC) - set_source_files_properties( node.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) -endif(MSVC) - -if (USE_PCH) - set_target_properties(eos_net PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE) - cotire(eos_net) -endif(USE_PCH) - -install( TARGETS - eos_net - - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) -install( FILES ${HEADERS} DESTINATION "include/eos/net" ) diff --git a/libraries/net/core_messages.cpp b/libraries/net/core_messages.cpp deleted file mode 100644 index c39a3ed461abb2fc6ef6d7212c34ceb5ff79b5b0..0000000000000000000000000000000000000000 --- a/libraries/net/core_messages.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#include - - -namespace eos { namespace net { - - const core_message_type_enum trx_message::type = core_message_type_enum::trx_message_type; - const core_message_type_enum block_message::type = core_message_type_enum::block_message_type; - const core_message_type_enum item_ids_inventory_message::type = core_message_type_enum::item_ids_inventory_message_type; - const core_message_type_enum blockchain_item_ids_inventory_message::type = core_message_type_enum::blockchain_item_ids_inventory_message_type; - const core_message_type_enum fetch_blockchain_item_ids_message::type = core_message_type_enum::fetch_blockchain_item_ids_message_type; - const core_message_type_enum fetch_items_message::type = core_message_type_enum::fetch_items_message_type; - const core_message_type_enum item_not_available_message::type = core_message_type_enum::item_not_available_message_type; - const core_message_type_enum hello_message::type = core_message_type_enum::hello_message_type; - const core_message_type_enum connection_accepted_message::type = core_message_type_enum::connection_accepted_message_type; - const core_message_type_enum connection_rejected_message::type = core_message_type_enum::connection_rejected_message_type; - const core_message_type_enum address_request_message::type = core_message_type_enum::address_request_message_type; - const core_message_type_enum address_message::type = core_message_type_enum::address_message_type; - const core_message_type_enum closing_connection_message::type = core_message_type_enum::closing_connection_message_type; - const core_message_type_enum current_time_request_message::type = core_message_type_enum::current_time_request_message_type; - const core_message_type_enum current_time_reply_message::type = core_message_type_enum::current_time_reply_message_type; - const core_message_type_enum check_firewall_message::type = core_message_type_enum::check_firewall_message_type; - const core_message_type_enum check_firewall_reply_message::type = core_message_type_enum::check_firewall_reply_message_type; - const core_message_type_enum get_current_connections_request_message::type = core_message_type_enum::get_current_connections_request_message_type; - const core_message_type_enum get_current_connections_reply_message::type = core_message_type_enum::get_current_connections_reply_message_type; - -} } // eos::net - diff --git a/libraries/net/include/eos/net/config.hpp b/libraries/net/include/eos/net/config.hpp deleted file mode 100644 index 2e5f4af4ea8eb15550f71a0c59704bd2ec25ab8b..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/config.hpp +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once - -#define EOS_NET_PROTOCOL_VERSION 106 - -/** - * Define this to enable debugging code in the p2p network interface. - * This is code that would never be executed in normal operation, but is - * used for automated testing (creating artificial net splits, - * tracking where messages came from and when) - */ -#define ENABLE_P2P_DEBUGGING_API 1 - -/** - * 2MiB - */ -#define MAX_MESSAGE_SIZE 1024*1024*2 -#define EOS_NET_DEFAULT_PEER_CONNECTION_RETRY_TIME 30 // seconds - -/** - * AFter trying all peers, how long to wait before we check to - * see if there are peers we can try again. - */ -#define EOS_PEER_DATABASE_RETRY_DELAY 15 // seconds - -#define EOS_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT 5 - -#define EOS_NET_PEER_DISCONNECT_TIMEOUT 20 - -#define EOS_NET_TEST_SEED_IP "104.236.44.210" // autogenerated -#define EOS_NET_TEST_P2P_PORT 1700 -#define EOS_NET_DEFAULT_P2P_PORT 1776 -#define EOS_NET_DEFAULT_DESIRED_CONNECTIONS 20 -#define EOS_NET_DEFAULT_MAX_CONNECTIONS 200 - -#define EOS_NET_MAXIMUM_QUEUED_MESSAGES_IN_BYTES (1024 * 1024) - -/** - * When we receive a message from the network, we advertise it to - * our peers and save a copy in a cache were we will find it if - * a peer requests it. We expire out old items out of the cache - * after this number of blocks go by. - * - * Recently lowered from 30 to match the default expiration time - * the web wallet imposes on transactions. - */ -#define EOS_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS 5 - -/** - * We prevent a peer from offering us a list of blocks which, if we fetched them - * all, would result in a blockchain that extended into the future. - * This parameter gives us some wiggle room, allowing a peer to give us blocks - * that would put our blockchain up to an hour in the future, just in case - * our clock is a bit off. - */ -#define EOS_NET_FUTURE_SYNC_BLOCKS_GRACE_PERIOD_SEC (60 * 60) - -#define EOS_NET_MAX_INVENTORY_SIZE_IN_MINUTES 2 - -#define EOS_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING 200 - -/** - * During normal operation, how many items will be fetched from each - * peer at a time. This will only come into play when the network - * is being flooded -- typically transactions will be fetched as soon - * as we find out about them, so only one item will be requested - * at a time. - * - * No tests have been done to find the optimal value for this - * parameter, so consider increasing or decreasing it if performance - * during flooding is lacking. - */ -#define EOS_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION 1 - -/** - * Instead of fetching all item IDs from a peer, then fetching all blocks - * from a peer, we will interleave them. 
Fetch at least this many block IDs, - * then switch into block-fetching mode until the number of blocks we know about - * but haven't yet fetched drops below this - */ -#define EOS_NET_MIN_BLOCK_IDS_TO_PREFETCH 10000 - -#define EOS_NET_MAX_TRX_PER_SECOND 1000 diff --git a/libraries/net/include/eos/net/core_messages.hpp b/libraries/net/include/eos/net/core_messages.hpp deleted file mode 100644 index 04eec110a88a3687ecaa185c1a14a263ccc56f7b..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/core_messages.hpp +++ /dev/null @@ -1,525 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include - -namespace eos { namespace net { - using eos::chain::SignedTransaction; - using eos::chain::block_id_type; - using eos::chain::transaction_id_type; - using eos::chain::signed_block; - - typedef fc::ecc::public_key_data node_id_t; - typedef fc::sha256 item_hash_t; - struct item_id - { - uint32_t item_type; - item_hash_t item_hash; - - item_id() {} - item_id(uint32_t type, const item_hash_t& hash) : - item_type(type), - item_hash(hash) - {} - bool operator==(const item_id& other) const - { - return item_type == other.item_type && - item_hash == other.item_hash; - } - }; - - enum core_message_type_enum - { - trx_message_type = 1000, - block_message_type = 1001, - core_message_type_first = 5000, - item_ids_inventory_message_type = 5001, - blockchain_item_ids_inventory_message_type = 5002, - fetch_blockchain_item_ids_message_type = 5003, - fetch_items_message_type = 5004, - item_not_available_message_type = 5005, - hello_message_type = 5006, - connection_accepted_message_type = 5007, - connection_rejected_message_type = 5008, - address_request_message_type = 5009, - address_message_type = 5010, - closing_connection_message_type = 5011, - current_time_request_message_type = 5012, - current_time_reply_message_type = 5013, - check_firewall_message_type = 5014, - check_firewall_reply_message_type = 5015, - get_current_connections_request_message_type = 5016, - get_current_connections_reply_message_type = 5017, - core_message_type_last = 5099 - }; - - const uint32_t core_protocol_version = EOS_NET_PROTOCOL_VERSION; - - struct trx_message - { - static const core_message_type_enum type; - - SignedTransaction trx; - trx_message() {} - 
trx_message(SignedTransaction transaction) : - trx(std::move(transaction)) - {} - }; - - struct block_message - { - static const core_message_type_enum type; - - block_message(){} - block_message(const signed_block& blk ) - :block(blk),block_id(blk.id()){} - - signed_block block; - block_id_type block_id; - - }; - - struct item_ids_inventory_message - { - static const core_message_type_enum type; - - uint32_t item_type; - std::vector item_hashes_available; - - item_ids_inventory_message() {} - item_ids_inventory_message(uint32_t item_type, const std::vector& item_hashes_available) : - item_type(item_type), - item_hashes_available(item_hashes_available) - {} - }; - - struct blockchain_item_ids_inventory_message - { - static const core_message_type_enum type; - - uint32_t total_remaining_item_count; - uint32_t item_type; - std::vector item_hashes_available; - - blockchain_item_ids_inventory_message() {} - blockchain_item_ids_inventory_message(uint32_t total_remaining_item_count, - uint32_t item_type, - const std::vector& item_hashes_available) : - total_remaining_item_count(total_remaining_item_count), - item_type(item_type), - item_hashes_available(item_hashes_available) - {} - }; - - struct fetch_blockchain_item_ids_message - { - static const core_message_type_enum type; - - uint32_t item_type; - std::vector blockchain_synopsis; - - fetch_blockchain_item_ids_message() {} - fetch_blockchain_item_ids_message(uint32_t item_type, const std::vector& blockchain_synopsis) : - item_type(item_type), - blockchain_synopsis(blockchain_synopsis) - {} - }; - - struct fetch_items_message - { - static const core_message_type_enum type; - - uint32_t item_type; - std::vector items_to_fetch; - - fetch_items_message() {} - fetch_items_message(uint32_t item_type, const std::vector& items_to_fetch) : - item_type(item_type), - items_to_fetch(items_to_fetch) - {} - }; - - struct item_not_available_message - { - static const core_message_type_enum type; - - item_id requested_item; - - item_not_available_message() {} - item_not_available_message(const item_id& requested_item) : - requested_item(requested_item) - {} - }; - - struct hello_message - { - static const core_message_type_enum type; - - std::string user_agent; - uint32_t core_protocol_version; - fc::ip::address inbound_address; - uint16_t inbound_port; - uint16_t outbound_port; - node_id_t node_public_key; - fc::ecc::compact_signature signed_shared_secret; - fc::sha256 chain_id; - fc::variant_object user_data; - - hello_message() {} - hello_message(const std::string& user_agent, - uint32_t core_protocol_version, - const fc::ip::address& inbound_address, - uint16_t inbound_port, - uint16_t outbound_port, - const node_id_t& node_public_key, - const fc::ecc::compact_signature& signed_shared_secret, - const fc::sha256& chain_id_arg, - const fc::variant_object& user_data ) : - user_agent(user_agent), - core_protocol_version(core_protocol_version), - inbound_address(inbound_address), - inbound_port(inbound_port), - outbound_port(outbound_port), - node_public_key(node_public_key), - signed_shared_secret(signed_shared_secret), - chain_id(chain_id_arg), - user_data(user_data) - {} - }; - - struct connection_accepted_message - { - static const core_message_type_enum type; - - connection_accepted_message() {} - }; - - enum class rejection_reason_code { unspecified, - different_chain, - already_connected, - connected_to_self, - not_accepting_connections, - blocked, - invalid_hello_message, - client_too_old }; - - struct connection_rejected_message - { - static const 
core_message_type_enum type; - - std::string user_agent; - uint32_t core_protocol_version; - fc::ip::endpoint remote_endpoint; - std::string reason_string; - fc::enum_type reason_code; - - connection_rejected_message() {} - connection_rejected_message(const std::string& user_agent, uint32_t core_protocol_version, - const fc::ip::endpoint& remote_endpoint, rejection_reason_code reason_code, - const std::string& reason_string) : - user_agent(user_agent), - core_protocol_version(core_protocol_version), - remote_endpoint(remote_endpoint), - reason_string(reason_string), - reason_code(reason_code) - {} - }; - - struct address_request_message - { - static const core_message_type_enum type; - - address_request_message() {} - }; - - enum class peer_connection_direction { unknown, inbound, outbound }; - enum class firewalled_state { unknown, firewalled, not_firewalled }; - - struct address_info - { - fc::ip::endpoint remote_endpoint; - fc::time_point_sec last_seen_time; - fc::microseconds latency; - node_id_t node_id; - fc::enum_type direction; - fc::enum_type firewalled; - - address_info() {} - address_info(const fc::ip::endpoint& remote_endpoint, - const fc::time_point_sec last_seen_time, - const fc::microseconds latency, - const node_id_t& node_id, - peer_connection_direction direction, - firewalled_state firewalled) : - remote_endpoint(remote_endpoint), - last_seen_time(last_seen_time), - latency(latency), - node_id(node_id), - direction(direction), - firewalled(firewalled) - {} - }; - - struct address_message - { - static const core_message_type_enum type; - - std::vector addresses; - }; - - struct closing_connection_message - { - static const core_message_type_enum type; - - std::string reason_for_closing; - bool closing_due_to_error; - fc::oexception error; - - closing_connection_message() : closing_due_to_error(false) {} - closing_connection_message(const std::string& reason_for_closing, - bool closing_due_to_error = false, - const fc::oexception& error = fc::oexception()) : - reason_for_closing(reason_for_closing), - closing_due_to_error(closing_due_to_error), - error(error) - {} - }; - - struct current_time_request_message - { - static const core_message_type_enum type; - fc::time_point request_sent_time; - - current_time_request_message(){} - current_time_request_message(const fc::time_point request_sent_time) : - request_sent_time(request_sent_time) - {} - }; - - struct current_time_reply_message - { - static const core_message_type_enum type; - fc::time_point request_sent_time; - fc::time_point request_received_time; - fc::time_point reply_transmitted_time; - - current_time_reply_message(){} - current_time_reply_message(const fc::time_point request_sent_time, - const fc::time_point request_received_time, - const fc::time_point reply_transmitted_time = fc::time_point()) : - request_sent_time(request_sent_time), - request_received_time(request_received_time), - reply_transmitted_time(reply_transmitted_time) - {} - }; - - struct check_firewall_message - { - static const core_message_type_enum type; - node_id_t node_id; - fc::ip::endpoint endpoint_to_check; - }; - - enum class firewall_check_result - { - unable_to_check, - unable_to_connect, - connection_successful - }; - - struct check_firewall_reply_message - { - static const core_message_type_enum type; - node_id_t node_id; - fc::ip::endpoint endpoint_checked; - fc::enum_type result; - }; - - struct get_current_connections_request_message - { - static const core_message_type_enum type; - }; - - struct current_connection_data - { - 
uint32_t connection_duration; // in seconds - fc::ip::endpoint remote_endpoint; - node_id_t node_id; - fc::microseconds clock_offset; - fc::microseconds round_trip_delay; - fc::enum_type connection_direction; - fc::enum_type firewalled; - fc::variant_object user_data; - }; - - struct get_current_connections_reply_message - { - static const core_message_type_enum type; - uint32_t upload_rate_one_minute; - uint32_t download_rate_one_minute; - uint32_t upload_rate_fifteen_minutes; - uint32_t download_rate_fifteen_minutes; - uint32_t upload_rate_one_hour; - uint32_t download_rate_one_hour; - std::vector current_connections; - }; - - -} } // eos::net - -FC_REFLECT_ENUM( eos::net::core_message_type_enum, - (trx_message_type) - (block_message_type) - (core_message_type_first) - (item_ids_inventory_message_type) - (blockchain_item_ids_inventory_message_type) - (fetch_blockchain_item_ids_message_type) - (fetch_items_message_type) - (item_not_available_message_type) - (hello_message_type) - (connection_accepted_message_type) - (connection_rejected_message_type) - (address_request_message_type) - (address_message_type) - (closing_connection_message_type) - (current_time_request_message_type) - (current_time_reply_message_type) - (check_firewall_message_type) - (check_firewall_reply_message_type) - (get_current_connections_request_message_type) - (get_current_connections_reply_message_type) - (core_message_type_last) ) - -FC_REFLECT( eos::net::trx_message, (trx) ) -FC_REFLECT( eos::net::block_message, (block)(block_id) ) - -FC_REFLECT( eos::net::item_id, (item_type) - (item_hash) ) -FC_REFLECT( eos::net::item_ids_inventory_message, (item_type) - (item_hashes_available) ) -FC_REFLECT( eos::net::blockchain_item_ids_inventory_message, (total_remaining_item_count) - (item_type) - (item_hashes_available) ) -FC_REFLECT( eos::net::fetch_blockchain_item_ids_message, (item_type) - (blockchain_synopsis) ) -FC_REFLECT( eos::net::fetch_items_message, (item_type) - (items_to_fetch) ) -FC_REFLECT( eos::net::item_not_available_message, (requested_item) ) -FC_REFLECT( eos::net::hello_message, (user_agent) - (core_protocol_version) - (inbound_address) - (inbound_port) - (outbound_port) - (node_public_key) - (signed_shared_secret) - (chain_id) - (user_data) ) - -FC_REFLECT_EMPTY( eos::net::connection_accepted_message ) -FC_REFLECT_ENUM(eos::net::rejection_reason_code, (unspecified) - (different_chain) - (already_connected) - (connected_to_self) - (not_accepting_connections) - (blocked) - (invalid_hello_message) - (client_too_old)) -FC_REFLECT( eos::net::connection_rejected_message, (user_agent) - (core_protocol_version) - (remote_endpoint) - (reason_code) - (reason_string)) -FC_REFLECT_EMPTY( eos::net::address_request_message ) -FC_REFLECT( eos::net::address_info, (remote_endpoint) - (last_seen_time) - (latency) - (node_id) - (direction) - (firewalled) ) -FC_REFLECT( eos::net::address_message, (addresses) ) -FC_REFLECT( eos::net::closing_connection_message, (reason_for_closing) - (closing_due_to_error) - (error) ) -FC_REFLECT_ENUM(eos::net::peer_connection_direction, (unknown) - (inbound) - (outbound)) -FC_REFLECT_ENUM(eos::net::firewalled_state, (unknown) - (firewalled) - (not_firewalled)) - -FC_REFLECT(eos::net::current_time_request_message, (request_sent_time)) -FC_REFLECT(eos::net::current_time_reply_message, (request_sent_time) - (request_received_time) - (reply_transmitted_time)) -FC_REFLECT_ENUM(eos::net::firewall_check_result, (unable_to_check) - (unable_to_connect) - (connection_successful)) 
-FC_REFLECT(eos::net::check_firewall_message, (node_id)(endpoint_to_check)) -FC_REFLECT(eos::net::check_firewall_reply_message, (node_id)(endpoint_checked)(result)) -FC_REFLECT_EMPTY(eos::net::get_current_connections_request_message) -FC_REFLECT(eos::net::current_connection_data, (connection_duration) - (remote_endpoint) - (node_id) - (clock_offset) - (round_trip_delay) - (connection_direction) - (firewalled) - (user_data)) -FC_REFLECT(eos::net::get_current_connections_reply_message, (upload_rate_one_minute) - (download_rate_one_minute) - (upload_rate_fifteen_minutes) - (download_rate_fifteen_minutes) - (upload_rate_one_hour) - (download_rate_one_hour) - (current_connections)) - -#include -#include -#include -namespace std -{ - template<> - struct hash - { - size_t operator()(const eos::net::item_id& item_to_hash) const - { - return fc::city_hash_size_t((char*)&item_to_hash, sizeof(item_to_hash)); - } - }; -} diff --git a/libraries/net/include/eos/net/exceptions.hpp b/libraries/net/include/eos/net/exceptions.hpp deleted file mode 100644 index 0e0c3a4f4a58be7ed1d1924279467f2ad52cf882..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/exceptions.hpp +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#pragma once -#include - -namespace eos { namespace net { - // registered in node.cpp - - FC_DECLARE_EXCEPTION( net_exception, 90000, "P2P Networking Exception" ); - FC_DECLARE_DERIVED_EXCEPTION( send_queue_overflow, eos::net::net_exception, 90001, "send queue for this peer exceeded maximum size" ); - FC_DECLARE_DERIVED_EXCEPTION( insufficient_relay_fee, eos::net::net_exception, 90002, "insufficient relay fee" ); - FC_DECLARE_DERIVED_EXCEPTION( already_connected_to_requested_peer, eos::net::net_exception, 90003, "already connected to requested peer" ); - FC_DECLARE_DERIVED_EXCEPTION( block_older_than_undo_history, eos::net::net_exception, 90004, "block is older than our undo history allows us to process" ); - FC_DECLARE_DERIVED_EXCEPTION( peer_is_on_an_unreachable_fork, eos::net::net_exception, 90005, "peer is on another fork" ); - FC_DECLARE_DERIVED_EXCEPTION( unlinkable_block_exception, eos::net::net_exception, 90006, "unlinkable block" ) - -} } diff --git a/libraries/net/include/eos/net/message.hpp b/libraries/net/include/eos/net/message.hpp deleted file mode 100644 index 0aa64af4dd7ddf1f324c97468e39f4d0b5cbfe8f..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/message.hpp +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once -#include -#include -#include -#include -#include -#include -#include - -namespace eos { namespace net { - - /** - * Defines an 8 byte header that is always present because the minimum encrypted packet - * size is 8 bytes (blowfish). The maximum message size is defined in config.hpp. The channel, - * and message type is also included because almost every channel will have a message type - * field and we might as well include it in the 8 byte header to save space. - */ - struct message_header - { - uint32_t size; // number of bytes in message, capped at MAX_MESSAGE_SIZE - uint32_t msg_type; // every channel gets a 16 bit message type specifier - }; - - typedef fc::sha256 message_hash_type; - - /** - * Abstracts the process of packing/unpacking a message for a - * particular channel. 
- */ - struct message : public message_header - { - std::vector data; - - message(){} - - message( message&& m ) - :message_header(m),data( std::move(m.data) ){} - - message( const message& m ) - :message_header(m),data( m.data ){} - - /** - * Assumes that T::type specifies the message type - */ - template - message( const T& m ) - { - msg_type = T::type; - data = fc::raw::pack(m); - size = (uint32_t)data.size(); - } - - fc::sha256 id()const - { - return fc::sha256::hash( data.data(), (uint32_t)data.size() ); - } - - /** - * Automatically checks the type and deserializes T in the - * opposite process from the constructor. - */ - template - T as()const - { - try { - FC_ASSERT( msg_type == T::type ); - T tmp; - if( data.size() ) - { - fc::datastream ds( data.data(), data.size() ); - fc::raw::unpack( ds, tmp ); - } - else - { - // just to make sure that tmp shouldn't have any data - fc::datastream ds( nullptr, 0 ); - fc::raw::unpack( ds, tmp ); - } - return tmp; - } FC_RETHROW_EXCEPTIONS( warn, - "error unpacking network message as a '${type}' ${x} !=? ${msg_type}", - ("type", fc::get_typename::name() ) - ("x", T::type) - ("msg_type", msg_type) - ); - } - }; - - - - -} } // eos::net - -FC_REFLECT( eos::net::message_header, (size)(msg_type) ) -FC_REFLECT_DERIVED( eos::net::message, (eos::net::message_header), (data) ) diff --git a/libraries/net/include/eos/net/message_oriented_connection.hpp b/libraries/net/include/eos/net/message_oriented_connection.hpp deleted file mode 100644 index 619f6bd3f365ded30dd03618a1880563ed009387..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/message_oriented_connection.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#pragma once -#include -#include - -namespace eos { namespace net { - - namespace detail { class message_oriented_connection_impl; } - - class message_oriented_connection; - - /** receives incoming messages from a message_oriented_connection object */ - class message_oriented_connection_delegate - { - public: - virtual void on_message(message_oriented_connection* originating_connection, const message& received_message) = 0; - virtual void on_connection_closed(message_oriented_connection* originating_connection) = 0; - }; - - /** uses a secure socket to create a connection that reads and writes a stream of `fc::net::message` objects */ - class message_oriented_connection - { - public: - message_oriented_connection(message_oriented_connection_delegate* delegate = nullptr); - ~message_oriented_connection(); - fc::tcp_socket& get_socket(); - - void accept(); - void bind(const fc::ip::endpoint& local_endpoint); - void connect_to(const fc::ip::endpoint& remote_endpoint); - - void send_message(const message& message_to_send); - void close_connection(); - void destroy_connection(); - - uint64_t get_total_bytes_sent() const; - uint64_t get_total_bytes_received() const; - fc::time_point get_last_message_sent_time() const; - fc::time_point get_last_message_received_time() const; - fc::time_point get_connection_time() const; - fc::sha512 get_shared_secret() const; - private: - std::unique_ptr my; - }; - typedef std::shared_ptr message_oriented_connection_ptr; - -} } // eos::net diff --git a/libraries/net/include/eos/net/node.hpp b/libraries/net/include/eos/net/node.hpp deleted file mode 100644 index 908a64cac456cbaf2b8375d3294467daf000ab64..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/node.hpp +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#pragma once - -#include -#include -#include - -#include - -#include - -namespace eos { namespace net { - - using fc::variant_object; - using eos::chain::chain_id_type; - - namespace detail - { - class node_impl; - struct node_impl_deleter - { - void operator()(node_impl*); - }; - } - - // during network development, we need to track message propagation across the network - // using a structure like this: - struct message_propagation_data - { - fc::time_point received_time; - fc::time_point validated_time; - node_id_t originating_peer; - }; - - /** - * @class node_delegate - * @brief used by node reports status to client or fetch data from client - */ - class node_delegate - { - public: - virtual ~node_delegate(){} - - /** - * If delegate has the item, the network has no need to fetch it. - */ - virtual bool has_item( const net::item_id& id ) = 0; - - /** - * @brief Called when a new block comes in from the network - * - * @param sync_mode true if the message was fetched through the sync process, false during normal operation - * @returns true if this message caused the blockchain to switch forks, false if it did not - * - * @throws exception if error validating the item, otherwise the item is - * safe to broadcast on. - */ - virtual bool handle_block( const eos::net::block_message& blk_msg, bool sync_mode, - std::vector& contained_transaction_message_ids ) = 0; - - /** - * @brief Called when a new transaction comes in from the network - * - * @throws exception if error validating the item, otherwise the item is - * safe to broadcast on. - */ - virtual void handle_transaction( const eos::net::trx_message& trx_msg ) = 0; - - /** - * @brief Called when a new message comes in from the network other than a - * block or a transaction. Currently there are no other possible - * messages, so this should never be called. - * - * @throws exception if error validating the item, otherwise the item is - * safe to broadcast on. - */ - virtual void handle_message( const message& message_to_process ) = 0; - - /** - * Assuming all data elements are ordered in some way, this method should - * return up to limit ids that occur *after* from_id. - * On return, remaining_item_count will be set to the number of items - * in our blockchain after the last item returned in the result, - * or 0 if the result contains the last item in the blockchain - */ - virtual std::vector get_block_ids(const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, - uint32_t limit = 2000) = 0; - - /** - * Given the hash of the requested data, fetch the body. - */ - virtual message get_item( const item_id& id ) = 0; - - virtual chain_id_type get_chain_id()const = 0; - - /** - * Returns a synopsis of the blockchain used for syncing. - * This consists of a list of selected item hashes from our current preferred - * blockchain, exponentially falling off into the past. Horrible explanation. - * - * If the blockchain is empty, it will return the empty list. - * If the blockchain has one block, it will return a list containing just that block. - * If it contains more than one block: - * the first element in the list will be the hash of the highest numbered block that - * we cannot undo - * the second element will be the hash of an item at the half way point in the undoable - * segment of the blockchain - * the third will be ~3/4 of the way through the undoable segment of the block chain - * the fourth will be at ~7/8... - * &c. 
- * the last item in the list will be the hash of the most recent block on our preferred chain - */ - virtual std::vector get_blockchain_synopsis(const item_hash_t& reference_point, - uint32_t number_of_blocks_after_reference_point) = 0; - - /** - * Call this after the call to handle_message succeeds. - * - * @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call - * @param item_count the number of items known to the node that haven't been sent to handle_item() yet. - * After `item_count` more calls to handle_item(), the node will be in sync - */ - virtual void sync_status( uint32_t item_type, uint32_t item_count ) = 0; - - /** - * Call any time the number of connected peers changes. - */ - virtual void connection_count_changed( uint32_t c ) = 0; - - virtual uint32_t get_block_number(const item_hash_t& block_id) = 0; - - /** - * Returns the time a block was produced (if block_id = 0, returns genesis time). - * If we don't know about the block, returns time_point_sec::min() - */ - virtual fc::time_point_sec get_block_time(const item_hash_t& block_id) = 0; - - virtual item_hash_t get_head_block_id() const = 0; - - virtual uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const = 0; - - virtual void error_encountered(const std::string& message, const fc::oexception& error) = 0; - virtual uint8_t get_current_block_interval_in_seconds() const = 0; - - }; - - /** - * Information about connected peers that the client may want to make - * available to the user. - */ - struct peer_status - { - uint32_t version; - fc::ip::endpoint host; - /** info contains the fields required by bitcoin-rpc's getpeerinfo call, we will likely - extend it with our own fields. */ - fc::variant_object info; - }; - - /** - * @class node - * @brief provides application independent P2P broadcast and data synchronization - * - * Unanswered questions: - * when does the node start establishing network connections and accepting peers? - * we don't have enough info to start synchronizing until sync_from() is called, - * would we have any reason to connect before that? - */ - class node : public std::enable_shared_from_this - { - public: - node(const std::string& user_agent); - ~node(); - - void close(); - - void set_node_delegate( node_delegate* del ); - - void load_configuration( const fc::path& configuration_directory ); - - virtual void listen_to_p2p_network(); - virtual void connect_to_p2p_network(); - - /** - * Add endpoint to internal level_map database of potential nodes - * to attempt to connect to. This database is consulted any time - * the number connected peers falls below the target. - */ - void add_node( const fc::ip::endpoint& ep ); - - /** - * Attempt to connect to the specified endpoint immediately. - */ - virtual void connect_to_endpoint( const fc::ip::endpoint& ep ); - - /** - * Specifies the network interface and port upon which incoming - * connections should be accepted. - */ - void listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); - - /** - * Call with true to enable listening for incoming connections - */ - void accept_incoming_connections(bool accept); - - /** - * Specifies the port upon which incoming connections should be accepted. - * @param port the port to listen on - * @param wait_if_not_available if true and the port is not available, enter a - * sleep and retry loop to wait for it to become - * available. 
If false and the port is not available, - * just choose a random available port - */ - void listen_on_port(uint16_t port, bool wait_if_not_available); - - /** - * Returns the endpoint the node is listening on. This is usually the same - * as the value previously passed in to listen_on_endpoint, unless we - * were unable to bind to that port. - */ - virtual fc::ip::endpoint get_actual_listening_endpoint() const; - - /** - * @return a list of peers that are currently connected. - */ - std::vector get_connected_peers() const; - - /** return the number of peers we're actively connected to */ - virtual uint32_t get_connection_count() const; - - /** - * Add message to outgoing inventory list, notify peers that - * I have a message ready. - */ - virtual void broadcast( const message& item_to_broadcast ); - virtual void broadcast_transaction( const SignedTransaction& trx ) - { - broadcast( trx_message(trx) ); - } - - /** - * Node starts the process of fetching all items after item_id of the - * given item_type. During this process messages are not broadcast. - */ - virtual void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers); - - bool is_connected() const; - - void set_advanced_node_parameters(const fc::variant_object& params); - fc::variant_object get_advanced_node_parameters(); - message_propagation_data get_transaction_propagation_data(const eos::chain::transaction_id_type& transaction_id); - message_propagation_data get_block_propagation_data(const eos::chain::block_id_type& block_id); - node_id_t get_node_id() const; - void set_allowed_peers(const std::vector& allowed_peers); - - /** - * Instructs the node to forget everything in its peer database, mostly for debugging - * problems where nodes are failing to connect to the network - */ - void clear_peer_database(); - - void set_total_bandwidth_limit(uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second); - - fc::variant_object network_get_info() const; - fc::variant_object network_get_usage_stats() const; - - std::vector get_potential_peers() const; - - void disable_peer_advertising(); - fc::variant_object get_call_statistics() const; - private: - std::unique_ptr my; - }; - - class simulated_network : public node - { - public: - ~simulated_network(); - simulated_network(const std::string& user_agent) : node(user_agent) {} - void listen_to_p2p_network() override {} - void connect_to_p2p_network() override {} - void connect_to_endpoint(const fc::ip::endpoint& ep) override {} - - fc::ip::endpoint get_actual_listening_endpoint() const override { return fc::ip::endpoint(); } - - void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers) override {} - void broadcast(const message& item_to_broadcast) override; - void add_node_delegate(node_delegate* node_delegate_to_add); - - virtual uint32_t get_connection_count() const override { return 8; } - private: - struct node_info; - void message_sender(node_info* destination_node); - std::list network_nodes; - }; - - - typedef std::shared_ptr node_ptr; - typedef std::shared_ptr simulated_network_ptr; - -} } // eos::net - -FC_REFLECT(eos::net::message_propagation_data, (received_time)(validated_time)(originating_peer)); -FC_REFLECT( eos::net::peer_status, (version)(host)(info) ); diff --git a/libraries/net/include/eos/net/peer_connection.hpp b/libraries/net/include/eos/net/peer_connection.hpp deleted file mode 100644 index 512636e56e70eb44478093e5f5998646b14fc55d..0000000000000000000000000000000000000000 --- 
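The synopsis format spelled out in node_delegate::get_blockchain_synopsis() above reduces to a simple spacing rule: start at the highest block that cannot be undone, then repeatedly halve the remaining distance to the head of the preferred chain, ending with the head itself. A minimal sketch of just that spacing, using raw block heights where the real interface returns block hashes (fork handling and unlinked blocks are ignored here):

#include <cstdint>
#include <iostream>
#include <vector>

// Spacing rule only: first the highest block we cannot undo, then the halfway
// point of the undoable segment, then ~3/4, ~7/8, ..., ending with the head block.
std::vector<uint32_t> synopsis_heights(uint32_t last_irreversible, uint32_t head)
{
    std::vector<uint32_t> heights;
    if (head == 0)
        return heights;                          // empty chain -> empty synopsis
    uint32_t next = last_irreversible ? last_irreversible : 1;
    while (next < head)
    {
        heights.push_back(next);
        next += (head - next + 1) / 2;           // halve the remaining distance
    }
    heights.push_back(head);                     // always finish with the head block
    return heights;
}

int main()
{
    for (uint32_t h : synopsis_heights(1000, 1100))
        std::cout << h << ' ';                   // 1000 1050 1075 1088 1094 1097 1099 1100
    std::cout << '\n';
}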
a/libraries/net/include/eos/net/peer_connection.hpp +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once - -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace eos { namespace net - { - struct firewall_check_state_data - { - node_id_t expected_node_id; - fc::ip::endpoint endpoint_to_test; - - // if we're coordinating a firewall check for another node, these are the helper - // nodes we've already had do the test (if this structure is still relevant, that - // that means they have all had indeterminate results - std::set nodes_already_tested; - - // If we're a just a helper node, this is the node we report back to - // when we have a result - node_id_t requesting_peer; - }; - - class peer_connection; - class peer_connection_delegate - { - public: - virtual void on_message(peer_connection* originating_peer, - const message& received_message) = 0; - virtual void on_connection_closed(peer_connection* originating_peer) = 0; - virtual message get_message_for_item(const item_id& item) = 0; - }; - - class peer_connection; - typedef std::shared_ptr peer_connection_ptr; - class peer_connection : public message_oriented_connection_delegate, - public std::enable_shared_from_this - { - public: - enum class our_connection_state - { - disconnected, - just_connected, // if in this state, we have sent a hello_message - connection_accepted, // remote side has sent us a connection_accepted, we're operating normally with them - connection_rejected // remote side has sent us a connection_rejected, we may be exchanging address with them or may just be waiting for them to close - }; - enum class their_connection_state - { - disconnected, - just_connected, // we have not yet received a hello_message - connection_accepted, // we have sent them a connection_accepted - connection_rejected // we have sent them a connection_rejected - }; - enum class connection_negotiation_status - { - disconnected, - connecting, - connected, - accepting, - accepted, - hello_sent, - peer_connection_accepted, - peer_connection_rejected, - negotiation_complete, - closing, - closed - }; - private: - peer_connection_delegate* _node; - fc::optional _remote_endpoint; - message_oriented_connection _message_connection; - - /* a base class for messages on the queue, to 
hide the fact that some - * messages are complete messages and some are only hashes of messages. - */ - struct queued_message - { - fc::time_point enqueue_time; - fc::time_point transmission_start_time; - fc::time_point transmission_finish_time; - - queued_message(fc::time_point enqueue_time = fc::time_point::now()) : - enqueue_time(enqueue_time) - {} - - virtual message get_message(peer_connection_delegate* node) = 0; - /** returns roughly the number of bytes of memory the message is consuming while - * it is sitting on the queue - */ - virtual size_t get_size_in_queue() = 0; - virtual ~queued_message() {} - }; - - /* when you queue up a 'real_queued_message', a full copy of the message is - * stored on the heap until it is sent - */ - struct real_queued_message : queued_message - { - message message_to_send; - size_t message_send_time_field_offset; - - real_queued_message(message message_to_send, - size_t message_send_time_field_offset = (size_t)-1) : - message_to_send(std::move(message_to_send)), - message_send_time_field_offset(message_send_time_field_offset) - {} - - message get_message(peer_connection_delegate* node) override; - size_t get_size_in_queue() override; - }; - - /* when you queue up a 'virtual_queued_message', we just queue up the hash of the - * item we want to send. When it reaches the top of the queue, we make a callback - * to the node to generate the message. - */ - struct virtual_queued_message : queued_message - { - item_id item_to_send; - - virtual_queued_message(item_id item_to_send) : - item_to_send(std::move(item_to_send)) - {} - - message get_message(peer_connection_delegate* node) override; - size_t get_size_in_queue() override; - }; - - - size_t _total_queued_messages_size; - std::queue, std::list > > _queued_messages; - fc::future _send_queued_messages_done; - public: - fc::time_point connection_initiation_time; - fc::time_point connection_closed_time; - fc::time_point connection_terminated_time; - peer_connection_direction direction; - //connection_state state; - firewalled_state is_firewalled; - fc::microseconds clock_offset; - fc::microseconds round_trip_delay; - - our_connection_state our_state; - bool they_have_requested_close; - their_connection_state their_state; - bool we_have_requested_close; - - connection_negotiation_status negotiation_status; - fc::oexception connection_closed_error; - - fc::time_point get_connection_time()const { return _message_connection.get_connection_time(); } - fc::time_point get_connection_terminated_time()const { return connection_terminated_time; } - - /// data about the peer node - /// @{ - /** node_public_key from the hello message, zero-initialized before we get the hello */ - node_id_t node_public_key; - /** the unique identifier we'll use to refer to the node with. zero-initialized before - * we receive the hello message, at which time it will be filled with either the "node_id" - * from the user_data field of the hello, or if none is present it will be filled with a - * copy of node_public_key */ - node_id_t node_id; - uint32_t core_protocol_version; - std::string user_agent; - fc::optional eos_git_revision_sha; - fc::optional eos_git_revision_unix_timestamp; - fc::optional fc_git_revision_sha; - fc::optional fc_git_revision_unix_timestamp; - fc::optional platform; - fc::optional bitness; - - // for inbound connections, these fields record what the peer sent us in - // its hello message. 
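The queued_message hierarchy above is the core of this header: a real_queued_message keeps a full copy of the payload on the heap until it is written, while a virtual_queued_message queues only the item id and asks the node for the full message once it reaches the front of the send queue. A small self-contained illustration of that deferred-materialization pattern, written against plain std types rather than the fc-based classes being deleted here:

#include <functional>
#include <iostream>
#include <memory>
#include <queue>
#include <string>

struct queued_entry {
    virtual std::string get_payload(const std::function<std::string(int)>& provider) = 0;
    virtual ~queued_entry() = default;
};

struct real_entry : queued_entry {
    std::string payload;                                       // full copy held until sent
    explicit real_entry(std::string p) : payload(std::move(p)) {}
    std::string get_payload(const std::function<std::string(int)>&) override { return payload; }
};

struct virtual_entry : queued_entry {
    int item_id;                                               // only the id sits in the queue
    explicit virtual_entry(int id) : item_id(id) {}
    std::string get_payload(const std::function<std::string(int)>& provider) override {
        return provider(item_id);                              // materialized when dequeued
    }
};

int main() {
    auto provider = [](int id) { return "item#" + std::to_string(id); };
    std::queue<std::unique_ptr<queued_entry>> q;
    q.push(std::make_unique<real_entry>("hello"));
    q.push(std::make_unique<virtual_entry>(42));
    while (!q.empty()) {
        std::cout << q.front()->get_payload(provider) << '\n'; // "hello", then "item#42"
        q.pop();
    }
}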
For outbound, they record what we sent the peer - // in our hello message - fc::ip::address inbound_address; - uint16_t inbound_port; - uint16_t outbound_port; - /// @} - - typedef std::unordered_map item_to_time_map_type; - - /// blockchain synchronization state data - /// @{ - boost::container::deque ids_of_items_to_get; /// id of items in the blockchain that this peer has told us about - std::set ids_of_items_being_processed; /// list of all items this peer has offered use that we've already handed to the client but the client hasn't finished processing - uint32_t number_of_unfetched_item_ids; /// number of items in the blockchain that follow ids_of_items_to_get but the peer hasn't yet told us their ids - bool peer_needs_sync_items_from_us; - bool we_need_sync_items_from_peer; - fc::optional, fc::time_point> > item_ids_requested_from_peer; /// we check this to detect a timed-out request and in busy() - item_to_time_map_type sync_items_requested_from_peer; /// ids of blocks we've requested from this peer during sync. fetch from another peer if this peer disconnects - item_hash_t last_block_delegate_has_seen; /// the hash of the last block this peer has told us about that the peer knows - fc::time_point_sec last_block_time_delegate_has_seen; - bool inhibit_fetching_sync_blocks; - /// @} - - /// non-synchronization state data - /// @{ - struct timestamped_item_id - { - item_id item; - fc::time_point_sec timestamp; - timestamped_item_id(const item_id& item, const fc::time_point_sec timestamp) : - item(item), - timestamp(timestamp) - {} - }; - struct timestamp_index{}; - typedef boost::multi_index_container, - std::hash >, - boost::multi_index::ordered_non_unique, - boost::multi_index::member > > > timestamped_items_set_type; - timestamped_items_set_type inventory_peer_advertised_to_us; - timestamped_items_set_type inventory_advertised_to_peer; - - item_to_time_map_type items_requested_from_peer; /// items we've requested from this peer during normal operation. 
fetch from another peer if this peer disconnects - /// @} - - // if they're flooding us with transactions, we set this to avoid fetching for a few seconds to let the - // blockchain catch up - fc::time_point transaction_fetching_inhibited_until; - - uint32_t last_known_fork_block_number; - - fc::future accept_or_connect_task_done; - - firewall_check_state_data *firewall_check_state; -#ifndef NDEBUG - private: - fc::thread* _thread; - unsigned _send_message_queue_tasks_running; // temporary debugging -#endif - private: - peer_connection(peer_connection_delegate* delegate); - void destroy(); - public: - static peer_connection_ptr make_shared(peer_connection_delegate* delegate); // use this instead of the constructor - virtual ~peer_connection(); - - fc::tcp_socket& get_socket(); - void accept_connection(); - void connect_to(const fc::ip::endpoint& remote_endpoint, fc::optional local_endpoint = fc::optional()); - - void on_message(message_oriented_connection* originating_connection, const message& received_message) override; - void on_connection_closed(message_oriented_connection* originating_connection) override; - - void send_queueable_message(std::unique_ptr&& message_to_send); - void send_message(const message& message_to_send, size_t message_send_time_field_offset = (size_t)-1); - void send_item(const item_id& item_to_send); - void close_connection(); - void destroy_connection(); - - uint64_t get_total_bytes_sent() const; - uint64_t get_total_bytes_received() const; - - fc::time_point get_last_message_sent_time() const; - fc::time_point get_last_message_received_time() const; - - fc::optional get_remote_endpoint(); - fc::ip::endpoint get_local_endpoint(); - void set_remote_endpoint(fc::optional new_remote_endpoint); - - bool busy(); - bool idle(); - - bool is_transaction_fetching_inhibited() const; - fc::sha512 get_shared_secret() const; - void clear_old_inventory(); - bool is_inventory_advertised_to_us_list_full_for_transactions() const; - bool is_inventory_advertised_to_us_list_full() const; - bool performing_firewall_check() const; - fc::optional get_endpoint_for_connecting() const; - private: - void send_queued_messages_task(); - void accept_connection_task(); - void connect_to_task(const fc::ip::endpoint& remote_endpoint); - }; - typedef std::shared_ptr peer_connection_ptr; - - } } // end namespace eos::net - -// not sent over the wire, just reflected for logging -FC_REFLECT_ENUM(eos::net::peer_connection::our_connection_state, (disconnected) - (just_connected) - (connection_accepted) - (connection_rejected)) -FC_REFLECT_ENUM(eos::net::peer_connection::their_connection_state, (disconnected) - (just_connected) - (connection_accepted) - (connection_rejected)) -FC_REFLECT_ENUM(eos::net::peer_connection::connection_negotiation_status, (disconnected) - (connecting) - (connected) - (accepting) - (accepted) - (hello_sent) - (peer_connection_accepted) - (peer_connection_rejected) - (negotiation_complete) - (closing) - (closed) ) - -FC_REFLECT( eos::net::peer_connection::timestamped_item_id, (item)(timestamp)); diff --git a/libraries/net/include/eos/net/peer_database.hpp b/libraries/net/include/eos/net/peer_database.hpp deleted file mode 100644 index 0733aeb1290072fc8e10ea5e64778248ed76410b..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/peer_database.hpp +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once -#include - -#include -#include -#include -#include -#include -#include -#include - -namespace eos { namespace net { - - enum potential_peer_last_connection_disposition - { - never_attempted_to_connect, - last_connection_failed, - last_connection_rejected, - last_connection_handshaking_failed, - last_connection_succeeded - }; - - struct potential_peer_record - { - fc::ip::endpoint endpoint; - fc::time_point_sec last_seen_time; - fc::enum_type last_connection_disposition; - fc::time_point_sec last_connection_attempt_time; - uint32_t number_of_successful_connection_attempts; - uint32_t number_of_failed_connection_attempts; - fc::optional last_error; - - potential_peer_record() : - number_of_successful_connection_attempts(0), - number_of_failed_connection_attempts(0){} - - potential_peer_record(fc::ip::endpoint endpoint, - fc::time_point_sec last_seen_time = fc::time_point_sec(), - potential_peer_last_connection_disposition last_connection_disposition = never_attempted_to_connect) : - endpoint(endpoint), - last_seen_time(last_seen_time), - last_connection_disposition(last_connection_disposition), - number_of_successful_connection_attempts(0), - number_of_failed_connection_attempts(0) - {} - }; - - namespace detail - { - class peer_database_impl; - - class peer_database_iterator_impl; - class peer_database_iterator : public boost::iterator_facade - { - public: - peer_database_iterator(); - ~peer_database_iterator(); - explicit peer_database_iterator(peer_database_iterator_impl* impl); - peer_database_iterator( const peer_database_iterator& c ); - - private: - friend class boost::iterator_core_access; - void increment(); - bool equal(const peer_database_iterator& other) const; - const potential_peer_record& dereference() const; - private: - std::unique_ptr my; - }; - } - - - class peer_database - { - public: - peer_database(); - ~peer_database(); - - void open(const fc::path& databaseFilename); - void close(); - void clear(); - - void erase(const fc::ip::endpoint& endpointToErase); - - void update_entry(const potential_peer_record& updatedRecord); - potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); - fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); - - typedef detail::peer_database_iterator iterator; - iterator begin() const; - iterator end() const; - size_t size() const; - private: - 
std::unique_ptr my; - }; - -} } // end namespace eos::net - -FC_REFLECT_ENUM(eos::net::potential_peer_last_connection_disposition, (never_attempted_to_connect)(last_connection_failed)(last_connection_rejected)(last_connection_handshaking_failed)(last_connection_succeeded)) -FC_REFLECT(eos::net::potential_peer_record, (endpoint)(last_seen_time)(last_connection_disposition)(last_connection_attempt_time)(number_of_successful_connection_attempts)(number_of_failed_connection_attempts)(last_error) ) diff --git a/libraries/net/include/eos/net/stcp_socket.hpp b/libraries/net/include/eos/net/stcp_socket.hpp deleted file mode 100644 index 63f8b613bbd94f240462b0d76860a994b2d33d23..0000000000000000000000000000000000000000 --- a/libraries/net/include/eos/net/stcp_socket.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once -#include -#include -#include - -namespace eos { namespace net { - -/** - * Uses ECDH to negotiate a aes key for communicating - * with other nodes on the network. 
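The stcp_socket comment above is the whole design in one sentence: a key exchange on connect gives both ends the same shared secret, which then keys the AES send and receive coders. The elliptic-curve mechanics live in fc and are not reproduced here; the underlying key-agreement idea can be shown with a toy finite-field Diffie-Hellman (toy parameters, deliberately not the curve or the fc API):

#include <cstdint>
#include <iostream>

// Toy modular exponentiation; parameters kept small so uint64_t never overflows.
static uint64_t pow_mod(uint64_t base, uint64_t exp, uint64_t mod) {
    uint64_t result = 1 % mod;
    base %= mod;
    while (exp) {
        if (exp & 1) result = (result * base) % mod;
        base = (base * base) % mod;
        exp >>= 1;
    }
    return result;
}

int main() {
    const uint64_t p = 2147483647;      // toy prime modulus, NOT the curve fc uses
    const uint64_t g = 7;               // toy generator
    const uint64_t a = 123456789;       // this node's private value
    const uint64_t b = 987654321;       // remote node's private value
    const uint64_t A = pow_mod(g, a, p);              // public halves exchanged on connect
    const uint64_t B = pow_mod(g, b, p);
    const uint64_t secret_ours   = pow_mod(B, a, p);  // what we derive
    const uint64_t secret_theirs = pow_mod(A, b, p);  // what the peer derives
    std::cout << (secret_ours == secret_theirs) << '\n';   // prints 1: same key material
}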
- */ -class stcp_socket : public virtual fc::iostream -{ - public: - stcp_socket(); - ~stcp_socket(); - fc::tcp_socket& get_socket() { return _sock; } - void accept(); - - void connect_to( const fc::ip::endpoint& remote_endpoint ); - void bind( const fc::ip::endpoint& local_endpoint ); - - virtual size_t readsome( char* buffer, size_t max ); - virtual size_t readsome( const std::shared_ptr& buf, size_t len, size_t offset ); - virtual bool eof()const; - - virtual size_t writesome( const char* buffer, size_t len ); - virtual size_t writesome( const std::shared_ptr& buf, size_t len, size_t offset ); - - virtual void flush(); - virtual void close(); - - using istream::get; - void get( char& c ) { read( &c, 1 ); } - fc::sha512 get_shared_secret() const { return _shared_secret; } - private: - void do_key_exchange(); - - fc::sha512 _shared_secret; - fc::ecc::private_key _priv_key; - fc::array _buf; - //uint32_t _buf_len; - fc::tcp_socket _sock; - fc::aes_encoder _send_aes; - fc::aes_decoder _recv_aes; - std::shared_ptr _read_buffer; - std::shared_ptr _write_buffer; -#ifndef NDEBUG - bool _read_buffer_in_use; - bool _write_buffer_in_use; -#endif -}; - -typedef std::shared_ptr stcp_socket_ptr; - -} } // eos::net diff --git a/libraries/net/message_oriented_connection.cpp b/libraries/net/message_oriented_connection.cpp deleted file mode 100644 index 62e696a39fbab3779e779d94f262f28f902faf15..0000000000000000000000000000000000000000 --- a/libraries/net/message_oriented_connection.cpp +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef DEFAULT_LOGGER -# undef DEFAULT_LOGGER -#endif -#define DEFAULT_LOGGER "p2p" - -#ifndef NDEBUG -# define VERIFY_CORRECT_THREAD() assert(_thread->is_current()) -#else -# define VERIFY_CORRECT_THREAD() do {} while (0) -#endif - -namespace eos { namespace net { - namespace detail - { - class message_oriented_connection_impl - { - private: - message_oriented_connection* _self; - message_oriented_connection_delegate *_delegate; - stcp_socket _sock; - fc::future _read_loop_done; - uint64_t _bytes_received; - uint64_t _bytes_sent; - - fc::time_point _connected_time; - fc::time_point _last_message_received_time; - fc::time_point _last_message_sent_time; - - bool _send_message_in_progress; - -#ifndef NDEBUG - fc::thread* _thread; -#endif - - void read_loop(); - void start_read_loop(); - public: - fc::tcp_socket& get_socket(); - void accept(); - void connect_to(const fc::ip::endpoint& remote_endpoint); - void bind(const fc::ip::endpoint& local_endpoint); - - message_oriented_connection_impl(message_oriented_connection* self, - message_oriented_connection_delegate* delegate = nullptr); - ~message_oriented_connection_impl(); - - void send_message(const message& message_to_send); - void close_connection(); - void destroy_connection(); - - uint64_t get_total_bytes_sent() const; - uint64_t get_total_bytes_received() const; - - fc::time_point get_last_message_sent_time() const; - fc::time_point get_last_message_received_time() const; - fc::time_point get_connection_time() const { return _connected_time; } - fc::sha512 get_shared_secret() const; - }; - - message_oriented_connection_impl::message_oriented_connection_impl(message_oriented_connection* self, - message_oriented_connection_delegate* delegate) - : _self(self), - _delegate(delegate), - _bytes_received(0), - _bytes_sent(0), - _send_message_in_progress(false) -#ifndef NDEBUG - ,_thread(&fc::thread::current()) -#endif - { - } - message_oriented_connection_impl::~message_oriented_connection_impl() - { - VERIFY_CORRECT_THREAD(); - destroy_connection(); - } - - fc::tcp_socket& message_oriented_connection_impl::get_socket() - { - VERIFY_CORRECT_THREAD(); - return _sock.get_socket(); - } - - void message_oriented_connection_impl::accept() - { - VERIFY_CORRECT_THREAD(); - _sock.accept(); - assert(!_read_loop_done.valid()); // check to be sure we never launch two read loops - _read_loop_done = fc::async([=](){ read_loop(); }, "message read_loop"); - } - - void message_oriented_connection_impl::connect_to(const fc::ip::endpoint& remote_endpoint) - { - VERIFY_CORRECT_THREAD(); - _sock.connect_to(remote_endpoint); - assert(!_read_loop_done.valid()); // check to be sure we never launch two read loops - _read_loop_done = fc::async([=](){ read_loop(); }, "message read_loop"); - } - - void message_oriented_connection_impl::bind(const fc::ip::endpoint& local_endpoint) - { - VERIFY_CORRECT_THREAD(); - _sock.bind(local_endpoint); - } - - - void message_oriented_connection_impl::read_loop() - { - VERIFY_CORRECT_THREAD(); - const int BUFFER_SIZE = 16; - const int LEFTOVER = BUFFER_SIZE - sizeof(message_header); - static_assert(BUFFER_SIZE >= sizeof(message_header), "insufficient buffer"); - - _connected_time = fc::time_point::now(); - - fc::oexception exception_to_rethrow; - bool call_on_connection_closed = false; - - try - { - message m; - while( true ) - { - char buffer[BUFFER_SIZE]; - _sock.read(buffer, BUFFER_SIZE); - _bytes_received += BUFFER_SIZE; - 
memcpy((char*)&m, buffer, sizeof(message_header)); - - FC_ASSERT( m.size <= MAX_MESSAGE_SIZE, "", ("m.size",m.size)("MAX_MESSAGE_SIZE",MAX_MESSAGE_SIZE) ); - - size_t remaining_bytes_with_padding = 16 * ((m.size - LEFTOVER + 15) / 16); - m.data.resize(LEFTOVER + remaining_bytes_with_padding); //give extra 16 bytes to allow for padding added in send call - std::copy(buffer + sizeof(message_header), buffer + sizeof(buffer), m.data.begin()); - if (remaining_bytes_with_padding) - { - _sock.read(&m.data[LEFTOVER], remaining_bytes_with_padding); - _bytes_received += remaining_bytes_with_padding; - } - m.data.resize(m.size); // truncate off the padding bytes - - _last_message_received_time = fc::time_point::now(); - - try - { - // message handling errors are warnings... - _delegate->on_message(_self, m); - } - /// Dedicated catches needed to distinguish from general fc::exception - catch ( const fc::canceled_exception& e ) { throw e; } - catch ( const fc::eof_exception& e ) { throw e; } - catch ( const fc::exception& e) - { - /// Here loop should be continued so exception should be just caught locally. - wlog( "message transmission failed ${er}", ("er", e.to_detail_string() ) ); - throw; - } - } - } - catch ( const fc::canceled_exception& e ) - { - wlog( "caught a canceled_exception in read_loop. this should mean we're in the process of deleting this object already, so there's no need to notify the delegate: ${e}", ("e", e.to_detail_string() ) ); - throw; - } - catch ( const fc::eof_exception& e ) - { - wlog( "disconnected ${e}", ("e", e.to_detail_string() ) ); - call_on_connection_closed = true; - } - catch ( const fc::exception& e ) - { - elog( "disconnected ${er}", ("er", e.to_detail_string() ) ); - call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", e.to_detail_string()))); - } - catch ( const std::exception& e ) - { - elog( "disconnected ${er}", ("er", e.what() ) ); - call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", e.what()))); - } - catch ( ... 
) - { - elog( "unexpected exception" ); - call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", fc::except_str()))); - } - - if (call_on_connection_closed) - _delegate->on_connection_closed(_self); - - if (exception_to_rethrow) - throw *exception_to_rethrow; - } - - void message_oriented_connection_impl::send_message(const message& message_to_send) - { - VERIFY_CORRECT_THREAD(); -#if 0 // this gets too verbose -#ifndef NDEBUG - fc::optional remote_endpoint; - if (_sock.get_socket().is_open()) - remote_endpoint = _sock.get_socket().remote_endpoint(); - struct scope_logger { - const fc::optional& endpoint; - scope_logger(const fc::optional& endpoint) : endpoint(endpoint) { dlog("entering message_oriented_connection::send_message() for peer ${endpoint}", ("endpoint", endpoint)); } - ~scope_logger() { dlog("leaving message_oriented_connection::send_message() for peer ${endpoint}", ("endpoint", endpoint)); } - } send_message_scope_logger(remote_endpoint); -#endif -#endif - struct verify_no_send_in_progress { - bool& var; - verify_no_send_in_progress(bool& var) : var(var) - { - if (var) - elog("Error: two tasks are calling message_oriented_connection::send_message() at the same time"); - assert(!var); - var = true; - } - ~verify_no_send_in_progress() { var = false; } - } _verify_no_send_in_progress(_send_message_in_progress); - - try - { - size_t size_of_message_and_header = sizeof(message_header) + message_to_send.size; - if( message_to_send.size > MAX_MESSAGE_SIZE ) - elog("Trying to send a message larger than MAX_MESSAGE_SIZE. This probably won't work..."); - //pad the message we send to a multiple of 16 bytes - size_t size_with_padding = 16 * ((size_of_message_and_header + 15) / 16); - std::unique_ptr padded_message(new char[size_with_padding]); - memcpy(padded_message.get(), (char*)&message_to_send, sizeof(message_header)); - memcpy(padded_message.get() + sizeof(message_header), message_to_send.data.data(), message_to_send.size ); - _sock.write(padded_message.get(), size_with_padding); - _sock.flush(); - _bytes_sent += size_with_padding; - _last_message_sent_time = fc::time_point::now(); - } FC_RETHROW_EXCEPTIONS( warn, "unable to send message" ); - } - - void message_oriented_connection_impl::close_connection() - { - VERIFY_CORRECT_THREAD(); - _sock.close(); - } - - void message_oriented_connection_impl::destroy_connection() - { - VERIFY_CORRECT_THREAD(); - - fc::optional remote_endpoint; - if (_sock.get_socket().is_open()) - remote_endpoint = _sock.get_socket().remote_endpoint(); - ilog( "in destroy_connection() for ${endpoint}", ("endpoint", remote_endpoint) ); - - if (_send_message_in_progress) - elog("Error: message_oriented_connection is being destroyed while a send_message is in progress. " - "The task calling send_message() should have been canceled already"); - assert(!_send_message_in_progress); - - try - { - _read_loop_done.cancel_and_wait(__FUNCTION__); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while canceling message_oriented_connection's read_loop, ignoring: ${e}", ("e",e) ); - } - catch (...) 
- { - wlog( "Exception thrown while canceling message_oriented_connection's read_loop, ignoring" ); - } - } - - uint64_t message_oriented_connection_impl::get_total_bytes_sent() const - { - VERIFY_CORRECT_THREAD(); - return _bytes_sent; - } - - uint64_t message_oriented_connection_impl::get_total_bytes_received() const - { - VERIFY_CORRECT_THREAD(); - return _bytes_received; - } - - fc::time_point message_oriented_connection_impl::get_last_message_sent_time() const - { - VERIFY_CORRECT_THREAD(); - return _last_message_sent_time; - } - - fc::time_point message_oriented_connection_impl::get_last_message_received_time() const - { - VERIFY_CORRECT_THREAD(); - return _last_message_received_time; - } - - fc::sha512 message_oriented_connection_impl::get_shared_secret() const - { - VERIFY_CORRECT_THREAD(); - return _sock.get_shared_secret(); - } - - } // end namespace eos::net::detail - - - message_oriented_connection::message_oriented_connection(message_oriented_connection_delegate* delegate) : - my(new detail::message_oriented_connection_impl(this, delegate)) - { - } - - message_oriented_connection::~message_oriented_connection() - { - } - - fc::tcp_socket& message_oriented_connection::get_socket() - { - return my->get_socket(); - } - - void message_oriented_connection::accept() - { - my->accept(); - } - - void message_oriented_connection::connect_to(const fc::ip::endpoint& remote_endpoint) - { - my->connect_to(remote_endpoint); - } - - void message_oriented_connection::bind(const fc::ip::endpoint& local_endpoint) - { - my->bind(local_endpoint); - } - - void message_oriented_connection::send_message(const message& message_to_send) - { - my->send_message(message_to_send); - } - - void message_oriented_connection::close_connection() - { - my->close_connection(); - } - - void message_oriented_connection::destroy_connection() - { - my->destroy_connection(); - } - - uint64_t message_oriented_connection::get_total_bytes_sent() const - { - return my->get_total_bytes_sent(); - } - - uint64_t message_oriented_connection::get_total_bytes_received() const - { - return my->get_total_bytes_received(); - } - - fc::time_point message_oriented_connection::get_last_message_sent_time() const - { - return my->get_last_message_sent_time(); - } - - fc::time_point message_oriented_connection::get_last_message_received_time() const - { - return my->get_last_message_received_time(); - } - fc::time_point message_oriented_connection::get_connection_time() const - { - return my->get_connection_time(); - } - fc::sha512 message_oriented_connection::get_shared_secret() const - { - return my->get_shared_secret(); - } - -} } // end namespace eos::net diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp deleted file mode 100644 index d8475422d4f9d5e081e8717690402cba990bbe7e..0000000000000000000000000000000000000000 --- a/libraries/net/node.cpp +++ /dev/null @@ -1,5507 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. 
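send_message() and read_loop() above agree on a simple framing scheme: a fixed-size message_header, a payload of m.size bytes, and everything rounded up to whole 16-byte blocks because the underlying stcp_socket moves AES blocks. The receiver reads one 16-byte block to recover the header, computes how many padded bytes are still outstanding, and truncates the buffer back to m.size afterwards. A quick check of that arithmetic, with an assumed 8-byte header standing in for sizeof(message_header):

#include <cstddef>
#include <iostream>

int main() {
    const std::size_t header_size = 8;   // assumed sizeof(message_header); illustrative only
    const std::size_t buffer_size = 16;  // read_loop always reads one 16-byte block first
    const std::size_t leftover    = buffer_size - header_size;

    for (std::size_t payload : {0u, 5u, 8u, 100u}) {
        std::size_t total        = header_size + payload;
        std::size_t padded_total = 16 * ((total + 15) / 16);   // bytes send_message() writes
        std::size_t second_read  = payload > leftover
                                     ? 16 * ((payload - leftover + 15) / 16)
                                     : 0;                      // bytes read_loop() still needs
        std::cout << "payload=" << payload
                  << "  sent=" << padded_total
                  << "  second_read=" << second_read << '\n';  // 16 + second_read == sent
    }
}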
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include - -//#define ENABLE_DEBUG_ULOGS - -#ifdef DEFAULT_LOGGER -# undef DEFAULT_LOGGER -#endif -#define DEFAULT_LOGGER "p2p" - -#define P2P_IN_DEDICATED_THREAD 1 - -#define INVOCATION_COUNTER(name) \ - static unsigned total_ ## name ## _counter = 0; \ - static unsigned active_ ## name ## _counter = 0; \ - struct name ## _invocation_logger { \ - unsigned *total; \ - unsigned *active; \ - name ## _invocation_logger(unsigned *total, unsigned *active) : \ - total(total), active(active) \ - { \ - ++*total; \ - ++*active; \ - dlog("NEWDEBUG: Entering " #name ", now ${total} total calls, ${active} active calls", ("total", *total)("active", *active)); \ - } \ - ~name ## _invocation_logger() \ - { \ - --*active; \ - dlog("NEWDEBUG: Leaving " #name ", now ${total} total calls, ${active} active calls", ("total", *total)("active", *active)); \ - } \ - } invocation_logger(&total_ ## name ## _counter, &active_ ## name ## _counter) - -//log these messages even at warn level when operating on the test network -#ifdef EOS_TEST_NETWORK -#define testnetlog wlog -#else -#define testnetlog(...) 
do {} while (0) -#endif - -namespace eos { namespace net { - - namespace detail - { - namespace bmi = boost::multi_index; - class blockchain_tied_message_cache - { - private: - static const uint32_t cache_duration_in_blocks = EOS_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS; - - struct message_hash_index{}; - struct message_contents_hash_index{}; - struct block_clock_index{}; - struct message_info - { - message_hash_type message_hash; - message message_body; - uint32_t block_clock_when_received; - - // for network performance stats - message_propagation_data propagation_data; - fc::uint256 message_contents_hash; // hash of whatever the message contains (if it's a transaction, this is the transaction id, if it's a block, it's the block_id) - - message_info( const message_hash_type& message_hash, - const message& message_body, - uint32_t block_clock_when_received, - const message_propagation_data& propagation_data, - fc::uint256 message_contents_hash ) : - message_hash( message_hash ), - message_body( message_body ), - block_clock_when_received( block_clock_when_received ), - propagation_data( propagation_data ), - message_contents_hash( message_contents_hash ) - {} - }; - typedef boost::multi_index_container - < message_info, - bmi::indexed_by< bmi::ordered_unique< bmi::tag, - bmi::member >, - bmi::ordered_non_unique< bmi::tag, - bmi::member >, - bmi::ordered_non_unique< bmi::tag, - bmi::member > > - > message_cache_container; - - message_cache_container _message_cache; - - uint32_t block_clock; - - public: - blockchain_tied_message_cache() : - block_clock( 0 ) - {} - void block_accepted(); - void cache_message(const message& message_to_cache, const message_hash_type& hash_of_message_to_cache, - const message_propagation_data& propagation_data, const fc::uint256& message_content_hash); - message get_message( const message_hash_type& hash_of_message_to_lookup ); - message_propagation_data get_message_propagation_data( const fc::sha256& hash_of_message_contents_to_lookup ) const; - size_t size() const { return _message_cache.size(); } - }; - - void blockchain_tied_message_cache::block_accepted() - { - ++block_clock; - if( block_clock > cache_duration_in_blocks ) - _message_cache.get().erase(_message_cache.get().begin(), - _message_cache.get().lower_bound(block_clock - cache_duration_in_blocks ) ); - } - - void blockchain_tied_message_cache::cache_message(const message& message_to_cache, - const message_hash_type& hash_of_message_to_cache, - const message_propagation_data& propagation_data, - const fc::uint256& message_content_hash) - { - _message_cache.insert(message_info(hash_of_message_to_cache, - message_to_cache, - block_clock, - propagation_data, - message_content_hash)); - } - - message blockchain_tied_message_cache::get_message( const message_hash_type& hash_of_message_to_lookup ) - { - message_cache_container::index::type::const_iterator iter = - _message_cache.get().find(hash_of_message_to_lookup ); - if( iter != _message_cache.get().end() ) - return iter->message_body; - FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" ); - } - - message_propagation_data blockchain_tied_message_cache::get_message_propagation_data( const fc::sha256& hash_of_message_contents_to_lookup ) const - { - if( hash_of_message_contents_to_lookup != fc::sha256() ) - { - message_cache_container::index::type::const_iterator iter = - _message_cache.get().find(hash_of_message_contents_to_lookup ); - if( iter != _message_cache.get().end() ) - return iter->propagation_data; - } - 
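blockchain_tied_message_cache above expires entries by block count rather than wall-clock time: every accepted block advances block_clock, and anything cached more than cache_duration_in_blocks blocks ago is erased. The same idea in a few lines, with a std::multimap standing in for the boost multi_index container used in the deleted code:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Entries are keyed by the block clock at which they were received; expiry is
// driven by block_accepted(), not by timestamps.
class block_clock_cache {
    std::multimap<uint32_t, std::string> _by_clock;
    uint32_t _block_clock = 0;
    const uint32_t _duration_in_blocks;
public:
    explicit block_clock_cache(uint32_t duration) : _duration_in_blocks(duration) {}
    void cache(std::string msg) { _by_clock.emplace(_block_clock, std::move(msg)); }
    void block_accepted() {
        ++_block_clock;
        if (_block_clock > _duration_in_blocks)
            _by_clock.erase(_by_clock.begin(),
                            _by_clock.lower_bound(_block_clock - _duration_in_blocks));
    }
    std::size_t size() const { return _by_clock.size(); }
};

int main() {
    block_clock_cache cache(2);            // keep messages for ~2 blocks
    cache.cache("tx-a");
    cache.block_accepted();                // clock = 1
    cache.cache("tx-b");
    cache.block_accepted();                // clock = 2
    cache.block_accepted();                // clock = 3, "tx-a" (cached at clock 0) expires
    std::cout << cache.size() << '\n';     // prints 1
}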
FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" ); - } - -///////////////////////////////////////////////////////////////////////////////////////////////////////// - - // This specifies configuration info for the local node. It's stored as JSON - // in the configuration directory (application data directory) - struct node_configuration - { - node_configuration() : accept_incoming_connections(true), wait_if_endpoint_is_busy(true) {} - - fc::ip::endpoint listen_endpoint; - bool accept_incoming_connections; - bool wait_if_endpoint_is_busy; - /** - * Originally, our p2p code just had a 'node-id' that was a random number identifying this node - * on the network. This is now a private key/public key pair, where the public key is used - * in place of the old random node-id. The private part is unused, but might be used in - * the future to support some notion of trusted peers. - */ - fc::ecc::private_key private_key; - }; - - -} } } // end namespace eos::net::detail -FC_REFLECT(eos::net::detail::node_configuration, (listen_endpoint) - (accept_incoming_connections) - (wait_if_endpoint_is_busy) - (private_key)); - -namespace eos { namespace net { namespace detail { - - // when requesting items from peers, we want to prioritize any blocks before - // transactions, but otherwise request items in the order we heard about them - struct prioritized_item_id - { - item_id item; - unsigned sequence_number; - fc::time_point timestamp; // the time we last heard about this item in an inventory message - - prioritized_item_id(const item_id& item, unsigned sequence_number) : - item(item), - sequence_number(sequence_number), - timestamp(fc::time_point::now()) - {} - bool operator<(const prioritized_item_id& rhs) const - { - static_assert(eos::net::block_message_type > eos::net::trx_message_type, - "block_message_type must be greater than trx_message_type for prioritized_item_ids to sort correctly"); - if (item.item_type != rhs.item.item_type) - return item.item_type > rhs.item.item_type; - return (signed)(rhs.sequence_number - sequence_number) > 0; - } - }; - -///////////////////////////////////////////////////////////////////////////////////////////////////////// - class statistics_gathering_node_delegate_wrapper : public node_delegate - { - private: - node_delegate *_node_delegate; - fc::thread *_thread; - - typedef boost::accumulators::accumulator_set > call_stats_accumulator; -#define NODE_DELEGATE_METHOD_NAMES (has_item) \ - (handle_message) \ - (handle_block) \ - (handle_transaction) \ - (get_block_ids) \ - (get_item) \ - (get_chain_id) \ - (get_blockchain_synopsis) \ - (sync_status) \ - (connection_count_changed) \ - (get_block_number) \ - (get_block_time) \ - (get_head_block_id) \ - (estimate_last_known_fork_from_git_revision_timestamp) \ - (error_encountered) \ - (get_current_block_interval_in_seconds) - - -#define DECLARE_ACCUMULATOR(r, data, method_name) \ - mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator)); \ - mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator)); \ - mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator)); - BOOST_PP_SEQ_FOR_EACH(DECLARE_ACCUMULATOR, unused, NODE_DELEGATE_METHOD_NAMES) -#undef DECLARE_ACCUMULATOR - - class call_statistics_collector - { - private: - fc::time_point _call_requested_time; - fc::time_point _begin_execution_time; - fc::time_point _execution_completed_time; - const char* _method_name; 
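The prioritized_item_id comparison above encodes the fetch policy in two lines: any block sorts ahead of any transaction (the static_assert pins block_message_type above trx_message_type), and within a type the item heard about first wins, with a signed subtraction so the sequence counter can wrap. Exercised in isolation, with stand-in type values:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct prioritized_item {
    uint32_t item_type;        // stand-in values: 1001 = block, 1000 = transaction
    unsigned sequence_number;  // order in which the item was first heard about
    bool operator<(const prioritized_item& rhs) const {
        if (item_type != rhs.item_type)
            return item_type > rhs.item_type;                        // blocks before transactions
        return (signed)(rhs.sequence_number - sequence_number) > 0;  // then FIFO, wrap-safe
    }
};

int main() {
    std::vector<prioritized_item> items{{1000, 7}, {1001, 9}, {1000, 3}, {1001, 8}};
    std::sort(items.begin(), items.end());
    for (const auto& i : items)
        std::cout << i.item_type << '/' << i.sequence_number << ' ';
    std::cout << '\n';   // prints: 1001/8 1001/9 1000/3 1000/7
}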
- call_stats_accumulator* _execution_accumulator; - call_stats_accumulator* _delay_before_accumulator; - call_stats_accumulator* _delay_after_accumulator; - public: - class actual_execution_measurement_helper - { - call_statistics_collector &_collector; - public: - actual_execution_measurement_helper(call_statistics_collector& collector) : - _collector(collector) - { - _collector.starting_execution(); - } - ~actual_execution_measurement_helper() - { - _collector.execution_completed(); - } - }; - call_statistics_collector(const char* method_name, - call_stats_accumulator* execution_accumulator, - call_stats_accumulator* delay_before_accumulator, - call_stats_accumulator* delay_after_accumulator) : - _call_requested_time(fc::time_point::now()), - _method_name(method_name), - _execution_accumulator(execution_accumulator), - _delay_before_accumulator(delay_before_accumulator), - _delay_after_accumulator(delay_after_accumulator) - {} - ~call_statistics_collector() - { - fc::time_point end_time(fc::time_point::now()); - fc::microseconds actual_execution_time(_execution_completed_time - _begin_execution_time); - fc::microseconds delay_before(_begin_execution_time - _call_requested_time); - fc::microseconds delay_after(end_time - _execution_completed_time); - fc::microseconds total_duration(actual_execution_time + delay_before + delay_after); - (*_execution_accumulator)(actual_execution_time.count()); - (*_delay_before_accumulator)(delay_before.count()); - (*_delay_after_accumulator)(delay_after.count()); - if (total_duration > fc::milliseconds(500)) - { - ilog("Call to method node_delegate::${method} took ${total_duration}us, longer than our target maximum of 500ms", - ("method", _method_name) - ("total_duration", total_duration.count())); - ilog("Actual execution took ${execution_duration}us, with a ${delegate_delay}us delay before the delegate thread started " - "executing the method, and a ${p2p_delay}us delay after it finished before the p2p thread started processing the response", - ("execution_duration", actual_execution_time) - ("delegate_delay", delay_before) - ("p2p_delay", delay_after)); - } - } - void starting_execution() - { - _begin_execution_time = fc::time_point::now(); - } - void execution_completed() - { - _execution_completed_time = fc::time_point::now(); - } - }; - public: - statistics_gathering_node_delegate_wrapper(node_delegate* delegate, fc::thread* thread_for_delegate_calls); - - fc::variant_object get_call_statistics(); - - bool has_item( const net::item_id& id ) override; - void handle_message( const message& ) override; - bool handle_block( const eos::net::block_message& block_message, bool sync_mode, std::vector& contained_transaction_message_ids ) override; - void handle_transaction( const eos::net::trx_message& transaction_message ) override; - std::vector get_block_ids(const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, - uint32_t limit = 2000) override; - message get_item( const item_id& id ) override; - chain_id_type get_chain_id() const override; - std::vector get_blockchain_synopsis(const item_hash_t& reference_point, - uint32_t number_of_blocks_after_reference_point) override; - void sync_status( uint32_t item_type, uint32_t item_count ) override; - void connection_count_changed( uint32_t c ) override; - uint32_t get_block_number(const item_hash_t& block_id) override; - fc::time_point_sec get_block_time(const item_hash_t& block_id) override; - item_hash_t get_head_block_id() const override; - uint32_t 
estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override; - void error_encountered(const std::string& message, const fc::oexception& error) override; - uint8_t get_current_block_interval_in_seconds() const override; - }; - -///////////////////////////////////////////////////////////////////////////////////////////////////////// - - class node_impl : public peer_connection_delegate - { - public: -#ifdef P2P_IN_DEDICATED_THREAD - std::shared_ptr _thread; -#endif // P2P_IN_DEDICATED_THREAD - std::unique_ptr _delegate; - fc::sha256 _chain_id; - -#define NODE_CONFIGURATION_FILENAME "node_config.json" -#define POTENTIAL_PEER_DATABASE_FILENAME "peers.json" - fc::path _node_configuration_directory; - node_configuration _node_configuration; - - /// stores the endpoint we're listening on. This will be the same as - // _node_configuration.listen_endpoint, unless that endpoint was already - // in use. - fc::ip::endpoint _actual_listening_endpoint; - - /// we determine whether we're firewalled by asking other nodes. Store the result here: - firewalled_state _is_firewalled; - /// if we're behind NAT, our listening endpoint address will appear different to the rest of the world. store it here. - fc::optional _publicly_visible_listening_endpoint; - fc::time_point _last_firewall_check_message_sent; - - /// used by the task that manages connecting to peers - // @{ - std::list _add_once_node_list; /// list of peers we want to connect to as soon as possible - - peer_database _potential_peer_db; - fc::promise::ptr _retrigger_connect_loop_promise; - bool _potential_peer_database_updated; - fc::future _p2p_network_connect_loop_done; - // @} - - /// used by the task that fetches sync items during synchronization - // @{ - fc::promise::ptr _retrigger_fetch_sync_items_loop_promise; - bool _sync_items_to_fetch_updated; - fc::future _fetch_sync_items_loop_done; - - typedef std::unordered_map active_sync_requests_map; - - active_sync_requests_map _active_sync_requests; /// list of sync blocks we've asked for from peers but have not yet received - std::list _new_received_sync_items; /// list of sync blocks we've just received but haven't yet tried to process - std::list _received_sync_items; /// list of sync blocks we've received, but can't yet process because we are still missing blocks that come earlier in the chain - // @} - - fc::future _process_backlog_of_sync_blocks_done; - bool _suspend_fetching_sync_blocks; - - /// used by the task that fetches items during normal operation - // @{ - fc::promise::ptr _retrigger_fetch_item_loop_promise; - bool _items_to_fetch_updated; - fc::future _fetch_item_loop_done; - - struct item_id_index{}; - typedef boost::multi_index_container >, - boost::multi_index::hashed_unique, - boost::multi_index::member, - std::hash > > - > items_to_fetch_set_type; - unsigned _items_to_fetch_sequence_counter; - items_to_fetch_set_type _items_to_fetch; /// list of items we know another peer has and we want - peer_connection::timestamped_items_set_type _recently_failed_items; /// list of transactions we've recently pushed and had rejected by the delegate - // @} - - /// used by the task that advertises inventory during normal operation - // @{ - fc::promise::ptr _retrigger_advertise_inventory_loop_promise; - fc::future _advertise_inventory_loop_done; - std::unordered_set _new_inventory; /// list of items we have received but not yet advertised to our peers - // @} - - fc::future _terminate_inactive_connections_loop_done; - uint8_t 
_recent_block_interval_in_seconds; // a cached copy of the block interval, to avoid a thread hop to the blockchain to get the current value - - std::string _user_agent_string; - /** _node_public_key is a key automatically generated when the client is first run, stored in - * node_config.json. It doesn't really have much of a purpose yet, there was just some thought - * that we might someday have a use for nodes having a private key (sent in hello messages) - */ - node_id_t _node_public_key; - /** - * _node_id is a random number generated each time the client is launched, used to prevent us - * from connecting to the same client multiple times (sent in hello messages). - * Since this was introduced after the hello_message was finalized, this is sent in the - * user_data field. - * While this shares the same underlying type as a public key, it is really just a random - * number. - */ - node_id_t _node_id; - - /** if we have less than `_desired_number_of_connections`, we will try to connect with more nodes */ - uint32_t _desired_number_of_connections; - /** if we have _maximum_number_of_connections or more, we will refuse any inbound connections */ - uint32_t _maximum_number_of_connections; - /** retry connections to peers that have failed or rejected us this often, in seconds */ - uint32_t _peer_connection_retry_timeout; - /** how many seconds of inactivity are permitted before disconnecting a peer */ - uint32_t _peer_inactivity_timeout; - - fc::tcp_server _tcp_server; - fc::future _accept_loop_complete; - - /** Stores all connections which have not yet finished key exchange or are still sending initial handshaking messages - * back and forth (not yet ready to initiate syncing) */ - std::unordered_set _handshaking_connections; - /** stores fully established connections we're either syncing with or in normal operation with */ - std::unordered_set _active_connections; - /** stores connections we've closed (sent closing message, not actually closed), but are still waiting for the remote end to close before we delete them */ - std::unordered_set _closing_connections; - /** stores connections we've closed, but are still waiting for the OS to notify us that the socket is really closed */ - std::unordered_set _terminating_connections; - - boost::circular_buffer _most_recent_blocks_accepted; // the /n/ most recent blocks we've accepted (currently tuned to the max number of connections) - - uint32_t _sync_item_type; - uint32_t _total_number_of_unfetched_items; /// the number of items we still need to fetch while syncing - std::vector _hard_fork_block_numbers; /// list of all block numbers where there are hard forks - - blockchain_tied_message_cache _message_cache; /// cache message we have received and might be required to provide to other peers via inventory requests - - fc::rate_limiting_group _rate_limiter; - - uint32_t _last_reported_number_of_connections; // number of connections last reported to the client (to avoid sending duplicate messages) - - bool _peer_advertising_disabled; - - fc::future _fetch_updated_peer_lists_loop_done; - - boost::circular_buffer _average_network_read_speed_seconds; - boost::circular_buffer _average_network_write_speed_seconds; - boost::circular_buffer _average_network_read_speed_minutes; - boost::circular_buffer _average_network_write_speed_minutes; - boost::circular_buffer _average_network_read_speed_hours; - boost::circular_buffer _average_network_write_speed_hours; - unsigned _average_network_usage_second_counter; - unsigned _average_network_usage_minute_counter; 
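The trio of circular buffers near the end of node_impl above (per-second, per-minute, per-hour read/write speeds) points at the usual rollup scheme: record one sample per second and, every 60 samples, fold their average into the next-coarser buffer. The node.cpp body implementing update_bandwidth_data() is not shown in this section, so the following is only a guess at that rollup, written against std::deque rather than boost::circular_buffer:

#include <cstdint>
#include <deque>
#include <iostream>
#include <numeric>

// Hypothetical rollup: keep the last 60 one-second samples; every full minute,
// push their average into the per-minute history (per-hour would work the same way).
struct bandwidth_history {
    std::deque<uint32_t> per_second;
    std::deque<uint32_t> per_minute;
    unsigned second_counter = 0;

    void record_second(uint32_t bytes_this_second) {
        per_second.push_back(bytes_this_second);
        if (per_second.size() > 60) per_second.pop_front();
        if (++second_counter % 60 == 0) {
            uint64_t sum = std::accumulate(per_second.begin(), per_second.end(), uint64_t{0});
            per_minute.push_back(static_cast<uint32_t>(sum / per_second.size()));
            if (per_minute.size() > 60) per_minute.pop_front();
        }
    }
};

int main() {
    bandwidth_history h;
    for (unsigned s = 0; s < 120; ++s)
        h.record_second(1000 + s);             // pretend ~1 KB/s, slowly rising
    std::cout << h.per_minute.size() << " minute samples, latest avg "
              << h.per_minute.back() << " bytes/s\n";   // 2 minute samples
}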
- - fc::time_point_sec _bandwidth_monitor_last_update_time; - fc::future _bandwidth_monitor_loop_done; - - fc::future _dump_node_status_task_done; - - /* We have two alternate paths through the schedule_peer_for_deletion code -- one that - * uses a mutex to prevent one fiber from adding items to the queue while another is deleting - * items from it, and one that doesn't. The one that doesn't is simpler and more efficient - * code, but we're keeping around the version that uses the mutex because it crashes, and - * this crash probably indicates a bug in our underlying threading code that needs - * fixing. To produce the bug, define USE_PEERS_TO_DELETE_MUTEX and then connect up - * to the network and set your desired/max connection counts high - */ -//#define USE_PEERS_TO_DELETE_MUTEX 1 -#ifdef USE_PEERS_TO_DELETE_MUTEX - fc::mutex _peers_to_delete_mutex; -#endif - std::list _peers_to_delete; - fc::future _delayed_peer_deletion_task_done; - -#ifdef ENABLE_P2P_DEBUGGING_API - std::set _allowed_peers; -#endif // ENABLE_P2P_DEBUGGING_API - - bool _node_is_shutting_down; // set to true when we begin our destructor, used to prevent us from starting new tasks while we're shutting down - - unsigned _maximum_number_of_blocks_to_handle_at_one_time; - unsigned _maximum_number_of_sync_blocks_to_prefetch; - unsigned _maximum_blocks_per_peer_during_syncing; - - std::list > _handle_message_calls_in_progress; - - node_impl(const std::string& user_agent); - virtual ~node_impl(); - - void save_node_configuration(); - - void p2p_network_connect_loop(); - void trigger_p2p_network_connect_loop(); - - bool have_already_received_sync_item( const item_hash_t& item_hash ); - void request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request ); - void request_sync_items_from_peer( const peer_connection_ptr& peer, const std::vector& items_to_request ); - void fetch_sync_items_loop(); - void trigger_fetch_sync_items_loop(); - - bool is_item_in_any_peers_inventory(const item_id& item) const; - void fetch_items_loop(); - void trigger_fetch_items_loop(); - - void advertise_inventory_loop(); - void trigger_advertise_inventory_loop(); - - void terminate_inactive_connections_loop(); - - void fetch_updated_peer_lists_loop(); - void update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second); - void bandwidth_monitor_loop(); - void dump_node_status_task(); - - bool is_accepting_new_connections(); - bool is_wanting_new_connections(); - uint32_t get_number_of_connections(); - peer_connection_ptr get_peer_by_node_id(const node_id_t& id); - - bool is_already_connected_to_id(const node_id_t& node_id); - bool merge_address_info_with_potential_peer_database( const std::vector addresses ); - void display_current_connections(); - uint32_t calculate_unsynced_block_count_from_all_peers(); - std::vector create_blockchain_synopsis_for_peer( const peer_connection* peer ); - void fetch_next_batch_of_item_ids_from_peer( peer_connection* peer, bool reset_fork_tracking_data_for_peer = false ); - - fc::variant_object generate_hello_user_data(); - void parse_hello_user_data_for_peer( peer_connection* originating_peer, const fc::variant_object& user_data ); - - void on_message( peer_connection* originating_peer, - const message& received_message ) override; - - void on_hello_message( peer_connection* originating_peer, - const hello_message& hello_message_received ); - - void on_connection_accepted_message( peer_connection* originating_peer, - const connection_accepted_message& 
connection_accepted_message_received ); - - void on_connection_rejected_message( peer_connection* originating_peer, - const connection_rejected_message& connection_rejected_message_received ); - - void on_address_request_message( peer_connection* originating_peer, - const address_request_message& address_request_message_received ); - - void on_address_message( peer_connection* originating_peer, - const address_message& address_message_received ); - - void on_fetch_blockchain_item_ids_message( peer_connection* originating_peer, - const fetch_blockchain_item_ids_message& fetch_blockchain_item_ids_message_received ); - - void on_blockchain_item_ids_inventory_message( peer_connection* originating_peer, - const blockchain_item_ids_inventory_message& blockchain_item_ids_inventory_message_received ); - - void on_fetch_items_message( peer_connection* originating_peer, - const fetch_items_message& fetch_items_message_received ); - - void on_item_not_available_message( peer_connection* originating_peer, - const item_not_available_message& item_not_available_message_received ); - - void on_item_ids_inventory_message( peer_connection* originating_peer, - const item_ids_inventory_message& item_ids_inventory_message_received ); - - void on_closing_connection_message( peer_connection* originating_peer, - const closing_connection_message& closing_connection_message_received ); - - void on_current_time_request_message( peer_connection* originating_peer, - const current_time_request_message& current_time_request_message_received ); - - void on_current_time_reply_message( peer_connection* originating_peer, - const current_time_reply_message& current_time_reply_message_received ); - - void forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state); - - void on_check_firewall_message(peer_connection* originating_peer, - const check_firewall_message& check_firewall_message_received); - - void on_check_firewall_reply_message(peer_connection* originating_peer, - const check_firewall_reply_message& check_firewall_reply_message_received); - - void on_get_current_connections_request_message(peer_connection* originating_peer, - const get_current_connections_request_message& get_current_connections_request_message_received); - - void on_get_current_connections_reply_message(peer_connection* originating_peer, - const get_current_connections_reply_message& get_current_connections_reply_message_received); - - void on_connection_closed(peer_connection* originating_peer) override; - - void send_sync_block_to_node_delegate(const eos::net::block_message& block_message_to_send); - void process_backlog_of_sync_blocks(); - void trigger_process_backlog_of_sync_blocks(); - void process_block_during_sync(peer_connection* originating_peer, const eos::net::block_message& block_message, const message_hash_type& message_hash); - void process_block_during_normal_operation(peer_connection* originating_peer, const eos::net::block_message& block_message, const message_hash_type& message_hash); - void process_block_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash); - - void process_ordinary_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash); - - void start_synchronizing(); - void start_synchronizing_with_peer(const peer_connection_ptr& peer); - - void new_peer_just_added(const peer_connection_ptr& peer); /// called after a peer finishes handshaking, kicks off syncing - - 
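// --- illustrative sketch, not part of the original class -------------------
// The on_*_message handlers declared above are selected in on_message()
// (shown further below) by switching on the received message's type and
// converting it to the concrete message struct.  This tiny, self-contained
// sketch shows the equivalent table-driven dispatch; the enum, message struct
// and handler bodies are invented for illustration only.
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>

enum class sketch_message_type : uint32_t { hello = 5000, address_request = 5004 };

struct sketch_message
{
   sketch_message_type type;
   std::string payload;   // stands in for the serialized message body
};

int main()
{
   std::map<sketch_message_type, std::function<void(const sketch_message&)>> handlers;
   handlers[sketch_message_type::hello] =
      [](const sketch_message& m) { std::cout << "on_hello_message: " << m.payload << "\n"; };
   handlers[sketch_message_type::address_request] =
      [](const sketch_message&) { std::cout << "on_address_request_message\n"; };

   sketch_message incoming{sketch_message_type::hello, "user_agent=test"};
   auto handler = handlers.find(incoming.type);
   if (handler != handlers.end())
      handler->second(incoming);                  // dispatch to the matching on_* handler
   else
      std::cout << "unhandled message type\n";    // analogous to process_ordinary_message
}
// ---------------------------------------------------------------------------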
void close(); - - void accept_connection_task(peer_connection_ptr new_peer); - void accept_loop(); - void send_hello_message(const peer_connection_ptr& peer); - void connect_to_task(peer_connection_ptr new_peer, const fc::ip::endpoint& remote_endpoint); - bool is_connection_to_endpoint_in_progress(const fc::ip::endpoint& remote_endpoint); - - void move_peer_to_active_list(const peer_connection_ptr& peer); - void move_peer_to_closing_list(const peer_connection_ptr& peer); - void move_peer_to_terminating_list(const peer_connection_ptr& peer); - - peer_connection_ptr get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); - - void dump_node_status(); - - void delayed_peer_deletion_task(); - void schedule_peer_for_deletion(const peer_connection_ptr& peer_to_delete); - - void disconnect_from_peer( peer_connection* originating_peer, - const std::string& reason_for_disconnect, - bool caused_by_error = false, - const fc::oexception& additional_data = fc::oexception() ); - - // methods implementing node's public interface - void set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls); - void load_configuration( const fc::path& configuration_directory ); - void listen_to_p2p_network(); - void connect_to_p2p_network(); - void add_node( const fc::ip::endpoint& ep ); - void initiate_connect_to(const peer_connection_ptr& peer); - void connect_to_endpoint(const fc::ip::endpoint& ep); - void listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); - void accept_incoming_connections(bool accept); - void listen_on_port( uint16_t port, bool wait_if_not_available ); - - fc::ip::endpoint get_actual_listening_endpoint() const; - std::vector get_connected_peers() const; - uint32_t get_connection_count() const; - - void broadcast(const message& item_to_broadcast, const message_propagation_data& propagation_data); - void broadcast(const message& item_to_broadcast); - void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers); - bool is_connected() const; - std::vector get_potential_peers() const; - void set_advanced_node_parameters( const fc::variant_object& params ); - - fc::variant_object get_advanced_node_parameters(); - message_propagation_data get_transaction_propagation_data( const eos::net::transaction_id_type& transaction_id ); - message_propagation_data get_block_propagation_data( const eos::net::block_id_type& block_id ); - - node_id_t get_node_id() const; - void set_allowed_peers( const std::vector& allowed_peers ); - void clear_peer_database(); - void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ); - void disable_peer_advertising(); - fc::variant_object get_call_statistics() const; - message get_message_for_item(const item_id& item) override; - - fc::variant_object network_get_info() const; - fc::variant_object network_get_usage_stats() const; - - bool is_hard_fork_block(uint32_t block_number) const; - uint32_t get_next_known_hard_fork_block_number(uint32_t block_number) const; - }; // end class node_impl - - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - - void node_impl_deleter::operator()(node_impl* impl_to_delete) - { -#ifdef P2P_IN_DEDICATED_THREAD - std::weak_ptr weak_thread; - if (impl_to_delete) - { - std::shared_ptr impl_thread(impl_to_delete->_thread); - 
weak_thread = impl_thread; - impl_thread->async([impl_to_delete](){ delete impl_to_delete; }, "delete node_impl").wait(); - dlog("deleting the p2p thread"); - } - if (weak_thread.expired()) - dlog("done deleting the p2p thread"); - else - dlog("failed to delete the p2p thread, we must be leaking a smart pointer somewhere"); -#else // P2P_IN_DEDICATED_THREAD - delete impl_to_delete; -#endif // P2P_IN_DEDICATED_THREAD - } - -#ifdef P2P_IN_DEDICATED_THREAD -# define VERIFY_CORRECT_THREAD() assert(_thread->is_current()) -#else -# define VERIFY_CORRECT_THREAD() do {} while (0) -#endif - -#define MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME 200 -#define MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH (10 * MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME) - - node_impl::node_impl(const std::string& user_agent) : -#ifdef P2P_IN_DEDICATED_THREAD - _thread(std::make_shared("p2p")), -#endif // P2P_IN_DEDICATED_THREAD - _delegate(nullptr), - _is_firewalled(firewalled_state::unknown), - _potential_peer_database_updated(false), - _sync_items_to_fetch_updated(false), - _suspend_fetching_sync_blocks(false), - _items_to_fetch_updated(false), - _items_to_fetch_sequence_counter(0), - _recent_block_interval_in_seconds(config::BlockIntervalSeconds), - _user_agent_string(user_agent), - _desired_number_of_connections(EOS_NET_DEFAULT_DESIRED_CONNECTIONS), - _maximum_number_of_connections(EOS_NET_DEFAULT_MAX_CONNECTIONS), - _peer_connection_retry_timeout(EOS_NET_DEFAULT_PEER_CONNECTION_RETRY_TIME), - _peer_inactivity_timeout(EOS_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT), - _most_recent_blocks_accepted(_maximum_number_of_connections), - _total_number_of_unfetched_items(0), - _rate_limiter(0, 0), - _last_reported_number_of_connections(0), - _peer_advertising_disabled(false), - _average_network_read_speed_seconds(60), - _average_network_write_speed_seconds(60), - _average_network_read_speed_minutes(60), - _average_network_write_speed_minutes(60), - _average_network_read_speed_hours(72), - _average_network_write_speed_hours(72), - _average_network_usage_second_counter(0), - _average_network_usage_minute_counter(0), - _node_is_shutting_down(false), - _maximum_number_of_blocks_to_handle_at_one_time(MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME), - _maximum_number_of_sync_blocks_to_prefetch(MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH), - _maximum_blocks_per_peer_during_syncing(EOS_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING) - { - _rate_limiter.set_actual_rate_time_constant(fc::seconds(2)); - fc::rand_pseudo_bytes(&_node_id.data[0], (int)_node_id.size()); - } - - node_impl::~node_impl() - { - VERIFY_CORRECT_THREAD(); - ilog( "cleaning up node" ); - _node_is_shutting_down = true; - - for (const peer_connection_ptr& active_peer : _active_connections) - { - fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - } - } - - try - { - ilog( "close" ); - close(); - } - catch ( const fc::exception& e ) - { - wlog( "unexpected exception on close ${e}", ("e", e) ); - } - ilog( "done" ); - } - - void node_impl::save_node_configuration() - { - VERIFY_CORRECT_THREAD(); - if( fc::exists(_node_configuration_directory ) ) - { - fc::path configuration_file_name( _node_configuration_directory / NODE_CONFIGURATION_FILENAME ); - try - { - fc::json::save_to_file( 
_node_configuration, configuration_file_name ); - } - catch (const fc::canceled_exception&) - { - throw; - } - catch ( const fc::exception& except ) - { - elog( "error writing node configuration to file ${filename}: ${error}", - ( "filename", configuration_file_name )("error", except.to_detail_string() ) ); - } - } - } - - void node_impl::p2p_network_connect_loop() - { - VERIFY_CORRECT_THREAD(); - while (!_p2p_network_connect_loop_done.canceled()) - { - try - { - dlog("Starting an iteration of p2p_network_connect_loop()."); - display_current_connections(); - - // add-once peers bypass our checks on the maximum/desired number of connections (but they will still be counted against the totals once they're connected) - if (!_add_once_node_list.empty()) - { - std::list add_once_node_list; - add_once_node_list.swap(_add_once_node_list); - dlog("Processing \"add once\" node list containing ${count} peers:", ("count", add_once_node_list.size())); - for (const potential_peer_record& add_once_peer : add_once_node_list) - { - dlog(" ${peer}", ("peer", add_once_peer.endpoint)); - } - for (const potential_peer_record& add_once_peer : add_once_node_list) - { - // see if we have an existing connection to that peer. If we do, disconnect them and - // then try to connect the next time through the loop - peer_connection_ptr existing_connection_ptr = get_connection_to_endpoint( add_once_peer.endpoint ); - if(!existing_connection_ptr) - connect_to_endpoint(add_once_peer.endpoint); - } - dlog("Done processing \"add once\" node list"); - } - - while (is_wanting_new_connections()) - { - bool initiated_connection_this_pass = false; - _potential_peer_database_updated = false; - - for (peer_database::iterator iter = _potential_peer_db.begin(); - iter != _potential_peer_db.end() && is_wanting_new_connections(); - ++iter) - { - fc::microseconds delay_until_retry = fc::seconds((iter->number_of_failed_connection_attempts + 1) * _peer_connection_retry_timeout); - - if (!is_connection_to_endpoint_in_progress(iter->endpoint) && - ((iter->last_connection_disposition != last_connection_failed && - iter->last_connection_disposition != last_connection_rejected && - iter->last_connection_disposition != last_connection_handshaking_failed) || - (fc::time_point::now() - iter->last_connection_attempt_time) > delay_until_retry)) - { - connect_to_endpoint(iter->endpoint); - initiated_connection_this_pass = true; - } - } - - if (!initiated_connection_this_pass && !_potential_peer_database_updated) - break; - } - - display_current_connections(); - - // if we broke out of the while loop, that means either we have connected to enough nodes, or - // we don't have any good candidates to connect to right now. -#if 0 - try - { - _retrigger_connect_loop_promise = fc::promise::ptr( new fc::promise("eos::net::retrigger_connect_loop") ); - if( is_wanting_new_connections() || !_add_once_node_list.empty() ) - { - if( is_wanting_new_connections() ) - dlog( "Still want to connect to more nodes, but I don't have any good candidates. Trying again in 15 seconds" ); - else - dlog( "I still have some \"add once\" nodes to connect to. 
Trying again in 15 seconds" ); - _retrigger_connect_loop_promise->wait_until( fc::time_point::now() + fc::seconds(EOS_PEER_DATABASE_RETRY_DELAY ) ); - } - else - { - dlog( "I don't need any more connections, waiting forever until something changes" ); - _retrigger_connect_loop_promise->wait(); - } - } - catch ( fc::timeout_exception& ) //intentionally not logged - { - } // catch -#else - fc::usleep(fc::seconds(10)); -#endif - } - catch (const fc::canceled_exception&) - { - throw; - } - FC_CAPTURE_AND_LOG( () ) - }// while(!canceled) - } - - void node_impl::trigger_p2p_network_connect_loop() - { - VERIFY_CORRECT_THREAD(); - dlog( "Triggering connect loop now" ); - _potential_peer_database_updated = true; - //if( _retrigger_connect_loop_promise ) - // _retrigger_connect_loop_promise->set_value(); - } - - bool node_impl::have_already_received_sync_item( const item_hash_t& item_hash ) - { - VERIFY_CORRECT_THREAD(); - return std::find_if(_received_sync_items.begin(), _received_sync_items.end(), - [&item_hash]( const eos::net::block_message& message ) { return message.block_id == item_hash; } ) != _received_sync_items.end() || - std::find_if(_new_received_sync_items.begin(), _new_received_sync_items.end(), - [&item_hash]( const eos::net::block_message& message ) { return message.block_id == item_hash; } ) != _new_received_sync_items.end(); ; - } - - void node_impl::request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request ) - { - VERIFY_CORRECT_THREAD(); - dlog( "requesting item ${item_hash} from peer ${endpoint}", ("item_hash", item_to_request )("endpoint", peer->get_remote_endpoint() ) ); - item_id item_id_to_request( eos::net::block_message_type, item_to_request ); - _active_sync_requests.insert( active_sync_requests_map::value_type(item_to_request, fc::time_point::now() ) ); - peer->sync_items_requested_from_peer.insert( peer_connection::item_to_time_map_type::value_type(item_id_to_request, fc::time_point::now() ) ); - std::vector items_to_fetch; - peer->send_message( fetch_items_message(item_id_to_request.item_type, std::vector{item_id_to_request.item_hash} ) ); - } - - void node_impl::request_sync_items_from_peer( const peer_connection_ptr& peer, const std::vector& items_to_request ) - { - VERIFY_CORRECT_THREAD(); - dlog( "requesting ${item_count} item(s) ${items_to_request} from peer ${endpoint}", - ("item_count", items_to_request.size())("items_to_request", items_to_request)("endpoint", peer->get_remote_endpoint()) ); - for (const item_hash_t& item_to_request : items_to_request) - { - _active_sync_requests.insert( active_sync_requests_map::value_type(item_to_request, fc::time_point::now() ) ); - item_id item_id_to_request( eos::net::block_message_type, item_to_request ); - peer->sync_items_requested_from_peer.insert( peer_connection::item_to_time_map_type::value_type(item_id_to_request, fc::time_point::now() ) ); - } - peer->send_message(fetch_items_message(eos::net::block_message_type, items_to_request)); - } - - void node_impl::fetch_sync_items_loop() - { - VERIFY_CORRECT_THREAD(); - while( !_fetch_sync_items_loop_done.canceled() ) - { - _sync_items_to_fetch_updated = false; - dlog( "beginning another iteration of the sync items loop" ); - - if (!_suspend_fetching_sync_blocks) - { - std::map > sync_item_requests_to_send; - - { - ASSERT_TASK_NOT_PREEMPTED(); - std::set sync_items_to_request; - - // for each idle peer that we're syncing with - for( const peer_connection_ptr& peer : _active_connections ) - { - if( peer->we_need_sync_items_from_peer 
&& - sync_item_requests_to_send.find(peer) == sync_item_requests_to_send.end() && // if we've already scheduled a request for this peer, don't consider scheduling another - peer->idle() ) - { - if (!peer->inhibit_fetching_sync_blocks) - { - // loop through the items it has that we don't yet have on our blockchain - for( unsigned i = 0; i < peer->ids_of_items_to_get.size(); ++i ) - { - item_hash_t item_to_potentially_request = peer->ids_of_items_to_get[i]; - // if we don't already have this item in our temporary storage and we haven't requested from another syncing peer - if( !have_already_received_sync_item(item_to_potentially_request) && // already got it, but for some reson it's still in our list of items to fetch - sync_items_to_request.find(item_to_potentially_request) == sync_items_to_request.end() && // we have already decided to request it from another peer during this iteration - _active_sync_requests.find(item_to_potentially_request) == _active_sync_requests.end() ) // we've requested it in a previous iteration and we're still waiting for it to arrive - { - // then schedule a request from this peer - sync_item_requests_to_send[peer].push_back(item_to_potentially_request); - sync_items_to_request.insert( item_to_potentially_request ); - if (sync_item_requests_to_send[peer].size() >= _maximum_blocks_per_peer_during_syncing) - break; - } - } - } - } - } - } // end non-preemptable section - - // make all the requests we scheduled in the loop above - for( auto sync_item_request : sync_item_requests_to_send ) - request_sync_items_from_peer( sync_item_request.first, sync_item_request.second ); - sync_item_requests_to_send.clear(); - } - else - dlog("fetch_sync_items_loop is suspended pending backlog processing"); - - if( !_sync_items_to_fetch_updated ) - { - dlog( "no sync items to fetch right now, going to sleep" ); - _retrigger_fetch_sync_items_loop_promise = fc::promise::ptr( new fc::promise("eos::net::retrigger_fetch_sync_items_loop") ); - _retrigger_fetch_sync_items_loop_promise->wait(); - _retrigger_fetch_sync_items_loop_promise.reset(); - } - } // while( !canceled ) - } - - void node_impl::trigger_fetch_sync_items_loop() - { - VERIFY_CORRECT_THREAD(); - dlog( "Triggering fetch sync items loop now" ); - _sync_items_to_fetch_updated = true; - if( _retrigger_fetch_sync_items_loop_promise ) - _retrigger_fetch_sync_items_loop_promise->set_value(); - } - - bool node_impl::is_item_in_any_peers_inventory(const item_id& item) const - { - for( const peer_connection_ptr& peer : _active_connections ) - { - if (peer->inventory_peer_advertised_to_us.find(item) != peer->inventory_peer_advertised_to_us.end() ) - return true; - } - return false; - } - - void node_impl::fetch_items_loop() - { - VERIFY_CORRECT_THREAD(); - while (!_fetch_item_loop_done.canceled()) - { - _items_to_fetch_updated = false; - dlog("beginning an iteration of fetch items (${count} items to fetch)", - ("count", _items_to_fetch.size())); - - fc::time_point oldest_timestamp_to_fetch = fc::time_point::now() - fc::seconds(_recent_block_interval_in_seconds * EOS_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS); - fc::time_point next_peer_unblocked_time = fc::time_point::maximum(); - - // we need to construct a list of items to request from each peer first, - // then send the messages (in two steps, to avoid yielding while iterating) - // we want to evenly distribute our requests among our peers. 
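// --- illustrative sketch, not part of fetch_items_loop ---------------------
// The comment above describes a two-phase pattern: first build the complete
// per-peer request plan without calling anything that can yield the current
// fiber, then perform the (potentially yielding) sends afterwards, giving each
// item to the least-loaded idle peer that advertised it.  This hypothetical,
// simplified sketch shows that shape with plain std containers; every name in
// it is invented for illustration.
#include <map>
#include <set>
#include <string>
#include <vector>

using item_hash = std::string;
struct sketch_peer { std::set<item_hash> advertised; };

// phase 1: pure bookkeeping, safe to run without interruption
std::map<sketch_peer*, std::vector<item_hash>>
plan_requests(std::vector<sketch_peer>& idle_peers, const std::vector<item_hash>& wanted)
{
   std::map<sketch_peer*, std::vector<item_hash>> plan;
   for (const item_hash& item : wanted)
   {
      sketch_peer* best = nullptr;
      for (sketch_peer& peer : idle_peers)
         if (peer.advertised.count(item) &&
             (!best || plan[&peer].size() < plan[best].size()))   // least loaded wins
            best = &peer;
      if (best)
         plan[best].push_back(item);
   }
   return plan;
}
// phase 2 (not shown): iterate over the finished plan and send one
// fetch-style message per peer; any yields happen only in that second pass.
// ---------------------------------------------------------------------------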
- struct requested_item_count_index {}; - struct peer_and_items_to_fetch - { - peer_connection_ptr peer; - std::vector item_ids; - peer_and_items_to_fetch(const peer_connection_ptr& peer) : peer(peer) {} - bool operator<(const peer_and_items_to_fetch& rhs) const { return peer < rhs.peer; } - size_t number_of_items() const { return item_ids.size(); } - }; - typedef boost::multi_index_container >, - boost::multi_index::ordered_non_unique, - boost::multi_index::const_mem_fun > > > fetch_messages_to_send_set; - fetch_messages_to_send_set items_by_peer; - - // initialize the fetch_messages_to_send with an empty set of items for all idle peers - for (const peer_connection_ptr& peer : _active_connections) - if (peer->idle()) - items_by_peer.insert(peer_and_items_to_fetch(peer)); - - // now loop over all items we want to fetch - for (auto item_iter = _items_to_fetch.begin(); item_iter != _items_to_fetch.end();) - { - if (item_iter->timestamp < oldest_timestamp_to_fetch) - { - // this item has probably already fallen out of our peers' caches, we'll just ignore it. - // this can happen during flooding, and the _items_to_fetch could otherwise get clogged - // with a bunch of items that we'll never be able to request from any peer - wlog("Unable to fetch item ${item} before its likely expiration time, removing it from our list of items to fetch", ("item", item_iter->item)); - item_iter = _items_to_fetch.erase(item_iter); - } - else - { - // find a peer that has it, we'll use the one who has the least requests going to it to load balance - bool item_fetched = false; - for (auto peer_iter = items_by_peer.get().begin(); peer_iter != items_by_peer.get().end(); ++peer_iter) - { - const peer_connection_ptr& peer = peer_iter->peer; - // if they have the item and we haven't already decided to ask them for too many other items - if (peer_iter->item_ids.size() < EOS_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION && - peer->inventory_peer_advertised_to_us.find(item_iter->item) != peer->inventory_peer_advertised_to_us.end()) - { - if (item_iter->item.item_type == eos::net::trx_message_type && peer->is_transaction_fetching_inhibited()) - next_peer_unblocked_time = std::min(peer->transaction_fetching_inhibited_until, next_peer_unblocked_time); - else - { - //dlog("requesting item ${hash} from peer ${endpoint}", - // ("hash", iter->item.item_hash)("endpoint", peer->get_remote_endpoint())); - item_id item_id_to_fetch = item_iter->item; - peer->items_requested_from_peer.insert(peer_connection::item_to_time_map_type::value_type(item_id_to_fetch, fc::time_point::now())); - item_iter = _items_to_fetch.erase(item_iter); - item_fetched = true; - items_by_peer.get().modify(peer_iter, [&item_id_to_fetch](peer_and_items_to_fetch& peer_and_items) { - peer_and_items.item_ids.push_back(item_id_to_fetch); - }); - break; - } - } - } - if (!item_fetched) - ++item_iter; - } - } - - // we've figured out which peer will be providing each item, now send the messages. - for (const peer_and_items_to_fetch& peer_and_items : items_by_peer) - { - // the item lists are heterogenous and - // the fetch_items_message can only deal with one item type at a time. 
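// --- illustrative sketch, not part of fetch_items_loop ---------------------
// As the comment above notes, a single fetch message can only carry items of
// one type, so a mixed list of (type, hash) pairs has to be split into one
// bucket per type before sending.  Minimal, self-contained illustration with
// invented names:
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

using type_and_hash = std::pair<uint32_t, std::string>;   // (item_type, item_hash)

std::map<uint32_t, std::vector<std::string>>
group_by_type(const std::vector<type_and_hash>& mixed_items)
{
   std::map<uint32_t, std::vector<std::string>> buckets;
   for (const type_and_hash& item : mixed_items)
      buckets[item.first].push_back(item.second);   // one bucket per item type
   return buckets;                                  // caller sends one message per bucket
}
// ---------------------------------------------------------------------------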
- std::map > items_to_fetch_by_type; - for (const item_id& item : peer_and_items.item_ids) - items_to_fetch_by_type[item.item_type].push_back(item.item_hash); - for (auto& items_by_type : items_to_fetch_by_type) - { - dlog("requesting ${count} items of type ${type} from peer ${endpoint}: ${hashes}", - ("count", items_by_type.second.size())("type", (uint32_t)items_by_type.first) - ("endpoint", peer_and_items.peer->get_remote_endpoint()) - ("hashes", items_by_type.second)); - peer_and_items.peer->send_message(fetch_items_message(items_by_type.first, - items_by_type.second)); - } - } - items_by_peer.clear(); - - if (!_items_to_fetch_updated) - { - _retrigger_fetch_item_loop_promise = fc::promise::ptr(new fc::promise("eos::net::retrigger_fetch_item_loop")); - fc::microseconds time_until_retrigger = fc::microseconds::maximum(); - if (next_peer_unblocked_time != fc::time_point::maximum()) - time_until_retrigger = next_peer_unblocked_time - fc::time_point::now(); - try - { - if (time_until_retrigger > fc::microseconds(0)) - _retrigger_fetch_item_loop_promise->wait(time_until_retrigger); - } - catch (const fc::timeout_exception&) - { - dlog("Resuming fetch_items_loop due to timeout -- one of our peers should no longer be throttled"); - } - _retrigger_fetch_item_loop_promise.reset(); - } - } // while (!canceled) - } - - void node_impl::trigger_fetch_items_loop() - { - VERIFY_CORRECT_THREAD(); - _items_to_fetch_updated = true; - if( _retrigger_fetch_item_loop_promise ) - _retrigger_fetch_item_loop_promise->set_value(); - } - - void node_impl::advertise_inventory_loop() - { - VERIFY_CORRECT_THREAD(); - while (!_advertise_inventory_loop_done.canceled()) - { - dlog("beginning an iteration of advertise inventory"); - // swap inventory into local variable, clearing the node's copy - std::unordered_set inventory_to_advertise; - inventory_to_advertise.swap(_new_inventory); - - // process all inventory to advertise and construct the inventory messages we'll send - // first, then send them all in a batch (to avoid any fiber interruption points while - // we're computing the messages) - std::list > inventory_messages_to_send; - - for (const peer_connection_ptr& peer : _active_connections) - { - // only advertise to peers who are in sync with us - wdump((peer->peer_needs_sync_items_from_us)); - if( !peer->peer_needs_sync_items_from_us ) - { - std::map > items_to_advertise_by_type; - // don't send the peer anything we've already advertised to it - // or anything it has advertised to us - // group the items we need to send by type, because we'll need to send one inventory message per type - unsigned total_items_to_send_to_this_peer = 0; - wdump((inventory_to_advertise)); - for (const item_id& item_to_advertise : inventory_to_advertise) - { - if (peer->inventory_advertised_to_peer.find(item_to_advertise) != peer->inventory_advertised_to_peer.end() ) - wdump((*peer->inventory_advertised_to_peer.find(item_to_advertise))); - if (peer->inventory_peer_advertised_to_us.find(item_to_advertise) != peer->inventory_peer_advertised_to_us.end() ) - wdump((*peer->inventory_peer_advertised_to_us.find(item_to_advertise))); - - if (peer->inventory_advertised_to_peer.find(item_to_advertise) == peer->inventory_advertised_to_peer.end() && - peer->inventory_peer_advertised_to_us.find(item_to_advertise) == peer->inventory_peer_advertised_to_us.end()) - { - items_to_advertise_by_type[item_to_advertise.item_type].push_back(item_to_advertise.item_hash); - 
-                peer->inventory_advertised_to_peer.insert(peer_connection::timestamped_item_id(item_to_advertise, fc::time_point::now()));
-                ++total_items_to_send_to_this_peer;
-                if (item_to_advertise.item_type == trx_message_type)
-                  testnetlog("advertising transaction ${id} to peer ${endpoint}", ("id", item_to_advertise.item_hash)("endpoint", peer->get_remote_endpoint()));
-                dlog("advertising item ${id} to peer ${endpoint}", ("id", item_to_advertise.item_hash)("endpoint", peer->get_remote_endpoint()));
-              }
-            }
-            dlog("advertising ${count} new item(s) of ${types} type(s) to peer ${endpoint}",
-                 ("count", total_items_to_send_to_this_peer)
-                 ("types", items_to_advertise_by_type.size())
-                 ("endpoint", peer->get_remote_endpoint()));
-            for (auto items_group : items_to_advertise_by_type)
-              inventory_messages_to_send.push_back(std::make_pair(peer, item_ids_inventory_message(items_group.first, items_group.second)));
-          }
-          peer->clear_old_inventory();
-        }
-
-        for (auto iter = inventory_messages_to_send.begin(); iter != inventory_messages_to_send.end(); ++iter)
-          iter->first->send_message(iter->second);
-        inventory_messages_to_send.clear();
-
-        if (_new_inventory.empty())
-        {
-          _retrigger_advertise_inventory_loop_promise = fc::promise<void>::ptr(new fc::promise<void>("eos::net::retrigger_advertise_inventory_loop"));
-          _retrigger_advertise_inventory_loop_promise->wait();
-          _retrigger_advertise_inventory_loop_promise.reset();
-        }
-      } // while(!canceled)
-    }
-
-    void node_impl::trigger_advertise_inventory_loop()
-    {
-      VERIFY_CORRECT_THREAD();
-      if( _retrigger_advertise_inventory_loop_promise )
-        _retrigger_advertise_inventory_loop_promise->set_value();
-    }
-
-    void node_impl::terminate_inactive_connections_loop()
-    {
-      VERIFY_CORRECT_THREAD();
-      std::list<peer_connection_ptr> peers_to_disconnect_gently;
-      std::list<peer_connection_ptr> peers_to_disconnect_forcibly;
-      std::list<peer_connection_ptr> peers_to_send_keep_alive;
-      std::list<peer_connection_ptr> peers_to_terminate;
-
-      _recent_block_interval_in_seconds = _delegate->get_current_block_interval_in_seconds();
-
-      // Disconnect peers that haven't sent us any data recently
-      // These numbers are just guesses and we need to think through how this works better.
-      // If we and our peers get disconnected from the rest of the network, we will not
-      // receive any blocks or transactions from the rest of the world, and that will
-      // probably make us disconnect from our peers even though we have working connections to
-      // them (but they won't have sent us anything since they aren't getting blocks either).
-      // This might not be so bad because it could make us initiate more connections and
-      // reconnect with the rest of the network, or it might just further isolate us.
-      {
-        // As usual, the first step is to walk through all our peers and figure out which
-        // peers need action (disconnecting, sending keepalives, etc), then we walk through
-        // those lists yielding at our leisure later.
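// --- illustrative sketch, not part of terminate_inactive_connections_loop --
// The loop below derives its thresholds from the block interval: handshaking
// peers use _peer_inactivity_timeout, fully active peers are dropped after a
// multiple of the block interval (10x in the code below) of silence and get a
// keepalive once half of that has elapsed.  This hypothetical helper shows the
// same classification on plain time points; the names and the peer_activity
// struct are invented for illustration.
#include <chrono>
#include <cstdint>

enum class peer_action { none, send_keepalive, disconnect };

struct peer_activity { std::chrono::steady_clock::time_point last_message_received; };

peer_action classify_active_peer(const peer_activity& peer,
                                 uint32_t block_interval_seconds,
                                 std::chrono::steady_clock::time_point now)
{
   const auto disconnect_after = std::chrono::seconds(10 * block_interval_seconds);
   const auto keepalive_after  = disconnect_after / 2;
   const auto quiet_for        = now - peer.last_message_received;

   if (quiet_for > disconnect_after)
      return peer_action::disconnect;      // gently close the connection
   if (quiet_for > keepalive_after)
      return peer_action::send_keepalive;  // a current_time_request_message in the real loop
   return peer_action::none;
}
// ---------------------------------------------------------------------------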
- ASSERT_TASK_NOT_PREEMPTED(); - - uint32_t handshaking_timeout = _peer_inactivity_timeout; - fc::time_point handshaking_disconnect_threshold = fc::time_point::now() - fc::seconds(handshaking_timeout); - for( const peer_connection_ptr handshaking_peer : _handshaking_connections ) - if( handshaking_peer->connection_initiation_time < handshaking_disconnect_threshold && - handshaking_peer->get_last_message_received_time() < handshaking_disconnect_threshold && - handshaking_peer->get_last_message_sent_time() < handshaking_disconnect_threshold ) - { - wlog( "Forcibly disconnecting from handshaking peer ${peer} due to inactivity of at least ${timeout} seconds", - ( "peer", handshaking_peer->get_remote_endpoint() )("timeout", handshaking_timeout ) ); - wlog("Peer's negotiating status: ${status}, bytes sent: ${sent}, bytes received: ${received}", - ("status", handshaking_peer->negotiation_status) - ("sent", handshaking_peer->get_total_bytes_sent()) - ("received", handshaking_peer->get_total_bytes_received())); - handshaking_peer->connection_closed_error = fc::exception(FC_LOG_MESSAGE(warn, "Terminating handshaking connection due to inactivity of ${timeout} seconds. Negotiating status: ${status}, bytes sent: ${sent}, bytes received: ${received}", - ("peer", handshaking_peer->get_remote_endpoint()) - ("timeout", handshaking_timeout) - ("status", handshaking_peer->negotiation_status) - ("sent", handshaking_peer->get_total_bytes_sent()) - ("received", handshaking_peer->get_total_bytes_received()))); - peers_to_disconnect_forcibly.push_back( handshaking_peer ); - } - - // timeout for any active peers is two block intervals - uint32_t active_disconnect_timeout = 10 * _recent_block_interval_in_seconds; - uint32_t active_send_keepalive_timeout = active_disconnect_timeout / 2; - - // set the ignored request time out to 1 second. When we request a block - // or transaction from a peer, this timeout determines how long we wait for them - // to reply before we give up and ask another peer for the item. - // Ideally this should be significantly shorter than the block interval, because - // we'd like to realize the block isn't coming and fetch it from a different - // peer before the next block comes in. At the current target of 3 second blocks, - // 1 second seems reasonable. 
When we get closer to our eventual target of 1 second - // blocks, this will need to be re-evaluated (i.e., can we set the timeout to 500ms - // and still handle normal network & processing delays without excessive disconnects) - fc::microseconds active_ignored_request_timeout = fc::seconds(1); - - fc::time_point active_disconnect_threshold = fc::time_point::now() - fc::seconds(active_disconnect_timeout); - fc::time_point active_send_keepalive_threshold = fc::time_point::now() - fc::seconds(active_send_keepalive_timeout); - fc::time_point active_ignored_request_threshold = fc::time_point::now() - active_ignored_request_timeout; - for( const peer_connection_ptr& active_peer : _active_connections ) - { - if( active_peer->connection_initiation_time < active_disconnect_threshold && - active_peer->get_last_message_received_time() < active_disconnect_threshold ) - { - wlog( "Closing connection with peer ${peer} due to inactivity of at least ${timeout} seconds", - ( "peer", active_peer->get_remote_endpoint() )("timeout", active_disconnect_timeout ) ); - peers_to_disconnect_gently.push_back( active_peer ); - } - else - { - bool disconnect_due_to_request_timeout = false; - for (const peer_connection::item_to_time_map_type::value_type& item_and_time : active_peer->sync_items_requested_from_peer) - if (item_and_time.second < active_ignored_request_threshold) - { - wlog("Disconnecting peer ${peer} because they didn't respond to my request for sync item ${id}", - ("peer", active_peer->get_remote_endpoint())("id", item_and_time.first.item_hash)); - disconnect_due_to_request_timeout = true; - break; - } - if (!disconnect_due_to_request_timeout && - active_peer->item_ids_requested_from_peer && - active_peer->item_ids_requested_from_peer->get<1>() < active_ignored_request_threshold) - { - wlog("Disconnecting peer ${peer} because they didn't respond to my request for sync item ids after ${synopsis}", - ("peer", active_peer->get_remote_endpoint()) - ("synopsis", active_peer->item_ids_requested_from_peer->get<0>())); - disconnect_due_to_request_timeout = true; - } - if (!disconnect_due_to_request_timeout) - for (const peer_connection::item_to_time_map_type::value_type& item_and_time : active_peer->items_requested_from_peer) - if (item_and_time.second < active_ignored_request_threshold) - { - wlog("Disconnecting peer ${peer} because they didn't respond to my request for item ${id}", - ("peer", active_peer->get_remote_endpoint())("id", item_and_time.first.item_hash)); - disconnect_due_to_request_timeout = true; - break; - } - if (disconnect_due_to_request_timeout) - { - // we should probably disconnect nicely and give them a reason, but right now the logic - // for rescheduling the requests only executes when the connection is fully closed, - // and we want to get those requests rescheduled as soon as possible - peers_to_disconnect_forcibly.push_back(active_peer); - } - else if (active_peer->connection_initiation_time < active_send_keepalive_threshold && - active_peer->get_last_message_received_time() < active_send_keepalive_threshold) - { - wlog( "Sending a keepalive message to peer ${peer} who hasn't sent us any messages in the last ${timeout} seconds", - ( "peer", active_peer->get_remote_endpoint() )("timeout", active_send_keepalive_timeout ) ); - peers_to_send_keep_alive.push_back(active_peer); - } - } - } - - fc::time_point closing_disconnect_threshold = fc::time_point::now() - fc::seconds(EOS_NET_PEER_DISCONNECT_TIMEOUT); - for( const peer_connection_ptr& closing_peer : _closing_connections ) - if( 
closing_peer->connection_closed_time < closing_disconnect_threshold ) - { - // we asked this peer to close their connectoin to us at least EOS_NET_PEER_DISCONNECT_TIMEOUT - // seconds ago, but they haven't done it yet. Terminate the connection now - wlog( "Forcibly disconnecting peer ${peer} who failed to close their connection in a timely manner", - ( "peer", closing_peer->get_remote_endpoint() ) ); - peers_to_disconnect_forcibly.push_back( closing_peer ); - } - - uint32_t failed_terminate_timeout_seconds = 120; - fc::time_point failed_terminate_threshold = fc::time_point::now() - fc::seconds(failed_terminate_timeout_seconds); - for (const peer_connection_ptr& peer : _terminating_connections ) - if (peer->get_connection_terminated_time() != fc::time_point::min() && - peer->get_connection_terminated_time() < failed_terminate_threshold) - { - wlog("Terminating connection with peer ${peer}, closing the connection didn't work", ("peer", peer->get_remote_endpoint())); - peers_to_terminate.push_back(peer); - } - - // That's the end of the sorting step; now all peers that require further processing are now in one of the - // lists peers_to_disconnect_gently, peers_to_disconnect_forcibly, peers_to_send_keep_alive, or peers_to_terminate - - // if we've decided to delete any peers, do it now; in its current implementation this doesn't yield, - // and once we start yielding, we may find that we've moved that peer to another list (closed or active) - // and that triggers assertions, maybe even errors - for (const peer_connection_ptr& peer : peers_to_terminate ) - { - assert(_terminating_connections.find(peer) != _terminating_connections.end()); - _terminating_connections.erase(peer); - schedule_peer_for_deletion(peer); - } - peers_to_terminate.clear(); - - // if we're going to abruptly disconnect anyone, do it here - // (it doesn't yield). I don't think there would be any harm if this were - // moved to the yielding section - for( const peer_connection_ptr& peer : peers_to_disconnect_forcibly ) - { - move_peer_to_terminating_list(peer); - peer->close_connection(); - } - peers_to_disconnect_forcibly.clear(); - } // end ASSERT_TASK_NOT_PREEMPTED() - - // Now process the peers that we need to do yielding functions with (disconnect sends a message with the - // disconnect reason, so it may yield) - for( const peer_connection_ptr& peer : peers_to_disconnect_gently ) - { - fc::exception detailed_error( FC_LOG_MESSAGE(warn, "Disconnecting due to inactivity", - ( "last_message_received_seconds_ago", (peer->get_last_message_received_time() - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) - ( "last_message_sent_seconds_ago", (peer->get_last_message_sent_time() - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) - ( "inactivity_timeout", _active_connections.find(peer ) != _active_connections.end() ? 
_peer_inactivity_timeout * 10 : _peer_inactivity_timeout ) ) ); - disconnect_from_peer( peer.get(), "Disconnecting due to inactivity", false, detailed_error ); - } - peers_to_disconnect_gently.clear(); - - for( const peer_connection_ptr& peer : peers_to_send_keep_alive ) - peer->send_message(current_time_request_message(), - offsetof(current_time_request_message, request_sent_time)); - peers_to_send_keep_alive.clear(); - - if (!_node_is_shutting_down && !_terminate_inactive_connections_loop_done.canceled()) - _terminate_inactive_connections_loop_done = fc::schedule( [this](){ terminate_inactive_connections_loop(); }, - fc::time_point::now() + fc::seconds(EOS_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT / 2), - "terminate_inactive_connections_loop" ); - } - - void node_impl::fetch_updated_peer_lists_loop() - { - VERIFY_CORRECT_THREAD(); - - std::list original_active_peers(_active_connections.begin(), _active_connections.end()); - for( const peer_connection_ptr& active_peer : original_active_peers ) - { - try - { - active_peer->send_message(address_request_message()); - } - catch ( const fc::canceled_exception& ) - { - throw; - } - catch (const fc::exception& e) - { - dlog("Caught exception while sending address request message to peer ${peer} : ${e}", - ("peer", active_peer->get_remote_endpoint())("e", e)); - } - } - - // this has nothing to do with updating the peer list, but we need to prune this list - // at regular intervals, this is a fine place to do it. - fc::time_point_sec oldest_failed_ids_to_keep(fc::time_point::now() - fc::minutes(15)); - auto oldest_failed_ids_to_keep_iter = _recently_failed_items.get().lower_bound(oldest_failed_ids_to_keep); - auto begin_iter = _recently_failed_items.get().begin(); - _recently_failed_items.get().erase(begin_iter, oldest_failed_ids_to_keep_iter); - - if (!_node_is_shutting_down && !_fetch_updated_peer_lists_loop_done.canceled() ) - _fetch_updated_peer_lists_loop_done = fc::schedule( [this](){ fetch_updated_peer_lists_loop(); }, - fc::time_point::now() + fc::minutes(15), - "fetch_updated_peer_lists_loop" ); - } - void node_impl::update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second) - { - VERIFY_CORRECT_THREAD(); - _average_network_read_speed_seconds.push_back(bytes_read_this_second); - _average_network_write_speed_seconds.push_back(bytes_written_this_second); - ++_average_network_usage_second_counter; - if (_average_network_usage_second_counter >= 60) - { - _average_network_usage_second_counter = 0; - ++_average_network_usage_minute_counter; - uint32_t average_read_this_minute = (uint32_t)boost::accumulate(_average_network_read_speed_seconds, uint64_t(0)) / (uint32_t)_average_network_read_speed_seconds.size(); - _average_network_read_speed_minutes.push_back(average_read_this_minute); - uint32_t average_written_this_minute = (uint32_t)boost::accumulate(_average_network_write_speed_seconds, uint64_t(0)) / (uint32_t)_average_network_write_speed_seconds.size(); - _average_network_write_speed_minutes.push_back(average_written_this_minute); - if (_average_network_usage_minute_counter >= 60) - { - _average_network_usage_minute_counter = 0; - uint32_t average_read_this_hour = (uint32_t)boost::accumulate(_average_network_read_speed_minutes, uint64_t(0)) / (uint32_t)_average_network_read_speed_minutes.size(); - _average_network_read_speed_hours.push_back(average_read_this_hour); - uint32_t average_written_this_hour = (uint32_t)boost::accumulate(_average_network_write_speed_minutes, uint64_t(0)) / 
(uint32_t)_average_network_write_speed_minutes.size(); - _average_network_write_speed_hours.push_back(average_written_this_hour); - } - } - } - void node_impl::bandwidth_monitor_loop() - { - VERIFY_CORRECT_THREAD(); - fc::time_point_sec current_time = fc::time_point::now(); - - if (_bandwidth_monitor_last_update_time == fc::time_point_sec::min()) - _bandwidth_monitor_last_update_time = current_time; - - uint32_t seconds_since_last_update = current_time.sec_since_epoch() - _bandwidth_monitor_last_update_time.sec_since_epoch(); - seconds_since_last_update = std::max(UINT32_C(1), seconds_since_last_update); - uint32_t bytes_read_this_second = _rate_limiter.get_actual_download_rate(); - uint32_t bytes_written_this_second = _rate_limiter.get_actual_upload_rate(); - for (uint32_t i = 0; i < seconds_since_last_update - 1; ++i) - update_bandwidth_data(0, 0); - update_bandwidth_data(bytes_read_this_second, bytes_written_this_second); - _bandwidth_monitor_last_update_time = current_time; - - if (!_node_is_shutting_down && !_bandwidth_monitor_loop_done.canceled()) - _bandwidth_monitor_loop_done = fc::schedule( [=](){ bandwidth_monitor_loop(); }, - fc::time_point::now() + fc::seconds(1), - "bandwidth_monitor_loop" ); - } - - void node_impl::dump_node_status_task() - { - VERIFY_CORRECT_THREAD(); - dump_node_status(); - if (!_node_is_shutting_down && !_dump_node_status_task_done.canceled()) - _dump_node_status_task_done = fc::schedule([=](){ dump_node_status_task(); }, - fc::time_point::now() + fc::minutes(1), - "dump_node_status_task"); - } - - void node_impl::delayed_peer_deletion_task() - { - VERIFY_CORRECT_THREAD(); -#ifdef USE_PEERS_TO_DELETE_MUTEX - fc::scoped_lock lock(_peers_to_delete_mutex); - dlog("in delayed_peer_deletion_task with ${count} in queue", ("count", _peers_to_delete.size())); - _peers_to_delete.clear(); - dlog("_peers_to_delete cleared"); -#else - while (!_peers_to_delete.empty()) - { - std::list peers_to_delete_copy; - dlog("beginning an iteration of delayed_peer_deletion_task with ${count} in queue", ("count", _peers_to_delete.size())); - peers_to_delete_copy.swap(_peers_to_delete); - } - dlog("leaving delayed_peer_deletion_task"); -#endif - } - - void node_impl::schedule_peer_for_deletion(const peer_connection_ptr& peer_to_delete) - { - VERIFY_CORRECT_THREAD(); - - assert(_handshaking_connections.find(peer_to_delete) == _handshaking_connections.end()); - assert(_active_connections.find(peer_to_delete) == _active_connections.end()); - assert(_closing_connections.find(peer_to_delete) == _closing_connections.end()); - assert(_terminating_connections.find(peer_to_delete) == _terminating_connections.end()); - -#ifdef USE_PEERS_TO_DELETE_MUTEX - dlog("scheduling peer for deletion: ${peer} (may block on a mutex here)", ("peer", peer_to_delete->get_remote_endpoint())); - - unsigned number_of_peers_to_delete; - { - fc::scoped_lock lock(_peers_to_delete_mutex); - _peers_to_delete.emplace_back(peer_to_delete); - number_of_peers_to_delete = _peers_to_delete.size(); - } - dlog("peer scheduled for deletion: ${peer}", ("peer", peer_to_delete->get_remote_endpoint())); - - if (!_node_is_shutting_down && - (!_delayed_peer_deletion_task_done.valid() || _delayed_peer_deletion_task_done.ready())) - { - dlog("asyncing delayed_peer_deletion_task to delete ${size} peers", ("size", number_of_peers_to_delete)); - _delayed_peer_deletion_task_done = fc::async([this](){ delayed_peer_deletion_task(); }, "delayed_peer_deletion_task" ); - } - else - dlog("delayed_peer_deletion_task is already scheduled 
(current size of _peers_to_delete is ${size})", ("size", number_of_peers_to_delete)); -#else - dlog("scheduling peer for deletion: ${peer} (this will not block)", ("peer", peer_to_delete->get_remote_endpoint())); - _peers_to_delete.push_back(peer_to_delete); - if (!_node_is_shutting_down && - (!_delayed_peer_deletion_task_done.valid() || _delayed_peer_deletion_task_done.ready())) - { - dlog("asyncing delayed_peer_deletion_task to delete ${size} peers", ("size", _peers_to_delete.size())); - _delayed_peer_deletion_task_done = fc::async([this](){ delayed_peer_deletion_task(); }, "delayed_peer_deletion_task" ); - } - else - dlog("delayed_peer_deletion_task is already scheduled (current size of _peers_to_delete is ${size})", ("size", _peers_to_delete.size())); - -#endif - } - - bool node_impl::is_accepting_new_connections() - { - VERIFY_CORRECT_THREAD(); - return !_p2p_network_connect_loop_done.canceled() && get_number_of_connections() <= _maximum_number_of_connections; - } - - bool node_impl::is_wanting_new_connections() - { - VERIFY_CORRECT_THREAD(); - return !_p2p_network_connect_loop_done.canceled() && get_number_of_connections() < _desired_number_of_connections; - } - - uint32_t node_impl::get_number_of_connections() - { - VERIFY_CORRECT_THREAD(); - return (uint32_t)(_handshaking_connections.size() + _active_connections.size()); - } - - peer_connection_ptr node_impl::get_peer_by_node_id(const node_id_t& node_id) - { - for (const peer_connection_ptr& active_peer : _active_connections) - if (node_id == active_peer->node_id) - return active_peer; - for (const peer_connection_ptr& handshaking_peer : _handshaking_connections) - if (node_id == handshaking_peer->node_id) - return handshaking_peer; - return peer_connection_ptr(); - } - - bool node_impl::is_already_connected_to_id(const node_id_t& node_id) - { - VERIFY_CORRECT_THREAD(); - if (node_id == _node_id) - { - dlog("is_already_connected_to_id returning true because the peer is us"); - return true; - } - for (const peer_connection_ptr active_peer : _active_connections) - if (node_id == active_peer->node_id) - { - dlog("is_already_connected_to_id returning true because the peer is already in our active list"); - return true; - } - for (const peer_connection_ptr handshaking_peer : _handshaking_connections) - if (node_id == handshaking_peer->node_id) - { - dlog("is_already_connected_to_id returning true because the peer is already in our handshaking list"); - return true; - } - return false; - } - - // merge addresses received from a peer into our database - bool node_impl::merge_address_info_with_potential_peer_database(const std::vector addresses) - { - VERIFY_CORRECT_THREAD(); - bool new_information_received = false; - for (const address_info& address : addresses) - { - if (address.firewalled == eos::net::firewalled_state::not_firewalled) - { - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); - if (address.last_seen_time > updated_peer_record.last_seen_time) - new_information_received = true; - updated_peer_record.last_seen_time = std::max(address.last_seen_time, updated_peer_record.last_seen_time); - _potential_peer_db.update_entry(updated_peer_record); - } - } - return new_information_received; - } - - void node_impl::display_current_connections() - { - VERIFY_CORRECT_THREAD(); - dlog("Currently have ${current} of [${desired}/${max}] connections", - ("current", get_number_of_connections()) - ("desired", _desired_number_of_connections) - ("max", 
_maximum_number_of_connections)); - dlog(" my id is ${id}", ("id", _node_id)); - - for (const peer_connection_ptr& active_connection : _active_connections) - { - dlog(" active: ${endpoint} with ${id} [${direction}]", - ("endpoint", active_connection->get_remote_endpoint()) - ("id", active_connection->node_id) - ("direction", active_connection->direction)); - } - for (const peer_connection_ptr& handshaking_connection : _handshaking_connections) - { - dlog(" handshaking: ${endpoint} with ${id} [${direction}]", - ("endpoint", handshaking_connection->get_remote_endpoint()) - ("id", handshaking_connection->node_id) - ("direction", handshaking_connection->direction)); - } - } - - void node_impl::on_message( peer_connection* originating_peer, const message& received_message ) - { - VERIFY_CORRECT_THREAD(); - message_hash_type message_hash = received_message.id(); - dlog("handling message ${type} ${hash} size ${size} from peer ${endpoint}", - ("type", eos::net::core_message_type_enum(received_message.msg_type))("hash", message_hash) - ("size", received_message.size) - ("endpoint", originating_peer->get_remote_endpoint())); - switch ( received_message.msg_type ) - { - case core_message_type_enum::hello_message_type: - on_hello_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::connection_accepted_message_type: - on_connection_accepted_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::connection_rejected_message_type: - on_connection_rejected_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::address_request_message_type: - on_address_request_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::address_message_type: - on_address_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::fetch_blockchain_item_ids_message_type: - on_fetch_blockchain_item_ids_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::blockchain_item_ids_inventory_message_type: - on_blockchain_item_ids_inventory_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::fetch_items_message_type: - on_fetch_items_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::item_not_available_message_type: - on_item_not_available_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::item_ids_inventory_message_type: - on_item_ids_inventory_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::closing_connection_message_type: - on_closing_connection_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::block_message_type: - process_block_message(originating_peer, received_message, message_hash); - break; - case core_message_type_enum::current_time_request_message_type: - on_current_time_request_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::current_time_reply_message_type: - on_current_time_reply_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::check_firewall_message_type: - on_check_firewall_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::check_firewall_reply_message_type: - on_check_firewall_reply_message(originating_peer, received_message.as()); - break; - case 
core_message_type_enum::get_current_connections_request_message_type: - on_get_current_connections_request_message(originating_peer, received_message.as()); - break; - case core_message_type_enum::get_current_connections_reply_message_type: - on_get_current_connections_reply_message(originating_peer, received_message.as()); - break; - - default: - // ignore any message in between core_message_type_first and _last that we don't handle above - // to allow us to add messages in the future - if (received_message.msg_type < core_message_type_enum::core_message_type_first || - received_message.msg_type > core_message_type_enum::core_message_type_last) - process_ordinary_message(originating_peer, received_message, message_hash); - break; - } - } - - - fc::variant_object node_impl::generate_hello_user_data() - { - VERIFY_CORRECT_THREAD(); - // for the time being, shoehorn a bunch of properties into the user_data variant object, - // which lets us add and remove fields without changing the protocol. Once we - // settle on what we really want in there, we'll likely promote them to first - // class fields in the hello message - fc::mutable_variant_object user_data; - user_data["fc_git_revision_sha"] = fc::git_revision_sha; - user_data["fc_git_revision_unix_timestamp"] = fc::git_revision_unix_timestamp; -#if defined( __APPLE__ ) - user_data["platform"] = "osx"; -#elif defined( __linux__ ) - user_data["platform"] = "linux"; -#elif defined( _MSC_VER ) - user_data["platform"] = "win32"; -#else - user_data["platform"] = "other"; -#endif - user_data["bitness"] = sizeof(void*) * 8; - - user_data["node_id"] = _node_id; - - item_hash_t head_block_id = _delegate->get_head_block_id(); - user_data["last_known_block_hash"] = head_block_id; - user_data["last_known_block_number"] = _delegate->get_block_number(head_block_id); - user_data["last_known_block_time"] = _delegate->get_block_time(head_block_id); - - if (!_hard_fork_block_numbers.empty()) - user_data["last_known_fork_block_number"] = _hard_fork_block_numbers.back(); - - return user_data; - } - void node_impl::parse_hello_user_data_for_peer(peer_connection* originating_peer, const fc::variant_object& user_data) - { - VERIFY_CORRECT_THREAD(); - // try to parse data out of the user_agent string - if (user_data.contains("eos_git_revision_sha")) - originating_peer->eos_git_revision_sha = user_data["eos_git_revision_sha"].as_string(); - if (user_data.contains("eos_git_revision_unix_timestamp")) - originating_peer->eos_git_revision_unix_timestamp = fc::time_point_sec(user_data["eos_git_revision_unix_timestamp"].as()); - if (user_data.contains("fc_git_revision_sha")) - originating_peer->fc_git_revision_sha = user_data["fc_git_revision_sha"].as_string(); - if (user_data.contains("fc_git_revision_unix_timestamp")) - originating_peer->fc_git_revision_unix_timestamp = fc::time_point_sec(user_data["fc_git_revision_unix_timestamp"].as()); - if (user_data.contains("platform")) - originating_peer->platform = user_data["platform"].as_string(); - if (user_data.contains("bitness")) - originating_peer->bitness = user_data["bitness"].as(); - if (user_data.contains("node_id")) - originating_peer->node_id = user_data["node_id"].as(); - if (user_data.contains("last_known_fork_block_number")) - originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as(); - } - - void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ) - { - VERIFY_CORRECT_THREAD(); - // this already_connected check must 
come before we fill in peer data below - node_id_t peer_node_id = hello_message_received.node_public_key; - try - { - peer_node_id = hello_message_received.user_data["node_id"].as(); - } - catch (const fc::exception&) - { - // either it's not there or it's not a valid session id. either way, ignore. - } - bool already_connected_to_this_peer = is_already_connected_to_id(peer_node_id); - - // validate the node id - fc::sha256::encoder shared_secret_encoder; - fc::sha512 shared_secret = originating_peer->get_shared_secret(); - shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); - fc::ecc::public_key expected_node_public_key(hello_message_received.signed_shared_secret, shared_secret_encoder.result(), false); - - // store off the data provided in the hello message - originating_peer->user_agent = hello_message_received.user_agent; - originating_peer->node_public_key = hello_message_received.node_public_key; - originating_peer->node_id = hello_message_received.node_public_key; // will probably be overwritten in parse_hello_user_data_for_peer() - originating_peer->core_protocol_version = hello_message_received.core_protocol_version; - originating_peer->inbound_address = hello_message_received.inbound_address; - originating_peer->inbound_port = hello_message_received.inbound_port; - originating_peer->outbound_port = hello_message_received.outbound_port; - - parse_hello_user_data_for_peer(originating_peer, hello_message_received.user_data); - - // if they didn't provide a last known fork, try to guess it - if (originating_peer->last_known_fork_block_number == 0 && - originating_peer->eos_git_revision_unix_timestamp) - { - uint32_t unix_timestamp = originating_peer->eos_git_revision_unix_timestamp->sec_since_epoch(); - originating_peer->last_known_fork_block_number = _delegate->estimate_last_known_fork_from_git_revision_timestamp(unix_timestamp); - } - - // now decide what to do with it - if (originating_peer->their_state == peer_connection::their_connection_state::just_connected) - { - if (hello_message_received.node_public_key != expected_node_public_key.serialize()) - { - wlog("Invalid signature in hello message from peer ${peer}", ("peer", originating_peer->get_remote_endpoint())); - std::string rejection_message("Invalid signature in hello message"); - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::invalid_hello_message, - rejection_message); - - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message( message(connection_rejected ) ); - // for this type of message, we're immediately disconnecting this peer - disconnect_from_peer( originating_peer, "Invalid signature in hello message" ); - return; - } - if (hello_message_received.chain_id != _chain_id) - { - wlog("Received hello message from peer on a different chain: ${message}", ("message", hello_message_received)); - std::ostringstream rejection_message; - rejection_message << "You're on a different chain than I am. 
I'm on " << _chain_id.str() << - " and you're on " << hello_message_received.chain_id.str(); - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::different_chain, - rejection_message.str()); - - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message(message(connection_rejected)); - // for this type of message, we're immediately disconnecting this peer, instead of trying to - // allowing her to ask us for peers (any of our peers will be on the same chain as us, so there's no - // benefit of sharing them) - disconnect_from_peer(originating_peer, "You are on a different chain from me"); - return; - } - if (originating_peer->last_known_fork_block_number != 0) - { - uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(originating_peer->last_known_fork_block_number); - if (next_fork_block_number != 0) - { - // we know about a fork they don't. See if we've already passed that block. If we have, don't let them - // connect because we won't be able to give them anything useful - uint32_t head_block_num = _delegate->get_block_number(_delegate->get_head_block_id()); - if (next_fork_block_number < head_block_num) - { -#ifdef ENABLE_DEBUG_ULOGS - ulog("Rejecting connection from peer because their version is too old. Their version date: ${date}", ("date", originating_peer->eos_git_revision_unix_timestamp)); -#endif - wlog("Received hello message from peer running a version of that can only understand blocks up to #${their_hard_fork}, but I'm at head block number #${my_block_number}", - ("their_hard_fork", next_fork_block_number)("my_block_number", head_block_num)); - std::ostringstream rejection_message; - rejection_message << "Your client is outdated -- you can only understand blocks up to #" << next_fork_block_number << ", but I'm already on block #" << head_block_num; - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::unspecified, - rejection_message.str() ); - - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message(message(connection_rejected)); - // for this type of message, we're immediately disconnecting this peer, instead of trying to - // allowing her to ask us for peers (any of our peers will be on the same chain as us, so there's no - // benefit of sharing them) - disconnect_from_peer(originating_peer, "Your client is too old, please upgrade"); - return; - } - } - } - if (already_connected_to_this_peer) - { - - connection_rejected_message connection_rejected; - if (_node_id == originating_peer->node_id) - connection_rejected = connection_rejected_message(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::connected_to_self, - "I'm connecting to myself"); - else - connection_rejected = connection_rejected_message(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::already_connected, - "I'm already connected to you"); - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message(message(connection_rejected)); - dlog("Received a hello_message from peer ${peer} that I'm already connected to (with id 
${id}), rejection", - ("peer", originating_peer->get_remote_endpoint()) - ("id", originating_peer->node_id)); - } -#ifdef ENABLE_P2P_DEBUGGING_API - else if(!_allowed_peers.empty() && - _allowed_peers.find(originating_peer->node_id) == _allowed_peers.end()) - { - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::blocked, - "you are not in my allowed_peers list"); - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message( message(connection_rejected ) ); - dlog( "Received a hello_message from peer ${peer} who isn't in my allowed_peers list, rejection", ("peer", originating_peer->get_remote_endpoint() ) ); - } -#endif // ENABLE_P2P_DEBUGGING_API - else - { - // whether we're planning on accepting them as a peer or not, they seem to be a valid node, - // so add them to our database if they're not firewalled - - // in the hello message, the peer sent us the IP address and port it thought it was connecting from. - // If they match the IP and port we see, we assume that they're actually on the internet and they're not - // firewalled. - fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); - if( peers_actual_outbound_endpoint.get_address() == originating_peer->inbound_address && - peers_actual_outbound_endpoint.port() == originating_peer->outbound_port ) - { - if( originating_peer->inbound_port == 0 ) - { - dlog( "peer does not appear to be firewalled, but they did not give an inbound port so I'm treating them as if they are." ); - originating_peer->is_firewalled = firewalled_state::firewalled; - } - else - { - // peer is not firewalled, add it to our database - fc::ip::endpoint peers_inbound_endpoint(originating_peer->inbound_address, originating_peer->inbound_port); - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(peers_inbound_endpoint); - _potential_peer_db.update_entry(updated_peer_record); - originating_peer->is_firewalled = firewalled_state::not_firewalled; - } - } - else - { - dlog("peer is firewalled: they think their outbound endpoint is ${reported_endpoint}, but I see it as ${actual_endpoint}", - ("reported_endpoint", fc::ip::endpoint(originating_peer->inbound_address, originating_peer->outbound_port)) - ("actual_endpoint", peers_actual_outbound_endpoint)); - originating_peer->is_firewalled = firewalled_state::firewalled; - } - - if (!is_accepting_new_connections()) - { - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::not_accepting_connections, - "not accepting any more incoming connections"); - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message(message(connection_rejected)); - dlog("Received a hello_message from peer ${peer}, but I'm not accepting any more connections, rejection", - ("peer", originating_peer->get_remote_endpoint())); - } - else - { - originating_peer->their_state = peer_connection::their_connection_state::connection_accepted; - originating_peer->send_message(message(connection_accepted_message())); - dlog("Received a hello_message from peer ${peer}, sending reply to accept connection", - ("peer", originating_peer->get_remote_endpoint())); - } - } - } - else - { - // we can wind up here if 
we've connected to ourself, and the source and - // destination endpoints are the same, causing messages we send out - // to arrive back on the initiating socket instead of the receiving - // socket. If we did a complete job of enumerating local addresses, - // we could avoid directly connecting to ourselves, or at least detect - // immediately when we did it and disconnect. - - // The only way I know of that we'd get an unexpected hello that we - // can't really guard against is if we do a simulatenous open, we - // probably need to think through that case. We're not attempting that - // yet, though, so it's ok to just disconnect here. - wlog("unexpected hello_message from peer, disconnecting"); - disconnect_from_peer(originating_peer, "Received a unexpected hello_message"); - } - } - - void node_impl::on_connection_accepted_message(peer_connection* originating_peer, const connection_accepted_message& connection_accepted_message_received) - { - VERIFY_CORRECT_THREAD(); - dlog("Received a connection_accepted in response to my \"hello\" from ${peer}", ("peer", originating_peer->get_remote_endpoint())); - originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_accepted; - originating_peer->our_state = peer_connection::our_connection_state::connection_accepted; - originating_peer->send_message(address_request_message()); - fc::time_point now = fc::time_point::now(); - if (_is_firewalled == firewalled_state::unknown && - _last_firewall_check_message_sent < now - fc::minutes(5) && - originating_peer->core_protocol_version >= 106) - { - wlog("I don't know if I'm firewalled. Sending a firewall check message to peer ${peer}", - ("peer", originating_peer->get_remote_endpoint())); - originating_peer->firewall_check_state = new firewall_check_state_data; - - originating_peer->send_message(check_firewall_message()); - _last_firewall_check_message_sent = now; - } - } - - void node_impl::on_connection_rejected_message(peer_connection* originating_peer, const connection_rejected_message& connection_rejected_message_received) - { - VERIFY_CORRECT_THREAD(); - if (originating_peer->our_state == peer_connection::our_connection_state::just_connected) - { - ilog("Received a rejection from ${peer} in response to my \"hello\", reason: \"${reason}\"", - ("peer", originating_peer->get_remote_endpoint()) - ("reason", connection_rejected_message_received.reason_string)); - - if (connection_rejected_message_received.reason_code == rejection_reason_code::connected_to_self) - { - _potential_peer_db.erase(originating_peer->get_socket().remote_endpoint()); - move_peer_to_closing_list(originating_peer->shared_from_this()); - originating_peer->close_connection(); - } - else - { - // update our database to record that we were rejected so we won't try to connect again for a while - // this only happens on connections we originate, so we should already know that peer is not firewalled - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(originating_peer->get_socket().remote_endpoint()); - if (updated_peer_record) - { - updated_peer_record->last_connection_disposition = last_connection_rejected; - updated_peer_record->last_connection_attempt_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - } - - originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_rejected; - originating_peer->our_state = peer_connection::our_connection_state::connection_rejected; - 
originating_peer->send_message(address_request_message()); - } - else - FC_THROW( "unexpected connection_rejected_message from peer" ); - } - - void node_impl::on_address_request_message(peer_connection* originating_peer, const address_request_message& address_request_message_received) - { - VERIFY_CORRECT_THREAD(); - dlog("Received an address request message"); - - address_message reply; - if (!_peer_advertising_disabled) - { - reply.addresses.reserve(_active_connections.size()); - for (const peer_connection_ptr& active_peer : _active_connections) - { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - - reply.addresses.emplace_back(address_info(*active_peer->get_remote_endpoint(), - fc::time_point::now(), - active_peer->round_trip_delay, - active_peer->node_id, - active_peer->direction, - active_peer->is_firewalled)); - } - } - originating_peer->send_message(reply); - } - - void node_impl::on_address_message(peer_connection* originating_peer, const address_message& address_message_received) - { - VERIFY_CORRECT_THREAD(); - dlog("Received an address message containing ${size} addresses", ("size", address_message_received.addresses.size())); - for (const address_info& address : address_message_received.addresses) - { - dlog(" ${endpoint} last seen ${time}", ("endpoint", address.remote_endpoint)("time", address.last_seen_time)); - } - std::vector updated_addresses = address_message_received.addresses; - for (address_info& address : updated_addresses) - address.last_seen_time = fc::time_point_sec(fc::time_point::now()); - bool new_information_received = merge_address_info_with_potential_peer_database(updated_addresses); - if (new_information_received) - trigger_p2p_network_connect_loop(); - - if (_handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) - { - // if we were handshaking, we need to continue with the next step in handshaking (which is either - // ending handshaking and starting synchronization or disconnecting) - if( originating_peer->our_state == peer_connection::our_connection_state::connection_rejected) - disconnect_from_peer(originating_peer, "You rejected my connection request (hello message) so I'm disconnecting"); - else if (originating_peer->their_state == peer_connection::their_connection_state::connection_rejected) - disconnect_from_peer(originating_peer, "I rejected your connection request (hello message) so I'm disconnecting"); - else - { - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - // mark the connection as successful in the database - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_connection_disposition = last_connection_succeeded; - _potential_peer_db.update_entry(*updated_peer_record); - } - } - - originating_peer->negotiation_status = peer_connection::connection_negotiation_status::negotiation_complete; - move_peer_to_active_list(originating_peer->shared_from_this()); - new_peer_just_added(originating_peer->shared_from_this()); - } - } - // else if this was an active connection, then this was just a reply to our periodic address requests. 
- // we've processed it, there's nothing else to do - } - - void node_impl::on_fetch_blockchain_item_ids_message(peer_connection* originating_peer, - const fetch_blockchain_item_ids_message& fetch_blockchain_item_ids_message_received) - { - VERIFY_CORRECT_THREAD(); - item_id peers_last_item_seen = item_id(fetch_blockchain_item_ids_message_received.item_type, item_hash_t()); - if (fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty()) - { - dlog("sync: received a request for item ids starting at the beginning of the chain from peer ${peer_endpoint} (full request: ${synopsis})", - ("peer_endpoint", originating_peer->get_remote_endpoint()) - ("synopsis", fetch_blockchain_item_ids_message_received.blockchain_synopsis)); - } - else - { - item_hash_t peers_last_item_hash_seen = fetch_blockchain_item_ids_message_received.blockchain_synopsis.back(); - dlog("sync: received a request for item ids after ${last_item_seen} from peer ${peer_endpoint} (full request: ${synopsis})", - ("last_item_seen", peers_last_item_hash_seen) - ("peer_endpoint", originating_peer->get_remote_endpoint()) - ("synopsis", fetch_blockchain_item_ids_message_received.blockchain_synopsis)); - peers_last_item_seen.item_hash = peers_last_item_hash_seen; - } - - blockchain_item_ids_inventory_message reply_message; - reply_message.item_type = fetch_blockchain_item_ids_message_received.item_type; - reply_message.total_remaining_item_count = 0; - try - { - reply_message.item_hashes_available = _delegate->get_block_ids(fetch_blockchain_item_ids_message_received.blockchain_synopsis, - reply_message.total_remaining_item_count); - } - catch (const peer_is_on_an_unreachable_fork&) - { - dlog("Peer is on a fork and there's no set of blocks we can provide to switch them to our fork"); - // we reply with an empty list as if we had an empty blockchain; - // we don't want to disconnect because they may be able to provide - // us with blocks on their chain - } - - bool disconnect_from_inhibited_peer = false; - // if our client doesn't have any items after the item the peer requested, it will send back - // a list containing the last item the peer requested - wdump((reply_message)(fetch_blockchain_item_ids_message_received.blockchain_synopsis)); - if( reply_message.item_hashes_available.empty() ) - originating_peer->peer_needs_sync_items_from_us = false; /* I have no items in my blockchain */ - else if( !fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty() && - reply_message.item_hashes_available.size() == 1 && - std::find(fetch_blockchain_item_ids_message_received.blockchain_synopsis.begin(), - fetch_blockchain_item_ids_message_received.blockchain_synopsis.end(), - reply_message.item_hashes_available.back() ) != fetch_blockchain_item_ids_message_received.blockchain_synopsis.end() ) - { - /* the last item in the peer's list matches the last item in our list */ - originating_peer->peer_needs_sync_items_from_us = false; - if (originating_peer->inhibit_fetching_sync_blocks) - disconnect_from_inhibited_peer = true; // delay disconnecting until after we send our reply to this fetch_blockchain_item_ids_message - } - else - originating_peer->peer_needs_sync_items_from_us = true; - - if (!originating_peer->peer_needs_sync_items_from_us) - { - dlog("sync: peer is already in sync with us ${p}", ("p", originating_peer->we_need_sync_items_from_peer)); - // if we thought we had all the items this peer had, but now it turns out that we don't - // have the last item it requested to send from, - // we need to kick off another 
round of synchronization - if (!originating_peer->we_need_sync_items_from_peer && - !fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty() && - !_delegate->has_item(peers_last_item_seen)) - { - dlog("sync: restarting sync with peer ${peer}", ("peer", originating_peer->get_remote_endpoint())); - start_synchronizing_with_peer(originating_peer->shared_from_this()); - } - } - else - { - dlog("sync: peer is out of sync, sending peer ${count} items ids: first: ${first_item_id}, last: ${last_item_id}", - ("count", reply_message.item_hashes_available.size()) - ("first_item_id", reply_message.item_hashes_available.front()) - ("last_item_id", reply_message.item_hashes_available.back())); - if (!originating_peer->we_need_sync_items_from_peer && - !fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty() && - !_delegate->has_item(peers_last_item_seen)) - { - dlog("sync: restarting sync with peer ${peer}", ("peer", originating_peer->get_remote_endpoint())); - start_synchronizing_with_peer(originating_peer->shared_from_this()); - } - } - originating_peer->send_message(reply_message); - - if (disconnect_from_inhibited_peer) - { - // the peer has all of our blocks, and we don't want any of theirs, so disconnect them - disconnect_from_peer(originating_peer, "you are on a fork that I'm unable to switch to"); - return; - } - - if (originating_peer->direction == peer_connection_direction::inbound && - _handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) - { - // handshaking is done, move the connection to fully active status and start synchronizing - dlog("peer ${endpoint} which was handshaking with us has started synchronizing with us, start syncing with it", - ("endpoint", originating_peer->get_remote_endpoint())); - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - // mark the connection as successful in the database - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(*inbound_endpoint); - updated_peer_record.last_connection_disposition = last_connection_succeeded; - _potential_peer_db.update_entry(updated_peer_record); - } - - // transition it to our active list - move_peer_to_active_list(originating_peer->shared_from_this()); - new_peer_just_added(originating_peer->shared_from_this()); - } - } - - uint32_t node_impl::calculate_unsynced_block_count_from_all_peers() - { - VERIFY_CORRECT_THREAD(); - uint32_t max_number_of_unfetched_items = 0; - for( const peer_connection_ptr& peer : _active_connections ) - { - uint32_t this_peer_number_of_unfetched_items = (uint32_t)peer->ids_of_items_to_get.size() + peer->number_of_unfetched_item_ids; - max_number_of_unfetched_items = std::max(max_number_of_unfetched_items, - this_peer_number_of_unfetched_items); - } - return max_number_of_unfetched_items; - } - - // get a blockchain synopsis that makes sense to send to the given peer. 
- // If the peer isn't yet syncing with us, this is just a synopsis of our active blockchain - // If the peer is syncing with us, it is a synopsis of our active blockchain plus the - // blocks the peer has already told us it has - std::vector node_impl::create_blockchain_synopsis_for_peer( const peer_connection* peer ) - { - VERIFY_CORRECT_THREAD(); - item_hash_t reference_point = peer->last_block_delegate_has_seen; - uint32_t reference_point_block_num = _delegate->get_block_number(peer->last_block_delegate_has_seen); - - // when we call _delegate->get_blockchain_synopsis(), we may yield and there's a - // chance this peer's state will change before we get control back. Save off - // the stuff necessary for generating the synopsis. - // This is pretty expensive, we should find a better way to do this - std::vector original_ids_of_items_to_get(peer->ids_of_items_to_get.begin(), peer->ids_of_items_to_get.end()); - uint32_t number_of_blocks_after_reference_point = original_ids_of_items_to_get.size(); - - std::vector synopsis = _delegate->get_blockchain_synopsis(reference_point, number_of_blocks_after_reference_point); - -#if 0 - // just for debugging, enable this and set a breakpoint to step through - if (synopsis.empty()) - synopsis = _delegate->get_blockchain_synopsis(reference_point, number_of_blocks_after_reference_point); - - // TODO: it's possible that the returned synopsis is empty if the blockchain is empty (that's fine) - // or if the reference point is now past our undo history (that's not). - // in the second case, we should mark this peer as one we're unable to sync with and - // disconnect them. - if (reference_point != item_hash_t() && synopsis.empty()) - FC_THROW_EXCEPTION(block_older_than_undo_history, "You are on a fork I'm unable to switch to"); -#endif - - if( number_of_blocks_after_reference_point ) - { - // then the synopsis is incomplete, add the missing elements from ids_of_items_to_get - uint32_t first_block_num_in_ids_to_get = _delegate->get_block_number(original_ids_of_items_to_get.front()); - uint32_t true_high_block_num = first_block_num_in_ids_to_get + original_ids_of_items_to_get.size() - 1; - - // in order to generate a seamless synopsis, we need to be using the same low_block_num as the - // backend code; the first block in the synopsis will be the low block number it used - uint32_t low_block_num = synopsis.empty() ? 1 : _delegate->get_block_number(synopsis.front()); - - do - { - if( low_block_num >= first_block_num_in_ids_to_get ) - synopsis.push_back(original_ids_of_items_to_get[low_block_num - first_block_num_in_ids_to_get]); - low_block_num += (true_high_block_num - low_block_num + 2 ) / 2; - } - while ( low_block_num <= true_high_block_num ); - assert(synopsis.back() == original_ids_of_items_to_get.back()); - } - return synopsis; - } - - void node_impl::fetch_next_batch_of_item_ids_from_peer( peer_connection* peer, bool reset_fork_tracking_data_for_peer /* = false */ ) - { - VERIFY_CORRECT_THREAD(); - if( reset_fork_tracking_data_for_peer ) - { - peer->last_block_delegate_has_seen = item_hash_t(); - peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hash_t()); - } - - fc::oexception synopsis_exception; - try - { - std::vector blockchain_synopsis = create_blockchain_synopsis_for_peer( peer ); - - item_hash_t last_item_seen = blockchain_synopsis.empty() ? 
item_hash_t() : blockchain_synopsis.back(); - dlog( "sync: sending a request for the next items after ${last_item_seen} to peer ${peer}, (full request is ${blockchain_synopsis})", - ( "last_item_seen", last_item_seen ) - ( "peer", peer->get_remote_endpoint() ) - ( "blockchain_synopsis", blockchain_synopsis ) ); - peer->item_ids_requested_from_peer = boost::make_tuple( blockchain_synopsis, fc::time_point::now() ); - peer->send_message( fetch_blockchain_item_ids_message(_sync_item_type, blockchain_synopsis ) ); - } - catch (const block_older_than_undo_history& e) - { - synopsis_exception = e; - } - if (synopsis_exception) - disconnect_from_peer(peer, "You are on a fork I'm unable to switch to"); - } - - void node_impl::on_blockchain_item_ids_inventory_message(peer_connection* originating_peer, - const blockchain_item_ids_inventory_message& blockchain_item_ids_inventory_message_received ) - { - VERIFY_CORRECT_THREAD(); - // ignore unless we asked for the data - if( originating_peer->item_ids_requested_from_peer ) - { - // verify that the peer's the block ids the peer sent is a valid response to our request; - // It should either be an empty list of blocks, or a list of blocks that builds off of one of - // the blocks in the synopsis we sent - if (!blockchain_item_ids_inventory_message_received.item_hashes_available.empty()) - { - // what's more, it should be a sequential list of blocks, verify that first - uint32_t first_block_number_in_reponse = _delegate->get_block_number(blockchain_item_ids_inventory_message_received.item_hashes_available.front()); - for (unsigned i = 1; i < blockchain_item_ids_inventory_message_received.item_hashes_available.size(); ++i) - { - uint32_t actual_num = _delegate->get_block_number(blockchain_item_ids_inventory_message_received.item_hashes_available[i]); - uint32_t expected_num = first_block_number_in_reponse + i; - if (actual_num != expected_num) - { - wlog("Invalid response from peer ${peer_endpoint}. The list of blocks they provided is not sequential, " - "the ${position}th block in their reply was block number ${actual_num}, " - "but it should have been number ${expected_num}", - ("peer_endpoint", originating_peer->get_remote_endpoint()) - ("position", i) - ("actual_num", actual_num) - ("expected_num", expected_num)); - fc::exception error_for_peer(FC_LOG_MESSAGE(error, - "You gave an invalid response to my request for sync blocks. The list of blocks you provided is not sequential, " - "the ${position}th block in their reply was block number ${actual_num}, " - "but it should have been number ${expected_num}", - ("position", i) - ("actual_num", actual_num) - ("expected_num", expected_num))); - disconnect_from_peer(originating_peer, - "You gave an invalid response to my request for sync blocks", - true, error_for_peer); - return; - } - } - - const std::vector& synopsis_sent_in_request = originating_peer->item_ids_requested_from_peer->get<0>(); - const item_hash_t& first_item_hash = blockchain_item_ids_inventory_message_received.item_hashes_available.front(); - - if (synopsis_sent_in_request.empty()) - { - // if we sent an empty synopsis, we were asking for all blocks, so the first block should be block 1 - if (_delegate->get_block_number(first_item_hash) != 1) - { - wlog("Invalid response from peer ${peer_endpoint}. 
We requested a list of sync blocks starting from the beginning of the chain, " - "but they provided a list of blocks starting with ${first_block}", - ("peer_endpoint", originating_peer->get_remote_endpoint()) - ("first_block", first_item_hash)); - fc::exception error_for_peer(FC_LOG_MESSAGE(error, "You gave an invalid response for my request for sync blocks. I asked for blocks starting from the beginning of the chain, " - "but you returned a list of blocks starting with ${first_block}", - ("first_block", first_item_hash))); - disconnect_from_peer(originating_peer, - "You gave an invalid response to my request for sync blocks", - true, error_for_peer); - return; - } - } - else // synopsis was not empty, we expect a response building off one of the blocks we sent - { - if (boost::range::find(synopsis_sent_in_request, first_item_hash) == synopsis_sent_in_request.end()) - { - wlog("Invalid response from peer ${peer_endpoint}. We requested a list of sync blocks based on the synopsis ${synopsis}, but they " - "provided a list of blocks starting with ${first_block}", - ("peer_endpoint", originating_peer->get_remote_endpoint()) - ("synopsis", synopsis_sent_in_request) - ("first_block", first_item_hash)); - fc::exception error_for_peer(FC_LOG_MESSAGE(error, "You gave an invalid response for my request for sync blocks. I asked for blocks following something in " - "${synopsis}, but you returned a list of blocks starting with ${first_block} which wasn't one of your choices", - ("synopsis", synopsis_sent_in_request) - ("first_block", first_item_hash))); - disconnect_from_peer(originating_peer, - "You gave an invalid response to my request for sync blocks", - true, error_for_peer); - return; - } - } - } - originating_peer->item_ids_requested_from_peer.reset(); - - dlog( "sync: received a list of ${count} available items from ${peer_endpoint}", - ( "count", blockchain_item_ids_inventory_message_received.item_hashes_available.size() ) - ( "peer_endpoint", originating_peer->get_remote_endpoint() ) ); - //for( const item_hash_t& item_hash : blockchain_item_ids_inventory_message_received.item_hashes_available ) - //{ - // dlog( "sync: ${hash}", ("hash", item_hash ) ); - //} - - // if the peer doesn't have any items after the one we asked for - if( blockchain_item_ids_inventory_message_received.total_remaining_item_count == 0 && - ( blockchain_item_ids_inventory_message_received.item_hashes_available.empty() || // there are no items in the peer's blockchain. this should only happen if our blockchain was empty when we requested, might want to verify that. - ( blockchain_item_ids_inventory_message_received.item_hashes_available.size() == 1 && - _delegate->has_item( item_id(blockchain_item_ids_inventory_message_received.item_type, - blockchain_item_ids_inventory_message_received.item_hashes_available.front() ) ) ) ) && // we've already seen the last item in the peer's blockchain - originating_peer->ids_of_items_to_get.empty() && - originating_peer->number_of_unfetched_item_ids == 0 ) // <-- is the last check necessary? 
- { - dlog( "sync: peer said we're up-to-date, entering normal operation with this peer" ); - originating_peer->we_need_sync_items_from_peer = false; - - uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); - _total_number_of_unfetched_items = new_number_of_unfetched_items; - if( new_number_of_unfetched_items == 0 ) - _delegate->sync_status( blockchain_item_ids_inventory_message_received.item_type, 0 ); - - return; - } - - std::deque item_hashes_received( blockchain_item_ids_inventory_message_received.item_hashes_available.begin(), - blockchain_item_ids_inventory_message_received.item_hashes_available.end() ); - originating_peer->number_of_unfetched_item_ids = blockchain_item_ids_inventory_message_received.total_remaining_item_count; - // flush any items this peer sent us that we've already received and processed from another peer - if (!item_hashes_received.empty() && - originating_peer->ids_of_items_to_get.empty()) - { - bool is_first_item_for_other_peer = false; - for (const peer_connection_ptr& peer : _active_connections) - if (peer != originating_peer->shared_from_this() && - !peer->ids_of_items_to_get.empty() && - peer->ids_of_items_to_get.front() == blockchain_item_ids_inventory_message_received.item_hashes_available.front()) - { - dlog("The item ${newitem} is the first item for peer ${peer}", - ("newitem", blockchain_item_ids_inventory_message_received.item_hashes_available.front()) - ("peer", peer->get_remote_endpoint())); - is_first_item_for_other_peer = true; - break; - } - dlog("is_first_item_for_other_peer: ${is_first}. item_hashes_received.size() = ${size}", - ("is_first", is_first_item_for_other_peer)("size", item_hashes_received.size())); - if (!is_first_item_for_other_peer) - { - while (!item_hashes_received.empty() && - _delegate->has_item(item_id(blockchain_item_ids_inventory_message_received.item_type, - item_hashes_received.front()))) - { - assert(item_hashes_received.front() != item_hash_t()); - originating_peer->last_block_delegate_has_seen = item_hashes_received.front(); - originating_peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hashes_received.front()); - dlog("popping item because delegate has already seen it. peer ${peer}'s last block the delegate has seen is now ${block_id} (actual block #${actual_block_num})", - ("peer", originating_peer->get_remote_endpoint()) - ("block_id", originating_peer->last_block_delegate_has_seen) - ("actual_block_num", _delegate->get_block_number(item_hashes_received.front()))); - - item_hashes_received.pop_front(); - } - dlog("after removing all items we have already seen, item_hashes_received.size() = ${size}", ("size", item_hashes_received.size())); - } - } - else if (!item_hashes_received.empty()) - { - // we received a list of items and we already have a list of items to fetch from this peer. - // In the normal case, this list will immediately follow the existing list, meaning the - // last hash of our existing list will match the first hash of the new list. - - // In the much less likely case, we've received a partial list of items from the peer, then - // the peer switched forks before sending us the remaining list. In this case, the first - // hash in the new list may not be the last hash in the existing list (it may be earlier, or - // it may not exist at all. - - // In either case, pop items off the back of our existing list until we find our first - // item, then append our list. 
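// ----------------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of the original file] The while loop that
// follows implements the comment above. This is a minimal, self-contained distillation
// of the same merge; names such as merge_id_lists/existing/received are invented for
// the example and do not appear in the real code.
#include <deque>
#include <string>

// Drop entries off the back of `existing` until its last element matches the first
// newly received id, drop that duplicate, then append the new list. If nothing
// matches, the peer presumably switched forks and the caller has to handle it.
static bool merge_id_lists(std::deque<std::string>& existing, std::deque<std::string> received)
{
    if (received.empty())
        return true;
    while (!existing.empty() && existing.back() != received.front())
        existing.pop_back();
    if (existing.empty())
        return false;       // no overlap: fork switch, caller resolves it
    existing.pop_back();    // remove the duplicate element
    existing.insert(existing.end(), received.begin(), received.end());
    return true;
}

int main()
{
    std::deque<std::string> existing{"a", "b", "c", "d"};
    bool ok = merge_id_lists(existing, {"c", "e", "f"});
    // expected result: a b c e f
    return (ok && existing.size() == 5 && existing.back() == "f") ? 0 : 1;
}
// ----------------------------------------------------------------------------------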
- while (!originating_peer->ids_of_items_to_get.empty()) - { - if (item_hashes_received.front() != originating_peer->ids_of_items_to_get.back()) - originating_peer->ids_of_items_to_get.pop_back(); - else - break; - } - if (originating_peer->ids_of_items_to_get.empty()) - { - // this happens when the peer has switched forks between the last inventory message and - // this one, and there weren't any unfetched items in common - // We don't know where in the blockchain the new front() actually falls, all we can - // expect is that it is a block that we knew about because it should be one of the - // blocks we sent in the initial synopsis. - assert(_delegate->has_item(item_id(_sync_item_type, item_hashes_received.front()))); - originating_peer->last_block_delegate_has_seen = item_hashes_received.front(); - originating_peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hashes_received.front()); - item_hashes_received.pop_front(); - } - else - { - // the common simple case: the new list extends the old. pop off the duplicate element - originating_peer->ids_of_items_to_get.pop_back(); - } - } - - if (!item_hashes_received.empty() && !originating_peer->ids_of_items_to_get.empty()) - assert(item_hashes_received.front() != originating_peer->ids_of_items_to_get.back()); - - // append the remaining items to the peer's list - boost::push_back(originating_peer->ids_of_items_to_get, item_hashes_received); - - originating_peer->number_of_unfetched_item_ids = blockchain_item_ids_inventory_message_received.total_remaining_item_count; - - // at any given time, there's a maximum number of blocks that can possibly be out there - // [(now - genesis time) / block interval]. If they offer us more blocks than that, - // they must be an attacker or have a buggy client. - fc::time_point_sec minimum_time_of_last_offered_block = - originating_peer->last_block_time_delegate_has_seen + // timestamp of the block immediately before the first unfetched block - originating_peer->number_of_unfetched_item_ids * config::BlockIntervalSeconds; - fc::time_point_sec now = fc::time_point::now(); - if (minimum_time_of_last_offered_block > now + EOS_NET_FUTURE_SYNC_BLOCKS_GRACE_PERIOD_SEC) - { - wlog("Disconnecting from peer ${peer} who offered us an implausible number of blocks, their last block would be in the future (${timestamp})", - ("peer", originating_peer->get_remote_endpoint()) - ("timestamp", minimum_time_of_last_offered_block)); - fc::exception error_for_peer(FC_LOG_MESSAGE(error, "You offered me a list of more sync blocks than could possibly exist. Total blocks offered: ${blocks}, Minimum time of the last block you offered: ${minimum_time_of_last_offered_block}, Now: ${now}", - ("blocks", originating_peer->number_of_unfetched_item_ids) - ("minimum_time_of_last_offered_block", minimum_time_of_last_offered_block) - ("now", now))); - disconnect_from_peer(originating_peer, - "You offered me a list of more sync blocks than could possibly exist", - true, error_for_peer); - return; - } - - uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); - if (new_number_of_unfetched_items != _total_number_of_unfetched_items) - _delegate->sync_status(blockchain_item_ids_inventory_message_received.item_type, - new_number_of_unfetched_items); - _total_number_of_unfetched_items = new_number_of_unfetched_items; - - if (blockchain_item_ids_inventory_message_received.total_remaining_item_count != 0) - { - // the peer hasn't sent us all the items it knows about. 
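// ----------------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of the original file] Before the prefetch
// decision below, a standalone restatement of the plausibility check above: the
// earliest possible timestamp of the peer's head block is (timestamp of the last block
// we know it has) + (number of blocks it still claims to have) * (block interval). If
// that lands noticeably past "now", the peer is offering more blocks than can exist.
// The 3-second interval and 30-second grace period are placeholders, not the real
// configuration values.
#include <chrono>
#include <cstdint>
#include <iostream>

int main()
{
    using namespace std::chrono;
    const auto block_interval = seconds(3);
    const auto grace_period   = seconds(30);

    const auto now = system_clock::now();
    const auto last_known_block_time = now - hours(1);
    const int64_t offered_block_count = 5'000'000;   // implausibly many for one hour

    const auto minimum_head_block_time =
        last_known_block_time + offered_block_count * block_interval;

    if (minimum_head_block_time > now + grace_period)
        std::cout << "implausible offer: disconnect the peer\n";
    else
        std::cout << "offer is plausible\n";
}
// ----------------------------------------------------------------------------------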
- if (originating_peer->ids_of_items_to_get.size() > EOS_NET_MIN_BLOCK_IDS_TO_PREFETCH) - { - // we have a good number of item ids from this peer, start fetching blocks from it; - // we'll switch back later to finish the job. - trigger_fetch_sync_items_loop(); - } - else - { - // keep fetching the peer's list of sync items until we get enough to switch into block- - // fetchimg mode - fetch_next_batch_of_item_ids_from_peer(originating_peer); - } - } - else - { - // the peer has told us about all of the items it knows - if (!originating_peer->ids_of_items_to_get.empty()) - { - // we now know about all of the items the peer knows about, and there are some items on the list - // that we should try to fetch. Kick off the fetch loop. - trigger_fetch_sync_items_loop(); - } - else - { - // If we get here, the peer has sent us a non-empty list of items, but we have already - // received all of the items from other peers. Send a new request to the peer to - // see if we're really in sync - fetch_next_batch_of_item_ids_from_peer(originating_peer); - } - } - } - else - { - wlog("sync: received a list of sync items available, but I didn't ask for any!"); - } - } - - message node_impl::get_message_for_item(const item_id& item) - { - try - { - return _message_cache.get_message(item.item_hash); - } - catch (fc::key_not_found_exception&) - {} - try - { - return _delegate->get_item(item); - } - catch (fc::key_not_found_exception&) - {} - return item_not_available_message(item); - } - - void node_impl::on_fetch_items_message(peer_connection* originating_peer, const fetch_items_message& fetch_items_message_received) - { - VERIFY_CORRECT_THREAD(); - dlog("received items request for ids ${ids} of type ${type} from peer ${endpoint}", - ("ids", fetch_items_message_received.items_to_fetch) - ("type", fetch_items_message_received.item_type) - ("endpoint", originating_peer->get_remote_endpoint())); - - fc::optional last_block_message_sent; - - std::list reply_messages; - for (const item_hash_t& item_hash : fetch_items_message_received.items_to_fetch) - { - try - { - message requested_message = _message_cache.get_message(item_hash); - dlog("received item request for item ${id} from peer ${endpoint}, returning the item from my message cache", - ("endpoint", originating_peer->get_remote_endpoint()) - ("id", requested_message.id())); - reply_messages.push_back(requested_message); - if (fetch_items_message_received.item_type == block_message_type) - last_block_message_sent = requested_message; - continue; - } - catch (fc::key_not_found_exception&) - { - // it wasn't in our local cache, that's ok ask the client - } - - item_id item_to_fetch(fetch_items_message_received.item_type, item_hash); - try - { - message requested_message = _delegate->get_item(item_to_fetch); - dlog("received item request from peer ${endpoint}, returning the item from delegate with id ${id} size ${size}", - ("id", requested_message.id()) - ("size", requested_message.size) - ("endpoint", originating_peer->get_remote_endpoint())); - reply_messages.push_back(requested_message); - if (fetch_items_message_received.item_type == block_message_type) - last_block_message_sent = requested_message; - continue; - } - catch (fc::key_not_found_exception&) - { - reply_messages.push_back(item_not_available_message(item_to_fetch)); - dlog("received item request from peer ${endpoint} but we don't have it", - ("endpoint", originating_peer->get_remote_endpoint())); - } - } - - // if we sent them a block, update our record of the last block they've seen accordingly - 
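// ----------------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of the original file] Both
// get_message_for_item() and the fetch handler above resolve a requested item the same
// way: try the in-memory message cache first, fall back to the delegate, and only then
// report "item not available". A minimal sketch of that two-level lookup; the names
// item_store/cache/store are invented for the example.
#include <optional>
#include <string>
#include <unordered_map>

struct item_store
{
    std::unordered_map<std::string, std::string> cache;   // recently relayed items
    std::unordered_map<std::string, std::string> store;   // authoritative copy (the "delegate")

    // Returns the item body, or std::nullopt, which the caller turns into an
    // item-not-available reply.
    std::optional<std::string> get(const std::string& id) const
    {
        if (auto it = cache.find(id); it != cache.end())
            return it->second;
        if (auto it = store.find(id); it != store.end())
            return it->second;
        return std::nullopt;
    }
};

int main()
{
    item_store s;
    s.store["block-42"] = "payload";
    return (s.get("block-42").has_value() && !s.get("block-43").has_value()) ? 0 : 1;
}
// ----------------------------------------------------------------------------------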
if (last_block_message_sent) - { - eos::net::block_message block = last_block_message_sent->as(); - originating_peer->last_block_delegate_has_seen = block.block_id; - originating_peer->last_block_time_delegate_has_seen = _delegate->get_block_time(block.block_id); - } - - for (const message& reply : reply_messages) - { - if (reply.msg_type == block_message_type) - originating_peer->send_item(item_id(block_message_type, reply.as().block_id)); - else - originating_peer->send_message(reply); - } - } - - void node_impl::on_item_not_available_message( peer_connection* originating_peer, const item_not_available_message& item_not_available_message_received ) - { - VERIFY_CORRECT_THREAD(); - const item_id& requested_item = item_not_available_message_received.requested_item; - auto regular_item_iter = originating_peer->items_requested_from_peer.find(requested_item); - if (regular_item_iter != originating_peer->items_requested_from_peer.end()) - { - originating_peer->items_requested_from_peer.erase( regular_item_iter ); - originating_peer->inventory_peer_advertised_to_us.erase( requested_item ); - if (is_item_in_any_peers_inventory(requested_item)) - _items_to_fetch.insert(prioritized_item_id(requested_item, _items_to_fetch_sequence_counter++)); - wlog("Peer doesn't have the requested item."); - trigger_fetch_items_loop(); - return; - } - - auto sync_item_iter = originating_peer->sync_items_requested_from_peer.find(requested_item); - if (sync_item_iter != originating_peer->sync_items_requested_from_peer.end()) - { - originating_peer->sync_items_requested_from_peer.erase(sync_item_iter); - - if (originating_peer->peer_needs_sync_items_from_us) - originating_peer->inhibit_fetching_sync_blocks = true; - else - disconnect_from_peer(originating_peer, "You are missing a sync item you claim to have, your database is probably corrupted. Try --rebuild-index.",true, - fc::exception(FC_LOG_MESSAGE(error,"You are missing a sync item you claim to have, your database is probably corrupted. Try --rebuild-index.", - ("item_id",requested_item)))); - wlog("Peer doesn't have the requested sync item. 
This really shouldn't happen");
-      trigger_fetch_sync_items_loop();
-      return;
-    }
-
-    dlog("Peer doesn't have an item we're looking for, which is fine because we weren't looking for it");
-  }
-
-  void node_impl::on_item_ids_inventory_message(peer_connection* originating_peer, const item_ids_inventory_message& item_ids_inventory_message_received)
-  {
-    VERIFY_CORRECT_THREAD();
-
-    // expire old inventory so we'll be making decisions below about whether to fetch blocks based only on recent inventory
-    originating_peer->clear_old_inventory();
-
-    dlog( "received inventory of ${count} items from peer ${endpoint}",
-          ( "count", item_ids_inventory_message_received.item_hashes_available.size() )("endpoint", originating_peer->get_remote_endpoint() ) );
-    for( const item_hash_t& item_hash : item_ids_inventory_message_received.item_hashes_available )
-    {
-      item_id advertised_item_id(item_ids_inventory_message_received.item_type, item_hash);
-      bool we_advertised_this_item_to_a_peer = false;
-      bool we_requested_this_item_from_a_peer = false;
-      for (const peer_connection_ptr peer : _active_connections)
-      {
-        if (peer->inventory_advertised_to_peer.find(advertised_item_id) != peer->inventory_advertised_to_peer.end())
-        {
-          we_advertised_this_item_to_a_peer = true;
-          break;
-        }
-        if (peer->items_requested_from_peer.find(advertised_item_id) != peer->items_requested_from_peer.end())
-          we_requested_this_item_from_a_peer = true;
-      }
-
-      // if we have already advertised it to a peer, we must have it, no need to do anything else
-      if (!we_advertised_this_item_to_a_peer)
-      {
-        // if the peer has flooded us with transactions, don't add these to the inventory to prevent our
-        // inventory list from growing without bound. We try to allow fetching blocks even when
-        // we've stopped fetching transactions.
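// ----------------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of the original file] The checks that
// follow keep the advertised-inventory and items-to-fetch collections bounded and
// de-duplicated: an id is queued at most once, ordered by the sequence number it was
// first seen with, and a later advertisement of the same id only refreshes its
// timestamp. A compact sketch of that bookkeeping; fetch_queue and its members are
// invented names.
#include <chrono>
#include <cstdint>
#include <map>
#include <string>

struct fetch_queue
{
    struct entry
    {
        uint64_t sequence;                                    // fetch priority: first seen, first fetched
        std::chrono::system_clock::time_point last_advertised;
    };
    std::map<std::string, entry> items;                       // item id -> bookkeeping
    uint64_t next_sequence = 0;

    // Returns true if the id was new to us, i.e. a fetch should be triggered.
    bool advertise(const std::string& id)
    {
        const auto now = std::chrono::system_clock::now();
        auto [it, inserted] = items.try_emplace(id, entry{next_sequence, now});
        if (inserted)
            ++next_sequence;
        else
            it->second.last_advertised = now;                 // already queued: only refresh
        return inserted;
    }
};

int main()
{
    fetch_queue q;
    bool first  = q.advertise("block-123");   // true: schedule a fetch
    bool second = q.advertise("block-123");   // false: timestamp refreshed only
    return (first && !second) ? 0 : 1;
}
// ----------------------------------------------------------------------------------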
- if ((item_ids_inventory_message_received.item_type == eos::net::trx_message_type && - originating_peer->is_inventory_advertised_to_us_list_full_for_transactions()) || - originating_peer->is_inventory_advertised_to_us_list_full()) - break; - originating_peer->inventory_peer_advertised_to_us.insert(peer_connection::timestamped_item_id(advertised_item_id, fc::time_point::now())); - if (!we_requested_this_item_from_a_peer) - { - if (_recently_failed_items.find(item_id(item_ids_inventory_message_received.item_type, item_hash)) != _recently_failed_items.end()) - { - dlog("not adding ${item_hash} to our list of items to fetch because we've recently fetched a copy and it failed to push", - ("item_hash", item_hash)); - } - else - { - auto items_to_fetch_iter = _items_to_fetch.get().find(advertised_item_id); - if (items_to_fetch_iter == _items_to_fetch.get().end()) - { - // it's new to us - _items_to_fetch.insert(prioritized_item_id(advertised_item_id, _items_to_fetch_sequence_counter++)); - dlog("adding item ${item_hash} from inventory message to our list of items to fetch", - ("item_hash", item_hash)); - trigger_fetch_items_loop(); - } - else - { - // another peer has told us about this item already, but this peer just told us it has the item - // too, we can expect it to be around in this peer's cache for longer, so update its timestamp - _items_to_fetch.get().modify(items_to_fetch_iter, - [](prioritized_item_id& item) { item.timestamp = fc::time_point::now(); }); - } - } - } - } - } - - } - - void node_impl::on_closing_connection_message( peer_connection* originating_peer, const closing_connection_message& closing_connection_message_received ) - { - VERIFY_CORRECT_THREAD(); - originating_peer->they_have_requested_close = true; - - if( closing_connection_message_received.closing_due_to_error ) - { - elog( "Peer ${peer} is disconnecting us because of an error: ${msg}, exception: ${error}", - ( "peer", originating_peer->get_remote_endpoint() ) - ( "msg", closing_connection_message_received.reason_for_closing ) - ( "error", closing_connection_message_received.error ) ); - std::ostringstream message; - message << "Peer " << fc::variant( originating_peer->get_remote_endpoint() ).as_string() << - " disconnected us: " << closing_connection_message_received.reason_for_closing; - fc::exception detailed_error(FC_LOG_MESSAGE(warn, "Peer ${peer} is disconnecting us because of an error: ${msg}, exception: ${error}", - ( "peer", originating_peer->get_remote_endpoint() ) - ( "msg", closing_connection_message_received.reason_for_closing ) - ( "error", closing_connection_message_received.error ) )); - _delegate->error_encountered( message.str(), - detailed_error ); - } - else - { - wlog( "Peer ${peer} is disconnecting us because: ${msg}", - ( "peer", originating_peer->get_remote_endpoint() ) - ( "msg", closing_connection_message_received.reason_for_closing ) ); - } - if( originating_peer->we_have_requested_close ) - originating_peer->close_connection(); - } - - void node_impl::on_connection_closed(peer_connection* originating_peer) - { - VERIFY_CORRECT_THREAD(); - peer_connection_ptr originating_peer_ptr = originating_peer->shared_from_this(); - _rate_limiter.remove_tcp_socket( &originating_peer->get_socket() ); - - // if we closed the connection (due to timeout or handshake failure), we should have recorded an - // error message to store in the peer database when we closed the connection - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if 
(originating_peer->connection_closed_error && inbound_endpoint) - { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_error = *originating_peer->connection_closed_error; - _potential_peer_db.update_entry(*updated_peer_record); - } - } - - _closing_connections.erase(originating_peer_ptr); - _handshaking_connections.erase(originating_peer_ptr); - _terminating_connections.erase(originating_peer_ptr); - if (_active_connections.find(originating_peer_ptr) != _active_connections.end()) - { - _active_connections.erase(originating_peer_ptr); - - if (inbound_endpoint && originating_peer_ptr->get_remote_endpoint()) - { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - } - } - - ilog("Remote peer ${endpoint} closed their connection to us", ("endpoint", originating_peer->get_remote_endpoint())); - display_current_connections(); - trigger_p2p_network_connect_loop(); - - // notify the node delegate so it can update the display - if( _active_connections.size() != _last_reported_number_of_connections ) - { - _last_reported_number_of_connections = (uint32_t)_active_connections.size(); - _delegate->connection_count_changed( _last_reported_number_of_connections ); - } - - // if we had delegated a firewall check to this peer, send it to another peer - if (originating_peer->firewall_check_state) - { - if (originating_peer->firewall_check_state->requesting_peer != node_id_t()) - { - // it's a check we're doing for another node - firewall_check_state_data* firewall_check_state = originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - forward_firewall_check_to_next_available_peer(firewall_check_state); - } - else - { - // we were asking them to check whether we're firewalled. 
we'll just let it - // go for now - delete originating_peer->firewall_check_state; - } - } - - // if we had requested any sync or regular items from this peer that we haven't - // received yet, reschedule them to be fetched from another peer - if (!originating_peer->sync_items_requested_from_peer.empty()) - { - for (auto sync_item_and_time : originating_peer->sync_items_requested_from_peer) - _active_sync_requests.erase(sync_item_and_time.first.item_hash); - trigger_fetch_sync_items_loop(); - } - - if (!originating_peer->items_requested_from_peer.empty()) - { - for (auto item_and_time : originating_peer->items_requested_from_peer) - { - if (is_item_in_any_peers_inventory(item_and_time.first)) - _items_to_fetch.insert(prioritized_item_id(item_and_time.first, _items_to_fetch_sequence_counter++)); - } - trigger_fetch_items_loop(); - } - - schedule_peer_for_deletion(originating_peer_ptr); - } - - void node_impl::send_sync_block_to_node_delegate(const eos::net::block_message& block_message_to_send) - { - dlog("in send_sync_block_to_node_delegate()"); - bool client_accepted_block = false; - bool discontinue_fetching_blocks_from_peer = false; - - fc::oexception handle_message_exception; - - try - { - std::vector contained_transaction_message_ids; - _delegate->handle_block(block_message_to_send, true, contained_transaction_message_ids); - ilog("Successfully pushed sync block ${num} (id:${id})", - ("num", block_message_to_send.block.block_num()) - ("id", block_message_to_send.block_id)); - _most_recent_blocks_accepted.push_back(block_message_to_send.block_id); - - client_accepted_block = true; - } - catch (const block_older_than_undo_history& e) - { - wlog("Failed to push sync block ${num} (id:${id}): block is on a fork older than our undo history would " - "allow us to switch to: ${e}", - ("num", block_message_to_send.block.block_num()) - ("id", block_message_to_send.block_id) - ("e", (fc::exception)e)); - handle_message_exception = e; - discontinue_fetching_blocks_from_peer = true; - } - catch (const fc::canceled_exception&) - { - throw; - } - catch (const fc::exception& e) - { - wlog("Failed to push sync block ${num} (id:${id}): client rejected sync block sent by peer: ${e}", - ("num", block_message_to_send.block.block_num()) - ("id", block_message_to_send.block_id) - ("e", e)); - handle_message_exception = e; - } - - // build up lists for any potentially-blocking operations we need to do, then do them - // at the end of this function - std::set peers_with_newly_empty_item_lists; - std::set peers_we_need_to_sync_to; - std::map > peers_to_disconnect; // map peer -> pair - - if( client_accepted_block ) - { - --_total_number_of_unfetched_items; - dlog("sync: client accpted the block, we now have only ${count} items left to fetch before we're in sync", - ("count", _total_number_of_unfetched_items)); - bool is_fork_block = is_hard_fork_block(block_message_to_send.block.block_num()); - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - bool disconnecting_this_peer = false; - if (is_fork_block) - { - // we just pushed a hard fork block. 
Find out if this peer is running a client - // that will be unable to process future blocks - if (peer->last_known_fork_block_number != 0) - { - uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(peer->last_known_fork_block_number); - if (next_fork_block_number != 0 && - next_fork_block_number <= block_message_to_send.block.block_num()) - { - std::ostringstream disconnect_reason_stream; - disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.block_num(); - peers_to_disconnect[peer] = std::make_pair(disconnect_reason_stream.str(), - fc::oexception(fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}", - ("block_number", block_message_to_send.block.block_num()))))); -#ifdef ENABLE_DEBUG_ULOGS - ulog("Disconnecting from peer during sync because their version is too old. Their version date: ${date}", ("date", peer->eos_git_revision_unix_timestamp)); -#endif - disconnecting_this_peer = true; - } - } - } - if (!disconnecting_this_peer && - peer->ids_of_items_to_get.empty() && peer->ids_of_items_being_processed.empty()) - { - dlog( "Cannot pop first element off peer ${peer}'s list, its list is empty", ("peer", peer->get_remote_endpoint() ) ); - // we don't know for sure that this peer has the item we just received. - // If peer is still syncing to us, we know they will ask us for - // sync item ids at least one more time and we'll notify them about - // the item then, so there's no need to do anything. If we still need items - // from them, we'll be asking them for more items at some point, and - // that will clue them in that they are out of sync. If we're fully in sync - // we need to kick off another round of synchronization with them so they can - // find out about the new item. 
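// ----------------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of the original file] As noted earlier in
// this handler, nothing potentially blocking runs while _active_connections is being
// iterated; peers needing a disconnect, a re-sync, or a follow-up request are merely
// recorded, and the work is replayed after the loop (the code below continues that
// pattern). A minimal standalone sketch of this "collect, then act" structure, with
// placeholder peer/action types.
#include <functional>
#include <string>
#include <vector>

struct peer { std::string name; bool misbehaved = false; };

int main()
{
    std::vector<peer> active = {{"a"}, {"b", true}, {"c"}};

    // Phase 1: inspect every peer without yielding or mutating the container.
    std::vector<std::function<void()>> deferred_actions;
    for (const peer& p : active)
        if (p.misbehaved)
            deferred_actions.push_back([name = p.name] {
                (void)name;  // a real disconnect/re-sync call would run here
            });

    // Phase 2: iteration is finished, so the blocking work can safely run.
    for (auto& action : deferred_actions)
        action();
}
// ----------------------------------------------------------------------------------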
- if (!peer->peer_needs_sync_items_from_us && !peer->we_need_sync_items_from_peer) - { - dlog("We will be restarting synchronization with peer ${peer}", ("peer", peer->get_remote_endpoint())); - peers_we_need_to_sync_to.insert(peer); - } - } - else if (!disconnecting_this_peer) - { - auto items_being_processed_iter = peer->ids_of_items_being_processed.find(block_message_to_send.block_id); - if (items_being_processed_iter != peer->ids_of_items_being_processed.end()) - { - peer->last_block_delegate_has_seen = block_message_to_send.block_id; - peer->last_block_time_delegate_has_seen = block_message_to_send.block.timestamp; - - peer->ids_of_items_being_processed.erase(items_being_processed_iter); - dlog("Removed item from ${endpoint}'s list of items being processed, still processing ${len} blocks", - ("endpoint", peer->get_remote_endpoint())("len", peer->ids_of_items_being_processed.size())); - - // if we just received the last item in our list from this peer, we will want to - // send another request to find out if we are in sync, but we can't do this yet - // (we don't want to allow a fiber swap in the middle of popping items off the list) - if (peer->ids_of_items_to_get.empty() && - peer->number_of_unfetched_item_ids == 0 && - peer->ids_of_items_being_processed.empty()) - peers_with_newly_empty_item_lists.insert(peer); - - // in this case, we know the peer was offering us this exact item, no need to - // try to inform them of its existence - } - } - } - } - else - { - // invalid message received - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - if (peer->ids_of_items_being_processed.find(block_message_to_send.block_id) != peer->ids_of_items_being_processed.end()) - { - if (discontinue_fetching_blocks_from_peer) - { - wlog("inhibiting fetching sync blocks from peer ${endpoint} because it is on a fork that's too old", - ("endpoint", peer->get_remote_endpoint())); - peer->inhibit_fetching_sync_blocks = true; - } - else - peers_to_disconnect[peer] = std::make_pair(std::string("You offered us a block that we reject as invalid"), fc::oexception(handle_message_exception)); - } - } - } - - for (auto& peer_to_disconnect : peers_to_disconnect) - { - const peer_connection_ptr& peer = peer_to_disconnect.first; - std::string reason_string; - fc::oexception reason_exception; - std::tie(reason_string, reason_exception) = peer_to_disconnect.second; - wlog("disconnecting client ${endpoint} because it offered us the rejected block", - ("endpoint", peer->get_remote_endpoint())); - disconnect_from_peer(peer.get(), reason_string, true, reason_exception); - } - for (const peer_connection_ptr& peer : peers_with_newly_empty_item_lists) - fetch_next_batch_of_item_ids_from_peer(peer.get()); - - for (const peer_connection_ptr& peer : peers_we_need_to_sync_to) - start_synchronizing_with_peer(peer); - - dlog("Leaving send_sync_block_to_node_delegate"); - - if (// _suspend_fetching_sync_blocks && <-- you can use this if "maximum_number_of_blocks_to_handle_at_one_time" == "maximum_number_of_sync_blocks_to_prefetch" - !_node_is_shutting_down && - (!_process_backlog_of_sync_blocks_done.valid() || _process_backlog_of_sync_blocks_done.ready())) - _process_backlog_of_sync_blocks_done = fc::async([=](){ process_backlog_of_sync_blocks(); }, - "process_backlog_of_sync_blocks"); - } - - void node_impl::process_backlog_of_sync_blocks() - { - VERIFY_CORRECT_THREAD(); - // garbage-collect the list of async tasks here for lack 
of a better place - for (auto calls_iter = _handle_message_calls_in_progress.begin(); - calls_iter != _handle_message_calls_in_progress.end();) - { - if (calls_iter->ready()) - calls_iter = _handle_message_calls_in_progress.erase(calls_iter); - else - ++calls_iter; - } - - dlog("in process_backlog_of_sync_blocks"); - if (_handle_message_calls_in_progress.size() >= _maximum_number_of_blocks_to_handle_at_one_time) - { - dlog("leaving process_backlog_of_sync_blocks because we're already processing too many blocks"); - return; // we will be rescheduled when the next block finishes its processing - } - dlog("currently ${count} blocks in the process of being handled", ("count", _handle_message_calls_in_progress.size())); - - - if (_suspend_fetching_sync_blocks) - { - dlog("resuming processing sync block backlog because we only ${count} blocks in progress", - ("count", _handle_message_calls_in_progress.size())); - _suspend_fetching_sync_blocks = false; - } - - - // when syncing with multiple peers, it's possible that we'll have hundreds of blocks ready to push - // to the client at once. This can be slow, and we need to limit the number we push at any given - // time to allow network traffic to continue so we don't end up disconnecting from peers - //fc::time_point start_time = fc::time_point::now(); - //fc::time_point when_we_should_yield = start_time + fc::seconds(1); - - bool block_processed_this_iteration; - unsigned blocks_processed = 0; - - std::set peers_with_newly_empty_item_lists; - std::set peers_we_need_to_sync_to; - std::map peers_with_rejected_block; - - do - { - std::copy(std::make_move_iterator(_new_received_sync_items.begin()), - std::make_move_iterator(_new_received_sync_items.end()), - std::front_inserter(_received_sync_items)); - _new_received_sync_items.clear(); - dlog("currently ${count} sync items to consider", ("count", _received_sync_items.size())); - - block_processed_this_iteration = false; - for (auto received_block_iter = _received_sync_items.begin(); - received_block_iter != _received_sync_items.end(); - ++received_block_iter) - { - - // find out if this block is the next block on the active chain or one of the forks - bool potential_first_block = false; - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - if (!peer->ids_of_items_to_get.empty() && - peer->ids_of_items_to_get.front() == received_block_iter->block_id) - { - potential_first_block = true; - peer->ids_of_items_to_get.pop_front(); - peer->ids_of_items_being_processed.insert(received_block_iter->block_id); - } - } - - // if it is, process it, remove it from all sync peers lists - if (potential_first_block) - { - // we can get into an interesting situation near the end of synchronization. We can be in - // sync with one peer who is sending us the last block on the chain via a regular inventory - // message, while at the same time still be synchronizing with a peer who is sending us the - // block through the sync mechanism. Further, we must request both blocks because - // we don't know they're the same (for the peer in normal operation, it has only told us the - // message id, for the peer in the sync case we only known the block_id). 
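Because the same block can arrive through both paths, the dispatch that follows first checks `_most_recent_blocks_accepted` and only then hands the block to the delegate on a separate task, with the number of in-flight calls bounded by `_maximum_number_of_blocks_to_handle_at_one_time`. A condensed sketch of that decision, with the per-peer item-list bookkeeping and the removal from `_received_sync_items` omitted:

    bool already_accepted =
        std::find(_most_recent_blocks_accepted.begin(), _most_recent_blocks_accepted.end(),
                  received_block_iter->block_id) != _most_recent_blocks_accepted.end();

    if (!already_accepted &&
        _handle_message_calls_in_progress.size() < _maximum_number_of_blocks_to_handle_at_one_time)
    {
      eos::net::block_message block_to_process = *received_block_iter;
      _handle_message_calls_in_progress.emplace_back(
          fc::async([this, block_to_process]() { send_sync_block_to_node_delegate(block_to_process); },
                    "send_sync_block_to_node_delegate"));
    }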
- if (std::find(_most_recent_blocks_accepted.begin(), _most_recent_blocks_accepted.end(), - received_block_iter->block_id) == _most_recent_blocks_accepted.end()) - { - eos::net::block_message block_message_to_process = *received_block_iter; - _received_sync_items.erase(received_block_iter); - _handle_message_calls_in_progress.emplace_back(fc::async([this, block_message_to_process](){ - send_sync_block_to_node_delegate(block_message_to_process); - }, "send_sync_block_to_node_delegate")); - ++blocks_processed; - block_processed_this_iteration = true; - } - else - dlog("Already received and accepted this block (presumably through normal inventory mechanism), treating it as accepted"); - - break; // start iterating _received_sync_items from the beginning - } // end if potential_first_block - } // end for each block in _received_sync_items - - if (_handle_message_calls_in_progress.size() >= _maximum_number_of_blocks_to_handle_at_one_time) - { - dlog("stopping processing sync block backlog because we have ${count} blocks in progress", - ("count", _handle_message_calls_in_progress.size())); - //ulog("stopping processing sync block backlog because we have ${count} blocks in progress, total on hand: ${received}", - // ("count", _handle_message_calls_in_progress.size())("received", _received_sync_items.size())); - if (_received_sync_items.size() >= _maximum_number_of_sync_blocks_to_prefetch) - _suspend_fetching_sync_blocks = true; - break; - } - } while (block_processed_this_iteration); - - dlog("leaving process_backlog_of_sync_blocks, ${count} processed", ("count", blocks_processed)); - - if (!_suspend_fetching_sync_blocks) - trigger_fetch_sync_items_loop(); - } - - void node_impl::trigger_process_backlog_of_sync_blocks() - { - if (!_node_is_shutting_down && - (!_process_backlog_of_sync_blocks_done.valid() || _process_backlog_of_sync_blocks_done.ready())) - _process_backlog_of_sync_blocks_done = fc::async([=](){ process_backlog_of_sync_blocks(); }, "process_backlog_of_sync_blocks"); - } - - void node_impl::process_block_during_sync( peer_connection* originating_peer, - const eos::net::block_message& block_message_to_process, const message_hash_type& message_hash ) - { - VERIFY_CORRECT_THREAD(); - dlog( "received a sync block from peer ${endpoint}", ("endpoint", originating_peer->get_remote_endpoint() ) ); - - // add it to the front of _received_sync_items, then process _received_sync_items to try to - // pass as many messages as possible to the client. - _new_received_sync_items.push_front( block_message_to_process ); - trigger_process_backlog_of_sync_blocks(); - } - - void node_impl::process_block_during_normal_operation( peer_connection* originating_peer, - const eos::net::block_message& block_message_to_process, - const message_hash_type& message_hash ) - { - fc::time_point message_receive_time = fc::time_point::now(); - - dlog( "received a block from peer ${endpoint}, passing it to client", ("endpoint", originating_peer->get_remote_endpoint() ) ); - std::set peers_to_disconnect; - std::string disconnect_reason; - fc::oexception disconnect_exception; - fc::oexception restart_sync_exception; - try - { - // we can get into an intersting situation near the end of synchronization. We can be in - // sync with one peer who is sending us the last block on the chain via a regular inventory - // message, while at the same time still be synchronizing with a peer who is sending us the - // block through the sync mechanism. 
Further, we must request both blocks because - // we don't know they're the same (for the peer in normal operation, it has only told us the - // message id, for the peer in the sync case we only known the block_id). - fc::time_point message_validated_time; - if (std::find(_most_recent_blocks_accepted.begin(), _most_recent_blocks_accepted.end(), - block_message_to_process.block_id) == _most_recent_blocks_accepted.end()) - { - std::vector contained_transaction_message_ids; - _delegate->handle_block(block_message_to_process, false, contained_transaction_message_ids); - message_validated_time = fc::time_point::now(); - ilog("Successfully pushed block ${num} (id:${id})", - ("num", block_message_to_process.block.block_num()) - ("id", block_message_to_process.block_id)); - _most_recent_blocks_accepted.push_back(block_message_to_process.block_id); - - bool new_transaction_discovered = false; - for (const item_hash_t& transaction_message_hash : contained_transaction_message_ids) - { - size_t items_erased = _items_to_fetch.get().erase(item_id(trx_message_type, transaction_message_hash)); - // there are two ways we could behave here: we could either act as if we received - // the transaction outside the block and offer it to our peers, or we could just - // forget about it (we would still advertise this block to our peers so they should - // get the transaction through that mechanism). - // We take the second approach, bring in the next if block to try the first approach - //if (items_erased) - //{ - // new_transaction_discovered = true; - // _new_inventory.insert(item_id(trx_message_type, transaction_message_hash)); - //} - } - if (new_transaction_discovered) - trigger_advertise_inventory_loop(); - } - else - dlog( "Already received and accepted this block (presumably through sync mechanism), treating it as accepted" ); - - dlog( "client validated the block, advertising it to other peers" ); - - item_id block_message_item_id(core_message_type_enum::block_message_type, message_hash); - uint32_t block_number = block_message_to_process.block.block_num(); - fc::time_point_sec block_time = block_message_to_process.block.timestamp; - - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - auto iter = peer->inventory_peer_advertised_to_us.find(block_message_item_id); - if (iter != peer->inventory_peer_advertised_to_us.end()) - { - // this peer offered us the item. It will eventually expire from the peer's - // inventory_peer_advertised_to_us list after some time has passed (currently 2 minutes). - // For now, it will remain there, which will prevent us from offering the peer this - // block back when we rebroadcast the block below - peer->last_block_delegate_has_seen = block_message_to_process.block_id; - peer->last_block_time_delegate_has_seen = block_time; - } - peer->clear_old_inventory(); - } - message_propagation_data propagation_data{message_receive_time, message_validated_time, originating_peer->node_id}; - broadcast( block_message_to_process, propagation_data ); - _message_cache.block_accepted(); - - if (is_hard_fork_block(block_number)) - { - // we just pushed a hard fork block. 
Find out if any of our peers are running clients - // that will be unable to process future blocks - for (const peer_connection_ptr& peer : _active_connections) - { - if (peer->last_known_fork_block_number != 0) - { - uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(peer->last_known_fork_block_number); - if (next_fork_block_number != 0 && - next_fork_block_number <= block_number) - { - peers_to_disconnect.insert(peer); -#ifdef ENABLE_DEBUG_ULOGS - ulog("Disconnecting from peer because their version is too old. Their version date: ${date}", ("date", peer->eos_git_revision_unix_timestamp)); -#endif - } - } - } - if (!peers_to_disconnect.empty()) - { - std::ostringstream disconnect_reason_stream; - disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_number; - disconnect_reason = disconnect_reason_stream.str(); - disconnect_exception = fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}", - ("block_number", block_number))); - } - } - } - catch (const fc::canceled_exception&) - { - throw; - } - catch (const unlinkable_block_exception& e) - { - restart_sync_exception = e; - } - catch (const fc::exception& e) - { - // client rejected the block. Disconnect the client and any other clients that offered us this block - wlog("Failed to push block ${num} (id:${id}), client rejected block sent by peer", - ("num", block_message_to_process.block.block_num()) - ("id", block_message_to_process.block_id)); - - disconnect_exception = e; - disconnect_reason = "You offered me a block that I have deemed to be invalid"; - - peers_to_disconnect.insert( originating_peer->shared_from_this() ); - for (const peer_connection_ptr& peer : _active_connections) - if (!peer->ids_of_items_to_get.empty() && peer->ids_of_items_to_get.front() == block_message_to_process.block_id) - peers_to_disconnect.insert(peer); - } - - if (restart_sync_exception) - { - wlog("Peer ${peer} sent me a block that didn't link to our blockchain. Restarting sync mode with them to get the missing block. " - "Error pushing block was: ${e}", - ("peer", originating_peer->get_remote_endpoint()) - ("e", *restart_sync_exception)); - start_synchronizing_with_peer(originating_peer->shared_from_this()); - } - - for (const peer_connection_ptr& peer : peers_to_disconnect) - { - wlog("disconnecting client ${endpoint} because it offered us the rejected block", ("endpoint", peer->get_remote_endpoint())); - disconnect_from_peer(peer.get(), disconnect_reason, true, *disconnect_exception); - } - } - void node_impl::process_block_message(peer_connection* originating_peer, - const message& message_to_process, - const message_hash_type& message_hash) - { - VERIFY_CORRECT_THREAD(); - // find out whether we requested this item while we were synchronizing or during normal operation - // (it's possible that we request an item during normal operation and then get kicked into sync - // mode before we receive and process the item. 
In that case, we should process the item as a normal - // item to avoid confusing the sync code) - eos::net::block_message block_message_to_process(message_to_process.as()); - auto item_iter = originating_peer->items_requested_from_peer.find(item_id(eos::net::block_message_type, message_hash)); - if (item_iter != originating_peer->items_requested_from_peer.end()) - { - originating_peer->items_requested_from_peer.erase(item_iter); - process_block_during_normal_operation(originating_peer, block_message_to_process, message_hash); - if (originating_peer->idle()) - trigger_fetch_items_loop(); - return; - } - else - { - // not during normal operation. see if we requested it during sync - auto sync_item_iter = originating_peer->sync_items_requested_from_peer.find(item_id(eos::net::block_message_type, - block_message_to_process.block_id)); - if (sync_item_iter != originating_peer->sync_items_requested_from_peer.end()) - { - originating_peer->sync_items_requested_from_peer.erase(sync_item_iter); - _active_sync_requests.erase(block_message_to_process.block_id); - process_block_during_sync(originating_peer, block_message_to_process, message_hash); - if (originating_peer->idle()) - { - // we have finished fetching a batch of items, so we either need to grab another batch of items - // or we need to get another list of item ids. - if (originating_peer->number_of_unfetched_item_ids > 0 && - originating_peer->ids_of_items_to_get.size() < EOS_NET_MIN_BLOCK_IDS_TO_PREFETCH) - fetch_next_batch_of_item_ids_from_peer(originating_peer); - else - trigger_fetch_sync_items_loop(); - } - return; - } - } - - // if we get here, we didn't request the message, we must have a misbehaving peer - wlog("received a block ${block_id} I didn't ask for from peer ${endpoint}, disconnecting from peer", - ("endpoint", originating_peer->get_remote_endpoint()) - ("block_id", block_message_to_process.block_id)); - fc::exception detailed_error(FC_LOG_MESSAGE(error, "You sent me a block that I didn't ask for, block_id: ${block_id}", - ("block_id", block_message_to_process.block_id) - ("eos_git_revision_sha", originating_peer->eos_git_revision_sha) - ("eos_git_revision_unix_timestamp", originating_peer->eos_git_revision_unix_timestamp) - ("fc_git_revision_sha", originating_peer->fc_git_revision_sha) - ("fc_git_revision_unix_timestamp", originating_peer->fc_git_revision_unix_timestamp))); - disconnect_from_peer(originating_peer, "You sent me a block that I didn't ask for", true, detailed_error); - } - - void node_impl::on_current_time_request_message(peer_connection* originating_peer, - const current_time_request_message& current_time_request_message_received) - { - VERIFY_CORRECT_THREAD(); - fc::time_point request_received_time(fc::time_point::now()); - current_time_reply_message reply(current_time_request_message_received.request_sent_time, - request_received_time); - originating_peer->send_message(reply, offsetof(current_time_reply_message, reply_transmitted_time)); - } - - void node_impl::on_current_time_reply_message(peer_connection* originating_peer, - const current_time_reply_message& current_time_reply_message_received) - { - VERIFY_CORRECT_THREAD(); - fc::time_point reply_received_time = fc::time_point::now(); - originating_peer->clock_offset = fc::microseconds(((current_time_reply_message_received.request_received_time - current_time_reply_message_received.request_sent_time) + - (current_time_reply_message_received.reply_transmitted_time - reply_received_time)).count() / 2); - originating_peer->round_trip_delay = 
(reply_received_time - current_time_reply_message_received.request_sent_time) - - (current_time_reply_message_received.reply_transmitted_time - current_time_reply_message_received.request_received_time); - } - - void node_impl::forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state) - { - for (const peer_connection_ptr& peer : _active_connections) - { - if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test - !peer->firewall_check_state && // the peer isn't already performing a check for another node - firewall_check_state->nodes_already_tested.find(peer->node_id) == firewall_check_state->nodes_already_tested.end() && - peer->core_protocol_version >= 106) - { - wlog("forwarding firewall check for node ${to_check} to peer ${checker}", - ("to_check", firewall_check_state->endpoint_to_test) - ("checker", peer->get_remote_endpoint())); - firewall_check_state->nodes_already_tested.insert(peer->node_id); - peer->firewall_check_state = firewall_check_state; - check_firewall_message check_request; - check_request.endpoint_to_check = firewall_check_state->endpoint_to_test; - check_request.node_id = firewall_check_state->expected_node_id; - peer->send_message(check_request); - return; - } - } - wlog("Unable to forward firewall check for node ${to_check} to any other peers, returning 'unable'", - ("to_check", firewall_check_state->endpoint_to_test)); - - peer_connection_ptr originating_peer = get_peer_by_node_id(firewall_check_state->expected_node_id); - if (originating_peer) - { - check_firewall_reply_message reply; - reply.node_id = firewall_check_state->expected_node_id; - reply.endpoint_checked = firewall_check_state->endpoint_to_test; - reply.result = firewall_check_result::unable_to_check; - originating_peer->send_message(reply); - } - delete firewall_check_state; - } - - void node_impl::on_check_firewall_message(peer_connection* originating_peer, - const check_firewall_message& check_firewall_message_received) - { - VERIFY_CORRECT_THREAD(); - - if (check_firewall_message_received.node_id == node_id_t() && - check_firewall_message_received.endpoint_to_check == fc::ip::endpoint()) - { - // originating_peer is asking us to test whether it is firewalled - // we're not going to try to connect back to the originating peer directly, - // instead, we're going to coordinate requests by asking some of our peers - // to try to connect to the originating peer, and relay the results back - wlog("Peer ${peer} wants us to check whether it is firewalled", ("peer", originating_peer->get_remote_endpoint())); - firewall_check_state_data* firewall_check_state = new firewall_check_state_data; - // if they are using the same inbound and outbound port, try connecting to their outbound endpoint. 
- // if they are using a different inbound port, use their outbound address but the inbound port they reported - fc::ip::endpoint endpoint_to_check = originating_peer->get_socket().remote_endpoint(); - if (originating_peer->inbound_port != originating_peer->outbound_port) - endpoint_to_check = fc::ip::endpoint(endpoint_to_check.get_address(), originating_peer->inbound_port); - firewall_check_state->endpoint_to_test = endpoint_to_check; - firewall_check_state->expected_node_id = originating_peer->node_id; - firewall_check_state->requesting_peer = originating_peer->node_id; - - forward_firewall_check_to_next_available_peer(firewall_check_state); - } - else - { - // we're being asked to check another node - // first, find out if we're currently connected to that node. If we are, we - // can't perform the test - if (is_already_connected_to_id(check_firewall_message_received.node_id) || - is_connection_to_endpoint_in_progress(check_firewall_message_received.endpoint_to_check)) - { - check_firewall_reply_message reply; - reply.node_id = check_firewall_message_received.node_id; - reply.endpoint_checked = check_firewall_message_received.endpoint_to_check; - reply.result = firewall_check_result::unable_to_check; - } - else - { - // we're not connected to them, so we need to set up a connection to them - // to test. - peer_connection_ptr peer_for_testing(peer_connection::make_shared(this)); - peer_for_testing->firewall_check_state = new firewall_check_state_data; - peer_for_testing->firewall_check_state->endpoint_to_test = check_firewall_message_received.endpoint_to_check; - peer_for_testing->firewall_check_state->expected_node_id = check_firewall_message_received.node_id; - peer_for_testing->firewall_check_state->requesting_peer = originating_peer->node_id; - peer_for_testing->set_remote_endpoint(check_firewall_message_received.endpoint_to_check); - initiate_connect_to(peer_for_testing); - } - } - } - - void node_impl::on_check_firewall_reply_message(peer_connection* originating_peer, - const check_firewall_reply_message& check_firewall_reply_message_received) - { - VERIFY_CORRECT_THREAD(); - - if (originating_peer->firewall_check_state && - originating_peer->firewall_check_state->requesting_peer != node_id_t()) - { - // then this is a peer that is helping us check the firewalled state of one of our other peers - // and they're reporting back - // if they got a result, return it to the original peer. if they were unable to check, - // we'll try another peer. 
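That decision reduces to relaying a conclusive verdict back to the node that asked to be tested and forwarding an inconclusive one to the next untested peer. A condensed sketch; local variable names are illustrative, and the full handling below also refreshes the peer database entry when the check succeeds:

    if (reply.result == firewall_check_result::connection_successful ||
        reply.result == firewall_check_result::unable_to_connect)
    {
      // conclusive: hand the verdict back to the node that requested the test
      peer_connection_ptr requester = get_peer_by_node_id(check_state->requesting_peer);
      if (requester)
        requester->send_message(reply);
      delete check_state;
    }
    else
    {
      // inconclusive: ask another peer that hasn't been tried yet
      forward_firewall_check_to_next_available_peer(check_state);
    }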
- wlog("Peer ${reporter} reports firewall check status ${status} for ${peer}", - ("reporter", originating_peer->get_remote_endpoint()) - ("status", check_firewall_reply_message_received.result) - ("peer", check_firewall_reply_message_received.endpoint_checked)); - - if (check_firewall_reply_message_received.result == firewall_check_result::unable_to_connect || - check_firewall_reply_message_received.result == firewall_check_result::connection_successful) - { - peer_connection_ptr original_peer = get_peer_by_node_id(originating_peer->firewall_check_state->requesting_peer); - if (original_peer) - { - if (check_firewall_reply_message_received.result == firewall_check_result::connection_successful) - { - // if we previously thought this peer was firewalled, mark them as not firewalled - if (original_peer->is_firewalled != firewalled_state::not_firewalled) - { - - original_peer->is_firewalled = firewalled_state::not_firewalled; - // there should be no old entry if we thought they were firewalled, so just create a new one - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(*inbound_endpoint); - updated_peer_record.last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(updated_peer_record); - } - } - } - original_peer->send_message(check_firewall_reply_message_received); - } - delete originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - } - else - { - // they were unable to check for us, ask another peer - firewall_check_state_data* firewall_check_state = originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - forward_firewall_check_to_next_available_peer(firewall_check_state); - } - } - else if (originating_peer->firewall_check_state) - { - // this is a reply to a firewall check we initiated. 
- wlog("Firewall check we initiated has returned with result: ${result}, endpoint = ${endpoint}", - ("result", check_firewall_reply_message_received.result) - ("endpoint", check_firewall_reply_message_received.endpoint_checked)); - if (check_firewall_reply_message_received.result == firewall_check_result::connection_successful) - { - _is_firewalled = firewalled_state::not_firewalled; - _publicly_visible_listening_endpoint = check_firewall_reply_message_received.endpoint_checked; - } - else if (check_firewall_reply_message_received.result == firewall_check_result::unable_to_connect) - { - _is_firewalled = firewalled_state::firewalled; - _publicly_visible_listening_endpoint = fc::optional(); - } - delete originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - } - else - { - wlog("Received a firewall check reply to a request I never sent"); - } - - } - - void node_impl::on_get_current_connections_request_message(peer_connection* originating_peer, - const get_current_connections_request_message& get_current_connections_request_message_received) - { - VERIFY_CORRECT_THREAD(); - get_current_connections_reply_message reply; - - if (!_average_network_read_speed_minutes.empty()) - { - reply.upload_rate_one_minute = _average_network_write_speed_minutes.back(); - reply.download_rate_one_minute = _average_network_read_speed_minutes.back(); - - size_t minutes_to_average = std::min(_average_network_write_speed_minutes.size(), (size_t)15); - boost::circular_buffer::iterator start_iter = _average_network_write_speed_minutes.end() - minutes_to_average; - reply.upload_rate_fifteen_minutes = std::accumulate(start_iter, _average_network_write_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - start_iter = _average_network_read_speed_minutes.end() - minutes_to_average; - reply.download_rate_fifteen_minutes = std::accumulate(start_iter, _average_network_read_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - - minutes_to_average = std::min(_average_network_write_speed_minutes.size(), (size_t)60); - start_iter = _average_network_write_speed_minutes.end() - minutes_to_average; - reply.upload_rate_one_hour = std::accumulate(start_iter, _average_network_write_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - start_iter = _average_network_read_speed_minutes.end() - minutes_to_average; - reply.download_rate_one_hour = std::accumulate(start_iter, _average_network_read_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - } - - fc::time_point now = fc::time_point::now(); - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - current_connection_data data_for_this_peer; - data_for_this_peer.connection_duration = now.sec_since_epoch() - peer->connection_initiation_time.sec_since_epoch(); - if (peer->get_remote_endpoint()) // should always be set for anyone we're actively connected to - data_for_this_peer.remote_endpoint = *peer->get_remote_endpoint(); - data_for_this_peer.clock_offset = peer->clock_offset; - data_for_this_peer.round_trip_delay = peer->round_trip_delay; - data_for_this_peer.node_id = peer->node_id; - data_for_this_peer.connection_direction = peer->direction; - data_for_this_peer.firewalled = peer->is_firewalled; - fc::mutable_variant_object user_data; - if (peer->eos_git_revision_sha) - user_data["eos_git_revision_sha"] = *peer->eos_git_revision_sha; - if (peer->eos_git_revision_unix_timestamp) - user_data["eos_git_revision_unix_timestamp"] = 
*peer->eos_git_revision_unix_timestamp; - if (peer->fc_git_revision_sha) - user_data["fc_git_revision_sha"] = *peer->fc_git_revision_sha; - if (peer->fc_git_revision_unix_timestamp) - user_data["fc_git_revision_unix_timestamp"] = *peer->fc_git_revision_unix_timestamp; - if (peer->platform) - user_data["platform"] = *peer->platform; - if (peer->bitness) - user_data["bitness"] = *peer->bitness; - user_data["user_agent"] = peer->user_agent; - - user_data["last_known_block_hash"] = peer->last_block_delegate_has_seen; - user_data["last_known_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); - user_data["last_known_block_time"] = peer->last_block_time_delegate_has_seen; - - data_for_this_peer.user_data = user_data; - reply.current_connections.emplace_back(data_for_this_peer); - } - originating_peer->send_message(reply); - } - - void node_impl::on_get_current_connections_reply_message(peer_connection* originating_peer, - const get_current_connections_reply_message& get_current_connections_reply_message_received) - { - VERIFY_CORRECT_THREAD(); - } - - - // this handles any message we get that doesn't require any special processing. - // currently, this is any message other than block messages and p2p-specific - // messages. (transaction messages would be handled here, for example) - // this just passes the message to the client, and does the bookkeeping - // related to requesting and rebroadcasting the message. - void node_impl::process_ordinary_message( peer_connection* originating_peer, - const message& message_to_process, const message_hash_type& message_hash ) - { - VERIFY_CORRECT_THREAD(); - fc::time_point message_receive_time = fc::time_point::now(); - - // only process it if we asked for it - auto iter = originating_peer->items_requested_from_peer.find( item_id(message_to_process.msg_type, message_hash) ); - if( iter == originating_peer->items_requested_from_peer.end() ) - { - wlog( "received a message I didn't ask for from peer ${endpoint}, disconnecting from peer", - ( "endpoint", originating_peer->get_remote_endpoint() ) ); - fc::exception detailed_error( FC_LOG_MESSAGE(error, "You sent me a message that I didn't ask for, message_hash: ${message_hash}", - ( "message_hash", message_hash ) ) ); - disconnect_from_peer( originating_peer, "You sent me a message that I didn't request", true, detailed_error ); - return; - } - else - { - originating_peer->items_requested_from_peer.erase( iter ); - if (originating_peer->idle()) - trigger_fetch_items_loop(); - - // Next: have the delegate process the message - fc::time_point message_validated_time; - try - { - if (message_to_process.msg_type == trx_message_type) - { - trx_message transaction_message_to_process = message_to_process.as(); - dlog("passing message containing transaction ${trx} to client", ("trx", transaction_message_to_process.trx.id())); - _delegate->handle_transaction(transaction_message_to_process); - } - else - _delegate->handle_message( message_to_process ); - message_validated_time = fc::time_point::now(); - } - catch ( const fc::canceled_exception& ) - { - throw; - } - catch ( const fc::exception& e ) - { - wlog( "client rejected message sent by peer ${peer}, ${e}", ("peer", originating_peer->get_remote_endpoint() )("e", e) ); - // record it so we don't try to fetch this item again - _recently_failed_items.insert(peer_connection::timestamped_item_id(item_id(message_to_process.msg_type, message_hash ), fc::time_point::now())); - return; - } - - // finally, if the delegate validated the message, 
broadcast it to our other peers - message_propagation_data propagation_data{message_receive_time, message_validated_time, originating_peer->node_id}; - broadcast( message_to_process, propagation_data ); - } - } - - void node_impl::start_synchronizing_with_peer( const peer_connection_ptr& peer ) - { - VERIFY_CORRECT_THREAD(); - peer->ids_of_items_to_get.clear(); - peer->number_of_unfetched_item_ids = 0; - peer->we_need_sync_items_from_peer = true; - peer->last_block_delegate_has_seen = item_hash_t(); - peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hash_t()); - peer->inhibit_fetching_sync_blocks = false; - fetch_next_batch_of_item_ids_from_peer( peer.get() ); - } - - void node_impl::start_synchronizing() - { - for( const peer_connection_ptr& peer : _active_connections ) - start_synchronizing_with_peer( peer ); - } - - void node_impl::new_peer_just_added( const peer_connection_ptr& peer ) - { - VERIFY_CORRECT_THREAD(); - peer->send_message(current_time_request_message(), - offsetof(current_time_request_message, request_sent_time)); - start_synchronizing_with_peer( peer ); - if( _active_connections.size() != _last_reported_number_of_connections ) - { - _last_reported_number_of_connections = (uint32_t)_active_connections.size(); - _delegate->connection_count_changed( _last_reported_number_of_connections ); - } - } - - void node_impl::close() - { - VERIFY_CORRECT_THREAD(); - - try - { - _potential_peer_db.close(); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while closing P2P peer database, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while closing P2P peer database, ignoring" ); - } - - // First, stop accepting incoming network connections - try - { - _tcp_server.close(); - dlog("P2P TCP server closed"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while closing P2P TCP server, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while closing P2P TCP server, ignoring" ); - } - - try - { - _accept_loop_complete.cancel_and_wait("node_impl::close()"); - dlog("P2P accept loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating P2P accept loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating P2P accept loop, ignoring" ); - } - - // terminate all of our long-running loops (these run continuously instead of rescheduling themselves) - try - { - _p2p_network_connect_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_p2p_network_connect_loop(); - _p2p_network_connect_loop_done.wait(); - dlog("P2P connect loop terminated"); - } - catch ( const fc::canceled_exception& ) - { - dlog("P2P connect loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating P2P connect loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating P2P connect loop, ignoring" ); - } - - try - { - _process_backlog_of_sync_blocks_done.cancel_and_wait("node_impl::close()"); - dlog("Process backlog of sync items task terminated"); - } - catch ( const fc::canceled_exception& ) - { - dlog("Process backlog of sync items task terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Process backlog of sync items task, ignoring: ${e}", ("e", e) ); - } - catch (...) 
- { - wlog( "Exception thrown while terminating Process backlog of sync items task, ignoring" ); - } - - unsigned handle_message_call_count = 0; - while( true ) - { - auto it = _handle_message_calls_in_progress.begin(); - if( it == _handle_message_calls_in_progress.end() ) - break; - if( it->ready() || it->error() || it->canceled() ) - { - _handle_message_calls_in_progress.erase( it ); - continue; - } - ++handle_message_call_count; - try - { - it->cancel_and_wait("node_impl::close()"); - dlog("handle_message call #${count} task terminated", ("count", handle_message_call_count)); - } - catch ( const fc::canceled_exception& ) - { - dlog("handle_message call #${count} task terminated", ("count", handle_message_call_count)); - } - catch ( const fc::exception& e ) - { - wlog("Exception thrown while terminating handle_message call #${count} task, ignoring: ${e}", ("e", e)("count", handle_message_call_count)); - } - catch (...) - { - wlog("Exception thrown while terminating handle_message call #${count} task, ignoring",("count", handle_message_call_count)); - } - } - - try - { - _fetch_sync_items_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_fetch_sync_items_loop(); - _fetch_sync_items_loop_done.wait(); - dlog("Fetch sync items loop terminated"); - } - catch ( const fc::canceled_exception& ) - { - dlog("Fetch sync items loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Fetch sync items loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Fetch sync items loop, ignoring" ); - } - - try - { - _fetch_item_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_fetch_items_loop(); - _fetch_item_loop_done.wait(); - dlog("Fetch items loop terminated"); - } - catch ( const fc::canceled_exception& ) - { - dlog("Fetch items loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Fetch items loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Fetch items loop, ignoring" ); - } - - try - { - _advertise_inventory_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_advertise_inventory_loop(); - _advertise_inventory_loop_done.wait(); - dlog("Advertise inventory loop terminated"); - } - catch ( const fc::canceled_exception& ) - { - dlog("Advertise inventory loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Advertise inventory loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Advertise inventory loop, ignoring" ); - } - - - // Next, terminate our existing connections. First, close all of the connections nicely. - // This will close the sockets and may result in calls to our "on_connection_closing" - // method to inform us that the connection really closed (or may not if we manage to cancel - // the read loop before it gets an EOF). 
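Before the connections themselves are torn down, note that the task-shutdown blocks above all repeat one shape: cancel-and-wait, then log and ignore whatever is thrown, because close() must keep going regardless. A minimal sketch of that shape as a hypothetical helper; no such function exists in this file:

    // hypothetical helper condensing the repeated cancel-and-wait-ignoring-errors blocks
    template <typename Task>
    void cancel_task_ignoring_errors(Task& task, const char* description)
    {
      try
      {
        task.cancel_and_wait("node_impl::close()");
        dlog("${task} terminated", ("task", description));
      }
      catch (const fc::canceled_exception&)
      {
        dlog("${task} terminated", ("task", description));
      }
      catch (const fc::exception& e)
      {
        wlog("Exception thrown while terminating ${task}, ignoring: ${e}", ("task", description)("e", e));
      }
      catch (...)
      {
        wlog("Exception thrown while terminating ${task}, ignoring", ("task", description));
      }
    }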
- // operate off copies of the lists in case they change during iteration - std::list all_peers; - boost::push_back(all_peers, _active_connections); - boost::push_back(all_peers, _handshaking_connections); - boost::push_back(all_peers, _closing_connections); - - for (const peer_connection_ptr& peer : all_peers) - { - try - { - peer->destroy_connection(); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while closing peer connection, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while closing peer connection, ignoring" ); - } - } - - // and delete all of the peer_connection objects - _active_connections.clear(); - _handshaking_connections.clear(); - _closing_connections.clear(); - all_peers.clear(); - - { -#ifdef USE_PEERS_TO_DELETE_MUTEX - fc::scoped_lock lock(_peers_to_delete_mutex); -#endif - try - { - _delayed_peer_deletion_task_done.cancel_and_wait("node_impl::close()"); - dlog("Delayed peer deletion task terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Delayed peer deletion task, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Delayed peer deletion task, ignoring" ); - } - _peers_to_delete.clear(); - } - - // Now that there are no more peers that can call methods on us, there should be no - // chance for one of our loops to be rescheduled, so we can safely terminate all of - // our loops now - try - { - _terminate_inactive_connections_loop_done.cancel_and_wait("node_impl::close()"); - dlog("Terminate inactive connections loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Terminate inactive connections loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Terminate inactive connections loop, ignoring" ); - } - - try - { - _fetch_updated_peer_lists_loop_done.cancel_and_wait("node_impl::close()"); - dlog("Fetch updated peer lists loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Fetch updated peer lists loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Fetch updated peer lists loop, ignoring" ); - } - - try - { - _bandwidth_monitor_loop_done.cancel_and_wait("node_impl::close()"); - dlog("Bandwidth monitor loop terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Bandwidth monitor loop, ignoring: ${e}", ("e", e) ); - } - catch (...) - { - wlog( "Exception thrown while terminating Bandwidth monitor loop, ignoring" ); - } - - try - { - _dump_node_status_task_done.cancel_and_wait("node_impl::close()"); - dlog("Dump node status task terminated"); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while terminating Dump node status task, ignoring: ${e}", ("e", e) ); - } - catch (...) 
- { - wlog( "Exception thrown while terminating Dump node status task, ignoring" ); - } - } // node_impl::close() - - void node_impl::accept_connection_task( peer_connection_ptr new_peer ) - { - VERIFY_CORRECT_THREAD(); - new_peer->accept_connection(); // this blocks until the secure connection is fully negotiated - send_hello_message(new_peer); - } - - void node_impl::accept_loop() - { - VERIFY_CORRECT_THREAD(); - while ( !_accept_loop_complete.canceled() ) - { - peer_connection_ptr new_peer(peer_connection::make_shared(this)); - - try - { - _tcp_server.accept( new_peer->get_socket() ); - ilog( "accepted inbound connection from ${remote_endpoint}", ("remote_endpoint", new_peer->get_socket().remote_endpoint() ) ); - if (_node_is_shutting_down) - return; - new_peer->connection_initiation_time = fc::time_point::now(); - _handshaking_connections.insert( new_peer ); - _rate_limiter.add_tcp_socket( &new_peer->get_socket() ); - std::weak_ptr new_weak_peer(new_peer); - new_peer->accept_or_connect_task_done = fc::async( [this, new_weak_peer]() { - peer_connection_ptr new_peer(new_weak_peer.lock()); - assert(new_peer); - if (!new_peer) - return; - accept_connection_task(new_peer); - }, "accept_connection_task" ); - - // limit the rate at which we accept connections to mitigate DOS attacks - fc::usleep( fc::milliseconds(10) ); - } FC_CAPTURE_AND_LOG( () ) - } - } // accept_loop() - - void node_impl::send_hello_message(const peer_connection_ptr& peer) - { - VERIFY_CORRECT_THREAD(); - peer->negotiation_status = peer_connection::connection_negotiation_status::hello_sent; - - fc::sha256::encoder shared_secret_encoder; - fc::sha512 shared_secret = peer->get_shared_secret(); - shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); - fc::ecc::compact_signature signature = _node_configuration.private_key.sign_compact(shared_secret_encoder.result()); - - // in the hello messsage, we send three things: - // ip address - // outbound port - // inbound port - // The peer we're connecting to will assume we're firewalled if the - // ip address and outbound port we send don't match the values it sees on its remote endpoint - // - // if we know that we're behind a NAT that will allow incoming connections because our firewall - // detection figured it out, send those values instead. - - fc::ip::endpoint local_endpoint(peer->get_socket().local_endpoint()); - uint16_t listening_port = _node_configuration.accept_incoming_connections ? 
_actual_listening_endpoint.port() : 0; - - if (_is_firewalled == firewalled_state::not_firewalled && - _publicly_visible_listening_endpoint) - { - local_endpoint = *_publicly_visible_listening_endpoint; - listening_port = _publicly_visible_listening_endpoint->port(); - } - - hello_message hello(_user_agent_string, - core_protocol_version, - local_endpoint.get_address(), - listening_port, - local_endpoint.port(), - _node_public_key, - signature, - _chain_id, - generate_hello_user_data()); - - peer->send_message(message(hello)); - } - - void node_impl::connect_to_task(peer_connection_ptr new_peer, - const fc::ip::endpoint& remote_endpoint) - { - VERIFY_CORRECT_THREAD(); - - if (!new_peer->performing_firewall_check()) - { - // create or find the database entry for the new peer - // if we're connecting to them, we believe they're not firewalled - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); - updated_peer_record.last_connection_disposition = last_connection_failed; - updated_peer_record.last_connection_attempt_time = fc::time_point::now();; - _potential_peer_db.update_entry(updated_peer_record); - } - else - { - wlog("connecting to peer ${peer} for firewall check", ("peer", new_peer->get_remote_endpoint())); - } - - fc::oexception connect_failed_exception; - - try - { - new_peer->connect_to(remote_endpoint, _actual_listening_endpoint); // blocks until the connection is established and secure connection is negotiated - - // we connected to the peer. guess they're not firewalled.... - new_peer->is_firewalled = firewalled_state::not_firewalled; - - // connection succeeded, we've started handshaking. record that in our database - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); - updated_peer_record.last_connection_disposition = last_connection_handshaking_failed; - updated_peer_record.number_of_successful_connection_attempts++; - updated_peer_record.last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(updated_peer_record); - } - catch (const fc::exception& except) - { - connect_failed_exception = except; - } - - if (connect_failed_exception && !new_peer->performing_firewall_check()) - { - // connection failed. record that in our database - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); - updated_peer_record.last_connection_disposition = last_connection_failed; - updated_peer_record.number_of_failed_connection_attempts++; - if (new_peer->connection_closed_error) - updated_peer_record.last_error = *new_peer->connection_closed_error; - else - updated_peer_record.last_error = *connect_failed_exception; - _potential_peer_db.update_entry(updated_peer_record); - } - - if (new_peer->performing_firewall_check()) - { - // we were connecting to test whether the node is firewalled, and we now know the result. - // send a message back to the requester - peer_connection_ptr requesting_peer = get_peer_by_node_id(new_peer->firewall_check_state->requesting_peer); - if (requesting_peer) - { - check_firewall_reply_message reply; - reply.endpoint_checked = new_peer->firewall_check_state->endpoint_to_test; - reply.node_id = new_peer->firewall_check_state->expected_node_id; - reply.result = connect_failed_exception ? 
- firewall_check_result::unable_to_connect : - firewall_check_result::connection_successful; - wlog("firewall check of ${peer_checked} ${success_or_failure}, sending reply to ${requester}", - ("peer_checked", new_peer->get_remote_endpoint()) - ("success_or_failure", connect_failed_exception ? "failed" : "succeeded" ) - ("requester", requesting_peer->get_remote_endpoint())); - - requesting_peer->send_message(reply); - } - } - - if (connect_failed_exception || new_peer->performing_firewall_check()) - { - // if the connection failed or if this connection was just intended to check - // whether the peer is firewalled, we want to disconnect now. - _handshaking_connections.erase(new_peer); - _terminating_connections.erase(new_peer); - assert(_active_connections.find(new_peer) == _active_connections.end()); - _active_connections.erase(new_peer); - assert(_closing_connections.find(new_peer) == _closing_connections.end()); - _closing_connections.erase(new_peer); - - display_current_connections(); - trigger_p2p_network_connect_loop(); - schedule_peer_for_deletion(new_peer); - - if (connect_failed_exception) - throw *connect_failed_exception; - } - else - { - // connection was successful and we want to stay connected - fc::ip::endpoint local_endpoint = new_peer->get_local_endpoint(); - new_peer->inbound_address = local_endpoint.get_address(); - new_peer->inbound_port = _node_configuration.accept_incoming_connections ? _actual_listening_endpoint.port() : 0; - new_peer->outbound_port = local_endpoint.port(); - - new_peer->our_state = peer_connection::our_connection_state::just_connected; - new_peer->their_state = peer_connection::their_connection_state::just_connected; - send_hello_message(new_peer); - dlog("Sent \"hello\" to peer ${peer}", ("peer", new_peer->get_remote_endpoint())); - } - } - - // methods implementing node's public interface - void node_impl::set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls) - { - VERIFY_CORRECT_THREAD(); - _delegate.reset(); - if (del) - _delegate.reset(new statistics_gathering_node_delegate_wrapper(del, thread_for_delegate_calls)); - if( _delegate ) - _chain_id = del->get_chain_id(); - } - - void node_impl::load_configuration( const fc::path& configuration_directory ) - { - VERIFY_CORRECT_THREAD(); - _node_configuration_directory = configuration_directory; - fc::path configuration_file_name( _node_configuration_directory / NODE_CONFIGURATION_FILENAME ); - bool node_configuration_loaded = false; - if( fc::exists(configuration_file_name ) ) - { - try - { - _node_configuration = fc::json::from_file( configuration_file_name ).as(); - ilog( "Loaded configuration from file ${filename}", ("filename", configuration_file_name ) ); - - if( _node_configuration.private_key == fc::ecc::private_key() ) - _node_configuration.private_key = fc::ecc::private_key::generate(); - - node_configuration_loaded = true; - } - catch ( fc::parse_error_exception& parse_error ) - { - elog( "malformed node configuration file ${filename}: ${error}", - ( "filename", configuration_file_name )("error", parse_error.to_detail_string() ) ); - } - catch ( fc::exception& except ) - { - elog( "unexpected exception while reading configuration file ${filename}: ${error}", - ( "filename", configuration_file_name )("error", except.to_detail_string() ) ); - } - } - - if( !node_configuration_loaded ) - { - _node_configuration = detail::node_configuration(); - -#ifdef EOS_TEST_NETWORK - uint32_t port = EOS_NET_TEST_P2P_PORT + EOS_TEST_NETWORK_VERSION; -#else - uint32_t port = 
EOS_NET_DEFAULT_P2P_PORT; -#endif - _node_configuration.listen_endpoint.set_port( port ); - _node_configuration.accept_incoming_connections = true; - _node_configuration.wait_if_endpoint_is_busy = false; - - ilog( "generating new private key for this node" ); - _node_configuration.private_key = fc::ecc::private_key::generate(); - } - - _node_public_key = _node_configuration.private_key.get_public_key().serialize(); - - fc::path potential_peer_database_file_name(_node_configuration_directory / POTENTIAL_PEER_DATABASE_FILENAME); - try - { - _potential_peer_db.open(potential_peer_database_file_name); - - // push back the time on all peers loaded from the database so we will be able to retry them immediately - for (peer_database::iterator itr = _potential_peer_db.begin(); itr != _potential_peer_db.end(); ++itr) - { - potential_peer_record updated_peer_record = *itr; - updated_peer_record.last_connection_attempt_time = std::min(updated_peer_record.last_connection_attempt_time, - fc::time_point::now() - fc::seconds(_peer_connection_retry_timeout)); - _potential_peer_db.update_entry(updated_peer_record); - } - - trigger_p2p_network_connect_loop(); - } - catch (fc::exception& except) - { - elog("unable to open peer database ${filename}: ${error}", - ("filename", potential_peer_database_file_name)("error", except.to_detail_string())); - throw; - } - } - - void node_impl::listen_to_p2p_network() - { - VERIFY_CORRECT_THREAD(); - if (!_node_configuration.accept_incoming_connections) - { - wlog("accept_incoming_connections is false, p2p network will not accept any incoming connections"); - return; - } - - assert(_node_public_key != fc::ecc::public_key_data()); - - fc::ip::endpoint listen_endpoint = _node_configuration.listen_endpoint; - if( listen_endpoint.port() != 0 ) - { - // if the user specified a port, we only want to bind to it if it's not already - // being used by another application. During normal operation, we set the - // SO_REUSEADDR/SO_REUSEPORT flags so that we can bind outbound sockets to the - // same local endpoint as we're listening on here. On some platforms, setting - // those flags will prevent us from detecting that other applications are - // listening on that port. We'd like to detect that, so we'll set up a temporary - // tcp server without that flag to see if we can listen on that port. 
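The comment above explains why the requested port is probed with a throwaway `fc::tcp_server` before the real listener (which sets `SO_REUSEADDR`) is bound: the reuse flags can hide the fact that another application already owns the port. A condensed sketch of the probe-and-fall-back flow that follows; the `wait_if_endpoint_is_busy` retry loop and user-facing error reporting are omitted:

    bool port_is_available = true;
    try
    {
      fc::tcp_server temporary_server;            // no reuse flags, so an in-use port fails here
      temporary_server.listen(listen_endpoint.port());
    }
    catch (const fc::exception&)
    {
      port_is_available = false;
    }

    if (!port_is_available)
      listen_endpoint.set_port(0);                // fall back to a randomly assigned port

    _tcp_server.set_reuse_address();              // the real listener does want the reuse flags
    _tcp_server.listen(listen_endpoint.port());
    _actual_listening_endpoint = _tcp_server.get_local_endpoint();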
- bool first = true; - for( ;; ) - { - bool listen_failed = false; - - try - { - fc::tcp_server temporary_server; - if( listen_endpoint.get_address() != fc::ip::address() ) - temporary_server.listen( listen_endpoint ); - else - temporary_server.listen( listen_endpoint.port() ); - break; - } - catch ( const fc::exception&) - { - listen_failed = true; - } - - if (listen_failed) - { - if( _node_configuration.wait_if_endpoint_is_busy ) - { - std::ostringstream error_message_stream; - if( first ) - { - error_message_stream << "Unable to listen for connections on port " << listen_endpoint.port() - << ", retrying in a few seconds\n"; - error_message_stream << "You can wait for it to become available, or restart this program using\n"; - error_message_stream << "the --p2p-port option to specify another port\n"; - first = false; - } - else - { - error_message_stream << "\nStill waiting for port " << listen_endpoint.port() << " to become available\n"; - } - std::string error_message = error_message_stream.str(); - ulog(error_message); - _delegate->error_encountered( error_message, fc::oexception() ); - fc::usleep( fc::seconds(5 ) ); - } - else // don't wait, just find a random port - { - wlog( "unable to bind on the requested endpoint ${endpoint}, which probably means that endpoint is already in use", - ( "endpoint", listen_endpoint ) ); - listen_endpoint.set_port( 0 ); - } - } // if (listen_failed) - } // for(;;) - } // if (listen_endpoint.port() != 0) - else // port is 0 - { - // if they requested a random port, we'll just assume it's available - // (it may not be due to ip address, but we'll detect that in the next step) - } - - _tcp_server.set_reuse_address(); - try - { - if( listen_endpoint.get_address() != fc::ip::address() ) - _tcp_server.listen( listen_endpoint ); - else - _tcp_server.listen( listen_endpoint.port() ); - _actual_listening_endpoint = _tcp_server.get_local_endpoint(); - ilog( "listening for connections on endpoint ${endpoint} (our first choice)", - ( "endpoint", _actual_listening_endpoint ) ); - } - catch ( fc::exception& e ) - { - FC_RETHROW_EXCEPTION( e, error, "unable to listen on ${endpoint}", ("endpoint",listen_endpoint ) ); - } - } - - void node_impl::connect_to_p2p_network() - { - VERIFY_CORRECT_THREAD(); - assert(_node_public_key != fc::ecc::public_key_data()); - - assert(!_accept_loop_complete.valid() && - !_p2p_network_connect_loop_done.valid() && - !_fetch_sync_items_loop_done.valid() && - !_fetch_item_loop_done.valid() && - !_advertise_inventory_loop_done.valid() && - !_terminate_inactive_connections_loop_done.valid() && - !_fetch_updated_peer_lists_loop_done.valid() && - !_bandwidth_monitor_loop_done.valid() && - !_dump_node_status_task_done.valid()); - if (_node_configuration.accept_incoming_connections) - _accept_loop_complete = fc::async( [=](){ accept_loop(); }, "accept_loop"); - _p2p_network_connect_loop_done = fc::async( [=]() { p2p_network_connect_loop(); }, "p2p_network_connect_loop" ); - _fetch_sync_items_loop_done = fc::async( [=]() { fetch_sync_items_loop(); }, "fetch_sync_items_loop" ); - _fetch_item_loop_done = fc::async( [=]() { fetch_items_loop(); }, "fetch_items_loop" ); - _advertise_inventory_loop_done = fc::async( [=]() { advertise_inventory_loop(); }, "advertise_inventory_loop" ); - _terminate_inactive_connections_loop_done = fc::async( [=]() { terminate_inactive_connections_loop(); }, "terminate_inactive_connections_loop" ); - _fetch_updated_peer_lists_loop_done = fc::async([=](){ fetch_updated_peer_lists_loop(); }, 
"fetch_updated_peer_lists_loop"); - _bandwidth_monitor_loop_done = fc::async([=](){ bandwidth_monitor_loop(); }, "bandwidth_monitor_loop"); - _dump_node_status_task_done = fc::async([=](){ dump_node_status_task(); }, "dump_node_status_task"); - } - - void node_impl::add_node(const fc::ip::endpoint& ep) - { - VERIFY_CORRECT_THREAD(); - // if we're connecting to them, we believe they're not firewalled - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(ep); - - // if we've recently connected to this peer, reset the last_connection_attempt_time to allow - // us to immediately retry this peer - updated_peer_record.last_connection_attempt_time = std::min(updated_peer_record.last_connection_attempt_time, - fc::time_point::now() - fc::seconds(_peer_connection_retry_timeout)); - _add_once_node_list.push_back(updated_peer_record); - _potential_peer_db.update_entry(updated_peer_record); - trigger_p2p_network_connect_loop(); - } - - void node_impl::initiate_connect_to(const peer_connection_ptr& new_peer) - { - new_peer->get_socket().open(); - new_peer->get_socket().set_reuse_address(); - new_peer->connection_initiation_time = fc::time_point::now(); - _handshaking_connections.insert(new_peer); - _rate_limiter.add_tcp_socket(&new_peer->get_socket()); - - if (_node_is_shutting_down) - return; - - std::weak_ptr new_weak_peer(new_peer); - new_peer->accept_or_connect_task_done = fc::async([this, new_weak_peer](){ - peer_connection_ptr new_peer(new_weak_peer.lock()); - assert(new_peer); - if (!new_peer) - return; - connect_to_task(new_peer, *new_peer->get_remote_endpoint()); - }, "connect_to_task"); - } - - void node_impl::connect_to_endpoint(const fc::ip::endpoint& remote_endpoint) - { - VERIFY_CORRECT_THREAD(); - if (is_connection_to_endpoint_in_progress(remote_endpoint)) - FC_THROW_EXCEPTION(already_connected_to_requested_peer, "already connected to requested endpoint ${endpoint}", - ("endpoint", remote_endpoint)); - - dlog("node_impl::connect_to_endpoint(${endpoint})", ("endpoint", remote_endpoint)); - peer_connection_ptr new_peer(peer_connection::make_shared(this)); - new_peer->set_remote_endpoint(remote_endpoint); - initiate_connect_to(new_peer); - } - - peer_connection_ptr node_impl::get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ) - { - VERIFY_CORRECT_THREAD(); - for( const peer_connection_ptr& active_peer : _active_connections ) - { - fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) - return active_peer; - } - for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) - { - fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) - return handshaking_peer; - } - return peer_connection_ptr(); - } - - bool node_impl::is_connection_to_endpoint_in_progress( const fc::ip::endpoint& remote_endpoint ) - { - VERIFY_CORRECT_THREAD(); - return get_connection_to_endpoint( remote_endpoint ) != peer_connection_ptr(); - } - - void node_impl::move_peer_to_active_list(const peer_connection_ptr& peer) - { - VERIFY_CORRECT_THREAD(); - _active_connections.insert(peer); - _handshaking_connections.erase(peer); - _closing_connections.erase(peer); - _terminating_connections.erase(peer); - } - - void node_impl::move_peer_to_closing_list(const peer_connection_ptr& peer) - { - VERIFY_CORRECT_THREAD(); - _active_connections.erase(peer); - 
_handshaking_connections.erase(peer); - _closing_connections.insert(peer); - _terminating_connections.erase(peer); - } - - void node_impl::move_peer_to_terminating_list(const peer_connection_ptr& peer) - { - VERIFY_CORRECT_THREAD(); - _active_connections.erase(peer); - _handshaking_connections.erase(peer); - _closing_connections.erase(peer); - _terminating_connections.insert(peer); - } - - void node_impl::dump_node_status() - { - VERIFY_CORRECT_THREAD(); - ilog( "----------------- PEER STATUS UPDATE --------------------" ); - ilog( " number of peers: ${active} active, ${handshaking}, ${closing} closing. attempting to maintain ${desired} - ${maximum} peers", - ( "active", _active_connections.size() )("handshaking", _handshaking_connections.size() )("closing",_closing_connections.size() ) - ( "desired", _desired_number_of_connections )("maximum", _maximum_number_of_connections ) ); - for( const peer_connection_ptr& peer : _active_connections ) - { - ilog( " active peer ${endpoint} peer_is_in_sync_with_us:${in_sync_with_us} we_are_in_sync_with_peer:${in_sync_with_them}", - ( "endpoint", peer->get_remote_endpoint() ) - ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us )("in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); - if( peer->we_need_sync_items_from_peer ) - ilog( " above peer has ${count} sync items we might need", ("count", peer->ids_of_items_to_get.size() ) ); - if (peer->inhibit_fetching_sync_blocks) - ilog( " we are not fetching sync blocks from the above peer (inhibit_fetching_sync_blocks == true)" ); - - } - for( const peer_connection_ptr& peer : _handshaking_connections ) - { - ilog( " handshaking peer ${endpoint} in state ours(${our_state}) theirs(${their_state})", - ( "endpoint", peer->get_remote_endpoint() )("our_state", peer->our_state )("their_state", peer->their_state ) ); - } - - ilog( "--------- MEMORY USAGE ------------" ); - ilog( "node._active_sync_requests size: ${size}", ("size", _active_sync_requests.size() ) ); - ilog( "node._received_sync_items size: ${size}", ("size", _received_sync_items.size() ) ); - ilog( "node._new_received_sync_items size: ${size}", ("size", _new_received_sync_items.size() ) ); - ilog( "node._items_to_fetch size: ${size}", ("size", _items_to_fetch.size() ) ); - ilog( "node._new_inventory size: ${size}", ("size", _new_inventory.size() ) ); - ilog( "node._message_cache size: ${size}", ("size", _message_cache.size() ) ); - for( const peer_connection_ptr& peer : _active_connections ) - { - ilog( " peer ${endpoint}", ("endpoint", peer->get_remote_endpoint() ) ); - ilog( " peer.ids_of_items_to_get size: ${size}", ("size", peer->ids_of_items_to_get.size() ) ); - ilog( " peer.inventory_peer_advertised_to_us size: ${size}", ("size", peer->inventory_peer_advertised_to_us.size() ) ); - ilog( " peer.inventory_advertised_to_peer size: ${size}", ("size", peer->inventory_advertised_to_peer.size() ) ); - ilog( " peer.items_requested_from_peer size: ${size}", ("size", peer->items_requested_from_peer.size() ) ); - ilog( " peer.sync_items_requested_from_peer size: ${size}", ("size", peer->sync_items_requested_from_peer.size() ) ); - } - ilog( "--------- END MEMORY USAGE ------------" ); - } - - void node_impl::disconnect_from_peer( peer_connection* peer_to_disconnect, - const std::string& reason_for_disconnect, - bool caused_by_error /* = false */, - const fc::oexception& error /* = fc::oexception() */ ) - { - VERIFY_CORRECT_THREAD(); - move_peer_to_closing_list(peer_to_disconnect->shared_from_this()); - - if 
(peer_to_disconnect->they_have_requested_close) - { - // the peer has already told us that it's ready to close the connection, so just close the connection - peer_to_disconnect->close_connection(); - } - else - { - // we're the first to request closing the connection - fc::optional<fc::ip::endpoint> inbound_endpoint = peer_to_disconnect->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - fc::optional<potential_peer_record> updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - if (error) - updated_peer_record->last_error = error; - else - updated_peer_record->last_error = fc::exception(FC_LOG_MESSAGE(info, reason_for_disconnect.c_str())); - _potential_peer_db.update_entry(*updated_peer_record); - } - } - peer_to_disconnect->we_have_requested_close = true; - peer_to_disconnect->connection_closed_time = fc::time_point::now(); - - closing_connection_message closing_message( reason_for_disconnect, caused_by_error, error ); - peer_to_disconnect->send_message( closing_message ); - } - - // notify the user. This will be useful in testing, but we might want to remove it later; - // it makes good sense to notify the user if other nodes think she is behaving badly, but - // if we're just detecting and disconnecting other badly-behaving nodes, they don't really care. - if (caused_by_error) - { - std::ostringstream error_message; - error_message << "I am disconnecting peer " << fc::variant( peer_to_disconnect->get_remote_endpoint() ).as_string() << - " for reason: " << reason_for_disconnect; - _delegate->error_encountered(error_message.str(), fc::oexception()); - dlog(error_message.str()); - } - else - dlog("Disconnecting from ${peer} for ${reason}", ("peer",peer_to_disconnect->get_remote_endpoint()) ("reason",reason_for_disconnect)); - // peer_to_disconnect->close_connection(); - } - - void node_impl::listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) - { - VERIFY_CORRECT_THREAD(); - _node_configuration.listen_endpoint = ep; - _node_configuration.wait_if_endpoint_is_busy = wait_if_not_available; - save_node_configuration(); - } - - void node_impl::accept_incoming_connections(bool accept) - { - VERIFY_CORRECT_THREAD(); - _node_configuration.accept_incoming_connections = accept; - save_node_configuration(); - } - - void node_impl::listen_on_port( uint16_t port, bool wait_if_not_available ) - { - VERIFY_CORRECT_THREAD(); - _node_configuration.listen_endpoint = fc::ip::endpoint( fc::ip::address(), port ); - _node_configuration.wait_if_endpoint_is_busy = wait_if_not_available; - save_node_configuration(); - } - - fc::ip::endpoint node_impl::get_actual_listening_endpoint() const - { - VERIFY_CORRECT_THREAD(); - return _actual_listening_endpoint; - } - - std::vector<peer_status> node_impl::get_connected_peers() const - { - VERIFY_CORRECT_THREAD(); - std::vector<peer_status> statuses; - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - peer_status this_peer_status; - this_peer_status.version = 0; - fc::optional<fc::ip::endpoint> endpoint = peer->get_remote_endpoint(); - if (endpoint) - this_peer_status.host = *endpoint; - fc::mutable_variant_object peer_details; - peer_details["addr"] = endpoint ?
(std::string)*endpoint : std::string(); - peer_details["addrlocal"] = (std::string)peer->get_local_endpoint(); - peer_details["services"] = "00000001"; - peer_details["lastsend"] = peer->get_last_message_sent_time().sec_since_epoch(); - peer_details["lastrecv"] = peer->get_last_message_received_time().sec_since_epoch(); - peer_details["bytessent"] = peer->get_total_bytes_sent(); - peer_details["bytesrecv"] = peer->get_total_bytes_received(); - peer_details["conntime"] = peer->get_connection_time(); - peer_details["pingtime"] = ""; - peer_details["pingwait"] = ""; - peer_details["version"] = ""; - peer_details["subver"] = peer->user_agent; - peer_details["inbound"] = peer->direction == peer_connection_direction::inbound; - peer_details["firewall_status"] = peer->is_firewalled; - peer_details["startingheight"] = ""; - peer_details["banscore"] = ""; - peer_details["syncnode"] = ""; - - if (peer->fc_git_revision_sha) - { - std::string revision_string = *peer->fc_git_revision_sha; - if (*peer->fc_git_revision_sha == fc::git_revision_sha) - revision_string += " (same as ours)"; - else - revision_string += " (different from ours)"; - peer_details["fc_git_revision_sha"] = revision_string; - - } - if (peer->fc_git_revision_unix_timestamp) - { - peer_details["fc_git_revision_unix_timestamp"] = *peer->fc_git_revision_unix_timestamp; - std::string age_string = fc::get_approximate_relative_time_string( *peer->fc_git_revision_unix_timestamp); - if (*peer->fc_git_revision_unix_timestamp == fc::time_point_sec(fc::git_revision_unix_timestamp)) - age_string += " (same as ours)"; - else if (*peer->fc_git_revision_unix_timestamp > fc::time_point_sec(fc::git_revision_unix_timestamp)) - age_string += " (newer than ours)"; - else - age_string += " (older than ours)"; - peer_details["fc_git_revision_age"] = age_string; - } - - if (peer->platform) - peer_details["platform"] = *peer->platform; - - // provide these for debugging - // warning: these are just approximations, if the peer is "downstream" of us, they may - // have received blocks from other peers that we are unaware of - peer_details["current_head_block"] = peer->last_block_delegate_has_seen; - peer_details["current_head_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); - peer_details["current_head_block_time"] = peer->last_block_time_delegate_has_seen; - - this_peer_status.info = peer_details; - statuses.push_back(this_peer_status); - } - return statuses; - } - - uint32_t node_impl::get_connection_count() const - { - VERIFY_CORRECT_THREAD(); - return (uint32_t)_active_connections.size(); - } - - void node_impl::broadcast( const message& item_to_broadcast, const message_propagation_data& propagation_data ) - { - VERIFY_CORRECT_THREAD(); - fc::uint256 hash_of_message_contents; - if( item_to_broadcast.msg_type == eos::net::block_message_type ) - { - eos::net::block_message block_message_to_broadcast = item_to_broadcast.as(); - hash_of_message_contents = block_message_to_broadcast.block_id; // for debugging - _most_recent_blocks_accepted.push_back( block_message_to_broadcast.block_id ); - } - else if( item_to_broadcast.msg_type == eos::net::trx_message_type ) - { - eos::net::trx_message transaction_message_to_broadcast = item_to_broadcast.as(); - hash_of_message_contents = transaction_message_to_broadcast.trx.id(); // for debugging - dlog( "broadcasting trx: ${trx}", ("trx", transaction_message_to_broadcast) ); - } - message_hash_type hash_of_item_to_broadcast = item_to_broadcast.id(); - - _message_cache.cache_message( 
item_to_broadcast, hash_of_item_to_broadcast, propagation_data, hash_of_message_contents ); - _new_inventory.insert( item_id(item_to_broadcast.msg_type, hash_of_item_to_broadcast ) ); - trigger_advertise_inventory_loop(); - } - - void node_impl::broadcast( const message& item_to_broadcast ) - { - VERIFY_CORRECT_THREAD(); - // this version is called directly from the client - message_propagation_data propagation_data{fc::time_point::now(), fc::time_point::now(), _node_id}; - broadcast( item_to_broadcast, propagation_data ); - } - - void node_impl::sync_from(const item_id& current_head_block, const std::vector<uint32_t>& hard_fork_block_numbers) - { - VERIFY_CORRECT_THREAD(); - _most_recent_blocks_accepted.clear(); - _sync_item_type = current_head_block.item_type; - _most_recent_blocks_accepted.push_back(current_head_block.item_hash); - _hard_fork_block_numbers = hard_fork_block_numbers; - } - - bool node_impl::is_connected() const - { - VERIFY_CORRECT_THREAD(); - return !_active_connections.empty(); - } - - std::vector<potential_peer_record> node_impl::get_potential_peers() const - { - VERIFY_CORRECT_THREAD(); - std::vector<potential_peer_record> result; - // use explicit iterators here; for some reason the mac compiler can't use range-based for loops here - for (peer_database::iterator itr = _potential_peer_db.begin(); itr != _potential_peer_db.end(); ++itr) - result.push_back(*itr); - return result; - } - - void node_impl::set_advanced_node_parameters(const fc::variant_object& params) - { - VERIFY_CORRECT_THREAD(); - if (params.contains("peer_connection_retry_timeout")) - _peer_connection_retry_timeout = params["peer_connection_retry_timeout"].as(); - if (params.contains("desired_number_of_connections")) - _desired_number_of_connections = params["desired_number_of_connections"].as(); - if (params.contains("maximum_number_of_connections")) - _maximum_number_of_connections = params["maximum_number_of_connections"].as(); - if (params.contains("maximum_number_of_blocks_to_handle_at_one_time")) - _maximum_number_of_blocks_to_handle_at_one_time = params["maximum_number_of_blocks_to_handle_at_one_time"].as(); - if (params.contains("maximum_number_of_sync_blocks_to_prefetch")) - _maximum_number_of_sync_blocks_to_prefetch = params["maximum_number_of_sync_blocks_to_prefetch"].as(); - if (params.contains("maximum_blocks_per_peer_during_syncing")) - _maximum_blocks_per_peer_during_syncing = params["maximum_blocks_per_peer_during_syncing"].as(); - - _desired_number_of_connections = std::min(_desired_number_of_connections, _maximum_number_of_connections); - - while (_active_connections.size() > _maximum_number_of_connections) - disconnect_from_peer(_active_connections.begin()->get(), - "I have too many connections open"); - trigger_p2p_network_connect_loop(); - } - - fc::variant_object node_impl::get_advanced_node_parameters() - { - VERIFY_CORRECT_THREAD(); - fc::mutable_variant_object result; - result["peer_connection_retry_timeout"] = _peer_connection_retry_timeout; - result["desired_number_of_connections"] = _desired_number_of_connections; - result["maximum_number_of_connections"] = _maximum_number_of_connections; - result["maximum_number_of_blocks_to_handle_at_one_time"] = _maximum_number_of_blocks_to_handle_at_one_time; - result["maximum_number_of_sync_blocks_to_prefetch"] = _maximum_number_of_sync_blocks_to_prefetch; - result["maximum_blocks_per_peer_during_syncing"] = _maximum_blocks_per_peer_during_syncing; - return result; - } - - message_propagation_data node_impl::get_transaction_propagation_data( const eos::net::transaction_id_type&
transaction_id ) - { - VERIFY_CORRECT_THREAD(); - return _message_cache.get_message_propagation_data( transaction_id ); - } - - message_propagation_data node_impl::get_block_propagation_data( const eos::net::block_id_type& block_id ) - { - VERIFY_CORRECT_THREAD(); - return _message_cache.get_message_propagation_data( block_id ); - } - - node_id_t node_impl::get_node_id() const - { - VERIFY_CORRECT_THREAD(); - return _node_id; - } - void node_impl::set_allowed_peers(const std::vector& allowed_peers) - { - VERIFY_CORRECT_THREAD(); -#ifdef ENABLE_P2P_DEBUGGING_API - _allowed_peers.clear(); - _allowed_peers.insert(allowed_peers.begin(), allowed_peers.end()); - std::list peers_to_disconnect; - if (!_allowed_peers.empty()) - for (const peer_connection_ptr& peer : _active_connections) - if (_allowed_peers.find(peer->node_id) == _allowed_peers.end()) - peers_to_disconnect.push_back(peer); - for (const peer_connection_ptr& peer : peers_to_disconnect) - disconnect_from_peer(peer.get(), "My allowed_peers list has changed, and you're no longer allowed. Bye."); -#endif // ENABLE_P2P_DEBUGGING_API - } - void node_impl::clear_peer_database() - { - VERIFY_CORRECT_THREAD(); - _potential_peer_db.clear(); - } - - void node_impl::set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ) - { - VERIFY_CORRECT_THREAD(); - _rate_limiter.set_upload_limit( upload_bytes_per_second ); - _rate_limiter.set_download_limit( download_bytes_per_second ); - } - - void node_impl::disable_peer_advertising() - { - VERIFY_CORRECT_THREAD(); - _peer_advertising_disabled = true; - } - - fc::variant_object node_impl::get_call_statistics() const - { - VERIFY_CORRECT_THREAD(); - return _delegate->get_call_statistics(); - } - - fc::variant_object node_impl::network_get_info() const - { - VERIFY_CORRECT_THREAD(); - fc::mutable_variant_object info; - info["listening_on"] = _actual_listening_endpoint; - info["node_public_key"] = _node_public_key; - info["node_id"] = _node_id; - info["firewalled"] = _is_firewalled; - return info; - } - fc::variant_object node_impl::network_get_usage_stats() const - { - VERIFY_CORRECT_THREAD(); - std::vector network_usage_by_second; - network_usage_by_second.reserve(_average_network_read_speed_seconds.size()); - std::transform(_average_network_read_speed_seconds.begin(), _average_network_read_speed_seconds.end(), - _average_network_write_speed_seconds.begin(), - std::back_inserter(network_usage_by_second), - std::plus()); - - std::vector network_usage_by_minute; - network_usage_by_minute.reserve(_average_network_read_speed_minutes.size()); - std::transform(_average_network_read_speed_minutes.begin(), _average_network_read_speed_minutes.end(), - _average_network_write_speed_minutes.begin(), - std::back_inserter(network_usage_by_minute), - std::plus()); - - std::vector network_usage_by_hour; - network_usage_by_hour.reserve(_average_network_read_speed_hours.size()); - std::transform(_average_network_read_speed_hours.begin(), _average_network_read_speed_hours.end(), - _average_network_write_speed_hours.begin(), - std::back_inserter(network_usage_by_hour), - std::plus()); - - fc::mutable_variant_object result; - result["usage_by_second"] = network_usage_by_second; - result["usage_by_minute"] = network_usage_by_minute; - result["usage_by_hour"] = network_usage_by_hour; - return result; - } - - bool node_impl::is_hard_fork_block(uint32_t block_number) const - { - return std::binary_search(_hard_fork_block_numbers.begin(), _hard_fork_block_numbers.end(), block_number); - } - 
uint32_t node_impl::get_next_known_hard_fork_block_number(uint32_t block_number) const - { - auto iter = std::upper_bound(_hard_fork_block_numbers.begin(), _hard_fork_block_numbers.end(), - block_number); - return iter != _hard_fork_block_numbers.end() ? *iter : 0; - } - - } // end namespace detail - - - - ///////////////////////////////////////////////////////////////////////////////////////////////////////////// - // implement node functions, they call the matching function in to detail::node_impl in the correct thread // - -#ifdef P2P_IN_DEDICATED_THREAD -# define INVOKE_IN_IMPL(method_name, ...) \ - return my->_thread->async([&](){ return my->method_name(__VA_ARGS__); }, "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait() -#else -# define INVOKE_IN_IMPL(method_name, ...) \ - return my->method_name(__VA_ARGS__) -#endif // P2P_IN_DEDICATED_THREAD - - node::node(const std::string& user_agent) : - my(new detail::node_impl(user_agent)) - { - } - - node::~node() - { - } - - void node::set_node_delegate( node_delegate* del ) - { - fc::thread* delegate_thread = &fc::thread::current(); - INVOKE_IN_IMPL(set_node_delegate, del, delegate_thread); - } - - void node::load_configuration( const fc::path& configuration_directory ) - { - INVOKE_IN_IMPL(load_configuration, configuration_directory); - } - - void node::listen_to_p2p_network() - { - INVOKE_IN_IMPL(listen_to_p2p_network); - } - - void node::connect_to_p2p_network() - { - INVOKE_IN_IMPL(connect_to_p2p_network); - } - - void node::add_node( const fc::ip::endpoint& ep ) - { - INVOKE_IN_IMPL(add_node, ep); - } - - void node::connect_to_endpoint( const fc::ip::endpoint& remote_endpoint ) - { - INVOKE_IN_IMPL(connect_to_endpoint, remote_endpoint); - } - - void node::listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available) - { - INVOKE_IN_IMPL(listen_on_endpoint, ep, wait_if_not_available); - } - - void node::accept_incoming_connections(bool accept) - { - INVOKE_IN_IMPL(accept_incoming_connections, accept); - } - - void node::listen_on_port( uint16_t port, bool wait_if_not_available ) - { - INVOKE_IN_IMPL(listen_on_port, port, wait_if_not_available); - } - - fc::ip::endpoint node::get_actual_listening_endpoint() const - { - INVOKE_IN_IMPL(get_actual_listening_endpoint); - } - - std::vector node::get_connected_peers() const - { - INVOKE_IN_IMPL(get_connected_peers); - } - - uint32_t node::get_connection_count() const - { - INVOKE_IN_IMPL(get_connection_count); - } - - void node::broadcast( const message& msg ) - { - INVOKE_IN_IMPL(broadcast, msg); - } - - void node::sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers) - { - INVOKE_IN_IMPL(sync_from, current_head_block, hard_fork_block_numbers); - } - - bool node::is_connected() const - { - INVOKE_IN_IMPL(is_connected); - } - - std::vector node::get_potential_peers()const - { - INVOKE_IN_IMPL(get_potential_peers); - } - - void node::set_advanced_node_parameters( const fc::variant_object& params ) - { - INVOKE_IN_IMPL(set_advanced_node_parameters, params); - } - - fc::variant_object node::get_advanced_node_parameters() - { - INVOKE_IN_IMPL(get_advanced_node_parameters); - } - - message_propagation_data node::get_transaction_propagation_data( const eos::net::transaction_id_type& transaction_id ) - { - INVOKE_IN_IMPL(get_transaction_propagation_data, transaction_id); - } - - message_propagation_data node::get_block_propagation_data( const eos::net::block_id_type& block_id ) - { - INVOKE_IN_IMPL(get_block_propagation_data, block_id); 
- } - - node_id_t node::get_node_id() const - { - INVOKE_IN_IMPL(get_node_id); - } - - void node::set_allowed_peers( const std::vector& allowed_peers ) - { - INVOKE_IN_IMPL(set_allowed_peers, allowed_peers); - } - - void node::clear_peer_database() - { - INVOKE_IN_IMPL(clear_peer_database); - } - - void node::set_total_bandwidth_limit(uint32_t upload_bytes_per_second, - uint32_t download_bytes_per_second) - { - INVOKE_IN_IMPL(set_total_bandwidth_limit, upload_bytes_per_second, download_bytes_per_second); - } - - void node::disable_peer_advertising() - { - INVOKE_IN_IMPL(disable_peer_advertising); - } - - fc::variant_object node::get_call_statistics() const - { - INVOKE_IN_IMPL(get_call_statistics); - } - - fc::variant_object node::network_get_info() const - { - INVOKE_IN_IMPL(network_get_info); - } - - fc::variant_object node::network_get_usage_stats() const - { - INVOKE_IN_IMPL(network_get_usage_stats); - } - - void node::close() - { - INVOKE_IN_IMPL(close); - } - - struct simulated_network::node_info - { - node_delegate* delegate; - fc::future message_sender_task_done; - std::queue messages_to_deliver; - node_info(node_delegate* delegate) : delegate(delegate) {} - }; - - simulated_network::~simulated_network() - { - for( node_info* network_node_info : network_nodes ) - { - network_node_info->message_sender_task_done.cancel_and_wait("~simulated_network()"); - delete network_node_info; - } - } - - void simulated_network::message_sender(node_info* destination_node) - { - while (!destination_node->messages_to_deliver.empty()) - { - try - { - const message& message_to_deliver = destination_node->messages_to_deliver.front(); - if (message_to_deliver.msg_type == trx_message_type) - destination_node->delegate->handle_transaction(message_to_deliver.as()); - else if (message_to_deliver.msg_type == block_message_type) - { - std::vector contained_transaction_message_ids; - destination_node->delegate->handle_block(message_to_deliver.as(), false, contained_transaction_message_ids); - } - else - destination_node->delegate->handle_message(message_to_deliver); - } - catch ( const fc::exception& e ) - { - elog( "${r}", ("r",e) ); - } - destination_node->messages_to_deliver.pop(); - } - } - - void simulated_network::broadcast( const message& item_to_broadcast ) - { - for (node_info* network_node_info : network_nodes) - { - network_node_info->messages_to_deliver.emplace(item_to_broadcast); - if (!network_node_info->message_sender_task_done.valid() || network_node_info->message_sender_task_done.ready()) - network_node_info->message_sender_task_done = fc::async([=](){ message_sender(network_node_info); }, "simulated_network_sender"); - } - } - - void simulated_network::add_node_delegate( node_delegate* node_delegate_to_add ) - { - network_nodes.push_back(new node_info(node_delegate_to_add)); - } - - namespace detail - { -#define ROLLING_WINDOW_SIZE 1000 -#define INITIALIZE_ACCUMULATOR(r, data, method_name) \ - , BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator))(boost::accumulators::tag::rolling_window::window_size = ROLLING_WINDOW_SIZE) \ - , BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator))(boost::accumulators::tag::rolling_window::window_size = ROLLING_WINDOW_SIZE) \ - , BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator))(boost::accumulators::tag::rolling_window::window_size = ROLLING_WINDOW_SIZE) - - - statistics_gathering_node_delegate_wrapper::statistics_gathering_node_delegate_wrapper(node_delegate* delegate, fc::thread* thread_for_delegate_calls) : 
- _node_delegate(delegate), - _thread(thread_for_delegate_calls) - BOOST_PP_SEQ_FOR_EACH(INITIALIZE_ACCUMULATOR, unused, NODE_DELEGATE_METHOD_NAMES) - {} -#undef INITIALIZE_ACCUMULATOR - - fc::variant_object statistics_gathering_node_delegate_wrapper::get_call_statistics() - { - fc::mutable_variant_object statistics; - std::ostringstream note; - note << "All times are in microseconds, mean is the average of the last " << ROLLING_WINDOW_SIZE << " call times"; - statistics["_note"] = note.str(); - -#define ADD_STATISTICS_FOR_METHOD(r, data, method_name) \ - fc::mutable_variant_object BOOST_PP_CAT(method_name, _stats); \ - BOOST_PP_CAT(method_name, _stats)["min"] = boost::accumulators::min(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["mean"] = boost::accumulators::rolling_mean(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["max"] = boost::accumulators::max(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["sum"] = boost::accumulators::sum(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_before_min"] = boost::accumulators::min(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_before_mean"] = boost::accumulators::rolling_mean(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_before_max"] = boost::accumulators::max(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_before_sum"] = boost::accumulators::sum(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_after_min"] = boost::accumulators::min(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_after_mean"] = boost::accumulators::rolling_mean(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_after_max"] = boost::accumulators::max(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["delay_after_sum"] = boost::accumulators::sum(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator))); \ - BOOST_PP_CAT(method_name, _stats)["count"] = boost::accumulators::count(BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator))); \ - statistics[BOOST_PP_STRINGIZE(method_name)] = BOOST_PP_CAT(method_name, _stats); - - BOOST_PP_SEQ_FOR_EACH(ADD_STATISTICS_FOR_METHOD, unused, NODE_DELEGATE_METHOD_NAMES) -#undef ADD_STATISTICS_FOR_METHOD - - return statistics; - } - - template - struct invoke_in_appbase { - template - T operator()( Lambda&& l )const { - typename fc::promise::ptr p(new fc::promise( "invoke in appbase" )); - appbase::app().get_io_service().post( [&](){ - try { - p->set_value( l() ); - } catch ( const fc::exception& e ) { - p->set_exception( e.dynamic_copy_exception() ); - } - } ); - return p->wait(); - } - }; - template<> - struct invoke_in_appbase { - template - void operator()( Lambda&& l )const { - fc::promise::ptr p(new fc::promise( "invoke in appbase" )); - appbase::app().get_io_service().post( [&](){ - try { - l(); - p->set_value(); - } catch ( const fc::exception& e ) { - p->set_exception( e.dynamic_copy_exception() ); - } - } ); - 
p->wait(); - } - }; - -// define VERBOSE_NODE_DELEGATE_LOGGING to log whenever the node delegate throws exceptions -//#define VERBOSE_NODE_DELEGATE_LOGGING -#ifdef VERBOSE_NODE_DELEGATE_LOGGING -# define INVOKE_AND_COLLECT_STATISTICS(method_name, ...) \ - try \ - { \ - call_statistics_collector statistics_collector(#method_name, \ - &_ ## method_name ## _execution_accumulator, \ - &_ ## method_name ## _delay_before_accumulator, \ - &_ ## method_name ## _delay_after_accumulator); \ - if (_thread->is_current()) \ - { \ - call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ - return _node_delegate->method_name(__VA_ARGS__); \ - } \ - else \ - return _thread->async([&](){ \ - call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ - return _node_delegate->method_name(__VA_ARGS__); \ - }, "invoke " BOOST_STRINGIZE(method_name)).wait(); \ - } \ - catch (const fc::exception& e) \ - { \ - dlog("node_delegate threw fc::exception: ${e}", ("e", e)); \ - throw; \ - } \ - catch (const std::exception& e) \ - { \ - dlog("node_delegate threw std::exception: ${e}", ("e", e.what())); \ - throw; \ - } \ - catch (...) \ - { \ - dlog("node_delegate threw unrecognized exception"); \ - throw; \ - } -#else -# define INVOKE_AND_COLLECT_STATISTICS(method_name, ...) \ - call_statistics_collector statistics_collector(#method_name, \ - &_ ## method_name ## _execution_accumulator, \ - &_ ## method_name ## _delay_before_accumulator, \ - &_ ## method_name ## _delay_after_accumulator); \ - if (_thread->is_current()) \ - { \ - call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ - return _node_delegate->method_name(__VA_ARGS__); \ - } \ - else \ - { \ - using T = decltype( _node_delegate->method_name(__VA_ARGS__) ); \ - return invoke_in_appbase()( [&]() mutable { \ - call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ - return _node_delegate->method_name(__VA_ARGS__); \ - }); \ - } - -#endif - bool statistics_gathering_node_delegate_wrapper::has_item( const net::item_id& id ) - { - INVOKE_AND_COLLECT_STATISTICS(has_item, id); - } - - void statistics_gathering_node_delegate_wrapper::handle_message( const message& message_to_handle ) - { - INVOKE_AND_COLLECT_STATISTICS(handle_message, message_to_handle); - } - - bool statistics_gathering_node_delegate_wrapper::handle_block(const eos::net::block_message& block_message, bool sync_mode, std::vector& contained_transaction_message_ids) - { - INVOKE_AND_COLLECT_STATISTICS(handle_block, block_message, sync_mode, contained_transaction_message_ids); - } - - void statistics_gathering_node_delegate_wrapper::handle_transaction( const eos::net::trx_message& transaction_message ) - { - INVOKE_AND_COLLECT_STATISTICS(handle_transaction, transaction_message); - } - - std::vector statistics_gathering_node_delegate_wrapper::get_block_ids(const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, - uint32_t limit /* = 2000 */) - { - INVOKE_AND_COLLECT_STATISTICS(get_block_ids, blockchain_synopsis, remaining_item_count, limit); - } - - message statistics_gathering_node_delegate_wrapper::get_item( const item_id& id ) - { - INVOKE_AND_COLLECT_STATISTICS(get_item, id); - } - - chain_id_type statistics_gathering_node_delegate_wrapper::get_chain_id() const - { - INVOKE_AND_COLLECT_STATISTICS(get_chain_id); - } - - std::vector statistics_gathering_node_delegate_wrapper::get_blockchain_synopsis(const item_hash_t& 
reference_point, uint32_t number_of_blocks_after_reference_point) - { - INVOKE_AND_COLLECT_STATISTICS(get_blockchain_synopsis, reference_point, number_of_blocks_after_reference_point); - } - - void statistics_gathering_node_delegate_wrapper::sync_status( uint32_t item_type, uint32_t item_count ) - { - INVOKE_AND_COLLECT_STATISTICS(sync_status, item_type, item_count); - } - - void statistics_gathering_node_delegate_wrapper::connection_count_changed( uint32_t c ) - { - INVOKE_AND_COLLECT_STATISTICS(connection_count_changed, c); - } - - uint32_t statistics_gathering_node_delegate_wrapper::get_block_number(const item_hash_t& block_id) - { - // this function doesn't need to block, - ASSERT_TASK_NOT_PREEMPTED(); - return _node_delegate->get_block_number(block_id); - } - - fc::time_point_sec statistics_gathering_node_delegate_wrapper::get_block_time(const item_hash_t& block_id) - { - INVOKE_AND_COLLECT_STATISTICS(get_block_time, block_id); - } - - item_hash_t statistics_gathering_node_delegate_wrapper::get_head_block_id() const - { - INVOKE_AND_COLLECT_STATISTICS(get_head_block_id); - } - - uint32_t statistics_gathering_node_delegate_wrapper::estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const - { - INVOKE_AND_COLLECT_STATISTICS(estimate_last_known_fork_from_git_revision_timestamp, unix_timestamp); - } - - void statistics_gathering_node_delegate_wrapper::error_encountered(const std::string& message, const fc::oexception& error) - { - INVOKE_AND_COLLECT_STATISTICS(error_encountered, message, error); - } - - uint8_t statistics_gathering_node_delegate_wrapper::get_current_block_interval_in_seconds() const - { - INVOKE_AND_COLLECT_STATISTICS(get_current_block_interval_in_seconds); - } - -#undef INVOKE_AND_COLLECT_STATISTICS - - } // end namespace detail - -} } // end namespace eos::net diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp deleted file mode 100644 index 856a25bb61e5a4db87d9d3280c9ff626ab622062..0000000000000000000000000000000000000000 --- a/libraries/net/peer_connection.cpp +++ /dev/null @@ -1,516 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#include -#include -#include -#include - -#include - -#ifdef DEFAULT_LOGGER -# undef DEFAULT_LOGGER -#endif -#define DEFAULT_LOGGER "p2p" - -#ifndef NDEBUG -# define VERIFY_CORRECT_THREAD() assert(_thread->is_current()) -#else -# define VERIFY_CORRECT_THREAD() do {} while (0) -#endif - -namespace eos { namespace net - { - message peer_connection::real_queued_message::get_message(peer_connection_delegate*) - { - if (message_send_time_field_offset != (size_t)-1) - { - // patch the current time into the message. Since this operates on the packed version of the structure, - // it won't work for anything after a variable-length field - std::vector packed_current_time = fc::raw::pack(fc::time_point::now()); - assert(message_send_time_field_offset + packed_current_time.size() <= message_to_send.data.size()); - memcpy(message_to_send.data.data() + message_send_time_field_offset, - packed_current_time.data(), packed_current_time.size()); - } - return message_to_send; - } - size_t peer_connection::real_queued_message::get_size_in_queue() - { - return message_to_send.data.size(); - } - message peer_connection::virtual_queued_message::get_message(peer_connection_delegate* node) - { - return node->get_message_for_item(item_to_send); - } - - size_t peer_connection::virtual_queued_message::get_size_in_queue() - { - return sizeof(item_id); - } - - peer_connection::peer_connection(peer_connection_delegate* delegate) : - _node(delegate), - _message_connection(this), - _total_queued_messages_size(0), - direction(peer_connection_direction::unknown), - is_firewalled(firewalled_state::unknown), - our_state(our_connection_state::disconnected), - they_have_requested_close(false), - their_state(their_connection_state::disconnected), - we_have_requested_close(false), - negotiation_status(connection_negotiation_status::disconnected), - number_of_unfetched_item_ids(0), - peer_needs_sync_items_from_us(true), - we_need_sync_items_from_peer(true), - inhibit_fetching_sync_blocks(false), - transaction_fetching_inhibited_until(fc::time_point::min()), - last_known_fork_block_number(0), - firewall_check_state(nullptr) -#ifndef NDEBUG - ,_thread(&fc::thread::current()), - _send_message_queue_tasks_running(0) -#endif - { - } - - peer_connection_ptr peer_connection::make_shared(peer_connection_delegate* delegate) - { - // The lifetime of peer_connection objects is managed by shared_ptrs in node. The peer_connection - // is responsible for notifying the node when it should be deleted, and the process of deleting it - // cleans up the peer connection's asynchronous tasks which are responsible for notifying the node - // when it should be deleted. - // To ease this vicious cycle, we slightly delay the execution of the destructor until the - // current task yields. In the (not uncommon) case where it is the task executing - // connect_to or read_loop, this allows the task to finish before the destructor is forced - // to cancel it. 
- return peer_connection_ptr(new peer_connection(delegate)); - //, [](peer_connection* peer_to_delete){ fc::async([peer_to_delete](){delete peer_to_delete;}); }); - } - - void peer_connection::destroy() - { - VERIFY_CORRECT_THREAD(); - -#if 0 // this gets too verbose -#ifndef NDEBUG - struct scope_logger { - fc::optional endpoint; - scope_logger(const fc::optional& endpoint) : endpoint(endpoint) { dlog("entering peer_connection::destroy() for peer ${endpoint}", ("endpoint", endpoint)); } - ~scope_logger() { dlog("leaving peer_connection::destroy() for peer ${endpoint}", ("endpoint", endpoint)); } - } send_message_scope_logger(get_remote_endpoint()); -#endif -#endif - - try - { - dlog("calling close_connection()"); - close_connection(); - dlog("close_connection completed normally"); - } - catch ( const fc::canceled_exception& ) - { - assert(false && "the task that deletes peers should not be canceled because it will prevent us from cleaning up correctly"); - } - catch ( ... ) - { - dlog("close_connection threw"); - } - - try - { - dlog("canceling _send_queued_messages task"); - _send_queued_messages_done.cancel_and_wait(__FUNCTION__); - dlog("cancel_and_wait completed normally"); - } - catch( const fc::exception& e ) - { - wlog("Unexpected exception from peer_connection's send_queued_messages_task : ${e}", ("e", e)); - } - catch( ... ) - { - wlog("Unexpected exception from peer_connection's send_queued_messages_task"); - } - - try - { - dlog("canceling accept_or_connect_task"); - accept_or_connect_task_done.cancel_and_wait(__FUNCTION__); - dlog("accept_or_connect_task completed normally"); - } - catch( const fc::exception& e ) - { - wlog("Unexpected exception from peer_connection's accept_or_connect_task : ${e}", ("e", e)); - } - catch( ... ) - { - wlog("Unexpected exception from peer_connection's accept_or_connect_task"); - } - - _message_connection.destroy_connection(); // shut down the read loop - } - - peer_connection::~peer_connection() - { - VERIFY_CORRECT_THREAD(); - destroy(); - } - - fc::tcp_socket& peer_connection::get_socket() - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_socket(); - } - - void peer_connection::accept_connection() - { - VERIFY_CORRECT_THREAD(); - - struct scope_logger { - scope_logger() { dlog("entering peer_connection::accept_connection()"); } - ~scope_logger() { dlog("leaving peer_connection::accept_connection()"); } - } accept_connection_scope_logger; - - try - { - assert( our_state == our_connection_state::disconnected && - their_state == their_connection_state::disconnected ); - direction = peer_connection_direction::inbound; - negotiation_status = connection_negotiation_status::accepting; - _message_connection.accept(); // perform key exchange - negotiation_status = connection_negotiation_status::accepted; - _remote_endpoint = _message_connection.get_socket().remote_endpoint(); - - // firewall-detecting info is pretty useless for inbound connections, but initialize - // it the best we can - fc::ip::endpoint local_endpoint = _message_connection.get_socket().local_endpoint(); - inbound_address = local_endpoint.get_address(); - inbound_port = local_endpoint.port(); - outbound_port = inbound_port; - - their_state = their_connection_state::just_connected; - our_state = our_connection_state::just_connected; - ilog( "established inbound connection from ${remote_endpoint}, sending hello", ("remote_endpoint", _message_connection.get_socket().remote_endpoint() ) ); - } - catch ( const fc::exception& e ) - { - wlog( "error accepting connection ${e}", 
("e", e.to_detail_string() ) ); - throw; - } - } - - void peer_connection::connect_to( const fc::ip::endpoint& remote_endpoint, fc::optional local_endpoint ) - { - VERIFY_CORRECT_THREAD(); - try - { - assert( our_state == our_connection_state::disconnected && - their_state == their_connection_state::disconnected ); - direction = peer_connection_direction::outbound; - - _remote_endpoint = remote_endpoint; - if( local_endpoint ) - { - // the caller wants us to bind the local side of this socket to a specific ip/port - // This depends on the ip/port being unused, and on being able to set the - // SO_REUSEADDR/SO_REUSEPORT flags, and either of these might fail, so we need to - // detect if this fails. - try - { - _message_connection.bind( *local_endpoint ); - } - catch ( const fc::canceled_exception& ) - { - throw; - } - catch ( const fc::exception& except ) - { - wlog( "Failed to bind to desired local endpoint ${endpoint}, will connect using an OS-selected endpoint: ${except}", ("endpoint", *local_endpoint )("except", except ) ); - } - } - negotiation_status = connection_negotiation_status::connecting; - _message_connection.connect_to( remote_endpoint ); - negotiation_status = connection_negotiation_status::connected; - their_state = their_connection_state::just_connected; - our_state = our_connection_state::just_connected; - ilog( "established outbound connection to ${remote_endpoint}", ("remote_endpoint", remote_endpoint ) ); - } - catch ( fc::exception& e ) - { - elog( "fatal: error connecting to peer ${remote_endpoint}: ${e}", ("remote_endpoint", remote_endpoint )("e", e.to_detail_string() ) ); - throw; - } - } // connect_to() - - void peer_connection::on_message( message_oriented_connection* originating_connection, const message& received_message ) - { - VERIFY_CORRECT_THREAD(); - _node->on_message( this, received_message ); - } - - void peer_connection::on_connection_closed( message_oriented_connection* originating_connection ) - { - VERIFY_CORRECT_THREAD(); - negotiation_status = connection_negotiation_status::closed; - _node->on_connection_closed( this ); - } - - void peer_connection::send_queued_messages_task() - { - VERIFY_CORRECT_THREAD(); -#ifndef NDEBUG - struct counter { - unsigned& _send_message_queue_tasks_counter; - counter(unsigned& var) : _send_message_queue_tasks_counter(var) { /* dlog("entering peer_connection::send_queued_messages_task()"); */ assert(_send_message_queue_tasks_counter == 0); ++_send_message_queue_tasks_counter; } - ~counter() { assert(_send_message_queue_tasks_counter == 1); --_send_message_queue_tasks_counter; /* dlog("leaving peer_connection::send_queued_messages_task()"); */ } - } concurrent_invocation_counter(_send_message_queue_tasks_running); -#endif - while (!_queued_messages.empty()) - { - _queued_messages.front()->transmission_start_time = fc::time_point::now(); - message message_to_send = _queued_messages.front()->get_message(_node); - try - { - //dlog("peer_connection::send_queued_messages_task() calling message_oriented_connection::send_message() " - // "to send message of type ${type} for peer ${endpoint}", - // ("type", message_to_send.msg_type)("endpoint", get_remote_endpoint())); - _message_connection.send_message(message_to_send); - //dlog("peer_connection::send_queued_messages_task()'s call to message_oriented_connection::send_message() completed normally for peer ${endpoint}", - // ("endpoint", get_remote_endpoint())); - } - catch (const fc::canceled_exception&) - { - dlog("message_oriented_connection::send_message() was canceled, 
rethrowing canceled_exception"); - throw; - } - catch (const fc::exception& send_error) - { - elog("Error sending message: ${exception}. Closing connection.", ("exception", send_error)); - try - { - close_connection(); - } - catch (const fc::exception& close_error) - { - elog("Caught error while closing connection: ${exception}", ("exception", close_error)); - } - return; - } - catch (const std::exception& e) - { - elog("message_oriented_exception::send_message() threw a std::exception(): ${what}", ("what", e.what())); - } - catch (...) - { - elog("message_oriented_exception::send_message() threw an unhandled exception"); - } - _queued_messages.front()->transmission_finish_time = fc::time_point::now(); - _total_queued_messages_size -= _queued_messages.front()->get_size_in_queue(); - _queued_messages.pop(); - } - //dlog("leaving peer_connection::send_queued_messages_task() due to queue exhaustion"); - } - - void peer_connection::send_queueable_message(std::unique_ptr&& message_to_send) - { - VERIFY_CORRECT_THREAD(); - _total_queued_messages_size += message_to_send->get_size_in_queue(); - _queued_messages.emplace(std::move(message_to_send)); - if (_total_queued_messages_size > EOS_NET_MAXIMUM_QUEUED_MESSAGES_IN_BYTES) - { - elog("send queue exceeded maximum size of ${max} bytes (current size ${current} bytes)", - ("max", EOS_NET_MAXIMUM_QUEUED_MESSAGES_IN_BYTES)("current", _total_queued_messages_size)); - try - { - close_connection(); - } - catch (const fc::exception& e) - { - elog("Caught error while closing connection: ${exception}", ("exception", e)); - } - return; - } - - if( _send_queued_messages_done.valid() && _send_queued_messages_done.canceled() ) - FC_THROW_EXCEPTION(fc::exception, "Attempting to send a message on a connection that is being shut down"); - - if (!_send_queued_messages_done.valid() || _send_queued_messages_done.ready()) - { - //dlog("peer_connection::send_message() is firing up send_queued_message_task"); - _send_queued_messages_done = fc::async([this](){ send_queued_messages_task(); }, "send_queued_messages_task"); - } - //else - // dlog("peer_connection::send_message() doesn't need to fire up send_queued_message_task, it's already running"); - } - - void peer_connection::send_message(const message& message_to_send, size_t message_send_time_field_offset) - { - VERIFY_CORRECT_THREAD(); - //dlog("peer_connection::send_message() enqueueing message of type ${type} for peer ${endpoint}", - // ("type", message_to_send.msg_type)("endpoint", get_remote_endpoint())); - std::unique_ptr message_to_enqueue(new real_queued_message(message_to_send, message_send_time_field_offset)); - send_queueable_message(std::move(message_to_enqueue)); - } - - void peer_connection::send_item(const item_id& item_to_send) - { - VERIFY_CORRECT_THREAD(); - //dlog("peer_connection::send_item() enqueueing message of type ${type} for peer ${endpoint}", - // ("type", item_to_send.item_type)("endpoint", get_remote_endpoint())); - std::unique_ptr message_to_enqueue(new virtual_queued_message(item_to_send)); - send_queueable_message(std::move(message_to_enqueue)); - } - - void peer_connection::close_connection() - { - VERIFY_CORRECT_THREAD(); - negotiation_status = connection_negotiation_status::closing; - if (connection_terminated_time != fc::time_point::min()) - connection_terminated_time = fc::time_point::now(); - _message_connection.close_connection(); - } - - void peer_connection::destroy_connection() - { - VERIFY_CORRECT_THREAD(); - negotiation_status = connection_negotiation_status::closing; - 
destroy(); - } - - uint64_t peer_connection::get_total_bytes_sent() const - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_total_bytes_sent(); - } - - uint64_t peer_connection::get_total_bytes_received() const - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_total_bytes_received(); - } - - fc::time_point peer_connection::get_last_message_sent_time() const - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_last_message_sent_time(); - } - - fc::time_point peer_connection::get_last_message_received_time() const - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_last_message_received_time(); - } - - fc::optional peer_connection::get_remote_endpoint() - { - VERIFY_CORRECT_THREAD(); - return _remote_endpoint; - } - fc::ip::endpoint peer_connection::get_local_endpoint() - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_socket().local_endpoint(); - } - - void peer_connection::set_remote_endpoint( fc::optional new_remote_endpoint ) - { - VERIFY_CORRECT_THREAD(); - _remote_endpoint = new_remote_endpoint; - } - - bool peer_connection::busy() - { - VERIFY_CORRECT_THREAD(); - return !items_requested_from_peer.empty() || !sync_items_requested_from_peer.empty() || item_ids_requested_from_peer; - } - - bool peer_connection::idle() - { - VERIFY_CORRECT_THREAD(); - return !busy(); - } - - bool peer_connection::is_transaction_fetching_inhibited() const - { - VERIFY_CORRECT_THREAD(); - return transaction_fetching_inhibited_until > fc::time_point::now(); - } - - fc::sha512 peer_connection::get_shared_secret() const - { - VERIFY_CORRECT_THREAD(); - return _message_connection.get_shared_secret(); - } - - void peer_connection::clear_old_inventory() - { - VERIFY_CORRECT_THREAD(); - fc::time_point_sec oldest_inventory_to_keep(fc::time_point::now() - fc::minutes(EOS_NET_MAX_INVENTORY_SIZE_IN_MINUTES)); - - // expire old items from inventory_advertised_to_peer - auto oldest_inventory_to_keep_iter = inventory_advertised_to_peer.get().lower_bound(oldest_inventory_to_keep); - auto begin_iter = inventory_advertised_to_peer.get().begin(); - unsigned number_of_elements_advertised_to_peer_to_discard = std::distance(begin_iter, oldest_inventory_to_keep_iter); - inventory_advertised_to_peer.get().erase(begin_iter, oldest_inventory_to_keep_iter); - - // also expire items from inventory_peer_advertised_to_us - oldest_inventory_to_keep_iter = inventory_peer_advertised_to_us.get().lower_bound(oldest_inventory_to_keep); - begin_iter = inventory_peer_advertised_to_us.get().begin(); - unsigned number_of_elements_peer_advertised_to_discard = std::distance(begin_iter, oldest_inventory_to_keep_iter); - inventory_peer_advertised_to_us.get().erase(begin_iter, oldest_inventory_to_keep_iter); - dlog("Expiring old inventory for peer ${peer}: removing ${to_peer} items advertised to peer (${remain_to_peer} left), and ${to_us} advertised to us (${remain_to_us} left)", - ("peer", get_remote_endpoint()) - ("to_peer", number_of_elements_advertised_to_peer_to_discard)("remain_to_peer", inventory_advertised_to_peer.size()) - ("to_us", number_of_elements_peer_advertised_to_discard)("remain_to_us", inventory_peer_advertised_to_us.size())); - } - - // we have a higher limit for blocks than transactions so we will still fetch blocks even when transactions are throttled - bool peer_connection::is_inventory_advertised_to_us_list_full_for_transactions() const - { - VERIFY_CORRECT_THREAD(); - return inventory_peer_advertised_to_us.size() > EOS_NET_MAX_INVENTORY_SIZE_IN_MINUTES * 
EOS_NET_MAX_TRX_PER_SECOND * 60; - } - - bool peer_connection::is_inventory_advertised_to_us_list_full() const - { - VERIFY_CORRECT_THREAD(); - // allow the total inventory size to be the maximum number of transactions we'll store in the inventory (above) - // plus the maximum number of blocks that would be generated in EOS_NET_MAX_INVENTORY_SIZE_IN_MINUTES (plus one, - // to give us some wiggle room) - return inventory_peer_advertised_to_us.size() > - EOS_NET_MAX_INVENTORY_SIZE_IN_MINUTES * EOS_NET_MAX_TRX_PER_SECOND * 60 + - (EOS_NET_MAX_INVENTORY_SIZE_IN_MINUTES + 1) * 60 / config::BlockIntervalSeconds; - } - - bool peer_connection::performing_firewall_check() const - { - return firewall_check_state && firewall_check_state->requesting_peer != node_id_t(); - } - - fc::optional<fc::ip::endpoint> peer_connection::get_endpoint_for_connecting() const - { - if (inbound_port) - return fc::ip::endpoint(inbound_address, inbound_port); - return fc::optional<fc::ip::endpoint>(); - } - -} } // end namespace eos::net diff --git a/libraries/net/peer_database.cpp b/libraries/net/peer_database.cpp deleted file mode 100644 index 880a93754418ce313e454e4bc4a3969e31364328..0000000000000000000000000000000000000000 --- a/libraries/net/peer_database.cpp +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE.
- */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - - - -namespace eos { namespace net { - namespace detail - { - using namespace boost::multi_index; - - class peer_database_impl - { - public: - struct last_seen_time_index {}; - struct endpoint_index {}; - typedef boost::multi_index_container, - member >, - hashed_unique, - member, - std::hash > > > potential_peer_set; - - private: - potential_peer_set _potential_peer_set; - fc::path _peer_database_filename; - - public: - void open(const fc::path& databaseFilename); - void close(); - void clear(); - void erase(const fc::ip::endpoint& endpointToErase); - void update_entry(const potential_peer_record& updatedRecord); - potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); - fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); - - peer_database::iterator begin() const; - peer_database::iterator end() const; - size_t size() const; - }; - - class peer_database_iterator_impl - { - public: - typedef peer_database_impl::potential_peer_set::index::type::iterator last_seen_time_index_iterator; - last_seen_time_index_iterator _iterator; - peer_database_iterator_impl(const last_seen_time_index_iterator& iterator) : - _iterator(iterator) - {} - }; - peer_database_iterator::peer_database_iterator( const peer_database_iterator& c ) : - boost::iterator_facade(c){} - - void peer_database_impl::open(const fc::path& peer_database_filename) - { - _peer_database_filename = peer_database_filename; - if (fc::exists(_peer_database_filename)) - { - try - { - std::vector peer_records = fc::json::from_file(_peer_database_filename).as >(); - std::copy(peer_records.begin(), peer_records.end(), std::inserter(_potential_peer_set, _potential_peer_set.end())); -#define MAXIMUM_PEERDB_SIZE 1000 - if (_potential_peer_set.size() > MAXIMUM_PEERDB_SIZE) - { - // prune database to a reasonable size - auto iter = _potential_peer_set.begin(); - std::advance(iter, MAXIMUM_PEERDB_SIZE); - _potential_peer_set.erase(iter, _potential_peer_set.end()); - } - } - catch (const fc::exception& e) - { - elog("error opening peer database file ${peer_database_filename}, starting with a clean database", - ("peer_database_filename", _peer_database_filename)); - } - } - } - - void peer_database_impl::close() - { - std::vector peer_records; - peer_records.reserve(_potential_peer_set.size()); - std::copy(_potential_peer_set.begin(), _potential_peer_set.end(), std::back_inserter(peer_records)); - - try - { - fc::path peer_database_filename_dir = _peer_database_filename.parent_path(); - if (!fc::exists(peer_database_filename_dir)) - fc::create_directories(peer_database_filename_dir); - fc::json::save_to_file(peer_records, _peer_database_filename); - } - catch (const fc::exception& e) - { - elog("error saving peer database to file ${peer_database_filename}", - ("peer_database_filename", _peer_database_filename)); - } - _potential_peer_set.clear(); - } - - void peer_database_impl::clear() - { - _potential_peer_set.clear(); - } - - void peer_database_impl::erase(const fc::ip::endpoint& endpointToErase) - { - auto iter = _potential_peer_set.get().find(endpointToErase); - if (iter != _potential_peer_set.get().end()) - _potential_peer_set.get().erase(iter); - } - - void peer_database_impl::update_entry(const potential_peer_record& updatedRecord) - { - auto iter = _potential_peer_set.get().find(updatedRecord.endpoint); - if (iter != _potential_peer_set.get().end()) 
- _potential_peer_set.get().modify(iter, [&updatedRecord](potential_peer_record& record) { record = updatedRecord; }); - else - _potential_peer_set.get().insert(updatedRecord); - } - - potential_peer_record peer_database_impl::lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup) - { - auto iter = _potential_peer_set.get().find(endpointToLookup); - if (iter != _potential_peer_set.get().end()) - return *iter; - return potential_peer_record(endpointToLookup); - } - - fc::optional peer_database_impl::lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup) - { - auto iter = _potential_peer_set.get().find(endpointToLookup); - if (iter != _potential_peer_set.get().end()) - return *iter; - return fc::optional(); - } - - peer_database::iterator peer_database_impl::begin() const - { - return peer_database::iterator(new peer_database_iterator_impl(_potential_peer_set.get().begin())); - } - - peer_database::iterator peer_database_impl::end() const - { - return peer_database::iterator(new peer_database_iterator_impl(_potential_peer_set.get().end())); - } - - size_t peer_database_impl::size() const - { - return _potential_peer_set.size(); - } - - peer_database_iterator::peer_database_iterator() - { - } - - peer_database_iterator::~peer_database_iterator() - { - } - - peer_database_iterator::peer_database_iterator(peer_database_iterator_impl* impl) : - my(impl) - { - } - - void peer_database_iterator::increment() - { - ++my->_iterator; - } - - bool peer_database_iterator::equal(const peer_database_iterator& other) const - { - return my->_iterator == other.my->_iterator; - } - - const potential_peer_record& peer_database_iterator::dereference() const - { - return *my->_iterator; - } - - } // end namespace detail - - peer_database::peer_database() : - my(new detail::peer_database_impl) - { - } - - peer_database::~peer_database() - {} - - void peer_database::open(const fc::path& databaseFilename) - { - my->open(databaseFilename); - } - - void peer_database::close() - { - my->close(); - } - - void peer_database::clear() - { - my->clear(); - } - - void peer_database::erase(const fc::ip::endpoint& endpointToErase) - { - my->erase(endpointToErase); - } - - void peer_database::update_entry(const potential_peer_record& updatedRecord) - { - my->update_entry(updatedRecord); - } - - potential_peer_record peer_database::lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup) - { - return my->lookup_or_create_entry_for_endpoint(endpointToLookup); - } - - fc::optional peer_database::lookup_entry_for_endpoint(const fc::ip::endpoint& endpoint_to_lookup) - { - return my->lookup_entry_for_endpoint(endpoint_to_lookup); - } - - peer_database::iterator peer_database::begin() const - { - return my->begin(); - } - - peer_database::iterator peer_database::end() const - { - return my->end(); - } - - size_t peer_database::size() const - { - return my->size(); - } - -} } // end namespace eos::net diff --git a/libraries/net/stcp_socket.cpp b/libraries/net/stcp_socket.cpp deleted file mode 100644 index 14d45cd83d56c5cb17f4e0c557a8c21a98aaf419..0000000000000000000000000000000000000000 --- a/libraries/net/stcp_socket.cpp +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include - -namespace eos { namespace net { - -stcp_socket::stcp_socket() -//:_buf_len(0) -#ifndef NDEBUG - : _read_buffer_in_use(false), - _write_buffer_in_use(false) -#endif -{ -} -stcp_socket::~stcp_socket() -{ -} - -void stcp_socket::do_key_exchange() -{ - _priv_key = fc::ecc::private_key::generate(); - fc::ecc::public_key pub = _priv_key.get_public_key(); - fc::ecc::public_key_data s = pub.serialize(); - std::shared_ptr serialized_key_buffer(new char[sizeof(fc::ecc::public_key_data)], [](char* p){ delete[] p; }); - memcpy(serialized_key_buffer.get(), (char*)&s, sizeof(fc::ecc::public_key_data)); - _sock.write( serialized_key_buffer, sizeof(fc::ecc::public_key_data) ); - _sock.read( serialized_key_buffer, sizeof(fc::ecc::public_key_data) ); - fc::ecc::public_key_data rpub; - memcpy((char*)&rpub, serialized_key_buffer.get(), sizeof(fc::ecc::public_key_data)); - - _shared_secret = _priv_key.get_shared_secret( rpub ); -// ilog("shared secret ${s}", ("s", shared_secret) ); - _send_aes.init( fc::sha256::hash( (char*)&_shared_secret, sizeof(_shared_secret) ), - fc::city_hash_crc_128((char*)&_shared_secret,sizeof(_shared_secret) ) ); - _recv_aes.init( fc::sha256::hash( (char*)&_shared_secret, sizeof(_shared_secret) ), - fc::city_hash_crc_128((char*)&_shared_secret,sizeof(_shared_secret) ) ); -} - - -void stcp_socket::connect_to( const fc::ip::endpoint& remote_endpoint ) -{ - _sock.connect_to( remote_endpoint ); - do_key_exchange(); -} - -void stcp_socket::bind( const fc::ip::endpoint& local_endpoint ) -{ - _sock.bind(local_endpoint); -} - -/** - * This method must read at least 16 bytes at a time from - * the underlying TCP socket so that it can decrypt them. It - * will buffer any left-over. - */ -size_t stcp_socket::readsome( char* buffer, size_t len ) -{ try { - assert( len > 0 && (len % 16) == 0 ); - -#ifndef NDEBUG - // This code was written with the assumption that you'd only be making one call to readsome - // at a time so it reuses _read_buffer. 
If you really need to make concurrent calls to - // readsome(), you'll need to prevent reusing _read_buffer here - struct check_buffer_in_use { - bool& _buffer_in_use; - check_buffer_in_use(bool& buffer_in_use) : _buffer_in_use(buffer_in_use) { assert(!_buffer_in_use); _buffer_in_use = true; } - ~check_buffer_in_use() { assert(_buffer_in_use); _buffer_in_use = false; } - } buffer_in_use_checker(_read_buffer_in_use); -#endif - - const size_t read_buffer_length = 4096; - if (!_read_buffer) - _read_buffer.reset(new char[read_buffer_length], [](char* p){ delete[] p; }); - - len = std::min(read_buffer_length, len); - - size_t s = _sock.readsome( _read_buffer, len, 0 ); - if( s % 16 ) - { - _sock.read(_read_buffer, 16 - (s%16), s); - s += 16-(s%16); - } - _recv_aes.decode( _read_buffer.get(), s, buffer ); - return s; -} FC_RETHROW_EXCEPTIONS( warn, "", ("len",len) ) } - -size_t stcp_socket::readsome( const std::shared_ptr& buf, size_t len, size_t offset ) -{ - return readsome(buf.get() + offset, len); -} - -bool stcp_socket::eof()const -{ - return _sock.eof(); -} - -size_t stcp_socket::writesome( const char* buffer, size_t len ) -{ try { - assert( len > 0 && (len % 16) == 0 ); - -#ifndef NDEBUG - // This code was written with the assumption that you'd only be making one call to writesome - // at a time so it reuses _write_buffer. If you really need to make concurrent calls to - // writesome(), you'll need to prevent reusing _write_buffer here - struct check_buffer_in_use { - bool& _buffer_in_use; - check_buffer_in_use(bool& buffer_in_use) : _buffer_in_use(buffer_in_use) { assert(!_buffer_in_use); _buffer_in_use = true; } - ~check_buffer_in_use() { assert(_buffer_in_use); _buffer_in_use = false; } - } buffer_in_use_checker(_write_buffer_in_use); -#endif - - const std::size_t write_buffer_length = 4096; - if (!_write_buffer) - _write_buffer.reset(new char[write_buffer_length], [](char* p){ delete[] p; }); - len = std::min(write_buffer_length, len); - memset(_write_buffer.get(), 0, len); // just in case aes.encode screws up - /** - * every sizeof(crypt_buf) bytes the aes channel - * has an error and doesn't decrypt properly... disable - * for now because we are going to upgrade to something - * better. 
- */ - uint32_t ciphertext_len = _send_aes.encode( buffer, len, _write_buffer.get() ); - assert(ciphertext_len == len); - _sock.write( _write_buffer, ciphertext_len ); - return ciphertext_len; -} FC_RETHROW_EXCEPTIONS( warn, "", ("len",len) ) } - -size_t stcp_socket::writesome( const std::shared_ptr& buf, size_t len, size_t offset ) -{ - return writesome(buf.get() + offset, len); -} - -void stcp_socket::flush() -{ - _sock.flush(); -} - - -void stcp_socket::close() -{ - try - { - _sock.close(); - }FC_RETHROW_EXCEPTIONS( warn, "error closing stcp socket" ); -} - -void stcp_socket::accept() -{ - do_key_exchange(); -} - - -}} // namespace eos::net - diff --git a/libraries/wallet/CMakeLists.txt b/libraries/wallet/CMakeLists.txt deleted file mode 100644 index 80923e3c26aa63e32257f4045076c8f14af56658..0000000000000000000000000000000000000000 --- a/libraries/wallet/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -file(GLOB HEADERS "include/eos/wallet/*.hpp") - -add_library( eos_wallet - wallet.cpp - - ${HEADERS} - ) -target_link_libraries( eos_wallet fc eos_chain ) -target_include_directories( eos_wallet - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" ) - -INSTALL( TARGETS - eos_wallet - - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) -INSTALL( FILES ${HEADERS} DESTINATION "include/eos/wallet" ) diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 85eaccbaafbb7b9c1e5460abaa330d7ac0650a5f..1ff12fb0dc73ceb1e006c65a31e0a8c85224188a 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -6,3 +6,5 @@ add_subdirectory(chain_api_plugin) add_subdirectory(producer_plugin) add_subdirectory(account_history_plugin) add_subdirectory(account_history_api_plugin) +add_subdirectory(wallet_plugin) +add_subdirectory(wallet_api_plugin) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 345000f44ed3b85e309975a655de58847b1020d4..ff30a46caa06fd54fcbaa1eee9f81ca4a13f7b82 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -54,6 +54,7 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_table_rows), CHAIN_RO_CALL(abi_json_to_bin), CHAIN_RO_CALL(abi_bin_to_json), + CHAIN_RO_CALL(get_required_keys), CHAIN_RW_CALL(push_block), CHAIN_RW_CALL(push_transaction), CHAIN_RW_CALL(push_transactions) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index b62196db71389e4457fb2f16556d1354257a31e1..6c961219eb2c7a829f650d8f1048d5cec9d09602 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -12,6 +12,8 @@ #include #include +#include + #include #include @@ -322,5 +324,14 @@ read_only::abi_bin_to_json_result read_only::abi_bin_to_json( const read_only::a return result; } +read_only::get_required_keys_result read_only::get_required_keys( const get_required_keys_params& params )const { + auto pretty_input = db.transaction_from_variant(params.transaction); + auto required_keys_set = db.get_required_keys(pretty_input, params.available_keys); + get_required_keys_result result; + result.required_keys = required_keys_set; + return result; +} + + } // namespace chain_apis } // namespace eos diff --git a/plugins/chain_plugin/include/eos/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eos/chain_plugin/chain_plugin.hpp index 78b0eabb8220d019bb8c084d443650bc88873006..44936118af577af0f732f4cbcb20c62d42466aae 100644 --- 
a/plugins/chain_plugin/include/eos/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eos/chain_plugin/chain_plugin.hpp @@ -7,6 +7,8 @@ #include +#include + namespace fc { class variant; } namespace eos { @@ -14,8 +16,10 @@ namespace eos { using std::unique_ptr; using namespace appbase; using chain::Name; - using fc::optional; using chain::uint128_t; + using chain::public_key_type; + using fc::optional; + using boost::container::flat_set; namespace chain_apis { struct empty{}; @@ -93,6 +97,16 @@ public: abi_bin_to_json_result abi_bin_to_json( const abi_bin_to_json_params& params )const; + struct get_required_keys_params { + fc::variant transaction; + flat_set available_keys; + }; + struct get_required_keys_result { + flat_set required_keys; + }; + + get_required_keys_result get_required_keys( const get_required_keys_params& params)const; + struct get_block_params { string block_num_or_id; @@ -276,3 +290,5 @@ FC_REFLECT( eos::chain_apis::read_only::abi_json_to_bin_params, (code)(action)(a FC_REFLECT( eos::chain_apis::read_only::abi_json_to_bin_result, (binargs)(required_scope)(required_auth) ) FC_REFLECT( eos::chain_apis::read_only::abi_bin_to_json_params, (code)(action)(binargs) ) FC_REFLECT( eos::chain_apis::read_only::abi_bin_to_json_result, (args)(required_scope)(required_auth) ) +FC_REFLECT( eos::chain_apis::read_only::get_required_keys_params, (transaction)(available_keys) ) +FC_REFLECT( eos::chain_apis::read_only::get_required_keys_result, (required_keys) ) diff --git a/plugins/net_plugin/include/eos/net_plugin/protocol.hpp b/plugins/net_plugin/include/eos/net_plugin/protocol.hpp index 44587aff1e68120fe18875d8fb479d8fad6aa3d2..c414bc8d3a7cc7527319471fc643c3125813a6d3 100644 --- a/plugins/net_plugin/include/eos/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eos/net_plugin/protocol.hpp @@ -21,17 +21,15 @@ namespace eos { struct notice_message { vector known_trx; - vector known_blocks; }; struct request_message { vector req_trx; - vector req_blocks; }; struct block_summary_message { - signed_block block; + block_id_type block; vector trx_ids; }; @@ -59,8 +57,8 @@ FC_REFLECT( eos::handshake_message, (os)(agent) ) FC_REFLECT( eos::block_summary_message, (block)(trx_ids) ) -FC_REFLECT( eos::notice_message, (known_trx)(known_blocks) ) -FC_REFLECT( eos::request_message, (req_trx)(req_blocks) ) +FC_REFLECT( eos::notice_message, (known_trx) ) +FC_REFLECT( eos::request_message, (req_trx) ) FC_REFLECT( eos::sync_request_message, (start_block)(end_block) ) /** diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8a2fcaaf2e3f37506aee61b852e5236944daa375..335655720f6bf34e4e9c4731ea06ad9d774e0263 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -15,7 +15,7 @@ #include #include -#include +#include namespace eos { using std::vector; @@ -101,54 +101,85 @@ namespace eos { class connection : public std::enable_shared_from_this { public: - connection( socket_ptr s, bool try_recon ) + connection( string endpoint ) : block_state(), trx_state(), in_sync_state(), out_sync_state(), - socket(s), - shared_peers(), + socket( std::make_shared( std::ref( app().get_io_service() ))), pending_message_size(), pending_message_buffer(), remote_node_id(), last_handshake(), out_queue(), - try_reconnect (try_recon) + connecting (false), + peer_addr (endpoint) { - wlog( "created connection" ); + wlog( "created connection to ${n}", ("n", endpoint) ); pending_message_buffer.resize( 1024*1024*4 ); auto *rnd = 
remote_node_id.data(); rnd[0] = 0; + } - - + connection( socket_ptr s ) + : block_state(), + trx_state(), + in_sync_state(), + out_sync_state(), + socket( s ), + pending_message_size(), + pending_message_buffer(), + remote_node_id(), + last_handshake(), + out_queue(), + connecting (false), + peer_addr () + { + wlog( "created connection from client" ); + pending_message_buffer.resize( 1024*1024*4 ); + auto *rnd = remote_node_id.data(); + rnd[0] = 0; } ~connection() { - wlog( "released connection" ); + if (peer_addr.empty()) + wlog( "released connection from client" ); + else + wlog( "released connection to server at ${addr}", ("addr", peer_addr) ); } + block_state_index block_state; transaction_state_index trx_state; - sync_request_index in_sync_state; - sync_request_index out_sync_state; + sync_request_index in_sync_state; // we are requesting info from this peer + sync_request_index out_sync_state; // this peer is requesting info from us socket_ptr socket; - set shared_peers; - set mutual_peers; uint32_t pending_message_size; vector pending_message_buffer; - vector raw_recv; - vector raw_send; fc::sha256 remote_node_id; handshake_message last_handshake; std::deque out_queue; uint32_t mtu; - bool try_reconnect; + bool connecting; + string peer_addr; + + void reset () { + in_sync_state.clear(); + out_sync_state.clear(); + block_state.clear(); + trx_state.clear(); + } + + void close () { + out_queue.clear(); + if (socket) { + socket->close(); + } + } void send_handshake ( ) { - dlog ("sending new handshake message" ); handshake_message hello; handshake_initializer::populate(hello); send (hello); @@ -246,21 +277,8 @@ namespace eos { > node_transaction_index; - static boost::thread_specific_ptr last_recd_txn; - static net_plugin_impl *my_impl; - - class last_recd_txn_guard { - public: - last_recd_txn_guard(transaction_id_type tid ) { - last_recd_txn.reset (new transaction_id_type (tid)); - } - ~last_recd_txn_guard () { - dlog ("TSS Guard dtor 1"); - last_recd_txn.reset (0); - dlog ("TSS Guard dtor 2"); - } - }; + static net_plugin_impl *my_impl; class net_plugin_impl { public: @@ -272,86 +290,68 @@ namespace eos { std::set resolved_nodes; std::set learned_nodes; - std::set pending_sockets; std::set< connection_ptr > connections; bool done = false; + unique_ptr connector_check; + unique_ptr transaction_check; + boost::asio::steady_timer::duration connector_period; + boost::asio::steady_timer::duration txn_exp_period; - int16_t network_version = 0; - chain_id_type chain_id; ///< used to identify chain - fc::sha256 node_id; ///< used to identify peers and prevent self-connect + int16_t network_version; + chain_id_type chain_id; + fc::sha256 node_id; string user_agent_name; chain_plugin* chain_plug; int32_t just_send_it_max; + bool send_whole_blocks; node_transaction_index local_txns; vector pending_notify; - void connect( const string& peer_addr ) { - auto host = peer_addr.substr( 0, peer_addr.find(':') ); - auto port = peer_addr.substr( host.size()+1, host.size() ); + shared_ptr resolver; + + + void connect( connection_ptr c ) { + c->connecting = true; + auto host = c->peer_addr.substr( 0, c->peer_addr.find(':') ); + auto port = c->peer_addr.substr( host.size()+1, host.size() ); idump((host)(port)); - auto resolver = std::make_shared( std::ref( app().get_io_service() ) ); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); // Note: need to add support for IPv6 too resolver->async_resolve( query, - [resolver,peer_addr,this]( const boost::system::error_code& err, 
tcp::resolver::iterator endpoint_itr ){ + [c, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ){ if( !err ) { - connect( resolver, endpoint_itr ); + connect( c, endpoint_itr ); } else { - elog( "Unable to resolve ${peer_addr}: ${error}", ( "peer_addr", peer_addr )("error", err.message() ) ); + elog( "Unable to resolve ${peer_addr}: ${error}", ( "peer_addr", c->peer_addr )("error", err.message() ) ); } }); } - void connect( std::shared_ptr resolver, tcp::resolver::iterator endpoint_itr ) { - auto sock = std::make_shared( std::ref( app().get_io_service() ) ); - pending_sockets.insert( sock ); - + void connect( connection_ptr c, tcp::resolver::iterator endpoint_itr ) { auto current_endpoint = *endpoint_itr; ++endpoint_itr; - sock->async_connect( current_endpoint, - [sock,resolver,endpoint_itr, this] + c->socket->async_connect( current_endpoint, + [c,endpoint_itr, this] ( const boost::system::error_code& err ) { - pending_sockets.erase( sock ); if( !err ) { - start_session( std::make_shared(sock, true)); + start_session( c ); } else { if( endpoint_itr != tcp::resolver::iterator() ) { - connect( resolver, endpoint_itr ); + connect( c, endpoint_itr ); + } + else { + c->connecting = false; } } } ); } -#if 0 - /** - * This thread performs high level coordination among multiple connections and - * ensures connections are cleaned up, reconnected, etc. - */ - void network_loop() { - try { - ilog( "starting network loop" ); - while( !done ) { - for( auto itr = connections.begin(); itr != connections.end(); ) { - auto con = *itr; - if( !con->socket->is_open() ) { - close(con); - itr = connections.begin(); - continue; - } - ++itr; - } - } - ilog("network loop done"); - } FC_CAPTURE_AND_RETHROW() } -#endif - - void start_session(connection_ptr con ) { - connections.insert (con); + con->connecting = false; uint32_t mtu = 1300; // need a way to query this if (mtu < just_send_it_max) { just_send_it_max = mtu; @@ -370,7 +370,9 @@ namespace eos { auto socket = std::make_shared( std::ref( app().get_io_service() ) ); acceptor->async_accept( *socket, [socket,this]( boost::system::error_code ec ) { if( !ec ) { - start_session( std::make_shared( socket, false ) ); + connection_ptr c = std::make_shared( socket ); + connections.insert( c ); + start_session( c ); start_listen_loop(); } else { elog( "Error accepting connection: ${m}", ("m", ec.message() ) ); @@ -414,40 +416,11 @@ namespace eos { return fc::ip::endpoint (addr,ep.port()); } - // template - void send_all (const SignedTransaction &msg) { + template + void send_all (const net_message &msg, VerifierFunc verify) { for (auto &c : connections) { - if (c->out_sync_state.size() == 0) { - const auto& bs = c->trx_state.find(msg.id()); - if (bs == c->trx_state.end()) { - c->trx_state.insert(transaction_state({msg.id(),true,true,(uint32_t)-1, - fc::time_point(),fc::time_point()})); - } - c->send(msg); - } - } - } - - void send_all (const block_summary_message &msg) { - for (auto &c : connections) { - const auto& bs = c->block_state.find(msg.block.id()); - if (bs == c->block_state.end()) { - c->block_state.insert ((block_state){msg.block.id(),true,true,fc::time_point()}); - if (c->out_sync_state.size() == 0) - c->send(msg); - } - } - } - - void send_all (const notice_message &msg) { - for (auto &c : connections) { - if (c->out_sync_state.size() == 0) { - for (const auto& b : msg.known_blocks) { - const auto& bs = c->block_state.find(b); - if (bs == c->block_state.end()) { - c->block_state.insert 
((block_state){b,false,true,fc::time_point()}); - } - } + if (c->out_sync_state.size() == 0 && + verify (c)) { c->send(msg); } } @@ -472,16 +445,7 @@ namespace eos { } } - void forward (connection_ptr source, const net_message &msg) { - for (auto c : connections ) { - if (c != source) { - c->send (msg); - } - } - } - void handle_message (connection_ptr c, const handshake_message &msg) { - dlog ("got a handshake message from ${p}", ("p", msg.p2p_address)); if (msg.node_id == node_id) { elog ("Self connection detected. Closing connection"); close(c); @@ -493,34 +457,48 @@ namespace eos { return; } if (msg.network_version != network_version) { - elog ("Peer network id does not match "); + elog ("Peer network version does not match "); close (c); return; } chain_controller& cc = chain_plug->chain(); + uint32_t lib_num = cc.last_irreversible_block_num (); + uint32_t peer_lib = msg.last_irreversible_block_num; + bool on_fork = false; + if (peer_lib <= lib_num && peer_lib > 0) { + try { + block_id_type peer_lib_id = cc.get_block_id_for_num (peer_lib); + on_fork = (msg.last_irreversible_block_id != peer_lib_id); + } + catch (...) { + wlog ("caught an exception getting block id for ${pl}",("pl",peer_lib)); + on_fork = true; + } + if (on_fork) { + elog ("Peer chain is forked"); + close (c); + return; + } + } + uint32_t head = cc.head_block_num (); if ( msg.head_num > head) { shared_fetch (head, msg.head_num); } - dlog ("setting remote node id = ${n}",("n", msg.node_id)); - if ( c->remote_node_id != msg.node_id) { - if (c->try_reconnect) { - dlog ("adding ${pn} to resolved node list", ("pn", msg.node_id)); + c->reset(); + if (c->peer_addr.length() > 0) { auto old_id = resolved_nodes.find (c->remote_node_id); if (old_id != resolved_nodes.end()) { - dlog ("first purging old id"); resolved_nodes.erase(old_id); } resolved_nodes.insert (msg.node_id); } else { - dlog ("adding ${pn} to learned node list", ("pn", msg.node_id)); auto old_id = learned_nodes.find (c->remote_node_id); if (old_id != learned_nodes.end()) { - dlog ("first purging old id"); learned_nodes.erase(old_id); } learned_nodes.insert (msg.node_id); @@ -532,17 +510,10 @@ namespace eos { } void handle_message (connection_ptr c, const notice_message &msg) { - dlog ("got a notice message"); + //peer tells us about one or more blocks or txns. We need to forward only those + //we don't already know about. and for each peer note that it knows notice_message fwd; request_message req; - for (const auto& b : msg.known_blocks) { - const auto &bs = c->block_state.find(b); - if (bs == c->block_state.end()) { - c->block_state.insert((block_state){b,true,true,fc::time_point()}); - fwd.known_blocks.push_back(b); - req.req_blocks.push_back(b); - } - } for (const auto& t : msg.known_trx) { const auto &tx = c->trx_state.find(t); @@ -553,14 +524,15 @@ namespace eos { req.req_trx.push_back(t); } } - if (fwd.known_blocks.size() > 0 || fwd.known_trx.size() > 0) { - forward (c, fwd); + if (fwd.known_trx.size() > 0) { + send_all (fwd, [c,fwd](connection_ptr cptr) -> bool { + return cptr != c; + }); c->send(req); } - } + } void handle_message (connection_ptr c, const request_message &msg) { - dlog ("got a request message"); // collect a list of transactions that were found. // collect a second list of transaction ids that were not found but are otherwise known by some peers // finally, what remains are future(?) 
transactions @@ -573,13 +545,12 @@ namespace eos { send_now.push_back(txn->transaction); } else { - dlog ("request message looping through peers"); - int cycle_count = 4; + int cycle_count = 2; auto loop_start = conn_ndx++; while (conn_ndx != loop_start) { if (conn_ndx == connections.end()) { if (--cycle_count == 0) { - dlog ("breaking out of stupid loop"); + elog("loop cycled twice, something is wrong"); break; } conn_ndx = connections.begin(); @@ -591,7 +562,8 @@ namespace eos { } auto txn = conn_ndx->get()->trx_state.get().find(t); if (txn != conn_ndx->get()->trx_state.end()) { - // add to forward_to list + + //forward_to[conn_ndx]->push_back(t); break; } ++conn_ndx; @@ -600,89 +572,88 @@ namespace eos { } if (!send_now.empty()) { + for (auto &t : send_now) { + c->send (t); + } } } void handle_message (connection_ptr c, const sync_request_message &msg) { - // dlog ("got a sync request message for blocks ${s} to ${e}", - // ("s",msg.start_block)("e", msg.end_block)); sync_state req = {msg.start_block,msg.end_block,msg.start_block-1,time_point::now()}; c->out_sync_state.insert (req); c->write_block_backlog (); } void handle_message (connection_ptr c, const block_summary_message &msg) { - dlog ("got a block summary message blkid = ${b} from peer ${p}", ("b",msg.block.id())("p", c->remote_node_id)); -#warning ("TODO: reconstruct actual block from cached transactions") const auto& itr = c->block_state.get(); - auto bs = itr.find(msg.block.id()); + auto bs = itr.find(msg.block); if (bs == c->block_state.end()) { - dlog ("not found, forwarding on"); - c->block_state.insert (block_state({msg.block.id(),true,true,fc::time_point()})); - forward (c, msg); + c->block_state.insert (block_state({msg.block,true,true,fc::time_point()})); + send_all (msg, [c](connection_ptr cptr) -> bool { + return cptr != c; + }); } else { if (!bs->is_known) { - dlog ("found, but !is_known, forwarding on"); block_state value = *bs; value.is_known= true; c->block_state.insert (std::move(value)); - forward (c, msg); + send_all (msg, [c](connection_ptr cptr) -> bool { + return cptr != c; + }); } } +#warning ("TODO: reconstruct actual block from cached transactions") + signed_block sb; chain_controller &cc = chain_plug->chain(); - if (!cc.is_known_block(msg.block.id()) ) { + if (!cc.is_known_block(msg.block) ) { try { - chain_plug->accept_block(msg.block, false); - dlog ("successfully accepted block"); + chain_plug->accept_block(sb, false); } catch (const unlinkable_block_exception &ex) { - elog (" caught unlinkable block exception #${n}",("n",msg.block.block_num())); - // close (c); + elog (" caught unlinkable block exception #${n}",("n",sb.block_num())); + close (c); } catch (const assert_exception &ex) { // received a block due to out of sequence - elog (" caught assertion #${n}",("n",msg.block.block_num())); - // close (c); + elog (" caught assertion #${n}",("n",sb.block_num())); + close (c); } } } void handle_message (connection_ptr c, const SignedTransaction &msg) { + auto txn = local_txns.get().find(msg.id()); + if (txn != local_txns.end()) { + return; + } chain_controller &cc = chain_plug->chain(); if (!cc.is_known_transaction(msg.id())) { - last_recd_txn_guard tls_guard(msg.id()); chain_plug->accept_transaction (msg); - uint16_t bn = static_cast(msg.refBlockNum); - node_transaction_state nts = {msg.id(),time_point::now(),msg.expiration, - msg,bn, true}; - local_txns.insert(nts); - forward(c, msg); } } void handle_message (connection_ptr c, const signed_block &msg) { - uint32_t bn = msg.block_num(); - dlog ("got a 
signed_block, num = ${n}", ("n", bn)); chain_controller &cc = chain_plug->chain(); if (cc.is_known_block(msg.id())) { - dlog ("block id ${id} is known", ("id", msg.id()) ); return; } uint32_t num = 0; - - for( auto ss = c->in_sync_state.begin(); ss != c->in_sync_state.end(); ++ss ) { - if (msg.block_num() == ss->last + 1 && msg.block_num() <= ss->end_block) { - num = msg.block_num(); - ss.get_node()->value().last = num; - break; + bool syncing = c->in_sync_state.size() > 0; + if (syncing) { + for( auto ss = c->in_sync_state.begin(); ss != c->in_sync_state.end(); ss++ ) { + if (msg.block_num() == ss->last + 1 && msg.block_num() <= ss->end_block) { + num = msg.block_num(); + ss.get_node()->value().last = num; + break; + } + } + if (num == 0) { + elog ("Got out-of-order block ${n}",("n",msg.block_num())); + close (c); + return; } - } - if (num == 0) { - elog ("Got out-of-order block ${n}",("n",msg.block_num())); - close (c); - return; } try { - chain_plug->accept_block(msg, true); + chain_plug->accept_block(msg, syncing); } catch (const unlinkable_block_exception &ex) { elog ("unable to accpt block #${n}",("n",num)); close (c); @@ -724,41 +695,102 @@ namespace eos { } else { elog( "Error reading message from connection: ${m}", ("m", ec.message() ) ); } - if ( c->try_reconnect ) { -#warning ("TODO: Add reconnect logic after a read failure"); - } - else { - close( c ); - } + close( c ); }); } + void start_conn_timer () { + connector_check->expires_from_now (connector_period); + connector_check->async_wait ([&](boost::system::error_code ec) { + if (!ec) { + connection_monitor (); + } + else { + elog ("Error from connection check monitor: ${m}", ("m", ec.message())); + start_conn_timer (); + } + }); + } - void close( connection_ptr c ) { - if( c->socket ) - c->socket->close(); - connections.erase( c ); - c.reset (); + void start_txn_timer () { + transaction_check->expires_from_now (txn_exp_period); + transaction_check->async_wait ([&](boost::system::error_code ec) { + if (!ec) { + expire_txns (); + } + else { + elog ("Error from connection check monitor: ${m}", ("m", ec.message())); + start_txn_timer (); + } + }); } - void send_all_txn (const SignedTransaction& txn) { - if (last_recd_txn.get() && *last_recd_txn.get() == txn.id()) { - dlog ("skipping our received transacton"); - return; - } + void start_monitors () { + connector_check.reset(new boost::asio::steady_timer (app().get_io_service())); + transaction_check.reset(new boost::asio::steady_timer (app().get_io_service())); + start_conn_timer(); + start_txn_timer(); + } - if (true) { //txn.get_size() <= just_send_it_max) { - send_all (txn); - return; + void expire_txns () { + start_txn_timer (); +#warning ("TODO: Add by-expiry purging code"); + } + + void connection_monitor () { + start_conn_timer(); + vector discards; + for (auto &c : connections ) { + if (!c->socket->is_open() && !c->connecting) { + if (c->peer_addr.length() > 0) { + connect (c); + } + else { + discards.push_back (c); + } + } + } + if (discards.size () ) { + for (auto &c : discards) { + connections.erase( c ); + c.reset (); + } } + } - uint32_t psize = (pending_notify.size()+1) * sizeof (txn.id()); - if (psize >= my_impl->just_send_it_max) { - notice_message nm = {vector(), pending_notify}; - send_all (nm); + void close( connection_ptr c ) { + c->close(); + } + + void send_all_txn (const SignedTransaction& txn) { + uint16_t bn = static_cast(txn.refBlockNum); + node_transaction_state nts = {txn.id(),time_point::now(),txn.expiration, + txn,bn, true}; + local_txns.insert(nts); 
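// --- Illustrative sketch (not part of the patch; hypothetical demo_* names, standard library
// --- only). The hunk above replaces the per-message-type send_all overloads with a single
// --- template that takes a per-connection predicate: the caller decides for each peer whether
// --- it already knows the item, records it if not, and only then forwards the message. A
// --- minimal standalone version of that pattern, assuming simplified connection state:
#include <memory>
#include <set>
#include <string>
#include <vector>

struct demo_connection {
   std::set<std::string>    known_trx;   // stand-in for trx_state bookkeeping
   std::vector<std::string> sent;        // stand-in for out_queue
   void send(const std::string& msg) { sent.push_back(msg); }
};
using demo_connection_ptr = std::shared_ptr<demo_connection>;

// Analogue of net_plugin_impl::send_all(msg, verify): forward msg only to peers for which
// the verifier returns true.
template<typename VerifierFunc>
void demo_send_all(std::vector<demo_connection_ptr>& connections,
                   const std::string& msg, VerifierFunc verify) {
   for (auto& c : connections)
      if (verify(c))
         c->send(msg);
}

void demo_broadcast_txn(std::vector<demo_connection_ptr>& connections, const std::string& txn_id) {
   // Mirrors the lambda used in send_all_txn: mark the transaction as known per peer and
   // skip peers that already had it, so each item is sent to a given peer at most once.
   demo_send_all(connections, txn_id, [&txn_id](const demo_connection_ptr& c) -> bool {
      bool unknown = c->known_trx.count(txn_id) == 0;
      if (unknown)
         c->known_trx.insert(txn_id);
      return unknown;
   });
}
// --- Design note: one template plus a predicate replaces the three separate send_all
// --- overloads and keeps the per-peer "already known" bookkeeping at the call site.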
+ + if (sizeof(txn) <= just_send_it_max) { + send_all (txn, [txn](connection_ptr c) -> bool { + const auto& bs = c->trx_state.find(txn.id()); + bool unknown = bs == c->trx_state.end(); + if (unknown) + c->trx_state.insert(transaction_state({txn.id(),true,true,(uint32_t)-1, + fc::time_point(),fc::time_point() })); + return unknown; + }); + } + else { + pending_notify.push_back (txn.id()); + notice_message nm = {pending_notify}; + send_all (nm, [txn](connection_ptr c) -> bool { + const auto& bs = c->trx_state.find(txn.id()); + bool unknown = bs == c->trx_state.end(); + if (unknown) + c->trx_state.insert(transaction_state({txn.id(),false,true,(uint32_t)-1, + fc::time_point(),fc::time_point() })); + return unknown; + }); pending_notify.clear(); } - pending_notify.push_back(txn.id()); } /** @@ -830,7 +862,7 @@ namespace eos { void net_plugin::plugin_initialize( const variables_map& options ) { ilog("Initialize net plugin"); - auto resolver = std::make_shared( std::ref( app().get_io_service() ) ); + my->resolver = std::make_shared( std::ref( app().get_io_service() ) ); if( options.count( "listen-endpoint" ) ) { my->p2p_address = options.at("listen-endpoint").as< string >(); auto host = my->p2p_address.substr( 0, my->p2p_address.find(':') ); @@ -839,7 +871,7 @@ namespace eos { tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); // Note: need to add support for IPv6 too? - my->listen_endpoint = *resolver->resolve( query); + my->listen_endpoint = *my->resolver->resolve( query); my->acceptor.reset( new tcp::acceptor( app().get_io_service() ) ); } @@ -861,6 +893,8 @@ namespace eos { } } + my->send_whole_blocks = true; + if( options.count( "remote-endpoint" ) ) { my->supplied_peers = options.at( "remote-endpoint" ).as< vector >(); } @@ -870,25 +904,29 @@ namespace eos { my->chain_plug = app().find_plugin(); my->chain_plug->get_chain_id(my->chain_id); fc::rand_pseudo_bytes(my->node_id.data(), my->node_id.data_size()); - dlog("my node_id = ${n}", ("n",my->node_id)); + + my->connector_period = std::chrono::seconds (30); + my->txn_exp_period = std::chrono::seconds (3); my->just_send_it_max = 1300; } void net_plugin::plugin_startup() { if( my->acceptor ) { - my->acceptor->open(my->listen_endpoint.protocol()); my->acceptor->set_option(tcp::acceptor::reuse_address(true)); my->acceptor->bind(my->listen_endpoint); my->acceptor->listen(); - my->chain_plug->chain().on_pending_transaction.connect( &net_plugin_impl::pending_txn ); - my->start_listen_loop(); } + my->chain_plug->chain().on_pending_transaction.connect (&net_plugin_impl::pending_txn); + my->start_monitors(); + for( auto seed_node : my->supplied_peers ) { - my->connect( seed_node ); + connection_ptr c = std::make_shared(seed_node); + my->connections.insert (c); + my->connect( c ); } boost::asio::signal_set signals (app().get_io_service(), SIGINT, SIGTERM); signals.async_wait ([this](const boost::system::error_code &ec, int signum) { @@ -917,6 +955,10 @@ namespace eos { } FC_CAPTURE_AND_RETHROW() } void net_plugin::broadcast_block (const chain::signed_block &sb) { + if (my->send_whole_blocks) { + my->send_all (sb,[](connection_ptr c) -> bool { return true; }); + return; + } vector trxs; if (!sb.cycles.empty()) { for (const auto& cyc : sb.cycles) { @@ -928,14 +970,16 @@ namespace eos { } } - vector blks; - blks.push_back (sb.id()); - notice_message nm = {blks, my->pending_notify}; - my->send_all (nm); - - block_summary_message bsm = {sb, trxs}; - my->send_all (bsm); - my->pending_notify.clear(); + block_summary_message bsm = {sb.id(), 
trxs}; + my->send_all (bsm,[sb](connection_ptr c) -> bool { + return true; + const auto& bs = c->block_state.find(sb.id()); + if (bs == c->block_state.end()) { + c->block_state.insert ((block_state){sb.id(),true,true,fc::time_point()}); + return true; + } + return false; + }); } } diff --git a/plugins/wallet_api_plugin/CMakeLists.txt b/plugins/wallet_api_plugin/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..91912ac000e7f32b1eae855a30cb4f559116af53 --- /dev/null +++ b/plugins/wallet_api_plugin/CMakeLists.txt @@ -0,0 +1,16 @@ +file(GLOB HEADERS "include/eos/wallet_api_plugin/*.hpp") +add_library( wallet_api_plugin + wallet_api_plugin.cpp + ${HEADERS} ) + +target_link_libraries( wallet_api_plugin wallet_plugin http_plugin appbase ) +target_include_directories( wallet_api_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +install( TARGETS + wallet_api_plugin + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +install( FILES ${HEADERS} DESTINATION "include/eos/wallet_api_plugin" ) diff --git a/plugins/wallet_api_plugin/include/eos/wallet_api_plugin/wallet_api_plugin.hpp b/plugins/wallet_api_plugin/include/eos/wallet_api_plugin/wallet_api_plugin.hpp new file mode 100644 index 0000000000000000000000000000000000000000..1e997753c18bf3aab426483be69b772643d2620b --- /dev/null +++ b/plugins/wallet_api_plugin/include/eos/wallet_api_plugin/wallet_api_plugin.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +#include + +namespace eos { + +using namespace appbase; + +class wallet_api_plugin : public plugin { +public: + APPBASE_PLUGIN_REQUIRES((wallet_plugin) (http_plugin)) + + wallet_api_plugin() = default; + wallet_api_plugin(const wallet_api_plugin&) = delete; + wallet_api_plugin(wallet_api_plugin&&) = delete; + wallet_api_plugin& operator=(const wallet_api_plugin&) = delete; + wallet_api_plugin& operator=(wallet_api_plugin&&) = delete; + virtual ~wallet_api_plugin() override = default; + + virtual void set_program_options(options_description& cli, options_description& cfg) override {} + void plugin_initialize(const variables_map& vm); + void plugin_startup(); + void plugin_shutdown() {} + +private: +}; + +} diff --git a/plugins/wallet_api_plugin/wallet_api_plugin.cpp b/plugins/wallet_api_plugin/wallet_api_plugin.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7ef2864dbe1fef722f75d573d4372da9d3a46175 --- /dev/null +++ b/plugins/wallet_api_plugin/wallet_api_plugin.cpp @@ -0,0 +1,117 @@ +#include +#include +#include +#include + +#include +#include + +#include + +namespace eos { namespace detail { + struct wallet_api_plugin_empty {}; +}} + +FC_REFLECT(eos::detail::wallet_api_plugin_empty, ); + +namespace eos { + + +using namespace eos; + + +#define CALL(api_name, api_handle, call_name, INVOKE) \ +{std::string("/v1/" #api_name "/" #call_name), \ + [&api_handle](string, string body, url_response_callback cb) mutable { \ + try { \ + if (body.empty()) body = "{}"; \ + INVOKE \ + cb(200, fc::json::to_string(result)); \ + } catch (fc::eof_exception&) { \ + cb(400, "Invalid arguments"); \ + elog("Unable to parse arguments: ${args}", ("args", body)); \ + } catch (fc::exception& e) { \ + cb(500, e.to_detail_string()); \ + elog("Exception encountered while processing ${call}: ${e}", ("call", #api_name "." 
#call_name)("e", e)); \ + } \ + }} + +#define INVOKE_R_R(api_handle, call_name, in_param) \ + auto result = api_handle.call_name(fc::json::from_string(body).as()); + +#define INVOKE_R_R_R_R(api_handle, call_name, in_param0, in_param1, in_param2) \ + const auto& vs = fc::json::json::from_string(body).as(); \ + auto result = api_handle.call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); + +#define INVOKE_R_V(api_handle, call_name) \ + auto result = api_handle.call_name(); + +#define INVOKE_V_R(api_handle, call_name, in_param) \ + api_handle.call_name(fc::json::from_string(body).as()); \ + eos::detail::wallet_api_plugin_empty result; + +#define INVOKE_V_R_R(api_handle, call_name, in_param0, in_param1) \ + const auto& vs = fc::json::json::from_string(body).as(); \ + api_handle.call_name(vs.at(0).as(), vs.at(1).as()); \ + eos::detail::wallet_api_plugin_empty result; + +#define INVOKE_V_V(api_handle, call_name) \ + api_handle.call_name(); \ + eos::detail::wallet_api_plugin_empty result; + + +void wallet_api_plugin::plugin_startup() { + ilog("starting wallet_api_plugin"); + // lifetime of plugin is lifetime of application + auto& wallet_mgr = app().get_plugin().get_wallet_manager(); + + // TODO: http_plugin needs to add ability to restrict to localhost, once added add call here. + // TODO: For now see TODO below. + + app().get_plugin().add_api({ + CALL(wallet, wallet_mgr, set_timeout, + INVOKE_V_R(wallet_mgr, set_timeout, int64_t)), + CALL(wallet, wallet_mgr, sign_transaction, + INVOKE_R_R_R_R(wallet_mgr, sign_transaction, chain::SignedTransaction, flat_set, chain::chain_id_type)), + CALL(wallet, wallet_mgr, create, + INVOKE_R_R(wallet_mgr, create, std::string)), + CALL(wallet, wallet_mgr, open, + INVOKE_V_R(wallet_mgr, open, std::string)), + CALL(wallet, wallet_mgr, lock_all, + INVOKE_V_V(wallet_mgr, lock_all)), + CALL(wallet, wallet_mgr, lock, + INVOKE_V_R(wallet_mgr, lock, std::string)), + CALL(wallet, wallet_mgr, unlock, + INVOKE_V_R_R(wallet_mgr, unlock, std::string, std::string)), + CALL(wallet, wallet_mgr, import_key, + INVOKE_V_R_R(wallet_mgr, import_key, std::string, std::string)), + CALL(wallet, wallet_mgr, list_wallets, + INVOKE_R_V(wallet_mgr, list_wallets)), + CALL(wallet, wallet_mgr, list_keys, + INVOKE_R_V(wallet_mgr, list_keys)), + CALL(wallet, wallet_mgr, get_public_keys, + INVOKE_R_V(wallet_mgr, get_public_keys)) + }); +} + +void wallet_api_plugin::plugin_initialize(const variables_map& options) { + // TODO: see TODO above, this is temporary until http_plugin has option to restrict to localhost + if (options.count("http-server-endpoint")) { + const auto& lipstr = options.at("http-server-endpoint").as(); + const auto& host = lipstr.substr(0, lipstr.find(':')); + if (host != "localhost" && host != "127.0.0.1") { + FC_THROW("wallet api restricted to localhost"); + } + } +} + + +#undef INVOKE_R_R +#undef INVOKE_R_R_R_R +#undef INVOKE_R_V +#undef INVOKE_V_R +#undef INVOKE_V_R_R +#undef INVOKE_V_V +#undef CALL + +} diff --git a/plugins/wallet_plugin/CMakeLists.txt b/plugins/wallet_plugin/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d6101e6d5cb82d78cc31fd1db486e3393ef9094 --- /dev/null +++ b/plugins/wallet_plugin/CMakeLists.txt @@ -0,0 +1,18 @@ +file(GLOB HEADERS "include/eos/wallet_plugin/*.hpp") +add_library( wallet_plugin + wallet.cpp + wallet_plugin.cpp + wallet_manager.cpp + ${HEADERS} ) + +target_link_libraries( wallet_plugin eos_chain appbase ) +target_include_directories( wallet_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + 
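// --- Illustrative sketch (not part of the patch; hypothetical demo_* names, fc/http_plugin
// --- types replaced with standard-library stand-ins, JSON parsing elided). The CALL/INVOKE
// --- macros above generate, for each wallet endpoint, a {url, handler} pair: the handler
// --- parses the request body, invokes the wallet_manager method, and answers the callback
// --- with an HTTP status and a JSON string. Roughly, the registered handler looks like this:
#include <functional>
#include <map>
#include <stdexcept>
#include <string>

using url_response_callback = std::function<void(int status, const std::string& body)>;
using url_handler           = std::function<void(std::string body, url_response_callback cb)>;

struct demo_wallet_manager {
   void unlock(const std::string& name, const std::string& password) {
      if (password.empty()) throw std::runtime_error("invalid password for " + name);
   }
};

// Analogue of http_plugin::add_api: each endpoint becomes one entry in a url -> handler map.
std::map<std::string, url_handler> demo_make_wallet_api(demo_wallet_manager& wallet_mgr) {
   return {
      { "/v1/wallet/unlock",
        [&wallet_mgr](std::string body, url_response_callback cb) {
           try {
              if (body.empty()) body = "{}";
              // The real INVOKE_V_R_R macro decodes body as a JSON array of two strings via
              // fc::json; the parsing step is elided here and placeholder arguments are used.
              wallet_mgr.unlock("default", body);
              cb(200, "{}");              // empty-result reply, as the macros produce
           } catch (const std::exception& e) {
              cb(500, e.what());          // error detail on failure
           }
        } }
   };
}
// --- Usage note: the localhost check in plugin_initialize above is what keeps these
// --- key-handling endpoints from being exposed on non-local interfaces.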
+install( TARGETS + wallet_plugin + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +install( FILES ${HEADERS} DESTINATION "include/eos/wallet_plugin" ) diff --git a/libraries/wallet/include/eos/wallet/wallet.hpp b/plugins/wallet_plugin/include/eos/wallet_plugin/wallet.hpp similarity index 90% rename from libraries/wallet/include/eos/wallet/wallet.hpp rename to plugins/wallet_plugin/include/eos/wallet_plugin/wallet.hpp index 7168c0cfbedbbddb875e77d635828dbbf2813708..55fb421e3fe6d2d46525542bfab615cee4a105aa 100644 --- a/libraries/wallet/include/eos/wallet/wallet.hpp +++ b/plugins/wallet_plugin/include/eos/wallet_plugin/wallet.hpp @@ -15,18 +15,6 @@ typedef uint16_t transaction_handle_type; struct wallet_data { vector cipher_keys; /** encrypted keys */ - - string ws_server = "localhost"; - uint16_t ws_port = 8090; - string ws_user; - string ws_password; -}; - -enum authority_type -{ - owner, - active, - posting }; namespace detail { @@ -35,7 +23,7 @@ class wallet_api_impl; /** * This wallet assumes it is connected to the database server with a high-bandwidth, low-latency connection and - * performs minimal caching. This API could be provided locally to be used by a web interface. + * performs minimal caching. */ class wallet_api { @@ -59,6 +47,11 @@ class wallet_api */ string get_private_key( public_key_type pubkey )const; + /** + * Get the private key corresponding to a public key or nothing. + */ + optional try_get_private_key(const public_key_type& id)const; + /** * @param role - active | owner | posting | memo */ @@ -165,13 +158,6 @@ struct plain_keys { } } -FC_REFLECT( eos::wallet::wallet_data, - (cipher_keys) - (ws_server) - (ws_user) - (ws_password) - ) +FC_REFLECT( eos::wallet::wallet_data, (cipher_keys) ) FC_REFLECT( eos::wallet::plain_keys, (checksum)(keys) ) - -FC_REFLECT_ENUM( eos::wallet::authority_type, (owner)(active)(posting) ) diff --git a/plugins/wallet_plugin/include/eos/wallet_plugin/wallet_manager.hpp b/plugins/wallet_plugin/include/eos/wallet_plugin/wallet_manager.hpp new file mode 100644 index 0000000000000000000000000000000000000000..53e0b0cefbd17d15d432f278e2b67a370e136b3b --- /dev/null +++ b/plugins/wallet_plugin/include/eos/wallet_plugin/wallet_manager.hpp @@ -0,0 +1,112 @@ +#pragma once +#include +#include +#include +#include + +namespace fc { class variant; } + +namespace eos { +namespace wallet { + +/// Provides associate of wallet name to wallet and manages the interaction with each wallet. +/// +/// The name of the wallet is also used as part of the file name by wallet_api. See wallet_manager::create. +/// No const methods because timeout may cause lock_all() to be called. +class wallet_manager { +public: + wallet_manager() = default; + wallet_manager(const wallet_manager&) = delete; + wallet_manager(wallet_manager&&) = delete; + wallet_manager& operator=(const wallet_manager&) = delete; + wallet_manager& operator=(wallet_manager&&) = delete; + ~wallet_manager() = default; + + /// Set the path for location of wallet files. + /// @param p path to override default ./ location of wallet files. + void set_dir(const boost::filesystem::path& p) { dir = p; } + + /// Set the timeout for locking all wallets. + /// If set then after t seconds of inactivity then lock_all(). + /// Activity is defined as any wallet_manager method call below. + void set_timeout(const std::chrono::seconds& t); + + /// @see wallet_manager::set_timeout(const std::chrono::seconds& t) + /// @param secs The timeout in seconds. 
+ void set_timeout(int64_t secs) { set_timeout(std::chrono::seconds(secs)); } + + /// Sign transaction with the private keys specified via their public keys. + /// Use chain_controller::get_required_keys to determine which keys are needed for txn. + /// @param txn the transaction to sign. + /// @param keys the public keys of the corresponding private keys to sign the transaction with + /// @param id the chain_id to sign transaction with. + /// @return txn signed + /// @throws fc::exception if corresponding private keys not found in unlocked wallets + chain::SignedTransaction sign_transaction(const chain::SignedTransaction& txn, const flat_set& keys, + const chain::chain_id_type& id); + + /// Create a new wallet. + /// A new wallet is created in file dir/{name}.wallet see set_dir. + /// The new wallet is unlocked after creation. + /// @param name of the wallet and name of the file without ext .wallet. + /// @return Plaintext password that is needed to unlock wallet. Caller is responsible for saving password otherwise + /// they will not be able to unlock their wallet. Note user supplied passwords are not supported. + /// @throws fc::exception if wallet with name already exists (or filename already exists) + std::string create(const std::string& name); + + /// Open an existing wallet file dir/{name}.wallet. + /// Note this does not unlock the wallet, see wallet_manager::unlock. + /// @param name of the wallet file (minus ext .wallet) to open. + /// @throws fc::exception if unable to find/open the wallet file. + void open(const std::string& name); + + /// @return A list of wallet names with " *" appended if the wallet is unlocked. + std::vector list_wallets(); + + /// @return A list of private keys from all unlocked wallets in wif format. + std::vector list_keys(); + + /// @return A set of public keys from all unlocked wallets, use with chain_controller::get_required_keys. + flat_set get_public_keys(); + + /// Locks all the unlocked wallets. + void lock_all(); + + /// Lock the specified wallet. + /// No-op if wallet already locked. + /// @param name the name of the wallet to lock. + /// @throws fc::exception if wallet with name not found. + void lock(const std::string& name); + + /// Unlock the specified wallet. + /// The wallet remains unlocked until ::lock is called or program exit. + /// @param name the name of the wallet to lock. + /// @param password the plaintext password returned from ::create. + /// @throws fc::exception if wallet not found or invalid password. + void unlock(const std::string& name, const std::string& password); + + /// Import private key into specified wallet. + /// Imports a WIF Private Key into specified wallet. + /// Wallet must be opened and unlocked. + /// @param name the name of the wallet to import into. + /// @param wif_key the WIF Private Key to import, e.g. 5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3 + /// @throws fc::exception if wallet not found or locked. + void import_key(const std::string& name, const std::string& wif_key); + +private: + /// Verify timeout has not occurred and reset timeout if not. + /// Calls lock_all() if timeout has passed. 
+ void check_timeout(); + +private: + using timepoint_t = std::chrono::time_point; + std::map> wallets; + std::chrono::seconds timeout = std::chrono::seconds::max(); ///< how long to wait before calling lock_all() + mutable timepoint_t timeout_time = timepoint_t::max(); ///< when to call lock_all() + boost::filesystem::path dir = "."; +}; + +} // namespace wallet +} // namespace eos + + diff --git a/plugins/wallet_plugin/include/eos/wallet_plugin/wallet_plugin.hpp b/plugins/wallet_plugin/include/eos/wallet_plugin/wallet_plugin.hpp new file mode 100644 index 0000000000000000000000000000000000000000..025c4319b4f2e0f090770a966e045247254c0cce --- /dev/null +++ b/plugins/wallet_plugin/include/eos/wallet_plugin/wallet_plugin.hpp @@ -0,0 +1,42 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace fc { class variant; } + +namespace eos { + using namespace appbase; + + namespace wallet { + class wallet_manager; + } + using namespace wallet; + +class wallet_plugin : public plugin { +public: + APPBASE_PLUGIN_REQUIRES() + + wallet_plugin(); + wallet_plugin(const wallet_plugin&) = delete; + wallet_plugin(wallet_plugin&&) = delete; + wallet_plugin& operator=(const wallet_plugin&) = delete; + wallet_plugin& operator=(wallet_plugin&&) = delete; + virtual ~wallet_plugin() override = default; + + virtual void set_program_options(options_description& cli, options_description& cfg) override; + void plugin_initialize(const variables_map& options); + void plugin_startup() {} + void plugin_shutdown() {} + + // api interface provider + wallet_manager& get_wallet_manager(); + +private: + std::unique_ptr wallet_manager_ptr; +}; + +} + diff --git a/libraries/wallet/wallet.cpp b/plugins/wallet_plugin/wallet.cpp similarity index 94% rename from libraries/wallet/wallet.cpp rename to plugins/wallet_plugin/wallet.cpp index 976783fab88dda43d03b0f6261c4eb07c90f2cdd..81fc8e3b186dd71889337b4d63a3e9e0dc5b3fae 100644 --- a/libraries/wallet/wallet.cpp +++ b/plugins/wallet_plugin/wallet.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include @@ -58,11 +58,8 @@ public: wallet_api_impl( wallet_api& s, const wallet_data& initial_data ) : self( s ) { - _wallet.ws_server = initial_data.ws_server; - _wallet.ws_port = initial_data.ws_port; - _wallet.ws_user = initial_data.ws_user; - _wallet.ws_password = initial_data.ws_password; } + virtual ~wallet_api_impl() {} @@ -191,6 +188,10 @@ public: // http://en.wikipedia.org/wiki/Most_vexing_parse // ofstream outfile{ wallet_filename }; + if (!outfile) { + elog("Unable to open file: ${fn}", ("fn", wallet_filename)); + FC_THROW("Unable to open file: ${fn}", ("fn", wallet_filename)); + } outfile.write( data.c_str(), data.length() ); outfile.flush(); outfile.close(); @@ -316,6 +317,12 @@ string wallet_api::get_private_key( public_key_type pubkey )const return key_to_wif( my->get_private_key( pubkey ) ); } +optional wallet_api::try_get_private_key(const public_key_type& id)const +{ + return my->try_get_private_key(id); +} + + pair wallet_api::get_private_key_from_password( string account, string role, string password )const { auto seed = account + role + password; FC_ASSERT( seed.size() ); @@ -324,5 +331,10 @@ pair wallet_api::get_private_key_from_password( string a return std::make_pair( public_key_type( priv.get_public_key() ), key_to_wif( priv ) ); } +void wallet_api::set_wallet_filename(string wallet_filename) +{ + my->_wallet_filename = wallet_filename; +} + } } // eos::wallet diff --git a/plugins/wallet_plugin/wallet_manager.cpp 
b/plugins/wallet_plugin/wallet_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0fead926b0ead5888143cbf5067a51eba0d3a218 --- /dev/null +++ b/plugins/wallet_plugin/wallet_manager.cpp @@ -0,0 +1,176 @@ +#include +#include + +namespace eos { +namespace wallet { + +constexpr auto file_ext = ".wallet"; +constexpr auto password_prefix = "PW"; + +std::string gen_password() { + auto key = fc::ecc::private_key::generate(); + return password_prefix + utilities::key_to_wif(key); + +} + +void wallet_manager::set_timeout(const std::chrono::seconds& t) { + timeout = t; + timeout_time = std::chrono::system_clock::now() + timeout; +} + +void wallet_manager::check_timeout() { + if (timeout_time != timepoint_t::max()) { + const auto& now = std::chrono::system_clock::now(); + if (now >= timeout_time + timeout) { + lock_all(); + } + timeout_time = now + timeout; + } +} + +std::string wallet_manager::create(const std::string& name) { + check_timeout(); + std::string password = gen_password(); + + auto wallet_filename = dir / (name + file_ext); + if (fc::exists( dir / wallet_filename)) { + FC_THROW("Wallet with name: ${n} already exists.", ("n", name)); + } + + wallet_data d; + auto wallet = make_unique(d); + wallet->set_password(password); + wallet->set_wallet_filename(wallet_filename.string()); + wallet->unlock(password); + wallet->save_wallet_file(); + wallets.emplace(name, std::move(wallet)); + + return password; +} + +void wallet_manager::open(const std::string& name) { + check_timeout(); + wallet_data d; + auto wallet = std::make_unique(d); + auto wallet_filename = dir / (name + file_ext); + wallet->set_wallet_filename(wallet_filename.string()); + if (!wallet->load_wallet_file()) { + FC_THROW("Unable to open file: ${f}", ("f", wallet_filename.string())); + } + wallets.emplace(name, std::move(wallet)); +} + +std::vector wallet_manager::list_wallets() { + check_timeout(); + std::vector result; + for (const auto& i : wallets) { + if (i.second->is_locked()) { + result.emplace_back(i.first); + } else { + result.emplace_back(i.first + " *"); + } + } + return result; +} + +std::vector wallet_manager::list_keys() { + check_timeout(); + std::vector result; + for (const auto& i : wallets) { + if (!i.second->is_locked()) { + const auto& keys = i.second->list_keys(); + for (const auto& i : keys) { + result.emplace_back(i.second); + } + } + } + return result; +} + +flat_set wallet_manager::get_public_keys() { + check_timeout(); + flat_set result; + for (const auto& i : wallets) { + if (!i.second->is_locked()) { + const auto& keys = i.second->list_keys(); + for (const auto& i : keys) { + result.emplace(i.first); + } + } + } + return result; +} + + +void wallet_manager::lock_all() { + // no call to check_timeout since we are locking all anyway + for (auto& i : wallets) { + if (!i.second->is_locked()) { + i.second->lock(); + } + } +} + +void wallet_manager::lock(const std::string& name) { + check_timeout(); + if (wallets.count(name) == 0) { + FC_THROW("Wallet not found: ${w}", ("w", name)); + } + auto& w = wallets.at(name); + if (w->is_locked()) { + return; + } + w->lock(); +} + +void wallet_manager::unlock(const std::string& name, const std::string& password) { + check_timeout(); + if (wallets.count(name) == 0) { + FC_THROW("Wallet not found: ${w}", ("w", name)); + } + auto& w = wallets.at(name); + if (!w->is_locked()) { + return; + } + w->unlock(password); +} + +void wallet_manager::import_key(const std::string& name, const std::string& wif_key) { + check_timeout(); + if 
(wallets.count(name) == 0) { + FC_THROW("Wallet not found: ${w}", ("w", name)); + } + auto& w = wallets.at(name); + if (w->is_locked()) { + FC_THROW("Wallet is locked: ${w}", ("w", name)); + } + w->import_key(wif_key); +} + +chain::SignedTransaction +wallet_manager::sign_transaction(const chain::SignedTransaction& txn, const flat_set& keys, const chain::chain_id_type& id) { + check_timeout(); + chain::SignedTransaction stxn(txn); + + for (const auto& pk : keys) { + bool found = false; + for (const auto& i : wallets) { + if (!i.second->is_locked()) { + const auto& k = i.second->try_get_private_key(pk); + if (k) { + stxn.sign(*k, id); + found = true; + break; // inner for + } + } + } + if (!found) { + FC_THROW("Public key not found in unlocked wallets ${k}", ("k", pk)); + } + } + + return stxn; +} + +} // namespace wallet +} // namespace eos diff --git a/plugins/wallet_plugin/wallet_plugin.cpp b/plugins/wallet_plugin/wallet_plugin.cpp new file mode 100644 index 0000000000000000000000000000000000000000..469d937d18dba2abb160a52c49810eb6e76bd614 --- /dev/null +++ b/plugins/wallet_plugin/wallet_plugin.cpp @@ -0,0 +1,45 @@ +#include +#include +#include +#include + +namespace fc { class variant; } + +namespace eos { + +wallet_plugin::wallet_plugin() + : wallet_manager_ptr(new wallet_manager()) { +} + +wallet_manager& wallet_plugin::get_wallet_manager() { + return *wallet_manager_ptr; +} + +void wallet_plugin::set_program_options(options_description& cli, options_description& cfg) { + cli.add_options() + ("wallet-dir", bpo::value()->default_value("."), + "The path of the wallet files (absolute path or relative to application data dir)") + ("unlock-timeout", bpo::value(), + "Timeout for unlocked wallet in seconds. " + "Wallets will automatically lock after specified number of seconds of inactivity. " + "Activity is defined as any wallet command e.g. 
list-wallets.") + ; +} + +void wallet_plugin::plugin_initialize(const variables_map& options) { + ilog("initializing wallet plugin"); + + if (options.count("wallet-dir")) { + auto dir = options.at("wallet-dir").as(); + if (dir.is_relative()) + wallet_manager_ptr->set_dir(app().data_dir() / dir); + else + wallet_manager_ptr->set_dir(dir); + } + if (options.count("unlock-timeout")) { + auto timeout = options.at("unlock-timeout").as(); + std::chrono::seconds t(timeout); + wallet_manager_ptr->set_timeout(t); + } +} +} // namespace eos diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 17065b07daaca6518c79ad0fb9829e0b59d66d0b..8dec651fce3dbeb6b95b3857c575e46ec3c9a2a7 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -1,3 +1,4 @@ add_subdirectory( eosd ) add_subdirectory( eosc ) +add_subdirectory( eos-walletd ) add_subdirectory( launcher ) diff --git a/programs/eos-walletd/CMakeLists.txt b/programs/eos-walletd/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e3da7d1aa4add08bd509db637789ac2e1493d584 --- /dev/null +++ b/programs/eos-walletd/CMakeLists.txt @@ -0,0 +1,24 @@ +add_executable( eos-walletd main.cpp ) +if( UNIX AND NOT APPLE ) + set(rt_library rt ) +endif() + +find_package( Gperftools QUIET ) +if( GPERFTOOLS_FOUND ) + message( STATUS "Found gperftools; compiling eos-walletd with TCMalloc") + list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) +endif() + +target_link_libraries( eos-walletd + PRIVATE appbase + PRIVATE wallet_api_plugin wallet_plugin + PRIVATE http_plugin + PRIVATE eos_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + +install( TARGETS + eos-walletd + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) diff --git a/programs/eos-walletd/main.cpp b/programs/eos-walletd/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0a0848126b3a22ce9bbdf073f57ffc730dd1a288 --- /dev/null +++ b/programs/eos-walletd/main.cpp @@ -0,0 +1,35 @@ +#include + +#include +#include "eos/wallet_plugin/wallet_plugin.hpp" +#include "eos/wallet_api_plugin/wallet_api_plugin.hpp" + +#include +#include + +#include + +using namespace appbase; +using namespace eos; + +int main(int argc, char** argv) +{ + try { + app().register_plugin(); + app().register_plugin(); + app().register_plugin(); + if(!app().initialize(argc, argv)) + return -1; + app().startup(); + app().exec(); + } catch (const fc::exception& e) { + elog("${e}", ("e",e.to_detail_string())); + } catch (const boost::exception& e) { + elog("${e}", ("e",boost::diagnostic_information(e))); + } catch (const std::exception& e) { + elog("${e}", ("e",e.what())); + } catch (...) 
{ + elog("unknown exception"); + } + return 0; +} diff --git a/programs/eosc/main.cpp b/programs/eosc/main.cpp index fb6fad39b49bb736cad8167bbb17d79c4e634a17..af8ce84100ea50fd970fb4bb414f7356398a2c72 100644 --- a/programs/eosc/main.cpp +++ b/programs/eosc/main.cpp @@ -33,6 +33,12 @@ string program = "eosc"; string host = "localhost"; uint32_t port = 8888; +// restricting use of wallet to localhost +const string wallet_host = "localhost"; + +// TODO: make wallet_port a cli option when above host/port is made a cli option +constexpr uint32_t wallet_port = 8899; + const string chain_func_base = "/v1/chain"; const string get_info_func = chain_func_base + "/get_info"; const string push_txn_func = chain_func_base + "/push_transaction"; @@ -40,6 +46,7 @@ const string push_txns_func = chain_func_base + "/push_transactions"; const string json_to_bin_func = chain_func_base + "/abi_json_to_bin"; const string get_block_func = chain_func_base + "/get_block"; const string get_account_func = chain_func_base + "/get_account"; +const string get_required_keys = chain_func_base + "/get_required_keys"; const string account_history_func_base = "/v1/account_history"; const string get_transaction_func = account_history_func_base + "/get_transaction"; @@ -47,6 +54,19 @@ const string get_transactions_func = account_history_func_base + "/get_transacti const string get_key_accounts_func = account_history_func_base + "/get_key_accounts"; const string get_controlled_accounts_func = account_history_func_base + "/get_controlled_accounts"; +const string wallet_func_base = "/v1/wallet"; +const string wallet_create = wallet_func_base + "/create"; +const string wallet_open = wallet_func_base + "/open"; +const string wallet_list = wallet_func_base + "/list_wallets"; +const string wallet_list_keys = wallet_func_base + "/list_keys"; +const string wallet_public_keys = wallet_func_base + "/get_public_keys"; +const string wallet_lock = wallet_func_base + "/lock"; +const string wallet_lock_all = wallet_func_base + "/lock_all"; +const string wallet_unlock = wallet_func_base + "/unlock"; +const string wallet_import_key = wallet_func_base + "/import_key"; +const string wallet_sign_trx = wallet_func_base + "/sign_transaction"; + + inline std::vector sort_names( std::vector&& names ) { std::sort( names.begin(), names.end() ); auto itr = std::unique( names.begin(), names.end() ); @@ -105,17 +125,34 @@ eos::chain_apis::read_only::get_info_results get_info() { return call(host, port, get_info_func ).as(); } -fc::variant push_transaction( SignedTransaction& trx ) { +void sign_transaction(SignedTransaction& trx) { + // TODO better error checking + const auto& public_keys = call(wallet_host, wallet_port, wallet_public_keys); + auto get_arg = fc::mutable_variant_object + ("transaction", trx) + ("available_keys", public_keys); + const auto& required_keys = call(host, port, get_required_keys, get_arg); + // TODO determine chain id + fc::variants sign_args = {fc::variant(trx), required_keys["required_keys"], fc::variant(chain_id_type{})}; + const auto& signed_trx = call(wallet_host, wallet_port, wallet_sign_trx, sign_args); + trx = signed_trx.as(); +} + +fc::variant push_transaction( SignedTransaction& trx, bool sign ) { auto info = get_info(); trx.expiration = info.head_block_time + 100; //chain.head_block_time() + 100; transaction_set_reference_block(trx, info.head_block_id); boost::sort( trx.scope ); + if (sign) { + sign_transaction(trx); + } + return call( push_txn_func, trx ); } -void create_account(Name creator, Name newaccount, 
public_key_type owner, public_key_type active) { +void create_account(Name creator, Name newaccount, public_key_type owner, public_key_type active, bool sign) { auto owner_auth = eos::chain::Authority{1, {{owner, 1}}, {}}; auto active_auth = eos::chain::Authority{1, {{active, 1}}, {}}; auto recovery_auth = eos::chain::Authority{1, {}, {{{creator, "active"}, 1}}}; @@ -127,17 +164,7 @@ void create_account(Name creator, Name newaccount, public_key_type owner, public transaction_emplace_message(trx, config::EosContractName, vector{{creator,"active"}}, "newaccount", types::newaccount{creator, newaccount, owner_auth, active_auth, recovery_auth, deposit}); - if (creator == "inita") - { - fc::optional private_key = eos::utilities::wif_to_key("5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"); - if (private_key) - { - wlog("public key ${k}",("k", private_key->get_public_key())); - trx.sign(*private_key, eos::chain::chain_id_type{}); - } - } - - std::cout << fc::json::to_pretty_string(push_transaction(trx)) << std::endl; + std::cout << fc::json::to_pretty_string(push_transaction(trx, sign)) << std::endl; } int main( int argc, char** argv ) { @@ -160,13 +187,15 @@ int main( int argc, char** argv ) { string name; string ownerKey; string activeKey; + bool sign = false; auto createAccount = create->add_subcommand("account", "Create a new account on the blockchain", false); createAccount->add_option("creator", creator, "The name of the account creating the new account")->required(); createAccount->add_option("name", name, "The name of the new account")->required(); createAccount->add_option("OwnerKey", ownerKey, "The owner public key for the account")->required(); createAccount->add_option("ActiveKey", activeKey, "The active public key for the account")->required(); + createAccount->add_flag("-s,--sign", sign, "Specify if unlocked wallet keys should be used to sign transaction"); createAccount->set_callback([&] { - create_account(creator, name, public_key_type(ownerKey), public_key_type(activeKey)); + create_account(creator, name, public_key_type(ownerKey), public_key_type(activeKey), sign); }); // Get subcommand @@ -211,7 +240,7 @@ int main( int argc, char** argv ) { auto getServants = get->add_subcommand("servants", "Retrieve accounts which are servants of a given account ", false); getServants->add_option("account", controllingAccount, "The name of the controlling account")->required(); getServants->set_callback([&] { - auto arg = fc::mutable_variant_object( "accountName", controllingAccount); + auto arg = fc::mutable_variant_object( "controlling_account", controllingAccount); std::cout << fc::json::to_pretty_string(call(get_controlled_accounts_func, arg)) << std::endl; }); @@ -224,6 +253,23 @@ int main( int argc, char** argv ) { std::cout << fc::json::to_pretty_string(call(get_transaction_func, arg)) << std::endl; }); + // get transactions + string account_name; + string skip_seq; + string num_seq; + auto getTransactions = get->add_subcommand("transactions", "Retrieve all transactions with specific account name referenced in their scope", false); + getTransactions->add_option("account_name", account_name, "Name of account to query on")->required(); + getTransactions->add_option("skip_seq", skip_seq, "Number of most recent transactions to skip (0 would start at most recent transaction)"); + getTransactions->add_option("num_seq", num_seq, "Number of transactions to return"); + getTransactions->set_callback([&] { + auto arg = (skip_seq.empty()) + ? 
fc::mutable_variant_object( "account_name", account_name) + : (num_seq.empty()) + ? fc::mutable_variant_object( "account_name", account_name)("skip_seq", skip_seq) + : fc::mutable_variant_object( "account_name", account_name)("skip_seq", skip_seq)("num_seq", num_seq); + std::cout << fc::json::to_pretty_string(call(get_transactions_func, arg)) << std::endl; + }); + // Contract subcommand string account; string wastPath; @@ -234,6 +280,7 @@ int main( int argc, char** argv ) { ->check(CLI::ExistingFile); auto abi = contractSubcommand->add_option("abi-file,-a,--abi", abiPath, "The ABI for the contract") ->check(CLI::ExistingFile); + contractSubcommand->add_flag("-s,--sign", sign, "Specify if unlocked wallet keys should be used to sign transaction"); contractSubcommand->set_callback([&] { std::string wast; std::cout << "Reading WAST..." << std::endl; @@ -253,7 +300,7 @@ int main( int argc, char** argv ) { "setcode", handler); std::cout << "Publishing contract..." << std::endl; - std::cout << fc::json::to_pretty_string(push_transaction(trx)) << std::endl; + std::cout << fc::json::to_pretty_string(push_transaction(trx, sign)) << std::endl; }); // Transfer subcommand @@ -265,7 +312,8 @@ int main( int argc, char** argv ) { transfer->add_option("sender", sender, "The account sending EOS")->required(); transfer->add_option("recipient", recipient, "The account receiving EOS")->required(); transfer->add_option("amount", amount, "The amount of EOS to send")->required(); - transfer->add_option("memo", amount, "The memo for the transfer"); + transfer->add_option("memo", memo, "The memo for the transfer"); + transfer->add_flag("-s,--sign", sign, "Specify if unlocked wallet keys should be used to sign transaction"); transfer->set_callback([&] { SignedTransaction trx; trx.scope = sort_names({sender,recipient}); @@ -275,10 +323,87 @@ int main( int argc, char** argv ) { auto info = get_info(); trx.expiration = info.head_block_time + 100; //chain.head_block_time() + 100; transaction_set_reference_block(trx, info.head_block_id); + if (sign) { + sign_transaction(trx); + } std::cout << fc::json::to_pretty_string( call( push_txn_func, trx )) << std::endl; }); + + // Wallet subcommand + auto wallet = app.add_subcommand( "wallet", "Interact with local wallet", false ); + + // create wallet + string wallet_name; + auto createWallet = wallet->add_subcommand("create", "Create a new wallet locally", false); + createWallet->add_option("name", wallet_name, "The name of the new wallet")->required(); + createWallet->set_callback([&wallet_name] { + const auto& v = call(wallet_host, wallet_port, wallet_create, wallet_name); + std::cout << "Save password to use in the future to unlock this wallet." << std::endl; + std::cout << "Without password imported keys will not be retrievable." 
<< std::endl; + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // open wallet + auto openWallet = wallet->add_subcommand("open", "Open an existing wallet", false); + openWallet->add_option("name", wallet_name, "The name of the wallet to open")->required(); + openWallet->set_callback([&wallet_name] { + const auto& v = call(wallet_host, wallet_port, wallet_open, wallet_name); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // lock wallet + auto lockWallet = wallet->add_subcommand("lock", "Lock wallet", false); + lockWallet->add_option("name", wallet_name, "The name of the wallet to lock")->required(); + lockWallet->set_callback([&wallet_name] { + const auto& v = call(wallet_host, wallet_port, wallet_lock, wallet_name); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // lock all wallets + auto locakAllWallets = wallet->add_subcommand("lock_all", "Lock all unlocked wallets", false); + locakAllWallets->set_callback([] { + const auto& v = call(wallet_host, wallet_port, wallet_lock_all); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // unlock wallet + string wallet_pw; + auto unlockWallet = wallet->add_subcommand("unlock", "Unlock wallet", false); + unlockWallet->add_option("name", wallet_name, "The name of the wallet to unlock")->required(); + unlockWallet->add_option("password", wallet_pw, "The password returned by wallet create")->required(); + unlockWallet->set_callback([&wallet_name, &wallet_pw] { + fc::variants vs = {fc::variant(wallet_name), fc::variant(wallet_pw)}; + const auto& v = call(wallet_host, wallet_port, wallet_unlock, vs); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // import keys into wallet + string wallet_key; + auto importWallet = wallet->add_subcommand("import", "Import private key into wallet", false); + importWallet->add_option("name", wallet_name, "The name of the wallet to import key into")->required(); + importWallet->add_option("key", wallet_key, "Private key in WIF format to import")->required(); + importWallet->set_callback([&wallet_name, &wallet_key] { + fc::variants vs = {fc::variant(wallet_name), fc::variant(wallet_key)}; + const auto& v = call(wallet_host, wallet_port, wallet_import_key, vs); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // list wallets + auto listWallet = wallet->add_subcommand("list", "List opened wallets, * = unlocked", false); + listWallet->set_callback([] { + const auto& v = call(wallet_host, wallet_port, wallet_list); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + + // list keys + auto listKeys = wallet->add_subcommand("keys", "List of private keys from all unlocked wallets in wif format.", false); + listKeys->set_callback([] { + const auto& v = call(wallet_host, wallet_port, wallet_list_keys); + std::cout << fc::json::to_pretty_string(v) << std::endl; + }); + // Benchmark subcommand auto benchmark = app.add_subcommand( "benchmark", "Configure and execute benchmarks", false ); auto benchmark_setup = benchmark->add_subcommand( "setup", "Configures initial condition for benchmark" ); @@ -413,6 +538,7 @@ int main( int argc, char** argv ) { messageSubcommand->add_option("-p,--permission", permissions, "An account and permission level to authorize, as in 'account@permission'"); messageSubcommand->add_option("-s,--scope", scopes, "An account in scope for this operation", true); + messageSubcommand->add_flag("--sign", sign, "Specify if unlocked wallet keys should be used to sign transaction"); 
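+  // When --sign is passed, push_transaction() first fetches the public keys known to the
+  // local eos-walletd (wallet_host:wallet_port), asks eosd via get_required_keys which of
+  // them are needed, and then calls the wallet's sign_transaction endpoint before pushing.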
messageSubcommand->set_callback([&] { ilog("Converting argument to binary..."); auto arg= fc::mutable_variant_object @@ -434,7 +560,7 @@ int main( int argc, char** argv ) { fixedPermissions.back()}, result.get_object()["binargs"].as()); trx.scope.assign(scopes.begin(), scopes.end()); - ilog("Transaction result:\n${r}", ("r", fc::json::to_pretty_string(push_transaction(trx)))); + ilog("Transaction result:\n${r}", ("r", fc::json::to_pretty_string(push_transaction(trx, sign)))); }); // push transaction @@ -461,10 +587,17 @@ int main( int argc, char** argv ) { return app.exit(e); } catch (const fc::exception& e) { auto errorString = e.to_detail_string(); - if (errorString.find("Connection refused") != string::npos) - elog("Failed to connect to eosd at ${ip}:${port}; is eosd running?", ("ip", host)("port", port)); - else + if (errorString.find("Connection refused") != string::npos) { + if (errorString.find(fc::json::to_string(port)) != string::npos) { + elog("Failed to connect to eosd at ${ip}:${port}; is eosd running?", ("ip", host)("port", port)); + } else if (errorString.find(fc::json::to_string(wallet_port)) != string::npos) { + elog("Failed to connect to eos-walletd at ${ip}:${port}; is eos-walletd running?", ("ip", wallet_host)("port", wallet_port)); + } else { + elog("Failed to connect with error: ${e}", ("e", e.to_detail_string())); + } + } else { elog("Failed with error: ${e}", ("e", e.to_detail_string())); + } return 1; } diff --git a/programs/eosd/CMakeLists.txt b/programs/eosd/CMakeLists.txt index e29f68199ca5fa6d9fc13f5e0855ad7b8a0f60f6..2a01bec879c3609d9350d2e2e9178e02ca28982a 100644 --- a/programs/eosd/CMakeLists.txt +++ b/programs/eosd/CMakeLists.txt @@ -5,12 +5,16 @@ endif() find_package( Gperftools QUIET ) if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling steemd with TCMalloc") + message( STATUS "Found gperftools; compiling eosd with TCMalloc") list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() target_link_libraries( eosd - PRIVATE appbase account_history_api_plugin account_history_plugin chain_api_plugin producer_plugin chain_plugin net_plugin http_plugin eos_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE appbase + PRIVATE account_history_api_plugin account_history_plugin + PRIVATE chain_api_plugin producer_plugin chain_plugin + PRIVATE net_plugin http_plugin + PRIVATE eos_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) install( TARGETS eosd diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 4c77715b669049108d37e68414323477648200ea..96ea26cfc703cb6bfa5a09b30af67d91ec9e9cb7 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -8,7 +8,7 @@ endif() file(GLOB UNIT_TESTS "tests/*.cpp") add_executable( chain_test ${UNIT_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( chain_test eos_native_contract eos_chain chainbase eos_utilities eos_egenesis_none eos_wallet fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( chain_test eos_native_contract eos_chain chainbase eos_utilities eos_egenesis_none wallet_plugin fc ${PLATFORM_SPECIFIC_LIBS} ) if(WASM_TOOLCHAIN) file(GLOB SLOW_TESTS "slow_tests/*.cpp") diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index e6b841388ad283590c6a50beab1e20a2bd88ef14..2f324a0a75db879cb12f34b653b068aeaca4f6ac 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -168,21 +168,11 @@ types::PublicKey testing_blockchain::get_block_signing_key(const types::AccountN } void 
testing_blockchain::sign_transaction(SignedTransaction& trx) const { - auto GetAuthority = [this](const types::AccountPermission& permission) { - auto key = boost::make_tuple(permission.account, permission.permission); - return db.get(key).auth; - }; - auto checker = MakeAuthorityChecker(GetAuthority, get_global_properties().configuration.authDepthLimit, - fixture.available_keys()); - - for (const auto& message : trx.messages) - for (const auto& authorization : message.authorization) - if (!checker.satisfied(authorization)) - elog("Attempting to automatically sign transaction, but testing_fixture doesn't have the keys!"); - - for (const auto& key : checker.used_keys()) + auto keys = get_required_keys(trx, fixture.available_keys()); + for (const auto& k : keys) { // TODO: Use a real chain_id here - trx.sign(fixture.get_private_key(key), chain_id_type{}); + trx.sign(fixture.get_private_key(k), chain_id_type{}); + } } fc::optional testing_blockchain::push_transaction(SignedTransaction trx, uint32_t skip_flags) { diff --git a/tests/tests/chain_tests.cpp b/tests/tests/chain_tests.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e6122f6c6472ec91f261af12ddbd781c22cd929b --- /dev/null +++ b/tests/tests/chain_tests.cpp @@ -0,0 +1,54 @@ +#include + +#include +#include +#include + +#include + +#include + +#include +#include + +#include "../common/database_fixture.hpp" + +using namespace eos; +using namespace chain; + + +BOOST_AUTO_TEST_SUITE(chain_tests) + +// Test transaction signature chain_controller::get_required_keys +BOOST_FIXTURE_TEST_CASE(get_required_keys, testing_fixture) +{ try { + Make_Blockchain(chain) + + chain.set_auto_sign_transactions(false); + chain.set_skip_transaction_signature_checking(false); + + SignedTransaction trx; + trx.messages.resize(1); + transaction_set_reference_block(trx, chain.head_block_id()); + trx.expiration = chain.head_block_time() + 100; + trx.scope = sort_names( {"inita", "initb"} ); + types::transfer trans = { "inita", "initb", (100), "" }; + + trx.messages[0].type = "transfer"; + trx.messages[0].authorization = {{"inita", "active"}}; + trx.messages[0].code = config::EosContractName; + transaction_set_message(trx, 0, "transfer", trans); + BOOST_REQUIRE_THROW(chain.push_transaction(trx), tx_missing_sigs); + + auto required_keys = chain.get_required_keys(trx, available_keys()); + BOOST_CHECK(required_keys.size() < available_keys().size()); // otherwise not a very good test + chain.sign_transaction(trx); // uses get_required_keys + chain.push_transaction(trx); + + BOOST_CHECK_EQUAL(chain.get_liquid_balance("inita"), Asset(100000 - 100)); + BOOST_CHECK_EQUAL(chain.get_liquid_balance("initb"), Asset(100000 + 100)); + +} FC_LOG_AND_RETHROW() } + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/misc_tests.cpp b/tests/tests/misc_tests.cpp index 984e4c5ca750f8e50434fd9b7f8108cafbbd1b1b..d5b5cd9c1f3ae1562d5295a1b39ca2a8f73d7aa9 100644 --- a/tests/tests/misc_tests.cpp +++ b/tests/tests/misc_tests.cpp @@ -4,16 +4,17 @@ #include #include -#include #include #include +using namespace eos::chain; #include "../common/testing_macros.hpp" namespace eos { using namespace chain; +using namespace std; BOOST_AUTO_TEST_SUITE(misc_tests) @@ -264,57 +265,6 @@ BOOST_AUTO_TEST_CASE(authority_checker) } } FC_LOG_AND_RETHROW() } -/// Test creating the wallet -BOOST_AUTO_TEST_CASE(wallet_test) -{ try { - using namespace eos::wallet; - using namespace eos::utilities; - - wallet_data d; - d.ws_server = "test_server"; - d.ws_port = 99; - d.ws_user = "bob"; - 
d.ws_password = "user_pwd"; - - wallet_api wallet(d); - BOOST_CHECK(wallet.is_locked()); - - wallet.set_password("pass"); - BOOST_CHECK(wallet.is_locked()); - - wallet.unlock("pass"); - BOOST_CHECK(!wallet.is_locked()); - - BOOST_CHECK_EQUAL(0, wallet.list_keys().size()); - - auto priv = fc::ecc::private_key::generate(); - auto pub = public_key_type( priv.get_public_key() ); - auto wif = key_to_wif(priv.get_secret()); - wallet.import_key(wif); - BOOST_CHECK_EQUAL(1, wallet.list_keys().size()); - - auto privCopy = wallet.get_private_key(pub); - BOOST_CHECK_EQUAL(wif, privCopy); - - wallet.lock(); - BOOST_CHECK(wallet.is_locked()); - wallet.unlock("pass"); - BOOST_CHECK_EQUAL(1, wallet.list_keys().size()); - wallet.save_wallet_file("wallet_test.json"); - - wallet_data d2; - wallet_api wallet2(d2); - - BOOST_CHECK(wallet2.is_locked()); - wallet2.load_wallet_file("wallet_test.json"); - BOOST_CHECK(wallet2.is_locked()); - - wallet2.unlock("pass"); - BOOST_CHECK_EQUAL(1, wallet2.list_keys().size()); - - auto privCopy2 = wallet2.get_private_key(pub); - BOOST_CHECK_EQUAL(wif, privCopy2); -} FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/wallet_tests.cpp b/tests/tests/wallet_tests.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3858ee95f4800116ef62c9c68d92dc1d3e583ff0 --- /dev/null +++ b/tests/tests/wallet_tests.cpp @@ -0,0 +1,160 @@ +#include +#include +#include +#include + +#include + +namespace eos { + +BOOST_AUTO_TEST_SUITE(wallet_tests) + + +/// Test creating the wallet +BOOST_AUTO_TEST_CASE(wallet_test) +{ try { + using namespace eos::wallet; + using namespace eos::utilities; + + wallet_data d; + wallet_api wallet(d); + BOOST_CHECK(wallet.is_locked()); + + wallet.set_password("pass"); + BOOST_CHECK(wallet.is_locked()); + + wallet.unlock("pass"); + BOOST_CHECK(!wallet.is_locked()); + + wallet.set_wallet_filename("test"); + BOOST_CHECK_EQUAL("test", wallet.get_wallet_filename()); + + BOOST_CHECK_EQUAL(0, wallet.list_keys().size()); + + auto priv = fc::ecc::private_key::generate(); + auto pub = public_key_type( priv.get_public_key() ); + auto wif = key_to_wif(priv.get_secret()); + wallet.import_key(wif); + BOOST_CHECK_EQUAL(1, wallet.list_keys().size()); + + auto privCopy = wallet.get_private_key(pub); + BOOST_CHECK_EQUAL(wif, privCopy); + + wallet.lock(); + BOOST_CHECK(wallet.is_locked()); + wallet.unlock("pass"); + BOOST_CHECK_EQUAL(1, wallet.list_keys().size()); + wallet.save_wallet_file("wallet_test.json"); + + wallet_data d2; + wallet_api wallet2(d2); + + BOOST_CHECK(wallet2.is_locked()); + wallet2.load_wallet_file("wallet_test.json"); + BOOST_CHECK(wallet2.is_locked()); + + wallet2.unlock("pass"); + BOOST_CHECK_EQUAL(1, wallet2.list_keys().size()); + + auto privCopy2 = wallet2.get_private_key(pub); + BOOST_CHECK_EQUAL(wif, privCopy2); +} FC_LOG_AND_RETHROW() } + +/// Test wallet manager +BOOST_AUTO_TEST_CASE(wallet_manager_test) +{ try { + using namespace eos::wallet; + + if (fc::exists("test.wallet")) fc::remove("test.wallet"); + if (fc::exists("test2.wallet")) fc::remove("test2.wallet"); + + constexpr auto key1 = "5JktVNHnRX48BUdtewU7N1CyL4Z886c42x7wYW7XhNWkDQRhdcS"; + constexpr auto key2 = "5Ju5RTcVDo35ndtzHioPMgebvBM6LkJ6tvuU6LTNQv8yaz3ggZr"; + constexpr auto key3 = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; + + wallet_manager wm; + BOOST_CHECK_EQUAL(0, wm.list_wallets().size()); + BOOST_CHECK_EQUAL(0, wm.list_keys().size()); + BOOST_CHECK_NO_THROW(wm.lock_all()); + + BOOST_CHECK_THROW(wm.lock("test"), fc::exception); + 
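+   // unlock and import_key on a wallet that has not been created or opened must also throw.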
BOOST_CHECK_THROW(wm.unlock("test", "pw"), fc::exception); + BOOST_CHECK_THROW(wm.import_key("test", "pw"), fc::exception); + + auto pw = wm.create("test"); + BOOST_CHECK(!pw.empty()); + BOOST_CHECK_EQUAL(0, pw.find("PW")); // starts with PW + BOOST_CHECK_EQUAL(1, wm.list_wallets().size()); + BOOST_CHECK_EQUAL(0, wm.list_keys().size()); // no keys + BOOST_CHECK(wm.list_wallets().at(0).find("*") != std::string::npos); + wm.lock("test"); + BOOST_CHECK(wm.list_wallets().at(0).find("*") == std::string::npos); + wm.unlock("test", pw); + BOOST_CHECK(wm.list_wallets().at(0).find("*") != std::string::npos); + wm.import_key("test", key1); + BOOST_CHECK_EQUAL(1, wm.list_keys().size()); + auto keys = wm.list_keys(); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key1) != keys.cend()); + wm.import_key("test", key2); + keys = wm.list_keys(); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key1) != keys.cend()); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key2) != keys.cend()); + wm.lock("test"); + BOOST_CHECK_EQUAL(0, wm.list_keys().size()); + wm.unlock("test", pw); + BOOST_CHECK_EQUAL(2, wm.list_keys().size()); + wm.lock_all(); + BOOST_CHECK_EQUAL(0, wm.list_keys().size()); + BOOST_CHECK(wm.list_wallets().at(0).find("*") == std::string::npos); + + auto pw2 = wm.create("test2"); + BOOST_CHECK_EQUAL(2, wm.list_wallets().size()); + BOOST_CHECK_EQUAL(0, wm.list_keys().size()); + wm.import_key("test2", key3); + keys = wm.list_keys(); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key1) == keys.cend()); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key2) == keys.cend()); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key3) != keys.cend()); + wm.unlock("test", pw); + keys = wm.list_keys(); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key1) != keys.cend()); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key2) != keys.cend()); + BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), key3) != keys.cend()); + + fc::optional optional_private_key1 = utilities::wif_to_key(key1); + fc::optional optional_private_key2 = utilities::wif_to_key(key2); + fc::optional optional_private_key3 = utilities::wif_to_key(key3); + + chain::SignedTransaction trx; + Name sender("billgates"); + Name recipient("kevinheifner"); + uint64_t amount = 100000000; + trx.scope = {sender,recipient}; + transaction_emplace_message(trx,config::EosContractName, vector{{sender,"active"}}, "transfer", + types::transfer{sender, recipient, amount, "deposit"}); + trx = wm.sign_transaction(trx, + { optional_private_key1->get_public_key(), optional_private_key2->get_public_key(), optional_private_key3->get_public_key()}, + chain_id_type{}); + const auto& pks = trx.get_signature_keys(chain_id_type{}); + BOOST_CHECK_EQUAL(3, pks.size()); + BOOST_CHECK(find(pks.cbegin(), pks.cend(), optional_private_key1->get_public_key()) != pks.cend()); + BOOST_CHECK(find(pks.cbegin(), pks.cend(), optional_private_key2->get_public_key()) != pks.cend()); + BOOST_CHECK(find(pks.cbegin(), pks.cend(), optional_private_key3->get_public_key()) != pks.cend()); + + BOOST_CHECK_EQUAL(3, wm.list_keys().size()); + wm.set_timeout(chrono::seconds(0)); + BOOST_CHECK_EQUAL(0, wm.list_keys().size()); + + + BOOST_CHECK(fc::exists("test.wallet")); + BOOST_CHECK(fc::exists("test2.wallet")); + fc::remove("test.wallet"); + fc::remove("test2.wallet"); + +} FC_LOG_AND_RETHROW() } + + + +BOOST_AUTO_TEST_SUITE_END() + +} // namespace eos
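For orientation, here is a minimal sketch of driving the new wallet_manager directly, using only the operations declared in wallet_manager.hpp above. It is illustrative rather than part of the patch: the include path is assumed from plugins/wallet_plugin/include, the WIF key is the example key from the header comments, and fc::exception handling is omitted.

    #include <eos/wallet_plugin/wallet_manager.hpp>   // assumed include path
    #include <chrono>
    #include <iostream>
    #include <string>

    int main() {
       eos::wallet::wallet_manager wm;
       wm.set_dir(".");                                // wallets are stored as ./<name>.wallet
       wm.set_timeout(std::chrono::seconds(900));      // lock after a period of inactivity

       // create() writes default.wallet, leaves it unlocked and returns the only
       // password that will ever unlock it.
       const std::string password = wm.create("default");
       std::cout << "save this password: " << password << std::endl;

       // Import a signing key (the example WIF key from the header documentation).
       wm.import_key("default", "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3");

       // Candidate keys for chain_controller::get_required_keys; the required subset
       // would then be passed to wm.sign_transaction(txn, required_keys, chain_id).
       const auto public_keys = wm.get_public_keys();
       std::cout << public_keys.size() << " unlocked public key(s)" << std::endl;

       wm.lock_all();                                  // same effect as an expired timeout
       return 0;
    }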