Commit b42bd534 authored by Alexey Milovidov

Merge branch 'master' into correct-parse-floats

@@ -96,10 +96,14 @@ endif ()
if (USE_INTERNAL_POCO_LIBRARY)
set (save_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
+set (_save ${ENABLE_TESTS})
+set (ENABLE_TESTS 0)
set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1)
add_subdirectory (poco)
unset (CMAKE_DISABLE_FIND_PACKAGE_ZLIB)
+set (ENABLE_TESTS ${_save})
set (CMAKE_CXX_FLAGS ${save_CMAKE_CXX_FLAGS})
set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS})
endif ()
# This strings autochanged from release_lib.sh:
-set(VERSION_DESCRIBE v1.1.54332-testing)
-set(VERSION_REVISION 54332)
+set(VERSION_DESCRIBE v1.1.54333-testing)
+set(VERSION_REVISION 54333)
# end of autochange
set (VERSION_MAJOR 1)
...
@@ -83,7 +83,7 @@ private:
}
ColumnAggregateFunction(const ColumnAggregateFunction & src_)
-    : arenas(src_.arenas), func(src_.func), src(src_.getPtr())
+    : arenas(src_.arenas), func(src_.func), src(src_.getPtr()), data(src_.data.begin(), src_.data.end())
{
}
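The hunk above fixes a copy constructor whose initializer list omitted the `data` member, so copies of the column silently lost their aggregate-state pointers. A minimal sketch of this bug class (hypothetical type, not the actual ClickHouse class):

```cpp
#include <cassert>
#include <vector>

// Hypothetical reduction of the bug fixed above: a copy constructor that
// lists some members in its initializer list but forgets a container,
// so every copy silently starts out empty.
struct Column
{
    std::vector<void *> data;  // like ColumnAggregateFunction::data: pointers to aggregate states

    Column() = default;

    // Buggy version (what the old code did, in effect):
    //   Column(const Column & src) : /* other members, but not data */ {}

    // Fixed version, mirroring the hunk: copy the pointer range explicitly.
    Column(const Column & src) : data(src.data.begin(), src.data.end()) {}
};

int main()
{
    Column a;
    a.data.push_back(nullptr);
    Column b(a);
    assert(b.data.size() == a.data.size());  // holds only with the fix
}
```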
...
@@ -22,17 +22,21 @@ const char * auto_config_build[]
"BUILD_COMPILE_DEFINITIONS", "@BUILD_COMPILE_DEFINITIONS@",
"BUILD_INCLUDE_DIRECTORIES", "@BUILD_INCLUDE_DIRECTORIES@",
"STATIC", "@USE_STATIC_LIBRARIES@",
"USE_CAPNP", "@USE_CAPNP@",
"USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@",
"USE_INTERNAL_MEMCPY", "@USE_INTERNAL_MEMCPY@",
"USE_GLIBC_COMPATIBILITY", "@GLIBC_COMPATIBILITY@",
"USE_JEMALLOC", "@USE_JEMALLOC@",
"USE_TCMALLOC", "@USE_TCMALLOC@",
"USE_UNWIND", "@USE_UNWIND@",
"USE_ICU", "@USE_ICU@",
"USE_MYSQL", "@USE_MYSQL@",
"USE_RE2_ST", "@USE_RE2_ST@",
"USE_VECTORCLASS", "@USE_VECTORCLASS@",
"USE_RDKAFKA", "@USE_RDKAFKA@",
"USE_CAPNP", "@USE_CAPNP@",
"USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@",
"USE_Poco_DataODBC", "@Poco_DataODBC_FOUND",
"USE_Poco_MongoDB", "@Poco_MongoDB_FOUND",
"USE_Poco_NetSSL", "@Poco_NetSSL_FOUND",
"USE_Poco_DataODBC", "@Poco_DataODBC_FOUND@",
"USE_Poco_MongoDB", "@Poco_MongoDB_FOUND@",
"USE_Poco_NetSSL", "@Poco_NetSSL_FOUND@",
nullptr, nullptr
};
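Two things happen in this hunk: the USE_CAPNP / USE_EMBEDDED_COMPILER entries move down the list, and the Poco placeholders get their missing trailing `@`. The placeholders suggest this file is generated with CMake's `configure_file`, which substitutes only complete `@VAR@` tokens, so `"@Poco_DataODBC_FOUND"` would previously have been emitted verbatim instead of being replaced with the configured value. For context, here is a minimal sketch (hypothetical values) of how a nullptr-terminated key/value array like `auto_config_build` is typically consumed:

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the generated auto_config_build array:
// keys and values alternate, terminated by a nullptr pair.
static const char * build_options[] =
{
    "USE_CAPNP", "1",
    "USE_Poco_NetSSL", "0",
    nullptr, nullptr
};

int main()
{
    // Walk the pairs until the sentinel, advancing two entries at a time.
    for (std::size_t i = 0; build_options[i] != nullptr; i += 2)
        std::printf("%s = %s\n", build_options[i], build_options[i + 1]);
}
```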
@@ -67,7 +67,7 @@ const Block & TotalsHavingBlockInputStream::getTotals()
|| totals_mode == TotalsMode::AFTER_HAVING_INCLUSIVE
|| (totals_mode == TotalsMode::AFTER_HAVING_AUTO
&& static_cast<double>(passed_keys) / total_keys >= auto_include_threshold))
-addToTotals(current_totals, overflow_aggregates, nullptr);
+addToTotals(overflow_aggregates, nullptr);
}
totals = header.cloneWithColumns(std::move(current_totals));
@@ -110,7 +110,7 @@ Block TotalsHavingBlockInputStream::readImpl()
if (filter_column_name.empty())
{
-addToTotals(current_totals, block, nullptr);
+addToTotals(block, nullptr);
}
else
{
@@ -127,9 +127,9 @@ Block TotalsHavingBlockInputStream::readImpl()
/// Add values to `totals` (if it was not already done).
if (totals_mode == TotalsMode::BEFORE_HAVING)
-addToTotals(current_totals, block, nullptr);
+addToTotals(block, nullptr);
else
-addToTotals(current_totals, block, filter_description.data);
+addToTotals(block, filter_description.data);
/// Filter the block by expression in HAVING.
size_t columns = finalized.columns();
@@ -155,11 +155,10 @@ Block TotalsHavingBlockInputStream::readImpl()
}
-void TotalsHavingBlockInputStream::addToTotals(MutableColumns & totals, const Block & block, const IColumn::Filter * filter)
+void TotalsHavingBlockInputStream::addToTotals(const Block & block, const IColumn::Filter * filter)
{
-bool need_init = totals.empty();
+bool need_init = !arena;
-ArenaPtr arena;
if (need_init)
arena = std::make_shared<Arena>();
@@ -174,7 +173,7 @@ void TotalsHavingBlockInputStream::addToTotals(MutableColumns & totals, const Bl
{
MutableColumnPtr new_column = current.type->createColumn();
current.type->insertDefaultInto(*new_column);
-totals.emplace_back(std::move(new_column));
+current_totals.emplace_back(std::move(new_column));
}
continue;
}
@@ -193,11 +192,11 @@ void TotalsHavingBlockInputStream::addToTotals(MutableColumns & totals, const Bl
function->create(data);
target->getData().push_back(data);
-totals.emplace_back(std::move(target));
+current_totals.emplace_back(std::move(target));
}
else
{
auto & target = typeid_cast<ColumnAggregateFunction &>(*totals[i]);
auto & target = typeid_cast<ColumnAggregateFunction &>(*current_totals[i]);
function = target.getAggregateFunction().get();
data = target.getData()[0];
}
...
@@ -51,9 +51,11 @@ private:
/// Here, total values are accumulated. After the work is finished, they will be placed in IProfilingBlockInputStream::totals.
MutableColumns current_totals;
+/// Arena for aggregate function states in totals.
+ArenaPtr arena;
/// If filter == nullptr - add all rows. Otherwise, only the rows that pass the filter (HAVING).
-void addToTotals(MutableColumns & totals, const Block & block, const IColumn::Filter * filter);
+void addToTotals(const Block & block, const IColumn::Filter * filter);
};
}
@@ -6,6 +6,7 @@
#include <Storages/StorageDictionary.h>
#include <Storages/StorageFactory.h>
#include <Interpreters/Context.h>
+#include <Interpreters/evaluateConstantExpression.h>
#include <Interpreters/ExternalDictionaries.h>
#include <Parsers/ASTLiteral.h>
#include <common/logger_useful.h>
@@ -97,6 +98,7 @@ void registerStorageDictionary(StorageFactory & factory)
throw Exception("Storage Dictionary requires single parameter: name of dictionary",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+args.engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(args.engine_args[0], args.local_context);
String dictionary_name = typeid_cast<const ASTLiteral &>(*args.engine_args[0]).value.safeGet<String>();
const auto & dictionary = args.context.getExternalDictionaries().getDictionary(dictionary_name);
...
@@ -141,9 +141,9 @@ void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns, con
columns[j++]->insertDefault();
}
-columns[j++]->insert(part->getColumnCompressedSize(column.name));
-columns[j++]->insert(part->getColumnUncompressedSize(column.name));
-columns[j++]->insert(part->getColumnMrkSize(column.name));
+columns[j++]->insert(static_cast<UInt64>(part->getColumnCompressedSize(column.name)));
+columns[j++]->insert(static_cast<UInt64>(part->getColumnUncompressedSize(column.name)));
+columns[j++]->insert(static_cast<UInt64>(part->getColumnMrkSize(column.name)));
if (has_state_column)
columns[j++]->insert(part->stateString());
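The casts pin down the integer type before the value is wrapped for insertion. A plausible reduction of the problem (a sketch under assumptions, not the actual ClickHouse `Field`/`IColumn` API): with an overload set keyed on fixed-width integers, passing a `size_t` is ambiguous on platforms where `size_t` is neither exactly `uint64_t` nor `int64_t`.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical overload set resembling a Field-style constructor.
void insert(uint64_t v) { std::printf("as UInt64: %llu\n", static_cast<unsigned long long>(v)); }
void insert(int64_t v)  { std::printf("as Int64: %lld\n", static_cast<long long>(v)); }

int main()
{
    std::size_t bytes = 1024;
    // insert(bytes);                      // ambiguous where size_t matches
                                           // neither overload exactly
    insert(static_cast<uint64_t>(bytes));  // unambiguous, as in the hunk
}
```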
...
@@ -89,6 +89,8 @@ def main(args):
base_dir = os.path.abspath(args.queries)
tmp_dir = os.path.abspath(args.tmp)
+passed_total = 0
+skipped_total = 0
failures_total = 0
# Keep same default values as in queries/0_stateless/00000_sh_lib.sh
@@ -114,6 +116,7 @@ def main(args):
print("\nRunning {} tests.\n".format(suite))
failures = 0
+failures_chain = 0
if 'stateful' in suite and not is_data_present():
print("Won't run stateful tests because test data wasn't loaded. See README.txt.")
continue
@@ -156,9 +159,11 @@ def main(args):
if not args.zookeeper and 'zookeeper' in name:
report_testcase.append(et.Element("skipped", attrib = {"message": "no zookeeper"}))
print(MSG_SKIPPED + " - no zookeeper")
+skipped_total += 1
elif not args.shard and 'shard' in name:
report_testcase.append(et.Element("skipped", attrib = {"message": "no shard"}))
print(MSG_SKIPPED + " - no shard")
+skipped_total += 1
else:
disabled_file = os.path.join(suite_dir, name) + '.disabled'
@@ -191,7 +196,7 @@ def main(args):
failure = et.Element("failure", attrib = {"message": "Timeout"})
report_testcase.append(failure)
-failures = failures + 1
+failures += 1
print("{0} - Timeout!".format(MSG_FAIL))
else:
stdout = open(stdout_file, 'r').read() if os.path.exists(stdout_file) else ''
@@ -207,7 +212,8 @@ def main(args):
stdout_element.text = et.CDATA(stdout)
report_testcase.append(stdout_element)
-failures = failures + 1
+failures += 1
+failures_chain += 1
print("{0} - return code {1}".format(MSG_FAIL, proc.returncode))
if stderr:
@@ -227,7 +233,8 @@ def main(args):
stderr_element.text = et.CDATA(stderr)
report_testcase.append(stderr_element)
-failures = failures + 1
+failures += 1
+failures_chain += 1
print("{0} - having stderror:\n{1}".format(MSG_FAIL, stderr.encode('utf-8')))
elif 'Exception' in stdout:
failure = et.Element("error", attrib = {"message": "having exception"})
@@ -237,7 +244,8 @@ def main(args):
stdout_element.text = et.CDATA(stdout)
report_testcase.append(stdout_element)
-failures = failures + 1
+failures += 1
+failures_chain += 1
print("{0} - having exception:\n{1}".format(MSG_FAIL, stdout.encode('utf-8')))
elif not os.path.isfile(reference_file):
skipped = et.Element("skipped", attrib = {"message": "no reference file"})
@@ -260,9 +268,11 @@ def main(args):
stdout_element.text = et.CDATA(remove_control_characters(diff))
report_testcase.append(stdout_element)
-failures = failures + 1
+failures += 1
print("{0} - result differs with reference:\n{1}".format(MSG_FAIL, diff.encode('utf-8')))
else:
+passed_total += 1
+failures_chain = 0
print(MSG_OK)
if os.path.exists(stdout_file):
os.remove(stdout_file)
@@ -276,25 +286,28 @@ def main(args):
error = et.Element("error", attrib = {"type": exc_type.__name__, "message": str(exc_value)})
report_testcase.append(error)
-failures = failures + 1
+failures += 1
print("{0} - Test internal error: {1}\n{2}".format(MSG_FAIL, exc_type.__name__, exc_value))
finally:
dump_report(args.output, suite, name, report_testcase)
+if failures_chain >= 20:
+    break
failures_total = failures_total + failures
if failures_total > 0:
print(colored("\nHaving {0} errors!".format(failures_total), "red", attrs=["bold"]))
print(colored("\nHaving {failures_total} errors! {passed_total} tests passed. {skipped_total} tests skipped.".format(passed_total = passed_total, skipped_total = skipped_total, failures_total = failures_total), "red", attrs=["bold"]))
sys.exit(1)
else:
print(colored("\nAll tests passed.", "green", attrs=["bold"]))
print(colored("\n{passed_total} tests passed. {skipped_total} tests skipped.".format(passed_total = passed_total, skipped_total = skipped_total), "green", attrs=["bold"]))
sys.exit(0)
if __name__ == '__main__':
parser = ArgumentParser(description = 'ClickHouse functional tests')
-parser.add_argument('-q', '--queries', default = 'queries', help = 'Path to queries dir')
-parser.add_argument('--tmp', default = 'queries', help = 'Path to tmp dir')
+parser.add_argument('-q', '--queries', help = 'Path to queries dir')
+parser.add_argument('--tmp', help = 'Path to tmp dir')
parser.add_argument('-b', '--binary', default = 'clickhouse', help = 'Main clickhouse binary')
parser.add_argument('-c', '--client', help = 'Client program')
parser.add_argument('--clientconfig', help = 'Client config (if you use not default ports)')
@@ -314,6 +327,16 @@ if __name__ == '__main__':
group.add_argument('--no-shard', action = 'store_false', default = None, dest = 'shard', help = 'Do not run shard related tests')
args = parser.parse_args()
+if args.queries is None and os.path.isdir('queries'):
+    args.queries = 'queries'
+    if args.tmp is None:
+        args.tmp = args.queries
+else:
+    args.queries = '/usr/share/clickhouse-test/queries'
+    if args.tmp is None:
+        args.tmp = '/tmp/clickhouse-test'
+if args.client is None:
+    args.client = args.binary + '-client'
if args.clientconfig:
...
@@ -7,11 +7,18 @@ This directory contains tests that involve several ClickHouse instances, custom
Prerequisites:
* Ubuntu 14.04 (Trusty).
* [docker](https://www.docker.com/community-edition#/download). Minimum required API version: 1.25, check with `docker version`.
+You must install the latest Docker from
+https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository
+Don't use Docker from your system repository.
* [pip](https://pypi.python.org/pypi/pip). To install: `sudo apt-get install python-pip`
* [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml`
+If you want to run the tests under a non-privileged user, you must add this user to the `docker` group (`sudo usermod -aG docker $USER`) and re-login; you must close all your sessions (for example, by restarting your computer).
+To check that you have access to Docker, run `docker ps`.
Run the tests with the `pytest` command. To select which tests to run, use: `pytest -k <test_name_pattern>`
...
*** Without PARTITION BY and ORDER BY ***
1
2
*** Replicated with sampling ***
1
*** Replacing with implicit version ***
...
123456789 Hello 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890
234567890 World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890
0 World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890
SET group_by_two_level_threshold = 1, max_threads = 1;
SELECT
k,
anyLast(s)
FROM
(
SELECT
123456789 AS k,
'Hello 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890' AS s
UNION ALL
SELECT
234567890,
'World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890'
)
GROUP BY k
WITH TOTALS
HAVING length(anyLast(s)) > 0
ORDER BY k;
/* There was a bug in the implementation of WITH TOTALS.
* When there was more than one block after aggregation,
* nullptr was passed to IAggregateFunction::merge instead of a pointer to a valid Arena.
*
* To reproduce, we set 'group_by_two_level_threshold' to small value to enable two-level aggregation.
* Only in two-level aggregation there are many blocks after GROUP BY.
*
* Also, we use UNION ALL in the subquery to generate two blocks before GROUP BY,
* because two-level aggregation may be triggered only after a block is processed.
*
* We use large numbers as the key, because for 8- and 16-bit numbers
* two-level aggregation is not possible, as a simple aggregation method is used.
* These numbers happen to hash to different buckets, and thus we have two blocks after GROUP BY.
*
* Also, we use long strings (at least 64 bytes) in the aggregation state,
* because the aggregate functions min/max/any/anyLast use an Arena only for sufficiently long strings.
*
* And we use the function 'anyLast' so that IAggregateFunction::merge is called for every new value.
*
* We use a useless HAVING (that is always true), because in the absence of HAVING,
* TOTALS are calculated in a simple way in the same pass during aggregation, not in TotalsHavingBlockInputStream,
* and the bug doesn't trigger.
*
* We use ORDER BY so that the result of the test is deterministic;
* max_threads = 1 gives a deterministic order of results in the subquery and a deterministic value of 'anyLast'.
*/
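The TotalsHavingBlockInputStream hunks above implement the fix this comment describes: the `Arena` owning the aggregate-function states used to be a local variable inside `addToTotals`, so it died after the first block, and states accumulated into `current_totals` could later be merged without a valid arena. Moving it to a member (`ArenaPtr arena;` with `bool need_init = !arena;`) ties its lifetime to the totals. A minimal sketch of the pattern (hypothetical types, not the actual stream class):

```cpp
#include <memory>
#include <vector>

struct Arena {};  // stand-in for the bump allocator that owns aggregate states
using ArenaPtr = std::shared_ptr<Arena>;

class TotalsAccumulator
{
    std::vector<long> current_totals;  // stand-in for MutableColumns
    ArenaPtr arena;                    // the fix: lives as long as the totals themselves

public:
    void addToTotals(long value)
    {
        // Before the fix, an ArenaPtr was created here as a local on the first
        // call and destroyed on return; states that referenced it were merged
        // on later calls without any valid Arena to allocate from.
        if (!arena)                    // need_init, as in the fixed code
        {
            arena = std::make_shared<Arena>();
            current_totals.assign(1, 0);
        }
        current_totals[0] += value;    // merge this block into the single totals row
    }
};

int main()
{
    TotalsAccumulator t;
    t.addToTotals(1);  // first block: initializes arena and totals
    t.addToTotals(2);  // second block: reuses the same arena, the bug scenario
}
```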
-clickhouse (1.1.54332) unstable; urgency=low
+clickhouse (1.1.54333) unstable; urgency=low
* Modified source code
- -- <robot-metrika-test@yandex-team.ru> Thu, 11 Jan 2018 10:25:25 +0300
+ -- <robot-metrika-test@yandex-team.ru> Fri, 12 Jan 2018 12:43:30 +0300
#!/bin/sh
ccache --show-stats
tar-ignore
tar-ignore="contrib/poco/openssl/*"
tar-ignore="ClickHouse/contrib/poco/openssl/*"
tar-ignore="ClickHouse/build*"
@@ -68,7 +68,7 @@ if [ -z "$THREAD_COUNT" ] ; then
THREAD_COUNT=`nproc || grep -c ^processor /proc/cpuinfo`
fi
-CMAKE_FLAGS+=" $LIBTCMALLOC_OPTS -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE -DUSE_EMBEDDED_COMPILER=1"
+CMAKE_FLAGS=" $LIBTCMALLOC_OPTS -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE -DUSE_EMBEDDED_COMPILER=1 $CMAKE_FLAGS"
export CMAKE_FLAGS
REVISION+=$VERSION_POSTFIX
@@ -83,6 +83,7 @@ if [ -z "$USE_PBUILDER" ] ; then
-b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS}
else
export DIST=${DIST:=artful}
+export SET_BUILDRESULT=${SET_BUILDRESULT:=$CURDIR/..}
. $CURDIR/debian/.pbuilderrc
if [[ -n "$FORCE_PBUILDER_CREATE" || ! -e "$BASETGZ" ]] ; then
sudo --preserve-env pbuilder create --configfile $CURDIR/debian/.pbuilderrc
...