Unverified commit a7384b4d, authored by alexey-milovidov, committed by GitHub

Merge branch 'master' into MoreGCCWarnings

......@@ -297,7 +297,7 @@
* MADV_FREE, though typically with higher
* system overhead.
*/
#define JEMALLOC_PURGE_MADVISE_FREE
// #define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
......
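For context, this toggle disables jemalloc's MADV_FREE-based purging and leaves the MADV_DONTNEED path enabled. A minimal, Linux-only sketch of what the two advice flags do (illustrative, not part of the commit):

```cpp
#include <sys/mman.h>
#include <cstdio>

// MADV_DONTNEED drops the pages immediately (they refault zero-filled),
// while MADV_FREE only marks them reclaimable under memory pressure,
// which is cheaper but leaves the pages counted in RSS until reclaimed.
int main()
{
    const size_t len = 1 << 20;
    void * p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    madvise(p, len, MADV_DONTNEED);   // eager purge: what JEMALLOC_PURGE_MADVISE_DONTNEED selects
#ifdef MADV_FREE
    madvise(p, len, MADV_FREE);       // lazy purge: what the now-disabled macro would select
#endif

    munmap(p, len);
    std::puts("done");
    return 0;
}
```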
Subproject commit 0795fa6bc443666068bec56bf700e1f488f592f1
Subproject commit d6a10dd3db55d8f7f9e464db9151874cde1f79ec
......@@ -34,10 +34,10 @@ SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM
SELECT URL, count() AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count() AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP AS x, x - 1, x - 2, x - 3, count() AS c FROM {table} GROUP BY x, x - 1, x - 2, x - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND notEmpty(URL) GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND notEmpty(Title) GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, ((SearchEngineID = 0 AND AdvEngineID = 0) ? Referer : '') AS Src, URL AS Dst, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = halfMD5('http://example.ru/') GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = halfMD5('http://example.ru/') GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT toStartOfMinute(EventTime) AS Minute, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND notEmpty(URL) GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND notEmpty(Title) GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, ((SearchEngineID = 0 AND AdvEngineID = 0) ? Referer : '') AS Src, URL AS Dst, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = halfMD5('http://example.ru/') GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = halfMD5('http://example.ru/') GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT toStartOfMinute(EventTime) AS Minute, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
......@@ -34,10 +34,10 @@ SELECT WatchID, ClientIP, count(1) AS c, sum(Refresh), avg(ResolutionWidth) FROM
SELECT URL, count(1) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10
SELECT 1, URL, count(1) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10
SELECT ClientIP AS x, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(1) AS c FROM {table} GROUP BY x, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10
SELECT URL, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND DontCountHits =0 AND Refresh = 0 AND URL <>'' GROUP BY URL ORDER BY PageViews DESC LIMIT 10
SELECT Title, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate BETWEEN '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND DontCountHits=0 AND Refresh=0 AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10
SELECT URL, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh = 0 AND IsLink <> 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, case when (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END Src, URL AS Dst, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND TraficSourceID IN (-1, 6) AND RefererHash = 7135345792483900000 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100
SELECT WindowClientWidth, WindowClientHeight, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND DontCountHits =0 AND URLHash = 7135345792483900000 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT date_trunc('minute', EventTime) AS Minute, count(1) AS PageViews FROM {table} WHERE CounterID = 34 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND DontCountHits =0 GROUP BY Minute ORDER BY Minute;
SELECT URL, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND DontCountHits =0 AND Refresh = 0 AND URL <>'' GROUP BY URL ORDER BY PageViews DESC LIMIT 10
SELECT Title, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate BETWEEN '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND DontCountHits=0 AND Refresh=0 AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10
SELECT URL, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh = 0 AND IsLink <> 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, case when (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END Src, URL AS Dst, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND TraficSourceID IN (-1, 6) AND RefererHash = 7135345792483900000 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100
SELECT WindowClientWidth, WindowClientHeight, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND DontCountHits =0 AND URLHash = 7135345792483900000 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT date_trunc('minute', EventTime) AS Minute, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND DontCountHits =0 GROUP BY Minute ORDER BY Minute;
......@@ -92,18 +92,18 @@ SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*) as c, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') as src FROM hits_100m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, URL, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') ORDER BY c DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*) as c, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') as src FROM hits_100m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, URL, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') ORDER BY c DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT unix_timestamp(EventTime) - SECOND(EventTime) AS m, count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY unix_timestamp(EventTime) - SECOND(EventTime) ORDER BY m;
SELECT unix_timestamp(EventTime) - SECOND(EventTime) AS m, count(*) FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY unix_timestamp(EventTime) - SECOND(EventTime) ORDER BY m;
......@@ -92,20 +92,20 @@ SELECT SQL_NO_CACHE 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY coun
SELECT SQL_NO_CACHE ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT SQL_NO_CACHE URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT SQL_NO_CACHE URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT SQL_NO_CACHE Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT SQL_NO_CACHE Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT SQL_NO_CACHE URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT SQL_NO_CACHE URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT SQL_NO_CACHE TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT SQL_NO_CACHE TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT SQL_NO_CACHE URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT SQL_NO_CACHE URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT SQL_NO_CACHE WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT SQL_NO_CACHE WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT SQL_NO_CACHE EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
\ No newline at end of file
SELECT SQL_NO_CACHE EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
\ No newline at end of file
......@@ -94,20 +94,20 @@ SELECT 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY count(*) DESC LIM
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
\ No newline at end of file
SELECT EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
\ No newline at end of file
......@@ -34,10 +34,10 @@ SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM
SELECT URL, count(*) FROM hits_10m GROUP BY URL ORDER BY count(*) DESC LIMIT 10;
SELECT 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY count(*) DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
......@@ -92,20 +92,20 @@ SELECT 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY count(*) DESC LIM
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND URL <> '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND URL <> '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT EventTime - extract (SECOND from EventTime) AS M, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND NOT Refresh AND NOT DontCountHits GROUP BY M ORDER BY M;
\ No newline at end of file
SELECT EventTime - extract (SECOND from EventTime) AS M, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND NOT Refresh AND NOT DontCountHits GROUP BY M ORDER BY M;
\ No newline at end of file
......@@ -34,10 +34,10 @@ SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM
SELECT URL, count(*) FROM {table} GROUP BY URL ORDER BY count(*) DESC LIMIT 10;
SELECT 1, URL, count(*) FROM {table} GROUP BY 1, URL ORDER BY count(*) DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT TIME_SLICE(EventTime, 1, 'MINUTE') AS Minute, count(*) AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT TIME_SLICE(EventTime, 1, 'MINUTE') AS Minute, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
......@@ -375,6 +375,12 @@ int Server::main(const std::vector<std::string> & /*args*/)
Poco::File(user_files_path).createDirectories();
}
{
std::string dictionaries_lib_path = config().getString("dictionaries_lib_path", path + "dictionaries_lib/");
global_context->setDictionariesLibPath(dictionaries_lib_path);
Poco::File(dictionaries_lib_path).createDirectories();
}
if (config().has("interserver_http_port") && config().has("interserver_https_port"))
throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
......
......@@ -263,7 +263,7 @@ size_t ColumnUnique<ColumnType>::uniqueInsert(const Field & x)
return getNullValueIndex();
if (size_of_value_if_fixed)
return uniqueInsertData(&x.get<char>(), size_of_value_if_fixed);
return uniqueInsertData(&x.reinterpret<char>(), size_of_value_if_fixed);
auto & val = x.get<String>();
return uniqueInsertData(val.data(), val.size());
......
......@@ -694,11 +694,25 @@ template <> struct Field::EnumToType<Field::Types::Decimal64> { using Type = Dec
template <> struct Field::EnumToType<Field::Types::Decimal128> { using Type = DecimalField<Decimal128>; };
template <> struct Field::EnumToType<Field::Types::AggregateFunctionState> { using Type = AggregateFunctionStateData; };
inline constexpr bool isInt64FieldType(Field::Types::Which t)
{
return t == Field::Types::Int64
|| t == Field::Types::UInt64;
}
// Field value getter with type checking in debug builds.
template <typename T>
T & Field::get()
{
using ValueType = std::decay_t<T>;
//assert(TypeToEnum<NearestFieldType<ValueType>>::value == which);
#ifndef NDEBUG
// Disregard signedness when converting between int64 types.
constexpr Field::Types::Which target = TypeToEnum<NearestFieldType<ValueType>>::value;
assert(target == which
|| (isInt64FieldType(target) && isInt64FieldType(which)));
#endif
ValueType * MAY_ALIAS ptr = reinterpret_cast<ValueType *>(&storage);
return *ptr;
}
......
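The debug-only check added here deliberately treats Int64 and UInt64 as interchangeable. A minimal standalone sketch of that idea, using hypothetical names (TaggedValue, Which) rather than the ClickHouse Field API:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <type_traits>

// A tagged 64-bit value whose getter checks the stored tag in debug builds,
// but accepts either signedness for 64-bit integers, mirroring
// isInt64FieldType() above.
enum class Which { Int64, UInt64, Float64 };

inline bool isInt64Kind(Which w) { return w == Which::Int64 || w == Which::UInt64; }

struct TaggedValue
{
    Which which;
    alignas(8) unsigned char storage[8];

    template <typename T>
    T get() const
    {
        static_assert(sizeof(T) == 8, "this sketch only stores 64-bit payloads");
        const Which target = std::is_same_v<T, std::int64_t> ? Which::Int64
                           : std::is_same_v<T, std::uint64_t> ? Which::UInt64
                           : Which::Float64;
        // Exact match, or both sides are 64-bit integers of either sign.
        assert(target == which || (isInt64Kind(target) && isInt64Kind(which)));

        T out;
        std::memcpy(&out, storage, sizeof(out));
        return out;
    }
};

int main()
{
    TaggedValue v{Which::UInt64, {}};
    const std::uint64_t stored = 42;
    std::memcpy(v.storage, &stored, sizeof(stored));

    // Reading a UInt64 payload through the Int64 getter passes the check.
    std::cout << v.get<std::int64_t>() << '\n';  // 42
    return 0;
}
```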
......@@ -15,7 +15,7 @@ list(REMOVE_ITEM clickhouse_dictionaries_sources DictionaryFactory.cpp Dictionar
list(REMOVE_ITEM clickhouse_dictionaries_headers DictionaryFactory.h DictionarySourceFactory.h DictionaryStructure.h getDictionaryConfigurationFromAST.h)
add_library(clickhouse_dictionaries ${clickhouse_dictionaries_sources})
target_link_libraries(clickhouse_dictionaries PRIVATE dbms clickhouse_common_io ${BTRIE_LIBRARIES})
target_link_libraries(clickhouse_dictionaries PRIVATE dbms clickhouse_common_io string_utils ${BTRIE_LIBRARIES})
if(Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories(clickhouse_dictionaries SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
......
......@@ -210,7 +210,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & context) -> DictionarySourcePtr
const Context & context,
bool /* check_config */) -> DictionarySourcePtr
{
return std::make_unique<ClickHouseDictionarySource>(dict_struct, config, config_prefix + ".clickhouse", sample_block, context);
};
......
......@@ -27,7 +27,8 @@ DictionaryPtr DictionaryFactory::create(
const std::string & name,
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
const Context & context) const
const Context & context,
bool check_source_config) const
{
Poco::Util::AbstractConfiguration::Keys keys;
const auto & layout_prefix = config_prefix + ".layout";
......@@ -38,7 +39,7 @@ DictionaryPtr DictionaryFactory::create(
const DictionaryStructure dict_struct{config, config_prefix + ".structure"};
DictionarySourcePtr source_ptr = DictionarySourceFactory::instance().create(name, config, config_prefix + ".source", dict_struct, context);
DictionarySourcePtr source_ptr = DictionarySourceFactory::instance().create(name, config, config_prefix + ".source", dict_struct, context, check_source_config);
const auto & layout_type = keys.front();
......@@ -57,7 +58,17 @@ DictionaryPtr DictionaryFactory::create(
DictionaryPtr DictionaryFactory::create(const std::string & name, const ASTCreateQuery & ast, const Context & context) const
{
auto configurationFromAST = getDictionaryConfigurationFromAST(ast);
return DictionaryFactory::create(name, *configurationFromAST, "dictionary", context);
return DictionaryFactory::create(name, *configurationFromAST, "dictionary", context, true);
}
bool DictionaryFactory::isComplex(const std::string & layout_type) const
{
auto found = layout_complexity.find(layout_type);
if (found != layout_complexity.end())
return found->second;
throw Exception{"Unknown dictionary layout type: " + layout_type, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG};
}
......
......@@ -29,12 +29,16 @@ public:
static DictionaryFactory & instance();
/// Create dictionary from AbstractConfiguration parsed
/// from xml-file on filesystem.
DictionaryPtr create(
const std::string & name,
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
const Context & context) const;
const Context & context,
bool check_source_config = false) const;
/// Create dictionary from DDL-query
DictionaryPtr create(const std::string & name,
const ASTCreateQuery & ast,
const Context & context) const;
......@@ -46,7 +50,7 @@ public:
const std::string & config_prefix,
DictionarySourcePtr source_ptr)>;
bool isComplex(const std::string & layout_type) const { return layout_complexity.at(layout_type); }
bool isComplex(const std::string & layout_type) const;
void registerLayout(const std::string & layout_type, Creator create_layout, bool is_complex);
......
......@@ -80,7 +80,8 @@ DictionarySourcePtr DictionarySourceFactory::create(
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
const DictionaryStructure & dict_struct,
const Context & context) const
const Context & context,
bool check_config) const
{
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_prefix, keys);
......@@ -95,7 +96,7 @@ DictionarySourcePtr DictionarySourceFactory::create(
{
const auto & create_source = found->second;
auto sample_block = createSampleBlock(dict_struct);
return create_source(dict_struct, config, config_prefix, sample_block, context);
return create_source(dict_struct, config, config_prefix, sample_block, context, check_config);
}
throw Exception{name + ": unknown dictionary source type: " + source_type, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG};
......
......@@ -31,7 +31,8 @@ public:
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & context)>;
const Context & context,
bool check_config)>;
DictionarySourceFactory();
......@@ -42,7 +43,8 @@ public:
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
const DictionaryStructure & dict_struct,
const Context & context) const;
const Context & context,
bool check_config) const;
private:
using SourceRegistry = std::unordered_map<std::string, Creator>;
......
......@@ -21,6 +21,10 @@ namespace DB
{
static const UInt64 max_block_size = 8192;
namespace ErrorCodes
{
extern const int DICTIONARY_ACCESS_DENIED;
}
namespace
{
......@@ -218,12 +222,21 @@ void registerDictionarySourceExecutable(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & context) -> DictionarySourcePtr
const Context & context,
bool check_config) -> DictionarySourcePtr
{
if (dict_struct.has_expressions)
throw Exception{"Dictionary source of type `executable` does not support attribute expressions", ErrorCodes::LOGICAL_ERROR};
return std::make_unique<ExecutableDictionarySource>(dict_struct, config, config_prefix + ".executable", sample_block, context);
/// Executable dictionaries may execute arbitrary commands.
/// It's OK for dictionaries created by administrator from xml-file, but
/// may be dangerous for dictionaries created from DDL-queries.
if (check_config)
throw Exception("Dictionaries with Executable dictionary source is not allowed", ErrorCodes::DICTIONARY_ACCESS_DENIED);
return std::make_unique<ExecutableDictionarySource>(
dict_struct, config, config_prefix + ".executable",
sample_block, context);
};
factory.registerSource("executable", createTableSource);
}
......
......@@ -11,16 +11,31 @@ namespace DB
{
static const UInt64 max_block_size = 8192;
namespace ErrorCodes
{
extern const int PATH_ACCESS_DENIED;
}
FileDictionarySource::FileDictionarySource(
const std::string & filename_, const std::string & format_, Block & sample_block_, const Context & context_)
: filename{filename_}, format{format_}, sample_block{sample_block_}, context(context_)
const std::string & filepath_, const std::string & format_,
Block & sample_block_, const Context & context_, bool check_config)
: filepath{filepath_}
, format{format_}
, sample_block{sample_block_}
, context(context_)
{
if (check_config)
{
const String user_files_path = context.getUserFilesPath();
if (!startsWith(filepath, user_files_path))
throw Exception("File path " + filepath + " is not inside " + user_files_path, ErrorCodes::PATH_ACCESS_DENIED);
}
}
FileDictionarySource::FileDictionarySource(const FileDictionarySource & other)
: filename{other.filename}
: filepath{other.filepath}
, format{other.format}
, sample_block{other.sample_block}
, context(other.context)
......@@ -31,7 +46,7 @@ FileDictionarySource::FileDictionarySource(const FileDictionarySource & other)
BlockInputStreamPtr FileDictionarySource::loadAll()
{
auto in_ptr = std::make_unique<ReadBufferFromFile>(filename);
auto in_ptr = std::make_unique<ReadBufferFromFile>(filepath);
auto stream = context.getInputFormat(format, *in_ptr, sample_block, max_block_size);
last_modification = getLastModification();
......@@ -41,13 +56,13 @@ BlockInputStreamPtr FileDictionarySource::loadAll()
std::string FileDictionarySource::toString() const
{
return "File: " + filename + ' ' + format;
return "File: " + filepath + ' ' + format;
}
Poco::Timestamp FileDictionarySource::getLastModification() const
{
return Poco::File{filename}.getLastModified();
return Poco::File{filepath}.getLastModified();
}
void registerDictionarySourceFile(DictionarySourceFactory & factory)
......@@ -56,15 +71,16 @@ void registerDictionarySourceFile(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & context) -> DictionarySourcePtr
const Context & context,
bool check_config) -> DictionarySourcePtr
{
if (dict_struct.has_expressions)
throw Exception{"Dictionary source of type `file` does not support attribute expressions", ErrorCodes::LOGICAL_ERROR};
const auto filename = config.getString(config_prefix + ".file.path");
const auto filepath = config.getString(config_prefix + ".file.path");
const auto format = config.getString(config_prefix + ".file.format");
return std::make_unique<FileDictionarySource>(filename, format, sample_block, context);
return std::make_unique<FileDictionarySource>(filepath, format, sample_block, context, check_config);
};
factory.registerSource("file", createTableSource);
......
......@@ -13,7 +13,8 @@ class Context;
class FileDictionarySource final : public IDictionarySource
{
public:
FileDictionarySource(const std::string & filename_, const std::string & format_, Block & sample_block_, const Context & context_);
FileDictionarySource(const std::string & filepath_, const std::string & format_,
Block & sample_block_, const Context & context_, bool check_config);
FileDictionarySource(const FileDictionarySource & other);
......@@ -47,7 +48,7 @@ public:
private:
Poco::Timestamp getLastModification() const;
const std::string filename;
const std::string filepath;
const std::string format;
Block sample_block;
const Context & context;
......
......@@ -25,7 +25,8 @@ HTTPDictionarySource::HTTPDictionarySource(
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block_,
const Context & context_)
const Context & context_,
bool check_config)
: log(&Logger::get("HTTPDictionarySource"))
, update_time{std::chrono::system_clock::from_time_t(0)}
, dict_struct{dict_struct_}
......@@ -36,6 +37,10 @@ HTTPDictionarySource::HTTPDictionarySource(
, context(context_)
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context))
{
if (check_config)
context.getRemoteHostFilter().checkURL(Poco::URI(url));
const auto & credentials_prefix = config_prefix + ".credentials";
if (config.has(credentials_prefix))
......@@ -187,12 +192,15 @@ void registerDictionarySourceHTTP(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & context) -> DictionarySourcePtr
const Context & context,
bool check_config) -> DictionarySourcePtr
{
if (dict_struct.has_expressions)
throw Exception{"Dictionary source of type `http` does not support attribute expressions", ErrorCodes::LOGICAL_ERROR};
return std::make_unique<HTTPDictionarySource>(dict_struct, config, config_prefix + ".http", sample_block, context);
return std::make_unique<HTTPDictionarySource>(
dict_struct, config, config_prefix + ".http",
sample_block, context, check_config);
};
factory.registerSource("http", createTableSource);
}
......
......@@ -25,7 +25,8 @@ public:
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block_,
const Context & context_);
const Context & context_,
bool check_config);
HTTPDictionarySource(const HTTPDictionarySource & other);
HTTPDictionarySource & operator=(const HTTPDictionarySource &) = delete;
......
......@@ -6,6 +6,7 @@
#include <ext/bit_cast.h>
#include <ext/range.h>
#include <ext/scope_guard.h>
#include <Common/StringUtils/StringUtils.h>
#include "DictionarySourceFactory.h"
#include "DictionaryStructure.h"
#include "LibraryDictionarySourceExternal.h"
......@@ -19,6 +20,7 @@ namespace ErrorCodes
extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
extern const int FILE_DOESNT_EXIST;
extern const int EXTERNAL_LIBRARY_ERROR;
extern const int PATH_ACCESS_DENIED;
}
......@@ -121,17 +123,28 @@ LibraryDictionarySource::LibraryDictionarySource(
const DictionaryStructure & dict_struct_,
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix_,
Block & sample_block_)
Block & sample_block_,
const Context & context,
bool check_config)
: log(&Logger::get("LibraryDictionarySource"))
, dict_struct{dict_struct_}
, config_prefix{config_prefix_}
, path{config.getString(config_prefix + ".path", "")}
, sample_block{sample_block_}
{
if (check_config)
{
const String dictionaries_lib_path = context.getDictionariesLibPath();
if (!startsWith(path, dictionaries_lib_path))
throw Exception("LibraryDictionarySource: Library path " + dictionaries_lib_path + " is not inside " + dictionaries_lib_path, ErrorCodes::PATH_ACCESS_DENIED);
}
if (!Poco::File(path).exists())
throw Exception(
"LibraryDictionarySource: Can't load lib " + toString() + ": " + Poco::File(path).path() + " - File doesn't exist",
ErrorCodes::FILE_DOESNT_EXIST);
description.init(sample_block);
library = std::make_shared<SharedLibrary>(path, RTLD_LAZY
#if defined(RTLD_DEEPBIND) && !defined(ADDRESS_SANITIZER) // Does not exist in FreeBSD. Cannot work with Address Sanitizer.
......@@ -285,9 +298,10 @@ void registerDictionarySourceLibrary(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context &) -> DictionarySourcePtr
const Context & context,
bool check_config) -> DictionarySourcePtr
{
return std::make_unique<LibraryDictionarySource>(dict_struct, config, config_prefix + ".library", sample_block);
return std::make_unique<LibraryDictionarySource>(dict_struct, config, config_prefix + ".library", sample_block, context, check_config);
};
factory.registerSource("library", createTableSource);
}
......
......@@ -32,7 +32,9 @@ public:
const DictionaryStructure & dict_struct_,
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix_,
Block & sample_block_);
Block & sample_block_,
const Context & context,
bool check_config);
LibraryDictionarySource(const LibraryDictionarySource & other);
LibraryDictionarySource & operator=(const LibraryDictionarySource &) = delete;
......
......@@ -16,7 +16,8 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & /* context */) -> DictionarySourcePtr {
const Context & /* context */,
bool /* check_config */) -> DictionarySourcePtr {
#if USE_POCO_MONGODB
return std::make_unique<MongoDBDictionarySource>(dict_struct, config, config_prefix + ".mongodb", sample_block);
#else
......
......@@ -18,7 +18,8 @@ void registerDictionarySourceMysql(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & /* context */) -> DictionarySourcePtr {
const Context & /* context */,
bool /* check_config */) -> DictionarySourcePtr {
#if USE_MYSQL
return std::make_unique<MySQLDictionarySource>(dict_struct, config, config_prefix + ".mysql", sample_block);
#else
......
......@@ -16,7 +16,8 @@ namespace DB
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
Block & sample_block,
const Context & /* context */) -> DictionarySourcePtr {
const Context & /* context */,
bool /* check_config */) -> DictionarySourcePtr {
#if USE_POCO_REDIS
return std::make_unique<RedisDictionarySource>(dict_struct, config, config_prefix + ".redis", sample_block);
#else
......
......@@ -240,7 +240,8 @@ void registerDictionarySourceXDBC(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
const Context & context) -> DictionarySourcePtr {
const Context & context,
bool /* check_config */) -> DictionarySourcePtr {
#if USE_POCO_SQLODBC || USE_POCO_DATAODBC
BridgeHelperPtr bridge = std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(
context, context.getSettings().http_receive_timeout, config.getString(config_prefix + ".odbc.connection_string"));
......@@ -264,7 +265,8 @@ void registerDictionarySourceJDBC(DictionarySourceFactory & factory)
const Poco::Util::AbstractConfiguration & /* config */,
const std::string & /* config_prefix */,
Block & /* sample_block */,
const Context & /* context */) -> DictionarySourcePtr {
const Context & /* context */,
bool /* check_config */) -> DictionarySourcePtr {
throw Exception{"Dictionary source of type `jdbc` is disabled until consistent support for nullable fields.",
ErrorCodes::SUPPORT_IS_DISABLED};
// BridgeHelperPtr bridge = std::make_shared<XDBCBridgeHelper<JDBCBridgeMixin>>(config, context.getSettings().http_receive_timeout, config.getString(config_prefix + ".connection_string"));
......
......@@ -119,9 +119,8 @@ struct IntegerRoundingComputation
}
case RoundingMode::Round:
{
bool negative = x < 0;
if (negative)
x = -x;
if (x < 0)
x -= scale;
switch (tie_breaking_mode)
{
case TieBreakingMode::Auto:
......@@ -131,14 +130,14 @@ struct IntegerRoundingComputation
{
T quotient = (x + scale / 2) / scale;
if (quotient * scale == x + scale / 2)
x = (quotient & ~1) * scale;
// round half to even
x = ((quotient + (x < 0)) & ~1) * scale;
else
// round the others as usual
x = quotient * scale;
break;
}
}
if (negative)
x = -x;
return x;
}
}
......
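The branch commented "round half to even" above implements banker's rounding for exact ties. Independently of the integer arithmetic in IntegerRoundingComputation, the same tie-breaking rule is what the standard library's default IEEE 754 rounding mode does; a small sketch for intuition:

```cpp
#include <cfenv>
#include <cmath>
#include <cstdio>

// Round half to even ("banker's rounding"): a value exactly halfway between
// two neighbours goes to the even one. FE_TONEAREST is already the default
// rounding mode; it is set explicitly here only to make the intent visible.
int main()
{
    std::fesetround(FE_TONEAREST);
    const double samples[] = {0.5, 1.5, 2.5, -0.5, -1.5, -2.5};
    for (double x : samples)
        std::printf("nearbyint(%+.1f) = %+.1f\n", x, std::nearbyint(x));
    // Ties go to the even integer: 0, 2, 2, -0, -2, -2.
    return 0;
}
```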
......@@ -126,7 +126,11 @@ ColumnPtr wrapInNullable(const ColumnPtr & src, const Block & block, const Colum
/// Const Nullable that are NULL.
if (elem.column->onlyNull())
return block.getByPosition(result).type->createColumnConst(input_rows_count, Null());
{
auto result_type = block.getByPosition(result).type;
assert(result_type->isNullable());
return result_type->createColumnConstWithDefaultValue(input_rows_count);
}
if (isColumnConst(*elem.column))
continue;
......@@ -281,7 +285,13 @@ bool ExecutableFunctionAdaptor::defaultImplementationForNulls(
if (null_presence.has_null_constant)
{
block.getByPosition(result).column = block.getByPosition(result).type->createColumnConst(input_rows_count, Null());
auto & result_column = block.getByPosition(result).column;
auto result_type = block.getByPosition(result).type;
// Default implementation for nulls returns null result for null arguments,
// so the result type must be nullable.
assert(result_type->isNullable());
result_column = result_type->createColumnConstWithDefaultValue(input_rows_count);
return true;
}
......
......@@ -18,7 +18,7 @@ namespace ErrorCodes
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
static DateTime64::NativeType nowSubsecond(UInt32 scale)
static Field nowSubsecond(UInt32 scale)
{
const Int32 fractional_scale = 9;
timespec spec;
......@@ -36,7 +36,8 @@ static DateTime64::NativeType nowSubsecond(UInt32 scale)
else if (adjust_scale > 0)
components.fractional /= intExp10(adjust_scale);
return DecimalUtils::decimalFromComponents<DateTime64>(components, scale).value;
return DecimalField(DecimalUtils::decimalFromComponents<DateTime64>(components, scale),
scale);
}
class FunctionNow64 : public IFunction
......
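`nowSubsecond` now returns a `DecimalField` carrying both the value and the scale. The underlying arithmetic: take seconds and nanoseconds from the clock, rescale the fractional part from nanosecond resolution (scale 9) to the requested scale, and pack everything into one integer of 10^-scale "ticks". A sketch of that arithmetic (not the DecimalUtils API itself; names are illustrative):

```python
def datetime64_ticks(seconds, nanoseconds, scale):
    """Build a DateTime64-style value: an integer count of 10**-scale seconds."""
    fractional_scale = 9                      # clock gives nanoseconds
    adjust_scale = fractional_scale - scale
    frac = nanoseconds
    if adjust_scale < 0:
        frac *= 10 ** (-adjust_scale)
    elif adjust_scale > 0:
        frac //= 10 ** adjust_scale
    return seconds * 10 ** scale + frac

# 12.345678901 s at scale 3 (milliseconds) -> 12345 ticks
assert datetime64_ticks(12, 345678901, 3) == 12345
```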
......@@ -796,7 +796,11 @@ private:
table_num_to_num = std::make_unique<NumToNum>();
auto & table = *table_num_to_num;
for (size_t i = 0; i < size; ++i)
table[from[i].get<UInt64>()] = (*used_to)[i].get<UInt64>();
{
// Field may be of Float type, but for the purpose of bitwise
// equality we can treat them as UInt64, hence the reinterpret().
table[from[i].reinterpret<UInt64>()] = (*used_to)[i].reinterpret<UInt64>();
}
}
else if (from[0].getType() != Field::Types::String && to[0].getType() == Field::Types::String)
{
......@@ -806,7 +810,7 @@ private:
{
const String & str_to = to[i].get<const String &>();
StringRef ref{string_pool.insert(str_to.data(), str_to.size() + 1), str_to.size() + 1};
table[from[i].get<UInt64>()] = ref;
table[from[i].reinterpret<UInt64>()] = ref;
}
}
else if (from[0].getType() == Field::Types::String && to[0].getType() != Field::Types::String)
......@@ -817,7 +821,7 @@ private:
{
const String & str_from = from[i].get<const String &>();
StringRef ref{string_pool.insert(str_from.data(), str_from.size() + 1), str_from.size() + 1};
table[ref] = (*used_to)[i].get<UInt64>();
table[ref] = (*used_to)[i].reinterpret<UInt64>();
}
}
else if (from[0].getType() == Field::Types::String && to[0].getType() == Field::Types::String)
......
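`Field::reinterpret<UInt64>()` reads the stored Float64 bit pattern as an unsigned integer, so Float keys can live in the same hash table as integer keys and match by exact bit equality. The same trick, sketched in Python with `struct` (purely illustrative):

```python
import struct

def float_bits_as_uint64(x):
    """Reinterpret the IEEE-754 representation of a double as a 64-bit unsigned
    integer, giving bitwise (not numeric) equality for hash-table lookups."""
    return struct.unpack('<Q', struct.pack('<d', x))[0]

table = {float_bits_as_uint64(1.5): 'a', float_bits_as_uint64(2.5): 'b'}
assert table[float_bits_as_uint64(1.5)] == 'a'
```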
......@@ -3,14 +3,58 @@
#if USE_AWS_S3
#include <IO/S3Common.h>
#include <IO/WriteHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <regex>
#include <aws/s3/S3Client.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/core/utils/logging/LogSystemInterface.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <common/logger_useful.h>
namespace
{
const std::pair<LogsLevel, Message::Priority> & convertLogLevel(Aws::Utils::Logging::LogLevel log_level)
{
static const std::unordered_map<Aws::Utils::Logging::LogLevel, std::pair<LogsLevel, Message::Priority>> mapping = {
{Aws::Utils::Logging::LogLevel::Off, {LogsLevel::none, Message::PRIO_FATAL}},
{Aws::Utils::Logging::LogLevel::Fatal, {LogsLevel::error, Message::PRIO_FATAL}},
{Aws::Utils::Logging::LogLevel::Error, {LogsLevel::error, Message::PRIO_ERROR}},
{Aws::Utils::Logging::LogLevel::Warn, {LogsLevel::warning, Message::PRIO_WARNING}},
{Aws::Utils::Logging::LogLevel::Info, {LogsLevel::information, Message::PRIO_INFORMATION}},
{Aws::Utils::Logging::LogLevel::Debug, {LogsLevel::debug, Message::PRIO_DEBUG}},
{Aws::Utils::Logging::LogLevel::Trace, {LogsLevel::trace, Message::PRIO_TRACE}},
};
return mapping.at(log_level);
}
class AWSLogger : public Aws::Utils::Logging::LogSystemInterface
{
public:
~AWSLogger() final = default;
Aws::Utils::Logging::LogLevel GetLogLevel() const final { return Aws::Utils::Logging::LogLevel::Trace; }
void Log(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * format_str, ...) final
{
auto & [level, prio] = convertLogLevel(log_level);
LOG_SIMPLE(log, std::string(tag) + ": " + format_str, level, prio);
}
void LogStream(Aws::Utils::Logging::LogLevel log_level, const char * tag, const Aws::OStringStream & message_stream) final
{
auto & [level, prio] = convertLogLevel(log_level);
LOG_SIMPLE(log, std::string(tag) + ": " + message_stream.str(), level, prio);
}
void Flush() final {}
private:
Poco::Logger * log = &Poco::Logger::get("AWSClient");
};
}
namespace DB
{
......@@ -25,10 +69,12 @@ namespace S3
{
aws_options = Aws::SDKOptions {};
Aws::InitAPI(aws_options);
Aws::Utils::Logging::InitializeAWSLogging(std::make_shared<AWSLogger>());
}
ClientFactory::~ClientFactory()
{
Aws::Utils::Logging::ShutdownAWSLogging();
Aws::ShutdownAPI(aws_options);
}
......
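The new `AWSLogger` is a thin adapter: every SDK record is mapped through `convertLogLevel` and forwarded to a single Poco logger named "AWSClient". The shape of that adapter, sketched against Python's logging module (level names here are assumed to mirror `Aws::Utils::Logging::LogLevel`):

```python
import logging

# Same idea as convertLogLevel(), but targeting Python's logging levels.
AWS_TO_HOST_LEVEL = {
    'Off': logging.CRITICAL, 'Fatal': logging.CRITICAL, 'Error': logging.ERROR,
    'Warn': logging.WARNING, 'Info': logging.INFO,
    'Debug': logging.DEBUG, 'Trace': logging.DEBUG,
}

class AwsLoggerAdapter:
    """Forward SDK log records to one host logger, tagging each message."""
    def __init__(self):
        self.log = logging.getLogger('AWSClient')

    def emit(self, aws_level, tag, message):
        self.log.log(AWS_TO_HOST_LEVEL[aws_level], '%s: %s', tag, message)

AwsLoggerAdapter().emit('Info', 'S3Client', 'request sent')
```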
......@@ -127,6 +127,7 @@ struct ContextShared
String tmp_path; /// The path to the temporary files that occur when processing the request.
String flags_path; /// Path to the directory with some control flags for server maintenance.
String user_files_path; /// Path to the directory with user provided files, usable by 'file' table function.
String dictionaries_lib_path; /// Path to the directory with user provided binaries and libraries for external dictionaries.
ConfigurationPtr config; /// Global configuration settings.
Databases databases; /// List of databases and tables in them.
......@@ -544,6 +545,12 @@ String Context::getUserFilesPath() const
return shared->user_files_path;
}
String Context::getDictionariesLibPath() const
{
auto lock = getLock();
return shared->dictionaries_lib_path;
}
void Context::setPath(const String & path)
{
auto lock = getLock();
......@@ -558,6 +565,9 @@ void Context::setPath(const String & path)
if (shared->user_files_path.empty())
shared->user_files_path = shared->path + "user_files/";
if (shared->dictionaries_lib_path.empty())
shared->dictionaries_lib_path = shared->path + "dictionaries_lib/";
}
void Context::setTemporaryPath(const String & path)
......@@ -578,6 +588,12 @@ void Context::setUserFilesPath(const String & path)
shared->user_files_path = path;
}
void Context::setDictionariesLibPath(const String & path)
{
auto lock = getLock();
shared->dictionaries_lib_path = path;
}
void Context::setConfig(const ConfigurationPtr & config)
{
auto lock = getLock();
......
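The new `dictionaries_lib_path` behaves like the other auxiliary paths: it can be set explicitly, and otherwise defaults to `dictionaries_lib/` under the server path. A sketch of the defaulting rule (function name is illustrative):

```python
def dictionaries_lib_path(server_path, configured=None):
    """If the path is not configured, fall back to <path>/dictionaries_lib/."""
    return configured if configured else server_path + 'dictionaries_lib/'

assert dictionaries_lib_path('/var/lib/clickhouse/') == '/var/lib/clickhouse/dictionaries_lib/'
```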
......@@ -191,11 +191,13 @@ public:
String getTemporaryPath() const;
String getFlagsPath() const;
String getUserFilesPath() const;
String getDictionariesLibPath() const;
void setPath(const String & path);
void setTemporaryPath(const String & path);
void setFlagsPath(const String & path);
void setUserFilesPath(const String & path);
void setDictionariesLibPath(const String & path);
using ConfigurationPtr = Poco::AutoPtr<Poco::Util::AbstractConfiguration>;
......
......@@ -15,9 +15,13 @@ ExternalDictionariesLoader::ExternalDictionariesLoader(Context & context_)
ExternalLoader::LoadablePtr ExternalDictionariesLoader::create(
const std::string & name, const Poco::Util::AbstractConfiguration & config, const std::string & key_in_config) const
const std::string & name, const Poco::Util::AbstractConfiguration & config,
const std::string & key_in_config, const std::string & repository_name) const
{
return DictionaryFactory::instance().create(name, config, key_in_config, context);
/// For dictionaries from databases (created with DDL queries) we have to perform
/// additional checks, so we identify them here.
bool dictionary_from_database = !repository_name.empty();
return DictionaryFactory::instance().create(name, config, key_in_config, context, dictionary_from_database);
}
void ExternalDictionariesLoader::addConfigRepository(
......
......@@ -35,7 +35,7 @@ public:
protected:
LoadablePtr create(const std::string & name, const Poco::Util::AbstractConfiguration & config,
const std::string & key_in_config) const override;
const std::string & key_in_config, const std::string & repository_name) const override;
friend class StorageSystemDictionaries;
friend class DatabaseDictionary;
......
......@@ -1257,7 +1257,7 @@ ExternalLoader::LoadablePtr ExternalLoader::createObject(
if (previous_version)
return previous_version->clone();
return create(name, *config.config, config.key_in_config);
return create(name, *config.config, config.key_in_config, config.repository_name);
}
std::vector<std::pair<String, Int8>> ExternalLoader::getStatusEnumAllPossibleValues()
......
......@@ -173,7 +173,7 @@ public:
void reloadConfig(const String & repository_name, const String & path) const;
protected:
virtual LoadablePtr create(const String & name, const Poco::Util::AbstractConfiguration & config, const String & key_in_config) const = 0;
virtual LoadablePtr create(const String & name, const Poco::Util::AbstractConfiguration & config, const String & key_in_config, const String & repository_name) const = 0;
private:
struct ObjectConfig;
......
......@@ -18,7 +18,8 @@ ExternalModelsLoader::ExternalModelsLoader(Context & context_)
}
std::shared_ptr<const IExternalLoadable> ExternalModelsLoader::create(
const std::string & name, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix) const
const std::string & name, const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix, const std::string & /* repository_name */) const
{
String type = config.getString(config_prefix + ".type");
ExternalLoadableLifetime lifetime(config, config_prefix + ".lifetime");
......
......@@ -31,7 +31,7 @@ public:
protected:
LoadablePtr create(const std::string & name, const Poco::Util::AbstractConfiguration & config,
const std::string & key_in_config) const override;
const std::string & key_in_config, const std::string & repository_name) const override;
friend class StorageSystemModels;
private:
......
......@@ -501,15 +501,12 @@ void Join::initRightBlockStructure()
JoinCommon::convertColumnsToNullable(saved_block_sample, (isFull(kind) ? right_table_keys.columns() : 0));
}
Block Join::structureRightBlock(const Block & source_block) const
Block Join::structureRightBlock(const Block & block) const
{
/// Rare case, when joined columns are constant. To avoid code bloat, simply materialize them.
Block block = materializeBlock(source_block);
Block structured_block;
for (auto & sample_column : saved_block_sample.getColumnsWithTypeAndName())
{
auto & column = block.getByName(sample_column.name);
ColumnWithTypeAndName column = block.getByName(sample_column.name);
if (sample_column.column->isNullable())
JoinCommon::convertColumnToNullable(column);
structured_block.insert(column);
......@@ -518,14 +515,16 @@ Block Join::structureRightBlock(const Block & source_block) const
return structured_block;
}
bool Join::addJoinedBlock(const Block & block)
bool Join::addJoinedBlock(const Block & source_block)
{
if (empty())
throw Exception("Logical error: Join was not initialized", ErrorCodes::LOGICAL_ERROR);
/// Rare case, when keys are constant. To avoid code bloat, simply materialize them.
Columns materialized_columns;
ColumnRawPtrs key_columns = JoinCommon::temporaryMaterializeColumns(block, key_names_right, materialized_columns);
/// There's no optimization for right side const columns. Remove constness if any.
Block block = materializeBlock(source_block);
size_t rows = block.rows();
ColumnRawPtrs key_columns = JoinCommon::materializeColumnsInplace(block, key_names_right);
/// We will insert to the map only keys, where all components are not NULL.
ConstNullMapPtr null_map{};
......@@ -549,7 +548,6 @@ bool Join::addJoinedBlock(const Block & block)
blocks.emplace_back(std::move(structured_block));
Block * stored_block = &blocks.back();
size_t rows = block.rows();
if (rows)
has_no_rows_in_maps = false;
......@@ -886,9 +884,9 @@ void Join::joinBlockImpl(
constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right);
constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left));
/// Rare case, when keys are constant. To avoid code bloat, simply materialize them.
Columns materialized_columns;
ColumnRawPtrs key_columns = JoinCommon::temporaryMaterializeColumns(block, key_names_left, materialized_columns);
/// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them.
Columns materialized_keys = JoinCommon::materializeColumns(block, key_names_left);
ColumnRawPtrs key_columns = JoinCommon::getRawPointers(materialized_keys);
/// Keys with NULL value in any column won't join to anything.
ConstNullMapPtr null_map{};
......
......@@ -84,8 +84,16 @@ void NO_INLINE Set::insertFromBlockImplCase(
for (size_t i = 0; i < rows; ++i)
{
if constexpr (has_null_map)
{
if ((*null_map)[i])
{
if constexpr (build_filter)
{
(*out_filter)[i] = false;
}
continue;
}
}
[[maybe_unused]] auto emplace_result = state.emplaceKey(method.data, i, variants.string_pool);
......
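The fix makes NULL-keyed rows write an explicit `false` into the output filter instead of merely being skipped, so the filter is fully defined when `build_filter` is on. A simplified sketch of the intent (here the filter marks rows whose key was actually inserted):

```python
def build_set_and_filter(keys, null_map):
    """Insert non-NULL keys into a set and build a row filter alongside it.
    NULL rows now get an explicit False rather than an unset slot."""
    seen, out_filter = set(), []
    for key, is_null in zip(keys, null_map):
        if is_null:
            out_filter.append(False)
            continue
        out_filter.append(key not in seen)
        seen.add(key)
    return seen, out_filter

assert build_set_and_filter([1, 7, 1, 2], [0, 1, 0, 0])[1] == [True, False, False, True]
```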
......@@ -48,19 +48,43 @@ void removeColumnNullability(ColumnWithTypeAndName & column)
}
}
ColumnRawPtrs temporaryMaterializeColumns(const Block & block, const Names & names, Columns & materialized)
ColumnRawPtrs materializeColumnsInplace(Block & block, const Names & names)
{
ColumnRawPtrs ptrs;
ptrs.reserve(names.size());
for (auto & column_name : names)
{
auto & column = block.getByName(column_name).column;
column = recursiveRemoveLowCardinality(column->convertToFullColumnIfConst());
ptrs.push_back(column.get());
}
return ptrs;
}
Columns materializeColumns(const Block & block, const Names & names)
{
Columns materialized;
materialized.reserve(names.size());
for (auto & column_name : names)
{
const auto & src_column = block.getByName(column_name).column;
materialized.emplace_back(recursiveRemoveLowCardinality(src_column->convertToFullColumnIfConst()));
ptrs.push_back(materialized.back().get());
}
return materialized;
}
ColumnRawPtrs getRawPointers(const Columns & columns)
{
ColumnRawPtrs ptrs;
ptrs.reserve(columns.size());
for (auto & column : columns)
ptrs.push_back(column.get());
return ptrs;
}
......
......@@ -16,7 +16,9 @@ namespace JoinCommon
void convertColumnToNullable(ColumnWithTypeAndName & column);
void convertColumnsToNullable(Block & block, size_t starting_pos = 0);
void removeColumnNullability(ColumnWithTypeAndName & column);
ColumnRawPtrs temporaryMaterializeColumns(const Block & block, const Names & names, Columns & materialized);
Columns materializeColumns(const Block & block, const Names & names);
ColumnRawPtrs materializeColumnsInplace(Block & block, const Names & names);
ColumnRawPtrs getRawPointers(const Columns & columns);
void removeLowCardinalityInplace(Block & block);
/// Split key and other columns by keys name list
......
......@@ -2069,7 +2069,8 @@ void MergeTreeData::renameTempPartAndReplace(
*/
if (increment)
{
part_info.min_block = part_info.max_block = part_info.mutation = increment->get();
part_info.min_block = part_info.max_block = increment->get();
part_info.mutation = 0; /// it's equal to min_block by default
part_name = part->getNewName(part_info);
}
else
......
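This is the change behind the many part-name updates in the tests below: a freshly inserted part now gets `mutation = 0`, and the mutation component is appended to the name only when it is non-zero, so new parts are named like `201902_1_1_0` instead of `201902_1_1_0_1`. A sketch of the naming rule (simplified, illustrative names):

```python
def part_name(partition_id, min_block, max_block, level, mutation=0):
    """MergeTree-style part name; the trailing mutation component appears
    only for parts that were actually mutated (mutation > 0)."""
    name = '{}_{}_{}_{}'.format(partition_id, min_block, max_block, level)
    if mutation:
        name += '_{}'.format(mutation)
    return name

assert part_name('201902', 1, 1, 0) == '201902_1_1_0'
assert part_name('201902', 1, 1, 0, mutation=2) == '201902_1_1_0_2'
```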
......@@ -113,9 +113,7 @@ void checkCreationIsAllowed(Context & context_global, const std::string & db_dir
throw Exception("Part path " + table_path + " is not inside " + db_dir_path, ErrorCodes::DATABASE_ACCESS_DENIED);
Poco::File table_path_poco_file = Poco::File(table_path);
if (!table_path_poco_file.exists())
throw Exception("File " + table_path + " is not exist", ErrorCodes::FILE_DOESNT_EXIST);
else if (table_path_poco_file.isDirectory())
if (table_path_poco_file.exists() && table_path_poco_file.isDirectory())
throw Exception("File " + table_path + " must not be a directory", ErrorCodes::INCORRECT_FILE_NAME);
}
}
......@@ -150,7 +148,12 @@ StorageFile::StorageFile(
poco_path = Poco::Path(db_dir_path_abs, poco_path);
const std::string path = poco_path.absolute().toString();
paths = listFilesWithRegexpMatching("/", path);
if (path.find_first_of("*?{") == std::string::npos)
{
paths.push_back(path);
}
else
paths = listFilesWithRegexpMatching("/", path);
for (const auto & cur_path : paths)
checkCreationIsAllowed(context_global, db_dir_path_abs, cur_path);
is_db_table = false;
......@@ -264,6 +267,11 @@ BlockInputStreams StorageFile::read(
BlockInputStreams blocks_input;
if (use_table_fd) /// need to call ctr BlockInputStream
paths = {""}; /// when use fd, paths are empty
else
{
if (paths.size() == 1 && !Poco::File(paths[0]).exists())
throw Exception("File " + paths[0] + " doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
}
blocks_input.reserve(paths.size());
for (const auto & file_path : paths)
{
......
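StorageFile now treats the path literally unless it contains one of the glob metacharacters `*`, `?` or `{`, and only then expands it; a single literal path that does not exist is reported at read time. A sketch of the dispatch (the matcher is passed in as a stand-in for `listFilesWithRegexpMatching`):

```python
def resolve_paths(path, expand_glob):
    """Return the literal path as-is, or expand it when it looks like a glob."""
    if not set('*?{') & set(path):
        return [path]
    return expand_glob(path)

assert resolve_paths('/data/file.csv', lambda p: []) == ['/data/file.csv']
assert resolve_paths('/data/file_?.csv', lambda p: ['/data/file_1.csv']) == ['/data/file_1.csv']
```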
......@@ -625,8 +625,21 @@ class ClickHouseInstance:
return self.client.query_and_get_answer_with_error(sql, stdin, timeout, settings, user)
# Connects to the instance via HTTP interface, sends a query and returns the answer
def http_query(self, sql, data=None):
return urllib.urlopen("http://" + self.ip_address + ":8123/?query=" + urllib.quote(sql, safe=''), data).read()
def http_query(self, sql, data=None, params=None, user=None):
if params is None:
params = {}
else:
params = params.copy()
params["query"] = sql
auth = ""
if user:
auth = "{}@".format(user)
url = "http://" + auth + self.ip_address + ":8123/?" + urllib.urlencode(params)
return urllib.urlopen(url, data).read()
def restart_clickhouse(self, stop_start_wait_sec=5, kill=False):
if not self.stay_alive:
......@@ -684,6 +697,10 @@ class ClickHouseInstance:
time.sleep(0.5)
local_counter += 1
# force kill if server hangs
if self.get_process_pid("clickhouse server"):
self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(9)], user='root')
if callback_onstop:
callback_onstop(self)
self.exec_in_container(
......
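The reworked `http_query` sends settings as extra URL parameters next to `query` and optionally puts the user into the authority part of the URL. An equivalent Python 3 sketch of the URL construction (the helper itself uses the Python 2 urllib API):

```python
from urllib.parse import urlencode

def build_http_url(ip_address, sql, params=None, user=None):
    """Compose the HTTP interface URL: settings and the query share the query string."""
    params = dict(params or {})
    params['query'] = sql
    auth = '{}@'.format(user) if user else ''
    return 'http://' + auth + ip_address + ':8123/?' + urlencode(params)

print(build_http_url('127.0.0.1', 'SELECT 1', params={'max_memory_usage': 10000}, user='readonly_user'))
# -> http://readonly_user@127.0.0.1:8123/?max_memory_usage=10000&query=SELECT+1
```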
......@@ -329,12 +329,14 @@ def test_version_update_two_nodes(start_dynamic_cluster):
node11.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=5)
node12.query("INSERT INTO table_with_default_granularity_new VALUES (toDate('2018-10-01'), 3, 333), (toDate('2018-10-02'), 4, 444)")
node11.restart_with_latest_version(signal=9) # just to be sure
node11.restart_with_latest_version() # just to be sure
node11.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=5)
node12.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=5)
node11.query("SELECT COUNT() FROM table_with_default_granularity_new") == "4\n"
node12.query("SELECT COUNT() FROM table_with_default_granularity_new") == "4\n"
node11.query("SYSTEM SYNC REPLICA table_with_default_granularity")
node11.query("INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 5, 333), (toDate('2018-10-02'), 6, 444)")
node12.query("SYSTEM SYNC REPLICA table_with_default_granularity")
assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '6\n'
......@@ -50,35 +50,35 @@ def remove_part_from_disk(node, table, part_name):
def test_check_normal_table_corruption(started_cluster):
node1.query("INSERT INTO non_replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)")
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201902", settings={"check_query_single_value_result": 0}) == "201902_1_1_0_1\t1\t\n"
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201902", settings={"check_query_single_value_result": 0}) == "201902_1_1_0\t1\t\n"
remove_checksums_on_disk(node1, "non_replicated_mt", "201902_1_1_0_1")
remove_checksums_on_disk(node1, "non_replicated_mt", "201902_1_1_0")
assert node1.query("CHECK TABLE non_replicated_mt", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0_1\t1\tChecksums recounted and written to disk."
assert node1.query("CHECK TABLE non_replicated_mt", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0\t1\tChecksums recounted and written to disk."
assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n"
remove_checksums_on_disk(node1, "non_replicated_mt", "201902_1_1_0_1")
remove_checksums_on_disk(node1, "non_replicated_mt", "201902_1_1_0")
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201902", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0_1\t1\tChecksums recounted and written to disk."
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201902", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0\t1\tChecksums recounted and written to disk."
assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n"
corrupt_data_part_on_disk(node1, "non_replicated_mt", "201902_1_1_0_1")
corrupt_data_part_on_disk(node1, "non_replicated_mt", "201902_1_1_0")
assert node1.query("CHECK TABLE non_replicated_mt", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0_1\t0\tCannot read all data. Bytes read: 2. Bytes expected: 16."
assert node1.query("CHECK TABLE non_replicated_mt", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0\t0\tCannot read all data. Bytes read: 2. Bytes expected: 16."
assert node1.query("CHECK TABLE non_replicated_mt", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0_1\t0\tCannot read all data. Bytes read: 2. Bytes expected: 16."
assert node1.query("CHECK TABLE non_replicated_mt", settings={"check_query_single_value_result": 0}).strip() == "201902_1_1_0\t0\tCannot read all data. Bytes read: 2. Bytes expected: 16."
node1.query("INSERT INTO non_replicated_mt VALUES (toDate('2019-01-01'), 1, 10), (toDate('2019-01-01'), 2, 12)")
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201901", settings={"check_query_single_value_result": 0}) == "201901_2_2_0_2\t1\t\n"
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201901", settings={"check_query_single_value_result": 0}) == "201901_2_2_0\t1\t\n"
corrupt_data_part_on_disk(node1, "non_replicated_mt", "201901_2_2_0_2")
corrupt_data_part_on_disk(node1, "non_replicated_mt", "201901_2_2_0")
remove_checksums_on_disk(node1, "non_replicated_mt", "201901_2_2_0_2")
remove_checksums_on_disk(node1, "non_replicated_mt", "201901_2_2_0")
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201901", settings={"check_query_single_value_result": 0}) == "201901_2_2_0_2\t0\tCheck of part finished with error: \\'Cannot read all data. Bytes read: 2. Bytes expected: 16.\\'\n"
assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201901", settings={"check_query_single_value_result": 0}) == "201901_2_2_0\t0\tCheck of part finished with error: \\'Cannot read all data. Bytes read: 2. Bytes expected: 16.\\'\n"
def test_check_replicated_table_simple(started_cluster):
......
......@@ -37,8 +37,8 @@ def test_allow_databases(start_cluster):
assert node5.query("SELECT name FROM system.databases WHERE name = 'db1'") == "db1\n"
assert node5.query("SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table' ") == "test_table\n"
assert node5.query("SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'") == "date\nk1\nv1\n"
assert node5.query("SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0_1\n"
assert node5.query("SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0_1\n20000101_20000101_1_1_0_1\n20000101_20000101_1_1_0_1\n"
assert node5.query("SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0\n"
assert node5.query("SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n"
assert node5.query("SELECT name FROM system.databases WHERE name = 'db1'", user="test_allow").strip() == ""
assert node5.query("SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table'", user="test_allow").strip() == ""
......
......@@ -16,4 +16,8 @@
<path>./clickhouse/</path>
<users_config>users.xml</users_config>
<dictionaries_config>/etc/clickhouse-server/config.d/*.xml</dictionaries_config>
<remote_url_allow_hosts>
<host>node1</host>
</remote_url_allow_hosts>
</yandex>
......@@ -182,3 +182,35 @@ def test_conflicting_name(started_cluster):
# old version still works
node3.query("select dictGetUInt8('test.conflicting_dictionary', 'SomeValue1', toUInt64(17))") == '17\n'
def test_http_dictionary_restrictions(started_cluster):
try:
node3.query("""
CREATE DICTIONARY test.restricted_http_dictionary (
id UInt64,
value String
)
PRIMARY KEY id
LAYOUT(FLAT())
SOURCE(HTTP(URL 'http://somehost.net' FORMAT TabSeparated))
LIFETIME(1)
""")
node3.query("SELECT dictGetString('test.restricted_http_dictionary', 'value', toUInt64(1))")
except QueryRuntimeException as ex:
assert 'is not allowed in config.xml' in str(ex)
def test_file_dictionary_restrictions(started_cluster):
try:
node3.query("""
CREATE DICTIONARY test.restricted_file_dictionary (
id UInt64,
value String
)
PRIMARY KEY id
LAYOUT(FLAT())
SOURCE(FILE(PATH '/usr/bin/cat' FORMAT TabSeparated))
LIFETIME(1)
""")
node3.query("SELECT dictGetString('test.restricted_file_dictionary', 'value', toUInt64(1))")
except QueryRuntimeException as ex:
assert 'is not inside' in str(ex)
......@@ -24,6 +24,5 @@ def test_file_path_escaping(started_cluster):
node.query('''INSERT INTO test.`T.a_b,l-e!` VALUES (1);''')
node.query('''ALTER TABLE test.`T.a_b,l-e!` FREEZE;''')
node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0_1/%7EId.bin"])
node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/shadow/1/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0_1/%7EId.bin"])
node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin"])
node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/shadow/1/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin"])
......@@ -10,7 +10,12 @@ path_to_userfiles_from_defaut_config = "/var/lib/clickhouse/user_files/" # sho
def start_cluster():
try:
cluster.start()
yield cluster
except Exception as ex:
print(ex)
raise ex
finally:
cluster.shutdown()
......@@ -57,6 +62,7 @@ def test_linear_structure(start_cluster):
test_requests = [("file{0..9}", "10"),
("file?", "10"),
("nothing*", "0"),
("file{0..9}{0..9}{0..9}", "10"),
("file???", "10"),
("file*", "20"),
......@@ -92,7 +98,7 @@ def test_deep_structure(start_cluster):
for i in range(10):
for j in range(10):
for k in range(10):
files.append("directory1/big_dir/file"+str(i)+str(j)+str(k))
files.append("directory1/big_dir/file" + str(i) + str(j) + str(k))
for dir in dirs:
files.append(dir+"file")
......@@ -119,3 +125,5 @@ def test_table_function(start_cluster):
node.exec_in_container(['bash', '-c', 'touch {}some/path/to/data.CSV'.format(path_to_userfiles_from_defaut_config)])
node.query("insert into table function file('some/path/to/data.CSV', CSV, 'n UInt8, s String') select number, concat('str_', toString(number)) from numbers(100000)")
assert node.query("select count() from file('some/path/to/data.CSV', CSV, 'n UInt8, s String')").rstrip() == '100000'
node.query("insert into table function file('nonexist.csv', 'CSV', 'val1 UInt32') values (1)")
assert node.query("select * from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip()== '1'
......@@ -146,11 +146,11 @@ def test_inserts_batching(started_cluster):
# 4. Full batch of inserts after ALTER (that have different block structure).
# 5. What was left to insert with the column structure before ALTER.
expected = '''\
20000101_20000101_1_1_0_1\t[1]
20000101_20000101_2_2_0_2\t[2,3,4]
20000101_20000101_3_3_0_3\t[5,6,7]
20000101_20000101_4_4_0_4\t[10,11,12]
20000101_20000101_5_5_0_5\t[8,9]
20000101_20000101_1_1_0\t[1]
20000101_20000101_2_2_0\t[2,3,4]
20000101_20000101_3_3_0\t[5,6,7]
20000101_20000101_4_4_0\t[10,11,12]
20000101_20000101_5_5_0\t[8,9]
'''
assert TSV(result) == TSV(expected)
......
......@@ -65,30 +65,30 @@ def partition_complex_assert_checksums():
" | sort" \
" | uniq"
checksums = "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition/19700102_2_2_0_2/k.bin\n" \
"082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition/19700201_1_1_0_1/v1.bin\n" \
"13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition/19700102_2_2_0_2/minmax_p.idx\n" \
"25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition/19700102_2_2_0_2/partition.dat\n" \
"3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition/19700201_1_1_0_1/partition.dat\n" \
"37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition/19700102_2_2_0_2/checksums.txt\n" \
"38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition/19700102_2_2_0_2/v1.bin\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0_2/k.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0_2/p.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0_2/v1.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0_1/k.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0_1/p.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0_1/v1.mrk\n" \
"55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition/19700201_1_1_0_1/primary.idx\n" \
"5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition/19700201_1_1_0_1/checksums.txt\n" \
"77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition/19700102_2_2_0_2/columns.txt\n" \
"77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition/19700201_1_1_0_1/columns.txt\n" \
"88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition/19700201_1_1_0_1/p.bin\n" \
"9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition/19700102_2_2_0_2/primary.idx\n" \
"c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition/19700102_2_2_0_2/count.txt\n" \
"c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition/19700201_1_1_0_1/count.txt\n" \
"cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition/19700102_2_2_0_2/p.bin\n" \
"e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition/19700201_1_1_0_1/k.bin\n" \
"f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition/19700201_1_1_0_1/minmax_p.idx\n"
checksums = "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition/19700102_2_2_0/k.bin\n" \
"082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition/19700201_1_1_0/v1.bin\n" \
"13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition/19700102_2_2_0/minmax_p.idx\n" \
"25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition/19700102_2_2_0/partition.dat\n" \
"3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition/19700201_1_1_0/partition.dat\n" \
"37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition/19700102_2_2_0/checksums.txt\n" \
"38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition/19700102_2_2_0/v1.bin\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0/k.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0/p.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0/v1.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0/k.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0/p.mrk\n" \
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0/v1.mrk\n" \
"55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition/19700201_1_1_0/primary.idx\n" \
"5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition/19700201_1_1_0/checksums.txt\n" \
"77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition/19700102_2_2_0/columns.txt\n" \
"77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition/19700201_1_1_0/columns.txt\n" \
"88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition/19700201_1_1_0/p.bin\n" \
"9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition/19700102_2_2_0/primary.idx\n" \
"c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition/19700102_2_2_0/count.txt\n" \
"c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition/19700201_1_1_0/count.txt\n" \
"cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition/19700102_2_2_0/p.bin\n" \
"e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition/19700201_1_1_0/k.bin\n" \
"f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition/19700201_1_1_0/minmax_p.idx\n"
assert TSV(exec_bash(cmd).replace(' ', '\t')) == TSV(checksums)
......@@ -149,7 +149,7 @@ def test_cannot_attach_active_part(cannot_attach_active_part_table):
assert 0 <= error.find('Invalid part name')
res = q("SElECT name FROM system.parts WHERE table='attach_active' AND database='test' ORDER BY name")
assert TSV(res) == TSV('0_1_1_0_1\n1_2_2_0_2\n2_3_3_0_3\n3_4_4_0_4')
assert TSV(res) == TSV('0_1_1_0\n1_2_2_0\n2_3_3_0\n3_4_4_0')
assert TSV(q("SElECT count(), sum(n) FROM test.attach_active")) == TSV('16\t120')
......@@ -171,30 +171,30 @@ def test_attach_check_all_parts(attach_check_all_parts_table):
q("ALTER TABLE test.attach_partition DETACH PARTITION 0")
path_to_detached = path_to_data + 'data/test/attach_partition/detached/'
exec_bash('mkdir {}'.format(path_to_detached + '0_5_5_0_5'))
exec_bash('cp -pr {} {}'.format(path_to_detached + '0_1_1_0_1', path_to_detached + 'attaching_0_6_6_0_6'))
exec_bash('cp -pr {} {}'.format(path_to_detached + '0_3_3_0_3', path_to_detached + 'deleting_0_7_7_0_7'))
exec_bash('mkdir {}'.format(path_to_detached + '0_5_5_0'))
exec_bash('cp -pr {} {}'.format(path_to_detached + '0_1_1_0', path_to_detached + 'attaching_0_6_6_0'))
exec_bash('cp -pr {} {}'.format(path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0'))
error = instance.client.query_and_get_error("ALTER TABLE test.attach_partition ATTACH PARTITION 0")
assert 0 <= error.find('No columns in part 0_5_5_0')
parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name")
assert TSV(parts) == TSV('1_2_2_0_2\n1_4_4_0_4')
assert TSV(parts) == TSV('1_2_2_0\n1_4_4_0')
detached = q("SELECT name FROM system.detached_parts "
"WHERE table='attach_partition' AND database='test' ORDER BY name")
assert TSV(detached) == TSV('0_1_1_0_1\n0_3_3_0_3\n0_5_5_0_5\nattaching_0_6_6_0_6\ndeleting_0_7_7_0_7')
assert TSV(detached) == TSV('0_1_1_0\n0_3_3_0\n0_5_5_0\nattaching_0_6_6_0\ndeleting_0_7_7_0')
exec_bash('rm -r {}'.format(path_to_detached + '0_5_5_0_5'))
exec_bash('rm -r {}'.format(path_to_detached + '0_5_5_0'))
q("ALTER TABLE test.attach_partition ATTACH PARTITION 0")
parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name")
expected = '0_5_5_0_5\n0_6_6_0_6\n1_2_2_0_2\n1_4_4_0_4'
expected = '0_5_5_0\n0_6_6_0\n1_2_2_0\n1_4_4_0'
assert TSV(parts) == TSV(expected)
assert TSV(q("SElECT count(), sum(n) FROM test.attach_partition")) == TSV('16\t120')
detached = q("SELECT name FROM system.detached_parts "
"WHERE table='attach_partition' AND database='test' ORDER BY name")
assert TSV(detached) == TSV('attaching_0_6_6_0_6\ndeleting_0_7_7_0_7')
assert TSV(detached) == TSV('attaching_0_6_6_0\ndeleting_0_7_7_0')
@pytest.fixture
......@@ -217,28 +217,27 @@ def test_drop_detached_parts(drop_detached_parts_table):
q("ALTER TABLE test.drop_detached DETACH PARTITION 1")
path_to_detached = path_to_data + 'data/test/drop_detached/detached/'
exec_bash('mkdir {}'.format(path_to_detached + 'attaching_0_6_6_0_6'))
exec_bash('mkdir {}'.format(path_to_detached + 'deleting_0_7_7_0_7'))
exec_bash('mkdir {}'.format(path_to_detached + 'attaching_0_6_6_0'))
exec_bash('mkdir {}'.format(path_to_detached + 'deleting_0_7_7_0'))
exec_bash('mkdir {}'.format(path_to_detached + 'any_other_name'))
exec_bash('mkdir {}'.format(path_to_detached + 'prefix_1_2_2_0_2'))
exec_bash('mkdir {}'.format(path_to_detached + 'prefix_1_2_2_0_0'))
error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART '../1_2_2_0_2'", settings=s)
error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART '../1_2_2_0'", settings=s)
assert 0 <= error.find('Invalid part name')
q("ALTER TABLE test.drop_detached DROP DETACHED PART '0_1_1_0_1'", settings=s)
q("ALTER TABLE test.drop_detached DROP DETACHED PART '0_1_1_0'", settings=s)
error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART 'attaching_0_6_6_0_6'", settings=s)
error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART 'attaching_0_6_6_0'", settings=s)
assert 0 <= error.find('Cannot drop part')
error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART 'deleting_0_7_7_0_7'", settings=s)
error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART 'deleting_0_7_7_0'", settings=s)
assert 0 <= error.find('Cannot drop part')
q("ALTER TABLE test.drop_detached DROP DETACHED PART 'any_other_name'", settings=s)
detached = q("SElECT name FROM system.detached_parts WHERE table='drop_detached' AND database='test' ORDER BY name")
assert TSV(detached) == TSV('0_3_3_0_3\n1_2_2_0_2\n1_4_4_0_4\nattaching_0_6_6_0_6\ndeleting_0_7_7_0_7\nprefix_1_2_2_0_2')
assert TSV(detached) == TSV('0_3_3_0\n1_2_2_0\n1_4_4_0\nattaching_0_6_6_0\ndeleting_0_7_7_0\nprefix_1_2_2_0_0')
q("ALTER TABLE test.drop_detached DROP DETACHED PARTITION 1", settings=s)
detached = q("SElECT name FROM system.detached_parts WHERE table='drop_detached' AND database='test' ORDER BY name")
assert TSV(detached) == TSV('0_3_3_0_3\nattaching_0_6_6_0_6\ndeleting_0_7_7_0_7')
assert TSV(detached) == TSV('0_3_3_0\nattaching_0_6_6_0\ndeleting_0_7_7_0')
......@@ -14,6 +14,14 @@
</force_index_by_date>
</constraints>
</default>
<readonly_profile>
<readonly>1</readonly>
</readonly_profile>
<no_dll_profile>
<allow_ddl>0</allow_ddl>
</no_dll_profile>
</profiles>
<users>
......@@ -25,6 +33,22 @@
<profile>default</profile>
<quota>default</quota>
</default>
<readonly_user>
<password></password>
<networks incl="networks" replace="replace">
<ip>::/0</ip>
</networks>
<profile>readonly_profile</profile>
<quota>default</quota>
</readonly_user>
<no_dll_user>
<password></password>
<networks incl="networks" replace="replace">
<ip>::/0</ip>
</networks>
<profile>no_dll_profile</profile>
<quota>default</quota>
</no_dll_user>
</users>
<quotas>
......
......@@ -29,68 +29,112 @@ def test_system_settings(started_cluster):
"readonly\t0\t\\N\t\\N\t0\n"
def test_read_only_constraint(started_cluster):
# Change a setting for session with SET.
assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
"0\n"
expected_error = "Setting force_index_by_date should not be changed"
assert expected_error in instance.query_and_get_error("SET force_index_by_date=1")
# Change a setting for query with SETTINGS.
assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
"0\n"
def test_system_constraints(started_cluster):
assert_query_settings(instance, "SELECT 1",
settings={'readonly': 0},
exception="Cannot modify 'readonly'",
user="readonly_user")
assert expected_error in instance.query_and_get_error(
"SELECT value FROM system.settings WHERE name='force_index_by_date' "
"SETTINGS force_index_by_date=1")
assert_query_settings(instance, "SELECT 1",
settings={'allow_ddl': 1},
exception="Cannot modify 'allow_ddl'",
user="no_dll_user")
def test_min_constraint(started_cluster):
# Change a setting for session with SET.
assert instance.query("SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"10000000000\n"
def test_read_only_constraint(started_cluster):
# Default value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='force_index_by_date'",
settings={},
result="0")
assert instance.query("SET max_memory_usage=5000000000;\n"
"SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"5000000000\n"
# Invalid value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='force_index_by_date'",
settings={'force_index_by_date': 1},
result=None,
exception="Setting force_index_by_date should not be changed")
expected_error = "Setting max_memory_usage shouldn't be less than 5000000000"
assert expected_error in instance.query_and_get_error("SET max_memory_usage=4999999999")
# Change a setting for query with SETTINGS.
assert instance.query("SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"10000000000\n"
def test_min_constraint(started_cluster):
# Default value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'",
{},
result="10000000000")
assert instance.query("SET max_memory_usage=5000000001;\n"
"SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"5000000001\n"
# Valid value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'",
settings={'max_memory_usage': 5000000000},
result="5000000000")
assert expected_error in instance.query_and_get_error(
"SELECT value FROM system.settings WHERE name='max_memory_usage' "
"SETTINGS max_memory_usage=4999999999")
# Invalid value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'",
settings={'max_memory_usage': 4999999999},
result=None,
exception="Setting max_memory_usage shouldn't be less than 5000000000")
def test_max_constraint(started_cluster):
# Change a setting for session with SET.
assert instance.query("SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"10000000000\n"
assert instance.query("SET max_memory_usage=20000000000;\n"
"SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"20000000000\n"
expected_error = "Setting max_memory_usage shouldn't be greater than 20000000000"
assert expected_error in instance.query_and_get_error("SET max_memory_usage=20000000001")
# Change a setting for query with SETTINGS.
assert instance.query("SELECT value FROM system.settings WHERE name='max_memory_usage'") ==\
"10000000000\n"
assert instance.query("SELECT value FROM system.settings WHERE name='max_memory_usage' "
"SETTINGS max_memory_usage=19999999999") == "19999999999\n"
assert expected_error in instance.query_and_get_error(
"SELECT value FROM system.settings WHERE name='max_memory_usage' "
"SETTINGS max_memory_usage=20000000001")
\ No newline at end of file
# Default value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'",
{},
result="10000000000")
# Valid value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'",
settings={'max_memory_usage': 20000000000},
result="20000000000")
# Invalid value
assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'",
settings={'max_memory_usage': 20000000001},
result=None,
exception="Setting max_memory_usage shouldn't be greater than 20000000000")
def assert_query_settings(instance, query, settings, result=None, exception=None, user=None):
"""
Try and send the query with custom settings via all available methods:
1. TCP Protocol with settings packet
2. HTTP Protocol with settings params
3. TCP Protocol with session level settings
4. TCP Protocol with query level settings
"""
if not settings:
settings = {}
# tcp level settings
if exception:
assert exception in instance.query_and_get_error(query, settings=settings, user=user)
else:
assert instance.query(query, settings=settings, user=user).strip() == result
# http level settings
if exception:
assert exception in instance.http_query(query, params=settings, user=user)
else:
assert instance.http_query(query, params=settings, user=user).strip() == result
# session level settings
queries = ""
for k, v in settings.items():
queries += "SET {}={};\n".format(k, v)
queries += query
if exception:
assert exception in instance.query_and_get_error(queries, user=user)
else:
assert instance.query(queries, user=user).strip() == result
if settings:
query += " SETTINGS "
for ix, (k, v) in enumerate(settings.items()):
query += "{} = {}".format(k, v)
if ix != len(settings) - 1:
query += ", "
if exception:
assert exception in instance.query_and_get_error(query, user=user)
else:
assert instance.query(query, user=user).strip() == result
*** Not partitioned ***
Parts before OPTIMIZE:
tuple() all_1_1_0_1
tuple() all_2_2_0_2
tuple() all_1_1_0
tuple() all_2_2_0
Parts after OPTIMIZE:
tuple() all_1_2_1_2
tuple() all_1_2_1
Sum before DETACH PARTITION:
15
Sum after DETACH PARTITION:
0
system.detached_parts after DETACH PARTITION:
default not_partitioned all all_1_2_1_2 default 1 2 1
default not_partitioned all all_1_2_1 default 1 2 1
*** Partitioned by week ***
Parts before OPTIMIZE:
1999-12-27 19991227_1_1_0_1
2000-01-03 20000103_2_2_0_2
2000-01-03 20000103_3_3_0_3
1999-12-27 19991227_1_1_0
2000-01-03 20000103_2_2_0
2000-01-03 20000103_3_3_0
Parts after OPTIMIZE:
1999-12-27 19991227_1_1_0_1
2000-01-03 20000103_2_3_1_3
1999-12-27 19991227_1_1_0
2000-01-03 20000103_2_3_1
Sum before DROP PARTITION:
15
Sum after DROP PARTITION:
12
*** Partitioned by a (Date, UInt8) tuple ***
Parts before OPTIMIZE:
(\'2000-01-01\',1) 20000101-1_1_1_0_1
(\'2000-01-01\',1) 20000101-1_5_5_0_5
(\'2000-01-01\',2) 20000101-2_2_2_0_2
(\'2000-01-02\',1) 20000102-1_3_3_0_3
(\'2000-01-02\',1) 20000102-1_4_4_0_4
(\'2000-01-01\',1) 20000101-1_1_1_0
(\'2000-01-01\',1) 20000101-1_5_5_0
(\'2000-01-01\',2) 20000101-2_2_2_0
(\'2000-01-02\',1) 20000102-1_3_3_0
(\'2000-01-02\',1) 20000102-1_4_4_0
Parts after OPTIMIZE:
(\'2000-01-01\',1) 20000101-1_1_5_1_5
(\'2000-01-01\',2) 20000101-2_2_2_0_2
(\'2000-01-02\',1) 20000102-1_3_4_1_4
(\'2000-01-01\',1) 20000101-1_1_5_1
(\'2000-01-01\',2) 20000101-2_2_2_0
(\'2000-01-02\',1) 20000102-1_3_4_1
Sum before DETACH PARTITION:
15
Sum after DETACH PARTITION:
9
*** Partitioned by String ***
Parts before OPTIMIZE:
bbb 7d878f3d88441d2b3dc371e2a3050f6d_2_2_0_2
bbb 7d878f3d88441d2b3dc371e2a3050f6d_3_3_0_3
aaa 9b50856126a8a6064f11f027d455bf58_1_1_0_1
aaa 9b50856126a8a6064f11f027d455bf58_4_4_0_4
bbb 7d878f3d88441d2b3dc371e2a3050f6d_2_2_0
bbb 7d878f3d88441d2b3dc371e2a3050f6d_3_3_0
aaa 9b50856126a8a6064f11f027d455bf58_1_1_0
aaa 9b50856126a8a6064f11f027d455bf58_4_4_0
Parts after OPTIMIZE:
bbb 7d878f3d88441d2b3dc371e2a3050f6d_2_2_0_2
bbb 7d878f3d88441d2b3dc371e2a3050f6d_3_3_0_3
aaa 9b50856126a8a6064f11f027d455bf58_1_4_1_4
bbb 7d878f3d88441d2b3dc371e2a3050f6d_2_2_0
bbb 7d878f3d88441d2b3dc371e2a3050f6d_3_3_0
aaa 9b50856126a8a6064f11f027d455bf58_1_4_1
Sum before DROP PARTITION:
15
Sum after DROP PARTITION:
8
*** Table without columns with fixed size ***
Parts:
1 1_1_1_0_1 2
2 2_2_2_0_2 2
1 1_1_1_0 2
2 2_2_2_0 2
Before DROP PARTITION:
a
aa
......
......@@ -17,7 +17,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutat
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
sleep 0.1
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0_1', '20010101_2_2_0_2'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation'"
......@@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE x = 1"
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0_1', '20010101_2_2_0_2'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
sleep 0.1
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
......
19701001_1_1_0_1 0
19701002_2_2_0_2 0
19701003_3_3_0_3 0
19701001_1_1_0 0
19701002_2_2_0 0
19701003_3_3_0 0
freeze one
19701001_1_1_0_1 0
19701002_2_2_0_2 1
19701003_3_3_0_3 0
19701001_1_1_0 0
19701002_2_2_0 1
19701003_3_3_0 0
freeze all
19701001_1_1_0_1 1
19701002_2_2_0_2 1
19701003_3_3_0_3 1
19701001_1_1_0 1
19701002_2_2_0 1
19701003_3_3_0 1
1970-10-01 00:00:01
1970-10-02 00:00:01
1970-10-02 00:00:02
1970-10-03 00:00:01
19701001_1_1_0_1 1
19701002_2_2_0_2 1
19701002_4_4_0_4 0
19701003_3_3_0_3 1
19701001_1_1_0 1
19701002_2_2_0 1
19701002_4_4_0 0
19701003_3_3_0 1
201901_1_1_0_1 1
201901_1_1_0 1
========
201901_1_1_0_1 1
201901_2_2_0_2 1
201901_1_1_0 1
201901_2_2_0 1
========
201901_1_2_1_2 1
201901_1_2_1 1
========
201901_1_2_1_2 1
201902_3_3_0_3 1
201901_1_2_1 1
201902_3_3_0 1
========
201902_3_4_1_4 1
201902_3_4_1 1
20000101_20000101_1_1_0_1 test_00961 1c63ae7a38eb76e2a71c28aaf0b3ae4d 0053df9b467cc5483e752ec62e91cfd4 da96ff1e527a8a1f908ddf2b1d0af239
20000101_20000101_1_1_0 test_00961 1c63ae7a38eb76e2a71c28aaf0b3ae4d 0053df9b467cc5483e752ec62e91cfd4 da96ff1e527a8a1f908ddf2b1d0af239
......@@ -15,7 +15,7 @@ ALTER TABLE table_01 DETACH PARTITION ID '20191001';
SELECT COUNT() FROM table_01;
ALTER TABLE table_01 ATTACH PART '20191001_1_1_0_1';
ALTER TABLE table_01 ATTACH PART '20191001_1_1_0';
SELECT COUNT() FROM table_01;
......
all_1_1_0_1 1
all_1_1_0_1 1
all_1_1_0_1 1
all_1_1_0_1 1
all_1_1_0_1 1
all_2_2_0_2 1
all_1_2_1_2 1
all_1_1_0 1
all_1_1_0 1
all_1_1_0 1
all_1_1_0 1
all_1_1_0 1
all_2_2_0 1
all_1_2_1 1
DROP DATABASE IF EXISTS dictdb;
CREATE DATABASE dictdb ENGINE=Ordinary;
CREATE DICTIONARY dictdb.restricted_dict (
key UInt64,
value String
)
PRIMARY KEY key
SOURCE(EXECUTABLE(COMMAND 'echo -E "1\thello"' FORMAT TabSeparated))
LIFETIME(MIN 0 MAX 1)
LAYOUT(CACHE(SIZE_IN_CELLS 10));
-- because of lazy load we can check only in dictGet query
select dictGetString('dictdb.restricted_dict', 'value', toUInt64(1)); -- {serverError 482}
select 'Ok.';
DROP DICTIONARY IF EXISTS dictdb.restricted_dict;
DROP DATABASE IF EXISTS dictdb;
4999950000
0 ['0_1_1_0','1_2_2_0'] 2
5000050000
1 [] 0
4999950000
0 ['0_0_0_0','1_0_0_0'] 2
5000050000
1 [] 0
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
. $CURDIR/mergetree_mutations.lib
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS table_for_mutations"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE table_for_mutations(k UInt32, v1 UInt64) ENGINE MergeTree ORDER BY k PARTITION BY modulo(k, 2)"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES"
${CLICKHOUSE_CLIENT} --query="INSERT INTO table_for_mutations select number, number from numbers(100000)"
${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM table_for_mutations"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE table_for_mutations UPDATE v1 = v1 + 1 WHERE 1"
${CLICKHOUSE_CLIENT} --query="SELECT is_done, parts_to_do_names, parts_to_do FROM system.mutations where table = 'table_for_mutations'"
${CLICKHOUSE_CLIENT} --query="SYSTEM START MERGES"
wait_for_mutation "table_for_mutations" "mutation_3.txt"
${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM table_for_mutations"
${CLICKHOUSE_CLIENT} --query="SELECT is_done, parts_to_do_names, parts_to_do FROM system.mutations where table = 'table_for_mutations'"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS table_for_mutations"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS replicated_table_for_mutations"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE replicated_table_for_mutations(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/replicated_table_for_mutations', '1') ORDER BY k PARTITION BY modulo(k, 2)"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES"
${CLICKHOUSE_CLIENT} --query="INSERT INTO replicated_table_for_mutations select number, number from numbers(100000)"
${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM replicated_table_for_mutations"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE replicated_table_for_mutations UPDATE v1 = v1 + 1 WHERE 1"
${CLICKHOUSE_CLIENT} --query="SELECT is_done, parts_to_do_names, parts_to_do FROM system.mutations where table = 'replicated_table_for_mutations'"
${CLICKHOUSE_CLIENT} --query="SYSTEM START MERGES"
wait_for_mutation "replicated_table_for_mutations" "0000000000"
${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM replicated_table_for_mutations"
${CLICKHOUSE_CLIENT} --query="SELECT is_done, parts_to_do_names, parts_to_do FROM system.mutations where table = 'replicated_table_for_mutations'"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS replicated_table_for_mutations"
DROP TABLE IF EXISTS Alpha;
DROP TABLE IF EXISTS Beta;
CREATE TABLE Alpha (foo String, bar UInt64) ENGINE = Memory;
CREATE TABLE Beta (foo LowCardinality(String), baz UInt64) ENGINE = Memory;
INSERT INTO Alpha VALUES ('a', 1);
INSERT INTO Beta VALUES ('a', 2), ('b', 3);
SELECT * FROM Alpha FULL JOIN (SELECT 'b' as foo) USING (foo);
SELECT * FROM Alpha FULL JOIN Beta USING (foo);
SELECT * FROM Alpha FULL JOIN Beta ON Alpha.foo = Beta.foo;
DROP TABLE Alpha;
DROP TABLE Beta;
......@@ -113,4 +113,17 @@ Features:
- Refactorings.
- Search and Navigation.
### Looker
[Looker](https://looker.com) is a data platform and business intelligence tool with support for 50+ database dialects including ClickHouse. Looker is available as a SaaS platform and self-hosted. Users
can use Looker via the browser to explore data, build visualizations and dashboards, schedule reports, and share their
insights with colleagues. Looker provides a rich set of tools to embed these features in other applications, and an API
to integrate data with other applications.
Features:
- Designed around ease of use and self-service for end users.
- Easy and agile development using LookML, a language which supports curated
[Data Modeling](https://looker.com/platform/data-modeling) to support report writers and end users.
- Powerful workflow integration via Looker's [Data Actions](https://looker.com/platform/actions).
[Original article](https://clickhouse.yandex/docs/en/interfaces/third-party/gui/) <!--hide-->
......@@ -72,6 +72,8 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
## Details of Implementation
- Multiple `SELECT` queries can be performed concurrently, but `INSERT` queries will wait for each other.
- Creating a new file with an `INSERT` query is supported.
- If the file already exists, `INSERT` appends new values to it.
- Not supported:
- `ALTER`
- `SELECT ... SAMPLE`
......
......@@ -310,9 +310,8 @@ ClickHouse использует небольшое подмножество фу
## 7. Development support.
### 7.1. ICU in submodules.
### 7.1. + ICU in submodules.
[Ivan Lezhankin](https://github.com/abyss7).
Adding it to submodules is also needed for Arcadia (7.26).
### 7.2. LLVM in submodules.
......@@ -396,7 +395,7 @@ Wolf Kreuzerkrieg. Возможно, его уже не интересует э
Instead, it is suggested to study the prototype of the Kilo text editor as an example: https://viewsourcecode.org/snaptoken/kilo/ and implement all the required functionality.
### 7.15. Replacing libressl back with openssl.
### 7.15. + Replacing libressl back with openssl.
The reason for using libressl was a wish of a good friend of ours from a well-known company several years ago. But by now openssl keeps evolving while libressl does not really, so we can safely switch back.
......@@ -421,10 +420,12 @@ Wolf Kreuzerkrieg. Возможно, его уже не интересует э
Right now people have to make several clicks to download them.
[Ivan Lezhankin](https://github.com/abyss7) or [Alexander Sapin](https://github.com/alesapin).
### 7.19. Finish (verify) the automated build for AArch64.
### 7.19. + Finish (verify) the automated build for AArch64.
https://github.com/ClickHouse/ClickHouse/issues/8027#issuecomment-566670282
Verified on a real Huawei server, as well as in a special Docker container that contains qemu-user-static inside.
It can also be checked on Cavium, on a Raspberry Pi, and on your Android phone.
We are checking that it works on Cavium and on a Raspberry Pi.
[Ivan Lezhankin](https://github.com/abyss7).
### 7.20. Automated build for FreeBSD x86_64.
......
......@@ -69,6 +69,8 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
## Details of Implementation
- Multiple `SELECT` queries can run concurrently, while `INSERT` queries run only sequentially.
- An `INSERT` query may create a file that does not exist yet.
- For existing files, `INSERT` appends to the end of the file.
- Not supported:
- `ALTER` and `SELECT ... SAMPLE` operations;
- indices;
......
......@@ -189,7 +189,7 @@ SELECT geohashDecode('ezs42') AS res
└─────────────────────────────────┘
```
## h3IsValid
## h3IsValid {#h3isvalid}
Checks whether an H3 index is valid.
......@@ -234,7 +234,7 @@ h3GetResolution(h3index)
**Returned values**
- Grid resolution, from 0 to 15.
- An arbitrary value may be returned for a non-existent identifier; use [h3IsValid](#h3IsValid) to validate identifiers
- An arbitrary value may be returned for a non-existent identifier; use [h3IsValid](#h3isvalid) to validate identifiers
Type: [UInt8](../../data_types/int_uint.md).
......