diff --git a/.clang-format b/.clang-format
index 06c3d164ee..7ddeba8128 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,6 +1,6 @@
BasedOnStyle: WebKit
Language: Cpp
-AlignAfterOpenBracket: false
+AlignAfterOpenBracket: AlwaysBreak
BreakBeforeBraces: Custom
BraceWrapping:
AfterClass: true
@@ -25,7 +25,7 @@ Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2
KeepEmptyLinesAtTheStartOfBlocks: false
-AllowShortFunctionsOnASingleLine: Empty
+AllowShortFunctionsOnASingleLine: InlineOnly
AlwaysBreakTemplateDeclarations: true
IndentCaseLabels: true
SpaceAfterTemplateKeyword: true
diff --git a/.gitignore b/.gitignore
index 8359edbabd..585a407476 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@
/build
/build_*
+/build-*
/docs/build
/docs/edit
/docs/tools/venv/
diff --git a/.gitmodules b/.gitmodules
index 035359e759..7ea7ae94b6 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -52,3 +52,6 @@
[submodule "contrib/boost"]
path = contrib/boost
url = https://github.com/ClickHouse-Extras/boost-extra.git
+[submodule "contrib/base64"]
+ path = contrib/base64
+ url = https://github.com/aklomp/base64.git
diff --git a/.travis.yml b/.travis.yml.bak
similarity index 100%
rename from .travis.yml
rename to .travis.yml.bak
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 99d022ea79..4e81432edd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,1112 +1,1125 @@
-## ClickHouse release 18.14.11, 2018-10-29
-
-### Bug fixes:
-
-* Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [#2156](https://github.com/yandex/ClickHouse/issues/2156)
-* Fixed errors when merging data in tables containing arrays inside Nested structures. [#3397](https://github.com/yandex/ClickHouse/pull/3397)
-* Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [#3429](https://github.com/yandex/ClickHouse/pull/3429)
-* Fixed an error on inserts to a Distributed table in Native format. [#3411](https://github.com/yandex/ClickHouse/issues/3411)
-
-## ClickHouse release 18.14.10, 2018-10-23
-
-* The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [#3410](https://github.com/yandex/ClickHouse/pull/3410)
-* The `enable_optimize_predicate_expression` setting is disabled by default.
-
-## ClickHouse release 18.14.9, 2018-10-16
-
-### New features:
-
-* The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available). [#3172](https://github.com/yandex/ClickHouse/pull/3172)
-* Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/yandex/ClickHouse/pull/2770)
-* Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/yandex/ClickHouse/pull/3210)
-* Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [#3146](https://github.com/yandex/ClickHouse/pull/3146)
-* Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [#3362](https://github.com/yandex/ClickHouse/pull/3362)
-* Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/yandex/ClickHouse/pull/3144)
-* Parallel processing is available for multiple `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3208)
-* Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`) [#3321](https://github.com/yandex/ClickHouse/pull/3321)
-* Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [#3251](https://github.com/yandex/ClickHouse/pull/3251)
-* Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [#3153](https://github.com/yandex/ClickHouse/pull/3153)
-
-### Experimental features:
-
-* Optimization of the `GROUP BY` clause for `LowCardinality` data types. [#3138](https://github.com/yandex/ClickHouse/pull/3138)
-* Optimized calculation of expressions for `LowCardinality` data types. [#3200](https://github.com/yandex/ClickHouse/pull/3200)
-
-### Improvements:
-
-* Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [#3205](https://github.com/yandex/ClickHouse/pull/3205)
-* If the `JOIN` type is not specified (`LEFT`, `INNER`, ...), `INNER JOIN` is assumed. [#3147](https://github.com/yandex/ClickHouse/pull/3147)
-* Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3202)
-* The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/yandex/ClickHouse/pull/3210)
-* The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
-* Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message "File ... already exists", and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [#3101](https://github.com/yandex/ClickHouse/pull/3101)
-* `LIKE` and `IN` expressions with a constant right-hand side are passed to the remote server when querying from MySQL or ODBC tables. [#3182](https://github.com/yandex/ClickHouse/pull/3182)
-* Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [#3182](https://github.com/yandex/ClickHouse/pull/3182)
-* Correct calculation of row width in the terminal for `Pretty` formats, including strings with wide (for example, CJK) characters. [Amos Bird](https://github.com/yandex/ClickHouse/pull/3257).
-* `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
-* Improved performance for reading data in `JSONEachRow` format. [#3332](https://github.com/yandex/ClickHouse/pull/3332)
-* Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [#3306](https://github.com/yandex/ClickHouse/pull/3306)
-* Added the `TIMESTAMP` synonym for the `DateTime` type. [#3390](https://github.com/yandex/ClickHouse/pull/3390)
-* There is always space reserved for `query_id` in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
-* A query's memory consumption is logged each time it exceeds another whole number of gigabytes. [#3205](https://github.com/yandex/ClickHouse/pull/3205)
-* Added compatibility mode for the case when a client library that uses the Native protocol mistakenly sends fewer columns than the server expects for an INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [#3171](https://github.com/yandex/ClickHouse/pull/3171)
-* In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [#3166](https://github.com/yandex/ClickHouse/pull/3166)
-* The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3215).
-* Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto` format. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3216)
-* The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/yandex/ClickHouse/pull/3219)
-* In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [#3330](https://github.com/yandex/ClickHouse/pull/3330)
-* Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/yandex/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
-* Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [#3281](https://github.com/yandex/ClickHouse/pull/3281)
-* Support for the `Decimal` data type in external dictionaries. [#3324](https://github.com/yandex/ClickHouse/pull/3324)
-* Support for the `Decimal` data type in `SummingMergeTree` tables. [#3348](https://github.com/yandex/ClickHouse/pull/3348)
-* Added specializations for `UUID` in `if`. [#3366](https://github.com/yandex/ClickHouse/pull/3366)
-* Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [#3283](https://github.com/yandex/ClickHouse/pull/3283)
-* A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/yandex/ClickHouse/pull/3375)
-
-### Bug fixes:
-
-* Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [#1702](https://github.com/yandex/ClickHouse/pull/1702)
-* Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [#3362](https://github.com/yandex/ClickHouse/pull/3362)
-* Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [#3331](https://github.com/yandex/ClickHouse/pull/3331) [#3341](https://github.com/yandex/ClickHouse/pull/3341)
-* If after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn't be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [#3194](https://github.com/yandex/ClickHouse/pull/3194)
-* Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3344)
-* Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [#3247](https://github.com/yandex/ClickHouse/pull/3247)
-* Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the request if the `JOIN` is only performed on remote servers. [#3340](https://github.com/yandex/ClickHouse/pull/3340)
-* Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3215).
-* For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/yandex/ClickHouse/pull/3150)
-* If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect to another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn't start. [8218cf3a](https://github.com/yandex/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
-* If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/yandex/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
-* Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [#3229](https://github.com/yandex/ClickHouse/pull/3229)
-* Corrected type conversion between `Decimal` and integer numbers. [#3211](https://github.com/yandex/ClickHouse/pull/3211)
-* Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3231)
-* Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [#3155](https://github.com/yandex/ClickHouse/pull/3155)
-* Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/yandex/ClickHouse/pull/3163)
-* Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [#3165](https://github.com/yandex/ClickHouse/pull/3165)
-* Bug fixes in the `ALTER UPDATE` query.
-* Fixed bugs in the `odbc` table function that appeared in version 18.12. [#3197](https://github.com/yandex/ClickHouse/pull/3197)
-* Fixed the operation of aggregate functions with `StateArray` combinators. [#3188](https://github.com/yandex/ClickHouse/pull/3188)
-* Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/yandex/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
-* Fixed output of types for operations using `Decimal` and integer arguments. [#3224](https://github.com/yandex/ClickHouse/pull/3224)
-* Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/yandex/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
-* The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [#3241](https://github.com/yandex/ClickHouse/pull/3241)
-* Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/yandex/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
-* Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [#3254](https://github.com/yandex/ClickHouse/pull/3254)
-* Fixed handling of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [#3263](https://github.com/yandex/ClickHouse/pull/3263)
-* Fixed an error when using `FINAL` with `PREWHERE`. [#3298](https://github.com/yandex/ClickHouse/pull/3298)
-* Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [#3298](https://github.com/yandex/ClickHouse/pull/3298)
-* Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [#3337](https://github.com/yandex/ClickHouse/pull/3337)
-* Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [#3357](https://github.com/yandex/ClickHouse/pull/3357)
-* Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/yandex/ClickHouse/pull/3339)
-* Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [#3351](https://github.com/yandex/ClickHouse/pull/3351) [#3352](https://github.com/yandex/ClickHouse/pull/3352)
-
-### Backward incompatible changes:
-
-* Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is now available by default. [#3329](https://github.com/yandex/ClickHouse/pull/3329)
-
-## ClickHouse release 18.12.17, 2018-09-16
-
-### New features:
-
-* `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [#3126](https://github.com/yandex/ClickHouse/pull/3126)
-* Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/yandex/ClickHouse/pull/3123)
-* The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [#3129](https://github.com/yandex/ClickHouse/pull/3129)
-* The `Decimal` type now supports mathematical functions (`exp`, `sin`, and so on). [#3129](https://github.com/yandex/ClickHouse/pull/3129)
-* The `system.part_log` table now has the `partition_id` column. [#3089](https://github.com/yandex/ClickHouse/pull/3089)
-
-### Bug fixes:
-
-* `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3159)
-* Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [#3130](https://github.com/yandex/ClickHouse/pull/3130)
-* Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3107)
-* Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [#3122](https://github.com/yandex/ClickHouse/pull/3122)
-
-### Backward incompatible changes:
-
-* The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3107)
-
-## ClickHouse release 18.12.14, 2018-09-13
-
-### New features:
-
-* Added support for `ALTER UPDATE` queries. [#3035](https://github.com/yandex/ClickHouse/pull/3035)
-* Added the `allow_ddl` option, which restricts the user's access to DDL queries. [#3104](https://github.com/yandex/ClickHouse/pull/3104)
-* Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using `O_DIRECT`). [#3117](https://github.com/yandex/ClickHouse/pull/3117)
-* The `system.merges` system table now contains the `partition_id` column. [#3099](https://github.com/yandex/ClickHouse/pull/3099)
-
-### Improvements:
-
-* If a data part remains unchanged during mutation, it isn't downloaded by replicas. [#3103](https://github.com/yandex/ClickHouse/pull/3103)
-* Autocomplete is available for names of settings when working with `clickhouse-client`. [#3106](https://github.com/yandex/ClickHouse/pull/3106)
-
-### Bug fixes:
-
-* Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [#3118](https://github.com/yandex/ClickHouse/pull/3118)
-* Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
-* Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3098)
-* Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3097)
-
-## ClickHouse release 18.12.13, 2018-09-10
-
-### New features:
-
-* Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [#2846](https://github.com/yandex/ClickHouse/pull/2846) [#2970](https://github.com/yandex/ClickHouse/pull/2970) [#3008](https://github.com/yandex/ClickHouse/pull/3008) [#3047](https://github.com/yandex/ClickHouse/pull/3047)
-* New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [#2948](https://github.com/yandex/ClickHouse/pull/2948)
-* In queries with `JOIN`, the asterisk expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 at the user configuration level. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2787)
-* Added support for JOIN with table functions. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2907)
-* Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/yandex/ClickHouse/pull/2447)
-* Ctrl+C in clickhouse-client clears a query that was entered. [#2877](https://github.com/yandex/ClickHouse/pull/2877)
-* Added the `join_default_strictness` setting (values: `''`, `'any'`, `'all'`). This allows you to omit `ANY` or `ALL` for `JOIN`. [#2982](https://github.com/yandex/ClickHouse/pull/2982)
-* Each line of the server log related to query processing shows the query ID. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and written, both with and without page cache). [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
-* The `system.metrics` and `system.events` tables now have built-in documentation. [#3016](https://github.com/yandex/ClickHouse/pull/3016)
-* Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2975)
-* Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/yandex/ClickHouse/pull/2942)
-* Added the `retention` aggregate function. [Sundy Li](https://github.com/yandex/ClickHouse/pull/2887)
-* Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [#3062](https://github.com/yandex/ClickHouse/pull/3062) [#3034](https://github.com/yandex/ClickHouse/pull/3034)
-* Tables in the MergeTree family now have the virtual column `_partition_id`. [#3089](https://github.com/yandex/ClickHouse/pull/3089)
-
-### Experimental features:
-
-* Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [#2830](https://github.com/yandex/ClickHouse/pull/2830)
-* Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [#2990](https://github.com/yandex/ClickHouse/pull/2990) [#3077](https://github.com/yandex/ClickHouse/pull/3077)
-
-### Improvements:
-
-* Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
-* Improved performance of `GROUP BY` with multiple aggregation fields when one of them is a string and the others are fixed-length.
-* Improved performance when using `PREWHERE` and with implicit movement of expressions to `PREWHERE`.
-* Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/yandex/ClickHouse/pull/2977) [#2980](https://github.com/yandex/ClickHouse/pull/2980)
-* Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2955)
-* Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [#2953](https://github.com/yandex/ClickHouse/pull/2953)
-* Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [#2938](https://github.com/yandex/ClickHouse/pull/2938)
-* Conditions are pushed down into the view query (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2907)
-* Improvements to the functionality for the `UUID` data type. [#3074](https://github.com/yandex/ClickHouse/pull/3074) [#2985](https://github.com/yandex/ClickHouse/pull/2985)
-* The `UUID` data type is supported in external dictionaries. [#2822](https://github.com/yandex/ClickHouse/pull/2822)
-* The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2974)
-* When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/yandex/ClickHouse/pull/2958)
-* For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [#2920](https://github.com/yandex/ClickHouse/pull/2920)
-* The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/yandex/ClickHouse/pull/2971)
-* You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`. [#2840](https://github.com/yandex/ClickHouse/pull/2840)
-* You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [#2840](https://github.com/yandex/ClickHouse/pull/2840)
-* Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn't happen as often.
-* The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2801)
-* Duplicate columns can be used in a `USING` clause for `JOIN`. [#3006](https://github.com/yandex/ClickHouse/pull/3006)
-* `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [#3003](https://github.com/yandex/ClickHouse/pull/3003)
-* The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2885)
-* Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/yandex/ClickHouse/pull/2909)
-* The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
-* `ALTER DELETE` queries work for materialized views.
-* Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
-* Support for `ATTACH TABLE ... ON CLUSTER` queries. [#3025](https://github.com/yandex/ClickHouse/pull/3025)
-
-### Bug fixes:
-
-* Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [#2913](https://github.com/yandex/ClickHouse/issues/2913)
-* Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [#3049](https://github.com/yandex/ClickHouse/pull/3049)
-* Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [#3038](https://github.com/yandex/ClickHouse/pull/3038)
-* Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [#2939](https://github.com/yandex/ClickHouse/pull/2939) [#2949](https://github.com/yandex/ClickHouse/pull/2949) [#2964](https://github.com/yandex/ClickHouse/pull/2964)
-* Fixed a segfault during `JOIN ... ON`. [#3000](https://github.com/yandex/ClickHouse/pull/3000)
-* Fixed the error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [#2994](https://github.com/yandex/ClickHouse/pull/2994)
-* Fixed the "Not found column" error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [#3087](https://github.com/yandex/ClickHouse/pull/3087)
-* Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [#2226](https://github.com/yandex/ClickHouse/pull/2226) [#3037](https://github.com/yandex/ClickHouse/pull/3037) [#3055](https://github.com/yandex/ClickHouse/pull/3055) [#3065](https://github.com/yandex/ClickHouse/pull/3065) [#3073](https://github.com/yandex/ClickHouse/pull/3073) [#3090](https://github.com/yandex/ClickHouse/pull/3090) [#3093](https://github.com/yandex/ClickHouse/pull/3093)
-* Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [#2910](https://github.com/yandex/ClickHouse/pull/2910)
-* Fixed the incorrect result when comparing `nan` with integers. [#3024](https://github.com/yandex/ClickHouse/pull/3024)
-* Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [#2854](https://github.com/yandex/ClickHouse/pull/2854)
-* Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [#3084](https://github.com/yandex/ClickHouse/pull/3084)
-* Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
-* Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as other tuples. [#2989](https://github.com/yandex/ClickHouse/pull/2989)
-* Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2960)
-* Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [#2845](https://github.com/yandex/ClickHouse/pull/2845) [#2937](https://github.com/yandex/ClickHouse/pull/2937)
-* The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2929)
-* Fixed the segfault when re-initializing the ZooKeeper session. [#2917](https://github.com/yandex/ClickHouse/pull/2917)
-* Fixed potential blocking when working with ZooKeeper.
-* Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
-* When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/yandex/ClickHouse/pull/2808)
-
-### Security fix:
-
-* Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [#2828](https://github.com/yandex/ClickHouse/pull/2828) [#2879](https://github.com/yandex/ClickHouse/pull/2879) [#2886](https://github.com/yandex/ClickHouse/pull/2886) [#2893](https://github.com/yandex/ClickHouse/pull/2893) [#2921](https://github.com/yandex/ClickHouse/pull/2921)
-* Fixed incorrect validation of the file path in the `catBoostPool` table function. [#2894](https://github.com/yandex/ClickHouse/pull/2894)
-* The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2856)
-
-### Backward incompatible changes:
-
-* In queries with `JOIN`, the asterisk expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 at the user configuration level.
-
-### Build changes:
-
-* Most integration tests can now be run per commit.
-* Code style checks can also be run per commit.
-* The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/yandex/ClickHouse/pull/2912)
-* When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [#2957](https://github.com/yandex/ClickHouse/pull/2957)
-* Debug builds use the `jemalloc` debug option.
-* The interface of the library for interacting with ZooKeeper is declared abstract. [#2950](https://github.com/yandex/ClickHouse/pull/2950)
-
-## ClickHouse release 18.10.3, 2018-08-13
-
-### New features:
-
-* HTTPS can be used for replication. [#2760](https://github.com/yandex/ClickHouse/pull/2760)
-* Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [#2791](https://github.com/yandex/ClickHouse/pull/2791)
-* Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [#2834](https://github.com/yandex/ClickHouse/pull/2834)
-* Support for `UUID` in key columns.
-
-### Improvements:
-
-* Clusters can be removed without restarting the server when they are deleted from the config files. [#2777](https://github.com/yandex/ClickHouse/pull/2777)
-* External dictionaries can be removed without restarting the server when they are deleted from the config files. [#2779](https://github.com/yandex/ClickHouse/pull/2779)
-* Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/yandex/ClickHouse/pull/2781)
-* Improvements for the `UUID` data type (not yet complete). [#2618](https://github.com/yandex/ClickHouse/pull/2618)
-* Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [#2815](https://github.com/yandex/ClickHouse/pull/2815)
-* Old records of completed mutations are deleted (`ALTER DELETE`). [#2784](https://github.com/yandex/ClickHouse/pull/2784)
-* Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/yandex/ClickHouse/pull/2841)
-* The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2851)
-* Added the `max_partition_size_to_drop` config option. [#2782](https://github.com/yandex/ClickHouse/pull/2782)
-* Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/yandex/ClickHouse/pull/2812)
-* Added the `max_fetch_partition_retries_count` setting. [#2831](https://github.com/yandex/ClickHouse/pull/2831)
-* Added the `prefer_localhost_replica` setting, which allows disabling the preference for a local replica (when the preference is enabled, the local replica is used without inter-process interaction). [#2832](https://github.com/yandex/ClickHouse/pull/2832)
-* The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/yandex/ClickHouse/pull/2855)
-
-### Bug fixes:
-
-* Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
-* Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [#2814](https://github.com/yandex/ClickHouse/pull/2814)
-* Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [#2930](https://github.com/yandex/ClickHouse/pull/2930)
-* Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2823)
-* Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a sub-query if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2094)
-* Fixed a memory leak if an exception occurred when connecting to a MySQL server.
-* Fixed incorrect clickhouse-client response code in case of a request error.
-* Fixed incorrect behavior of materialized views containing DISTINCT. [#2795](https://github.com/yandex/ClickHouse/issues/2795)
-
-### Backward incompatible changes:
-
-* Removed support for `CHECK TABLE` queries for `Distributed` tables.
-
-### Build changes:
-
-* The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. Under highly concurrent loads, CPU usage in userspace and in the kernel shows just a slight increase. [#2773](https://github.com/yandex/ClickHouse/pull/2773)
-* Use of libressl from a submodule. [#1983](https://github.com/yandex/ClickHouse/pull/1983) [#2807](https://github.com/yandex/ClickHouse/pull/2807)
-* Use of unixodbc from a submodule. [#2789](https://github.com/yandex/ClickHouse/pull/2789)
-* Use of mariadb-connector-c from a submodule. [#2785](https://github.com/yandex/ClickHouse/pull/2785)
-* Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).
-
-## ClickHouse release 18.6.0, 2018-08-02
-
-### New features:
-
-* Added support for ON expressions for the JOIN ON syntax:
-`JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
-The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. Fully qualified column names (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) are supported for the right table (see the sketch after this list). [#2742](https://github.com/yandex/ClickHouse/pull/2742)
-* HTTPS can be enabled for replication. [#2760](https://github.com/yandex/ClickHouse/pull/2760)
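-
-For illustration, a minimal sketch of the `JOIN ON` syntax described above (the tables `t1` and `t2` and their columns are hypothetical):
-`SELECT t1.key, t2.value FROM t1 JOIN t2 ON t1.key = t2.key AND t1.a + 1 = t2.b`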
-
-### Improvements:
-
-* The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [#2646](https://github.com/yandex/ClickHouse/pull/2646)
-
-## ClickHouse release 18.5.1, 2018-07-31
-
-### New features:
-
-* Added the hash function `murmurHash2_32` [#2756](https://github.com/yandex/ClickHouse/pull/2756).
-
-### Improvements:
-
-* Now you can use the `from_env` attribute to set values in config files from environment variables [#2741](https://github.com/yandex/ClickHouse/pull/2741).
-* Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [#2752](https://github.com/yandex/ClickHouse/pull/2752).
-
-### Bug fixes:
-
-* Fixed a possible bug when starting a replica [#2759](https://github.com/yandex/ClickHouse/pull/2759).
-
-## ClickHouse release 18.4.0, 2018-07-28
-
-### New features:
-
-* Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721).
-* Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [#2708](https://github.com/yandex/ClickHouse/pull/2708).
-* Support for `HTTP Basic` authentication in the replication protocol [#2727](https://github.com/yandex/ClickHouse/pull/2727).
-* The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699).
-* Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701).
-
-### Improvements:
-
-* The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [#2694](https://github.com/yandex/ClickHouse/pull/2694).
-* The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
-* Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689).
-
-### Bug fixes:
-
-* Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
-* Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735).
-* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
-* Fixed server crash when using the `countArray()` aggregate function.
-
-### Backward incompatible changes:
-
-* Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''` (see the sketch below).
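-
-For illustration, a sketch of such an edit (the broker, topic, and other parameter values are hypothetical): the engine clause in `table.sql` would change from
-`Kafka('broker:9092', 'topic', 'group', 'JSONEachRow', 'schema.capnp', 2)` to
-`Kafka('broker:9092', 'topic', 'group', 'JSONEachRow', '', 'schema.capnp', 2)`.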
-
-## ClickHouse release 18.1.0, 2018-07-23
-
-### New features:
-
-* Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([#2634](https://github.com/yandex/ClickHouse/pull/2634)).
-* Support for arbitrary types for the `uniq*` family of aggregate functions ([#2010](https://github.com/yandex/ClickHouse/issues/2010)).
-* Support for arbitrary types in comparison operators ([#2026](https://github.com/yandex/ClickHouse/issues/2026)).
-* The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([#2637](https://github.com/yandex/ClickHouse/pull/2637)).
-* Added the `arrayDistinct` function ([#2670](https://github.com/yandex/ClickHouse/pull/2670)).
-* The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/yandex/ClickHouse/pull/2566)).
-
-### Improvements:
-
-* Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backwards compatible, unless otherwise stated in the changelog.
-* Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2664)).
-* If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/yandex/ClickHouse/pull/2669)).
-
-### Bug fixes:
-
-* Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2624)).
-* Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/yandex/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
-* Fixed an error during a CAST to Nullable types ([#1322](https://github.com/yandex/ClickHouse/issues/1322)).
-* Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2657)).
-* Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/yandex/ClickHouse/pull/2663)).
-* Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([#2571](https://github.com/yandex/ClickHouse/issues/2571)).
-* Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/yandex/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
-* Added missing quoting of identifiers for queries to an external DBMS ([#2635](https://github.com/yandex/ClickHouse/issues/2635)).
-
-### Backward incompatible changes:
-
-* Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
-
-## ClickHouse release 1.1.54394, 2018-07-12
-
-### New features:
-
-* Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2521)).
-* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2600)).
-
-### Bug fixes:
-
-* Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there was load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
-* Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
-* The `has` function now works correctly for an array with Nullable elements ([#2115](https://github.com/yandex/ClickHouse/issues/2115)).
-* The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were requested from the table.
-* Fixed how an empty `TinyLog` table works after inserting an empty data block ([#2563](https://github.com/yandex/ClickHouse/issues/2563)).
-* The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
-
-## ClickHouse release 1.1.54390, 2018-07-06
-
-### New features:
-
-* Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/yandex/ClickHouse/pull/2490)).
-* Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2574)).
-* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2599)).
-
-### Improvements:
-
-* Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([#2584](https://github.com/yandex/ClickHouse/pull/2584)).
-* Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
-* Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([#2573](https://github.com/yandex/ClickHouse/pull/2573)).
-* Added `Nullable` support for the `runningDifference` function ([#2594](https://github.com/yandex/ClickHouse/pull/2594)).
-* Improved query analysis performance when there is a very large number of expressions ([#2572](https://github.com/yandex/ClickHouse/pull/2572)).
-* Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([#2597](https://github.com/yandex/ClickHouse/pull/2597)).
-* The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/yandex/ClickHouse/pull/2593)).
-
-### Bug fixes:
-
-* Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
-* Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
-* Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2553)).
-* Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([#2603](https://github.com/yandex/ClickHouse/pull/2603)).
-* Fixed segfault if `macros` are used but they aren't in the config file ([#2570](https://github.com/yandex/ClickHouse/pull/2570)).
-* Fixed switching to the default database when reconnecting the client ([#2583](https://github.com/yandex/ClickHouse/pull/2583)).
-* Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
-
-### Security fix:
-
-* Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
-
-## ClickHouse release 1.1.54388, 2018-06-28
-
-### New features:
-
-* Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of these queries.
-* Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
-* Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2260))
-* Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
-* Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2294)).
-* Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2501)).
-* Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2352)).
-* New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/yandex/ClickHouse/pull/2429)).
-* The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2535)).
-* The password to `clickhouse-client` can be entered interactively.
-* Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/yandex/ClickHouse/pull/2459)).
-* Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2472)).
-* Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/yandex/ClickHouse/pull/2263))
-* Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats (see the example after this list).
-* Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
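-
-For example, to opt in to the lenient DateTime parsing described above for the current session:
-`SET date_time_input_format = 'best_effort'`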
-
-### Experimental features:
-
-* Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/yandex/ClickHouse/pull/2272))
-* JIT compilation to native code is now available for some expressions ([pyos](https://github.com/yandex/ClickHouse/pull/2277)).
-
-### Bug fixes:
-
-* Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
-* Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
-* Fixed an error when reading an array column from a Nested structure ([#2066](https://github.com/yandex/ClickHouse/issues/2066)).
-* Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
-* Fixed an error when analyzing queries with recursive aliases.
-* Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([#2525](https://github.com/yandex/ClickHouse/issues/2525)).
-* User profile settings were not applied when using sessions in the HTTP interface.
-* Fixed how settings are applied from the command line parameters in clickhouse-local.
-* The ZooKeeper client library now uses the session timeout received from the server.
-* Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
-* Fixed pruning of parts for queries with conditions on partition key columns ([#2342](https://github.com/yandex/ClickHouse/issues/2342)).
-* Merges are now possible after `CLEAR COLUMN IN PARTITION` ([#2315](https://github.com/yandex/ClickHouse/issues/2315)).
-* Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/yandex/ClickHouse/pull/2268)).
-* Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/yandex/ClickHouse/pull/2400)).
-* Fixed syntactic parsing and formatting of the `CAST` operator.
-* Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2411)).
-* Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/yandex/ClickHouse/pull/2448)).
-* Fixed SSRF in the remote() table function.
-* Fixed exit behavior of `clickhouse-client` in multiline mode ([#2510](https://github.com/yandex/ClickHouse/issues/2510)).
-
-### Improvements:
-
-* Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/yandex/ClickHouse/pull/1722)).
-* Improved LZ4 compression performance.
-* Faster analysis for queries with a large number of JOINs and subqueries.
-* The DNS cache is now updated automatically when there are too many network errors.
-* Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
-* Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
-* Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
-* A server with replicated tables can start even if you haven't configured ZooKeeper.
-* When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/yandex/ClickHouse/pull/2325)).
-* Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/yandex/ClickHouse/pull/2421)).
-
-### Build changes:
-
-* The gcc8 compiler can be used for builds.
-* Added the ability to build llvm from submodule.
-* The version of the librdkafka library has been updated to v0.11.4.
-* Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
-* Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2274)).
-* CMake now generates files for ninja by default (like when using `-G Ninja`).
-* Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/yandex/ClickHouse/pull/2519)).
-* Fixed a header file conflict in Fedora Rawhide ([#2520](https://github.com/yandex/ClickHouse/issues/2520)).
-
-### Backward incompatible changes:
-
-* Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
-* If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster.
-
-## ClickHouse release 1.1.54385, 2018-06-01
-
-### Bug fixes:
-
-* Fixed an error that in some cases caused ZooKeeper operations to block.
-
-## ClickHouse release 1.1.54383, 2018-05-22
-
-### Bug fixes:
-
-* Fixed a slowdown of replication queue if a table has many replicas.
-
-## ClickHouse release 1.1.54381, 2018-05-14
-
-### Bug fixes:
-
-* Fixed a nodes leak in ZooKeeper when ClickHouse loses connection to ZooKeeper server.
-
-## ClickHouse release 1.1.54380, 2018-04-21
-
-### New features:
-
-* Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: first `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
-
-### Improvements:
-
-* Subqueries can be wrapped in parentheses to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
-* Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
-
-### Bug fixes:
-
-* Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
-* Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
-* Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
-* Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
-* Fixed freezing of `KILL QUERY`.
-* Fixed an error in ZooKeeper client library which led to loss of watches, freezing of distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
-
-### Backward incompatible changes:
-
-* Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
-
-## ClickHouse release 1.1.54378, 2018-04-16
-
-### New features:
-
-* Logging level can be changed without restarting the server.
-* Added the `SHOW CREATE DATABASE` query.
-* The `query_id` can be passed to `clickhouse-client` (elBroom).
-* New setting: `max_network_bandwidth_for_all_users`.
-* Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
-* Added information about the size of data parts in uncompressed form in the system table.
-* Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
-* Table-level configuration for the `ReplicatedMergeTree` family to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
-* Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
-* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
-* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was cancelled` exception instead of an incomplete result.
-
-### Improvements:
-
-* `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
-* `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
-* A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
-* The `lengthUTF8` function runs faster (zhang2014).
-* Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
-* The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
-* More robust crash recovery for asynchronous insertion into `Distributed` tables.
-* The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
-
-### Bug fixes:
-
-* Fixed an error with `IN` when the left side of the expression is `Nullable`.
-* Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
-* The `max_execution_time` limit now works correctly with distributed queries.
-* Fixed errors when calculating the size of composite columns in the `system.columns` table.
-* Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`.
-* Fixed errors in `StorageKafka` (#2075).
-* Fixed server crashes from invalid arguments of certain aggregate functions.
-* Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
-* `Too many parts` state is less likely to happen when inserting into aggregated materialized views (#2084).
-* Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
-* Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
-* `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
-* Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
-
-### Build changes:
-
-* The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
-* Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names are still available in the repositories for backward compatibility.
-
-### Backward incompatible changes:
-
-* Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`; see the example after this list.
-* Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
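-
-For example, the `arr IN (set)` expression from the first item above can be rewritten as follows (a sketch assuming a hypothetical table `t` with an array column `arr`):
-
-```sql
--- Returns 1 if at least one element of arr belongs to the set (1, 2):
-SELECT arrayExists(x -> x IN (1, 2), arr) FROM t;
-```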
-
-## ClickHouse release 1.1.54370, 2018-03-16
-
-### New features:
-
-* Added the `system.macros` table and auto updating of macros when the config file is changed.
-* Added the `SYSTEM RELOAD CONFIG` query.
-* Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the "maximum" interval. ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2012)).
-
-### Improvements:
-
-* When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
-* Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
-
-### Bug fixes:
-
-* Fixed the `Illegal PREWHERE` error when reading from Merge tables for `Distributed` tables.
-* Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
-* Fixed a race condition when reading from the `system.parts_columns` table.
-* Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to timeout.
-* Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
-* Fixed incorrect dates in the `system.parts` table.
-* Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
-* Fixed the vertical merging algorithm for an empty `ORDER BY` table.
-* Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
-* Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
-* Removed extraneous error-level logging of `Not found column ... in block`.
-
-## ClickHouse release 1.1.54362, 2018-03-11
-
-### New features:
-
-* Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
-* Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
-* Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
-* An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
-* Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
-* Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
-* Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
-* Added the `arrayCumSum` function (Javi Santana).
-* Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats.
-* Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
-* Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
-* The `remote` and `cluster` table functions can be used in `INSERT` requests.
-* Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
-* Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
-* Added additional information about merges in the `system.part_log` table.
-* An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
-* The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
-* Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
-* Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
-* Added the `system_profile` configuration parameter for the settings used by internal processes.
-* Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
-* Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
-* Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
-* Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
-* Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
-* Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
-* Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
-* Added the `VersionedCollapsingMergeTree` table engine.
-* Support for rows and arbitrary numeric types for the `library` dictionary source.
-* `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`; see the sketch after this list).
-* A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
-* `RENAME TABLE` can be performed for `VIEW`.
-* Added the `throwIf` function.
-* Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
-* The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
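-
-A sketch of the `ORDER BY tuple()` form mentioned above, assuming hypothetical table and column names:
-
-```sql
--- A MergeTree table without a primary key: ORDER BY tuple() means "no sort key".
-CREATE TABLE events
-(
-    d Date,
-    message String
-)
-ENGINE = MergeTree
-PARTITION BY toYYYYMM(d)
-ORDER BY tuple();
-```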
-
-### Improvements:
-
-* Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
-* Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
-* Added the `allow_distributed_ddl` option.
-* Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
-* Files with substitutions from `config.d` directories are loaded in alphabetical order.
-* Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
-* The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
-* When running a query, table functions run only once. Previously, the `remote` and `mysql` table functions performed the same query twice to retrieve the table structure from a remote server.
-* The `MkDocs` documentation generator is used.
-* When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
-* Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
-* `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, the corresponding numeric values are used. This feature was previously available but was lost in release 1.1.54337.
-* Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
-
-### Bug fixes:
-
-* Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
-* Fixed a bug in merges for `ReplacingMergeTree` tables.
-* Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
-* Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
-* Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`.
-* Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
-* Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
-* Fixed the `DROP DATABASE` query for `Dictionary` databases.
-* Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
-* Fixed the calculation of implicit default values when it is necessary to simultaneously calculate explicit default expressions in `INSERT` queries (zhang2014).
-* Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
-* Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
-* Fixed a slight performance regression with functions that use regular expressions.
-* Fixed a performance regression when creating multidimensional arrays from complex expressions.
-* Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
-* Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
-* Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
-* Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
-* Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables.
-* Fixed a bug when using `ALIAS` columns in `Distributed` tables.
-* Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
-* Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
-* Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
-* Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
-* Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
-
-### Backward incompatible changes:
-
-* Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
-* Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
-* Removed the `UnsortedMergeTree` engine.
-
-## ClickHouse release 1.1.54343, 2018-02-05
-
-* Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
-* Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
-* Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
-
-## ClickHouse release 1.1.54342, 2018-01-22
-
-This release contains bug fixes for the previous release 1.1.54337:
-
-* Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
-* Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
-* Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
-* Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
-* Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
-* Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
-* Fixed a bug in implementation of NULL.
-
-## ClickHouse release 1.1.54337, 2018-01-18
-
-### New features:
-
-* Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables.
-* Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
-* Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`; see the sketch after this list. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected.
-* Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
-* Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
-* Added the `clickhouse format` tool for formatting queries.
-* Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
-* Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
-* Added a column with documentation for the `system.settings` table (Kirill Shvakov).
-* Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
-* Added the `system.models` table with information about loaded `CatBoost` machine learning models.
-* Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
-* Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
-* Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
-* The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
-* Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
-* Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov).
-* Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT...) (Kirill Shvakov).
-* Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
-* Added the `intExp3` and `intExp4` functions.
-* Added the `sumKahan` aggregate function.
-* Added the to*Number*OrNull functions (for example, `toUInt8OrNull`), where *Number* is a numeric type.
-* Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
-* Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
-* Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
-* The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
-* Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
-* Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
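-
-A sketch of the time zone support described above (table and column names are hypothetical):
-
-```sql
--- The annotation affects parsing and formatting in text formats.
-CREATE TABLE t (dt DateTime('Europe/Moscow')) ENGINE = TinyLog;
-
--- The new toTimeZone function converts the display time zone:
-SELECT toTimeZone(now(), 'Europe/Moscow');
-```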
-
-### Performance optimizations:
-
-* Improved performance of aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
-* Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
-* Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
-* Improved performance and precision of parsing floating point numbers.
-* Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
-* Improved performance of aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` at the cost of reduced numerical stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
-
-### Bug fixes:
-
-* Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
-* Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
-* Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
-* Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
-* Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
-* Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
-* Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
-* Various fixes for the `Kafka` engine (Marek Vavruša).
-* Fixed incorrect behavior of the `Join` table engine (Amos Bird).
-* Fixed incorrect allocator behavior under FreeBSD and OS X.
-* The `extractAll` function now supports empty matches.
-* Fixed an error that blocked usage of `libressl` instead of `openssl` .
-* Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
-* Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
-* Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod).
-* `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
-* Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
-* Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
-* Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
-* Redesigned support for `Nullable` types. Fixed bugs that may lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of `OR` operator, etc.
-* Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
-* Stricter checks for allowed combinations of composite columns.
-* Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
-* Fixed a bug in the `topK` aggregate function in a generic case.
-* Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
-* Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
-* Fixed the precision of the `exp10` function.
-* Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
-* Fixed the crash when incorrect data types are specified.
-* Fixed the behavior of `DISTINCT` in the case when all columns are constants.
-* Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
-* Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
-* Fixed a bug that leads to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
-* Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
-* Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
-* Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
-
-### Build improvements:
-
-* The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
-* A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
-* Added the `clickhouse-test` package. It can be used to run functional tests.
-* The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
-* Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
-* Added support for `Cap'n'Proto` in the default build.
-* Changed the format of documentation sources from `Restricted Text` to `Markdown`.
-* Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
-* For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
-* Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`.
-* Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
-
-### Backward incompatible changes:
-
-* The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
-* Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
-* The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
-* Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
-* Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
-* Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
-* In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
-* Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
-
-### Please note when upgrading:
-
-* When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
-* If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
-
-## ClickHouse release 1.1.54327, 2017-12-21
-
-This release contains bug fixes for the previous release 1.1.54318:
-
-* Fixed a bug with a possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don't see these messages in logs.
-
-## ClickHouse release 1.1.54318, 2017-11-30
-
-This release contains bug fixes for the previous release 1.1.54310:
-
-* Fixed incorrect row deletions during merges in the SummingMergeTree engine.
-* Fixed a memory leak in unreplicated MergeTree engines.
-* Fixed performance degradation with frequent inserts in MergeTree engines.
-* Fixed an issue that was causing the replication queue to stop running.
-* Fixed rotation and archiving of server logs.
-
-## ClickHouse release 1.1.54310, 2017-11-01
-
-### New features:
-
-* Custom partitioning key for the MergeTree family of table engines.
-* [Kafka](https://clickhouse.yandex/docs/en/single/index.html#document-table_engines/kafka) table engine.
-* Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
-* Added support for time zones with non-integer offsets from UTC.
-* Added support for arithmetic operations with time intervals.
-* The range of values for the Date and DateTime types is extended to the year 2105.
-* Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view; see the example after this list).
-* Added the `ATTACH TABLE` query without arguments.
-* The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly.
-* Max size of the IP trie dictionary is increased to 128M entries.
-* Added the getSizeOfEnumType function.
-* Added the sumWithOverflow aggregate function.
-* Added support for the Cap'n Proto input format.
-* You can now customize compression level when using the zstd algorithm.
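-
-An example of the `CREATE MATERIALIZED VIEW x TO y` form mentioned above (all table names are hypothetical); the destination table must already exist, and the view stores its data there:
-
-```sql
-CREATE MATERIALIZED VIEW hits_mv TO daily_hits
-AS SELECT EventDate, count() AS c FROM hits GROUP BY EventDate;
-```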
-
-### Backward incompatible changes:
-
-* Creation of temporary tables with an engine other than Memory is not allowed.
-* Explicit creation of tables with the View or MaterializedView engine is not allowed.
-* During table creation, a new check verifies that the sampling key expression is included in the primary key.
-
-### Bug fixes:
-
-* Fixed hangups when synchronously inserting into a Distributed table.
-* Fixed nonatomic adding and removing of parts in Replicated tables.
-* Data inserted into a materialized view is not subjected to unnecessary deduplication.
-* Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore.
-* Users don't need access permissions to the `default` database to create temporary tables anymore.
-* Fixed crashing when specifying the Array type without arguments.
-* Fixed hangups when the disk volume containing server logs is full.
-* Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch.
-
-### Build improvements:
-
-* Several third-party libraries (notably Poco) were updated and converted to git submodules.
-
-## ClickHouse release 1.1.54304, 2017-10-19
-
-### New features:
-
-* TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml`).
-
-### Bug fixes:
-
-* `ALTER` for replicated tables now tries to start running as soon as possible.
-* Fixed crashing when reading data with the setting `preferred_block_size_bytes=0`.
-* Fixed crashes of `clickhouse-client` when pressing `Page Down`.
-* Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL`.
-* `FREEZE PARTITION` always works atomically now.
-* Empty POST requests now return a response with code 411.
-* Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8))`.
-* Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables.
-* Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b`.
-* Users are updated correctly when `users.xml` is invalid.
-* Correct handling when an executable dictionary returns a non-zero response code.
-
-## ClickHouse release 1.1.54292, 2017-09-20
-
-### New features:
-
-* Added the `pointInPolygon` function for working with coordinates on a coordinate plane.
-* Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`.
-* Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers.
-* The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
-* Reduced the time needed for dynamic compilation of queries.
-
-### Bug fixes:
-
-* Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
-* Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
-* Removed excessive logging when restoring replicas.
-* Fixed an error in the UNION ALL implementation.
-* Fixed an error in the concat function that occurred if the first column in a block has the Array type.
-* Progress is now displayed correctly in the system.merges table.
-
-## ClickHouse release 1.1.54289, 2017-09-13
-
-### New features:
-
-* `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
-* Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
-* Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
-* Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
-* External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
-* External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters).
-* Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
-* Support for `DROP TABLE` for temporary tables.
-* Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
-* Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
-* FIFO locking is used during ALTER: an ALTER query isn't blocked indefinitely for continuously running queries.
-* Option to set `umask` in the config file.
-* Improved performance for queries with `DISTINCT`.
-
-### Bug fixes:
-
-* Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
-* Fixed randomization when choosing hosts for the connection to ZooKeeper.
-* Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
-* Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
-* Fixed an error that could cause SELECT queries to "hang".
-* Improvements to distributed DDL queries.
-* Fixed the query `CREATE TABLE ... AS`.
-* Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
-* Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
-* Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
-* Fixed segfault for the HEAD query.
-
-### Improved workflow for developing and assembling ClickHouse:
-
-* You can use `pbuilder` to build ClickHouse.
-* You can use `libc++` instead of `libstdc++` for builds on Linux.
-* Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`.
-
-### Please note when upgrading:
-
-* There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT requests will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>`, and restart the server.
-
-## ClickHouse release 1.1.54284, 2017-08-29
-
-* This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
-
-## ClickHouse release 1.1.54282, 2017-08-23
-
-This release contains bug fixes for the previous release 1.1.54276:
-
-* Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
-* Fixed parsing when inserting in RowBinary format if input data starts with `;`.
-* Fixed errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
-
-## ClickHouse release 1.1.54276, 2017-08-16
-
-### New features:
-
-* Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`.
-* INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert_distributed_sync=1.
-* Added the UUID data type for working with 16-byte identifiers.
-* Added aliases of CHAR, FLOAT and other types for compatibility with Tableau.
-* Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers.
-* You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
-* Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)`.
-* Added the max_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance; see the example after this list.
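-
-A usage sketch for the parameterized `groupArray` form above (hypothetical table `t` with column `x`); the parameter caps how many values are collected:
-
-```sql
-SELECT groupArray(2)(x) FROM t;  -- collects at most 2 values of x
-```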
-
-### Main changes:
-
-* Security improvements: all server files are created with 0640 permissions (can be changed via config parameter).
-* Improved error messages for queries with invalid syntax.
-* Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
-* Significantly increased the performance of data merges for the ReplacingMergeTree engine.
-* Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed_directory_monitor_batch_inserts=1.
-
-### Backward incompatible changes:
-
-* Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
-
-### Complete list of changes:
-
-* Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
-* Optimized stream allocation when reading from a Distributed table.
-* Settings can be configured in readonly mode if the value doesn't change.
-* Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred_block_size_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
-* Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr`.
-* Added new settings for MergeTree engines (the merge_tree section in config.xml):
- - replicated_deduplication_window_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables.
- - cleanup_delay_period sets how often to start cleanup to remove outdated data.
- - replicated_can_become_leader can prevent a replica from becoming the leader (and assigning merges).
-* Accelerated cleanup to remove outdated data from ZooKeeper.
-* Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed_ddl_task_timeout, which limits the time to wait for a response from the servers in the cluster.
-* Improved display of stack traces in the server logs.
-* Added the "none" value for the compression method.
-* You can use multiple dictionaries_config sections in config.xml.
-* It is possible to connect to MySQL through a socket in the file system.
-* The system.parts table has a new column with information about the size of marks, in bytes.
-
-### Bug fixes:
-
-* Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
-* Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
-* Fixed possible freezing on "leader election" when starting a server.
-* The max_replica_delay_for_distributed_queries setting was ignored when using a local replica of the data source. This has been fixed.
-* Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
-* Fixed an exception in the multiIf function when using empty arrays or strings.
-* Fixed excessive memory allocations when deserializing Native format.
-* Fixed incorrect auto-update of Trie dictionaries.
-* Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
-* Fixed a crash of GROUP BY when using distributed_aggregation_memory_efficient=1.
-* Now you can specify the database.table in the right side of IN and JOIN.
-* Too many threads were used for parallel aggregation. This has been fixed.
-* Fixed how the "if" function works with FixedString arguments.
-* SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
-* Running `CREATE VIEW IF EXISTS` no longer causes crashes.
-* Fixed incorrect behavior when input_format_skip_unknown_fields=1 is set and there are negative numbers.
-* Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
-* Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
-* Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
-* Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements.
-* Fixed the failover for dictionaries with MySQL as the source.
-
-### Improved workflow for developing and assembling ClickHouse:
-
-* Builds can be assembled in Arcadia.
-* You can use gcc 7 to compile ClickHouse.
-* Parallel builds using ccache+distcc are faster now.
-
-## ClickHouse release 1.1.54245, 2017-07-04
-
-### New features:
-
-* Distributed DDL (for example, `CREATE TABLE ON CLUSTER`)
-* The replicated request `ALTER TABLE CLEAR COLUMN IN PARTITION`.
-* The engine for Dictionary tables (access to dictionary data in the form of a table).
-* Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
-* You can check for updates to the dictionary by sending a request to the source.
-* Qualified column names.
-* Quoting identifiers using double quotation marks.
-* Sessions in the HTTP interface.
-* The OPTIMIZE query for a Replicated table can run not only on the leader.
-
-### Backward incompatible changes:
-
-* Removed SET GLOBAL.
-
-### Minor changes:
-
-* Now after an alert is triggered, the log prints the full stack trace.
-* Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
-
-### Bug fixes:
-
-* Fixed a bad connection "sticking" when inserting into a Distributed table.
-* GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
-* The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
-* Changes in how an executable source of cached external dictionaries works.
-* Fixed the comparison of strings containing null characters.
-* Fixed the comparison of Float32 primary key fields with constants.
-* Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
-* Fixed a crash when querying a Nullable column added to a table using ALTER.
-* Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
-* Fixed an ORDER BY subquery consisting of only constant values.
-* Previously, a Replicated table could remain in the invalid state after a failed DROP TABLE.
-* Aliases for scalar subqueries with empty results are no longer lost.
-* Now a query that used compilation does not fail with an error if the .so file gets damaged.
+## ClickHouse release 18.14.15, 2018-11-21
+
+### Bug fixes:
+* The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to "Memory limit exceeded" errors. The issue appeared in version 18.12.13. [#3589](https://github.com/yandex/ClickHouse/issues/3589)
+
+## ClickHouse release 18.14.14, 2018-11-20
+
+### Bug fixes:
+* Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [#3599](https://github.com/yandex/ClickHouse/pull/3599)
+
+### Build changes:
+* Fixed build problems (llvm-7 from the system, macOS). [#3582](https://github.com/yandex/ClickHouse/pull/3582)
+
+## ClickHouse release 18.14.11, 2018-10-29
+
+### Bug fixes:
+
+* Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [#2156](https://github.com/yandex/ClickHouse/issues/2156)
+* Fixed errors when merging data in tables containing arrays inside Nested structures. [#3397](https://github.com/yandex/ClickHouse/pull/3397)
+* Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [#3429](https://github.com/yandex/ClickHouse/pull/3429)
+* Fixed an error on inserts to a Distributed table in Native format. [#3411](https://github.com/yandex/ClickHouse/issues/3411)
+
+## ClickHouse release 18.14.10, 2018-10-23
+
+* The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [#3410](https://github.com/yandex/ClickHouse/pull/3410)
+* The `enable_optimize_predicate_expression` setting is disabled by default.
+
+## ClickHouse release 18.14.9, 2018-10-16
+
+### New features:
+
+* The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available; see the example after this list). [#3172](https://github.com/yandex/ClickHouse/pull/3172)
+* Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/yandex/ClickHouse/pull/2770)
+* Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/yandex/ClickHouse/pull/3210)
+* Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [#3146](https://github.com/yandex/ClickHouse/pull/3146)
+* Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [#3362](https://github.com/yandex/ClickHouse/pull/3362)
+* Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/yandex/ClickHouse/pull/3144)
+* Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3208)
+* Added the `SYSTEM FLUSH LOGS` query (forces flushing logs to system tables such as `query_log`) [#3321](https://github.com/yandex/ClickHouse/pull/3321)
+* Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [#3251](https://github.com/yandex/ClickHouse/pull/3251)
+* Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [#3153](https://github.com/yandex/ClickHouse/pull/3153)
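+
+A minimal sketch of the `WITH CUBE` modifier (the `visits` table and its columns are hypothetical):
+
+```sql
+-- Aggregates for every combination of the GROUP BY keys, including totals.
+SELECT region, device, count() AS hits
+FROM visits
+GROUP BY region, device WITH CUBE;
+
+-- The alternative syntax mentioned above should be equivalent:
+SELECT region, device, count() AS hits
+FROM visits
+GROUP BY CUBE(region, device);
+```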
+
+### Experimental features:
+
+* Optimization of the GROUP BY clause for `LowCardinality` data types. [#3138](https://github.com/yandex/ClickHouse/pull/3138)
+* Optimized calculation of expressions for `LowCardinality` data types. [#3200](https://github.com/yandex/ClickHouse/pull/3200)
+
+### Improvements:
+
+* Significantly reduced memory consumption for requests with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [#3205](https://github.com/yandex/ClickHouse/pull/3205)
+* If the `JOIN` type is not specified (`LEFT`, `INNER`, ...), `INNER JOIN` is assumed. [#3147](https://github.com/yandex/ClickHouse/pull/3147)
+* Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3202)
+* The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/yandex/ClickHouse/pull/3210)
+* The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
+* Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message "File ... already exists", and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [#3101](https://github.com/yandex/ClickHouse/pull/3101)
+* LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [#3182](https://github.com/yandex/ClickHouse/pull/3182)
+* Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [#3182](https://github.com/yandex/ClickHouse/pull/3182)
+* Correct calculation of row width in the terminal for `Pretty` formats, including strings with wide (e.g., CJK) characters. [Amos Bird](https://github.com/yandex/ClickHouse/pull/3257).
+* `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
+* Improved performance for reading data in `JSONEachRow` format. [#3332](https://github.com/yandex/ClickHouse/pull/3332)
+* Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [#3306](https://github.com/yandex/ClickHouse/pull/3306)
+* Added the `TIMESTAMP` synonym for the `DateTime` type. [#3390](https://github.com/yandex/ClickHouse/pull/3390)
+* There is always space reserved for query_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
+* Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [#3205](https://github.com/yandex/ClickHouse/pull/3205)
+* Added compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [#3171](https://github.com/yandex/ClickHouse/pull/3171)
+* In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [#3166](https://github.com/yandex/ClickHouse/pull/3166)
+* The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3215).
+* Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto` format. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3216)
+* The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/yandex/ClickHouse/pull/3219)
+* In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [#3330](https://github.com/yandex/ClickHouse/pull/3330)
+* Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/yandex/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
+* Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function (see the sketch after this list). [#3281](https://github.com/yandex/ClickHouse/pull/3281)
+* Support for the `Decimal` data type in external dictionaries. [#3324](https://github.com/yandex/ClickHouse/pull/3324)
+* Support for the `Decimal` data type in `SummingMergeTree` tables. [#3348](https://github.com/yandex/ClickHouse/pull/3348)
+* Added specializations for `UUID` in `if`. [#3366](https://github.com/yandex/ClickHouse/pull/3366)
+* Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [#3283](https://github.com/yandex/ClickHouse/pull/3283)
+* A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/yandex/ClickHouse/pull/3375)
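+
+A minimal sketch of `sumMap` keyed by `Date` (the table and data are hypothetical):
+
+```sql
+CREATE TABLE metrics (dt Array(Date), cnt Array(UInt64)) ENGINE = Memory;
+INSERT INTO metrics VALUES (['2018-10-01', '2018-10-02'], [1, 2]), (['2018-10-02'], [10]);
+-- Sums the values per key across rows: (['2018-10-01','2018-10-02'], [1, 12])
+SELECT sumMap(dt, cnt) FROM metrics;
+```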
+
+### Bug fixes:
+
+* Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [#1702](https://github.com/yandex/ClickHouse/pull/1702)
+* Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [#3362](https://github.com/yandex/ClickHouse/pull/3362)
+* Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [#3331](https://github.com/yandex/ClickHouse/pull/3331) [#3341](https://github.com/yandex/ClickHouse/pull/3341)
+* If after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn't be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [#3194](https://github.com/yandex/ClickHouse/pull/3194)
+* Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3344)
+* Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [#3247](https://github.com/yandex/ClickHouse/pull/3247)
+* Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the request if the `JOIN` is only performed on remote servers. [#3340](https://github.com/yandex/ClickHouse/pull/3340)
+* Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/yandex/ClickHouse/pull/3215).
+* For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/yandex/ClickHouse/pull/3150)
+* If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect to another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn't start. [8218cf3a](https://github.com/yandex/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
+* If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/yandex/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
+* Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [#3229](https://github.com/yandex/ClickHouse/pull/3229)
+* Corrected type conversion between `Decimal` and integer numbers. [#3211](https://github.com/yandex/ClickHouse/pull/3211)
+* Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3231)
+* Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [#3155](https://github.com/yandex/ClickHouse/pull/3155)
+* Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/yandex/ClickHouse/pull/3163)
+* Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [#3165](https://github.com/yandex/ClickHouse/pull/3165)
+* Bug fixes in the `ALTER UPDATE` query.
+* Fixed bugs in the `odbc` table function that appeared in version 18.12. [#3197](https://github.com/yandex/ClickHouse/pull/3197)
+* Fixed the operation of aggregate functions with `StateArray` combinators. [#3188](https://github.com/yandex/ClickHouse/pull/3188)
+* Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/yandex/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
+* Fixed output of types for operations using `Decimal` and integer arguments. [#3224](https://github.com/yandex/ClickHouse/pull/3224)
+* Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/yandex/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
+* The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [#3241](https://github.com/yandex/ClickHouse/pull/3241)
+* Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/yandex/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
+* Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [#3254](https://github.com/yandex/ClickHouse/pull/3254)
+* Fixed handling of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [#3263](https://github.com/yandex/ClickHouse/pull/3263)
+* Fixed an error when using `FINAL` with `PREWHERE`. [#3298](https://github.com/yandex/ClickHouse/pull/3298)
+* Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [#3298](https://github.com/yandex/ClickHouse/pull/3298)
+* Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [#3337](https://github.com/yandex/ClickHouse/pull/3337)
+* Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [#3357](https://github.com/yandex/ClickHouse/pull/3357)
+* Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/yandex/ClickHouse/pull/3339)
+* Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [#3351](https://github.com/yandex/ClickHouse/pull/3351) [#3352](https://github.com/yandex/ClickHouse/pull/3352)
+
+### Backward incompatible changes:
+
+* Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is now available by default. [#3329](https://github.com/yandex/ClickHouse/pull/3329)
+
+## ClickHouse release 18.12.17, 2018-09-16
+
+### New features:
+
+* `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [#3126](https://github.com/yandex/ClickHouse/pull/3126)
+* Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/yandex/ClickHouse/pull/3123)
+* The `Decimal` type now supports `var*` and `stddev*` aggregate functions (see the sketch after this list). [#3129](https://github.com/yandex/ClickHouse/pull/3129)
+* The `Decimal` type now supports mathematical functions (`exp`, `sin`, and so on). [#3129](https://github.com/yandex/ClickHouse/pull/3129)
+* The `system.part_log` table now has the `partition_id` column. [#3089](https://github.com/yandex/ClickHouse/pull/3089)
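+
+A minimal sketch of a `stddev*` aggregate over `Decimal` values (the data is hypothetical):
+
+```sql
+-- stddevSamp over Decimal64 values with two decimal places.
+SELECT stddevSamp(toDecimal64(x, 2))
+FROM (SELECT arrayJoin([1.10, 2.30, 3.70]) AS x);
+```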
+
+### Bug fixes:
+
+* `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3159)
+* Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [#3130](https://github.com/yandex/ClickHouse/pull/3130)
+* Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3107)
+* Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [#3122](https://github.com/yandex/ClickHouse/pull/3122)
+
+### Backward incompatible changes:
+
+* The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3107)
+
+## ClickHouse release 18.12.14, 2018-09-13
+
+### New features:
+
+* Added support for `ALTER UPDATE` queries (see the sketch after this list). [#3035](https://github.com/yandex/ClickHouse/pull/3035)
+* Added the `allow_ddl` option, which restricts the user's access to DDL queries. [#3104](https://github.com/yandex/ClickHouse/pull/3104)
+* Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O_DIRECT). [#3117](https://github.com/yandex/ClickHouse/pull/3117)
+* The `system.merges` system table now contains the `partition_id` column. [#3099](https://github.com/yandex/ClickHouse/pull/3099)
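+
+A minimal sketch of an `ALTER UPDATE` mutation (the table and columns are hypothetical; the mutation runs asynchronously and its progress can be tracked in `system.mutations`):
+
+```sql
+ALTER TABLE hits UPDATE banner_id = 0 WHERE banner_id = 42;
+```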
+
+### Improvements:
+
+* If a data part remains unchanged during mutation, it isn't downloaded by replicas. [#3103](https://github.com/yandex/ClickHouse/pull/3103)
+* Autocomplete is available for names of settings when working with `clickhouse-client`. [#3106](https://github.com/yandex/ClickHouse/pull/3106)
+
+### Bug fixes:
+
+* Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [#3118](https://github.com/yandex/ClickHouse/pull/3118)
+* Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
+* Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3098)
+* Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/3097)
+
+
+## ClickHouse release 18.12.13, 2018-09-10
+
+### New features:
+
+* Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type` (see the sketch after this list). [#2846](https://github.com/yandex/ClickHouse/pull/2846) [#2970](https://github.com/yandex/ClickHouse/pull/2970) [#3008](https://github.com/yandex/ClickHouse/pull/3008) [#3047](https://github.com/yandex/ClickHouse/pull/3047)
+* New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [#2948](https://github.com/yandex/ClickHouse/pull/2948)
+* In requests with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2787)
+* Added support for JOIN with table functions. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2907)
+* Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/yandex/ClickHouse/pull/2447)
+* Ctrl+C in clickhouse-client clears a query that was entered. [#2877](https://github.com/yandex/ClickHouse/pull/2877)
+* Added the `join_default_strictness` setting (values: `''`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [#2982](https://github.com/yandex/ClickHouse/pull/2982)
+* Each line of the server log related to query processing shows the query ID. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [#2482](https://github.com/yandex/ClickHouse/pull/2482)
+* The `system.metrics` and `system.events` tables now have built-in documentation. [#3016](https://github.com/yandex/ClickHouse/pull/3016)
+* Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2975)
+* Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/yandex/ClickHouse/pull/2942)
+* Added the `retention` aggregate function. [Sundy Li](https://github.com/yandex/ClickHouse/pull/2887)
+* Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [#3062](https://github.com/yandex/ClickHouse/pull/3062) [#3034](https://github.com/yandex/ClickHouse/pull/3034)
+* Tables in the MergeTree family now have the virtual column `_partition_id`. [#3089](https://github.com/yandex/ClickHouse/pull/3089)
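+
+A minimal sketch of the experimental `Decimal` type (the table and data are hypothetical):
+
+```sql
+SET allow_experimental_decimal_type = 1;  -- required while the type is experimental
+CREATE TABLE prices (p Decimal64(2)) ENGINE = Memory;
+INSERT INTO prices VALUES (10.25), (3.50);
+SELECT sum(p) FROM prices;  -- 13.75
+```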
+
+### Experimental features:
+
+* Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [#2830](https://github.com/yandex/ClickHouse/pull/2830)
+* Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [#2990](https://github.com/yandex/ClickHouse/pull/2990) [#3077](https://github.com/yandex/ClickHouse/pull/3077)
+
+### Improvements:
+
+* Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
+* Improved performance of `GROUP BY` with multiple aggregation fields when one of them is a string and the others are fixed length.
+* Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
+* Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/yandex/ClickHouse/pull/2977) [#2980](https://github.com/yandex/ClickHouse/pull/2980)
+* Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2955)
+* Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [#2953](https://github.com/yandex/ClickHouse/pull/2953)
+* Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [#2938](https://github.com/yandex/ClickHouse/pull/2938)
+* Conditions from the outer query are pushed down into a view (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2907)
+* Improvements to the functionality for the `UUID` data type. [#3074](https://github.com/yandex/ClickHouse/pull/3074) [#2985](https://github.com/yandex/ClickHouse/pull/2985)
+* The `UUID` data type is supported in external dictionaries. [The-Alchemist](https://github.com/yandex/ClickHouse/pull/2822)
+* The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2974)
+* When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/yandex/ClickHouse/pull/2958)
+* For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [#2920](https://github.com/yandex/ClickHouse/pull/2920)
+* The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/yandex/ClickHouse/pull/2971)
+* You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m` (see the sketch after this list). [#2840](https://github.com/yandex/ClickHouse/pull/2840)
+* You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [#2840](https://github.com/yandex/ClickHouse/pull/2840)
+* Increased the size of the queue to write to system tables, so the `SystemLog queue is full` error doesn't happen as often.
+* The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2801)
+* Duplicate columns can be used in a `USING` clause for `JOIN`. [#3006](https://github.com/yandex/ClickHouse/pull/3006)
+* `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [#3003](https://github.com/yandex/ClickHouse/pull/3003)
+* The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2885)
+* Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/yandex/ClickHouse/pull/2909)
+* The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
+* `ALTER DELETE` queries work for materialized views.
+* Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
+* Support for `ATTACH TABLE ... ON CLUSTER` queries. [#3025](https://github.com/yandex/ClickHouse/pull/3025)
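+
+A minimal sketch of the `LIMIT ... OFFSET` and `TOP` forms:
+
+```sql
+SELECT number FROM system.numbers LIMIT 5, 10;        -- skip 5 rows, return 10
+SELECT number FROM system.numbers LIMIT 10 OFFSET 5;  -- equivalent to the line above
+SELECT TOP 10 number FROM system.numbers;             -- same as LIMIT 10 (no offset form)
+```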
+
+### Bug fixes:
+
+* Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [#2913](https://github.com/yandex/ClickHouse/issues/2913)
+* Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [#3049](https://github.com/yandex/ClickHouse/pull/3049)
+* Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [#3038](https://github.com/yandex/ClickHouse/pull/3038)
+* Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [#2939](https://github.com/yandex/ClickHouse/pull/2939) [#2949](https://github.com/yandex/ClickHouse/pull/2949) [#2964](https://github.com/yandex/ClickHouse/pull/2964)
+* Fixed a segfault during `JOIN ... ON`. [#3000](https://github.com/yandex/ClickHouse/pull/3000)
+* Fixed the error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [#2994](https://github.com/yandex/ClickHouse/pull/2994)
+* Fixed the "Not found column" error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [#3087](https://github.com/yandex/ClickHouse/pull/3087)
+* Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [#2226](https://github.com/yandex/ClickHouse/pull/2226) [#3037](https://github.com/yandex/ClickHouse/pull/3037) [#3055](https://github.com/yandex/ClickHouse/pull/3055) [#3065](https://github.com/yandex/ClickHouse/pull/3065) [#3073](https://github.com/yandex/ClickHouse/pull/3073) [#3090](https://github.com/yandex/ClickHouse/pull/3090) [#3093](https://github.com/yandex/ClickHouse/pull/3093)
+* Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [#2910](https://github.com/yandex/ClickHouse/pull/2910)
+* Fixed the incorrect result when comparing `nan` with integers. [#3024](https://github.com/yandex/ClickHouse/pull/3024)
+* Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [#2854](https://github.com/yandex/ClickHouse/pull/2854)
+* Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [#3084](https://github.com/yandex/ClickHouse/pull/3084)
+* Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
+* Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as nested tuples. [#2989](https://github.com/yandex/ClickHouse/pull/2989)
+* Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2960)
+* Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [#2845](https://github.com/yandex/ClickHouse/pull/2845) [#2937](https://github.com/yandex/ClickHouse/pull/2937)
+* The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2929)
+* Fixed the segfault when re-initializing the ZooKeeper session. [#2917](https://github.com/yandex/ClickHouse/pull/2917)
+* Fixed potential blocking when working with ZooKeeper.
+* Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
+* When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/yandex/ClickHouse/pull/2808)
+
+### Security fix:
+
+* Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [#2828](https://github.com/yandex/ClickHouse/pull/2828) [#2879](https://github.com/yandex/ClickHouse/pull/2879) [#2886](https://github.com/yandex/ClickHouse/pull/2886) [#2893](https://github.com/yandex/ClickHouse/pull/2893) [#2921](https://github.com/yandex/ClickHouse/pull/2921)
+* Fixed incorrect validation of the file path in the `catBoostPool` table function. [#2894](https://github.com/yandex/ClickHouse/pull/2894)
+* The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2856)
+
+### Backward incompatible changes:
+
+* In requests with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.
+
+### Build changes:
+
+* Most integration tests can now be run by commit.
+* Code style checks can also be run by commit.
+* The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/yandex/ClickHouse/pull/2912)
+* When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall-Wextra -Werror`. [#2957](https://github.com/yandex/ClickHouse/pull/2957)
+* Debug builds use the `jemalloc` debug option.
+* The interface of the library for interacting with ZooKeeper is declared abstract. [#2950](https://github.com/yandex/ClickHouse/pull/2950)
+
+## ClickHouse release 18.10.3, 2018-08-13
+
+### New features:
+
+* HTTPS can be used for replication. [#2760](https://github.com/yandex/ClickHouse/pull/2760)
+* Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [#2791](https://github.com/yandex/ClickHouse/pull/2791)
+* Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [#2834](https://github.com/yandex/ClickHouse/pull/2834)
+* Support for `UUID` in the key columns.
+
+### Improvements:
+
+* Clusters can be removed without restarting the server when they are deleted from the config files. [#2777](https://github.com/yandex/ClickHouse/pull/2777)
+* External dictionaries can be removed without restarting the server when they are removed from config files. [#2779](https://github.com/yandex/ClickHouse/pull/2779)
+* Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/yandex/ClickHouse/pull/2781)
+* Improvements for the `UUID` data type (not yet complete). [#2618](https://github.com/yandex/ClickHouse/pull/2618)
+* Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [#2815](https://github.com/yandex/ClickHouse/pull/2815)
+* Old records of completed mutations are deleted (`ALTER DELETE`). [#2784](https://github.com/yandex/ClickHouse/pull/2784)
+* Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/yandex/ClickHouse/pull/2841)
+* The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2851)
+* Added the `max_partition_size_to_drop` config option. [#2782](https://github.com/yandex/ClickHouse/pull/2782)
+* Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/yandex/ClickHouse/pull/2812)
+* Added the `max_fetch_partition_retries_count` setting. [#2831](https://github.com/yandex/ClickHouse/pull/2831)
+* Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [#2832](https://github.com/yandex/ClickHouse/pull/2832)
+* The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set, as sketched below. [Sundy Li](https://github.com/yandex/ClickHouse/pull/2855)
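+
+A minimal sketch of the new `quantileExact` behavior on an empty set:
+
+```sql
+-- The subquery yields zero Float64 rows, so the result is nan.
+SELECT quantileExact(x) FROM (SELECT arrayJoin(emptyArrayFloat64()) AS x);
+```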
+
+### Bug fixes:
+
+* Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
+* Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [#2814](https://github.com/yandex/ClickHouse/pull/2814)
+* Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [#2930](https://github.com/yandex/ClickHouse/pull/2930)
+* Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/yandex/ClickHouse/pull/2823)
+* Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a sub-query if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2094)
+* Fixed a memory leak if an exception occurred when connecting to a MySQL server.
+* Fixed incorrect clickhouse-client response code in case of a request error.
+* Fixed incorrect behavior of materialized views containing DISTINCT. [#2795](https://github.com/yandex/ClickHouse/issues/2795)
+
+### Backward incompatible changes:
+
+* Removed support for CHECK TABLE queries for Distributed tables.
+
+### Build changes:
+
+* The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, some queries have slowed down by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. Under highly concurrent loads, CPU usage in userspace and in the kernel shows just a slight increase. [#2773](https://github.com/yandex/ClickHouse/pull/2773)
+* Use of libressl from a submodule. [#1983](https://github.com/yandex/ClickHouse/pull/1983) [#2807](https://github.com/yandex/ClickHouse/pull/2807)
+* Use of unixodbc from a submodule. [#2789](https://github.com/yandex/ClickHouse/pull/2789)
+* Use of mariadb-connector-c from a submodule. [#2785](https://github.com/yandex/ClickHouse/pull/2785)
+* Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).
+
+## ClickHouse release 18.6.0, 2018-08-02
+
+### New features:
+
+* Added support for ON expressions for the JOIN ON syntax:
+`JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
+The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table (see the sketch after this list). [#2742](https://github.com/yandex/ClickHouse/pull/2742)
+* HTTPS can be enabled for replication. [#2760](https://github.com/yandex/ClickHouse/pull/2760)
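+
+A minimal sketch of the new `JOIN ON` syntax (the tables and columns are hypothetical):
+
+```sql
+-- A chain of equalities joined by AND; each side is an expression
+-- over the columns of exactly one of the joined tables.
+SELECT t1.id, t2.value
+FROM t1
+JOIN t2 ON t1.id = t2.id AND lower(t1.code) = lower(t2.code);
+```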
+
+### Improvements:
+
+* The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [#2646](https://github.com/yandex/ClickHouse/pull/2646)
+
+## ClickHouse release 18.5.1, 2018-07-31
+
+### New features:
+
+* Added the hash function `murmurHash2_32` [#2756](https://github.com/yandex/ClickHouse/pull/2756).
+
+### Improvements:
+
+* Now you can use the `from_env` [#2741](https://github.com/yandex/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
+* Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions, as sketched below [#2752](https://github.com/yandex/ClickHouse/pull/2752).
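+
+A minimal sketch of the case-insensitive spellings:
+
+```sql
+SELECT COALESCE(NULL, 42), IFNULL(NULL, 'fallback'), NULLIF(1, 1);
+-- 42, 'fallback', NULL
+```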
+
+### Bug fixes:
+
+* Fixed a possible bug when starting a replica [#2759](https://github.com/yandex/ClickHouse/pull/2759).
+
+## ClickHouse release 18.4.0, 2018-07-28
+
+### New features:
+
+* Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721).
+* Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [#2708](https://github.com/yandex/ClickHouse/pull/2708).
+* Support for `HTTP Basic` authentication in the replication protocol [#2727](https://github.com/yandex/ClickHouse/pull/2727).
+* The `has` function now allows searching for a numeric value in an array of `Enum` values (see the sketch after this list) [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699).
+* Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701).
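+
+A minimal sketch of `has` matching a numeric value against `Enum` array elements:
+
+```sql
+-- 2 matches 'b' = 2, so the result is 1.
+SELECT has(CAST(['a', 'b'] AS Array(Enum8('a' = 1, 'b' = 2))), 2);
+```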
+
+### Improvements:
+
+* The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [#2694](https://github.com/yandex/ClickHouse/pull/2694).
+* The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
+* Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689).
+
+### Bug fixes:
+
+* Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
+* Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735).
+* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
+* Fixed server crash when using the `countArray()` aggregate function.
+
+### Backward incompatible changes:
+
+* Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the `''` value.
+
+## ClickHouse release 18.1.0, 2018-07-23
+
+### New features:
+
+* Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([#2634](https://github.com/yandex/ClickHouse/pull/2634)).
+* Support for arbitrary types for the `uniq*` family of aggregate functions ([#2010](https://github.com/yandex/ClickHouse/issues/2010)).
+* Support for arbitrary types in comparison operators ([#2026](https://github.com/yandex/ClickHouse/issues/2026)).
+* The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([#2637](https://github.com/yandex/ClickHouse/pull/2637)).
+* Added the `arrayDistinct` function, as sketched below ([#2670](https://github.com/yandex/ClickHouse/pull/2670)).
+* The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/yandex/ClickHouse/pull/2566)).
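+
+A minimal sketch of `arrayDistinct`:
+
+```sql
+SELECT arrayDistinct([1, 2, 2, 3, 1]);  -- [1, 2, 3]
+```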
+
+### Improvements:
+
+* Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backwards compatible, unless otherwise stated in the changelog.
+* Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2664)).
+* If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/yandex/ClickHouse/pull/2669)).
+
+### Bug fixes:
+
+* Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2624)).
+* Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/yandex/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
+* Fixed an error during a CAST to Nullable types ([#1322](https://github.com/yandex/ClickHouse/issues/1322)).
+* Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2657)).
+* Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/yandex/ClickHouse/pull/2663)).
+* Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([#2571](https://github.com/yandex/ClickHouse/issues/2571)).
+* Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/yandex/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
+* Added missing quoting of identifiers for queries to an external DBMS ([#2635](https://github.com/yandex/ClickHouse/issues/2635)).
+
+### Backward incompatible changes:
+
+* Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
+
+## ClickHouse release 1.1.54394, 2018-07-12
+
+### New features:
+
+* Added the `histogram` aggregate function (see the sketch after this list) ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2521)).
+* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2600)).
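+
+A minimal sketch of the `histogram` aggregate function (bin boundaries are adaptive, so the exact output varies):
+
+```sql
+-- Builds an approximate 5-bin histogram; returns an array of (lower, upper, height) tuples.
+SELECT histogram(5)(number) FROM (SELECT number FROM system.numbers LIMIT 100);
+```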
+
+### Bug fixes:
+
+* Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
+* Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
+* The `has` function now works correctly for an array with Nullable elements ([#2115](https://github.com/yandex/ClickHouse/issues/2115)).
+* The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were requested from the table.
+* Fixed how an empty `TinyLog` table works after inserting an empty data block ([#2563](https://github.com/yandex/ClickHouse/issues/2563)).
+* The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
+
+## ClickHouse release 1.1.54390, 2018-07-06
+
+### New features:
+
+* Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/yandex/ClickHouse/pull/2490)).
+* Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings (see the sketch after this list) ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2574)).
+* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2599)).
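+
+A minimal sketch of toggling the new CSV quoting settings for a session:
+
+```sql
+SET format_csv_allow_single_quotes = 0;  -- treat ' as an ordinary character in CSV fields
+SET format_csv_allow_double_quotes = 1;  -- keep " as a quoting character
+```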
+
+### Improvements:
+
+* Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([#2584](https://github.com/yandex/ClickHouse/pull/2584)).
+* Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
+* Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([#2573](https://github.com/yandex/ClickHouse/pull/2573)).
+* Added `Nullable` support for the `runningDifference` function ([#2594](https://github.com/yandex/ClickHouse/pull/2594)).
+* Improved query analysis performance when there is a very large number of expressions ([#2572](https://github.com/yandex/ClickHouse/pull/2572)).
+* Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([#2597](https://github.com/yandex/ClickHouse/pull/2597)).
+* The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/yandex/ClickHouse/pull/2593)).
+
+### Bug fixes:
+
+* Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
+* Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
+* Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2553)).
+* Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([#2603](https://github.com/yandex/ClickHouse/pull/2603)).
+* Fixed segfault if `macros` are used but they aren't in the config file ([#2570](https://github.com/yandex/ClickHouse/pull/2570)).
+* Fixed switching to the default database when reconnecting the client ([#2583](https://github.com/yandex/ClickHouse/pull/2583)).
+* Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
+
+### Security fix:
+
+* Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
+
+## ClickHouse release 1.1.54388, 2018-06-28
+
+### New features:
+
+* Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track progress of this type of queries.
+* Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
+* Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2260))
+* Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|REPLICATED SENDS|REPLICATION QUEUES]`).
+* Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2294)).
+* Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2501)).
+* Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2352)).
+* New `startsWith` and `endsWith` functions for strings (see the sketch after this list) ([Vadim Plakhtinsky](https://github.com/yandex/ClickHouse/pull/2429)).
+* The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2535)).
+* The password to `clickhouse-client` can be entered interactively.
+* Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/yandex/ClickHouse/pull/2459)).
+* Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2472)).
+* Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/yandex/ClickHouse/pull/2263))
+* Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
+* Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
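+
+A minimal sketch of the new string functions and the `numbers()` offset form:
+
+```sql
+SELECT startsWith('ClickHouse', 'Click'), endsWith('ClickHouse', 'House');  -- 1, 1
+SELECT * FROM numbers(10, 5);  -- five rows: 10, 11, 12, 13, 14
+```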
+
+### Experimental features:
+
+* Added the ability to calculate the arguments of `and` only where they are needed (short-circuit evaluation) ([Anastasia Tsarkova](https://github.com/yandex/ClickHouse/pull/2272))
+* JIT compilation to native code is now available for some expressions ([pyos](https://github.com/yandex/ClickHouse/pull/2277)).
+
+### Bug fixes:
+
+* Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
+* Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
+* Fixed an error when reading an array column from a Nested structure ([#2066](https://github.com/yandex/ClickHouse/issues/2066)).
+* Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
+* Fixed an error when analyzing queries with recursive aliases.
+* Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([#2525](https://github.com/yandex/ClickHouse/issues/2525)).
+* User profile settings were not applied when using sessions in the HTTP interface.
+* Fixed how settings are applied from the command line parameters in clickhouse-local.
+* The ZooKeeper client library now uses the session timeout received from the server.
+* Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
+* Fixed pruning of parts for queries with conditions on partition key columns ([#2342](https://github.com/yandex/ClickHouse/issues/2342)).
+* Merges are now possible after `CLEAR COLUMN IN PARTITION` ([#2315](https://github.com/yandex/ClickHouse/issues/2315)).
+* Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/yandex/ClickHouse/pull/2268)).
+* Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/yandex/ClickHouse/pull/2400)).
+* Fixed syntactic parsing and formatting of the `CAST` operator.
+* Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2411)).
+* Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/yandex/ClickHouse/pull/2448)).
+* Fixed SSRF in the remote() table function.
+* Fixed exit behavior of `clickhouse-client` in multiline mode ([#2510](https://github.com/yandex/ClickHouse/issues/2510)).
+
+### Improvements:
+
+* Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/yandex/ClickHouse/pull/1722)).
+* Improved LZ4 compression performance.
+* Faster analysis for queries with a large number of JOINs and sub-queries.
+* The DNS cache is now updated automatically when there are too many network errors.
+* Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
+* Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
+* Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
+* A server with replicated tables can start even if you haven't configured ZooKeeper.
+* When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/yandex/ClickHouse/pull/2325)).
+* Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/yandex/ClickHouse/pull/2421)).
+
+### Build changes:
+
+* The gcc8 compiler can be used for builds.
+* Added the ability to build llvm from submodule.
+* The version of the librdkafka library has been updated to v0.11.4.
+* Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
+* Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2274)).
+* Cmake now generates files for ninja by default (like when using `-G Ninja`).
+* Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/yandex/ClickHouse/pull/2519)).
+* Fixed a header file conflict in Fedora Rawhide ([#2520](https://github.com/yandex/ClickHouse/issues/2520)).
+
+### Backward incompatible changes:
+
+* Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
+* If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster.
+
+## ClickHouse release 1.1.54385, 2018-06-01
+
+### Bug fixes:
+
+* Fixed an error that in some cases caused ZooKeeper operations to block.
+
+## ClickHouse release 1.1.54383, 2018-05-22
+
+### Bug fixes:
+
+* Fixed a slowdown of the replication queue if a table has many replicas.
+
+## ClickHouse release 1.1.54381, 2018-05-14
+
+### Bug fixes:
+
+* Fixed a nodes leak in ZooKeeper when ClickHouse loses connection to ZooKeeper server.
+
+## ClickHouse release 1.1.54380, 2018-04-21
+
+### New features:
+
+* Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
+
+### Improvements:
+
+* Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
+* Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
+
+### Bug fixes:
+
+* Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
+* Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
+* Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
+* Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
+* Fixed freezing of `KILL QUERY`.
+* Fixed an error in ZooKeeper client library which led to loss of watches, freezing of distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
+
+### Backward incompatible changes:
+
+* Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
+
+## ClickHouse release 1.1.54378, 2018-04-16
+
+### New features:
+
+* Logging level can be changed without restarting the server.
+* Added the `SHOW CREATE DATABASE` query.
+* The `query_id` can be passed to `clickhouse-client` (elBroom).
+* New setting: `max_network_bandwidth_for_all_users`.
+* Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
+* Added information about the size of data parts in uncompressed form in the system table.
+* Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
+* Table-level configuration for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
+* Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
+* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
+* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was cancelled` exception instead of an incomplete result.
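+
+A minimal sketch of the new cancellation behavior (the `query_id` value and table name are hypothetical):
+
+```sql
+-- Session 1: a long-running query started with a known id, e.g.
+-- clickhouse-client --query_id='long-run-1' -q 'SELECT count() FROM big_table'
+
+-- Session 2: cancel it.
+KILL QUERY WHERE query_id = 'long-run-1';
+
+-- Session 1 now receives a `Query was cancelled` exception instead of
+-- an incomplete result.
+```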
+
+### Improvements:
+
+* `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
+* `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
+* A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
+* The `lengthUTF8` function runs faster (zhang2014).
+* Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
+* The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
+* More robust crash recovery for asynchronous insertion into `Distributed` tables.
+* The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
+
+### Bug fixes:
+
+* Fixed an error with `IN` when the left side of the expression is `Nullable`.
+* Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
+* The `max_execution_time` limit now works correctly with distributed queries.
+* Fixed errors when calculating the size of composite columns in the `system.columns` table.
+* Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`.
+* Fixed errors in `StorageKafka` (#2075).
+* Fixed server crashes from invalid arguments of certain aggregate functions.
+* Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
+* The `Too many parts` state is less likely to happen when inserting into aggregated materialized views (#2084).
+* Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
+* Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
+* `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
+* Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
+
+### Build changes:
+
+* The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
+* Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names are still available in the repositories for backward compatibility.
+
+### Backward incompatible changes:
+
+* Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`, as shown in the sketch after this list. In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
+* Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `1` in the config.
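+
+A minimal migration sketch (assuming a hypothetical table `t` with an `Array(UInt8)` column `arr`):
+
+```sql
+-- Old implicit meaning of `WHERE arr IN (1, 2, 3)`, now written explicitly:
+-- "at least one element of arr belongs to the set".
+SELECT count()
+FROM t
+WHERE arrayExists(x -> x IN (1, 2, 3), arr);
+```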
+
+## ClickHouse release 1.1.54370, 2018-03-16
+
+### New features:
+
+* Added the `system.macros` table and auto updating of macros when the config file is changed.
+* Added the `SYSTEM RELOAD CONFIG` query.
+* Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the "maximum" interval. ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2012)).
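+
+A minimal sketch of the new aggregate functions over three ad-hoc intervals:
+
+```sql
+SELECT
+    maxIntersections(l, r),         -- 2: [1, 4] and [2, 5] overlap
+    maxIntersectionsPosition(l, r)  -- 2: the "maximum" interval begins at 2
+FROM
+(
+    SELECT 1 AS l, 4 AS r
+    UNION ALL SELECT 2, 5
+    UNION ALL SELECT 6, 7
+);
+```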
+
+### Improvements:
+
+* When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
+* Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
+
+### Bug fixes:
+
+* Fixed the `Illegal PREWHERE` error when reading from Merge tables for `Distributed` tables.
+* Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
+* Fixed a race condition when reading from the `system.parts_columns` table.
+* Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to timeout.
+* Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
+* Fixed incorrect dates in the `system.parts` table.
+* Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
+* Fixed the vertical merging algorithm for an empty `ORDER BY` table.
+* Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
+* Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
+* Removed extraneous error-level logging of `Not found column ... in block`.
+
+## ClickHouse release 1.1.54362, 2018-03-11
+
+### New features:
+
+* Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
+* Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
+* Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
+* An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
+* Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
+* Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
+* Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
+* Added the `arrayCumSum` function (Javi Santana).
+* Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats.
+* Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
+* Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
+* The `remote` and `cluster` table functions can be used in `INSERT` requests.
+* Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
+* Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
+* Added additional information about merges in the `system.part_log` table.
+* An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
+* The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
+* Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
+* Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
+* Added the `system_profile` configuration parameter for the settings used by internal processes.
+* Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
+* Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
+* Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
+* Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
+* Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
+* Implemented the `empty` function from a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
+* Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
+* Added the `VersionedCollapsingMergeTree` table engine.
+* Support for rows and arbitrary numeric types for the `library` dictionary source.
+* `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`); see the sketch after this list.
+* A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
+* `RENAME TABLE` can be performed for `VIEW`.
+* Added the `throwIf` function.
+* Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
+* The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
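+
+A minimal sketch of a `MergeTree` table without a primary key (the table name is hypothetical):
+
+```sql
+-- An empty sort key: rows are stored without any particular order.
+CREATE TABLE unordered_events
+(
+    d Date,
+    message String
+)
+ENGINE = MergeTree
+PARTITION BY toYYYYMM(d)
+ORDER BY tuple();
+```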
+
+### Improvements:
+
+* Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
+* Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
+* Added the `allow_distributed_ddl` option.
+* Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
+* Files with substitutions from `config.d` directories are loaded in alphabetical order.
+* Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
+* The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
+* When running a query, table functions run only once. Previously, the `remote` and `mysql` table functions performed the same query twice to retrieve the table structure from a remote server.
+* The `MkDocs` documentation generator is used.
+* When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
+* Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
+* `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in the release 1.1.54337.
+* Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
+
+### Bug fixes:
+
+* Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
+* Fixed a bug in merges for `ReplacingMergeTree` tables.
+* Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
+* Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
+* Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`.
+* Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
+* Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
+* Fixed the `DROP DATABASE` query for `Dictionary` databases.
+* Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
+* Fixed the calculation of implicit default values when it is necessary to simultaneously calculate explicit default expressions in `INSERT` queries (zhang2014).
+* Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
+* Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
+* Fixed a slight performance regression with functions that use regular expressions.
+* Fixed a performance regression when creating multidimensional arrays from complex expressions.
+* Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
+* Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
+* Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
+* Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
+* Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables.
+* Fixed a bug when using `ALIAS` columns in `Distributed` tables.
+* Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
+* Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
+* Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
+* Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
+* Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
+
+### Backward incompatible changes:
+
+* Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
+* Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
+* Removed the `UnsortedMergeTree` engine.
+
+## ClickHouse release 1.1.54343, 2018-02-05
+
+* Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
+* Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
+* Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
+
+## ClickHouse release 1.1.54342, 2018-01-22
+
+This release contains bug fixes for the previous release 1.1.54337:
+
+* Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
+* Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
+* Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
+* Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
+* Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
+* Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
+* Fixed a bug in the implementation of `NULL`.
+
+## ClickHouse release 1.1.54337, 2018-01-18
+
+### New features:
+
+* Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables.
+* Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
+* Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected (see the sketch after this list).
+* Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
+* Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
+* Added the `clickhouse format` tool for formatting queries.
+* Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
+* Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
+* Added a column with documentation for the `system.settings` table (Kirill Shvakov).
+* Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
+* Added the `system.models` table with information about loaded `CatBoost` machine learning models.
+* Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
+* Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
+* Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
+* The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
+* Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
+* Added the `max_client_network_bandwidth` for `clickhouse-client` (Kirill Shvakov).
+* Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT...) (Kirill Shvakov).
+* Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
+* Added the `intExp3` and `intExp4` functions.
+* Added the `sumKahan` aggregate function.
+* Added the `to*Number*OrNull` functions, where *Number* is a numeric type.
+* Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
+* Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
+* Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
+* The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
+* Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info to stderr.
+* Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
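+
+A minimal sketch of timezone-aware `DateTime` handling (the table name is hypothetical):
+
+```sql
+-- The annotation affects parsing and text formatting; the stored value
+-- is still a Unix timestamp.
+CREATE TABLE tz_demo (t DateTime('Europe/Moscow')) ENGINE = Memory;
+
+INSERT INTO tz_demo VALUES ('2018-01-18 12:00:00');
+
+-- The return type of toTimeZone tracks the timezone argument.
+SELECT t, toTimeZone(t, 'UTC') FROM tz_demo;
+```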
+
+### Performance optimizations:
+
+* Improved performance of aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
+* Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
+* Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
+* Improved performance and precision of parsing floating point numbers.
+* Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
+* Improved performance of aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
+
+### Bug fixes:
+
+* Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
+* Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
+* Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
+* Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
+* Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
+* Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
+* Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
+* Various fixes for the `Kafka` engine (Marek Vavruša).
+* Fixed incorrect behavior of the `Join` table engine (Amos Bird).
+* Fixed incorrect allocator behavior under FreeBSD and OS X.
+* The `extractAll` function now supports empty matches.
+* Fixed an error that blocked usage of `libressl` instead of `openssl`.
+* Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
+* Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
+* Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod).
+* `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
+* Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
+* Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
+* Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
+* Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in `INSERT SELECT`, insufficient support for `Nullable` in `HAVING` and `PREWHERE`, the `join_use_nulls` mode, `Nullable` types as arguments of the `OR` operator, etc.
+* Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
+* Stricter checks for allowed combinations of composite columns.
+* Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
+* Fixed a bug in the `topK` aggregate function in a generic case.
+* Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
+* Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
+* Fixed the precision of the `exp10` function.
+* Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
+* Fixed the crash when incorrect data types are specified.
+* Fixed the behavior of `DISTINCT` in the case when all columns are constants.
+* Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
+* Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
+* Fixed a bug that leads to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
+* Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
+* Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
+* Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
+
+### Build improvements:
+
+* The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
+* A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
+* Added the `clickhouse-test` package. It can be used to run functional tests.
+* The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
+* Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
+* Added support for `Cap'n'Proto` in the default build.
+* Changed the format of documentation sources from `Restricted Text` to `Markdown`.
+* Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
+* For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
+* Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`.
+* Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
+
+### Backward incompatible changes:
+
+* The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
+* Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
+* The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstValue` to avoid confusion.
+* Removed the `FROM ARRAY JOIN arr` syntax when `ARRAY JOIN` is specified directly after `FROM` with no table (Amos Bird).
+* Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
+* Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
+* In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
+* Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
+
+### Please note when upgrading:
+
+* When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
+* If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
+
+## ClickHouse release 1.1.54327, 2017-12-21
+
+This release contains bug fixes for the previous release 1.1.54318:
+
+* Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don't see these messages in logs.
+
+## ClickHouse release 1.1.54318, 2017-11-30
+
+This release contains bug fixes for the previous release 1.1.54310:
+
+* Fixed incorrect row deletions during merges in the SummingMergeTree engine.
+* Fixed a memory leak in unreplicated MergeTree engines.
+* Fixed performance degradation with frequent inserts in MergeTree engines.
+* Fixed an issue that was causing the replication queue to stop running.
+* Fixed rotation and archiving of server logs.
+
+## ClickHouse release 1.1.54310, 2017-11-01
+
+### New features:
+
+* Custom partitioning key for the MergeTree family of table engines.
+* [Kafka](https://clickhouse.yandex/docs/en/single/index.html#document-table_engines/kafka) table engine.
+* Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
+* Added support for time zones with non-integer offsets from UTC.
+* Added support for arithmetic operations with time intervals.
+* The range of values for the Date and DateTime types is extended to the year 2105.
+* Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view); see the sketch after this list.
+* Added the `ATTACH TABLE` query without arguments.
+* The processing logic for `Nested` columns with names ending in `-Map` in a `SummingMergeTree` table was extracted to the `sumMap` aggregate function. You can now specify such columns explicitly.
+* Max size of the IP trie dictionary is increased to 128M entries.
+* Added the `getSizeOfEnumType` function.
+* Added the `sumWithOverflow` aggregate function.
+* Added support for the Cap'n Proto input format.
+* You can now customize compression level when using the zstd algorithm.
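+
+A minimal sketch of `CREATE MATERIALIZED VIEW ... TO` (all table names are hypothetical; a source table `events(ts DateTime)` is assumed):
+
+```sql
+-- The view writes into an explicitly created table instead of a hidden
+-- `.inner.` table, so the target can be queried and managed directly.
+CREATE TABLE daily_hits (day Date, hits UInt64)
+    ENGINE = SummingMergeTree(day, (day), 8192);
+
+CREATE MATERIALIZED VIEW hits_mv TO daily_hits AS
+    SELECT toDate(ts) AS day, count() AS hits
+    FROM events
+    GROUP BY day;
+```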
+
+### Backward incompatible changes:
+
+* Creation of temporary tables with an engine other than Memory is not allowed.
+* Explicit creation of tables with the View or MaterializedView engine is not allowed.
+* During table creation, a new check verifies that the sampling key expression is included in the primary key.
+
+### Bug fixes:
+
+* Fixed hangups when synchronously inserting into a Distributed table.
+* Fixed nonatomic adding and removing of parts in Replicated tables.
+* Data inserted into a materialized view is not subjected to unnecessary deduplication.
+* Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore.
+* Users don't need access permissions to the `default` database to create temporary tables anymore.
+* Fixed crashing when specifying the Array type without arguments.
+* Fixed hangups when the disk volume containing server logs is full.
+* Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch.
+
+### Build improvements:
+
+* Several third-party libraries (notably Poco) were updated and converted to git submodules.
+
+## ClickHouse release 1.1.54304, 2017-10-19
+
+### New features:
+
+* TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml`).
+
+### Bug fixes:
+
+* `ALTER` for replicated tables now tries to start running as soon as possible.
+* Fixed crashing when reading data with the setting `preferred_block_size_bytes = 0`.
+* Fixed crashes of `clickhouse-client` when pressing `Page Down`.
+* Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL`.
+* `FREEZE PARTITION` always works atomically now.
+* Empty POST requests now return a response with code 411.
+* Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8))`.
+* Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables.
+* Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b`.
+* Users are updated correctly when `users.xml` is invalid.
+* Correct handling when an executable dictionary returns a non-zero response code.
+
+## ClickHouse release 1.1.54292, 2017-09-20
+
+### New features:
+
+* Added the `pointInPolygon` function for working with coordinates on a coordinate plane.
+* Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree` (see the sketch after this list).
+* Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers.
+* The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
+* Reduced the time needed for dynamic compilation of queries.
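+
+A minimal sketch of `sumMap` over two ad-hoc key/value array pairs:
+
+```sql
+SELECT sumMap(k, v)
+FROM
+(
+    SELECT [1, 2] AS k, [10, 10] AS v
+    UNION ALL
+    SELECT [1, 3], [5, 5]
+);
+-- Result: ([1, 2, 3], [15, 10, 5]); values are summed per key, keys are sorted.
+```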
+
+### Bug fixes:
+
+* Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
+* Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
+* Removed excessive logging when restoring replicas.
+* Fixed an error in the UNION ALL implementation.
+* Fixed an error in the `concat` function that occurred if the first column in a block has the `Array` type.
+* Progress is now displayed correctly in the `system.merges` table.
+
+## ClickHouse release 1.1.54289, 2017-09-13
+
+### New features:
+
+* `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
+* Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
+* Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
+* Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
+* External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
+* External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters).
+* Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
+* Support for `DROP TABLE` for temporary tables.
+* Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
+* Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
+* FIFO locking is used during ALTER: an ALTER query isn't blocked indefinitely by continuously running queries.
+* Option to set `umask` in the config file.
+* Improved performance for queries with `DISTINCT`.
+
+### Bug fixes:
+
+* Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
+* Fixed randomization when choosing hosts for the connection to ZooKeeper.
+* Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
+* Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
+* Fixed an error that could cause SELECT queries to "hang".
+* Improvements to distributed DDL queries.
+* Fixed the query `CREATE TABLE ... AS <materialized view>`.
+* Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
+* Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
+* Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
+* Fixed segfault for the HEAD query.
+
+### Improved workflow for developing and assembling ClickHouse:
+
+* You can use `pbuilder` to build ClickHouse.
+* You can use `libc++` instead of `libstdc++` for builds on Linux.
+* Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`.
+
+### Please note when upgrading:
+
+* There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT requests will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` request to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in `config.xml`, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>`, and restart the server.
+
+## ClickHouse release 1.1.54284, 2017-08-29
+
+* This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
+
+## ClickHouse release 1.1.54282, 2017-08-23
+
+This release contains bug fixes for the previous release 1.1.54276:
+
+* Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
+* Fixed parsing when inserting in RowBinary format if input data starts with `';'`.
+* Fixed errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
+
+## ClickHouse release 1.1.54276, 2017-08-16
+
+### New features:
+
+* Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`.
+* INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting `insert_distributed_sync = 1` (see the sketch after this list).
+* Added the UUID data type for working with 16-byte identifiers.
+* Added aliases of CHAR, FLOAT and other types for compatibility with Tableau.
+* Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers.
+* You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
+* Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)`.
+* Added the `max_size` parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance.
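+
+A minimal sketch of a synchronous distributed insert (the table name is hypothetical):
+
+```sql
+-- With the setting enabled, the INSERT returns OK only after every
+-- shard has received its portion of the data.
+SET insert_distributed_sync = 1;
+
+INSERT INTO distributed_events VALUES ('2017-08-16', 1);
+```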
+
+### Main changes:
+
+* Security improvements: all server files are created with 0640 permissions (can be changed via the `<umask>` config parameter).
+* Improved error messages for queries with invalid syntax.
+* Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
+* Significantly increased the performance of data merges for the ReplacingMergeTree engine.
+* Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed_directory_monitor_batch_inserts=1.
+
+### Backward incompatible changes:
+
+* Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
+
+### Complete list of changes:
+
+* Added the `output_format_json_quote_denormals` setting, which enables outputting `nan` and `inf` values in JSON format (see the sketch after this list).
+* Optimized stream allocation when reading from a Distributed table.
+* Settings can be configured in readonly mode if the value doesn't change.
+* Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred_block_size_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
+* Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr`.
+* Added new settings for MergeTree engines (the merge_tree section in config.xml):
+ - replicated_deduplication_window_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables.
+ - cleanup_delay_period sets how often to start cleanup to remove outdated data.
+ - replicated_can_become_leader can prevent a replica from becoming the leader (and assigning merges).
+* Accelerated cleanup to remove outdated data from ZooKeeper.
+* Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed_ddl_task_timeout, which limits the time to wait for a response from the servers in the cluster.
+* Improved display of stack traces in the server logs.
+* Added the "none" value for the compression method.
+* You can use multiple dictionaries_config sections in config.xml.
+* It is possible to connect to MySQL through a socket in the file system.
+* The `system.parts` table has a new column with information about the size of marks, in bytes.
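+
+A minimal sketch of the `output_format_json_quote_denormals` setting:
+
+```sql
+SET output_format_json_quote_denormals = 1;
+
+-- Without the setting, nan and inf become null in JSON output;
+-- with it, they are output as the quoted strings "nan" and "inf".
+SELECT 0 / 0 AS x, 1 / 0 AS y FORMAT JSON;
+```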
+
+### Bug fixes:
+
+* Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
+* Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
+* Fixed possible freezing on "leader election" when starting a server.
+* The max_replica_delay_for_distributed_queries setting was ignored when using a local replica of the data source. This has been fixed.
+* Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
+* Fixed an exception in the multiIf function when using empty arrays or strings.
+* Fixed excessive memory allocations when deserializing Native format.
+* Fixed incorrect auto-update of Trie dictionaries.
+* Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
+* Fixed a crash of GROUP BY when using distributed_aggregation_memory_efficient=1.
+* Now you can specify `database.table` in the right side of `IN` and `JOIN`.
+* Too many threads were used for parallel aggregation. This has been fixed.
+* Fixed how the "if" function works with FixedString arguments.
+* SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
+* Running `CREATE VIEW IF EXISTS` no longer causes crashes.
+* Fixed incorrect behavior when input_format_skip_unknown_fields=1 is set and there are negative numbers.
+* Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
+* Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
+* Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
+* Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements.
+* Fixed the failover for dictionaries with MySQL as the source.
+
+### Improved workflow for developing and assembling ClickHouse:
+
+* Builds can be assembled in Arcadia.
+* You can use gcc 7 to compile ClickHouse.
+* Parallel builds using ccache+distcc are faster now.
+
+## ClickHouse release 1.1.54245, 2017-07-04
+
+### New features:
+
+* Distributed DDL (for example, `CREATE TABLE ON CLUSTER`); see the sketch after this list.
+* The replicated request `ALTER TABLE CLEAR COLUMN IN PARTITION`.
+* The engine for Dictionary tables (access to dictionary data in the form of a table).
+* Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
+* You can check for updates to the dictionary by sending a request to the source.
+* Qualified column names.
+* Quoting identifiers using double quotation marks.
+* Sessions in the HTTP interface.
+* The OPTIMIZE query for a Replicated table can run not only on the leader.
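+
+A minimal sketch of a distributed DDL query (the cluster name and table layout are hypothetical):
+
+```sql
+-- Creates the table on every server of `my_cluster`, as defined in the
+-- server configuration, with a single statement.
+CREATE TABLE db.events ON CLUSTER my_cluster
+(
+    d Date,
+    x UInt32
+) ENGINE = MergeTree(d, (d, x), 8192);
+```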
+
+### Backward incompatible changes:
+
+* Removed SET GLOBAL.
+
+### Minor changes:
+
+* Now after an alert is triggered, the log prints the full stack trace.
+* Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
+
+### Bug fixes:
+
+* Fixed a bad connection "sticking" when inserting into a Distributed table.
+* GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
+* The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
+* Changes in how an executable source of cached external dictionaries works.
+* Fixed the comparison of strings containing null characters.
+* Fixed the comparison of Float32 primary key fields with constants.
+* Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
+* Fixed a crash when querying a Nullable column added to a table using ALTER.
+* Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
+* Fixed an ORDER BY subquery consisting of only constant values.
+* Previously, a Replicated table could remain in the invalid state after a failed DROP TABLE.
+* Aliases for scalar subqueries with empty results are no longer lost.
+* Now a query that used compilation does not fail with an error if the .so file gets damaged.
diff --git a/CHANGELOG_RU.md b/CHANGELOG_RU.md
index a6b9ca9298..216d80f100 100644
--- a/CHANGELOG_RU.md
+++ b/CHANGELOG_RU.md
@@ -1,3 +1,16 @@
+## ClickHouse release 18.14.15, 2018-11-21
+
+### Bug fixes:
+* When reading columns of type `Array(String)`, the size of the required memory chunk was overestimated, leading to a "Memory limit exceeded" exception during query execution. The bug appeared in version 18.12.13. [#3589](https://github.com/yandex/ClickHouse/issues/3589)
+
+## ClickHouse release 18.14.14, 2018-11-20
+
+### Bug fixes:
+* Fixed `ON CLUSTER` queries in the case when cluster encryption is enabled in the configuration (the `<secure>` flag). [#3599](https://github.com/yandex/ClickHouse/pull/3599)
+
+### Build improvements:
+* Fixed build problems (llvm-7 from the system, macOS). [#3582](https://github.com/yandex/ClickHouse/pull/3582)
+
## ClickHouse release 18.14.13, 2018-11-08
### Bug fixes:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 094fe8c7b0..1a8bd57e7b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -262,6 +262,7 @@ include (cmake/find_llvm.cmake)
include (cmake/find_cpuid.cmake)
include (cmake/find_hdfs3.cmake)
include (cmake/find_consistent-hashing.cmake)
+include (cmake/find_base64.cmake)
if (ENABLE_TESTS)
include (cmake/find_gtest.cmake)
endif ()
diff --git a/README.md b/README.md
index a488c55643..5ffefd793a 100644
--- a/README.md
+++ b/README.md
@@ -2,11 +2,10 @@
ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real time.
-🎤🥂 **ClickHouse Meetup in [Amsterdam on November 15](https://events.yandex.com/events/meetings/15-11-2018/)** 🍰🔥🐻
-
## Useful Links
* [Official website](https://clickhouse.yandex/) has quick high-level overview of ClickHouse on main page.
* [Tutorial](https://clickhouse.yandex/tutorial.html) shows how to set up and query small ClickHouse cluster.
* [Documentation](https://clickhouse.yandex/docs/en/) provides more in-depth information.
+* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announces and reports about events.
* [Contacts](https://clickhouse.yandex/#contacts) can help to get your questions answered if there are any.
diff --git a/cmake/find_base64.cmake b/cmake/find_base64.cmake
new file mode 100644
index 0000000000..ad71d11c1b
--- /dev/null
+++ b/cmake/find_base64.cmake
@@ -0,0 +1,12 @@
+option (ENABLE_BASE64 "Enable base64" ON)
+
+if (ENABLE_BASE64)
+ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/base64")
+ message (WARNING "submodule contrib/base64 is missing. to fix try run: \n git submodule update --init --recursive")
+ else()
+ set (BASE64_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/base64/include)
+ set (BASE64_LIBRARY base64)
+ set (USE_BASE64 1)
+ endif()
+endif ()
+
diff --git a/cmake/find_odbc.cmake b/cmake/find_odbc.cmake
index d89e3b532d..32a410c6f1 100644
--- a/cmake/find_odbc.cmake
+++ b/cmake/find_odbc.cmake
@@ -71,10 +71,10 @@ if (ENABLE_ODBC)
)
# MinGW find usually fails
- if(MINGW)
+ if (MINGW)
set(ODBC_INCLUDE_DIRECTORIES ".")
set(ODBC_LIBRARIES odbc32)
- endif()
+ endif ()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ODBC
@@ -82,6 +82,10 @@ if (ENABLE_ODBC)
ODBC_INCLUDE_DIRECTORIES
ODBC_LIBRARIES)
+ if (USE_STATIC_LIBRARIES)
+ list(APPEND ODBC_LIBRARIES ${LTDL_LIBRARY})
+ endif ()
+
mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES)
endif ()
endif ()
diff --git a/cmake/find_poco.cmake b/cmake/find_poco.cmake
index d8468e5306..012f269d48 100644
--- a/cmake/find_poco.cmake
+++ b/cmake/find_poco.cmake
@@ -93,8 +93,8 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
endif ()
if (OPENSSL_FOUND AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL))
- set (Poco_NetSSL_LIBRARY PocoNetSSL)
- set (Poco_Crypto_LIBRARY PocoCrypto)
+ set (Poco_NetSSL_LIBRARY PocoNetSSL ${OPENSSL_LIBRARIES})
+ set (Poco_Crypto_LIBRARY PocoCrypto ${OPENSSL_LIBRARIES})
endif ()
if (USE_STATIC_LIBRARIES AND USE_INTERNAL_ZLIB_LIBRARY)
diff --git a/cmake/lib_name.cmake b/cmake/lib_name.cmake
index b49276fc27..5c919b263e 100644
--- a/cmake/lib_name.cmake
+++ b/cmake/lib_name.cmake
@@ -1,5 +1,4 @@
set(DIVIDE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libdivide)
-set(CITYHASH_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/cityhash102/include)
set(COMMON_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/libs/libcommon/include ${ClickHouse_BINARY_DIR}/libs/libcommon/include)
set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/dbms/src ${ClickHouse_BINARY_DIR}/dbms/src)
set(DOUBLE_CONVERSION_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion)
diff --git a/cmake/print_include_directories.cmake b/cmake/print_include_directories.cmake
index 41c4773cfa..c4c5d00c54 100644
--- a/cmake/print_include_directories.cmake
+++ b/cmake/print_include_directories.cmake
@@ -10,6 +10,9 @@ list(APPEND dirs ${dirs1})
get_property (dirs1 TARGET common PROPERTY INCLUDE_DIRECTORIES)
list(APPEND dirs ${dirs1})
+get_property (dirs1 TARGET cityhash PROPERTY INCLUDE_DIRECTORIES)
+list(APPEND dirs ${dirs1})
+
if (USE_INTERNAL_BOOST_LIBRARY)
get_property (dirs1 TARGET ${Boost_PROGRAM_OPTIONS_LIBRARY} PROPERTY INCLUDE_DIRECTORIES)
list(APPEND dirs ${dirs1})
diff --git a/cmake/test_cpu.cmake b/cmake/test_cpu.cmake
index 6894c58703..c360de5b96 100644
--- a/cmake/test_cpu.cmake
+++ b/cmake/test_cpu.cmake
@@ -45,6 +45,38 @@ if (HAVE_SSE42)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()
+set (TEST_FLAG "-mssse3")
+set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
+check_cxx_source_compiles("
+ #include <tmmintrin.h>
+ int main() {
+ __m64 a = _mm_abs_pi8(__m64());
+ (void)a;
+ return 0;
+ }
+" HAVE_SSSE3)
+
+set (TEST_FLAG "-mavx")
+set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
+check_cxx_source_compiles("
+ #include <immintrin.h>
+ int main() {
+ auto a = _mm256_insert_epi8(__m256i(), 0, 0);
+ (void)a;
+ return 0;
+ }
+" HAVE_AVX)
+
+set (TEST_FLAG "-mavx2")
+set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
+check_cxx_source_compiles("
+ #include <immintrin.h>
+ int main() {
+ auto a = _mm256_add_epi16(__m256i(), __m256i());
+ (void)a;
+ return 0;
+ }
+" HAVE_AVX2)
# gcc -dM -E -mpopcnt - < /dev/null | sort > gcc-dump-popcnt
#define __POPCNT__ 1
@@ -65,5 +97,3 @@ if (HAVE_POPCNT AND NOT ARCH_AARCH64)
endif ()
cmake_pop_check_state ()
-
-# TODO: add here sse3 test if you want use it
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 20c00d3f54..b7085f992c 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -200,3 +200,7 @@ if (USE_INTERNAL_HDFS3_LIBRARY)
endif ()
add_subdirectory(libhdfs3-cmake)
endif ()
+
+if (USE_BASE64)
+ add_subdirectory (base64-cmake)
+endif()
diff --git a/contrib/base64 b/contrib/base64
new file mode 160000
index 0000000000..a27c565d1b
--- /dev/null
+++ b/contrib/base64
@@ -0,0 +1 @@
+Subproject commit a27c565d1b6c676beaf297fe503c4518185666f7
diff --git a/contrib/base64-cmake/.gitignore b/contrib/base64-cmake/.gitignore
new file mode 100644
index 0000000000..0e56cf2f8c
--- /dev/null
+++ b/contrib/base64-cmake/.gitignore
@@ -0,0 +1 @@
+config.h
diff --git a/contrib/base64-cmake/CMakeLists.txt b/contrib/base64-cmake/CMakeLists.txt
new file mode 100644
index 0000000000..09abb3b02b
--- /dev/null
+++ b/contrib/base64-cmake/CMakeLists.txt
@@ -0,0 +1,52 @@
+SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/base64)
+
+set(base64_compile_instructions "")
+LIST(LENGTH base64_compile_instructions 0)
+macro(cast_to_bool var instruction)
+ if (HAVE_${var})
+ set(base64_${var} 1)
+ set(base64_${var}_opt ${instruction})
+ else()
+ set(base64_${var} 0)
+ endif()
+endmacro()
+
+cast_to_bool(SSSE3 "-mssse3")
+cast_to_bool(SSE41 "-msse4.1")
+cast_to_bool(SSE42 "-msse4.2")
+cast_to_bool(AVX "-mavx")
+cast_to_bool(AVX2 "-mavx2")
+
+# write config.h file, to include it in application
+file(READ config-header.tpl header)
+file(WRITE config.h ${header})
+file(APPEND config.h "#define HAVE_SSSE3 ${base64_SSSE3}\n")
+file(APPEND config.h "#define HAVE_SSE41 ${base64_SSE41}\n")
+file(APPEND config.h "#define HAVE_SSE42 ${base64_SSE42}\n")
+file(APPEND config.h "#define HAVE_AVX ${base64_AVX}\n")
+file(APPEND config.h "#define HAVE_AVX2 ${base64_AVX2}\n")
+
+set(HAVE_FAST_UNALIGNED_ACCESS 0)
+if (${base64_SSSE3} OR ${base64_SSE41} OR ${base64_SSE42} OR ${base64_AVX} OR ${base64_AVX2})
+ set(HAVE_FAST_UNALIGNED_ACCESS 1)
+endif ()
+
+file(APPEND config.h "#define HAVE_FAST_UNALIGNED_ACCESS " ${HAVE_FAST_UNALIGNED_ACCESS} "\n")
+
+add_library(base64 ${LINK_MODE}
+ ${LIBRARY_DIR}/lib/lib.c
+ ${LIBRARY_DIR}/lib/codec_choose.c
+ ${LIBRARY_DIR}/lib/arch/avx/codec.c
+ ${LIBRARY_DIR}/lib/arch/avx2/codec.c
+ ${LIBRARY_DIR}/lib/arch/generic/codec.c
+ ${LIBRARY_DIR}/lib/arch/neon32/codec.c
+ ${LIBRARY_DIR}/lib/arch/neon64/codec.c
+ ${LIBRARY_DIR}/lib/arch/sse41/codec.c
+ ${LIBRARY_DIR}/lib/arch/sse42/codec.c
+ ${LIBRARY_DIR}/lib/arch/ssse3/codec.c
+
+ ${LIBRARY_DIR}/lib/codecs.h
+ config.h)
+
+target_compile_options(base64 PRIVATE ${base64_SSSE3_opt} ${base64_SSE41_opt} ${base64_SSE42_opt} ${base64_AVX_opt} ${base64_AVX2_opt})
+target_include_directories(base64 PRIVATE ${LIBRARY_DIR}/include .)
\ No newline at end of file
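The generated config.h tells aklomp/base64 which SIMD codecs were compiled in; at run time the library's codec_choose picks the fastest enabled one. A minimal usage sketch of the vendored library, assuming only its public header libbase64.h and a link against the base64 target defined above:

// Hedged sketch: encodes a short string; flags = 0 selects the codec automatically.
#include <libbase64.h>

#include <cstdio>
#include <cstring>

int main()
{
    const char * src = "ClickHouse";
    char out[64];
    size_t outlen = 0;

    base64_encode(src, std::strlen(src), out, &outlen, 0);
    std::printf("%.*s\n", static_cast<int>(outlen), out); // prints Q2xpY2tIb3VzZQ==
    return 0;
}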
diff --git a/contrib/base64-cmake/config-header.tpl b/contrib/base64-cmake/config-header.tpl
new file mode 100644
index 0000000000..c978ca0c08
--- /dev/null
+++ b/contrib/base64-cmake/config-header.tpl
@@ -0,0 +1,2 @@
+#define HAVE_NEON32 0
+#define HAVE_NEON64 0
diff --git a/contrib/cityhash102/CMakeLists.txt b/contrib/cityhash102/CMakeLists.txt
index eafa6f2025..c3f53a8f87 100644
--- a/contrib/cityhash102/CMakeLists.txt
+++ b/contrib/cityhash102/CMakeLists.txt
@@ -1,9 +1,8 @@
add_library(cityhash
src/city.cc
-
include/citycrc.h
include/city.h
src/config.h)
-target_include_directories (cityhash BEFORE PUBLIC include)
-target_include_directories (cityhash PRIVATE src)
+target_include_directories(cityhash BEFORE PUBLIC include)
+target_include_directories(cityhash PRIVATE src)
diff --git a/contrib/poco b/contrib/poco
index 566162b324..20c1d87777 160000
--- a/contrib/poco
+++ b/contrib/poco
@@ -1 +1 @@
-Subproject commit 566162b324e0b73eb360a1cd38077c9287cc1106
+Subproject commit 20c1d877773b6a672f1bbfe3290dfea42a117ed5
diff --git a/contrib/ssl b/contrib/ssl
index 919f6f1331..dbbbcdbbd1 160000
--- a/contrib/ssl
+++ b/contrib/ssl
@@ -1 +1 @@
-Subproject commit 919f6f1331d500bfdd26f8bbbf88e92c0119879b
+Subproject commit dbbbcdbbd17785566f8f9c107b714f9e213d7293
diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
index d2dac6f7f6..c3f54a4bee 100644
--- a/dbms/CMakeLists.txt
+++ b/dbms/CMakeLists.txt
@@ -119,7 +119,7 @@ endif ()
if (USE_EMBEDDED_COMPILER)
llvm_libs_all(REQUIRED_LLVM_LIBRARIES)
- target_link_libraries (dbms ${REQUIRED_LLVM_LIBRARIES})
+ target_link_libraries (dbms PRIVATE ${REQUIRED_LLVM_LIBRARIES})
target_include_directories (dbms SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
endif ()
@@ -150,34 +150,49 @@ if (NOT ARCH_ARM AND CPUID_LIBRARY)
endif()
target_link_libraries (clickhouse_common_io
+ PUBLIC
common
+ PRIVATE
string_utils
widechar_width
${LINK_LIBRARIES_ONLY_ON_X86_64}
${LZ4_LIBRARY}
${ZSTD_LIBRARY}
${DOUBLE_CONVERSION_LIBRARIES}
+ pocoext
+ PUBLIC
${Poco_Net_LIBRARY}
${Poco_Util_LIBRARY}
${Poco_Foundation_LIBRARY}
+ ${RE2_LIBRARY}
+ ${RE2_ST_LIBRARY}
+ ${CITYHASH_LIBRARIES}
+ PRIVATE
${ZLIB_LIBRARIES}
${EXECINFO_LIBRARY}
${ELF_LIBRARY}
+ PUBLIC
${Boost_SYSTEM_LIBRARY}
+ PRIVATE
apple_rt
${CMAKE_DL_LIBS}
${HDFS3_LIBRARY}
)
target_link_libraries (dbms
+ PRIVATE
clickhouse_parsers
clickhouse_common_config
+ PUBLIC
clickhouse_common_io
+ pocoext
+ PUBLIC
${MYSQLXX_LIBRARY}
- ${RE2_LIBRARY}
- ${RE2_ST_LIBRARY}
+ PRIVATE
${BTRIE_LIBRARIES}
${Boost_PROGRAM_OPTIONS_LIBRARY}
+ PUBLIC
+ ${Boost_SYSTEM_LIBRARY}
)
if (NOT USE_INTERNAL_RE2_LIBRARY)
@@ -194,8 +209,8 @@ if (Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY)
endif()
if (USE_POCO_SQLODBC)
- target_link_libraries (clickhouse_common_io ${Poco_SQL_LIBRARY})
- target_link_libraries (dbms ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY})
+ target_link_libraries (clickhouse_common_io PRIVATE ${Poco_SQL_LIBRARY})
+ target_link_libraries (dbms PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY})
if (NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQL_INCLUDE_DIR})
target_include_directories (dbms SYSTEM PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQLODBC_INCLUDE_DIR} PUBLIC ${Poco_SQL_INCLUDE_DIR})
@@ -209,48 +224,44 @@ if (Poco_Data_FOUND)
endif()
if (USE_POCO_DATAODBC)
- target_link_libraries (clickhouse_common_io ${Poco_Data_LIBRARY})
- target_link_libraries (dbms ${Poco_DataODBC_LIBRARY})
+ target_link_libraries (clickhouse_common_io PRIVATE ${Poco_Data_LIBRARY})
+ target_link_libraries (dbms PRIVATE ${Poco_DataODBC_LIBRARY})
if (NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories (dbms SYSTEM PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_DataODBC_INCLUDE_DIR})
endif()
endif()
if (USE_POCO_MONGODB)
- target_link_libraries (dbms ${Poco_MongoDB_LIBRARY})
+ target_link_libraries (dbms PRIVATE ${Poco_MongoDB_LIBRARY})
endif()
if (USE_POCO_NETSSL)
- target_link_libraries (clickhouse_common_io ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
- target_link_libraries (dbms ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
+ target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
+ target_link_libraries (dbms PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
endif()
-target_link_libraries (dbms ${Poco_Foundation_LIBRARY})
+target_link_libraries (dbms PRIVATE ${Poco_Foundation_LIBRARY})
if (USE_ICU)
- target_link_libraries (dbms ${ICU_LIBS})
+ target_link_libraries (dbms PRIVATE ${ICU_LIBS})
target_include_directories (dbms SYSTEM PRIVATE ${ICU_INCLUDE_DIR})
endif ()
if (USE_CAPNP)
- target_link_libraries (dbms ${CAPNP_LIBRARY})
+ target_link_libraries (dbms PRIVATE ${CAPNP_LIBRARY})
if (NOT USE_INTERNAL_CAPNP_LIBRARY)
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${CAPNP_INCLUDE_DIR})
endif ()
endif ()
if (USE_RDKAFKA)
- target_link_libraries (dbms ${RDKAFKA_LIBRARY})
+ target_link_libraries (dbms PRIVATE ${RDKAFKA_LIBRARY})
if (NOT USE_INTERNAL_RDKAFKA_LIBRARY)
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR})
endif ()
endif ()
-target_link_libraries(dbms ${OPENSSL_CRYPTO_LIBRARY})
-
-target_link_libraries (dbms
- Threads::Threads
-)
+target_link_libraries(dbms PRIVATE ${OPENSSL_CRYPTO_LIBRARY} Threads::Threads)
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR})
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
@@ -286,6 +297,6 @@ if (ENABLE_TESTS)
# attach all dbms gtest sources
grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources)
add_executable(unit_tests_dbms ${dbms_gtest_sources})
- target_link_libraries(unit_tests_dbms gtest_main dbms)
+ target_link_libraries(unit_tests_dbms PRIVATE gtest_main dbms clickhouse_common_zookeeper)
add_check(unit_tests_dbms)
endif ()
diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake
index e62a8e4796..9f623279b9 100644
--- a/dbms/cmake/version.cmake
+++ b/dbms/cmake/version.cmake
@@ -22,3 +22,5 @@ endif ()
set (VERSION_NAME "${PROJECT_NAME}" CACHE STRING "")
set (VERSION_FULL "${VERSION_NAME} ${VERSION_STRING}" CACHE STRING "")
set (VERSION_SO "${VERSION_STRING}" CACHE STRING "")
+
+math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000")
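VERSION_INTEGER packs the three version components into one number (patch + minor * 1000 + major * 1000000), so release 18.14.9 encodes as 18014009; Server.cpp below exports it through the new VersionInteger metric. An illustrative round-trip of the encoding:

// Encode and decode the packed version integer.
#include <cstdio>

int main()
{
    unsigned major = 18, minor = 14, patch = 9;
    unsigned packed = patch + minor * 1000 + major * 1000000;

    std::printf("%u\n", packed);      // 18014009
    std::printf("%u.%u.%u\n",
        packed / 1000000,             // major: 18
        packed / 1000 % 1000,         // minor: 14
        packed % 1000);               // patch: 9
    return 0;
}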
diff --git a/dbms/programs/CMakeLists.txt b/dbms/programs/CMakeLists.txt
index 136616ca44..441b39d996 100644
--- a/dbms/programs/CMakeLists.txt
+++ b/dbms/programs/CMakeLists.txt
@@ -48,45 +48,45 @@ else ()
link_directories (${LLVM_LIBRARY_DIRS})
endif ()
add_executable (clickhouse main.cpp)
- target_link_libraries (clickhouse clickhouse_common_io)
+ target_link_libraries (clickhouse PRIVATE clickhouse_common_io)
target_include_directories (clickhouse BEFORE PRIVATE ${COMMON_INCLUDE_DIR})
target_include_directories (clickhouse PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
if (ENABLE_CLICKHOUSE_SERVER)
- target_link_libraries (clickhouse clickhouse-server-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-server-lib)
endif ()
if (ENABLE_CLICKHOUSE_CLIENT)
- target_link_libraries (clickhouse clickhouse-client-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-client-lib)
endif ()
if (ENABLE_CLICKHOUSE_LOCAL)
- target_link_libraries (clickhouse clickhouse-local-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-local-lib)
endif ()
if (ENABLE_CLICKHOUSE_BENCHMARK)
- target_link_libraries (clickhouse clickhouse-benchmark-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-benchmark-lib)
endif ()
if (ENABLE_CLICKHOUSE_PERFORMANCE)
- target_link_libraries (clickhouse clickhouse-performance-test-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-performance-test-lib)
endif ()
if (ENABLE_CLICKHOUSE_COPIER)
- target_link_libraries (clickhouse clickhouse-copier-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-copier-lib)
endif ()
if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG)
- target_link_libraries (clickhouse clickhouse-extract-from-config-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-extract-from-config-lib)
endif ()
if (ENABLE_CLICKHOUSE_COMPRESSOR)
- target_link_libraries (clickhouse clickhouse-compressor-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-compressor-lib)
endif ()
if (ENABLE_CLICKHOUSE_FORMAT)
- target_link_libraries (clickhouse clickhouse-format-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-format-lib)
endif ()
if (ENABLE_CLICKHOUSE_OBFUSCATOR)
- target_link_libraries (clickhouse clickhouse-obfuscator-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-obfuscator-lib)
endif ()
if (USE_EMBEDDED_COMPILER)
- target_link_libraries (clickhouse clickhouse-compiler-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-compiler-lib)
endif ()
if (ENABLE_CLICKHOUSE_ODBC_BRIDGE)
- target_link_libraries (clickhouse clickhouse-odbc-bridge-lib)
+ target_link_libraries (clickhouse PRIVATE clickhouse-odbc-bridge-lib)
endif()
set (CLICKHOUSE_BUNDLE)
diff --git a/dbms/programs/benchmark/Benchmark.cpp b/dbms/programs/benchmark/Benchmark.cpp
index 161321f4d8..8931774bac 100644
--- a/dbms/programs/benchmark/Benchmark.cpp
+++ b/dbms/programs/benchmark/Benchmark.cpp
@@ -42,10 +42,8 @@ namespace DB
namespace ErrorCodes
{
- extern const int POCO_EXCEPTION;
- extern const int STD_EXCEPTION;
- extern const int UNKNOWN_EXCEPTION;
extern const int BAD_ARGUMENTS;
+ extern const int EMPTY_DATA_PASSED;
}
class Benchmark
@@ -170,7 +168,7 @@ private:
}
if (queries.empty())
- throw Exception("Empty list of queries.");
+ throw Exception("Empty list of queries.", ErrorCodes::EMPTY_DATA_PASSED);
std::cerr << "Loaded " << queries.size() << " queries.\n";
}
diff --git a/dbms/programs/benchmark/CMakeLists.txt b/dbms/programs/benchmark/CMakeLists.txt
index 85af075071..c41c46edeb 100644
--- a/dbms/programs/benchmark/CMakeLists.txt
+++ b/dbms/programs/benchmark/CMakeLists.txt
@@ -1,8 +1,8 @@
add_library (clickhouse-benchmark-lib ${LINK_MODE} Benchmark.cpp)
-target_link_libraries (clickhouse-benchmark-lib clickhouse-client-lib clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
target_include_directories (clickhouse-benchmark-lib SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR})
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-benchmark clickhouse-benchmark.cpp)
- target_link_libraries (clickhouse-benchmark clickhouse-benchmark-lib clickhouse_aggregate_functions)
+ target_link_libraries (clickhouse-benchmark PRIVATE clickhouse-benchmark-lib clickhouse_aggregate_functions)
endif ()
diff --git a/dbms/programs/clang/CMakeLists.txt b/dbms/programs/clang/CMakeLists.txt
index ca06f27546..dec21ac611 100644
--- a/dbms/programs/clang/CMakeLists.txt
+++ b/dbms/programs/clang/CMakeLists.txt
@@ -6,9 +6,9 @@ if (CLICKHOUSE_SPLIT_BINARY)
if (USE_EMBEDDED_COMPILER)
link_directories (${LLVM_LIBRARY_DIRS})
add_executable (clickhouse-clang clickhouse-clang.cpp)
- target_link_libraries (clickhouse-clang clickhouse-compiler-lib)
+ target_link_libraries (clickhouse-clang PRIVATE clickhouse-compiler-lib)
add_executable (clickhouse-lld clickhouse-lld.cpp)
- target_link_libraries (clickhouse-lld clickhouse-compiler-lib)
+ target_link_libraries (clickhouse-lld PRIVATE clickhouse-compiler-lib)
install (TARGETS clickhouse-clang clickhouse-lld RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif ()
endif ()
diff --git a/dbms/programs/client/CMakeLists.txt b/dbms/programs/client/CMakeLists.txt
index 65353094c2..f3dd518e89 100644
--- a/dbms/programs/client/CMakeLists.txt
+++ b/dbms/programs/client/CMakeLists.txt
@@ -1,13 +1,12 @@
add_library (clickhouse-client-lib ${LINK_MODE} Client.cpp)
-target_link_libraries (clickhouse-client-lib clickhouse_common_io clickhouse_functions clickhouse_aggregate_functions ${LINE_EDITING_LIBS} ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-client-lib PRIVATE clickhouse_common_io clickhouse_functions clickhouse_aggregate_functions ${LINE_EDITING_LIBS} ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (READLINE_INCLUDE_DIR)
target_include_directories (clickhouse-client-lib SYSTEM PRIVATE ${READLINE_INCLUDE_DIR})
endif ()
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-client clickhouse-client.cpp)
- target_link_libraries (clickhouse-client clickhouse-client-lib)
+ target_link_libraries (clickhouse-client PRIVATE clickhouse-client-lib)
endif ()
install (FILES clickhouse-client.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-client COMPONENT clickhouse-client RENAME config.xml)
-
diff --git a/dbms/programs/client/Client.cpp b/dbms/programs/client/Client.cpp
index 5a6f2b9bf2..784b172b6a 100644
--- a/dbms/programs/client/Client.cpp
+++ b/dbms/programs/client/Client.cpp
@@ -87,9 +87,6 @@ namespace DB
namespace ErrorCodes
{
- extern const int POCO_EXCEPTION;
- extern const int STD_EXCEPTION;
- extern const int UNKNOWN_EXCEPTION;
extern const int NETWORK_ERROR;
extern const int NO_DATA_TO_INSERT;
extern const int BAD_ARGUMENTS;
diff --git a/dbms/programs/client/Suggest.h b/dbms/programs/client/Suggest.h
index b93086d3b0..617e2bb520 100644
--- a/dbms/programs/client/Suggest.h
+++ b/dbms/programs/client/Suggest.h
@@ -184,7 +184,7 @@ public:
}
catch (...)
{
- std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false) << "\n";
+ std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n";
}
/// Note that keyword suggestions are available even if we cannot load data from server.
diff --git a/dbms/programs/compressor/CMakeLists.txt b/dbms/programs/compressor/CMakeLists.txt
index 5c9c11072c..7aa2cad570 100644
--- a/dbms/programs/compressor/CMakeLists.txt
+++ b/dbms/programs/compressor/CMakeLists.txt
@@ -1,8 +1,8 @@
add_library (clickhouse-compressor-lib ${LINK_MODE} Compressor.cpp)
-target_link_libraries (clickhouse-compressor-lib clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-compressor-lib PRIVATE clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (CLICKHOUSE_SPLIT_BINARY)
# Also in utils
add_executable (clickhouse-compressor clickhouse-compressor.cpp)
- target_link_libraries (clickhouse-compressor clickhouse-compressor-lib)
+ target_link_libraries (clickhouse-compressor PRIVATE clickhouse-compressor-lib)
endif ()
diff --git a/dbms/programs/copier/CMakeLists.txt b/dbms/programs/copier/CMakeLists.txt
index e8583dba44..ed3e55208a 100644
--- a/dbms/programs/copier/CMakeLists.txt
+++ b/dbms/programs/copier/CMakeLists.txt
@@ -1,5 +1,5 @@
add_library (clickhouse-copier-lib ${LINK_MODE} ClusterCopier.cpp)
-target_link_libraries (clickhouse-copier-lib clickhouse-server-lib clickhouse_functions clickhouse_aggregate_functions)
+target_link_libraries (clickhouse-copier-lib PRIVATE clickhouse-server-lib clickhouse_functions clickhouse_aggregate_functions daemon)
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-copier clickhouse-copier.cpp)
diff --git a/dbms/programs/extract-from-config/CMakeLists.txt b/dbms/programs/extract-from-config/CMakeLists.txt
index c31b0e8cec..6225364936 100644
--- a/dbms/programs/extract-from-config/CMakeLists.txt
+++ b/dbms/programs/extract-from-config/CMakeLists.txt
@@ -1,7 +1,7 @@
add_library (clickhouse-extract-from-config-lib ${LINK_MODE} ExtractFromConfig.cpp)
-target_link_libraries (clickhouse-extract-from-config-lib clickhouse_common_config clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-extract-from-config-lib PRIVATE clickhouse_common_config clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-extract-from-config clickhouse-extract-from-config.cpp)
- target_link_libraries (clickhouse-extract-from-config clickhouse-extract-from-config-lib)
+ target_link_libraries (clickhouse-extract-from-config PRIVATE clickhouse-extract-from-config-lib)
endif ()
diff --git a/dbms/programs/format/CMakeLists.txt b/dbms/programs/format/CMakeLists.txt
index f53cd11bee..53d09e8262 100644
--- a/dbms/programs/format/CMakeLists.txt
+++ b/dbms/programs/format/CMakeLists.txt
@@ -1,6 +1,6 @@
add_library (clickhouse-format-lib ${LINK_MODE} Format.cpp)
-target_link_libraries (clickhouse-format-lib dbms clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-format-lib PRIVATE dbms clickhouse_common_io clickhouse_parsers ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-format clickhouse-format.cpp)
- target_link_libraries (clickhouse-format clickhouse-format-lib)
+ target_link_libraries (clickhouse-format PRIVATE clickhouse-format-lib)
endif ()
diff --git a/dbms/programs/local/CMakeLists.txt b/dbms/programs/local/CMakeLists.txt
index 9680ceaf1c..07729d6856 100644
--- a/dbms/programs/local/CMakeLists.txt
+++ b/dbms/programs/local/CMakeLists.txt
@@ -1,7 +1,7 @@
add_library (clickhouse-local-lib ${LINK_MODE} LocalServer.cpp)
-target_link_libraries (clickhouse-local-lib clickhouse_common_io clickhouse-server-lib clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-local-lib PRIVATE clickhouse_common_io clickhouse-server-lib clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-local clickhouse-local.cpp)
- target_link_libraries (clickhouse-local clickhouse-local-lib)
+ target_link_libraries (clickhouse-local PRIVATE clickhouse-local-lib)
endif ()
diff --git a/dbms/programs/obfuscator/CMakeLists.txt b/dbms/programs/obfuscator/CMakeLists.txt
index 5ee6ace5a0..73c3f01e9c 100644
--- a/dbms/programs/obfuscator/CMakeLists.txt
+++ b/dbms/programs/obfuscator/CMakeLists.txt
@@ -1,8 +1,8 @@
add_library (clickhouse-obfuscator-lib ${LINK_MODE} Obfuscator.cpp)
-target_link_libraries (clickhouse-obfuscator-lib dbms ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-obfuscator-lib PRIVATE dbms ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-obfuscator clickhouse-obfuscator.cpp)
set_target_properties(clickhouse-obfuscator PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
- target_link_libraries (clickhouse-obfuscator clickhouse-obfuscator-lib)
+ target_link_libraries (clickhouse-obfuscator PRIVATE clickhouse-obfuscator-lib)
endif ()
diff --git a/dbms/programs/odbc-bridge/CMakeLists.txt b/dbms/programs/odbc-bridge/CMakeLists.txt
index d13a2866e7..a57c8c9c8c 100644
--- a/dbms/programs/odbc-bridge/CMakeLists.txt
+++ b/dbms/programs/odbc-bridge/CMakeLists.txt
@@ -9,23 +9,23 @@ add_library (clickhouse-odbc-bridge-lib ${LINK_MODE}
validateODBCConnectionString.cpp
)
-target_link_libraries (clickhouse-odbc-bridge-lib clickhouse_common_io daemon dbms)
+target_link_libraries (clickhouse-odbc-bridge-lib PRIVATE clickhouse_common_io daemon dbms)
target_include_directories (clickhouse-odbc-bridge-lib PUBLIC ${ClickHouse_SOURCE_DIR}/libs/libdaemon/include)
if (USE_POCO_SQLODBC)
- target_link_libraries (clickhouse-odbc-bridge-lib ${Poco_SQLODBC_LIBRARY})
+ target_link_libraries (clickhouse-odbc-bridge-lib PRIVATE ${Poco_SQLODBC_LIBRARY})
target_include_directories (clickhouse-odbc-bridge-lib SYSTEM PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQLODBC_INCLUDE_DIR})
endif ()
if (Poco_SQL_FOUND)
- target_link_libraries (clickhouse-odbc-bridge-lib ${Poco_SQL_LIBRARY})
+ target_link_libraries (clickhouse-odbc-bridge-lib PRIVATE ${Poco_SQL_LIBRARY})
endif ()
if (USE_POCO_DATAODBC)
- target_link_libraries (clickhouse-odbc-bridge-lib ${Poco_DataODBC_LIBRARY})
+ target_link_libraries (clickhouse-odbc-bridge-lib PRIVATE ${Poco_DataODBC_LIBRARY})
target_include_directories (clickhouse-odbc-bridge-lib SYSTEM PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_DataODBC_INCLUDE_DIR})
endif()
if (Poco_Data_FOUND)
- target_link_libraries (clickhouse-odbc-bridge-lib ${Poco_Data_LIBRARY})
+ target_link_libraries (clickhouse-odbc-bridge-lib PRIVATE ${Poco_Data_LIBRARY})
endif ()
@@ -35,5 +35,5 @@ endif ()
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-odbc-bridge odbc-bridge.cpp)
- target_link_libraries (clickhouse-odbc-bridge clickhouse-odbc-bridge-lib)
+ target_link_libraries (clickhouse-odbc-bridge PRIVATE clickhouse-odbc-bridge-lib)
endif ()
diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp b/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp
index f59abd5f58..997ef9cf21 100644
--- a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp
+++ b/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp
@@ -124,9 +124,9 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
auto identifier_quote = getIdentifierQuote(hdbc);
if (identifier_quote.length() == 0)
settings.identifier_quoting_style = IdentifierQuotingStyle::None;
- else if(identifier_quote[0] == '`')
+ else if (identifier_quote[0] == '`')
settings.identifier_quoting_style = IdentifierQuotingStyle::Backticks;
- else if(identifier_quote[0] == '"')
+ else if (identifier_quote[0] == '"')
settings.identifier_quoting_style = IdentifierQuotingStyle::DoubleQuotes;
else
throw Exception("Can not map quote identifier '" + identifier_quote + "' to IdentifierQuotingStyle value", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
diff --git a/dbms/programs/odbc-bridge/HandlerFactory.cpp b/dbms/programs/odbc-bridge/HandlerFactory.cpp
index 8a0ff06268..a6422db268 100644
--- a/dbms/programs/odbc-bridge/HandlerFactory.cpp
+++ b/dbms/programs/odbc-bridge/HandlerFactory.cpp
@@ -25,7 +25,7 @@ Poco::Net::HTTPRequestHandler * HandlerFactory::createRequestHandler(const Poco:
#else
return nullptr;
#endif
- else if(uri.getPath() == "/identifier_quote")
+ else if (uri.getPath() == "/identifier_quote")
#if USE_POCO_SQLODBC || USE_POCO_DATAODBC
return new IdentifierQuoteHandler(keep_alive_timeout, context);
#else
diff --git a/dbms/programs/odbc-bridge/tests/CMakeLists.txt b/dbms/programs/odbc-bridge/tests/CMakeLists.txt
index 5240a91742..5211c39d11 100644
--- a/dbms/programs/odbc-bridge/tests/CMakeLists.txt
+++ b/dbms/programs/odbc-bridge/tests/CMakeLists.txt
@@ -1,2 +1,2 @@
add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp)
-target_link_libraries (validate-odbc-connection-string clickhouse-odbc-bridge-lib)
+target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse-odbc-bridge-lib clickhouse_common_io)
diff --git a/dbms/programs/performance-test/CMakeLists.txt b/dbms/programs/performance-test/CMakeLists.txt
index 31796cd9d7..adad45025c 100644
--- a/dbms/programs/performance-test/CMakeLists.txt
+++ b/dbms/programs/performance-test/CMakeLists.txt
@@ -1,8 +1,8 @@
add_library (clickhouse-performance-test-lib ${LINK_MODE} PerformanceTest.cpp)
-target_link_libraries (clickhouse-performance-test-lib clickhouse_common_io dbms ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries (clickhouse-performance-test-lib PRIVATE dbms clickhouse_common_io ${Boost_PROGRAM_OPTIONS_LIBRARY})
target_include_directories (clickhouse-performance-test-lib SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR})
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-performance-test clickhouse-performance-test.cpp)
- target_link_libraries (clickhouse-performance-test clickhouse-performance-test-lib dbms)
+ target_link_libraries (clickhouse-performance-test PRIVATE clickhouse-performance-test-lib)
endif ()
diff --git a/dbms/programs/performance-test/PerformanceTest.cpp b/dbms/programs/performance-test/PerformanceTest.cpp
index e63d40e2db..ebebedd876 100644
--- a/dbms/programs/performance-test/PerformanceTest.cpp
+++ b/dbms/programs/performance-test/PerformanceTest.cpp
@@ -49,10 +49,10 @@ namespace DB
{
namespace ErrorCodes
{
- extern const int POCO_EXCEPTION;
- extern const int STD_EXCEPTION;
- extern const int UNKNOWN_EXCEPTION;
extern const int NOT_IMPLEMENTED;
+ extern const int LOGICAL_ERROR;
+ extern const int BAD_ARGUMENTS;
+ extern const int FILE_DOESNT_EXIST;
}
static String pad(size_t padding)
@@ -156,7 +156,7 @@ struct StopConditionsSet
else if (key == "average_speed_not_changing_for_ms")
average_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key);
else
- throw DB::Exception("Met unkown stop condition: " + key);
+ throw DB::Exception("Met unkown stop condition: " + key, DB::ErrorCodes::LOGICAL_ERROR);
++initialized_count;
}
@@ -521,7 +521,7 @@ public:
{
if (input_files.size() < 1)
{
- throw DB::Exception("No tests were specified", 0);
+ throw DB::Exception("No tests were specified", DB::ErrorCodes::BAD_ARGUMENTS);
}
std::string name;
@@ -694,7 +694,7 @@ private:
size_t ram_size_needed = config->getUInt64("preconditions.ram_size");
size_t actual_ram = getMemoryAmount();
if (!actual_ram)
- throw DB::Exception("ram_size precondition not available on this platform", ErrorCodes::NOT_IMPLEMENTED);
+ throw DB::Exception("ram_size precondition not available on this platform", DB::ErrorCodes::NOT_IMPLEMENTED);
if (ram_size_needed > actual_ram)
{
@@ -868,12 +868,12 @@ private:
if (!test_config->has("query") && !test_config->has("query_file"))
{
- throw DB::Exception("Missing query fields in test's config: " + test_name);
+ throw DB::Exception("Missing query fields in test's config: " + test_name, DB::ErrorCodes::BAD_ARGUMENTS);
}
if (test_config->has("query") && test_config->has("query_file"))
{
- throw DB::Exception("Found both query and query_file fields. Choose only one");
+ throw DB::Exception("Found both query and query_file fields. Choose only one", DB::ErrorCodes::BAD_ARGUMENTS);
}
if (test_config->has("query"))
@@ -885,7 +885,7 @@ private:
{
const String filename = test_config->getString("query_file");
if (filename.empty())
- throw DB::Exception("Empty file name");
+ throw DB::Exception("Empty file name", DB::ErrorCodes::BAD_ARGUMENTS);
bool tsv = fs::path(filename).extension().string() == ".tsv";
@@ -909,7 +909,7 @@ private:
if (queries.empty())
{
- throw DB::Exception("Did not find any query to execute: " + test_name);
+ throw DB::Exception("Did not find any query to execute: " + test_name, DB::ErrorCodes::BAD_ARGUMENTS);
}
if (test_config->has("substitutions"))
@@ -929,7 +929,7 @@ private:
if (!test_config->has("type"))
{
- throw DB::Exception("Missing type property in config: " + test_name);
+ throw DB::Exception("Missing type property in config: " + test_name, DB::ErrorCodes::BAD_ARGUMENTS);
}
String config_exec_type = test_config->getString("type");
@@ -938,7 +938,7 @@ private:
else if (config_exec_type == "once")
exec_type = ExecutionType::Once;
else
- throw DB::Exception("Unknown type " + config_exec_type + " in :" + test_name);
+        throw DB::Exception("Unknown type " + config_exec_type + " in: " + test_name, DB::ErrorCodes::BAD_ARGUMENTS);
times_to_run = test_config->getUInt("times_to_run", 1);
@@ -951,7 +951,7 @@ private:
}
if (stop_conditions_template.empty())
- throw DB::Exception("No termination conditions were found in config");
+ throw DB::Exception("No termination conditions were found in config", DB::ErrorCodes::BAD_ARGUMENTS);
for (size_t i = 0; i < times_to_run * queries.size(); ++i)
stop_conditions_by_run.push_back(stop_conditions_template);
@@ -978,7 +978,7 @@ private:
else
{
if (lite_output)
- throw DB::Exception("Specify main_metric for lite output");
+ throw DB::Exception("Specify main_metric for lite output", DB::ErrorCodes::BAD_ARGUMENTS);
}
if (metrics.size() > 0)
@@ -1023,22 +1023,14 @@ private:
if (exec_type == ExecutionType::Loop)
{
for (const String & metric : metrics)
- {
if (std::find(non_loop_metrics.begin(), non_loop_metrics.end(), metric) != non_loop_metrics.end())
- {
- throw DB::Exception("Wrong type of metric for loop execution type (" + metric + ")");
- }
- }
+ throw DB::Exception("Wrong type of metric for loop execution type (" + metric + ")", DB::ErrorCodes::BAD_ARGUMENTS);
}
else
{
for (const String & metric : metrics)
- {
if (std::find(loop_metrics.begin(), loop_metrics.end(), metric) != loop_metrics.end())
- {
- throw DB::Exception("Wrong type of metric for non-loop execution type (" + metric + ")");
- }
- }
+ throw DB::Exception("Wrong type of metric for non-loop execution type (" + metric + ")", DB::ErrorCodes::BAD_ARGUMENTS);
}
}
@@ -1439,7 +1431,7 @@ try
if (input_files.empty())
{
std::cerr << std::endl;
- throw DB::Exception("Did not find any xml files");
+ throw DB::Exception("Did not find any xml files", DB::ErrorCodes::BAD_ARGUMENTS);
}
else
std::cerr << " found " << input_files.size() << " files." << std::endl;
@@ -1454,7 +1446,7 @@ try
fs::path file(filename);
if (!fs::exists(file))
- throw DB::Exception("File '" + filename + "' does not exist");
+ throw DB::Exception("File '" + filename + "' does not exist", DB::ErrorCodes::FILE_DOESNT_EXIST);
if (fs::is_directory(file))
{
@@ -1463,7 +1455,7 @@ try
else
{
if (file.extension().string() != ".xml")
- throw DB::Exception("File '" + filename + "' does not have .xml extension");
+ throw DB::Exception("File '" + filename + "' does not have .xml extension", DB::ErrorCodes::BAD_ARGUMENTS);
collected_files.push_back(filename);
}
}
diff --git a/dbms/programs/server/CMakeLists.txt b/dbms/programs/server/CMakeLists.txt
index bc6683d6e9..d8caa07b74 100644
--- a/dbms/programs/server/CMakeLists.txt
+++ b/dbms/programs/server/CMakeLists.txt
@@ -10,12 +10,16 @@ add_library (clickhouse-server-lib ${LINK_MODE}
TCPHandler.cpp
)
-target_link_libraries (clickhouse-server-lib clickhouse_common_io daemon clickhouse_storages_system clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions)
+target_link_libraries (clickhouse-server-lib PRIVATE clickhouse_common_io daemon clickhouse_storages_system clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions ${Poco_Net_LIBRARY})
+if (USE_POCO_NETSSL)
+ target_link_libraries (clickhouse-server-lib PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
+endif ()
+
target_include_directories (clickhouse-server-lib PUBLIC ${ClickHouse_SOURCE_DIR}/libs/libdaemon/include)
if (CLICKHOUSE_SPLIT_BINARY)
add_executable (clickhouse-server clickhouse-server.cpp)
- target_link_libraries (clickhouse-server clickhouse-server-lib)
+ target_link_libraries (clickhouse-server PRIVATE clickhouse-server-lib)
install (TARGETS clickhouse-server ${CLICKHOUSE_ALL_TARGETS} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif ()
diff --git a/dbms/programs/server/HTTPHandler.cpp b/dbms/programs/server/HTTPHandler.cpp
index 9d9324b9a3..34037a7c7c 100644
--- a/dbms/programs/server/HTTPHandler.cpp
+++ b/dbms/programs/server/HTTPHandler.cpp
@@ -270,7 +270,6 @@ void HTTPHandler::processQuery(
std::string query_id = params.get("query_id", "");
context.setUser(user, password, request.clientAddress(), quota_key);
context.setCurrentQueryId(query_id);
- CurrentThread::attachQueryContext(context);
/// The user could specify session identifier and session timeout.
/// It allows to modify settings, create temporary tables and reuse them in subsequent requests.
diff --git a/dbms/programs/server/InterserverIOHTTPHandler.cpp b/dbms/programs/server/InterserverIOHTTPHandler.cpp
index 3895b2d899..3c93ee1989 100644
--- a/dbms/programs/server/InterserverIOHTTPHandler.cpp
+++ b/dbms/programs/server/InterserverIOHTTPHandler.cpp
@@ -19,9 +19,6 @@ namespace DB
namespace ErrorCodes
{
extern const int ABORTED;
- extern const int POCO_EXCEPTION;
- extern const int STD_EXCEPTION;
- extern const int UNKNOWN_EXCEPTION;
extern const int TOO_MANY_SIMULTANEOUS_QUERIES;
}
diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp
index 0937cf4b84..b80a9c7073 100644
--- a/dbms/programs/server/Server.cpp
+++ b/dbms/programs/server/Server.cpp
@@ -55,6 +55,7 @@
namespace CurrentMetrics
{
extern const Metric Revision;
+ extern const Metric VersionInteger;
}
namespace DB
@@ -66,6 +67,8 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
+ extern const int INVALID_CONFIG_PARAMETER;
+ extern const int SYSTEM_ERROR;
}
@@ -73,7 +76,7 @@ static std::string getCanonicalPath(std::string && path)
{
Poco::trimInPlace(path);
if (path.empty())
- throw Exception("path configuration parameter is empty");
+ throw Exception("path configuration parameter is empty", ErrorCodes::INVALID_CONFIG_PARAMETER);
if (path.back() != '/')
path += '/';
return std::move(path);
@@ -108,6 +111,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
registerStorages();
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::get());
+ CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
/** Context contains all that query execution is dependent:
* settings, available functions, data types, aggregate functions, databases...
@@ -141,7 +145,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
LOG_TRACE(log, "Will mlockall to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlockall(MCL_CURRENT))
- LOG_WARNING(log, "Failed mlockall: " + errnoToString());
+ LOG_WARNING(log, "Failed mlockall: " + errnoToString(ErrorCodes::SYSTEM_ERROR));
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed");
}
diff --git a/dbms/programs/server/TCPHandler.cpp b/dbms/programs/server/TCPHandler.cpp
index 5c9b3a2d86..63e60d22c0 100644
--- a/dbms/programs/server/TCPHandler.cpp
+++ b/dbms/programs/server/TCPHandler.cpp
@@ -718,7 +718,7 @@ bool TCPHandler::receiveData()
{
NamesAndTypesList columns = block.getNamesAndTypesList();
storage = StorageMemory::create(external_table_name,
- ColumnsDescription{columns, NamesAndTypesList{}, NamesAndTypesList{}, ColumnDefaults{}});
+ ColumnsDescription{columns, NamesAndTypesList{}, NamesAndTypesList{}, ColumnDefaults{}, ColumnComments{}});
storage->startup();
query_context.addExternalTable(external_table_name, storage);
}
diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml
index 051f6f7fb2..514a081eac 100644
--- a/dbms/programs/server/config.xml
+++ b/dbms/programs/server/config.xml
@@ -164,6 +164,20 @@
+        <test_cluster_two_shards_localhost>
+            <shard>
+                <replica>
+                    <host>localhost</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <replica>
+                    <host>localhost</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </test_cluster_two_shards_localhost>
diff --git a/dbms/programs/server/users.xml b/dbms/programs/server/users.xml
index 6f746baf2a..979b2d3fc1 100644
--- a/dbms/programs/server/users.xml
+++ b/dbms/programs/server/users.xml
@@ -56,7 +56,7 @@
Each element of list has one of the following forms:
IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
- 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+ 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
Hostname. Example: server01.yandex.ru.
To check access, DNS query is performed, and all received addresses compared to peer address.
Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
diff --git a/dbms/scripts/gen-bias-data.py b/dbms/scripts/gen-bias-data.py
index 7edc9948e7..034cfcca7d 100755
--- a/dbms/scripts/gen-bias-data.py
+++ b/dbms/scripts/gen-bias-data.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3.4
+#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp b/dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp
index 36b29796b9..565f1f0c33 100644
--- a/dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp
+++ b/dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp
@@ -12,7 +12,7 @@ namespace
template <typename T>
struct Avg
{
-    using FieldType = std::conditional_t<IsDecimalNumber<T>, Decimal128, typename NearestFieldType<T>::Type>;
+    using FieldType = std::conditional_t<IsDecimalNumber<T>, Decimal128, NearestFieldType<T>>;
    using Function = AggregateFunctionAvg<T, AggregateFunctionAvgData<FieldType>>;
};
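The new spelling compiles only if NearestFieldType is an alias template over the old trait rather than the trait itself. A sketch of that pattern, with NearestFieldTypeImpl and the sample specializations as illustrative stand-ins for the real trait:

#include <cstdint>
#include <type_traits>

template <typename T>
struct NearestFieldTypeImpl { using Type = T; };

// Narrow integers map to the widest Field representation:
template <> struct NearestFieldTypeImpl<uint8_t>  { using Type = uint64_t; };
template <> struct NearestFieldTypeImpl<uint16_t> { using Type = uint64_t; };

// The alias removes "typename NearestFieldType<T>::Type" at every use site.
template <typename T>
using NearestFieldType = typename NearestFieldTypeImpl<T>::Type;

static_assert(std::is_same_v<NearestFieldType<uint16_t>, uint64_t>);

int main() { return 0; }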
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSum.cpp b/dbms/src/AggregateFunctions/AggregateFunctionSum.cpp
index 45a97d2bc8..f21c60eeae 100644
--- a/dbms/src/AggregateFunctions/AggregateFunctionSum.cpp
+++ b/dbms/src/AggregateFunctions/AggregateFunctionSum.cpp
@@ -14,7 +14,7 @@ template <typename T>
struct SumSimple
{
    /// @note It uses slow Decimal128 (cause we need such a variant). sumWithOverflow is faster for Decimal32/64
-    using ResultType = std::conditional_t<IsDecimalNumber<T>, Decimal128, typename NearestFieldType<T>::Type>;
+    using ResultType = std::conditional_t<IsDecimalNumber<T>, Decimal128, NearestFieldType<T>>;
    using AggregateDataType = AggregateFunctionSumData<ResultType>;
    using Function = AggregateFunctionSum<T, ResultType, AggregateDataType>;
};
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h
index e89c0bf841..b79670eb3d 100644
--- a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h
+++ b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h
@@ -52,7 +52,7 @@ struct AggregateFunctionSumMapData
template <typename T>
class AggregateFunctionSumMap final : public IAggregateFunctionDataHelper<
-    AggregateFunctionSumMapData<typename NearestFieldType<T>::Type>, AggregateFunctionSumMap<T>>
+    AggregateFunctionSumMapData<NearestFieldType<T>>, AggregateFunctionSumMap<T>>
{
private:
    using ColVecType = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<T>, ColumnVector<T>>;
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp b/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp
index 77b6c9cfb9..6b63a719b8 100644
--- a/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp
+++ b/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp
@@ -130,9 +130,6 @@ void registerAggregateFunctionsUniq(AggregateFunctionFactory & factory)
factory.registerFunction("uniqExact",
    createAggregateFunctionUniq<true, AggregateFunctionUniqExactData, AggregateFunctionUniqExactData<String>>);
-
-    factory.registerFunction("uniqCombined",
-        createAggregateFunctionUniq<false, AggregateFunctionUniqCombinedData, AggregateFunctionUniqCombinedData<String>>);
}
}
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.h b/dbms/src/AggregateFunctions/AggregateFunctionUniq.h
index 140928959a..980d62b40e 100644
--- a/dbms/src/AggregateFunctions/AggregateFunctionUniq.h
+++ b/dbms/src/AggregateFunctions/AggregateFunctionUniq.h
@@ -22,7 +22,6 @@
#include <AggregateFunctions/UniquesHashSet.h>
#include <AggregateFunctions/IAggregateFunction.h>
-#include <AggregateFunctions/UniqCombinedBiasData.h>
#include <AggregateFunctions/UniqVariadicHash.h>
@@ -124,46 +123,6 @@ struct AggregateFunctionUniqExactData
static String getName() { return "uniqExact"; }
};
-template <typename T>
-struct AggregateFunctionUniqCombinedData
-{
-    using Key = UInt32;
-    using Set = CombinedCardinalityEstimator<
-        Key,
-        HashSet<Key, TrivialHash, HashTableGrower<>>,
-        16,
-        14,
-        17,
-        TrivialHash,
-        UInt32,
-        HyperLogLogBiasEstimator<UniqCombinedBiasData>,
-        HyperLogLogMode::FullFeatured>;
-
-    Set set;
-
-    static String getName() { return "uniqCombined"; }
-};
-
-template <>
-struct AggregateFunctionUniqCombinedData<String>
-{
-    using Key = UInt64;
-    using Set = CombinedCardinalityEstimator<
-        Key,
-        HashSet<Key, TrivialHash, HashTableGrower<>>,
-        16,
-        14,
-        17,
-        TrivialHash,
-        UInt64,
-        HyperLogLogBiasEstimator<UniqCombinedBiasData>,
-        HyperLogLogMode::FullFeatured>;
-
-    Set set;
-
-    static String getName() { return "uniqCombined"; }
-};
-
namespace detail
{
@@ -199,39 +158,6 @@ template <> struct AggregateFunctionUniqTraits
}
};
-/** Hash function for uniqCombined.
-  */
-template <typename T> struct AggregateFunctionUniqCombinedTraits
-{
-    static UInt32 hash(T x) { return static_cast<UInt32>(intHash64(x)); }
-};
-
-template <> struct AggregateFunctionUniqCombinedTraits<UInt128>
-{
-    static UInt32 hash(UInt128 x)
-    {
-        return sipHash64(x);
-    }
-};
-
-template <> struct AggregateFunctionUniqCombinedTraits<Float32>
-{
-    static UInt32 hash(Float32 x)
-    {
-        UInt64 res = ext::bit_cast<UInt64>(x);
-        return static_cast<UInt32>(intHash64(res));
-    }
-};
-
-template <> struct AggregateFunctionUniqCombinedTraits<Float64>
-{
-    static UInt32 hash(Float64 x)
-    {
-        UInt64 res = ext::bit_cast<UInt64>(x);
-        return static_cast<UInt32>(intHash64(res));
-    }
-};
-
/** The structure for the delegation work to add one element to the `uniq` aggregate functions.
* Used for partial specialization to add strings.
@@ -255,19 +181,6 @@ struct OneAdder
data.set.insert(CityHash_v1_0_2::CityHash64(value.data, value.size));
}
}
-    else if constexpr (std::is_same_v<Data, AggregateFunctionUniqCombinedData<T>>)
-    {
-        if constexpr (!std::is_same_v<T, String>)
-        {
-            const auto & value = static_cast<const ColumnVector<T> &>(column).getData()[row_num];
-            data.set.insert(AggregateFunctionUniqCombinedTraits<T>::hash(value));
-        }
-        else
-        {
-            StringRef value = column.getDataAt(row_num);
-            data.set.insert(CityHash_v1_0_2::CityHash64(value.data, value.size));
-        }
-    }
    else if constexpr (std::is_same_v<Data, AggregateFunctionUniqExactData<T>>)
    {
        if constexpr (!std::is_same_v<T, String>)
@@ -387,5 +300,4 @@ public:
const char * getHeaderFilePath() const override { return __FILE__; }
};
-
}
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp b/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp
new file mode 100644
index 0000000000..90b84d3b92
--- /dev/null
+++ b/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp
@@ -0,0 +1,127 @@
+#include <AggregateFunctions/AggregateFunctionUniqCombined.h>
+
+#include <AggregateFunctions/AggregateFunctionFactory.h>
+#include <AggregateFunctions/Helpers.h>
+
+#include <Common/FieldVisitors.h>
+#include <Common/typeid_cast.h>
+
+namespace DB
+{
+namespace ErrorCodes
+{
+ extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+ extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+ extern const int ARGUMENT_OUT_OF_BOUND;
+}
+
+namespace
+{
+    template <UInt8 K>
+    struct WithK
+    {
+        template <typename T>
+        using AggregateFunction = AggregateFunctionUniqCombined<T, K>;
+
+        template <bool is_exact, bool argument_is_tuple>
+        using AggregateFunctionVariadic = AggregateFunctionUniqCombinedVariadic<is_exact, argument_is_tuple, K>;
+    };
+
+    template <UInt8 K>
+ AggregateFunctionPtr createAggregateFunctionWithK(const DataTypes & argument_types)
+ {
+ /// We use exact hash function if the arguments are not contiguous in memory, because only exact hash function has support for this case.
+ bool use_exact_hash_function = !isAllArgumentsContiguousInMemory(argument_types);
+
+ if (argument_types.size() == 1)
+ {
+ const IDataType & argument_type = *argument_types[0];
+
+            AggregateFunctionPtr res(createWithNumericType<WithK<K>::template AggregateFunction>(*argument_types[0]));
+
+ WhichDataType which(argument_type);
+ if (res)
+ return res;
+ else if (which.isDate())
+                return std::make_shared<typename WithK<K>::template AggregateFunction<DataTypeDate::FieldType>>();
+            else if (which.isDateTime())
+                return std::make_shared<typename WithK<K>::template AggregateFunction<DataTypeDateTime::FieldType>>();
+            else if (which.isStringOrFixedString())
+                return std::make_shared<typename WithK<K>::template AggregateFunction<String>>();
+            else if (which.isUUID())
+                return std::make_shared<typename WithK<K>::template AggregateFunction<DataTypeUUID::FieldType>>();
+ else if (which.isTuple())
+ {
+ if (use_exact_hash_function)
+                    return std::make_shared<typename WithK<K>::template AggregateFunctionVariadic<true, true>>(argument_types);
+                else
+                    return std::make_shared<typename WithK<K>::template AggregateFunctionVariadic<false, true>>(argument_types);
+ }
+ }
+
+ /// "Variadic" method also works as a fallback generic case for a single argument.
+ if (use_exact_hash_function)
+            return std::make_shared<typename WithK<K>::template AggregateFunctionVariadic<true, false>>(argument_types);
+        else
+            return std::make_shared<typename WithK<K>::template AggregateFunctionVariadic<false, false>>(argument_types);
+ }
+
+ AggregateFunctionPtr createAggregateFunctionUniqCombined(
+ const std::string & name, const DataTypes & argument_types, const Array & params)
+ {
+ /// log2 of the number of cells in HyperLogLog.
+ /// Reasonable default value, selected to be comparable in quality with "uniq" aggregate function.
+ UInt8 precision = 17;
+
+ if (!params.empty())
+ {
+ if (params.size() != 1)
+ throw Exception(
+ "Aggregate function " + name + " requires one parameter or less.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+
+            UInt64 precision_param = applyVisitor(FieldVisitorConvertToNumber<UInt64>(), params[0]);
+
+ // This range is hardcoded below
+ if (precision_param > 20 || precision_param < 12)
+ throw Exception(
+ "Parameter for aggregate function " + name + "is out or range: [12, 20].", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
+
+ precision = precision_param;
+ }
+
+ if (argument_types.empty())
+ throw Exception("Incorrect number of arguments for aggregate function " + name, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+
+ switch (precision)
+ {
+ case 12:
+ return createAggregateFunctionWithK<12>(argument_types);
+ case 13:
+ return createAggregateFunctionWithK<13>(argument_types);
+ case 14:
+ return createAggregateFunctionWithK<14>(argument_types);
+ case 15:
+ return createAggregateFunctionWithK<15>(argument_types);
+ case 16:
+ return createAggregateFunctionWithK<16>(argument_types);
+ case 17:
+ return createAggregateFunctionWithK<17>(argument_types);
+ case 18:
+ return createAggregateFunctionWithK<18>(argument_types);
+ case 19:
+ return createAggregateFunctionWithK<19>(argument_types);
+ case 20:
+ return createAggregateFunctionWithK<20>(argument_types);
+ }
+
+ __builtin_unreachable();
+ }
+
+} // namespace
+
+void registerAggregateFunctionUniqCombined(AggregateFunctionFactory & factory)
+{
+ factory.registerFunction("uniqCombined", createAggregateFunctionUniqCombined);
+}
+
+} // namespace DB
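The precision parameter trades memory for accuracy: the HyperLogLog stage uses 2^K cells of 6 bits each (per the comment in the header below), and the standard HyperLogLog estimate puts the relative error near 1.04 / sqrt(2^K), so the default K = 17 costs about 96 KiB per state for roughly 0.3% error. A rough table over the accepted [12, 20] range:

#include <cmath>
#include <cstdio>

int main()
{
    for (int k = 12; k <= 20; ++k)
    {
        double cells = std::pow(2.0, k);
        double bytes = cells * 6.0 / 8.0;            // 2^K cells of 6 bits
        double rel_error = 1.04 / std::sqrt(cells);  // standard HLL estimate
        std::printf("K=%2d  ~%8.0f bytes  ~%.2f%% error\n", k, bytes, rel_error * 100.0);
    }
    return 0;
}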
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h
new file mode 100644
index 0000000000..99cc3a9389
--- /dev/null
+++ b/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h
@@ -0,0 +1,229 @@
+#pragma once
+
+#include <ext/bit_cast.h>
+
+#include <Common/CombinedCardinalityEstimator.h>
+#include <Common/SipHash.h>
+#include <Common/typeid_cast.h>
+
+#include <DataTypes/DataTypesNumber.h>
+#include <DataTypes/DataTypeTuple.h>
+#include <DataTypes/DataTypeUUID.h>
+
+#include <AggregateFunctions/IAggregateFunction.h>
+
+#include <AggregateFunctions/UniqCombinedBiasData.h>
+#include <AggregateFunctions/UniqVariadicHash.h>
+#include <Columns/ColumnsNumber.h>
+#include <Common/HashTable/HashSet.h>
+
+#include <Common/HyperLogLogWithSmallSetOptimization.h>
+
+namespace DB
+{
+namespace detail
+{
+ /** Hash function for uniqCombined.
+ */
+    template <typename T>
+    struct AggregateFunctionUniqCombinedTraits
+    {
+        static UInt32 hash(T x)
+        {
+            return static_cast<UInt32>(intHash64(x));
+        }
+    };
+
+    template <>
+    struct AggregateFunctionUniqCombinedTraits<UInt128>
+    {
+        static UInt32 hash(UInt128 x)
+        {
+            return sipHash64(x);
+        }
+    };
+
+    template <>
+    struct AggregateFunctionUniqCombinedTraits<Float32>
+    {
+        static UInt32 hash(Float32 x)
+        {
+            UInt64 res = ext::bit_cast<UInt64>(x);
+            return static_cast<UInt32>(intHash64(res));
+        }
+    };
+
+    template <>
+    struct AggregateFunctionUniqCombinedTraits<Float64>
+    {
+        static UInt32 hash(Float64 x)
+        {
+            UInt64 res = ext::bit_cast<UInt64>(x);
+            return static_cast<UInt32>(intHash64(res));
+        }
+    };
+
+} // namespace detail
+
+
+template <typename Key, UInt8 K>
+struct AggregateFunctionUniqCombinedDataWithKey
+{
+ // TODO(ilezhankin): pre-generate values for |UniqCombinedBiasData|,
+ // at the moment gen-bias-data.py script doesn't work.
+
+ // We want to migrate from |HashSet| to |HyperLogLogCounter| when the sizes in memory become almost equal.
+ // The size per element in |HashSet| is sizeof(Key)*2 bytes, and the overall size of |HyperLogLogCounter| is 2^K * 6 bits.
+ // For Key=UInt32 we can calculate: 2^X * 4 * 2 ≤ 2^(K-3) * 6 ⇒ X ≤ K-4.
+    using Set = CombinedCardinalityEstimator<Key, HashSet<Key, TrivialHash, HashTableGrower<>>, 16, K - 4, K, TrivialHash, Key>;
+
+ Set set;
+};
+
+template <typename Key>
+struct AggregateFunctionUniqCombinedDataWithKey<Key, 17>
+{
+    using Set = CombinedCardinalityEstimator<Key,
+        HashSet<Key, TrivialHash, HashTableGrower<>>,
+        16,
+        13,
+        17,
+        TrivialHash,
+        Key,
+        HyperLogLogBiasEstimator<UniqCombinedBiasData>,
+        HyperLogLogMode::FullFeatured>;
+
+ Set set;
+};
+
+
+template <typename T, UInt8 K>
+struct AggregateFunctionUniqCombinedData : public AggregateFunctionUniqCombinedDataWithKey<UInt32, K>
+{
+};
+
+
+template <UInt8 K>
+struct AggregateFunctionUniqCombinedData<String, K> : public AggregateFunctionUniqCombinedDataWithKey<UInt64, K>
+{
+};
+
+
+template <typename T, UInt8 K>
+class AggregateFunctionUniqCombined final
+    : public IAggregateFunctionDataHelper<AggregateFunctionUniqCombinedData<T, K>, AggregateFunctionUniqCombined<T, K>>
+{
+public:
+ String getName() const override
+ {
+ return "uniqCombined";
+ }
+
+ DataTypePtr getReturnType() const override
+ {
+        return std::make_shared<DataTypeUInt64>();
+ }
+
+ void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
+ {
+        if constexpr (!std::is_same_v<T, String>)
+        {
+            const auto & value = static_cast<const ColumnVector<T> &>(*columns[0]).getData()[row_num];
+            this->data(place).set.insert(detail::AggregateFunctionUniqCombinedTraits<T>::hash(value));
+ }
+ else
+ {
+ StringRef value = columns[0]->getDataAt(row_num);
+ this->data(place).set.insert(CityHash_v1_0_2::CityHash64(value.data, value.size));
+ }
+ }
+
+ void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
+ {
+ this->data(place).set.merge(this->data(rhs).set);
+ }
+
+ void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override
+ {
+ this->data(place).set.write(buf);
+ }
+
+ void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override
+ {
+ this->data(place).set.read(buf);
+ }
+
+ void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const override
+ {
+        static_cast<ColumnUInt64 &>(to).getData().push_back(this->data(place).set.size());
+ }
+
+ const char * getHeaderFilePath() const override
+ {
+ return __FILE__;
+ }
+};
+
+/** For multiple arguments. To compute, hashes them.
+ * You can pass multiple arguments as is; You can also pass one argument - a tuple.
+ * But (for the possibility of efficient implementation), you can not pass several arguments, among which there are tuples.
+ */
+template <bool is_exact, bool argument_is_tuple, UInt8 K>
+class AggregateFunctionUniqCombinedVariadic final : public IAggregateFunctionDataHelper<AggregateFunctionUniqCombinedData<UInt64, K>,
+                                                        AggregateFunctionUniqCombinedVariadic<is_exact, argument_is_tuple, K>>
+{
+private:
+ size_t num_args = 0;
+
+public:
+ explicit AggregateFunctionUniqCombinedVariadic(const DataTypes & arguments)
+ {
+ if (argument_is_tuple)
+            num_args = typeid_cast<const DataTypeTuple &>(*arguments[0]).getElements().size();
+ else
+ num_args = arguments.size();
+ }
+
+ String getName() const override
+ {
+ return "uniqCombined";
+ }
+
+ DataTypePtr getReturnType() const override
+ {
+        return std::make_shared<DataTypeUInt64>();
+ }
+
+ void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
+ {
+        this->data(place).set.insert(typename AggregateFunctionUniqCombinedData<UInt64, K>::Set::value_type(
+            UniqVariadicHash<is_exact, argument_is_tuple>::apply(num_args, columns, row_num)));
+ }
+
+ void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
+ {
+ this->data(place).set.merge(this->data(rhs).set);
+ }
+
+ void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override
+ {
+ this->data(place).set.write(buf);
+ }
+
+ void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override
+ {
+ this->data(place).set.read(buf);
+ }
+
+ void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const override
+ {
+        static_cast<ColumnUInt64 &>(to).getData().push_back(this->data(place).set.size());
+ }
+
+ const char * getHeaderFilePath() const override
+ {
+ return __FILE__;
+ }
+};
+
+} // namespace DB
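The K - 4 medium-set degree in AggregateFunctionUniqCombinedDataWithKey comes straight from the comment's arithmetic: a hash-set element costs sizeof(Key) * 2 = 8 bytes for UInt32 keys, so the largest power-of-two set no bigger than the 2^K * 6-bit HyperLogLog is 2^(K-4). A quick check of that bound:

#include <cstdio>

int main()
{
    for (unsigned k = 12; k <= 20; ++k)
    {
        unsigned long long hll_bytes = (1ULL << k) * 6 / 8; // HyperLogLog budget
        unsigned x = 0;
        while ((1ULL << (x + 1)) * 8 <= hll_bytes)          // 8 bytes per element
            ++x;
        std::printf("K=%u -> max hash-set degree X=%u (K - 4 = %u)\n", k, x, k - 4);
    }
    return 0;
}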
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h b/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h
index c9a4e6b32a..317637b1b6 100644
--- a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h
+++ b/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h
@@ -16,10 +16,12 @@
namespace DB
{
+
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
+ extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
struct ComparePairFirst final
@@ -191,7 +193,7 @@ public:
const auto time_arg = arguments.front().get();
if (!WhichDataType(time_arg).isDateTime() && !WhichDataType(time_arg).isUInt32())
throw Exception{"Illegal type " + time_arg->getName() + " of first argument of aggregate function " + getName()
- + ", must be DateTime or UInt32"};
+ + ", must be DateTime or UInt32", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
for (const auto i : ext::range(1, arguments.size()))
{
diff --git a/dbms/src/AggregateFunctions/CMakeLists.txt b/dbms/src/AggregateFunctions/CMakeLists.txt
index ef2665d0d3..56cc66d391 100644
--- a/dbms/src/AggregateFunctions/CMakeLists.txt
+++ b/dbms/src/AggregateFunctions/CMakeLists.txt
@@ -20,5 +20,5 @@ list(REMOVE_ITEM clickhouse_aggregate_functions_headers
)
add_library(clickhouse_aggregate_functions ${LINK_MODE} ${clickhouse_aggregate_functions_sources})
-target_link_libraries(clickhouse_aggregate_functions dbms)
+target_link_libraries(clickhouse_aggregate_functions PRIVATE dbms)
target_include_directories (clickhouse_aggregate_functions BEFORE PRIVATE ${COMMON_INCLUDE_DIR})
diff --git a/dbms/src/AggregateFunctions/registerAggregateFunctions.cpp b/dbms/src/AggregateFunctions/registerAggregateFunctions.cpp
index 3517ad57a7..800beda1d5 100644
--- a/dbms/src/AggregateFunctions/registerAggregateFunctions.cpp
+++ b/dbms/src/AggregateFunctions/registerAggregateFunctions.cpp
@@ -21,6 +21,7 @@ void registerAggregateFunctionsStatisticsSimple(AggregateFunctionFactory &);
void registerAggregateFunctionSum(AggregateFunctionFactory &);
void registerAggregateFunctionSumMap(AggregateFunctionFactory &);
void registerAggregateFunctionsUniq(AggregateFunctionFactory &);
+void registerAggregateFunctionUniqCombined(AggregateFunctionFactory &);
void registerAggregateFunctionUniqUpTo(AggregateFunctionFactory &);
void registerAggregateFunctionTopK(AggregateFunctionFactory &);
void registerAggregateFunctionsBitwise(AggregateFunctionFactory &);
@@ -55,6 +56,7 @@ void registerAggregateFunctions()
registerAggregateFunctionSum(factory);
registerAggregateFunctionSumMap(factory);
registerAggregateFunctionsUniq(factory);
+ registerAggregateFunctionUniqCombined(factory);
registerAggregateFunctionUniqUpTo(factory);
registerAggregateFunctionTopK(factory);
registerAggregateFunctionsBitwise(factory);
diff --git a/dbms/src/Client/Connection.cpp b/dbms/src/Client/Connection.cpp
index ce6246fba3..07d2ca05f2 100644
--- a/dbms/src/Client/Connection.cpp
+++ b/dbms/src/Client/Connection.cpp
@@ -108,14 +108,14 @@ void Connection::connect()
disconnect();
/// Add server address to exception. Also Exception will remember stack trace. It's a pity that more precise exception type is lost.
- throw NetException(e.displayText(), "(" + getDescription() + ")", ErrorCodes::NETWORK_ERROR);
+ throw NetException(e.displayText() + " (" + getDescription() + ")", ErrorCodes::NETWORK_ERROR);
}
catch (Poco::TimeoutException & e)
{
disconnect();
/// Add server address to exception. Also Exception will remember stack trace. It's a pity that more precise exception type is lost.
- throw NetException(e.displayText(), "(" + getDescription() + ")", ErrorCodes::SOCKET_TIMEOUT);
+ throw NetException(e.displayText() + " (" + getDescription() + ")", ErrorCodes::SOCKET_TIMEOUT);
}
}
diff --git a/dbms/src/Columns/ColumnConst.h b/dbms/src/Columns/ColumnConst.h
index c9038cdf70..248bb04a18 100644
--- a/dbms/src/Columns/ColumnConst.h
+++ b/dbms/src/Columns/ColumnConst.h
@@ -209,7 +209,7 @@ public:
Field getField() const { return getDataColumn()[0]; }
    template <typename T>
-    T getValue() const { return getField().safeGet<typename NearestFieldType<T>::Type>(); }
+    T getValue() const { return getField().safeGet<NearestFieldType<T>>(); }
};
}
diff --git a/dbms/src/Columns/ColumnDecimal.cpp b/dbms/src/Columns/ColumnDecimal.cpp
index 092ee74d66..e2cb798360 100644
--- a/dbms/src/Columns/ColumnDecimal.cpp
+++ b/dbms/src/Columns/ColumnDecimal.cpp
@@ -213,8 +213,8 @@ void ColumnDecimal::getExtremes(Field & min, Field & max) const
{
if (data.size() == 0)
{
-        min = typename NearestFieldType<T>::Type(0, scale);
-        max = typename NearestFieldType<T>::Type(0, scale);
+        min = NearestFieldType<T>(0, scale);
+        max = NearestFieldType<T>(0, scale);
return;
}
@@ -229,8 +229,8 @@ void ColumnDecimal::getExtremes(Field & min, Field & max) const
cur_max = x;
}
-    min = typename NearestFieldType<T>::Type(cur_min, scale);
-    max = typename NearestFieldType<T>::Type(cur_max, scale);
+    min = NearestFieldType<T>(cur_min, scale);
+    max = NearestFieldType<T>(cur_max, scale);
}
template class ColumnDecimal<Decimal32>;
diff --git a/dbms/src/Columns/ColumnDecimal.h b/dbms/src/Columns/ColumnDecimal.h
index 523064167d..f6dee8877b 100644
--- a/dbms/src/Columns/ColumnDecimal.h
+++ b/dbms/src/Columns/ColumnDecimal.h
@@ -91,7 +91,7 @@ public:
void insertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
void insertData(const char * pos, size_t /*length*/) override;
void insertDefault() override { data.push_back(T()); }
- void insert(const Field & x) override { data.push_back(DB::get<typename NearestFieldType<T>::Type>(x)); }
+ void insert(const Field & x) override { data.push_back(DB::get<NearestFieldType<T>>(x)); }
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
void popBack(size_t n) override { data.resize_assume_reserved(data.size() - n); }
diff --git a/dbms/src/Columns/ColumnFunction.cpp b/dbms/src/Columns/ColumnFunction.cpp
index 852f6f03ad..8bc239eb65 100644
--- a/dbms/src/Columns/ColumnFunction.cpp
+++ b/dbms/src/Columns/ColumnFunction.cpp
@@ -183,7 +183,7 @@ void ColumnFunction::appendArgument(const ColumnWithTypeAndName & column)
auto index = captured_columns.size();
if (!column.type->equals(*argumnet_types[index]))
throw Exception("Cannot capture column " + std::to_string(argumnet_types.size()) +
- "because it has incompatible type: got " + column.type->getName() +
+ " because it has incompatible type: got " + column.type->getName() +
", but " + argumnet_types[index]->getName() + " is expected.", ErrorCodes::LOGICAL_ERROR);
captured_columns.push_back(column);
diff --git a/dbms/src/Columns/ColumnUnique.h b/dbms/src/Columns/ColumnUnique.h
index be5d71c3e2..7fb01620fb 100644
--- a/dbms/src/Columns/ColumnUnique.h
+++ b/dbms/src/Columns/ColumnUnique.h
@@ -81,12 +81,14 @@ public:
{
return column_holder->allocatedBytes()
+ index.allocatedBytes()
- + (cached_null_mask ? cached_null_mask->allocatedBytes() : 0);
+ + (nested_null_mask ? nested_null_mask->allocatedBytes() : 0);
}
void forEachSubcolumn(IColumn::ColumnCallback callback) override
{
callback(column_holder);
index.setColumn(getRawColumnPtr());
+ if (is_nullable)
+ nested_column_nullable = ColumnNullable::create(column_holder, nested_null_mask);
}
const UInt64 * tryGetSavedHash() const override { return index.tryGetSavedHash(); }
@@ -100,8 +102,8 @@ private:
ReverseIndex<UInt64, ColumnType> index;
/// For DataTypeNullable, stores null map.
- mutable ColumnPtr cached_null_mask;
- mutable ColumnPtr cached_column_nullable;
+ ColumnPtr nested_null_mask;
+ ColumnPtr nested_column_nullable;
class IncrementalHash
{
@@ -118,6 +120,9 @@ private:
mutable IncrementalHash hash;
+ void createNullMask();
+ void updateNullMask();
+
static size_t numSpecialValues(bool is_nullable) { return is_nullable ? 2 : 1; }
size_t numSpecialValues() const { return numSpecialValues(is_nullable); }
@@ -148,6 +153,7 @@ ColumnUnique<ColumnType>::ColumnUnique(const ColumnUnique & other)
, index(numSpecialValues(is_nullable), 0)
{
index.setColumn(getRawColumnPtr());
+ createNullMask();
}
template <typename ColumnType>
@@ -158,6 +164,7 @@ ColumnUnique<ColumnType>::ColumnUnique(const IDataType & type)
const auto & holder_type = is_nullable ? *static_cast<const DataTypeNullable &>(type).getNestedType() : type;
column_holder = holder_type.createColumn()->cloneResized(numSpecialValues());
index.setColumn(getRawColumnPtr());
+ createNullMask();
}
template <typename ColumnType>
@@ -172,32 +179,51 @@ ColumnUnique<ColumnType>::ColumnUnique(MutableColumnPtr && holder, bool is_nullable)
throw Exception("Holder column for ColumnUnique can't be nullable.", ErrorCodes::ILLEGAL_COLUMN);
index.setColumn(getRawColumnPtr());
+ createNullMask();
+}
+
+template <typename ColumnType>
+void ColumnUnique<ColumnType>::createNullMask()
+{
+ if (is_nullable)
+ {
+ size_t size = getRawColumnPtr()->size();
+ if (!nested_null_mask)
+ {
+ ColumnUInt8::MutablePtr null_mask = ColumnUInt8::create(size, UInt8(0));
+ null_mask->getData()[getNullValueIndex()] = 1;
+ nested_null_mask = std::move(null_mask);
+ nested_column_nullable = ColumnNullable::create(column_holder, nested_null_mask);
+ }
+ else
+ throw Exception("Null mask for ColumnUnique is already created.", ErrorCodes::LOGICAL_ERROR);
+ }
+}
+
+template <typename ColumnType>
+void ColumnUnique<ColumnType>::updateNullMask()
+{
+ if (is_nullable)
+ {
+ if (!nested_null_mask)
+ throw Exception("Null mask for ColumnUnique was not created.", ErrorCodes::LOGICAL_ERROR);
+
+ size_t size = getRawColumnPtr()->size();
+
+ if (nested_null_mask->size() != size)
+ {
+ IColumn & null_mask = nested_null_mask->assumeMutableRef();
+ static_cast<ColumnUInt8 &>(null_mask).getData().resize_fill(size);
+ }
+ }
}
template <typename ColumnType>
const ColumnPtr & ColumnUnique<ColumnType>::getNestedColumn() const
{
if (is_nullable)
- {
- size_t size = getRawColumnPtr()->size();
- if (!cached_null_mask)
- {
- ColumnUInt8::MutablePtr null_mask = ColumnUInt8::create(size, UInt8(0));
- null_mask->getData()[getNullValueIndex()] = 1;
- cached_null_mask = std::move(null_mask);
- cached_column_nullable = ColumnNullable::create(column_holder, cached_null_mask);
- }
+ return nested_column_nullable;
- if (cached_null_mask->size() != size)
- {
- MutableColumnPtr null_mask = (*std::move(cached_null_mask)).mutate();
- static_cast<ColumnUInt8 &>(*null_mask).getData().resize_fill(size);
- cached_null_mask = std::move(null_mask);
- cached_column_nullable = ColumnNullable::create(column_holder, cached_null_mask);
- }
-
- return cached_column_nullable;
- }
return column_holder;
}
@@ -227,6 +253,8 @@ size_t ColumnUnique<ColumnType>::uniqueInsert(const Field & x)
if (pos != prev_size)
column->popBack(1);
+ updateNullMask();
+
return pos;
}
@@ -260,6 +288,8 @@ size_t ColumnUnique<ColumnType>::uniqueInsertData(const char * pos, size_t length)
index.insertFromLastRow();
}
+ updateNullMask();
+
return insertion_point;
}
@@ -288,6 +318,8 @@ size_t ColumnUnique<ColumnType>::uniqueInsertDataWithTerminatingZero(const char * pos, size_t length)
if (position != prev_size)
column->popBack(1);
+ updateNullMask();
+
return static_cast<size_t>(position);
}
@@ -305,7 +337,7 @@ StringRef ColumnUnique<ColumnType>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
size_t nested_size = 0;
- if (n == getNullValueIndex())
+ if (n != getNullValueIndex())
nested_size = column_holder->serializeValueIntoArena(n, arena, begin).size;
return StringRef(pos, sizeof(null_flag) + nested_size);
@@ -343,6 +375,8 @@ size_t ColumnUnique<ColumnType>::uniqueDeserializeAndInsertFromArena(const char * pos, const char *& new_pos)
if (index_pos != prev_size)
column->popBack(1);
+ updateNullMask();
+
return static_cast<size_t>(index_pos);
}
@@ -533,6 +567,8 @@ MutableColumnPtr ColumnUnique<ColumnType>::uniqueInsertRangeFrom(const IColumn & src, size_t start, size_t length)
if (!positions_column)
throw Exception("Can't find index type for ColumnUnique", ErrorCodes::LOGICAL_ERROR);
+ updateNullMask();
+
return positions_column;
}
@@ -577,6 +613,8 @@ IColumnUnique::IndexesWithOverflow ColumnUnique<ColumnType>::uniqueInsertRangeWithOverflow(const IColumn & src, size_t start, size_t length, size_t max_dictionary_size)
if (!positions_column)
throw Exception("Can't find index type for ColumnUnique", ErrorCodes::LOGICAL_ERROR);
+ updateNullMask();
+
IColumnUnique::IndexesWithOverflow indexes_with_overflow;
indexes_with_overflow.indexes = std::move(positions_column);
indexes_with_overflow.overflowed_keys = std::move(overflowed_keys);
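Aside: the lifecycle introduced above is createNullMask() once per constructor and updateNullMask() after every insert path. A hedged standalone sketch of that bookkeeping, simplified to std::vector in place of the real ColumnPtr members (the real code also throws LOGICAL_ERROR on misuse):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-in for ColumnUnique's null-mask bookkeeping.
struct UniqueWithNullMask
{
    std::vector<uint8_t> null_mask;
    size_t values = 1; // slot 0 plays the role of getNullValueIndex()

    UniqueWithNullMask() { createNullMask(); }

    void createNullMask()
    {
        null_mask.assign(values, 0);
        null_mask[0] = 1; // only the reserved slot is null
    }

    void insertValue()
    {
        ++values;
        updateNullMask(); // mirrors the updateNullMask() calls added above
    }

    void updateNullMask()
    {
        if (null_mask.size() != values)
            null_mask.resize(values, 0); // resize_fill with zeros
    }
};

int main()
{
    UniqueWithNullMask col;
    col.insertValue();
    col.insertValue();
    assert(col.null_mask.size() == 3 && col.null_mask[0] == 1 && col.null_mask[2] == 0);
}
```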
diff --git a/dbms/src/Columns/ColumnVector.cpp b/dbms/src/Columns/ColumnVector.cpp
index d6fc07cd9d..e8aa9a756a 100644
--- a/dbms/src/Columns/ColumnVector.cpp
+++ b/dbms/src/Columns/ColumnVector.cpp
@@ -311,8 +311,8 @@ void ColumnVector<T>::getExtremes(Field & min, Field & max) const
cur_max = x;
}
- min = typename NearestFieldType<T>::Type(cur_min);
- max = typename NearestFieldType<T>::Type(cur_max);
+ min = NearestFieldType<T>(cur_min);
+ max = NearestFieldType<T>(cur_max);
}
/// Explicit template instantiations - to avoid code bloat in headers.
diff --git a/dbms/src/Columns/ColumnVector.h b/dbms/src/Columns/ColumnVector.h
index c0c2020ba2..a2a947f421 100644
--- a/dbms/src/Columns/ColumnVector.h
+++ b/dbms/src/Columns/ColumnVector.h
@@ -244,7 +244,7 @@ public:
void insert(const Field & x) override
{
- data.push_back(DB::get<typename NearestFieldType<T>::Type>(x));
+ data.push_back(DB::get<NearestFieldType<T>>(x));
}
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
diff --git a/dbms/src/Columns/ReverseIndex.h b/dbms/src/Columns/ReverseIndex.h
index cf73534316..a003e8282d 100644
--- a/dbms/src/Columns/ReverseIndex.h
+++ b/dbms/src/Columns/ReverseIndex.h
@@ -272,7 +272,7 @@ public:
auto hash = calcHashes();
ptr = &hash->getData()[0];
UInt64 * expected = nullptr;
- if(saved_hash_ptr.compare_exchange_strong(expected, ptr))
+ if (saved_hash_ptr.compare_exchange_strong(expected, ptr))
saved_hash = std::move(hash);
else
ptr = expected;
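Aside: the line fixed above sits inside a lock-free publish: the first thread to CAS the pointer from nullptr keeps its freshly computed hash data alive, losers discard their copy and adopt the published pointer. A standalone sketch of that pattern with types simplified from the real ReverseIndex:

```cpp
#include <atomic>
#include <cassert>
#include <memory>
#include <vector>

std::atomic<const int *> saved_ptr{nullptr};
std::shared_ptr<std::vector<int>> saved;

const int * publish(std::shared_ptr<std::vector<int>> hash)
{
    const int * ptr = hash->data();
    const int * expected = nullptr;
    if (saved_ptr.compare_exchange_strong(expected, ptr))
        saved = std::move(hash); // we won: keep the data alive
    else
        ptr = expected;          // someone published first: reuse theirs
    return ptr;
}

int main()
{
    auto a = std::make_shared<std::vector<int>>(3, 7);
    auto b = std::make_shared<std::vector<int>>(3, 7);
    const int * p1 = publish(a);
    const int * p2 = publish(b); // b is discarded, p2 aliases p1
    assert(p1 == p2);
}
```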
diff --git a/dbms/src/Common/ClickHouseRevision.cpp b/dbms/src/Common/ClickHouseRevision.cpp
index ea48dcdc54..5513922a65 100644
--- a/dbms/src/Common/ClickHouseRevision.cpp
+++ b/dbms/src/Common/ClickHouseRevision.cpp
@@ -4,4 +4,5 @@
namespace ClickHouseRevision
{
unsigned get() { return VERSION_REVISION; }
+ unsigned getVersionInteger() { return VERSION_INTEGER; }
}
diff --git a/dbms/src/Common/ClickHouseRevision.h b/dbms/src/Common/ClickHouseRevision.h
index 7c147fe6f8..1d097a5bf8 100644
--- a/dbms/src/Common/ClickHouseRevision.h
+++ b/dbms/src/Common/ClickHouseRevision.h
@@ -3,4 +3,5 @@
namespace ClickHouseRevision
{
unsigned get();
+ unsigned getVersionInteger();
}
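Aside: getVersionInteger() exposes the base-1000 encoding described by the VersionInteger metric below: each version component occupies three decimal digits. A minimal sketch of the encoding (versionInteger is an illustrative helper, not the real macro):

```cpp
#include <cstdio>

// 11.22.33 -> 11022033: three decimal digits per component.
constexpr unsigned versionInteger(unsigned major, unsigned minor, unsigned patch)
{
    return major * 1000000 + minor * 1000 + patch;
}

static_assert(versionInteger(11, 22, 33) == 11022033, "matches the metric description");

int main()
{
    std::printf("%u\n", versionInteger(18, 14, 9)); // 18014009
}
```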
diff --git a/dbms/src/Common/Config/CMakeLists.txt b/dbms/src/Common/Config/CMakeLists.txt
index 614e70e757..a1bb2790fd 100644
--- a/dbms/src/Common/Config/CMakeLists.txt
+++ b/dbms/src/Common/Config/CMakeLists.txt
@@ -1,9 +1,8 @@
-
include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake)
add_headers_and_sources(clickhouse_common_config .)
add_library(clickhouse_common_config ${LINK_MODE} ${clickhouse_common_config_headers} ${clickhouse_common_config_sources})
-target_link_libraries (clickhouse_common_config clickhouse_common_zookeeper string_utils ${Poco_XML_LIBRARY} ${Poco_Util_LIBRARY})
-target_include_directories (clickhouse_common_config PRIVATE ${DBMS_INCLUDE_DIR})
+target_link_libraries(clickhouse_common_config PUBLIC common PRIVATE clickhouse_common_zookeeper string_utils PUBLIC ${Poco_XML_LIBRARY} ${Poco_Util_LIBRARY})
+target_include_directories(clickhouse_common_config PUBLIC ${DBMS_INCLUDE_DIR})
diff --git a/dbms/src/Common/CounterInFile.h b/dbms/src/Common/CounterInFile.h
index 6ea34362a5..2c831e3330 100644
--- a/dbms/src/Common/CounterInFile.h
+++ b/dbms/src/Common/CounterInFile.h
@@ -18,7 +18,15 @@
#include
-#define SMALL_READ_WRITE_BUFFER_SIZE 16
+namespace DB
+{
+ namespace ErrorCodes
+ {
+ extern const int CANNOT_OPEN_FILE;
+ extern const int CANNOT_READ_ALL_DATA;
+ extern const int ATTEMPT_TO_READ_AFTER_EOF;
+ }
+}
/** Stores a number in the file.
@@ -26,6 +34,9 @@
*/
class CounterInFile
{
+private:
+ static inline constexpr size_t SMALL_READ_WRITE_BUFFER_SIZE = 16;
+
public:
/// path - the name of the file, including the path
CounterInFile(const std::string & path_) : path(path_) {}
@@ -56,13 +67,13 @@ public:
int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0666);
if (-1 == fd)
- DB::throwFromErrno("Cannot open file " + path);
+ DB::throwFromErrno("Cannot open file " + path, DB::ErrorCodes::CANNOT_OPEN_FILE);
try
{
int flock_ret = flock(fd, LOCK_EX);
if (-1 == flock_ret)
- DB::throwFromErrno("Cannot lock file " + path);
+ DB::throwFromErrno("Cannot lock file " + path, DB::ErrorCodes::CANNOT_OPEN_FILE);
if (!file_doesnt_exists)
{
@@ -130,7 +141,7 @@ public:
int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0666);
if (-1 == fd)
- DB::throwFromErrno("Cannot open file " + path);
+ DB::throwFromErrno("Cannot open file " + path, DB::ErrorCodes::CANNOT_OPEN_FILE);
try
{
@@ -178,6 +189,3 @@ private:
std::string path;
std::mutex mutex;
};
-
-
-#undef SMALL_READ_WRITE_BUFFER_SIZE
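Aside: a minimal sketch of the cleanup above, where a file-scope macro (which had to be #undef'ed at the end of the header to avoid leaking into other translation units) becomes a class-scoped constant. CounterInFileSketch is illustrative.

```cpp
#include <cstddef>

class CounterInFileSketch
{
private:
    // Scoped constant: cannot collide with unrelated code the way a macro can.
    static inline constexpr size_t SMALL_READ_WRITE_BUFFER_SIZE = 16;

public:
    static constexpr size_t bufferSize() { return SMALL_READ_WRITE_BUFFER_SIZE; }
};

static_assert(CounterInFileSketch::bufferSize() == 16, "same value as the old macro");

int main() {}
```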
diff --git a/dbms/src/Common/CurrentMetrics.cpp b/dbms/src/Common/CurrentMetrics.cpp
index 59b26f4b5e..2f8346d554 100644
--- a/dbms/src/Common/CurrentMetrics.cpp
+++ b/dbms/src/Common/CurrentMetrics.cpp
@@ -39,7 +39,8 @@
M(StorageBufferRows, "Number of rows in buffers of Buffer tables") \
M(StorageBufferBytes, "Number of bytes in buffers of Buffer tables") \
M(DictCacheRequests, "Number of requests in fly to data sources of dictionaries of cache type.") \
- M(Revision, "Revision of the server. It is a number incremented for every release or release candidate.") \
+ M(Revision, "Revision of the server. It is a number incremented for every release or release candidate except patch releases.") \
+ M(VersionInteger, "Version of the server in a single integer number in base-1000. For example, version 11.22.33 is translated to 11022033.") \
M(RWLockWaitingReaders, "Number of threads waiting for read on a table RWLock.") \
M(RWLockWaitingWriters, "Number of threads waiting for write on a table RWLock.") \
M(RWLockActiveReaders, "Number of threads holding read lock in a table RWLock.") \
diff --git a/dbms/src/Common/ErrorCodes.cpp b/dbms/src/Common/ErrorCodes.cpp
index 4e724c995c..719e593e3b 100644
--- a/dbms/src/Common/ErrorCodes.cpp
+++ b/dbms/src/Common/ErrorCodes.cpp
@@ -396,6 +396,11 @@ namespace ErrorCodes
extern const int MULTIPLE_ASSIGNMENTS_TO_COLUMN = 419;
extern const int CANNOT_UPDATE_COLUMN = 420;
extern const int CANNOT_ADD_DIFFERENT_AGGREGATE_STATES = 421;
+ extern const int UNSUPPORTED_URI_SCHEME = 422;
+ extern const int CANNOT_GETTIMEOFDAY = 423;
+ extern const int CANNOT_LINK = 424;
+ extern const int SYSTEM_ERROR = 425;
+ extern const int NULL_POINTER_DEREFERENCE = 426;
extern const int KEEPER_EXCEPTION = 999;
extern const int POCO_EXCEPTION = 1000;
diff --git a/dbms/src/Common/Exception.cpp b/dbms/src/Common/Exception.cpp
index 606f180e9d..a7bfbd6442 100644
--- a/dbms/src/Common/Exception.cpp
+++ b/dbms/src/Common/Exception.cpp
@@ -52,7 +52,7 @@ std::string errnoToString(int code, int e)
void throwFromErrno(const std::string & s, int code, int e)
{
- throw ErrnoException(s + ", " + errnoToString(code, e));
+ throw ErrnoException(s + ", " + errnoToString(code, e), code, e);
}
void tryLogCurrentException(const char * log_name, const std::string & start_of_message)
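Aside: throwFromErrno now forwards both the error code and the saved errno into the exception instead of flattening them into a string. A hedged standalone sketch of that shape, with ErrnoException as a stand-in for DB::ErrnoException and 76 assumed to correspond to ErrorCodes::CANNOT_OPEN_FILE:

```cpp
#include <cerrno>
#include <cstring>
#include <stdexcept>
#include <string>

// Illustrative exception keeping the code and errno as data, not just text.
struct ErrnoException : std::runtime_error
{
    int code;
    int saved_errno;
    ErrnoException(const std::string & msg, int code_, int saved_errno_)
        : std::runtime_error(msg), code(code_), saved_errno(saved_errno_) {}
};

[[noreturn]] void throwFromErrno(const std::string & s, int code, int e = errno)
{
    throw ErrnoException(s + ", errno: " + std::to_string(e) + ", strerror: " + std::strerror(e), code, e);
}

int main()
{
    try
    {
        errno = ENOENT;
        throwFromErrno("Cannot open file /nonexistent", /* CANNOT_OPEN_FILE */ 76);
    }
    catch (const ErrnoException & e)
    {
        return e.saved_errno == ENOENT ? 0 : 1; // the errno survives as data
    }
}
```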
diff --git a/dbms/src/Common/Exception.h b/dbms/src/Common/Exception.h
index 4e3bdc8baf..d5e9d3112e 100644
--- a/dbms/src/Common/Exception.h
+++ b/dbms/src/Common/Exception.h
@@ -14,19 +14,25 @@ namespace Poco { class Logger; }
namespace DB
{
+namespace ErrorCodes
+{
+ extern const int POCO_EXCEPTION;
+}
+
class Exception : public Poco::Exception
{
public:
Exception() {} /// For deferred initialization.
- Exception(const std::string & msg, int code = 0) : Poco::Exception(msg, code) {}
- Exception(const std::string & msg, const std::string & arg, int code = 0) : Poco::Exception(msg, arg, code) {}
- Exception(const std::string & msg, const Exception & exc, int code = 0) : Poco::Exception(msg, exc, code), trace(exc.trace) {}
- explicit Exception(const Poco::Exception & exc) : Poco::Exception(exc.displayText()) {}
+ Exception(const std::string & msg, int code) : Poco::Exception(msg, code) {}
+ Exception(const std::string & msg, const Exception & nested_exception, int code)
+ : Poco::Exception(msg, nested_exception, code), trace(nested_exception.trace) {}
- const char * name() const throw() override { return "DB::Exception"; }
- const char * className() const throw() override { return "DB::Exception"; }
- DB::Exception * clone() const override { return new DB::Exception(*this); }
+ enum CreateFromPocoTag { CreateFromPoco };
+ Exception(CreateFromPocoTag, const Poco::Exception & exc) : Poco::Exception(exc.displayText(), ErrorCodes::POCO_EXCEPTION) {}
+
+ Exception * clone() const override { return new Exception(*this); }
void rethrow() const override { throw *this; }
+ const char * name() const throw() override { return "DB::Exception"; }
/// Add something to the existing message.
void addMessage(const std::string & arg) { extendedMessage(arg); }
@@ -35,6 +41,8 @@ public:
private:
StackTrace trace;
+
+ const char * className() const throw() override { return "DB::Exception"; }
};
@@ -42,25 +50,27 @@ private:
class ErrnoException : public Exception
{
public:
- ErrnoException(const std::string & msg, int code = 0, int saved_errno_ = 0)
+ ErrnoException(const std::string & msg, int code, int saved_errno_)
: Exception(msg, code), saved_errno(saved_errno_) {}
- ErrnoException(const std::string & msg, const std::string & arg, int code = 0, int saved_errno_ = 0)
- : Exception(msg, arg, code), saved_errno(saved_errno_) {}
- ErrnoException(const std::string & msg, const Exception & exc, int code = 0, int saved_errno_ = 0)
- : Exception(msg, exc, code), saved_errno(saved_errno_) {}
+
+ ErrnoException * clone() const override { return new ErrnoException(*this); }
+ void rethrow() const override { throw *this; }
int getErrno() const { return saved_errno; }
private:
int saved_errno;
+
+ const char * name() const throw() override { return "DB::ErrnoException"; }
+ const char * className() const throw() override { return "DB::ErrnoException"; }
};
using Exceptions = std::vector<Exception>;
-std::string errnoToString(int code = 0, int the_errno = errno);
-[[noreturn]] void throwFromErrno(const std::string & s, int code = 0, int the_errno = errno);
+std::string errnoToString(int code, int the_errno = errno);
+[[noreturn]] void throwFromErrno(const std::string & s, int code, int the_errno = errno);
/** Try to write an exception to the log (and forget about it).
diff --git a/dbms/src/Common/HashTable/SmallTable.h b/dbms/src/Common/HashTable/SmallTable.h
index 73c2b35865..27dc8c0033 100644
--- a/dbms/src/Common/HashTable/SmallTable.h
+++ b/dbms/src/Common/HashTable/SmallTable.h
@@ -3,6 +3,15 @@
#include
+namespace DB
+{
+ namespace ErrorCodes
+ {
+ extern const int INCORRECT_DATA;
+ }
+}
+
+
/** Replacement of the hash table for a small number (<10) of keys.
* Implemented as an array with linear search.
* The array is located inside the object.
@@ -13,7 +22,6 @@
* you should check if the table is not full,
* and do a `fallback` in this case (for example, use a real hash table).
*/
-
template
<
typename Key,
@@ -86,7 +94,7 @@ public:
DB::readVarUInt(size, in);
if (size > capacity)
- throw DB::Exception("Illegal size");
+ throw DB::Exception("Illegal size", DB::ErrorCodes::INCORRECT_DATA);
is_initialized = true;
}
@@ -306,7 +314,7 @@ public:
DB::readVarUInt(new_size, rb);
if (new_size > capacity)
- throw DB::Exception("Illegal size");
+ throw DB::Exception("Illegal size", DB::ErrorCodes::INCORRECT_DATA);
for (size_t i = 0; i < new_size; ++i)
buf[i].read(rb);
@@ -324,7 +332,7 @@ public:
DB::readText(new_size, rb);
if (new_size > capacity)
- throw DB::Exception("Illegal size");
+ throw DB::Exception("Illegal size", DB::ErrorCodes::INCORRECT_DATA);
for (size_t i = 0; i < new_size; ++i)
{
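Aside: all three hunks above harden the same read path: a size decoded from untrusted input is validated against the fixed capacity before any element reads, and the failure now carries ErrorCodes::INCORRECT_DATA instead of code 0. A standalone sketch of the check (readSizeChecked and the one-byte decoding are illustrative stand-ins for DB::readVarUInt plus the SmallTable read methods):

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

constexpr size_t capacity = 4;

size_t readSizeChecked(const std::vector<uint8_t> & in)
{
    if (in.empty())
        throw std::runtime_error("Unexpected end of input");
    size_t size = in[0]; // stand-in for DB::readVarUInt
    if (size > capacity)
        throw std::runtime_error("Illegal size"); // INCORRECT_DATA in the real code
    return size;
}

int main()
{
    std::vector<uint8_t> good{3}, bad{200};
    readSizeChecked(good);                           // fits into the fixed array
    try { readSizeChecked(bad); }                    // would overflow buf[capacity]
    catch (const std::runtime_error &) { return 0; }
    return 1;
}
```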
diff --git a/dbms/src/Common/NetException.h b/dbms/src/Common/NetException.h
index b2b12e8564..ff7d7c5c5f 100644
--- a/dbms/src/Common/NetException.h
+++ b/dbms/src/Common/NetException.h
@@ -6,20 +6,17 @@
namespace DB
{
-class NetException : public DB::Exception
+class NetException : public Exception
{
public:
- NetException(const std::string & msg, int code = 0) : DB::Exception(msg, code) {}
- NetException(const std::string & msg, const std::string & arg, int code = 0) : DB::Exception(msg, arg, code) {}
- NetException(const std::string & msg, const DB::Exception & exc, int code = 0) : DB::Exception(msg, exc, code) {}
+ NetException(const std::string & msg, int code) : Exception(msg, code) {}
- explicit NetException(const DB::Exception & exc) : DB::Exception(exc) {}
- explicit NetException(const Poco::Exception & exc) : DB::Exception(exc.displayText()) {}
+ NetException * clone() const override { return new NetException(*this); }
+ void rethrow() const override { throw *this; }
+private:
const char * name() const throw() override { return "DB::NetException"; }
const char * className() const throw() override { return "DB::NetException"; }
- DB::NetException * clone() const override { return new DB::NetException(*this); }
- void rethrow() const override { throw *this; }
};
}
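Aside: the clone()/rethrow() pair kept in this hierarchy preserves the dynamic exception type when an exception is stored and later raised again. A standalone sketch of the pattern (Base and Net are illustrative stand-ins for DB::Exception and DB::NetException):

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>

struct Base : std::runtime_error
{
    using std::runtime_error::runtime_error;
    virtual Base * clone() const { return new Base(*this); }
    virtual void rethrow() const { throw *this; }
};

struct Net : Base
{
    using Base::Base;
    Net * clone() const override { return new Net(*this); } // covariant return
    void rethrow() const override { throw *this; }          // throws as Net
};

int main()
{
    std::unique_ptr<Base> saved(Net("connection reset").clone());
    try { saved->rethrow(); }
    catch (const Net &) { std::cout << "caught with the original type\n"; }
}
```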
diff --git a/dbms/src/Common/PoolWithFailoverBase.h b/dbms/src/Common/PoolWithFailoverBase.h
index 63420f12ac..dc986a44ff 100644
--- a/dbms/src/Common/PoolWithFailoverBase.h
+++ b/dbms/src/Common/PoolWithFailoverBase.h
@@ -305,7 +305,7 @@ void PoolWithFailoverBase<TNestedPool>::reportError(const Entry & entry)
return;
}
}
- throw DB::Exception("Can't find pool to report error.");
+ throw DB::Exception("Can't find pool to report error", DB::ErrorCodes::LOGICAL_ERROR);
}
template <typename TNestedPool>
diff --git a/dbms/src/Common/ProfileEvents.cpp b/dbms/src/Common/ProfileEvents.cpp
index de50c625f1..7059e02d76 100644
--- a/dbms/src/Common/ProfileEvents.cpp
+++ b/dbms/src/Common/ProfileEvents.cpp
@@ -170,6 +170,7 @@
M(OSWriteBytes, "Number of bytes written to disks or block devices. Doesn't include bytes that are in page cache dirty pages. May not include data that was written by OS asynchronously.") \
M(OSReadChars, "Number of bytes read from filesystem, including page cache.") \
M(OSWriteChars, "Number of bytes written to filesystem, including page cache.") \
+ M(CreatedHTTPConnections, "Total amount of created HTTP connections (closed or opened).") \
namespace ProfileEvents
{
diff --git a/dbms/src/Common/ShellCommand.cpp b/dbms/src/Common/ShellCommand.cpp
index c9e625810c..675dd8391f 100644
--- a/dbms/src/Common/ShellCommand.cpp
+++ b/dbms/src/Common/ShellCommand.cpp
@@ -4,9 +4,11 @@
#include
#include
#include
+#include
#include
#include
#include
+#include
namespace DB
@@ -75,13 +77,27 @@ namespace
namespace DB
{
+ShellCommand::ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_)
+ : pid(pid)
+ , terminate_in_destructor(terminate_in_destructor_)
+ , log(&Poco::Logger::get("ShellCommand"))
+ , in(in_fd)
+ , out(out_fd)
+ , err(err_fd) {}
+
ShellCommand::~ShellCommand()
{
- if (!wait_called)
+ if (terminate_in_destructor)
+ {
+ int retcode = kill(pid, SIGTERM);
+ if (retcode != 0)
+ LOG_WARNING(log, "Cannot kill pid " << pid << " errno '" << errnoToString(retcode) << "'");
+ }
+ else if (!wait_called)
tryWait();
}
-std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only)
+std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only, bool terminate_in_destructor)
{
/** Here it is written that with a normal call `vfork`, there is a chance of deadlock in multithreaded programs,
* because of the resolving of characters in the shared library
@@ -128,7 +144,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only)
_exit(int(ReturnCodes::CANNOT_EXEC));
}
- std::unique_ptr<ShellCommand> res(new ShellCommand(pid, pipe_stdin.write_fd, pipe_stdout.read_fd, pipe_stderr.read_fd));
+ std::unique_ptr<ShellCommand> res(new ShellCommand(pid, pipe_stdin.write_fd, pipe_stdout.read_fd, pipe_stderr.read_fd, terminate_in_destructor));
/// Now the ownership of the file descriptors is passed to the result.
pipe_stdin.write_fd = -1;
@@ -139,7 +155,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only)
}
-std::unique_ptr<ShellCommand> ShellCommand::execute(const std::string & command, bool pipe_stdin_only)
+std::unique_ptr<ShellCommand> ShellCommand::execute(const std::string & command, bool pipe_stdin_only, bool terminate_in_destructor)
{
/// Arguments in non-constant chunks of memory (as required for `execv`).
/// Moreover, their copying must be done before calling `vfork`, so after `vfork` do a minimum of things.
@@ -149,11 +165,11 @@ std::unique_ptr<ShellCommand> ShellCommand::execute(const std::string & command, bool pipe_stdin_only)
char * const argv[] = { argv0.data(), argv1.data(), argv2.data(), nullptr };
- return executeImpl("/bin/sh", argv, pipe_stdin_only);
+ return executeImpl("/bin/sh", argv, pipe_stdin_only, terminate_in_destructor);
}
-std::unique_ptr<ShellCommand> ShellCommand::executeDirect(const std::string & path, const std::vector<std::string> & arguments)
+std::unique_ptr<ShellCommand> ShellCommand::executeDirect(const std::string & path, const std::vector<std::string> & arguments, bool terminate_in_destructor)
{
size_t argv_sum_size = path.size() + 1;
for (const auto & arg : arguments)
@@ -174,7 +190,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeDirect(const std::string & path, const std::vector<std::string> & arguments)
argv[arguments.size() + 1] = nullptr;
- return executeImpl(path.data(), argv.data(), false);
+ return executeImpl(path.data(), argv.data(), false, terminate_in_destructor);
}
diff --git a/dbms/src/Common/ShellCommand.h b/dbms/src/Common/ShellCommand.h
index a439568fe0..3d1308272e 100644
--- a/dbms/src/Common/ShellCommand.h
+++ b/dbms/src/Common/ShellCommand.h
@@ -28,11 +28,13 @@ class ShellCommand
private:
pid_t pid;
bool wait_called = false;
+ bool terminate_in_destructor;
- ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd)
- : pid(pid), in(in_fd), out(out_fd), err(err_fd) {}
+ Poco::Logger * log;
- static std::unique_ptr<ShellCommand> executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only);
+ ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_);
+
+ static std::unique_ptr<ShellCommand> executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only, bool terminate_in_destructor);
public:
WriteBufferFromFile in; /// If the command reads from stdin, do not forget to call in.close() after writing all the data there.
@@ -41,11 +43,13 @@ public:
~ShellCommand();
- /// Run the command using /bin/sh -c
- static std::unique_ptr<ShellCommand> execute(const std::string & command, bool pipe_stdin_only = false);
+ /// Run the command using /bin/sh -c.
+ /// If terminate_in_destructor is true, the destructor sends SIGTERM and does not wait for the process.
+ static std::unique_ptr<ShellCommand> execute(const std::string & command, bool pipe_stdin_only = false, bool terminate_in_destructor = false);
/// Run the executable with the specified arguments. `arguments` - without argv[0].
- static std::unique_ptr<ShellCommand> executeDirect(const std::string & path, const std::vector<std::string> & arguments);
+ /// If terminate_in_destructor is true, the destructor sends SIGTERM and does not wait for the process.
+ static std::unique_ptr<ShellCommand> executeDirect(const std::string & path, const std::vector<std::string> & arguments, bool terminate_in_destructor = false);
/// Wait for the process to end, throw an exception if the code is not 0 or if the process was not completed by itself.
void wait();
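Aside: a hedged standalone sketch of the terminate_in_destructor semantics added above: an RAII holder that either waits for the child or sends SIGTERM on scope exit. This mirrors the documented behaviour only; it is not the real ShellCommand class.

```cpp
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

class ChildProcess
{
public:
    ChildProcess(pid_t pid_, bool terminate_in_destructor_)
        : pid(pid_), terminate_in_destructor(terminate_in_destructor_) {}

    ~ChildProcess()
    {
        if (terminate_in_destructor)
        {
            if (0 != kill(pid, SIGTERM))
                std::perror("kill"); // the real code logs a warning instead
            // Note: no waitpid(); the process is intentionally not awaited.
        }
        else
        {
            int status = 0;
            waitpid(pid, &status, 0);
        }
    }

private:
    pid_t pid;
    bool terminate_in_destructor;
};

int main()
{
    pid_t pid = fork();
    if (pid == 0)
    {
        pause(); // child: sleep until a signal arrives
        _exit(0);
    }
    ChildProcess child(pid, /* terminate_in_destructor = */ true);
    return 0; // leaving main SIGTERMs the child without waiting
}
```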
diff --git a/dbms/src/Common/StatusFile.cpp b/dbms/src/Common/StatusFile.cpp
index 84b1edc922..afe42262b5 100644
--- a/dbms/src/Common/StatusFile.cpp
+++ b/dbms/src/Common/StatusFile.cpp
@@ -20,6 +20,14 @@
namespace DB
{
+namespace ErrorCodes
+{
+ extern const int CANNOT_OPEN_FILE;
+ extern const int CANNOT_CLOSE_FILE;
+ extern const int CANNOT_TRUNCATE_FILE;
+ extern const int CANNOT_SEEK_THROUGH_FILE;
+}
+
StatusFile::StatusFile(const std::string & path_)
: path(path_)
@@ -43,7 +51,7 @@ StatusFile::StatusFile(const std::string & path_)
fd = ::open(path.c_str(), O_WRONLY | O_CREAT, 0666);
if (-1 == fd)
- throwFromErrno("Cannot open file " + path);
+ throwFromErrno("Cannot open file " + path, ErrorCodes::CANNOT_OPEN_FILE);
try
{
@@ -51,16 +59,16 @@ StatusFile::StatusFile(const std::string & path_)
if (-1 == flock_ret)
{
if (errno == EWOULDBLOCK)
- throw Exception("Cannot lock file " + path + ". Another server instance in same directory is already running.");
+ throw Exception("Cannot lock file " + path + ". Another server instance in the same directory is already running.", ErrorCodes::CANNOT_OPEN_FILE);
else
- throwFromErrno("Cannot lock file " + path);
+ throwFromErrno("Cannot lock file " + path, ErrorCodes::CANNOT_OPEN_FILE);
}
if (0 != ftruncate(fd, 0))
- throwFromErrno("Cannot ftruncate " + path);
+ throwFromErrno("Cannot ftruncate " + path, ErrorCodes::CANNOT_TRUNCATE_FILE);
if (0 != lseek(fd, 0, SEEK_SET))
- throwFromErrno("Cannot lseek " + path);
+ throwFromErrno("Cannot lseek " + path, ErrorCodes::CANNOT_SEEK_THROUGH_FILE);
/// Write information about current server instance to the file.
{
@@ -82,10 +90,10 @@ StatusFile::StatusFile(const std::string & path_)
StatusFile::~StatusFile()
{
if (0 != close(fd))
- LOG_ERROR(&Logger::get("StatusFile"), "Cannot close file " << path << ", " << errnoToString());
+ LOG_ERROR(&Logger::get("StatusFile"), "Cannot close file " << path << ", " << errnoToString(ErrorCodes::CANNOT_CLOSE_FILE));
if (0 != unlink(path.c_str()))
- LOG_ERROR(&Logger::get("StatusFile"), "Cannot unlink file " << path << ", " << errnoToString());
+ LOG_ERROR(&Logger::get("StatusFile"), "Cannot unlink file " << path << ", " << errnoToString(ErrorCodes::CANNOT_CLOSE_FILE));
}
}
diff --git a/dbms/src/Common/XDBCBridgeHelper.h b/dbms/src/Common/XDBCBridgeHelper.h
index efdf7b401d..3ff91c902f 100644
--- a/dbms/src/Common/XDBCBridgeHelper.h
+++ b/dbms/src/Common/XDBCBridgeHelper.h
@@ -68,6 +68,7 @@ protected:
public:
using Configuration = Poco::Util::AbstractConfiguration;
+ Context & context;
const Configuration & config;
static constexpr inline auto DEFAULT_HOST = "localhost";
@@ -78,8 +79,8 @@ public:
static constexpr inline auto IDENTIFIER_QUOTE_HANDLER = "/identifier_quote";
static constexpr inline auto PING_OK_ANSWER = "Ok.";
- XDBCBridgeHelper(const Configuration & config_, const Poco::Timespan & http_timeout_, const std::string & connection_string_)
- : http_timeout(http_timeout_), connection_string(connection_string_), config(config_)
+ XDBCBridgeHelper(Context & global_context_, const Poco::Timespan & http_timeout_, const std::string & connection_string_)
+ : http_timeout(http_timeout_), connection_string(connection_string_), context(global_context_), config(context.getConfigRef())
{
size_t bridge_port = config.getUInt(BridgeHelperMixin::configPrefix() + ".port", DEFAULT_PORT);
std::string bridge_host = config.getString(BridgeHelperMixin::configPrefix() + ".host", DEFAULT_HOST);
@@ -210,7 +211,8 @@ private:
/* Contains logic for instantiation of the bridge instance */
void startBridge() const
{
- BridgeHelperMixin::startBridge(config, log, http_timeout);
+ auto cmd = BridgeHelperMixin::startBridge(config, log, http_timeout);
+ context.addXDBCBridgeCommand(std::move(cmd));
}
};
@@ -230,7 +232,7 @@ struct JDBCBridgeMixin
return "JDBC";
}
- static void startBridge(const Poco::Util::AbstractConfiguration &, const Poco::Logger *, const Poco::Timespan &)
+ static std::unique_ptr<ShellCommand> startBridge(const Poco::Util::AbstractConfiguration &, const Poco::Logger *, const Poco::Timespan &)
{
throw Exception("jdbc-bridge is not running. Please start it manually", ErrorCodes::EXTERNAL_SERVER_IS_NOT_RESPONDING);
}
@@ -253,11 +255,13 @@ struct ODBCBridgeMixin
return "ODBC";
}
- static void startBridge(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout)
+ static std::unique_ptr<ShellCommand> startBridge(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout)
{
/// Path to executable folder
Poco::Path path{config.getString("application.dir", "/usr/bin")};
+
+ std::vector<std::string> cmd_args;
path.setFileName(
#if CLICKHOUSE_SPLIT_BINARY
"clickhouse-odbc-bridge"
@@ -268,34 +272,35 @@ struct ODBCBridgeMixin
std::stringstream command;
- command << path.toString() <<
-#if CLICKHOUSE_SPLIT_BINARY
- " "
-#else
- " odbc-bridge "
+#if !CLICKHOUSE_SPLIT_BINARY
+ cmd_args.push_back("odbc-bridge");
#endif
- ;
- command << "--http-port " << config.getUInt(configPrefix() + ".port", DEFAULT_PORT) << ' ';
- command << "--listen-host " << config.getString(configPrefix() + ".listen_host", XDBCBridgeHelper::DEFAULT_HOST)
- << ' ';
- command << "--http-timeout " << http_timeout.totalMicroseconds() << ' ';
+ cmd_args.push_back("--http-port");
+ cmd_args.push_back(std::to_string(config.getUInt(configPrefix() + ".port", DEFAULT_PORT)));
+ cmd_args.push_back("--listen-host");
+ cmd_args.push_back(config.getString(configPrefix() + ".listen_host", XDBCBridgeHelper::DEFAULT_HOST));
+ cmd_args.push_back("--http-timeout");
+ cmd_args.push_back(std::to_string(http_timeout.totalMicroseconds()));
if (config.has("logger." + configPrefix() + "_log"))
- command << "--log-path " << config.getString("logger." + configPrefix() + "_log") << ' ';
+ {
+ cmd_args.push_back("--log-path");
+ cmd_args.push_back(config.getString("logger." + configPrefix() + "_log"));
+ }
if (config.has("logger." + configPrefix() + "_errlog"))
- command << "--err-log-path " << config.getString("logger." + configPrefix() + "_errlog") << ' ';
+ {
+ cmd_args.push_back("--err-log-path");
+ cmd_args.push_back(config.getString("logger." + configPrefix() + "_errlog"));
+ }
if (config.has("logger." + configPrefix() + "_level"))
- command << "--log-level " << config.getString("logger." + configPrefix() + "_level") << ' ';
- command << "&"; /// we don't want to wait this process
+ {
+ cmd_args.push_back("--log-level");
+ cmd_args.push_back(config.getString("logger." + configPrefix() + "_level"));
+ }
- auto command_str = command.str();
+ LOG_TRACE(log, "Starting " + serviceAlias());
- std::cerr << command_str << std::endl;
-
- LOG_TRACE(log, "Starting " + serviceAlias() + " with command: " << command_str);
-
- auto cmd = ShellCommand::execute(command_str);
- cmd->wait();
+ return ShellCommand::executeDirect(path.toString(), cmd_args, true);
}
};
}
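Aside: the rewrite above replaces a single "/bin/sh -c '... &'" command line with an argument vector handed to executeDirect, so no shell parsing or quoting is involved and the parent keeps the child pid (which the new terminate_in_destructor flag relies on). A minimal sketch of the argument assembly, with illustrative values:

```cpp
#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> cmd_args;
    cmd_args.push_back("--http-port");
    cmd_args.push_back(std::to_string(9018));
    cmd_args.push_back("--listen-host");
    cmd_args.push_back("localhost");

    // In the real code: ShellCommand::executeDirect(path.toString(), cmd_args, true);
    for (const auto & arg : cmd_args)
        std::cout << arg << '\n';
}
```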
diff --git a/dbms/src/Common/ZooKeeper/CMakeLists.txt b/dbms/src/Common/ZooKeeper/CMakeLists.txt
index 2969d9a91e..1f69f0af1e 100644
--- a/dbms/src/Common/ZooKeeper/CMakeLists.txt
+++ b/dbms/src/Common/ZooKeeper/CMakeLists.txt
@@ -4,7 +4,8 @@ add_headers_and_sources(clickhouse_common_zookeeper .)
add_library(clickhouse_common_zookeeper ${LINK_MODE} ${clickhouse_common_zookeeper_headers} ${clickhouse_common_zookeeper_sources})
-target_link_libraries (clickhouse_common_zookeeper clickhouse_common_io)
+target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY})
+target_include_directories(clickhouse_common_zookeeper PUBLIC ${DBMS_INCLUDE_DIR})
if (ENABLE_TESTS)
add_subdirectory (tests)
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp b/dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp
index 1b49c22cf4..9343bb4add 100644
--- a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp
+++ b/dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp
@@ -1,5 +1,15 @@
#include "ZooKeeperHolder.h"
+
+namespace DB
+{
+ namespace ErrorCodes
+ {
+ extern const int NULL_POINTER_DEREFERENCE;
+ }
+}
+
+
using namespace zkutil;
ZooKeeperHolder::UnstorableZookeeperHandler ZooKeeperHolder::getZooKeeper()
@@ -47,7 +57,7 @@ ZooKeeperHolder::UnstorableZookeeperHandler::UnstorableZookeeperHandler(ZooKeeperPtr zk_ptr_)
ZooKeeper * ZooKeeperHolder::UnstorableZookeeperHandler::operator->()
{
if (zk_ptr == nullptr)
- throw DB::Exception(nullptr_exception_message);
+ throw DB::Exception(nullptr_exception_message, DB::ErrorCodes::NULL_POINTER_DEREFERENCE);
return zk_ptr.get();
}
@@ -55,20 +65,20 @@ ZooKeeper * ZooKeeperHolder::UnstorableZookeeperHandler::operator->()
const ZooKeeper * ZooKeeperHolder::UnstorableZookeeperHandler::operator->() const
{
if (zk_ptr == nullptr)
- throw DB::Exception(nullptr_exception_message);
+ throw DB::Exception(nullptr_exception_message, DB::ErrorCodes::NULL_POINTER_DEREFERENCE);
return zk_ptr.get();
}
ZooKeeper & ZooKeeperHolder::UnstorableZookeeperHandler::operator*()
{
if (zk_ptr == nullptr)
- throw DB::Exception(nullptr_exception_message);
+ throw DB::Exception(nullptr_exception_message, DB::ErrorCodes::NULL_POINTER_DEREFERENCE);
return *zk_ptr;
}
const ZooKeeper & ZooKeeperHolder::UnstorableZookeeperHandler::operator*() const
{
if (zk_ptr == nullptr)
- throw DB::Exception(nullptr_exception_message);
+ throw DB::Exception(nullptr_exception_message, DB::ErrorCodes::NULL_POINTER_DEREFERENCE);
return *zk_ptr;
}
diff --git a/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt b/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt
index a24948240f..06716e4991 100644
--- a/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt
+++ b/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt
@@ -1,23 +1,23 @@
add_executable(zkutil_test_commands zkutil_test_commands.cpp)
-target_link_libraries(zkutil_test_commands clickhouse_common_zookeeper)
+target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper)
add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp)
-target_link_libraries(zkutil_test_commands_new_lib clickhouse_common_zookeeper)
+target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper)
add_executable(zkutil_test_lock zkutil_test_lock.cpp)
-target_link_libraries(zkutil_test_lock clickhouse_common_zookeeper)
+target_link_libraries(zkutil_test_lock PRIVATE clickhouse_common_zookeeper)
add_executable(zkutil_expiration_test zkutil_expiration_test.cpp)
-target_link_libraries(zkutil_expiration_test clickhouse_common_zookeeper)
+target_link_libraries(zkutil_expiration_test PRIVATE clickhouse_common_zookeeper)
add_executable(zkutil_test_async zkutil_test_async.cpp)
-target_link_libraries(zkutil_test_async clickhouse_common_zookeeper)
+target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper)
add_executable(zkutil_zookeeper_holder zkutil_zookeeper_holder.cpp)
-target_link_libraries(zkutil_zookeeper_holder clickhouse_common_zookeeper)
+target_link_libraries(zkutil_zookeeper_holder PRIVATE clickhouse_common_zookeeper)
add_executable (zk_many_watches_reconnect zk_many_watches_reconnect.cpp)
-target_link_libraries (zk_many_watches_reconnect clickhouse_common_zookeeper clickhouse_common_config)
+target_link_libraries (zk_many_watches_reconnect PRIVATE clickhouse_common_zookeeper clickhouse_common_config)
add_executable (zookeeper_impl zookeeper_impl.cpp)
-target_link_libraries (zookeeper_impl clickhouse_common_zookeeper)
+target_link_libraries (zookeeper_impl PRIVATE clickhouse_common_zookeeper)
diff --git a/dbms/src/Common/config.h.in b/dbms/src/Common/config.h.in
index 27c5e96524..a06970384e 100644
--- a/dbms/src/Common/config.h.in
+++ b/dbms/src/Common/config.h.in
@@ -16,3 +16,4 @@
#cmakedefine01 USE_POCO_NETSSL
#cmakedefine01 CLICKHOUSE_SPLIT_BINARY
#cmakedefine01 ENABLE_INSERT_INFILE
+#cmakedefine01 USE_BASE64
diff --git a/dbms/src/Common/config_version.h.in b/dbms/src/Common/config_version.h.in
index 8f3e2f56c8..a90fd77b6a 100644
--- a/dbms/src/Common/config_version.h.in
+++ b/dbms/src/Common/config_version.h.in
@@ -23,6 +23,7 @@
#cmakedefine VERSION_FULL "@VERSION_FULL@"
#cmakedefine VERSION_DESCRIBE "@VERSION_DESCRIBE@"
#cmakedefine VERSION_GITHASH "@VERSION_GITHASH@"
+#cmakedefine VERSION_INTEGER @VERSION_INTEGER@
#if defined(VERSION_MAJOR)
#define DBMS_VERSION_MAJOR VERSION_MAJOR
diff --git a/dbms/src/Common/createHardLink.cpp b/dbms/src/Common/createHardLink.cpp
index 06647cd437..824b7e9908 100644
--- a/dbms/src/Common/createHardLink.cpp
+++ b/dbms/src/Common/createHardLink.cpp
@@ -8,6 +8,12 @@
namespace DB
{
+namespace ErrorCodes
+{
+ extern const int CANNOT_STAT;
+ extern const int CANNOT_LINK;
+}
+
void createHardLink(const String & source_path, const String & destination_path)
{
if (0 != link(source_path.c_str(), destination_path.c_str()))
@@ -20,16 +26,16 @@ void createHardLink(const String & source_path, const String & destination_path)
struct stat destination_descr;
if (0 != lstat(source_path.c_str(), &source_descr))
- throwFromErrno("Cannot stat " + source_path);
+ throwFromErrno("Cannot stat " + source_path, ErrorCodes::CANNOT_STAT);
if (0 != lstat(destination_path.c_str(), &destination_descr))
- throwFromErrno("Cannot stat " + destination_path);
+ throwFromErrno("Cannot stat " + destination_path, ErrorCodes::CANNOT_STAT);
if (source_descr.st_ino != destination_descr.st_ino)
- throwFromErrno("Destination file " + destination_path + " is already exist and have different inode.", 0, link_errno);
+ throwFromErrno("Destination file " + destination_path + " is already exist and have different inode.", ErrorCodes::CANNOT_LINK, link_errno);
}
else
- throwFromErrno("Cannot link " + source_path + " to " + destination_path);
+ throwFromErrno("Cannot link " + source_path + " to " + destination_path, ErrorCodes::CANNOT_LINK);
}
}
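Aside: the logic above makes hard-link creation idempotent: an EEXIST from link() is tolerated only when the destination is already a hard link to the same inode. A hedged standalone sketch that returns bool where the real code throws with CANNOT_LINK / CANNOT_STAT:

```cpp
#include <cerrno>
#include <cstdio>
#include <string>
#include <sys/stat.h>
#include <unistd.h>

bool createHardLinkIdempotent(const std::string & src, const std::string & dst)
{
    if (0 == link(src.c_str(), dst.c_str()))
        return true;

    if (errno != EEXIST)
        return false; // the real code throws via throwFromErrno

    struct stat source_descr {};
    struct stat destination_descr {};
    if (0 != lstat(src.c_str(), &source_descr) || 0 != lstat(dst.c_str(), &destination_descr))
        return false;

    return source_descr.st_ino == destination_descr.st_ino; // the link already exists
}

int main()
{
    if (std::FILE * f = std::fopen("src.tmp", "w"))
        std::fclose(f);
    std::printf("%d\n", createHardLinkIdempotent("src.tmp", "dst.tmp")); // 1: created
    std::printf("%d\n", createHardLinkIdempotent("src.tmp", "dst.tmp")); // 1: already there
    unlink("dst.tmp");
    unlink("src.tmp");
}
```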
diff --git a/dbms/src/Common/setThreadName.cpp b/dbms/src/Common/setThreadName.cpp
index fc228d78ff..d035822974 100644
--- a/dbms/src/Common/setThreadName.cpp
+++ b/dbms/src/Common/setThreadName.cpp
@@ -32,7 +32,7 @@ void setThreadName(const char * name)
#else
if (0 != prctl(PR_SET_NAME, name, 0, 0, 0))
#endif
- DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)");
+ DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR);
}
std::string getThreadName()
@@ -48,7 +48,7 @@ std::string getThreadName()
// throw DB::Exception("Cannot get thread name with pthread_get_name_np()", DB::ErrorCodes::PTHREAD_ERROR);
#else
if (0 != prctl(PR_GET_NAME, name.data(), 0, 0, 0))
- DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)");
+ DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR);
#endif
name.resize(std::strlen(name.data()));
diff --git a/dbms/src/Common/tests/CMakeLists.txt b/dbms/src/Common/tests/CMakeLists.txt
index f4d01e85bd..802963bf40 100644
--- a/dbms/src/Common/tests/CMakeLists.txt
+++ b/dbms/src/Common/tests/CMakeLists.txt
@@ -1,73 +1,73 @@
add_executable (hashes_test hashes_test.cpp)
-target_link_libraries (hashes_test dbms)
+target_link_libraries (hashes_test PRIVATE dbms)
add_executable (sip_hash sip_hash.cpp)
-target_link_libraries (sip_hash clickhouse_common_io)
+target_link_libraries (sip_hash PRIVATE clickhouse_common_io)
add_executable (sip_hash_perf sip_hash_perf.cpp)
-target_link_libraries (sip_hash_perf clickhouse_common_io)
+target_link_libraries (sip_hash_perf PRIVATE clickhouse_common_io)
add_executable (auto_array auto_array.cpp)
-target_link_libraries (auto_array clickhouse_common_io)
+target_link_libraries (auto_array PRIVATE clickhouse_common_io)
add_executable (lru_cache lru_cache.cpp)
-target_link_libraries (lru_cache clickhouse_common_io)
+target_link_libraries (lru_cache PRIVATE clickhouse_common_io)
add_executable (hash_table hash_table.cpp)
-target_link_libraries (hash_table clickhouse_common_io)
+target_link_libraries (hash_table PRIVATE clickhouse_common_io)
add_executable (small_table small_table.cpp)
-target_link_libraries (small_table clickhouse_common_io)
+target_link_libraries (small_table PRIVATE clickhouse_common_io)
add_executable (parallel_aggregation parallel_aggregation.cpp)
-target_link_libraries (parallel_aggregation clickhouse_common_io)
+target_link_libraries (parallel_aggregation PRIVATE clickhouse_common_io)
add_executable (parallel_aggregation2 parallel_aggregation2.cpp)
-target_link_libraries (parallel_aggregation2 clickhouse_common_io)
+target_link_libraries (parallel_aggregation2 PRIVATE clickhouse_common_io)
add_executable (int_hashes_perf int_hashes_perf.cpp AvalancheTest.cpp Random.cpp)
-target_link_libraries (int_hashes_perf clickhouse_common_io)
+target_link_libraries (int_hashes_perf PRIVATE clickhouse_common_io)
add_executable (simple_cache simple_cache.cpp)
target_include_directories (simple_cache PRIVATE ${DBMS_INCLUDE_DIR})
-target_link_libraries (simple_cache common)
+target_link_libraries (simple_cache PRIVATE common)
add_executable (compact_array compact_array.cpp)
-target_link_libraries (compact_array clickhouse_common_io ${Boost_FILESYSTEM_LIBRARY})
+target_link_libraries (compact_array PRIVATE clickhouse_common_io ${Boost_FILESYSTEM_LIBRARY})
add_executable (radix_sort radix_sort.cpp)
-target_link_libraries (radix_sort clickhouse_common_io)
+target_link_libraries (radix_sort PRIVATE clickhouse_common_io)
add_executable (shell_command_test shell_command_test.cpp)
-target_link_libraries (shell_command_test clickhouse_common_io)
+target_link_libraries (shell_command_test PRIVATE clickhouse_common_io)
add_executable (arena_with_free_lists arena_with_free_lists.cpp)
-target_link_libraries (arena_with_free_lists clickhouse_common_io)
+target_link_libraries (arena_with_free_lists PRIVATE clickhouse_common_io)
add_executable (pod_array pod_array.cpp)
-target_link_libraries (pod_array clickhouse_common_io)
+target_link_libraries (pod_array PRIVATE clickhouse_common_io)
add_executable (thread_creation_latency thread_creation_latency.cpp)
-target_link_libraries (thread_creation_latency clickhouse_common_io)
+target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io)
add_executable (thread_pool thread_pool.cpp)
-target_link_libraries (thread_pool clickhouse_common_io)
+target_link_libraries (thread_pool PRIVATE clickhouse_common_io)
add_executable (array_cache array_cache.cpp)
-target_link_libraries (array_cache clickhouse_common_io)
+target_link_libraries (array_cache PRIVATE clickhouse_common_io)
add_executable (space_saving space_saving.cpp)
-target_link_libraries (space_saving clickhouse_common_io)
+target_link_libraries (space_saving PRIVATE clickhouse_common_io)
add_executable (integer_hash_tables_and_hashes integer_hash_tables_and_hashes.cpp)
target_include_directories (integer_hash_tables_and_hashes SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
-target_link_libraries (integer_hash_tables_and_hashes clickhouse_common_io)
+target_link_libraries (integer_hash_tables_and_hashes PRIVATE clickhouse_common_io)
add_executable (allocator allocator.cpp)
-target_link_libraries (allocator clickhouse_common_io)
+target_link_libraries (allocator PRIVATE clickhouse_common_io)
add_executable (cow_columns cow_columns.cpp)
-target_link_libraries (cow_columns clickhouse_common_io)
+target_link_libraries (cow_columns PRIVATE clickhouse_common_io)
add_executable (stopwatch stopwatch.cpp)
-target_link_libraries (stopwatch clickhouse_common_io)
+target_link_libraries (stopwatch PRIVATE clickhouse_common_io)
diff --git a/dbms/src/Common/tests/arena_with_free_lists.cpp b/dbms/src/Common/tests/arena_with_free_lists.cpp
index 0a16eff6c1..5091551b55 100644
--- a/dbms/src/Common/tests/arena_with_free_lists.cpp
+++ b/dbms/src/Common/tests/arena_with_free_lists.cpp
@@ -25,6 +25,14 @@
using namespace DB;
+namespace DB
+{
+ namespace ErrorCodes
+ {
+ extern const int SYSTEM_ERROR;
+ }
+}
+
/// Implementation of ArenaWithFreeLists, which contains a bug. Used to reproduce the bug.
#if USE_BAD_ARENA
@@ -237,7 +245,7 @@ int main(int argc, char ** argv)
rusage resource_usage;
if (0 != getrusage(RUSAGE_SELF, &resource_usage))
- throwFromErrno("Cannot getrusage");
+ throwFromErrno("Cannot getrusage", ErrorCodes::SYSTEM_ERROR);
size_t allocated_bytes = resource_usage.ru_maxrss * 1024;
std::cerr << "Current memory usage: " << allocated_bytes << " bytes.\n";
diff --git a/dbms/src/Common/tests/thread_creation_latency.cpp b/dbms/src/Common/tests/thread_creation_latency.cpp
index ccc499c9a1..ef910a3e9f 100644
--- a/dbms/src/Common/tests/thread_creation_latency.cpp
+++ b/dbms/src/Common/tests/thread_creation_latency.cpp
@@ -22,6 +22,15 @@ void f() { ++x; }
void * g(void *) { f(); return {}; }
+namespace DB
+{
+ namespace ErrorCodes
+ {
+ extern const int PTHREAD_ERROR;
+ }
+}
+
+
template <typename F>
void test(size_t n, const char * name, F && kernel)
{
@@ -80,9 +89,9 @@ int main(int argc, char ** argv)
{
pthread_t thread;
if (pthread_create(&thread, nullptr, g, nullptr))
- DB::throwFromErrno("Cannot create thread.");
+ DB::throwFromErrno("Cannot create thread.", DB::ErrorCodes::PTHREAD_ERROR);
if (pthread_join(thread, nullptr))
- DB::throwFromErrno("Cannot join thread.");
+ DB::throwFromErrno("Cannot join thread.", DB::ErrorCodes::PTHREAD_ERROR);
});
test(n, "Create and destroy std::thread each iteration", []
diff --git a/dbms/src/Core/BackgroundSchedulePool.cpp b/dbms/src/Core/BackgroundSchedulePool.cpp
index 3e24cfca92..5da499e5ae 100644
--- a/dbms/src/Core/BackgroundSchedulePool.cpp
+++ b/dbms/src/Core/BackgroundSchedulePool.cpp
@@ -278,7 +278,7 @@ void BackgroundSchedulePool::delayExecutionThreadFunction()
{
std::unique_lock lock(delayed_tasks_mutex);
- while(!shutdown)
+ while (!shutdown)
{
Poco::Timestamp min_time;
diff --git a/dbms/src/Core/Defines.h b/dbms/src/Core/Defines.h
index cf7a0b621e..570c108b2a 100644
--- a/dbms/src/Core/Defines.h
+++ b/dbms/src/Core/Defines.h
@@ -62,6 +62,9 @@
#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800
#define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1
+/// Maximum number of HTTP connections between two endpoints.
+/// The value is arbitrary.
+#define DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT 15
// more aliases: https://mailman.videolan.org/pipermail/x264-devel/2014-May/010660.html
diff --git a/dbms/src/Core/ExternalTable.cpp b/dbms/src/Core/ExternalTable.cpp
index 9ab3a8e8d3..5bfdbb12e9 100644
--- a/dbms/src/Core/ExternalTable.cpp
+++ b/dbms/src/Core/ExternalTable.cpp
@@ -168,7 +168,7 @@ void ExternalTablesHandler::handlePart(const Poco::Net::MessageHeader & header, std::istream & stream)
/// Write data
data.first->readPrefix();
output->writePrefix();
- while(Block block = data.first->read())
+ while (Block block = data.first->read())
output->write(block);
data.first->readSuffix();
output->writeSuffix();
diff --git a/dbms/src/Core/Field.h b/dbms/src/Core/Field.h
index 6e07e1387f..b08c75df88 100644
--- a/dbms/src/Core/Field.h
+++ b/dbms/src/Core/Field.h
@@ -578,43 +578,54 @@ template <> struct TypeName<Array> { static std::string get() { return "Array"; } };
template <> struct TypeName<Tuple> { static std::string get() { return "Tuple"; } };
-template <typename T> struct NearestFieldType;
+template <typename T> struct NearestFieldTypeImpl;
-template <> struct NearestFieldType<UInt8> { using Type = UInt64; };
-template <> struct NearestFieldType<UInt16> { using Type = UInt64; };
-template <> struct NearestFieldType<UInt32> { using Type = UInt64; };
-template <> struct NearestFieldType<UInt64> { using Type = UInt64; };
-#ifdef __APPLE__
-template <> struct NearestFieldType<time_t> { using Type = UInt64; };
-template <> struct NearestFieldType<size_t> { using Type = UInt64; };
-#endif
-template <> struct NearestFieldType<DayNum> { using Type = UInt64; };
-template <> struct NearestFieldType<UInt128> { using Type = UInt128; };
-template <> struct NearestFieldType<UUID> { using Type = UInt128; };
-template <> struct NearestFieldType<Int8> { using Type = Int64; };
-template <> struct NearestFieldType