Remove the docker/CI folder and fix the CI error

This commit is contained in:
fredwang 2024-11-12 13:29:59 +08:00
parent da5a5ae346
commit 9f730be7ba
18 changed files with 2 additions and 1790 deletions

View File

@ -1 +0,0 @@
docker-compose exec server-0 /opt/byconity/bin/clickhouse client --port 52145 --host 127.0.0.1

View File

@ -1,174 +0,0 @@
version: "3"
services:
# After upgrade to docker-compose v2, we could use `include` instead of `extend`.
hdfs-namenode:
extends:
file: ./common/hdfs.yml
service: hdfs-namenode
hdfs-datanode:
extends:
file: ./common/hdfs.yml
service: hdfs-datanode
fdb:
extends:
file: ./common/fdb.yml
service: fdb
my_mysql:
extends:
file: ./common/mysql.yml
service: my_mysql
tso:
image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
command: bash -c "fdbcli -C /config/fdb.cluster --exec \"configure new single ssd\"; tso-server --config-file /config/tso.yml"
depends_on:
- fdb
- hdfs-namenode
volumes:
- ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
- ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
- ./nexusfs/:/config/:ro
- ./test_output/tso/:/var/log/byconity/:rw
environment: &env
LD_LIBRARY_PATH: /opt/byconity/lib
PATH: /opt/byconity/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ASAN_OPTIONS:
TSAN_OPTIONS:
IS_CI_ENV: 1
CI_PIPELINE_NAME: CI
cap_add:
- SYS_PTRACE
healthcheck:
test: ["CMD", "curl", "localhost:18845"]
interval: 5s
server-0:
image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
command: bash -c "(udf-manager --config-file /config/server.yml & clickhouse-server --config-file /config/server.yml)"
depends_on:
tso:
condition: service_healthy
ports:
- "9000:52145"
- "127.0.0.1:8123:21557"
- "127.0.0.1:9004:9004"
environment:
<<: *env
SERVER_ID: server-0
volumes:
- ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
- ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
- ./nexusfs/:/config/:ro
- ./test_output/server-0/:/var/log/byconity/:rw
- ./queries/:/opt/byconity/queries/:ro
cap_add:
- SYS_PTRACE
healthcheck:
test: ["CMD", "curl", "localhost:21557"]
interval: 5s
server-1:
  image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
  # Runs the UDF manager in the background alongside the main server process.
  command: bash -c "(udf-manager --config-file /config/server.yml & clickhouse-server --config-file /config/server.yml)"
  depends_on:
    tso:
      condition: service_healthy
  ports:
    - "9001:52145"
    - "127.0.0.1:8124:21557"
  environment:
    <<: *env
    SERVER_ID: server-1
  volumes:
    - ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
    - ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
    - ./nexusfs/:/config/:ro
    - ./test_output/server-1/:/var/log/byconity/:rw
    - ./queries/:/opt/byconity/queries/:ro
  cap_add:
    - SYS_PTRACE
  healthcheck:
    # Fixed: probe the HTTP port (21557) as server-0 does. The original probed
    # the native TCP port 52145, which is not an HTTP endpoint, so a curl-based
    # health check against it is unreliable.
    test: ["CMD", "curl", "localhost:21557"]
    interval: 5s
worker-write:
image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
command: bash -c "clickhouse-server --config-file /config/worker.yml"
depends_on:
- server-0
- server-1
ports:
- "52149:52145"
environment:
<<: *env
WORKER_GROUP_ID: wg_write
VIRTUAL_WAREHOUSE_ID: vw_write
WORKER_ID: w0
volumes:
- ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
- ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
- ./nexusfs/:/config/:ro
- ./test_output/worker-write/:/var/log/byconity/:rw
- ./queries/:/opt/byconity/queries/:ro
cap_add:
- SYS_PTRACE
worker-default:
image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
command: bash -c "(udf-manager --config-file /config/worker.yml & clickhouse-server --config-file /config/worker.yml)"
depends_on:
- server-0
- server-1
environment:
<<: *env
WORKER_GROUP_ID: wg_default
VIRTUAL_WAREHOUSE_ID: vw_default
WORKER_ID: r0
volumes:
- ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
- ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
- ./nexusfs/:/config/:ro
- ./test_output/worker-default/:/var/log/byconity/:rw
- ./queries/:/opt/byconity/queries/:ro
cap_add:
- SYS_PTRACE
daemon-manager:
  image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
  # Fixed: the config path was relative ("./config/daemon-manager.yml"), which
  # resolves against the container working directory and breaks if the image's
  # WORKDIR changes. The config volume is mounted at the absolute /config/,
  # matching every other service in this file.
  command: bash -c "daemon-manager --config-file /config/daemon-manager.yml"
  depends_on:
    server-0:
      condition: service_healthy
    server-1:
      condition: service_healthy
  environment:
    <<: *env
  volumes:
    - ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
    - ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
    - ./nexusfs/:/config/:ro
    - ./test_output/daemon-manager/:/var/log/byconity/:rw
  cap_add:
    - SYS_PTRACE
  restart: always
resource-manager:
image: hub.byted.org/bytehouse/debian.bullseye.fdb.udf:0.1
command: bash -c "resource-manager --config-file /config/resource-manager.yml"
depends_on:
- tso
volumes:
- ${CNCH_BINARY_PATH}/:/opt/byconity/bin/:ro
- ${CNCH_LIBRARY_PATH}/:/opt/byconity/lib/:ro
- ./nexusfs/:/config/:ro
- ./test_output/rm/:/var/log/byconity/:rw
environment:
<<: *env
cap_add:
- SYS_PTRACE
volumes:
fdb-data:
external: false
hdfs-namenode:
external: false
hdfs-datanode:
external: false

View File

@ -1,228 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
console: true
additional_services:
GIS: 1
VectorSearch: 1
FullTextSearch: 1
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
mysql_port: 9004
listen_host: "0.0.0.0"
prometheus:
endpoint: "/metrics"
port: 0
metrics: true
events: true
asynchronous_metrics: true
part_metrics: false
cnch_type: server
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
storage_configuration:
disks:
hdfs_disk:
path: /user/clickhouse/
type: bytehdfs
local_disk:
path: /var/byconity/data/
type: local
policies:
default:
volumes:
hdfs:
default: hdfs_disk
disk: hdfs_disk
local:
default: local_disk
disk: local_disk
cnch_kafka_log:
database: cnch_system
table: cnch_kafka_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_query_log:
database: cnch_system
table: cnch_query_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
part_allocation_algorithm: 1
consistent_hash_ring:
num_replicas: 16
num_probes: 21
load_factor: 1.3
service_discovery:
mode: local
cluster: default
disable_cache: false
cache_timeout: 5
server:
psm: data.cnch.server
node:
- host: server-0
hostname: server-0
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
- host: server-1
hostname: server-1
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
tso:
psm: data.cnch.tso
node:
host: tso-0
hostname: tso
ports:
port:
- name: PORT0
value: 18845
- name: PORT2
value: 9181
resource_manager:
psm: data.cnch.resource_manager
node:
host: resource-manager-0
hostname: resource-manager-0
ports:
port:
name: PORT0
value: 28989
daemon_manager:
psm: data.cnch.daemon_manager
node:
host: daemon-manager-0
hostname: daemon-manager
ports:
port:
name: PORT0
value: 17553
vw_psm: data.cnch.vw
vw:
psm: data.cnch.vw
node:
- host: worker-write-0
hostname: worker-write
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_write
- host: worker-default-0
hostname: worker-default
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_default
catalog:
name_space: default
catalog_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster
hdfs_addr: hdfs://hdfs-namenode:9000
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_server
timeout_ms: 10000
max_retry: 1
custom_settings_prefixes: SQL_
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
sensitive_permission_tenants: 1234

View File

@ -1,202 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
listen_host: "0.0.0.0"
cnch_type: worker
vw_name: vw_default
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
storage_configuration:
disks:
hdfs_disk:
path: /user/clickhouse/
type: bytehdfs
local_disk:
path: /var/byconity/data/
type: local
policies:
default:
volumes:
hdfs:
default: hdfs_disk
disk: hdfs_disk
local:
default: local_disk
disk: local_disk
hdfs_addr: "hdfs://hdfs-namenode:9000"
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
service_discovery:
mode: local
cluster: default
disable_cache: false
cache_timeout: 5
server:
psm: data.cnch.server
node:
- host: server-0
hostname: server-0
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
- host: server-1
hostname: server-1
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
tso:
psm: data.cnch.tso
node:
host: tso-0
hostname: tso
ports:
port:
- name: PORT0
value: 18845
- name: PORT2
value: 9181
resource_manager:
psm: data.cnch.resource_manager
node:
host: resource-manager-0
hostname: resource-manager-0
ports:
port:
name: PORT0
value: 28989
daemon_manager:
psm: data.cnch.daemon_manager
node:
host: daemon-manager-0
hostname: daemon-manager
ports:
port:
name: PORT0
value: 17553
vw_psm: data.cnch.vw
vw:
psm: data.cnch.vw
node:
- host: worker-write-0
hostname: worker-write
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_write
- host: worker-default-0
hostname: worker-default
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_default
catalog:
name_space: default
catalog_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_worker
timeout_ms: 10000
max_retry: 1
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
additional_services:
FullTextSearch: true
sensitive_permission_tenants: 1234

View File

@ -1,252 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
additional_services:
GIS: 1
VectorSearch: 1
FullTextSearch: 1
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
mysql_port: 9004
listen_host: "0.0.0.0"
prometheus:
endpoint: "/metrics"
port: 0
metrics: true
events: true
asynchronous_metrics: true
part_metrics: false
cnch_type: server
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
storage_configuration:
disks:
hdfs_disk:
path: /user/clickhouse/
type: bytehdfs
local_disk:
path: /var/byconity/data/
type: local
policies:
default:
volumes:
hdfs:
default: hdfs_disk
disk: hdfs_disk
local:
default: local_disk
disk: local_disk
cnch_kafka_log:
database: cnch_system
table: cnch_kafka_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_query_log:
database: cnch_system
table: cnch_query_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
part_allocation_algorithm: 1
consistent_hash_ring:
num_replicas: 16
num_probes: 21
load_factor: 1.3
service_discovery:
mode: local
cluster: default
disable_cache: false
cache_timeout: 5
server:
psm: data.cnch.server
node:
- host: server-0
hostname: server-0
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
- host: server-1
hostname: server-1
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
resource_manager:
psm: data.cnch.resource_manager
node:
host: resource-manager-0
hostname: resource-manager-0
ports:
port:
name: PORT0
value: 28989
daemon_manager:
psm: data.cnch.daemon_manager
node:
host: daemon-manager-0
hostname: daemon-manager-0
ports:
port:
name: PORT0
value: 17553
tso:
psm: data.cnch.tso
node:
host: tso-0
hostname: tso-0
ports:
port:
- name: PORT0
value: 18845
- name: PORT2
value: 9181
vw_psm: data.cnch.vw
vw:
  psm: data.cnch.vw
  # Fixed: the original repeated the `node:` key three times under `vw:`.
  # Duplicate mapping keys are invalid YAML (most parsers silently keep only
  # the LAST entry), which would drop the worker-write and worker-default-0
  # nodes from service discovery. Rewritten as a `node:` sequence, matching
  # the structure used by the sibling service-discovery configs in this repo.
  node:
    - vw_name: vw_write
      host: worker-write
      hostname: worker-write
      ports:
        port:
          - name: PORT2
            value: 21557
          - name: PORT1
            value: 30605
          - name: PORT0
            value: 52145
          - name: PORT4
            value: 27651
          - name: PORT3
            value: 45443
          - name: PORT5
            value: 47447
          - name: PORT6
            value: 60611
    - vw_name: vw_default
      host: worker-default-0
      hostname: worker-default-0
      ports:
        port:
          - name: PORT2
            value: 21557
          - name: PORT1
            value: 30605
          - name: PORT0
            value: 52145
          - name: PORT4
            value: 27651
          - name: PORT3
            value: 45443
          - name: PORT5
            value: 47447
          - name: PORT6
            value: 60611
    - vw_name: vw_default
      host: worker-default-1
      hostname: worker-default-1
      ports:
        port:
          - name: PORT2
            value: 21557
          - name: PORT1
            value: 30605
          - name: PORT0
            value: 52145
          - name: PORT4
            value: 27651
          - name: PORT3
            value: 45443
          - name: PORT5
            value: 47447
          - name: PORT6
            value: 60611
catalog:
name_space: default
catalog_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster
external_catalog_mgr:
  type: fdb
  fdb:
    # Fixed typo: was "/config/fdb/cluster"; every other fdb reference in this
    # commit (catalog_service, tso_service, other configs) uses the mounted
    # cluster file at /config/fdb.cluster.
    cluster_file: /config/fdb.cluster
hdfs_addr: "hdfs://hdfs-namenode:9000"
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_server
timeout_ms: 10000
max_retry: 1
custom_settings_prefixes: SQL_
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
sensitive_permission_tenants: 1234

View File

@ -1,6 +0,0 @@
catalog:
name_space: default
catalog_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster

View File

@ -1,115 +0,0 @@
service_discovery:
mode: local
cluster: default
disable_cache: false
cache_timeout: 5
server:
psm: data.cnch.server
node:
- host: server-0
hostname: server-0
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
- host: server-1
hostname: server-1
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
tso:
psm: data.cnch.tso
node:
host: tso
hostname: tso
ports:
port:
- name: PORT0
value: 18845
- name: PORT2
value: 9181
resource_manager:
psm: data.cnch.resource_manager
node:
host: resource-manager
hostname: resource-manager
ports:
port:
name: PORT0
value: 28989
daemon_manager:
psm: data.cnch.daemon_manager
node:
host: daemon-manager
hostname: daemon-manager
ports:
port:
name: PORT0
value: 17553
vw_psm: data.cnch.vw
vw:
psm: data.cnch.vw
node:
- host: worker-write
hostname: worker-write
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_write
- host: worker-default
hostname: worker-default
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_default

View File

@ -1,18 +0,0 @@
hdfs_addr: hdfs://hdfs-namenode:9000
storage_configuration:
disks:
hdfs_disk:
path: /user/clickhouse/
type: bytehdfs
local_disk:
path: /var/byconity/data/
type: local
policies:
default:
volumes:
hdfs:
default: hdfs_disk
disk: hdfs_disk
local:
default: local_disk
disk: local_disk

View File

@ -1,63 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
listen_host: "0.0.0.0"
cnch_type: server
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
daemon_manager:
port: 17553
daemon_jobs:
job:
- name: PART_GC
interval: 10000
disable: 0
- name: PART_MERGE
interval: 10000
disable: 0
- name: CONSUMER
interval: 10000
disable: 0
- name: GLOBAL_GC
interval: 5000
disable: 1
- name: PART_CLUSTERING
interval: 30000
disable: 0
- name: DEDUP_WORKER
interval: 3000
disable: 0
# Increasing the frequency of recycling in a test environment
- name: TXN_GC
interval: 3000
disable: 0

View File

@ -1 +0,0 @@
docker:docker@fdb:4550

View File

@ -1,29 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
listen_host: "0.0.0.0"
path: /var/byconity/
timezone: Europe/Moscow
perQuery: 1
resource_manager:
port: 28989
vws:
vw:
- name: vw_default
type: default
num_workers: 1
worker_groups:
worker_group:
name: wg_default
type: Physical
- name: vw_write
type: write
num_workers: 1
worker_groups:
worker_group:
name: wg_write
type: Physical

View File

@ -1,105 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
console: true
additional_services:
GIS: 1
VectorSearch: 1
FullTextSearch: 1
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
mysql_port: 9004
listen_host: "0.0.0.0"
prometheus:
endpoint: "/metrics"
port: 0
metrics: true
events: true
asynchronous_metrics: true
part_metrics: false
cnch_type: server
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
nexus_fs:
enable: 1
use_memory_device: 0
enable_async_io: 0
cache_size: 5368709120
region_size: 4194304
segment_size: 524288
enable_memory_buffer: 1
memory_buffer_size: 1073741824
clean_regions_pool: 16
clean_region_threads: 4
num_in_mem_buffers: 32
reader_threads: 32
merge_tree:
reorganize_marks_data_layout: 1
enable_nexus_fs: 1
cnch_kafka_log:
database: cnch_system
table: cnch_kafka_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_query_log:
database: cnch_system
table: cnch_query_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
part_allocation_algorithm: 1
consistent_hash_ring:
num_replicas: 16
num_probes: 21
load_factor: 1.3
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_server
timeout_ms: 10000
max_retry: 1
custom_settings_prefixes: SQL_
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
sensitive_permission_tenants: 1234

View File

@ -1,22 +0,0 @@
logger:
level: trace
log: /var/log/byconity/tso.log
errorlog: /var/log/byconity/tso.err.log
testlog: /var/log/byconity/tso.test.log
size: 1000M
count: 10
console: false
listen_host: "0.0.0.0"
path: /var/byconity/tso
tmp_path: /var/byconity/tmp
tso_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster
port: 18845
http:
port: 9181
receive_timeout: 1800
send_timeout: 1800
tso_window_ms: 3000
tso_get_leader_info_interval_ms: 0

View File

@ -1,46 +0,0 @@
profiles:
default:
load_balancing: random
log_queries: 1
max_execution_time: 180
exchange_timeout_ms: 300000
enable_nexus_fs: 1
point_lookup:
max_threads: 1
exchange_source_pipeline_threads: 1
enable_plan_cache: true
query_worker_fault_tolerance: false
send_cacheable_table_definitions: true
optimize_skip_unused_shards: true
enable_prune_source_plan_segment: true
users:
default:
networks:
ip: ::/0
password: ""
profile: default
quota: default
access_management: 1
server:
networks:
ip: ::/0
password: ""
profile: default
quota: default
probe:
networks:
ip: ::/0
password: ""
profile: default
quota: default
quotas:
default:
interval:
duration: 3600
queries: 0
errors: 0
result_rows: 0
read_rows: 0
execution_time: 0

View File

@ -1,82 +0,0 @@
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
listen_host: "0.0.0.0"
cnch_type: worker
vw_name: vw_default
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
nexus_fs:
enable: 1
use_memory_device: 0
enable_async_io: 0
cache_size: 5368709120
region_size: 4194304
segment_size: 524288
enable_memory_buffer: 1
memory_buffer_size: 1073741824
clean_regions_pool: 16
clean_region_threads: 4
num_in_mem_buffers: 32
reader_threads: 32
merge_tree:
reorganize_marks_data_layout: 1
enable_nexus_fs: 1
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_worker
timeout_ms: 10000
max_retry: 1
restrict_tenanted_users_to_system_tables: false
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
additional_services:
FullTextSearch: true
VectorSearch: true
GIS: true
sensitive_permission_tenants: 1234

View File

@ -1,236 +0,0 @@
# Auto-generated! Please do not modify this file directly. Refer to 'convert-hdfs-configs-to-s3.sh'.
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
console: true
additional_services:
GIS: 1
VectorSearch: 1
FullTextSearch: 1
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
mysql_port: 9004
listen_host: "0.0.0.0"
prometheus:
endpoint: "/metrics"
port: 0
metrics: true
events: true
asynchronous_metrics: true
part_metrics: false
cnch_type: server
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
storage_configuration:
disks:
local_disk:
path: /var/byconity/data/
type: local
s3_disk:
path: data123/
type: s3
endpoint: http://minio:9000
bucket: cnch
ak_id: minio
ak_secret: minio123
policies:
default:
volumes:
local:
default: local_disk
disk: local_disk
cnch_default_hdfs:
volumes:
s3:
default: s3_disk
disk: s3_disk
# To avoid break hard-coded test cases.
cnch_default_policy: cnch_default_hdfs
cnch_kafka_log:
database: cnch_system
table: cnch_kafka_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
cnch_query_log:
database: cnch_system
table: cnch_query_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
part_allocation_algorithm: 1
consistent_hash_ring:
num_replicas: 16
num_probes: 21
load_factor: 1.3
service_discovery:
mode: local
cluster: default
disable_cache: false
cache_timeout: 5
server:
psm: data.cnch.server
node:
- host: server-0
hostname: server-0
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
- host: server-1
hostname: server-1
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
tso:
psm: data.cnch.tso
node:
host: tso-0
hostname: tso
ports:
port:
- name: PORT0
value: 18845
- name: PORT2
value: 9181
resource_manager:
psm: data.cnch.resource_manager
node:
host: resource-manager-0
hostname: resource-manager-0
ports:
port:
name: PORT0
value: 28989
daemon_manager:
psm: data.cnch.daemon_manager
node:
host: daemon-manager-0
hostname: daemon-manager
ports:
port:
name: PORT0
value: 17553
vw_psm: data.cnch.vw
vw:
psm: data.cnch.vw
node:
- host: worker-write-0
hostname: worker-write
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_write
- host: worker-default-0
hostname: worker-default
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_default
catalog:
name_space: default
catalog_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_server
timeout_ms: 10000
max_retry: 1
custom_settings_prefixes: SQL_
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
sensitive_permission_tenants: 1234

View File

@ -1,210 +0,0 @@
# Auto-generated! Please do not modify this file directly. Refer to 'convert-hdfs-configs-to-s3.sh'.
logger:
level: trace
log: /var/log/byconity/out.log
errorlog: /var/log/byconity/err.log
testlog: /var/log/byconity/test.log
size: 1000M
count: 10
http_port: 21557
rpc_port: 30605
tcp_port: 52145
ha_tcp_port: 26247
exchange_port: 47447
exchange_status_port: 60611
interserver_http_port: 30491
listen_host: "0.0.0.0"
cnch_type: worker
vw_name: vw_default
max_connections: 4096
keep_alive_timeout: 3
max_concurrent_queries: 200
uncompressed_cache_size: 8589934592
mark_cache_size: 5368709120
path: /var/byconity/
tmp_path: /var/byconity/tmp_data/
users_config: /config/users.yml
default_profile: default
default_database: default
timezone: Europe/Moscow
mlock_executable: false
enable_tenant_systemdb: false
macros:
"-incl": macros
"-optional": true
builtin_dictionaries_reload_interval: 3600
max_session_timeout: 3600
default_session_timeout: 60
dictionaries_config: "*_dictionary.xml"
format_schema_path: /var/byconity/format_schemas/
perQuery: 1
storage_configuration:
disks:
local_disk:
path: /var/byconity/data/
type: local
s3_disk:
path: data123/
type: s3
endpoint: http://minio:9000
bucket: cnch
ak_id: minio
ak_secret: minio123
policies:
default:
volumes:
local:
default: local_disk
disk: local_disk
cnch_default_hdfs:
volumes:
s3:
default: s3_disk
disk: s3_disk
# To avoid break hard-coded test cases.
cnch_default_policy: cnch_default_hdfs
cnch_unique_table_log:
database: cnch_system
table: cnch_unique_table_log
flush_max_row_count: 10000
flush_interval_milliseconds: 7500
query_log:
database: system
table: query_log
flush_interval_milliseconds: 15000
partition_by: event_date
service_discovery:
mode: local
cluster: default
disable_cache: false
cache_timeout: 5
server:
psm: data.cnch.server
node:
- host: server-0
hostname: server-0
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
- host: server-1
hostname: server-1
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
tso:
psm: data.cnch.tso
node:
host: tso-0
hostname: tso
ports:
port:
- name: PORT0
value: 18845
- name: PORT2
value: 9181
resource_manager:
psm: data.cnch.resource_manager
node:
host: resource-manager-0
hostname: resource-manager-0
ports:
port:
name: PORT0
value: 28989
daemon_manager:
psm: data.cnch.daemon_manager
node:
host: daemon-manager-0
hostname: daemon-manager
ports:
port:
name: PORT0
value: 17553
vw_psm: data.cnch.vw
vw:
psm: data.cnch.vw
node:
- host: worker-write-0
hostname: worker-write
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_write
- host: worker-default-0
hostname: worker-default
ports:
port:
- name: PORT2
value: 21557
- name: PORT1
value: 30605
- name: PORT0
value: 52145
- name: PORT4
value: 27651
- name: PORT3
value: 45443
- name: PORT5
value: 47447
- name: PORT6
value: 60611
vw_name: vw_default
catalog:
name_space: default
catalog_service:
type: fdb
fdb:
cluster_file: /config/fdb.cluster
udf_path: /var/byconity/data/user_defined
udf_manager_server:
timeout_ms: 20000
max_retry: 1
udf_processor:
count: 3
uds_path: /dev/shm/udf_processor_worker
timeout_ms: 10000
max_retry: 1
restrict_tenanted_users_to_whitelist_settings: false
restrict_tenanted_users_to_privileged_operations: false
additional_services:
FullTextSearch: true
sensitive_permission_tenants: 1234

View File

@ -12,6 +12,8 @@ profiles:
send_cacheable_table_definitions: true
optimize_skip_unused_shards: true
enable_prune_source_plan_segment: true
readonly:
readonly: 1
users:
default: