nixos/clickhouse: Migrate tests from handleTest to runTest

Jonathan Davies 2025-06-15 12:35:59 +00:00
parent 4d60b8f537
commit 2723c76503
6 changed files with 450 additions and 462 deletions

nixos/tests/all-tests.nix

@@ -306,7 +306,7 @@ in
  cinnamon-wayland = runTest ./cinnamon-wayland.nix;
  cjdns = runTest ./cjdns.nix;
  clatd = runTest ./clatd.nix;
- clickhouse = handleTest ./clickhouse { };
+ clickhouse = import ./clickhouse { inherit runTest; };
  cloud-init = handleTest ./cloud-init.nix { };
  cloud-init-hostname = handleTest ./cloud-init-hostname.nix { };
  cloudlog = runTest ./cloudlog.nix;
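
With this change `clickhouse` is no longer built by `handleTest`; the directory's `default.nix` (shown further down) now receives `runTest` and returns one test derivation per scenario. Roughly, the new attribute evaluates to the following (an illustrative sketch based on the clickhouse/default.nix hunk below, not literal code from this commit):

  {
    base = runTest ./clickhouse/base.nix;
    kafka = runTest ./clickhouse/kafka.nix;
    keeper = runTest ./clickhouse/keeper.nix;
    s3 = runTest ./clickhouse/s3.nix;
  }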

nixos/tests/clickhouse/base.nix

@@ -1,35 +1,33 @@
-import ../make-test-python.nix (
{ pkgs, ... }:
{
  name = "clickhouse";
  meta.maintainers = with pkgs.lib.maintainers; [ jpds ];

  nodes.machine = {
    services.clickhouse.enable = true;
    virtualisation.memorySize = 4096;
  };

  testScript =
    let
      # work around quote/substitution complexity by Nix, Perl, bash and SQL.
      tableDDL = pkgs.writeText "ddl.sql" "CREATE TABLE `demo` (`value` FixedString(10)) engine = MergeTree PARTITION BY value ORDER BY tuple();";
      insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');";
      selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`";
    in
    ''
      machine.start()
      machine.wait_for_unit("clickhouse.service")
      machine.wait_for_open_port(9000)

      machine.succeed(
        "cat ${tableDDL} | clickhouse-client"
      )
      machine.succeed(
        "cat ${insertQuery} | clickhouse-client"
      )
      machine.succeed(
        "cat ${selectQuery} | clickhouse-client | grep foo"
      )
    '';
}
-)
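
The `pkgs.writeText` indirection called out in the comment means the SQL never has to survive shell quoting; only the resulting store path is spliced into the command. A minimal self-contained sketch of the same trick (hypothetical example, not part of this commit):

  { pkgs ? import <nixpkgs> { } }:
  let
    # The SQL text becomes a file in the Nix store at evaluation time...
    selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`";
  in
  # ...and only its store path is interpolated into the shell command, so the
  # SQL quoting never meets bash (or, in the VM tests, Python) quoting.
  pkgs.writeShellScript "run-select" ''
    cat ${selectQuery} | clickhouse-client
  ''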

nixos/tests/clickhouse/default.nix

@@ -1,12 +1,8 @@
-{
-  system ? builtins.currentSystem,
-  config ? { },
-  pkgs ? import ../../.. { inherit system config; },
-}:
+{ runTest }:

{
-  base = import ./base.nix { inherit system pkgs; };
-  kafka = import ./kafka.nix { inherit system pkgs; };
-  keeper = import ./keeper.nix { inherit system pkgs; };
-  s3 = import ./s3.nix { inherit system pkgs; };
+  base = runTest ./base.nix;
+  kafka = runTest ./kafka.nix;
+  keeper = runTest ./keeper.nix;
+  s3 = runTest ./s3.nix;
}
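
Because the directory no longer accepts `system`/`pkgs`, any caller other than all-tests.nix now has to supply `runTest` itself. A rough stand-alone sketch of one way to do that, assuming the `runTest` entry point exposed by nixos/lib and its default arguments (the exact plumbing all-tests.nix uses may differ):

  { pkgs ? import ../../.. { } }:
  let
    nixos-lib = import ../../lib { };
    # Wrap nixos-lib.runTest so each test file only has to provide its module;
    # hostPkgs selects the package set used to run the test driver.
    runTest = module: nixos-lib.runTest {
      imports = [ module ];
      hostPkgs = pkgs;
    };
  in
  import ./. { inherit runTest; }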

nixos/tests/clickhouse/kafka.nix

@@ -1,174 +1,172 @@
-import ../make-test-python.nix (
{ pkgs, ... }:
let
  kafkaNamedCollectionConfig = ''
    <clickhouse>
      <named_collections>
        <cluster_1>
          <!-- ClickHouse Kafka engine parameters -->
          <kafka_broker_list>kafka:9092</kafka_broker_list>
          <kafka_topic_list>test_topic</kafka_topic_list>
          <kafka_group_name>clickhouse</kafka_group_name>
          <kafka_format>JSONEachRow</kafka_format>
          <kafka_commit_every_batch>0</kafka_commit_every_batch>
          <kafka_num_consumers>1</kafka_num_consumers>
          <kafka_thread_per_consumer>1</kafka_thread_per_consumer>

          <!-- Kafka extended configuration -->
          <kafka>
            <debug>all</debug>
            <auto_offset_reset>earliest</auto_offset_reset>
          </kafka>
        </cluster_1>
      </named_collections>
    </clickhouse>
  '';

  kafkaNamedCollection = pkgs.writeText "kafka.xml" kafkaNamedCollectionConfig;
in
{
  name = "clickhouse-kafka";
  meta.maintainers = with pkgs.lib.maintainers; [ jpds ];

  nodes = {
    clickhouse = {
      environment.etc = {
        "clickhouse-server/config.d/kafka.xml" = {
          source = "${kafkaNamedCollection}";
        };
      };

      services.clickhouse.enable = true;
      virtualisation.memorySize = 4096;
    };

    kafka = {
      networking.firewall.allowedTCPPorts = [
        9092
        9093
      ];

      environment.systemPackages = [
        pkgs.apacheKafka
        pkgs.jq
      ];

      services.apache-kafka = {
        enable = true;

        # Randomly generated uuid. You can get one by running:
        # kafka-storage.sh random-uuid
        clusterId = "b81s-MuGSwyt_B9_h37wtQ";
        formatLogDirs = true;

        settings = {
          listeners = [
            "PLAINTEXT://:9092"
            "CONTROLLER://:9093"
          ];
          "listener.security.protocol.map" = [
            "PLAINTEXT:PLAINTEXT"
            "CONTROLLER:PLAINTEXT"
          ];
          "controller.quorum.voters" = [
            "1@kafka:9093"
          ];
          "controller.listener.names" = [ "CONTROLLER" ];

          "node.id" = 1;
          "broker.rack" = 1;

          "process.roles" = [
            "broker"
            "controller"
          ];

          "log.dirs" = [ "/var/lib/apache-kafka" ];
          "num.partitions" = 1;
          "offsets.topic.replication.factor" = 1;
          "transaction.state.log.replication.factor" = 1;
          "transaction.state.log.min.isr" = 1;
        };
      };

      systemd.services.apache-kafka.serviceConfig.StateDirectory = "apache-kafka";
    };
  };

  testScript =
    let
      jsonTestMessage = pkgs.writeText "kafka-test-data.json" ''
        { "id": 1, "first_name": "Fred", "age": 32 }
        { "id": 2, "first_name": "Barbara", "age": 30 }
        { "id": 3, "first_name": "Nicola", "age": 12 }
      '';

      # work around quote/substitution complexity by Nix, Perl, bash and SQL.
      tableKafkaDDL = pkgs.writeText "ddl-kafka.sql" ''
        CREATE TABLE `test_kafka_topic` (
          `id` UInt32,
          `first_name` String,
          `age` UInt32
        ) ENGINE = Kafka(cluster_1);
      '';

      tableDDL = pkgs.writeText "ddl.sql" ''
        CREATE TABLE `test_topic` (
          `id` UInt32,
          `first_name` String,
          `age` UInt32
        ) ENGINE = MergeTree ORDER BY id;
      '';

      viewDDL = pkgs.writeText "view.sql" ''
        CREATE MATERIALIZED VIEW kafka_view TO test_topic AS
        SELECT
          id,
          first_name,
          age,
        FROM test_kafka_topic;
      '';

      selectQuery = pkgs.writeText "select.sql" "SELECT sum(age) from `test_topic`";
    in
    ''
      kafka.start()
      kafka.wait_for_unit("apache-kafka")
      kafka.wait_for_open_port(9092)

      clickhouse.start()
      clickhouse.wait_for_unit("clickhouse")
      clickhouse.wait_for_open_port(9000)

      clickhouse.wait_until_succeeds(
        """
        journalctl -o cat -u clickhouse.service | grep "Merging configuration file '/etc/clickhouse-server/config.d/kafka.xml'"
        """
      )

      clickhouse.succeed(
        "cat ${tableKafkaDDL} | clickhouse-client"
      )

      clickhouse.succeed(
        "cat ${tableDDL} | clickhouse-client"
      )

      clickhouse.succeed(
        "cat ${viewDDL} | clickhouse-client"
      )

      kafka.succeed(
        "jq -rc . ${jsonTestMessage} | kafka-console-producer.sh --topic test_topic --bootstrap-server kafka:9092"
      )

      kafka.wait_until_succeeds(
        "journalctl -o cat -u apache-kafka.service | grep 'Created a new member id ClickHouse-clickhouse-default-test_kafka_topic'"
      )

      clickhouse.wait_until_succeeds(
        "cat ${selectQuery} | clickhouse-client | grep 74"
      )
    '';
}
-)
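
The closing `grep 74` check is not arbitrary: the producer publishes three rows with ages 32, 30 and 12, which reach the MergeTree table via the Kafka engine table and the materialized view, and 32 + 30 + 12 = 74.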

nixos/tests/clickhouse/keeper.nix

@@ -1,185 +1,183 @@
-import ../make-test-python.nix (
{ lib, pkgs, ... }:
rec {
  name = "clickhouse-keeper";
  meta.maintainers = with pkgs.lib.maintainers; [ jpds ];

  nodes =
    let
      node = i: {
        environment.etc = {
          "clickhouse-server/config.d/cluster.xml".text = ''
            <clickhouse>
              <remote_servers>
                <perftest_2shards_1replicas>
                  ${lib.concatStrings (
                    lib.imap0 (j: name: ''
                      <shard>
                        <replica>
                          <host>${name}</host>
                          <port>9000</port>
                        </replica>
                      </shard>
                    '') (builtins.attrNames nodes)
                  )}
                </perftest_2shards_1replicas>
              </remote_servers>
            </clickhouse>
          '';

          "clickhouse-server/config.d/keeper.xml".text = ''
            <clickhouse>
              <keeper_server>
                <server_id>${toString i}</server_id>
                <tcp_port>9181</tcp_port>
                <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
                <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

                <coordination_settings>
                  <operation_timeout_ms>10000</operation_timeout_ms>
                  <session_timeout_ms>30000</session_timeout_ms>
                  <raft_logs_level>trace</raft_logs_level>
                  <rotate_log_storage_interval>10000</rotate_log_storage_interval>
                </coordination_settings>

                <raft_configuration>
                  ${lib.concatStrings (
                    lib.imap1 (j: name: ''
                      <server>
                        <id>${toString j}</id>
                        <hostname>${name}</hostname>
                        <port>9444</port>
                      </server>
                    '') (builtins.attrNames nodes)
                  )}
                </raft_configuration>
              </keeper_server>

              <zookeeper>
                ${lib.concatStrings (
                  lib.imap0 (j: name: ''
                    <node>
                      <host>${name}</host>
                      <port>9181</port>
                    </node>
                  '') (builtins.attrNames nodes)
                )}
              </zookeeper>

              <distributed_ddl>
                <path>/clickhouse/testcluster/task_queue/ddl</path>
              </distributed_ddl>
            </clickhouse>
          '';

          "clickhouse-server/config.d/listen.xml".text = ''
            <clickhouse>
              <listen_host>::</listen_host>
            </clickhouse>
          '';

          "clickhouse-server/config.d/macros.xml".text = ''
            <clickhouse>
              <macros>
                <replica>${toString i}</replica>
                <cluster>perftest_2shards_1replicas</cluster>
              </macros>
            </clickhouse>
          '';
        };

        networking.firewall.allowedTCPPorts = [
          9009
          9181
          9444
        ];

        services.clickhouse.enable = true;

        systemd.services.clickhouse = {
          after = [ "network-online.target" ];
          requires = [ "network-online.target" ];
        };

        virtualisation.memorySize = 1024 * 4;
        virtualisation.diskSize = 1024 * 10;
      };
    in
    {
      clickhouse1 = node 1;
      clickhouse2 = node 2;
    };

  testScript =
    let
      # work around quote/substitution complexity by Nix, Perl, bash and SQL.
      clustersQuery = pkgs.writeText "clusters.sql" "SHOW clusters";
      keeperQuery = pkgs.writeText "keeper.sql" "SELECT * FROM system.zookeeper WHERE path IN ('/', '/clickhouse') FORMAT VERTICAL";
      systemClustersQuery = pkgs.writeText "system-clusters.sql" "SELECT host_name, host_address, replica_num FROM system.clusters WHERE cluster = 'perftest_2shards_1replicas'";
      tableDDL = pkgs.writeText "table.sql" ''
        CREATE TABLE test ON cluster 'perftest_2shards_1replicas' ( A Int64, S String)
        Engine = ReplicatedMergeTree('/clickhouse/{cluster}/tables/{database}/{table}', '{replica}')
        ORDER BY A;
      '';
      insertDDL = pkgs.writeText "insert.sql" "
        INSERT INTO test SELECT number, '' FROM numbers(100000000);
      ";
      selectCountQuery = pkgs.writeText "select-count.sql" "
        select count() from test;
      ";
    in
    ''
      clickhouse1.start()
      clickhouse2.start()

      for machine in clickhouse1, clickhouse2:
        machine.wait_for_unit("clickhouse.service")
        machine.wait_for_open_port(9000)
        machine.wait_for_open_port(9009)
        machine.wait_for_open_port(9181)
        machine.wait_for_open_port(9444)

        machine.wait_until_succeeds(
          """
          journalctl -o cat -u clickhouse.service | grep "Merging configuration file '/etc/clickhouse-server/config.d/keeper.xml'"
          """
        )

        machine.log(machine.succeed(
          "cat ${clustersQuery} | clickhouse-client | grep perftest_2shards_1replicas"
        ))

        machine.log(machine.succeed(
          "cat ${keeperQuery} | clickhouse-client"
        ))

        machine.succeed(
          "cat ${systemClustersQuery} | clickhouse-client | grep clickhouse1"
        )
        machine.succeed(
          "cat ${systemClustersQuery} | clickhouse-client | grep clickhouse2"
        )

        machine.succeed(
          "ls /var/lib/clickhouse/coordination/log | grep changelog"
        )

      clickhouse2.succeed(
        "cat ${tableDDL} | clickhouse-client"
      )

      clickhouse2.succeed(
        "cat ${insertDDL} | clickhouse-client"
      )

      for machine in clickhouse1, clickhouse2:
        machine.wait_until_succeeds(
          "cat ${selectCountQuery} | clickhouse-client | grep 100000000"
        )
    '';
}
-)
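
For reference, the `{replica}` and `{cluster}` placeholders in the ReplicatedMergeTree path are expanded from the macros.xml written above (ClickHouse substitutes `{database}` and `{table}` itself), and the final `grep 100000000` confirms that both replicas see the full row count produced by the `numbers(100000000)` insert.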

nixos/tests/clickhouse/s3.nix

@@ -1,123 +1,121 @@
-import ../make-test-python.nix (
{ pkgs, ... }:
let
  s3 = {
    bucket = "clickhouse-bucket";
    accessKey = "BKIKJAA5BMMU2RHO6IBB";
    secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
  };

  clickhouseS3StorageConfig = ''
    <clickhouse>
      <storage_configuration>
        <disks>
          <s3_disk>
            <type>s3</type>
            <endpoint>http://minio:9000/${s3.bucket}/</endpoint>
            <access_key_id>${s3.accessKey}</access_key_id>
            <secret_access_key>${s3.secretKey}</secret_access_key>
            <metadata_path>/var/lib/clickhouse/disks/s3_disk/</metadata_path>
          </s3_disk>
          <s3_cache>
            <type>cache</type>
            <disk>s3_disk</disk>
            <path>/var/lib/clickhouse/disks/s3_cache/</path>
            <max_size>10Gi</max_size>
          </s3_cache>
        </disks>
        <policies>
          <s3_main>
            <volumes>
              <main>
                <disk>s3_disk</disk>
              </main>
            </volumes>
          </s3_main>
        </policies>
      </storage_configuration>
    </clickhouse>
  '';
in
{
  name = "clickhouse-s3";
  meta.maintainers = with pkgs.lib.maintainers; [ jpds ];

  nodes = {
    clickhouse = {
      environment.etc = {
        "clickhouse-server/config.d/s3.xml" = {
          text = "${clickhouseS3StorageConfig}";
        };
      };

      services.clickhouse.enable = true;
      virtualisation.diskSize = 15 * 1024;
      virtualisation.memorySize = 4 * 1024;
    };

    minio =
      { pkgs, ... }:
      {
        virtualisation.diskSize = 2 * 1024;
        networking.firewall.allowedTCPPorts = [ 9000 ];

        services.minio = {
          enable = true;
          inherit (s3) accessKey secretKey;
        };

        environment.systemPackages = [ pkgs.minio-client ];
      };
  };

  testScript =
    let
      # work around quote/substitution complexity by Nix, Perl, bash and SQL.
      tableDDL = pkgs.writeText "ddl.sql" ''
        CREATE TABLE `demo` (
          `value` String
        )
        ENGINE = MergeTree
        ORDER BY value
        SETTINGS storage_policy = 's3_main';
      '';
      insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');";
      selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`";
    in
    ''
      minio.wait_for_unit("minio")
      minio.wait_for_open_port(9000)
      minio.succeed(
        "mc alias set minio "
        + "http://localhost:9000 "
        + "${s3.accessKey} ${s3.secretKey} --api s3v4",
        "mc mb minio/${s3.bucket}",
      )

      clickhouse.start()
      clickhouse.wait_for_unit("clickhouse.service")
      clickhouse.wait_for_open_port(9000)

      clickhouse.wait_until_succeeds(
        """
        journalctl -o cat -u clickhouse.service | grep "Merging configuration file '/etc/clickhouse-server/config.d/s3.xml'"
        """
      )

      clickhouse.succeed(
        "cat ${tableDDL} | clickhouse-client"
      )
      clickhouse.succeed(
        "cat ${insertQuery} | clickhouse-client"
      )
      clickhouse.succeed(
        "cat ${selectQuery} | clickhouse-client | grep foo"
      )

      minio.log(minio.succeed(
        "mc ls minio/${s3.bucket}",
      ))
    '';
}
-)
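
The MinIO access and secret keys here are fixed throw-away test credentials, and the closing `mc ls` call only logs the bucket contents, so the test output shows whether the `s3_main` storage policy actually placed the table's data parts in the bucket rather than on local disk.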