Diffstat (limited to 'host/Rory-ovh/services/postgres.nix')
-rwxr-xr-x  host/Rory-ovh/services/postgres.nix | 99
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/host/Rory-ovh/services/postgres.nix b/host/Rory-ovh/services/postgres.nix
new file mode 100755

index 0000000..10d1cb7
--- /dev/null
+++ b/host/Rory-ovh/services/postgres.nix
@@ -0,0 +1,99 @@
+{ config, pkgs, ... }:
+
+{
+  systemd.tmpfiles.rules = [ "d /data/dedicated/postgres 0750 postgres postgres" ];
+
+  services.postgresql = {
+    enable = true;
+    package = pkgs.postgresql_17_jit;
+    enableTCPIP = true;
+    authentication = pkgs.lib.mkOverride 10 ''
+      # TYPE, DATABASE, USER, ADDRESS, METHOD
+      local all all trust
+      host all all 127.0.0.1/32 trust
+      host all all ::1/128 trust
+      host discordbots discordbots 192.168.1.2/32 trust
+      host matrix-synapse-rory-gay matrix-synapse-rory-gay 192.168.1.5/32 trust
+      host all all 0.0.0.0/0 md5
+    '';
+    # initialScript = pkgs.writeText "backend-initScript" ''
+    #   CREATE ROLE nixcloud WITH LOGIN PASSWORD 'nixcloud' CREATEDB;
+    #   CREATE DATABASE nixcloud;
+    #   GRANT ALL PRIVILEGES ON DATABASE nixcloud TO nixcloud;
+    # '';
+    dataDir = "/data/dedicated/postgres";
+    settings = {
+      # https://pgconfigurator.cybertec.at/
+      max_connections = 2500;
+      superuser_reserved_connections = 3;
+
+      shared_buffers = if config.virtualisation.isVmVariant then "128MB" else "64GB";
+      work_mem = if config.virtualisation.isVmVariant then "64MB" else "32GB";
+      maintenance_work_mem = if config.virtualisation.isVmVariant then "512MB" else "8GB";
+      huge_pages = "try";
+      effective_cache_size = if config.virtualisation.isVmVariant then "1GB" else "64GB"; # was 22
+      effective_io_concurrency = 100;
+      random_page_cost = 1.1;
+
+      # can use this to view stats: SELECT query, total_time, calls, rows FROM pg_stat_statements ORDER BY total_time DESC LIMIT 10;
+      shared_preload_libraries = "pg_stat_statements";
+      track_io_timing = "on";
+      track_functions = "pl";
+      "pg_stat_statements.max" = "10000"; # additional
+      "pg_stat_statements.track" = "all"; # additional
+
+      wal_level = "replica";
+      max_wal_senders = 0;
+      synchronous_commit = "on"; # was ond3
+
+      checkpoint_timeout = "15min";
+      checkpoint_completion_target = "0.9";
+      max_wal_size = "2GB";
+      min_wal_size = "1GB";
+
+      wal_compression = "off";
+      wal_buffers = "-1";
+      wal_writer_delay = "500ms"; # was 100
+      wal_writer_flush_after = "32MB"; # was 1
+      #checkpoint_segments = "64"; # additional
+      default_statistics_target = "250"; # additional
+
+      bgwriter_delay = "200ms";
+      bgwriter_lru_maxpages = "100";
+      bgwriter_lru_multiplier = "2.0";
+      bgwriter_flush_after = "0";
+
+      max_worker_processes = "64"; # was 14
+      max_parallel_workers_per_gather = "32"; # was 7
+      max_parallel_maintenance_workers = "32"; # was 7
+      max_parallel_workers = "64"; # was 14
+      parallel_leader_participation = "on";
+
+      enable_partitionwise_join = "on";
+      enable_partitionwise_aggregate = "on";
+      jit = "on";
+      max_slot_wal_keep_size = "1GB";
+      track_wal_io_timing = "on";
+      maintenance_io_concurrency = "4";
+      wal_recycle = "on";
+
+    };
+  };
+
+  # services.prometheus.exporters.postgres = {
+  #   enable = true;
+  #   port = 9187;
+  #   extraFlags = [
+  #     "--collector.database_wraparound"
+  #     "--collector.long_running_transactions"
+  #     "--collector.postmaster"
+  #     "--collector.process_idle"
+  #     "--collector.stat_activity_autovacuum"
+  #     "--collector.stat_statements"
+  #     #"--collector.stat_wal_receiver" #we dont have WAL receivers
+  #     "--collector.statio_user_indexes"
+  #     "--collector.xlog_location"
+  #   ];
+  # };
+
+}
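
Note: the memory settings above branch on config.virtualisation.isVmVariant, which is not a stock NixOS option and is presumably declared elsewhere in this repository. Below is a minimal sketch of how this module could be consumed, assuming a hypothetical host configuration.nix and a hand-rolled isVmVariant flag; the file name and option declaration are illustrative, not taken from this commit.

{ lib, ... }:

{
  # Pull in the PostgreSQL module shown in the diff above.
  imports = [ ./services/postgres.nix ];

  # Assumed declaration of the custom flag referenced by postgres.nix; the
  # real definition may live elsewhere in the repository and could differ.
  options.virtualisation.isVmVariant = lib.mkOption {
    type = lib.types.bool;
    default = false;
    description = "True when building the nixos-rebuild build-vm variant.";
  };

  # The stock virtualisation.vmVariant option re-evaluates the configuration
  # for `nixos-rebuild build-vm`; setting the flag there makes postgres.nix
  # pick the small 128MB/64MB values instead of the 64GB production sizes.
  config.virtualisation.vmVariant = {
    virtualisation.isVmVariant = true;
  };
}

With wiring along these lines, `nixos-rebuild build-vm` would get a PostgreSQL sized for a test VM, while a normal switch keeps the production memory settings.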