# host/Rory-nginx/services/postgres.nix
{ pkgs, ... }:

{
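  # Create the non-default data directory with postgres ownership before first start.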
  systemd.tmpfiles.rules = [ "d /mnt/postgres/data 0750 postgres postgres" ];

  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_16_jit;
    enableTCPIP = true;
    authentication = pkgs.lib.mkOverride 10 ''
      # TYPE, DATABASE, USER, ADDRESS, METHOD
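      # first matching line wins: loopback and the two LAN hosts connect with trust, everything else needs md5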
      local all all trust
      host all all 127.0.0.1/32 trust
      host all all ::1/128 trust
      host discordbots discordbots 192.168.1.2/32 trust
      host matrix-synapse-rory-gay matrix-synapse-rory-gay 192.168.1.5/32 trust
      host all all 0.0.0.0/0 md5
    '';
    # initialScript = pkgs.writeText "backend-initScript" ''
    #   CREATE ROLE nixcloud WITH LOGIN PASSWORD 'nixcloud' CREATEDB;
    #   CREATE DATABASE nixcloud;
    #   GRANT ALL PRIVILEGES ON DATABASE nixcloud TO nixcloud;
    # '';
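    # A declarative alternative to initialScript (sketch, commented out so behaviour
    # is unchanged): NixOS can create the roles/databases referenced in the pg_hba
    # rules above; ensureDBOwnership assumes NixOS 23.11 or newer.
    # ensureDatabases = [ "discordbots" "matrix-synapse-rory-gay" ];
    # ensureUsers = [
    #   { name = "discordbots"; ensureDBOwnership = true; }
    #   { name = "matrix-synapse-rory-gay"; ensureDBOwnership = true; }
    # ];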
    dataDir = "/mnt/postgres/data";
    settings = {
      # https://pgconfigurator.cybertec.at/
      max_connections = 2500;
      superuser_reserved_connections = 3;

      shared_buffers = "64GB";
      work_mem = "32GB";
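      # note: work_mem is allocated per sort/hash node per backend, so worst-case
      # memory use scales with max_connections, not with this value alone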
      maintenance_work_mem = "8GB";
      huge_pages = "try";
      effective_cache_size = "64GB"; # was 22GB
      effective_io_concurrency = 100;
      random_page_cost = 1.1;

      # can use this to view stats (pg_stat_statements renamed total_time to total_exec_time in PostgreSQL 13+):
      #   SELECT query, total_exec_time, calls, rows FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 10;
      shared_preload_libraries = "pg_stat_statements";
      track_io_timing = "on";
      track_functions = "pl";
      "pg_stat_statements.max" = "10000"; # additional
      "pg_stat_statements.track" = "all"; # additional

      wal_level = "replica";
      max_wal_senders = 0;
      synchronous_commit = "on";

      checkpoint_timeout = "15min";
      checkpoint_completion_target = "0.9";
      max_wal_size = "2GB";
      min_wal_size = "1GB";

      wal_compression = "off";
      wal_buffers = "-1";
      wal_writer_delay = "500ms"; # was 100ms
      wal_writer_flush_after = "32MB"; # was 1MB
      #checkpoint_segments = "64"; # additional; removed in PostgreSQL 9.5 in favor of max_wal_size
      default_statistics_target = "250"; # additional

      bgwriter_delay = "200ms";
      bgwriter_lru_maxpages = "100";
      bgwriter_lru_multiplier = "2.0";
      bgwriter_flush_after = "0";

      max_worker_processes = "64"; # was 14
      max_parallel_workers_per_gather = "32"; # was 7
      max_parallel_maintenance_workers = "32"; # was 7
      max_parallel_workers = "64"; # was 14
      parallel_leader_participation = "on";
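      # parallel workers are capped twice: per_gather <= max_parallel_workers <= max_worker_processes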

      enable_partitionwise_join = "on";
      enable_partitionwise_aggregate = "on";
      jit = "on";
      max_slot_wal_keep_size = "1GB";
      track_wal_io_timing = "on";
      maintenance_io_concurrency = "4";
      wal_recycle = "on";

    };
  };

#  services.prometheus.exporters.postgres = {
#    enable = true;
#    port = 9187;
#    extraFlags = [
#      "--collector.database_wraparound"
#      "--collector.long_running_transactions"
#      "--collector.postmaster"
#      "--collector.process_idle"
#      "--collector.stat_activity_autovacuum"
#      "--collector.stat_statements"
#      #"--collector.stat_wal_receiver" #we dont have WAL receivers
#      "--collector.statio_user_indexes"
#      "--collector.xlog_location"
#    ];
#  };
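# If the exporter is re-enabled, a quick sanity check (assuming curl is available):
#   curl -s http://localhost:9187/metrics | grep pg_up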

}