36-node: prod
Production environment simulation with 36 nodes
Overview
- Conf Name: prod
- Node Count: 36-node, pigsty/vagrant/spec/prod.rb
- Description: Production environment simulation with 36 nodes
- Content: pigsty/conf/prod.yml
- OS Distro: el8, el9, d12, u22, u24
- OS Arch: x86_64

Usage: copy the template into place as the active pigsty.yml config:

cp -f conf/prod.yml pigsty.yml
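A minimal sketch of the equivalent workflow with the bundled configure wrapper (assuming a standard Pigsty checkout on the admin node):

./configure -c prod        # render conf/prod.yml into pigsty.yml
./install.yml              # then run the install playbook to deploy everything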
Content
Source: pigsty/conf/prod.yml
all:
children:
#==========================================================#
# infra: 2 nodes
#==========================================================#
# ./infra.yml -l infra
# ./docker.yml -l infra (optional)
infra:
hosts:
10.10.10.10: {}
10.10.10.11: {}
vars:
docker_enabled: true
node_conf: oltp # use oltp template for infra nodes
pg_conf: oltp.yml # use oltp template for infra pgsql
pg_exporters: # bin/pgmon-add pg-meta2/pg-test2/pg-src2/pg-dst2
20001: {pg_cluster: pg-meta2 ,pg_seq: 1 ,pg_host: 10.10.10.10, pg_databases: [{ name: meta }]}
20002: {pg_cluster: pg-meta2 ,pg_seq: 2 ,pg_host: 10.10.10.11, pg_databases: [{ name: meta }]}
20003: {pg_cluster: pg-test2 ,pg_seq: 1 ,pg_host: 10.10.10.41, pg_databases: [{ name: test }]}
20004: {pg_cluster: pg-test2 ,pg_seq: 2 ,pg_host: 10.10.10.42, pg_databases: [{ name: test }]}
20005: {pg_cluster: pg-test2 ,pg_seq: 3 ,pg_host: 10.10.10.43, pg_databases: [{ name: test }]}
20006: {pg_cluster: pg-test2 ,pg_seq: 4 ,pg_host: 10.10.10.44, pg_databases: [{ name: test }]}
20007: {pg_cluster: pg-src2 ,pg_seq: 1 ,pg_host: 10.10.10.45, pg_databases: [{ name: src }]}
20008: {pg_cluster: pg-src2 ,pg_seq: 2 ,pg_host: 10.10.10.46, pg_databases: [{ name: src }]}
20009: {pg_cluster: pg-src2 ,pg_seq: 3 ,pg_host: 10.10.10.47, pg_databases: [{ name: src }]}
20010: {pg_cluster: pg-dst2 ,pg_seq: 3 ,pg_host: 10.10.10.48, pg_databases: [{ name: dst }]}
20011: {pg_cluster: pg-dst2 ,pg_seq: 4 ,pg_host: 10.10.10.49, pg_databases: [{ name: dst }]}
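        # NOTE: each pg_exporters entry starts an extra pg_exporter on this infra node,
        # listening on the given local port (20001..20011) and scraping the remote
        # instance at pg_host (default port 5432), which is how clusters that are not
        # provisioned by pigsty get monitored (added with bin/pgmon-add as noted above).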
#==========================================================#
# nodes: 36 nodes
#==========================================================#
# ./node.yml
nodes:
hosts:
        10.10.10.10 : { nodename: meta1 ,node_cluster: meta ,pg_cluster: pg-meta ,pg_seq: 1 ,pg_role: primary, infra_seq: 1 }
        10.10.10.11 : { nodename: meta2 ,node_cluster: meta ,pg_cluster: pg-meta ,pg_seq: 2 ,pg_role: replica, infra_seq: 2 }
10.10.10.12 : { nodename: pg12 ,node_cluster: pg12 ,pg_cluster: pg-v12 ,pg_seq: 1 ,pg_role: primary }
10.10.10.13 : { nodename: pg13 ,node_cluster: pg13 ,pg_cluster: pg-v13 ,pg_seq: 1 ,pg_role: primary }
10.10.10.14 : { nodename: pg14 ,node_cluster: pg14 ,pg_cluster: pg-v14 ,pg_seq: 1 ,pg_role: primary }
10.10.10.15 : { nodename: pg15 ,node_cluster: pg15 ,pg_cluster: pg-v15 ,pg_seq: 1 ,pg_role: primary }
10.10.10.16 : { nodename: pg16 ,node_cluster: pg16 ,pg_cluster: pg-v16 ,pg_seq: 1 ,pg_role: primary }
10.10.10.17 : { nodename: pg17 ,node_cluster: pg17 ,pg_cluster: pg-v17 ,pg_seq: 1 ,pg_role: primary }
10.10.10.18 : { nodename: proxy1 ,node_cluster: proxy ,vip_address: 10.10.10.20 ,vip_vrid: 20 ,vip_interface: eth1 ,vip_role: master }
10.10.10.19 : { nodename: proxy2 ,node_cluster: proxy ,vip_address: 10.10.10.20 ,vip_vrid: 20 ,vip_interface: eth1 ,vip_role: backup }
10.10.10.21 : { nodename: minio1 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 1 ,etcd_cluster: etcd ,etcd_seq: 1}
10.10.10.22 : { nodename: minio2 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 2 ,etcd_cluster: etcd ,etcd_seq: 2}
10.10.10.23 : { nodename: minio3 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 3 ,etcd_cluster: etcd ,etcd_seq: 3}
10.10.10.24 : { nodename: minio4 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 4 ,etcd_cluster: etcd ,etcd_seq: 4}
10.10.10.25 : { nodename: minio5 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 5 ,etcd_cluster: etcd ,etcd_seq: 5}
10.10.10.40 : { nodename: node40 ,node_id_from_pg: true }
10.10.10.41 : { nodename: node41 ,node_id_from_pg: true }
10.10.10.42 : { nodename: node42 ,node_id_from_pg: true }
10.10.10.43 : { nodename: node43 ,node_id_from_pg: true }
10.10.10.44 : { nodename: node44 ,node_id_from_pg: true }
10.10.10.45 : { nodename: node45 ,node_id_from_pg: true }
10.10.10.46 : { nodename: node46 ,node_id_from_pg: true }
10.10.10.47 : { nodename: node47 ,node_id_from_pg: true }
10.10.10.48 : { nodename: node48 ,node_id_from_pg: true }
10.10.10.49 : { nodename: node49 ,node_id_from_pg: true }
10.10.10.50 : { nodename: node50 ,node_id_from_pg: true }
10.10.10.51 : { nodename: node51 ,node_id_from_pg: true }
10.10.10.52 : { nodename: node52 ,node_id_from_pg: true }
10.10.10.53 : { nodename: node53 ,node_id_from_pg: true }
10.10.10.54 : { nodename: node54 ,node_id_from_pg: true }
10.10.10.55 : { nodename: node55 ,node_id_from_pg: true }
10.10.10.56 : { nodename: node56 ,node_id_from_pg: true }
10.10.10.57 : { nodename: node57 ,node_id_from_pg: true }
10.10.10.58 : { nodename: node58 ,node_id_from_pg: true }
10.10.10.59 : { nodename: node59 ,node_id_from_pg: true }
10.10.10.88 : { nodename: test }
#==========================================================#
    # etcd: 5 nodes used as dedicated etcd cluster
#==========================================================#
# ./etcd.yml -l etcd;
etcd:
hosts:
10.10.10.21: {}
10.10.10.22: {}
10.10.10.23: {}
10.10.10.24: {}
10.10.10.25: {}
vars: {}
#==========================================================#
    # minio: 5 nodes used as dedicated minio cluster
#==========================================================#
# ./minio.yml -l minio;
minio:
hosts:
10.10.10.21: {}
10.10.10.22: {}
10.10.10.23: {}
10.10.10.24: {}
10.10.10.25: {}
vars:
minio_data: '/data{1...4}' # 5 node x 4 disk
#==========================================================#
# proxy: 2 nodes used as dedicated haproxy server
#==========================================================#
# ./node.yml -l proxy
proxy:
hosts:
10.10.10.18: {}
10.10.10.19: {}
vars:
vip_enabled: true
haproxy_services: # expose minio service : sss.pigsty:9000
- name: minio # [REQUIRED] service name, unique
port: 9000 # [REQUIRED] service port, unique
balance: leastconn # Use leastconn algorithm and minio health check
options: [ "option httpchk", "option http-keep-alive", "http-check send meth OPTIONS uri /minio/health/live", "http-check expect status 200" ]
servers: # reload service with ./node.yml -t haproxy_config,haproxy_reload
- { name: minio-1 ,ip: 10.10.10.21 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-2 ,ip: 10.10.10.22 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-3 ,ip: 10.10.10.23 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-4 ,ip: 10.10.10.24 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-5 ,ip: 10.10.10.25 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
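      # NOTE: keepalived (vip_enabled) floats the 10.10.10.20 VIP between proxy1 and
      # proxy2, while the haproxy service above balances sss.pigsty:9000 across the
      # five minio nodes, so S3 clients such as pgbackrest survive a single node failure.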
#==========================================================#
# pg-meta: reuse infra node as meta cmdb
#==========================================================#
# ./pgsql.yml -l pg-meta
pg-meta:
hosts:
10.10.10.10: { pg_seq: 1 , pg_role: primary }
10.10.10.11: { pg_seq: 2 , pg_role: replica }
vars:
pg_cluster: pg-meta
pg_vip_enabled: true
pg_vip_address: 10.10.10.2/24
pg_vip_interface: eth1
pg_users:
- {name: dbuser_meta ,password: DBUser.Meta ,pgbouncer: true ,roles: [dbrole_admin] ,comment: pigsty admin user }
- {name: dbuser_view ,password: DBUser.Viewer ,pgbouncer: true ,roles: [dbrole_readonly] ,comment: read-only viewer for meta database }
- {name: dbuser_grafana ,password: DBUser.Grafana ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for grafana database }
- {name: dbuser_bytebase ,password: DBUser.Bytebase ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for bytebase database }
- {name: dbuser_kong ,password: DBUser.Kong ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for kong api gateway }
- {name: dbuser_gitea ,password: DBUser.Gitea ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for gitea service }
- {name: dbuser_wiki ,password: DBUser.Wiki ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for wiki.js service }
- {name: dbuser_noco ,password: DBUser.Noco ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for nocodb service }
pg_databases:
- { name: meta ,baseline: cmdb.sql ,comment: pigsty meta database ,schemas: [pigsty] ,extensions: [{name: vector}]}
- { name: grafana ,owner: dbuser_grafana ,revokeconn: true ,comment: grafana primary database }
- { name: bytebase ,owner: dbuser_bytebase ,revokeconn: true ,comment: bytebase primary database }
- { name: kong ,owner: dbuser_kong ,revokeconn: true ,comment: kong the api gateway database }
- { name: gitea ,owner: dbuser_gitea ,revokeconn: true ,comment: gitea meta database }
- { name: wiki ,owner: dbuser_wiki ,revokeconn: true ,comment: wiki meta database }
- { name: noco ,owner: dbuser_noco ,revokeconn: true ,comment: nocodb database }
pg_hba_rules:
- { user: dbuser_view , db: all ,addr: infra ,auth: pwd ,title: 'allow grafana dashboard access cmdb from infra nodes' }
        pg_libs: 'pg_stat_statements, auto_explain' # shared_preload_libraries; add timescaledb here if needed
        node_crontab: # full backup at 1am on monday, incremental backup at 1am on the other days
- '00 01 * * 1 postgres /pg/bin/pg-backup full'
- '00 01 * * 2,3,4,5,6,7 postgres /pg/bin/pg-backup'
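      # NOTE: the 10.10.10.2 L2 VIP always points at the current pg-meta primary, and
      # the crontab above takes a full pgbackrest backup at 01:00 on monday and an
      # incremental backup at 01:00 on every other day of the week.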
#==========================================================#
    # pg-v12 - v17 : single-node clusters, one per major version (older majors no longer supported in OSS ver since 2.6)
#==========================================================#
# ./pgsql.yml -l pg-v*
pg-v12:
hosts: { 10.10.10.12: {}}
vars:
pg_version: 12
pg_service_provider: proxy # use load balancer on group `proxy` with port 10012
pg_default_services: [{ name: primary ,port: 10012 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v13:
hosts: { 10.10.10.13: {}}
vars:
pg_version: 13
pg_service_provider: proxy # use load balancer on group `proxy` with port 10013
pg_default_services: [{ name: primary ,port: 10013 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v14:
hosts: { 10.10.10.14: {}}
vars:
pg_version: 14
pg_service_provider: proxy # use load balancer on group `proxy` with port 10014
pg_default_services: [{ name: primary ,port: 10014 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v15:
hosts: { 10.10.10.15: {}}
vars:
pg_version: 15
pg_service_provider: proxy # use load balancer on group `proxy` with port 10015
pg_default_services: [{ name: primary ,port: 10015 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v16:
hosts: { 10.10.10.16: {}}
vars:
pg_version: 16
pg_service_provider: proxy # use load balancer on group `proxy` with port 10016
pg_default_services: [{ name: primary ,port: 10016 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v17:
hosts: { 10.10.10.17: {}}
vars:
pg_version: 17
pg_service_provider: proxy # use load balancer on group `proxy` with port 10017
pg_default_services: [{ name: primary ,port: 10017 ,dest: postgres ,check: /primary ,selector: "[]" }]
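    # NOTE: with pg_service_provider: proxy, the primary service of each single-node
    # cluster above is published by the haproxy instances of the proxy group (and its
    # 10.10.10.20 VIP) on ports 10012-10017, e.g. the pg-v17 primary is reached via
    # 10.10.10.20:10017 rather than on the database node itself.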
#==========================================================#
# pg-pitr: single node
#==========================================================#
# ./pgsql.yml -l pg-pitr
pg-pitr:
hosts:
10.10.10.40: { pg_seq: 1 ,pg_role: primary }
vars:
pg_cluster: pg-pitr
pg_databases: [{ name: test }]
#==========================================================#
    # pg-test: dedicated 4-node testing cluster
#==========================================================#
# ./pgsql.yml -l pg-test
pg-test:
hosts:
10.10.10.41: { pg_seq: 1 ,pg_role: primary }
10.10.10.42: { pg_seq: 2 ,pg_role: replica }
10.10.10.43: { pg_seq: 3 ,pg_role: replica }
10.10.10.44: { pg_seq: 4 ,pg_role: replica }
vars:
pg_cluster: pg-test
pg_vip_enabled: true
pg_vip_address: 10.10.10.3/24
pg_vip_interface: eth1
pg_users: [{ name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] }]
pg_databases: [{ name: test }]
#==========================================================#
    # pg-src: dedicated 3-node testing cluster
#==========================================================#
# ./pgsql.yml -l pg-src
pg-src:
hosts:
10.10.10.45: { pg_seq: 1 ,pg_role: primary }
10.10.10.46: { pg_seq: 2 ,pg_role: replica }
10.10.10.47: { pg_seq: 3 ,pg_role: replica }
vars:
pg_cluster: pg-src
#pg_version: 14
pg_vip_enabled: true
pg_vip_address: 10.10.10.4/24
pg_vip_interface: eth1
pg_users: [{ name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] }]
pg_databases: [{ name: src }]
#==========================================================#
    # pg-dst: dedicated 2-node testing cluster
#==========================================================#
# ./pgsql.yml -l pg-dst
pg-dst:
hosts:
10.10.10.48: { pg_seq: 1 ,pg_role: primary } # 8C 8G
10.10.10.49: { pg_seq: 2 ,pg_role: replica } # 1C 2G
vars:
pg_cluster: pg-dst
pg_vip_enabled: true
pg_vip_address: 10.10.10.5/24
pg_vip_interface: eth1
node_hugepage_ratio: 0.3
pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
pg_databases: [ { name: dst } ]
#==========================================================#
    # pg-citus: 10-node citus cluster (5 x primary-replica pairs)
#==========================================================#
pg-citus: # citus group
hosts:
10.10.10.50: { pg_group: 0, pg_cluster: pg-citus0 ,pg_vip_address: 10.10.10.60/24 ,pg_seq: 0, pg_role: primary }
10.10.10.51: { pg_group: 0, pg_cluster: pg-citus0 ,pg_vip_address: 10.10.10.60/24 ,pg_seq: 1, pg_role: replica }
10.10.10.52: { pg_group: 1, pg_cluster: pg-citus1 ,pg_vip_address: 10.10.10.61/24 ,pg_seq: 0, pg_role: primary }
10.10.10.53: { pg_group: 1, pg_cluster: pg-citus1 ,pg_vip_address: 10.10.10.61/24 ,pg_seq: 1, pg_role: replica }
10.10.10.54: { pg_group: 2, pg_cluster: pg-citus2 ,pg_vip_address: 10.10.10.62/24 ,pg_seq: 0, pg_role: primary }
10.10.10.55: { pg_group: 2, pg_cluster: pg-citus2 ,pg_vip_address: 10.10.10.62/24 ,pg_seq: 1, pg_role: replica }
10.10.10.56: { pg_group: 3, pg_cluster: pg-citus3 ,pg_vip_address: 10.10.10.63/24 ,pg_seq: 0, pg_role: primary }
10.10.10.57: { pg_group: 3, pg_cluster: pg-citus3 ,pg_vip_address: 10.10.10.63/24 ,pg_seq: 1, pg_role: replica }
10.10.10.58: { pg_group: 4, pg_cluster: pg-citus4 ,pg_vip_address: 10.10.10.64/24 ,pg_seq: 0, pg_role: primary }
10.10.10.59: { pg_group: 4, pg_cluster: pg-citus4 ,pg_vip_address: 10.10.10.64/24 ,pg_seq: 1, pg_role: replica }
vars:
pg_mode: citus # pgsql cluster mode: citus
        pg_version: 16 # pinned postgres major version for the citus cluster
pg_shard: pg-citus # citus shard name: pg-citus
pg_primary_db: test # primary database used by citus
pg_dbsu_password: DBUser.Postgres # all dbsu password access for citus cluster
pg_vip_enabled: true
pg_vip_interface: eth1
pg_extensions: [ 'citus postgis pgvector' ]
pg_libs: 'citus, pg_stat_statements, auto_explain' # citus will be added by patroni automatically
pg_users: [ { name: test ,password: test ,pgbouncer: true ,roles: [ dbrole_admin ] } ]
pg_databases: [ { name: test ,owner: test ,extensions: [ { name: citus }, { name: vector } ] } ]
pg_hba_rules:
- { user: 'all' ,db: all ,addr: 10.10.10.0/24 ,auth: trust ,title: 'trust citus cluster members' }
- { user: 'all' ,db: all ,addr: 127.0.0.1/32 ,auth: ssl ,title: 'all user ssl access from localhost' }
- { user: 'all' ,db: all ,addr: intra ,auth: ssl ,title: 'all user ssl access from intranet' }
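      # NOTE: pg_group 0 acts as the citus coordinator and groups 1-4 as worker shards;
      # each group is an independent primary/replica pair with its own L2 VIP
      # (10.10.10.60 - 10.10.10.64), so the coordinator can be reached via 10.10.10.60:5432.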
#==========================================================#
# redis-meta: reuse the 5 etcd nodes as redis sentinel
#==========================================================#
# ./redis.yml -l redis-meta
redis-meta:
hosts:
10.10.10.21: { redis_node: 1 , redis_instances: { 26379: {} } }
10.10.10.22: { redis_node: 2 , redis_instances: { 26379: {} } }
10.10.10.23: { redis_node: 3 , redis_instances: { 26379: {} } }
10.10.10.24: { redis_node: 4 , redis_instances: { 26379: {} } }
10.10.10.25: { redis_node: 5 , redis_instances: { 26379: {} } }
vars:
redis_cluster: redis-meta
redis_password: 'redis.meta'
redis_mode: sentinel
redis_max_memory: 256MB
redis_sentinel_monitor: # primary list for redis sentinel, use cls as name, primary ip:port
- { name: redis-src, host: 10.10.10.45, port: 6379 ,password: redis.src, quorum: 1 }
- { name: redis-dst, host: 10.10.10.48, port: 6379 ,password: redis.dst, quorum: 1 }
#==========================================================#
# redis-test: redis native cluster in 4 nodes, 12 instances
#==========================================================#
# ./node.yml -l redis-test; ./redis.yml -l redis-test
redis-test:
hosts:
10.10.10.41: { redis_node: 1 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
10.10.10.42: { redis_node: 2 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
10.10.10.43: { redis_node: 3 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
10.10.10.44: { redis_node: 4 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
vars:
redis_cluster: redis-test
redis_password: 'redis.test'
redis_mode: cluster
redis_max_memory: 64MB
#==========================================================#
# redis-src: reuse pg-src 3 nodes for redis
#==========================================================#
# ./redis.yml -l redis-src
redis-src:
hosts:
10.10.10.45: { redis_node: 1 , redis_instances: {6379: { } }}
10.10.10.46: { redis_node: 2 , redis_instances: {6379: { replica_of: '10.10.10.45 6379' }, 6380: { replica_of: '10.10.10.46 6379' } }}
10.10.10.47: { redis_node: 3 , redis_instances: {6379: { replica_of: '10.10.10.45 6379' }, 6380: { replica_of: '10.10.10.47 6379' } }}
vars:
redis_cluster: redis-src
redis_password: 'redis.src'
redis_max_memory: 64MB
#==========================================================#
# redis-dst: reuse pg-dst 2 nodes for redis
#==========================================================#
# ./redis.yml -l redis-dst
redis-dst:
hosts:
10.10.10.48: { redis_node: 1 , redis_instances: {6379: { } }}
10.10.10.49: { redis_node: 2 , redis_instances: {6379: { replica_of: '10.10.10.48 6379' } }}
vars:
redis_cluster: redis-dst
redis_password: 'redis.dst'
redis_max_memory: 64MB
#==========================================================#
# ferret: reuse pg-src as mongo (ferretdb)
#==========================================================#
# ./mongo.yml -l ferret
ferret:
hosts:
10.10.10.45: { mongo_seq: 1 }
10.10.10.46: { mongo_seq: 2 }
10.10.10.47: { mongo_seq: 3 }
vars:
mongo_cluster: ferret
mongo_pgurl: 'postgres://test:test@10.10.10.45:5432/src'
#mongo_pgurl: 'postgres://test:test@10.10.10.3:5436/test'
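      # NOTE: ferretdb exposes a mongo wire-protocol endpoint backed by the pg-src `src`
      # database given above; assuming the default listen port 27017, a client would
      # connect with something like: mongosh mongodb://test:test@10.10.10.45:27017/src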
#==========================================================#
    # test: node for running cli tools and testing miscellaneous stuff
#==========================================================#
test:
hosts: { 10.10.10.88: { nodename: test } }
vars:
node_cluster: test
node_packages: [ 'etcd,logcli,mcli,redis' ]
#============================================================#
# Global Variables
#============================================================#
vars:
#==========================================================#
# INFRA
#==========================================================#
version: v3.1.0 # pigsty version string
admin_ip: 10.10.10.10 # admin node ip address
region: default # upstream mirror region: default|china|europe
infra_portal: # domain names and upstream servers
home : { domain: h.pigsty }
grafana : { domain: g.pigsty ,endpoint: "10.10.10.10:3000" , websocket: true }
prometheus : { domain: p.pigsty ,endpoint: "10.10.10.10:9090" }
alertmanager : { domain: a.pigsty ,endpoint: "10.10.10.10:9093" }
blackbox : { endpoint: "10.10.10.10:9115" }
loki : { endpoint: "10.10.10.10:3100" }
minio : { domain: m.pigsty ,endpoint: "10.10.10.21:9001" ,scheme: https ,websocket: true }
postgrest : { domain: api.pigsty ,endpoint: "127.0.0.1:8884" }
pgadmin : { domain: adm.pigsty ,endpoint: "127.0.0.1:8885" }
pgweb : { domain: cli.pigsty ,endpoint: "127.0.0.1:8886" }
bytebase : { domain: ddl.pigsty ,endpoint: "127.0.0.1:8887" }
jupyter : { domain: lab.pigsty ,endpoint: "127.0.0.1:8888" , websocket: true }
supa : { domain: supa.pigsty ,endpoint: "10.10.10.10:8000", websocket: true }
nginx_navbar: []
dns_records: # dynamic dns records resolved by dnsmasq
- 10.10.10.1 h.pigsty a.pigsty p.pigsty g.pigsty
#==========================================================#
# NODE
#==========================================================#
node_id_from_pg: false # use nodename rather than pg identity as hostname
node_conf: tiny # use small node template
node_timezone: Asia/Hong_Kong # use Asia/Hong_Kong Timezone
node_dns_servers: # DNS servers in /etc/resolv.conf
- 10.10.10.10
- 10.10.10.11
node_etc_hosts:
- 10.10.10.10 h.pigsty a.pigsty p.pigsty g.pigsty
      - 10.10.10.20 sss.pigsty # point the minio service domain to the L2 VIP of the proxy cluster
node_ntp_servers: # NTP servers in /etc/chrony.conf
- pool cn.pool.ntp.org iburst
- pool 10.10.10.10 iburst
    node_admin_ssh_exchange: false # do not exchange admin ssh keys among the node cluster
#==========================================================#
# PGSQL
#==========================================================#
pg_conf: tiny.yml
pgbackrest_method: minio # USE THE HA MINIO THROUGH A LOAD BALANCER
pg_dbsu_ssh_exchange: false # do not exchange dbsu ssh key among pgsql cluster
pgbackrest_repo: # pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository
local: # default pgbackrest repo with local posix fs
path: /pg/backup # local backup directory, `/pg/backup` by default
retention_full_type: count # retention full backups by count
        retention_full: 2 # keep 2, at most 3 full backups when using the local fs repo
minio:
type: s3
        s3_endpoint: sss.pigsty # the minio service domain, resolved to the proxy VIP 10.10.10.20 which balances all 5 minio nodes
        s3_region: us-east-1 # placeholder s3 region, not actually used by minio
        s3_bucket: pgsql # minio bucket name used for pgbackrest backups
        s3_key: pgbackrest # minio access key for the pgbackrest user
        s3_key_secret: S3User.Backup # better to use a new secret for the minio pgbackrest user in production
s3_uri_style: path
path: /pgbackrest
storage_port: 9000 # Use the load balancer port 9000
storage_ca_file: /etc/pki/ca.crt
bundle: y
        cipher_type: aes-256-cbc # better to use a new cipher passphrase for your production environment
cipher_pass: pgBackRest.${pg_cluster}
retention_full_type: time
retention_full: 14
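    # NOTE: with pgbackrest_method: minio, every pgsql cluster ships backups and WAL
    # archives to the s3 repo above: traffic goes to sss.pigsty:9000, i.e. the proxy VIP,
    # which load-balances the five minio nodes; each cluster gets its own stanza and a
    # cipher passphrase rendered from the pgBackRest.${pg_cluster} template.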