(Note: This assumes you've already configured your master server for streaming replication)
export DATADIR=/data/postgres-9.3
export MASTER=192.168.0.1
export REPL_USER=replication
export REPL_PASSWORD=mypassword
Create a base backup:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pganalyze-web
  namespace: default
  labels:
    app: pganalyze-web
spec:
  replicas: 2
  selector:
(Note: This assumes you've already configured your master server for streaming replication)
export DATADIR=/data/postgres-9.3
export MASTER=192.168.0.1
export REPL_USER=replication
export REPL_PASSWORD=mypassword
Create a base backup:
version: '2'
services:
  db:
    image: postgres:16
    ports:
      - "5432:5432"
    command: >
      postgres
      -c shared_preload_libraries='pg_stat_statements'
# Copyright (c) 2014 Lukas Fittl <lukas@pganalyze.com>
#
# Released in the public domain - fork as you wish.
require 'rubygems'
require 'mixlib/cli'
require 'pg'
require 'pg_query'
require 'curses'
[...]
<kellabyte> what happens if the master goes down?
<posi> So if citus master goes down, how does one recover?
<posi> kellabyte: https://www.citusdata.com/citusdb-architecture-whitepaper
<kellabyte> yeah, since all reads/writes must go through the master, what happens?
<kellabyte> ah thank you
<posi> "The master node keeps only metadata tables which are typically small (a few MBs in size). The metadata table can be replicated and quickly restored if the master node ever experiences a failure."
<ozgun> @posi Methods that use "logical replication" from PG to Citus will work. @andres noted that if your source table has "partitions" and the schema needs to change, you'll need to pay some attention
<lfittl_> @kellabyte https://www.citusdata.com/docs/citus/5.0/admin_guide/cluster_management.html#master-node-failures probably answers the question best
<lfittl_> (Use Postgres streaming replication is afaik the best approach here, but @ozgun knows best)
/* Based on https://stackoverflow.com/questions/65213615/cdk-to-enable-dns-resolution-for-vpcpeering */
import { custom_resources } from 'aws-cdk-lib';
import { aws_ec2 as ec2, aws_iam as iam, aws_logs as logs } from 'aws-cdk-lib';
import { Construct } from 'constructs';
export interface AllowVPCPeeringDNSResolutionProps { | |
vpcPeering: ec2.CfnVPCPeeringConnection, | |
} |
  # |           ?column?           | lines | pct_lines | commits | current_employer
 ---+------------------------------+-------+-----------+---------+------------------
  1 | Tom Lane                     | 66210 |     26.09 |     438 | Crunchy Data
  2 | Tomas Vondra                 | 15357 |      6.05 |      50 | EDB
  3 | Dagfinn Ilmari Mannsåker [*] | 14715 |      5.80 |      10 | ?
  4 | Peter Eisentraut             | 12976 |      5.11 |     214 | EDB
  5 | Robert Haas                  |  7035 |      2.77 |      46 | EDB
  6 | Bruce Momjian                |  7010 |      2.76 |      58 | EDB
  7 | Peter Geoghegan              |  6889 |      2.71 |      91 | Zenith Labs
  8 | Amit Langote [*]             |  6859 |      2.70 |      24 | EDB
require 'pg_query'
pp PgQuery.parse("SELECT 1")

#<PgQuery:0x007f8cdaa8f8b8
 @parsetree=
  [{"SELECT"=>
     {"distinctClause"=>nil,
      "intoClause"=>nil,
      "targetList"=>
       [{"RESTARGET"=>
[Unit]
Description=Keeper for pg_auto_failover
After=network.target

[Service]
ExecStart=/usr/bin/pg_autoctl run --pgdata /path/to/pg/data
User=postgres
ProtectSystem=full
ProtectHome=true
CapabilityBoundingSet=