@vitobotta · Last active March 27, 2020 17:01
Zalando Postgres Operator with Pulumi
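Pulumi component resources for installing the Zalando Postgres Operator and for creating Postgres clusters with WAL-G and logical backups to S3-compatible storage, including point-in-time restore by cloning an existing cluster. Three TypeScript files follow: a usage example, ZalandoPostgresCluster.ts and ZalandoPostgresOperator.ts (the latter two live in a zalando-postgres-operator directory inside the Pulumi project).

Usage example: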
import { ZalandoPostgresOperator } from "./zalando-postgres-operator/ZalandoPostgresOperator";
import { ZalandoPostgresCluster } from "./zalando-postgres-operator/ZalandoPostgresCluster";

// Install the operator
const zalandoPostgresOperator = new ZalandoPostgresOperator("zalando-postgres-operator", {
  version: "1.4.0",
  namespace: "postgres-operator",
});

// Create a new cluster
const zalandoPostgresCluster = new ZalandoPostgresCluster("postgres-db", {
  operator: zalandoPostgresOperator,
  namespace: "postgres-db",
  teamId: "postgres",
  storageClass: "...",
  storageSize: "10Gi",
  numberOfInstances: 1,
  s3Region: "...",
  s3Endpoint: "https://...",
  s3Bucket: "...",
  enableLogicalBackups: true,
  enableWalBackups: true,
});

// Create a new cluster and do a point-in-time restore of the data from the first cluster's backups
const zalandoPostgresCluster2 = new ZalandoPostgresCluster("postgres-db-2", {
  operator: zalandoPostgresOperator,
  namespace: "postgres-db-2",
  teamId: "postgres",
  storageClass: "...",
  storageSize: "10Gi",
  numberOfInstances: 1,
  s3Region: "...",
  s3Endpoint: "https://...",
  s3Bucket: "...",
  enableLogicalBackups: true,
  enableWalBackups: true,
  clone: true,
  cloneClusterName: "postgres-db",
  cloneClusterID: "09c3df08-3921-4002-be3a-e299891006c8",
  cloneTargetTime: "2020-03-26T20:36:03+00:00",
});
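Once a cluster is up, the operator stores the generated role passwords in Kubernetes Secrets inside the cluster's namespace. Below is a minimal sketch of reading one back with Pulumi, assuming the operator's default secret naming scheme ({username}.{clustername}.credentials.postgresql.acid.zalan.do); check the actual secret names in the namespace before relying on it.

import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";

// Assumed name: the "postgres" superuser of the "postgres-db" cluster in the "postgres-db" namespace.
const credentials = k8s.core.v1.Secret.get(
  "postgres-db-credentials",
  "postgres-db/postgres.postgres-db.credentials.postgresql.acid.zalan.do",
);

// Secret data is base64-encoded; decode the password before handing it to an application,
// and keep it marked as a secret in the Pulumi state.
export const postgresPassword = pulumi.secret(
  credentials.data.apply(data => Buffer.from(data["password"], "base64").toString()),
);

ZalandoPostgresCluster.ts: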
import * as k8s from '@pulumi/kubernetes'
import * as pulumi from '@pulumi/pulumi'
import { ZalandoPostgresOperator } from "./ZalandoPostgresOperator"

export interface ZalandoPostgresClusterArgs {
  operator: pulumi.Input<ZalandoPostgresOperator>,
  namespace: pulumi.Input<string>,
  teamId: pulumi.Input<string>,
  storageClass: pulumi.Input<string>,
  storageSize: pulumi.Input<string>,
  numberOfInstances?: pulumi.Input<number>,
  podConfigMapName?: pulumi.Input<string>,
  s3Region?: pulumi.Input<string>,
  s3Endpoint?: pulumi.Input<string>,
  s3Bucket?: pulumi.Input<string>,
  s3AccessKeyId?: pulumi.Input<string>,
  s3SecretAccessKey?: pulumi.Input<string>,
  version?: pulumi.Input<string>,
  sharedBuffers?: pulumi.Input<string>,
  maxConnections?: pulumi.Input<number>,
  cpuRequest?: pulumi.Input<string>,
  memoryRequest?: pulumi.Input<string>,
  cpuLimit?: pulumi.Input<string>,
  memoryLimit?: pulumi.Input<string>,
  enableLogicalBackups?: pulumi.Input<boolean>,
  enableWalBackups?: pulumi.Input<boolean>,
  logicalBackupSchedule?: pulumi.Input<string>,
  walBackupsToRetain?: pulumi.Input<string>,
  walBackupSchedule?: pulumi.Input<string>,
  clone?: pulumi.Input<boolean>,
  cloneClusterID?: pulumi.Input<string>,
  cloneClusterName?: pulumi.Input<string>,
  cloneTargetTime?: pulumi.Input<string>,
}
export class ZalandoPostgresCluster extends pulumi.ComponentResource {
  constructor(
    appName: string,
    args: ZalandoPostgresClusterArgs,
    opts?: pulumi.ComponentResourceOptions,
  ) {
    super('ZalandoPostgresCluster', appName, {}, opts)

    const config: pulumi.Config = new pulumi.Config(`${appName}-cluster`)

    const operator = args.operator
    const namespace = args.namespace
    const teamId = args.teamId
    const storageClass = args.storageClass
    const storageSize = args.storageSize
    const s3Region: string = String(args.s3Region || config.get('s3Region'))
    const s3Endpoint: string = String(args.s3Endpoint || config.get('s3Endpoint'))
    const s3Bucket: string = String(args.s3Bucket || config.get('s3Bucket'))
    const s3AccessKeyId = pulumi.output(args.s3AccessKeyId || config.getSecret('s3AccessKeyId'))
    const s3SecretAccessKey = pulumi.output(args.s3SecretAccessKey || config.getSecret('s3SecretAccessKey'))
    const podConfigMapName = args.podConfigMapName || "postgres-pod-config"
    const numberOfInstances = args.numberOfInstances || 1
    const version = args.version || "11"
    const sharedBuffers = args.sharedBuffers || "32MB"
    const maxConnections = args.maxConnections || "500"
    const cpuRequest = args.cpuRequest || "10m"
    const memoryRequest = args.memoryRequest || "100Mi"
    const cpuLimit = args.cpuLimit || "500m"
    const memoryLimit = args.memoryLimit || "500Mi"
    const enableLogicalBackups = args.enableLogicalBackups || false
    const logicalBackupSchedule = args.logicalBackupSchedule || "00 05 * * *"
    // Explicit undefined check so that passing false actually disables WAL backups
    // ("|| true" would always evaluate to true).
    const enableWalBackups = args.enableWalBackups !== undefined ? args.enableWalBackups : true
    const walBackupsToRetain = args.walBackupsToRetain || "14"
    const walBackupSchedule = args.walBackupSchedule || "0 */12 * * *"
    const clone = args.clone || false
    const cloneClusterID = args.cloneClusterID || ""
    const cloneClusterName = args.cloneClusterName || ""
    const cloneTargetTime = args.cloneTargetTime || "2050-02-04T12:49:03+00:00"
    let configMapData = {}

    if (clone) {
      // Environment for a cluster that clones another cluster from its WAL-G backups (point-in-time restore)
      configMapData = {
        "BACKUP_SCHEDULE": walBackupSchedule,
        "USE_WALG_BACKUP": String(enableWalBackups),
        "BACKUP_NUM_TO_RETAIN": walBackupsToRetain,
        "WAL_S3_BUCKET": s3Bucket,
        "AWS_ACCESS_KEY_ID": s3AccessKeyId,
        "AWS_SECRET_ACCESS_KEY": s3SecretAccessKey,
        "AWS_ENDPOINT": s3Endpoint,
        "AWS_REGION": s3Region,
        "WALG_DISABLE_S3_SSE": "true",
        "USE_WALG_RESTORE": "true",
        "CLONE_METHOD": "CLONE_WITH_WALE",
        "CLONE_AWS_ACCESS_KEY_ID": s3AccessKeyId,
        "CLONE_AWS_SECRET_ACCESS_KEY": s3SecretAccessKey,
        "CLONE_AWS_ENDPOINT": s3Endpoint,
        "CLONE_AWS_REGION": s3Region,
        "CLONE_WAL_S3_BUCKET": s3Bucket,
        "CLONE_WAL_BUCKET_SCOPE_SUFFIX": `/${cloneClusterID}`,
        "CLONE_TARGET_TIME": cloneTargetTime,
        "CLONE_SCOPE": cloneClusterName
      }
    } else {
      // Environment for a regular cluster with WAL-G backups only
      configMapData = {
        "BACKUP_SCHEDULE": walBackupSchedule,
        "USE_WALG_BACKUP": String(enableWalBackups),
        "BACKUP_NUM_TO_RETAIN": walBackupsToRetain,
        "WAL_S3_BUCKET": s3Bucket,
        "AWS_ACCESS_KEY_ID": s3AccessKeyId,
        "AWS_SECRET_ACCESS_KEY": s3SecretAccessKey,
        "AWS_ENDPOINT": s3Endpoint,
        "AWS_REGION": s3Region,
        "WALG_DISABLE_S3_SSE": "true",
      }
    }
    const ns = new k8s.core.v1.Namespace(
      `${appName}-ns`,
      {
        metadata: {
          name: namespace,
        },
      },
      { parent: this },
    )

    // Pod environment ConfigMap referenced by the operator's pod_environment_configmap setting
    // and injected into the Spilo pods
    const configMap = new k8s.core.v1.ConfigMap(`${appName}-config-map`,
      {
        metadata: {
          name: podConfigMapName,
          namespace: namespace
        },
        data: configMapData
      },
      {
        parent: this,
        dependsOn: [
          ns
        ]
      }
    );
    // The postgresql custom resource picked up by the Zalando operator
    const cluster = new k8s.apiextensions.CustomResource(appName,
      {
        kind: "postgresql",
        apiVersion: "acid.zalan.do/v1",
        metadata: {
          name: appName,
          namespace: namespace,
        },
        spec: {
          teamId: teamId,
          volume: {
            size: storageSize,
            storageClass: storageClass
          },
          numberOfInstances: numberOfInstances,
          users: {},
          databases: {},
          postgresql: {
            version: version,
            parameters: {
              shared_buffers: sharedBuffers,
              max_connections: maxConnections
            }
          },
          resources: {
            requests: {
              cpu: cpuRequest,
              memory: memoryRequest
            },
            limits: {
              cpu: cpuLimit,
              memory: memoryLimit
            }
          },
          enableLogicalBackup: enableLogicalBackups,
          logicalBackupSchedule: logicalBackupSchedule,
          initContainers: [],
          sidecars: []
        }
      },
      {
        parent: this,
        dependsOn: [
          operator,
          ns,
          configMap
        ]
      }
    )
  }
}
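The component above always creates the cluster with empty users and databases maps, so roles and databases have to be created by other means. If you want the operator to provision them, the postgresql resource accepts maps of role name to role flags and database name to owner role; a possible extension of the args interface (not part of the original component, names are illustrative) could look like this:

import * as pulumi from "@pulumi/pulumi"

export interface ZalandoPostgresClusterUsersArgs {
  // e.g. { myapp: ["createdb"] } - role name mapped to the role flags understood by the operator
  users?: pulumi.Input<{ [role: string]: string[] }>,
  // e.g. { myapp_db: "myapp" } - database name mapped to its owner role
  databases?: pulumi.Input<{ [database: string]: string }>,
}

These values would then be passed straight through to spec.users and spec.databases in place of the empty objects.

ZalandoPostgresOperator.ts: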
import * as k8s from '@pulumi/kubernetes'
import * as pulumi from '@pulumi/pulumi'
import * as fs from "fs";
import * as path from "path";
import * as shell from "shelljs";

export interface ZalandoPostgresOperatorArgs {
  version?: pulumi.Input<string>,
  namespace?: pulumi.Input<string>,
  s3Region?: pulumi.Input<string>,
  s3Endpoint?: pulumi.Input<string>,
  s3Bucket?: pulumi.Input<string>,
  logicalBackupS3SSE?: pulumi.Input<string>,
  logicalBackupDefaultSchedule?: pulumi.Input<string>,
}
export class ZalandoPostgresOperator extends pulumi.ComponentResource {
  constructor(
    appName: string,
    args: ZalandoPostgresOperatorArgs,
    opts?: pulumi.ComponentResourceOptions,
  ) {
    super('ZalandoPostgres', appName, {}, opts)

    const config: pulumi.Config = new pulumi.Config(appName)

    const version = args.version || config.get('version') || "1.4.0"
    const namespace = args.namespace || config.get('namespace') || "postgres-operator"
    const s3Region = args.s3Region || config.get('s3Region')
    const s3Endpoint = args.s3Endpoint || config.get('s3Endpoint')
    const s3Bucket = args.s3Bucket || config.get('s3Bucket')
    const logicalBackupS3SSE = args.logicalBackupS3SSE || config.get('logicalBackupS3SSE') || ""
    const logicalBackupDefaultSchedule = args.logicalBackupDefaultSchedule || config.get('logicalBackupDefaultSchedule') || "00 05 * * *"
    const s3AccessKeyId = config.requireSecret('s3AccessKeyId')
    const s3SecretAccessKey = config.requireSecret('s3SecretAccessKey')
    const ns = new k8s.core.v1.Namespace(
      `${appName}-ns`,
      {
        metadata: {
          name: namespace,
        },
      },
      { parent: this },
    )

    // Download and untar the Helm chart locally, unless it has already been fetched
    const chartDir = path.resolve(`/tmp/${appName}`);

    if (!fs.existsSync(path.join(chartDir, "postgres-operator"))) {
      k8s.helm.v3.fetch(`https://opensource.zalando.com/postgres-operator/charts/postgres-operator/postgres-operator-${version}.tgz`,
        {
          destination: chartDir,
          untar: true,
        });

      // Pulumi always loads values.yaml in the current version, which causes problems
      // for the operator when installing in CRD mode (recommended) instead of ConfigMap
      // mode, so we overwrite values.yaml with the contents of values-crd.yaml.
      shell.cp(path.join(chartDir, "postgres-operator/values-crd.yaml"), path.join(chartDir, "postgres-operator/values.yaml"));
    }

    // Install the CRDs shipped with the chart
    const crds = new k8s.yaml.ConfigGroup(`${appName}-crds`, {
      files: [ path.join(chartDir, "postgres-operator/crds", "*.yaml") ],
    });

    // Defaults for CRD-based configuration, used as the base for the overrides below
    function crdValues(): any {
      let fileContents = fs.readFileSync(path.join(chartDir, "postgres-operator/values-crd.yaml"));
      return require("js-yaml").safeLoad(fileContents);
    }
    const customValues = {
      configTarget: "OperatorConfigurationCRD",
      configKubernetes: {
        enable_pod_antiaffinity: true,
        pod_environment_configmap: "postgres-pod-config",
        watched_namespace: "*",
        enable_init_containers: true,
        enable_pod_disruption_budget: true,
        enable_sidecars: true,
        spilo_privileged: false
      },
      configAwsOrGcp: {
        aws_region: s3Region,
        aws_endpoint: s3Endpoint,
        wal_s3_bucket: s3Bucket
      },
      configLoadBalancer: {
        enable_master_load_balancer: false,
        enable_replica_load_balancer: false
      },
      configDebug: {
        debug_logging: true,
        enable_database_access: true
      },
      configLogicalBackup: {
        logical_backup_docker_image: "vitobotta/postgres-logical-backup:0.0.13",
        logical_backup_s3_access_key_id: s3AccessKeyId,
        logical_backup_s3_bucket: s3Bucket,
        logical_backup_s3_region: s3Region,
        logical_backup_s3_endpoint: s3Endpoint,
        logical_backup_s3_secret_access_key: s3SecretAccessKey,
        logical_backup_s3_sse: logicalBackupS3SSE,
        logical_backup_schedule: logicalBackupDefaultSchedule
      },
      configGeneral: {
        enable_crd_validation: true,
        enable_shm_volume: true,
        workers: 4,
        min_instances: -1,
        max_instances: -1,
      },
      configTeamsApi: {
        enable_team_superuser: false,
        enable_teams_api: false
      }
    }
    const zalandoPostgresOperator = new k8s.helm.v3.Chart(
      appName,
      {
        path: path.join(chartDir, "postgres-operator"),
        namespace: namespace,
        values: {...crdValues(), ...customValues}
      },
      {
        parent: this,
        dependsOn: [
          ns,
          crds
        ],
      },
    )
  }
}
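Note on configuration: the operator component reads its S3 credentials with config.requireSecret from the Pulumi config namespace matching its resource name, and the cluster component can optionally read the same keys from the "<name>-cluster" namespace. So for the usage example above the stack needs, at minimum, the secrets zalando-postgres-operator:s3AccessKeyId and zalando-postgres-operator:s3SecretAccessKey (set with pulumi config set --secret) before running pulumi up.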