Skip to content

Instantly share code, notes, and snippets.

@nuria
Last active May 17, 2022 16:47
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Star You must be signed in to star a gist
Embed
What would you like to do?
redshift_connector_playground.sh
#!/bin/bash

# Playground for the Confluent AWS Redshift Sink connector against Confluent Cloud.
# Requires: aws CLI configured on the host, docker, and the repo's scripts/utils.sh helpers.
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source "${DIR}/../../scripts/utils.sh"
# Fetch the Redshift JDBC driver once and cache it next to this script.
# Bug fix: wget previously saved into the caller's CWD, so the existence check
# on ${DIR}/... never matched and the jar was re-downloaded on every run.
if [ ! -f "${DIR}/RedshiftJDBC4-1.2.20.1043.jar" ]
then
wget -O "${DIR}/RedshiftJDBC4-1.2.20.1043.jar" https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/1.2.20.1043/RedshiftJDBC4-1.2.20.1043.jar
fi
# Fail fast if the AWS CLI is not configured on the host.
# Fix: quote the $HOME-based paths so the tests don't break on paths with spaces.
if [ ! -f "$HOME/.aws/config" ]
then
logerror "ERROR: $HOME/.aws/config is not set"
exit 1
fi
# Allow overriding the credentials file name; defaults to the standard "credentials".
if [ -z "$AWS_CREDENTIALS_FILE_NAME" ]
then
export AWS_CREDENTIALS_FILE_NAME="credentials"
fi
if [ ! -f "$HOME/.aws/$AWS_CREDENTIALS_FILE_NAME" ]
then
logerror "ERROR: $HOME/.aws/$AWS_CREDENTIALS_FILE_NAME is not set"
exit 1
fi
# Pick the connect container's home directory based on the image flavour:
# ubi8-based images (and anything newer than 5.9.0) run as the non-root
# "appuser"; older images run as root.
export CONNECT_CONTAINER_HOME_DIR="/root"
if [[ "$TAG" == *ubi8 ]] || version_gt $TAG_BASE "5.9.0"
then
export CONNECT_CONTAINER_HOME_DIR="/home/appuser"
fi
# Spin up the Confluent Cloud environment; this generates the credentials
# delta file consumed below. Fix: quote ${DIR} so the path survives spaces.
"${DIR}/../../ccloud/environment/start.sh" "${PWD}/docker-compose.yml"
# Guard clause: without env.delta none of the downstream kafka /
# schema-registry commands can authenticate, so bail out early.
if [ ! -f /tmp/delta_configs/env.delta ]
then
logerror "ERROR: /tmp/delta_configs/env.delta has not been generated"
exit 1
fi
source /tmp/delta_configs/env.delta
# Recreate the "orders" topic. The cluster has auto.create.topics.enable=false,
# so the connector's topic must exist before producing. Deletion may fail on a
# fresh environment, hence the temporary set +e.
log "Creating topic in Confluent Cloud (auto.create.topics.enable=false)"
set +e
delete_topic orders
sleep 3
create_topic orders
set -e
# Derive a per-user, per-tag Redshift cluster identifier. Redshift identifiers
# reject '-', '.' and '_', so strip those characters out.
CLUSTER_NAME="pg${USER}redshift${TAG}"
CLUSTER_NAME="${CLUSTER_NAME//[-._]/}"
set +e
# Best-effort cleanup of leftovers from a previous run; on a fresh run these
# deletes fail (nothing to delete), which is expected — hence set +e.
log "Delete AWS Redshift cluster, if required"
aws redshift delete-cluster --cluster-identifier $CLUSTER_NAME --skip-final-cluster-snapshot
log "Delete security group sg$CLUSTER_NAME, if required"
aws ec2 delete-security-group --group-name sg$CLUSTER_NAME
set -e
log "Create AWS Redshift cluster"
# https://docs.aws.amazon.com/redshift/latest/mgmt/getting-started-cli.html
# NOTE(review): delete-cluster is asynchronous; if a previous cluster is still
# deleting, this create may fail with ClusterAlreadyExists — confirm whether a
# wait is needed between delete and create.
aws redshift create-cluster --cluster-identifier $CLUSTER_NAME --master-username masteruser --master-user-password myPassword1 --node-type dc2.large --cluster-type single-node --publicly-accessible
# Poll every 10s until the cluster reports "available", up to MAX_WAIT seconds.
MAX_WAIT=480
CUR_WAIT=0
log "⌛ Waiting up to $MAX_WAIT seconds for AWS Redshift cluster $CLUSTER_NAME to start"
aws redshift describe-clusters --cluster-identifier "$CLUSTER_NAME" | jq .Clusters[0].ClusterStatus > /tmp/out.txt 2>&1
while [[ ! $(cat /tmp/out.txt) =~ "available" ]]; do
sleep 10
aws redshift describe-clusters --cluster-identifier "$CLUSTER_NAME" | jq .Clusters[0].ClusterStatus > /tmp/out.txt 2>&1
CUR_WAIT=$(( CUR_WAIT+10 ))
if [[ "$CUR_WAIT" -gt "$MAX_WAIT" ]]; then
# Bug fix: the previous timeout message was copy-pasted from a Control Center
# wait loop and referenced an unrelated container ($CONTROL_CENTER_CONTAINER)
# and docker logs, which would mislead anyone troubleshooting this failure.
logerror "ERROR: AWS Redshift cluster $CLUSTER_NAME did not reach 'available' status after $MAX_WAIT seconds. Troubleshoot with 'aws redshift describe-clusters --cluster-identifier $CLUSTER_NAME'."
exit 1
fi
done
log "AWS Redshift cluster $CLUSTER_NAME has started!"
# Open the cluster to inbound traffic on the Redshift port so the local
# connector and the verification psql client can reach it.
log "Create a security group"
GROUP_ID=$(aws ec2 create-security-group --group-name sg$CLUSTER_NAME --description "playground aws redshift" | jq -r .GroupId)
log "Allow ingress traffic from 0.0.0.0/0 on port 5439"
aws ec2 authorize-security-group-ingress --group-id $GROUP_ID --protocol tcp --port 5439 --cidr "0.0.0.0/0"
log "Modify AWS Redshift cluster to use the security group $GROUP_ID"
aws redshift modify-cluster --cluster-identifier $CLUSTER_NAME --vpc-security-group-ids $GROUP_ID
# NOTE(review): fixed sleep presumably waits for modify-cluster to take effect;
# there is no API-level confirmation here — confirm 60s is sufficient.
sleep 60
# getting cluster URL (endpoint address used by the connector and psql below)
CLUSTER=$(aws redshift describe-clusters --cluster-identifier $CLUSTER_NAME | jq -r .Clusters[0].Endpoint.Address)
log "Sending messages to topic orders"
# Produce one Avro record to "orders" via the connect container, authenticating
# against Confluent Cloud (SASL_SSL) and registering the value schema in Schema
# Registry. The heredoc body is the JSON-encoded record fed to the producer.
docker exec -i -e BOOTSTRAP_SERVERS="$BOOTSTRAP_SERVERS" -e SASL_JAAS_CONFIG="$SASL_JAAS_CONFIG" -e SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO="$SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO" -e SCHEMA_REGISTRY_URL="$SCHEMA_REGISTRY_URL" connect kafka-avro-console-producer --broker-list $BOOTSTRAP_SERVERS --producer-property ssl.endpoint.identification.algorithm=https --producer-property sasl.mechanism=PLAIN --producer-property security.protocol=SASL_SSL --producer-property sasl.jaas.config="$SASL_JAAS_CONFIG" --property basic.auth.credentials.source=USER_INFO --property schema.registry.basic.auth.user.info="$SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO" --property schema.registry.url=$SCHEMA_REGISTRY_URL --topic orders --property value.schema='{"type":"record","name":"myrecord","fields":[{"name":"id","type":"int"},{"name":"product", "type": "string"}, {"name":"quantity", "type": "int"}, {"name":"price",
"type": "float"}]}' << EOF
{"id": 999, "product": "foo", "quantity": 100, "price": 50}
EOF
log "Creating AWS Redshift Sink connector with cluster url $CLUSTER"
# Register the Redshift sink via the Connect REST API (PUT is idempotent:
# creates or updates the "redshift-sink" connector). The cluster endpoint is
# spliced into the single-quoted JSON via the '"$CLUSTER"' quoting trick;
# ${file:/data:...} placeholders are resolved by Connect's FileConfigProvider
# at runtime, not by this shell.
curl -X PUT \
-H "Content-Type: application/json" \
--data '{
"connector.class": "io.confluent.connect.aws.redshift.RedshiftSinkConnector",
"tasks.max": "1",
"topics": "orders",
"aws.redshift.domain": "'"$CLUSTER"'",
"aws.redshift.port": "5439",
"aws.redshift.database": "dev",
"aws.redshift.user": "masteruser",
"aws.redshift.password": "myPassword1",
"auto.create": "true",
"pk.mode": "kafka",
"confluent.topic.ssl.endpoint.identification.algorithm" : "https",
"confluent.topic.sasl.mechanism" : "PLAIN",
"confluent.topic.bootstrap.servers": "${file:/data:bootstrap.servers}",
"confluent.topic.sasl.jaas.config" : "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"${file:/data:sasl.username}\" password=\"${file:/data:sasl.password}\";",
"confluent.topic.security.protocol" : "SASL_SSL",
"confluent.topic.replication.factor": "3"
}' \
http://localhost:8083/connectors/redshift-sink/config | jq .
# Give the connector time to flush the record to Redshift before verifying.
sleep 20
log "Verify data is in Redshift"
# Query Redshift through a throwaway psql container. The heredoc supplies the
# password on the first stdin line (psql's interactive password prompt reads
# it), then the SELECT; timeout bounds the whole check at 30s.
# NOTE(review): presumably PGPASSWORD would be a less fragile way to pass the
# password — confirm before changing.
timeout 30 docker run -i debezium/postgres:10 psql -h $CLUSTER -U masteruser -d dev -p 5439 << EOF > /tmp/result.log
myPassword1
SELECT * from orders;
EOF
cat /tmp/result.log
# grep's non-zero exit under set -e fails the script if the record is missing.
grep "foo" /tmp/result.log
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment