@lacek
Created November 27, 2020 10:28
MySQL InnoDB Cluster on Docker

Testing MySQL InnoDB Cluster with Docker

  1. Spin up the cluster
docker-compose up -d
docker-compose exec mysql1 mysqlsh -f /init.js
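
Optionally, check that all three instances report ONLINE before moving on. This is an extra sanity check rather than part of the original steps; it assumes the MY_CLUSTER name from init.js and the root/root credentials from docker-compose.yml.

# the status report should show mysql1 as the primary (R/W) and mysql2/mysql3 as secondaries (R/O)
docker-compose exec mysql1 mysqlsh root:root@127.0.0.1:3306 -e "dba.getCluster('MY_CLUSTER').status()"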
  2. Test access to the cluster
# test access to the read/write instance, should always be mysql1
docker-compose exec router mysql -h 127.0.0.1 -P 6446 -e 'SELECT @@hostname'

# test access to read only instance, should be round robin of mysql2 and mysql3
docker-compose exec router mysql -h 127.0.0.1 -P 6447 -e 'SELECT @@hostname'
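
To make the round robin visible, the read only query can simply be repeated; the small loop below is a convenience and not part of the original gist.

# consecutive connections should alternate between mysql2 and mysql3
for i in 1 2 3 4; do docker-compose exec -T router mysql -h 127.0.0.1 -P 6447 -e 'SELECT @@hostname'; done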
  3. Try dropping a read only node
# kill a read only node
docker-compose kill mysql3

# the log should show that:
# - 'Member with address mysql3:3306 has become unreachable.'
# - 'Members removed from the group: mysql3:3306'
docker-compose logs -f --tail 10 mysql2

# read only access is now provided solely by mysql2
docker-compose exec router mysql -h 127.0.0.1 -P 6447 -e 'SELECT @@hostname'

# restore the node
docker-compose up -d mysql3

# the log should show that:
# - 'The member with address mysql3:3306 was declared online within the replication group.'
docker-compose logs -f --tail 10 mysql2
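
As an additional check (not part of the original steps), group membership can be read from performance_schema on any surviving member; while mysql3 rejoins, its state should move from RECOVERING to ONLINE.

docker-compose exec mysql1 mysql -e 'SELECT MEMBER_HOST, MEMBER_STATE, MEMBER_ROLE FROM performance_schema.replication_group_members'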
  4. Try dropping the primary node
# kill the primary node
docker-compose kill mysql1

# the log should show that:
# - 'Member with address mysql1:3306 has become unreachable.'
# - either mysql2 or mysql3 becomes the new primary
docker-compose logs -f --tail 10 mysql2

# read/write access is now provided by the new primary (mysql2 in this example)
docker-compose exec router mysql -h 127.0.0.1 -P 6446 -e 'SELECT @@hostname'

# restore the node
docker-compose up -d mysql1

# the log should show that:
# - 'The member with address mysql1:3306 was declared online within the replication group.'
docker-compose logs -f --tail 10 mysql2

# read only access should now round robin between the two secondaries (mysql1 and mysql3 if mysql2 became the primary)
docker-compose exec router mysql -h 127.0.0.1 -P 6447 -e 'SELECT @@hostname'
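
To reset the test environment afterwards (a cleanup step, not in the original instructions), stop the containers and remove the data directories; on Linux the data files may be owned by the mysql user inside the containers, so removing them can require sudo.

docker-compose down
rm -rf ./data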
config-client.cnf

[client]
user=root
password=root

config-server.cnf

[mysqld]
gtid-mode=ON
enforce-gtid-consistency=ON
default-authentication-plugin=mysql_native_password
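
The [mysqld] options above are Group Replication prerequisites (GTID-based replication). A quick way to confirm they are in effect on a running instance, relying on the client credentials above:

docker-compose exec mysql1 mysql -e 'SELECT @@gtid_mode, @@enforce_gtid_consistency, @@default_authentication_plugin'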
docker-compose.yml

version: '3'
services:
  mysql1:
    image: mysql/mysql-server:8.0.22
    container_name: innodb-cluster_mysql1
    hostname: mysql1
    environment:
      - MYSQL_ROOT_HOST=%
      - MYSQL_ROOT_PASSWORD=root
      - PEER_HOSTS=mysql2 mysql3
    command: --server_id=1
    volumes:
      - ./config-server.cnf:/etc/mysql/my.cnf
      - ./config-client.cnf:/root/.my.cnf
      - ./data/mysql1:/var/lib/mysql
      - ./init.js:/init.js
    restart: on-failure
  mysql2:
    image: mysql/mysql-server:8.0.22
    container_name: innodb-cluster_mysql2
    hostname: mysql2
    environment:
      - MYSQL_ROOT_HOST=%
      - MYSQL_ROOT_PASSWORD=root
    command: --server_id=2
    volumes:
      - ./config-server.cnf:/etc/mysql/my.cnf
      - ./config-client.cnf:/root/.my.cnf
      - ./data/mysql2:/var/lib/mysql
    restart: on-failure
  mysql3:
    image: mysql/mysql-server:8.0.22
    container_name: innodb-cluster_mysql3
    hostname: mysql3
    environment:
      - MYSQL_ROOT_HOST=%
      - MYSQL_ROOT_PASSWORD=root
    command: --server_id=3
    volumes:
      - ./config-server.cnf:/etc/mysql/my.cnf
      - ./config-client.cnf:/root/.my.cnf
      - ./data/mysql3:/var/lib/mysql
    restart: on-failure
  router:
    image: mysql/mysql-router:8.0.22
    container_name: innodb-cluster_router
    hostname: router
    environment:
      - MYSQL_HOST=mysql1
      - MYSQL_PORT=3306
      - MYSQL_USER=root
      - MYSQL_PASSWORD=root
      - MYSQL_INNODB_CLUSTER_MEMBERS=3
    ports:
      - "6446:6446"
      - "6447:6447"
    volumes:
      - ./config-client.cnf:/root/.my.cnf
    restart: on-failure
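
Because the router publishes 6446 (read/write) and 6447 (read only) to the host, the same checks can also be run from outside the containers, assuming a MySQL client is installed locally:

mysql -h 127.0.0.1 -P 6446 -u root -proot -e 'SELECT @@hostname'
mysql -h 127.0.0.1 -P 6447 -u root -proot -e 'SELECT @@hostname'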
init.js

#!/usr/bin/mysqlsh -f
const CLUSTER_NAME = 'MY_CLUSTER';
const PASSWORD = os.getenv('MYSQL_ROOT_PASSWORD');
const PEER_HOSTS = os.getenv('PEER_HOSTS').split(/\s+/);

print('Connecting...\n');
shell.connect('root@127.0.0.1', PASSWORD);

try {
  print('Creating cluster...\n');
  let cluster = dba.createCluster(CLUSTER_NAME);
  // add every peer listed in PEER_HOSTS, provisioning each one via clone
  PEER_HOSTS.forEach(host => {
    print('Adding ' + host + ' to the cluster\n');
    cluster.addInstance({
      host: host,
      password: PASSWORD
    }, {
      recoveryMethod: 'Clone'
    });
  });
  print('Cluster created\n');
} catch (e) {
  // createCluster() throws if the cluster metadata already exists,
  // e.g. after a full restart; recover the cluster instead of creating it
  print('Failed to create cluster. Trying to reboot cluster...\n');
  dba.rebootClusterFromCompleteOutage(CLUSTER_NAME, {
    rejoinInstances: PEER_HOSTS
  });
  print('Cluster rebooted\n');
}
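
The catch branch is what makes the script reusable after a full outage: once the cluster metadata exists, createCluster() throws and the script falls back to rebootClusterFromCompleteOutage(). So if all containers are stopped and started again, the cluster can be brought back by simply re-running the command from step 1:

docker-compose exec mysql1 mysqlsh -f /init.js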