Skip to content

Instantly share code, notes, and snippets.

@arif-ali
Created October 10, 2014 18:39
Show Gist options
  • Save arif-ali/7ddc55e3f5910d528029 to your computer and use it in GitHub Desktop.
Save arif-ali/7ddc55e3f5910d528029 to your computer and use it in GitHub Desktop.
HPC Heat template
heat_template_version: 2013-05-23

description: >
  HOT template to deploy one compute node into an existing neutron tenant
  network and assign a floating IP address to the server so it is routable
  from the public network.

parameters:
  key_name:
    type: string
    description: Name of keypair to assign to servers
  image:
    type: string
    description: Name of image to use for servers
  flavor:
    type: string
    description: Flavor to use for servers
  public_net_id:
    type: string
    description: >
      ID of public network for which floating IP addresses will be allocated
  private_net_id:
    type: string
    description: ID of private network into which servers get deployed
  private_subnet_id:
    type: string
    description: ID of private sub network into which servers get deployed
  master_node_ip:
    type: string
    description: IP address of the Master node.
  index_name:
    # Fixed: the original description was a copy/paste of master_node_ip's.
    type: number
    description: Index appended to "compute" to form this node's name.

resources:
  # Wait handle/condition so the stack blocks until the node's user_data
  # script signals completion (or the 1200s timeout expires).
  node_wait_handle:
    type: "AWS::CloudFormation::WaitConditionHandle"

  node_wait_condition:
    type: "AWS::CloudFormation::WaitCondition"
    depends_on:
      - compute_node
    properties:
      Handle:
        get_resource: node_wait_handle
      Timeout: "1200"

  # Wide-open security group (all ICMP/TCP/UDP) applied to the node's port.
  secgroup_all_open:
    type: "OS::Neutron::SecurityGroup"
    properties:
      rules:
        - protocol: icmp
        - protocol: tcp
        - protocol: udp

  compute_node:
    type: OS::Nova::Server
    properties:
      name: { "Fn::Join": ["", ["compute", { get_param: index_name }]] }
      image: { get_param: image }
      flavor: { get_param: flavor }
      key_name: { get_param: key_name }
      # NOTE(review): the original declared "networks" twice (a duplicate
      # mapping key — last-wins on most parsers); the first copy referenced
      # a non-existent "compute_port" resource. Only the valid entry,
      # pointing at compute_node_eth0, is kept.
      networks:
        - port: { get_resource: compute_node_eth0 }
      user_data_format: RAW
      user_data:
        str_replace:
          # Installs the Torque client, points pbs_mom at the master node,
          # then signals the wait condition.
          template: |
            #!/bin/sh
            yum -y remove NetworkManager
            chkconfig network on
            cat > /etc/yum.repos.d/torque.repo << EOF
            [torque]
            name=torque
            baseurl=http://192.168.33.200/install/post/otherpkgs/el7/torque
            enabled=1
            gpgcheck=0
            EOF
            yum -y install torque-client
            chkconfig pbs_mom on
            echo $MASTER_NODE_IP > /var/spool/torque/server_name
            cat > /var/spool/torque/mom_priv/config << EOF
            $logevent 0x1ff
            $pbsserver $MASTER_NODE_IP
            EOF
            service pbs_mom start
            myip=$(ip addr show eth0 | awk '$1 == "inet" {print $2}' | cut -f1 -d/)
            myip_last_octet=${myip##*.}
            cat > /tmp/wait-data << EOF
            {
              "Status" : "SUCCESS",
              "Reason" : "Setup Complete",
              "UniqueId" : "None",
              "Data" : "OK"
            }
            EOF
            curl -T /tmp/wait-data '$WAIT_HANDLE'
          params:
            "$MASTER_NODE_IP":
              get_param: master_node_ip
            "$WAIT_HANDLE":
              get_resource: node_wait_handle

  # Port on the private network; the floating IP below is bound to it.
  compute_node_eth0:
    type: OS::Neutron::Port
    properties:
      network_id: { get_param: private_net_id }
      fixed_ips:
        - subnet_id: { get_param: private_subnet_id }
      security_groups: [{ get_resource: secgroup_all_open }]

  compute_floating_ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network_id: { get_param: public_net_id }
      port_id: { get_resource: compute_node_eth0 }

  # NOTE(review): defined but not attached to any port in this template.
  compute_security_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for server
      name: security-group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp

outputs:
  compute_node_name:
    description: The name of the compute node
    value: { get_attr: [ compute_node, name ] }
  compute_node_ip:
    description: IP address of compute node in private network
    value: { get_attr: [ compute_node_eth0, fixed_ips, 0, ip_address ] }
  compute_node_external_ip:
    description: Floating IP address of compute node in public network
    value: { get_attr: [ compute_floating_ip, floating_ip_address ] }
heat_template_version: 2013-05-23

# NOTE(review): the original description was copy/pasted from a Kubernetes
# template ("minions", default "2"); corrected to match what this template
# actually deploys.
description: >
  This template boots a Torque HPC cluster: one master node plus one or
  more compute nodes (as specified by the number_of_compute_nodes
  parameter, which defaults to "10").

parameters:
  #
  # REQUIRED PARAMETERS
  #
  key_name:
    type: string
    description: name of ssh key to be provisioned on our server
  public_net_id:
    type: string
    description: uuid of a network to use for floating ip addresses
  #
  # OPTIONAL PARAMETERS
  #
  image:
    type: string
    default: centos7
    description: glance image used to boot the server
  flavor:
    type: string
    default: m1.small
    description: flavor to use when booting the server
  dns_nameserver:
    type: string
    description: address of a dns nameserver reachable in your environment
    default: 8.8.8.8
  number_of_compute_nodes:
    type: string
    description: how many compute nodes to spawn
    # quoted so the default matches the declared string type
    default: "10"

resources:
  master_wait_handle:
    type: "AWS::CloudFormation::WaitConditionHandle"

  master_wait_condition:
    type: "AWS::CloudFormation::WaitCondition"
    depends_on:
      - master_node
    properties:
      Handle:
        get_resource: master_wait_handle
      Timeout: "1200"

  ######################################################################
  #
  # network resources. allocate a network and router for our server.
  # it would also be possible to take advantage of existing network
  # resources (and have the deployer provide network and subnet ids,
  # etc, as parameters), but I wanted to minimize the amount of
  # configuration necessary to make this go.
  fixed_net:
    type: "OS::Neutron::Net"

  # This is the subnet on which we will deploy our server.
  fixed_subnet:
    type: "OS::Neutron::Subnet"
    properties:
      cidr: 10.0.9.0/24
      network_id: { get_resource: fixed_net }
      dns_nameservers: [{ get_param: dns_nameserver }]

  # create a router attached to the external network provided as a
  # parameter to this stack.
  extrouter:
    type: "OS::Neutron::Router"
    properties:
      external_gateway_info:
        network:
          get_param: public_net_id

  # attach fixed_subnet to our extrouter router.
  extrouter_inside:
    type: "OS::Neutron::RouterInterface"
    properties:
      router_id:
        get_resource: extrouter
      subnet_id: { get_resource: fixed_subnet }

  ######################################################################
  #
  # security groups. we need to permit network traffic of various
  # sorts.
  #
  secgroup_base:
    type: "OS::Neutron::SecurityGroup"
    properties:
      rules:
        - protocol: icmp
        - protocol: tcp
          port_range_min: 22
          port_range_max: 22

  secgroup_compute:
    type: "OS::Neutron::SecurityGroup"
    properties:
      rules:
        - protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - protocol: tcp
          port_range_min: 4001
          port_range_max: 4001

  ######################################################################
  #
  # master node. installs the Torque server and scheduler, registers the
  # compute nodes, and signals the wait condition when setup completes.
  # (original comment wrongly said "MySQL server")
  #
  master_node:
    type: "OS::Nova::Server"
    depends_on:
      - extrouter_inside
    properties:
      name: master
      image:
        get_param: image
      flavor:
        get_param: flavor
      key_name:
        get_param: key_name
      user_data_format: RAW
      user_data:
        str_replace:
          template: |
            #!/bin/sh
            yum -y upgrade
            cat > /etc/yum.repos.d/torque.repo << EOF
            [torque]
            name=torque
            baseurl=http://192.168.95.200/install/post/otherpkgs/el7/torque
            enabled=1
            gpgcheck=0
            EOF
            yum -y install torque-server torque-scheduler
            chkconfig pbs_server on
            chkconfig pbs_scheduler on
            myip=$(ip addr show eth0 | awk '$1 == "inet" {print $2}' | cut -f1 -d/)
            echo $myip > /var/spool/torque/server_name
            rm -rf /var/spool/torque/server_priv/nodes
            for node in `echo $COMP_NODE_NAMES`
            do
              echo $node >> /var/spool/torque/server_priv/nodes
            done
            # Parse the 2 variables to create /etc/hosts file.
            # NOTE(review): the original used ${COMP_NODE_.../ / }, which
            # str_replace never substitutes (the literal "$COMP_NODE_..."
            # does not occur inside "${...}"), so the arrays were empty.
            # Plain $VAR word-splitting achieves the intended split.
            array1=($COMP_NODE_ADDRESSES)
            array2=($COMP_NODE_NAMES)
            length=${#array1[@]}
            # was i<=length: off-by-one appended a blank trailing entry
            for ((i=0;i<$length;i++)); do
              echo -e "${array1[$i]} ${array2[$i]}"
            done >> /etc/hosts
            # Start the torque services
            service pbs_server start
            service pbs_scheduler start
            qmgr -c "c q testq"
            qmgr -c "s q testq queue_type=e"
            qmgr -c "s q testq enabled=t"
            qmgr -c "s q testq started=t"
            qmgr -c "s s scheduling=true"
            cat > /tmp/wait-data << EOF
            {
              "Status" : "SUCCESS",
              "Reason" : "Setup Complete",
              "UniqueId" : "None",
              "Data" : "OK"
            }
            EOF
            curl -T /tmp/wait-data '$WAIT_HANDLE'
          params:
            # get_attr on the ResourceGroup yields a list of per-member
            # values; pass that list directly to Fn::Join (the original
            # wrapped it in an extra [ ], producing a nested list).
            "$COMP_NODE_ADDRESSES": {"Fn::Join": [" ", {get_attr: [compute_nodes, compute_node_ip]}]}
            "$COMP_NODE_NAMES": {"Fn::Join": [" ", {get_attr: [compute_nodes, name]}]}
            "$WAIT_HANDLE":
              get_resource: master_wait_handle
      networks:
        - port:
            get_resource: master_node_eth0

  master_node_eth0:
    type: "OS::Neutron::Port"
    properties:
      network_id:
        get_resource: fixed_net
      security_groups:
        - get_resource: secgroup_base
        - get_resource: secgroup_compute
      fixed_ips:
        - subnet_id:
            get_resource: fixed_subnet

  master_node_floating:
    type: "OS::Neutron::FloatingIP"
    depends_on:
      - extrouter_inside
    properties:
      floating_network_id:
        get_param: public_net_id
      port_id:
        get_resource: master_node_eth0

  # One compute_node.yaml stack per node; %index% is the group's
  # per-member index substitution (quoted: "%" is a YAML indicator).
  compute_nodes:
    type: "OS::Heat::ResourceGroup"
    depends_on:
      - extrouter_inside
    properties:
      count: {get_param: number_of_compute_nodes}
      resource_def:
        type: compute_node.yaml
        properties:
          index_name: "%index%"
          key_name: {get_param: key_name}
          image: {get_param: image}
          flavor: {get_param: flavor}
          private_net_id: {get_resource: fixed_net}
          private_subnet_id: {get_resource: fixed_subnet}
          public_net_id: {get_param: public_net_id}
          master_node_ip: {get_attr: [master_node_eth0, fixed_ips, 0, ip_address]}

outputs:
  master_node:
    description: Floating IP address of the master node
    value: {get_attr: [master_node_floating, floating_ip_address]}
  compute_nodes:
    description: Private IP addresses of the compute nodes
    value: {get_attr: [compute_nodes, compute_node_ip]}
  compute_names:
    description: Names of the compute nodes
    value: {get_attr: [compute_nodes, name]}
  compute_node_external:
    description: Floating IP addresses of the compute nodes
    value: {get_attr: [compute_nodes, compute_node_external_ip]}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment