AutoScaling on Fujitsu Cloud Service K5
# Basic K5 template to demonstrate Fujitsu's HEAT Autoscaling enhancements
# Author: Graham J Land
# Date: 10/10/2017
heat_template_version: 2013-05-23
description: >
  Fujitsu Cloud Service K5 IaaS AutoScaling Example Template.
# The prerequisites for a successful deployment
parameters:
  # target availability zone
  az:
    type: string
    default: uk-1b
  # server to be scaled - a simple nodejs app in this demo
  param_image_id:
    type: string
    default: bc4d2c64-1694-4488-80e2-e089bd18fc42
  # t-shirt size to use
  param_flavor:
    type: string
    default: S-1
  # ssh key to be injected into the scaled servers
  key_name:
    type: string
    description: SSH key to connect to the servers
    default: LEMP-KP-AZ2
  # existing router in the project with an external gateway configured
  autoscale_router:
    type: string
    default: 5b29b682-df94-4178-b1b4-9bf487055787
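
# A minimal launch sketch (not part of the original gist): assuming the
# standard python-heatclient CLI is configured against the K5 orchestration
# endpoint and this file is saved as autoscale.yaml, the stack can be created
# with parameter overrides as shown; the stack name and router id here are
# illustrative only.
#
#   heat stack-create autoscale-demo -f autoscale.yaml \
#     -P az=uk-1b \
#     -P key_name=LEMP-KP-AZ2 \
#     -P autoscale_router=<your-router-id>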
# what actually gets built
resources:
  # create a private network
  autoscale_private_net_az:
    type: OS::Neutron::Net
    properties:
      availability_zone: { get_param: az }
      name: "autoscale_private_net"
  # create a new subnet on the private network above
  autoscale_private_subnet_az:
    type: OS::Neutron::Subnet
    depends_on: autoscale_private_net_az
    properties:
      availability_zone: { get_param: az }
      name: "autoscale_private_subnet_az"
      network_id: { get_resource: autoscale_private_net_az }
      cidr: "192.168.200.0/24"
      gateway_ip: "192.168.200.254"
      allocation_pools:
        - start: "192.168.200.100"
          end: "192.168.200.150"
      dns_nameservers: ["62.60.42.9", "62.60.42.10"]
  # connect an interface on the network's subnet to the existing router
  az_router_interface:
    type: OS::Neutron::RouterInterface
    depends_on: [autoscale_private_subnet_az]
    properties:
      router_id: { get_param: autoscale_router }
      subnet_id: { get_resource: autoscale_private_subnet_az }
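  # A lookup sketch (assumes the python-neutronclient CLI is pointed at the
  # K5 networking endpoint): the id passed in via the 'autoscale_router'
  # parameter above can be found with:
  #   neutron router-list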
  # create a new security group restricting access to your own PC
  # just google "what's my ip" to determine your public NAT address
  # mine was 31.53.253.24 during the demo below
  security_group_01:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Security group rules for direct server access
      name: AutoScaleServer
      rules:
        # allow ssh (port 22) connections from my pc
        - remote_ip_prefix: 31.53.253.24/32
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        # allow ping packets from my pc
        - remote_ip_prefix: 31.53.253.24/32
          protocol: icmp
  # create an open security group so everyone can access the public LBaaS
  security_group_02:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Security group rules for the public load balancer
      name: AutoScaleLBaaS
      rules:
        # allow http (port 80) traffic from the whole internet
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 80
          port_range_max: 80
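  # To adapt the AutoScaleServer rules above, replace 31.53.253.24 with your
  # own public NAT address; one quick check from a shell (assumes outbound
  # HTTPS access):
  #   curl https://ifconfig.me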
# define the scaling server pool
web_server_group:
depends_on: [ az_router_interface ]
type: FCX::AutoScaling::AutoScalingGroup
properties:
AvailabilityZones: [{get_param: az}]
LaunchConfigurationName: {get_resource: launch_config}
MinSize: '1'
MaxSize: '3'
VPCZoneIdentifier: [ {get_resource: autoscale_private_subnet_az} ]
LoadBalancerNames: [ {get_resource: eLBint} ]
  # this is the actual scalable unit of deployment - the web server
  launch_config:
    type: FCX::AutoScaling::LaunchConfiguration
    depends_on: [ security_group_01, az_router_interface ]
    properties:
      ImageId: { get_param: param_image_id }
      InstanceType: { get_param: param_flavor }
      KeyName: {get_param: key_name}
      SecurityGroups: [ {get_resource: security_group_01}, {get_resource: security_group_02} ]
      BlockDeviceMappingsV2:
        - source_type: 'image'
          destination_type: 'volume'
          boot_index: '0'
          device_name: '/dev/vda'
          volume_size: '3'
          uuid: {get_param: param_image_id}
          delete_on_termination: true
      UserData: |
        #!/bin/bash
        sudo hostname `hostname`
        echo "Rebooting Hack"
        sudo reboot
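  # NOTE on the UserData above: the reboot is the author's "Rebooting Hack";
  # presumably (an assumption, not confirmed by the source) it forces the
  # guest to come back up with its metadata-assigned hostname applied.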
  # create the load balancer that will distribute traffic
  # across the scaling instances
  eLBint:
    type: FJ::ExpandableLoadBalancer::LoadBalancer
    depends_on: [ security_group_01, az_router_interface ]
    properties:
      Subnets: [ {get_resource: autoscale_private_subnet_az} ]
      Listeners:
        - {LoadBalancerPort: '80', InstancePort: '80', Protocol: 'HTTP', InstanceProtocol: 'HTTP'}
      HealthCheck: {Target: 'HTTP:80/', HealthyThreshold: '2', UnhealthyThreshold: '3', Interval: '5', Timeout: '5'}
      Version: 2014-09-30
      Scheme: public
      LoadBalancerName: autoscaler
      SecurityGroups: [ {get_resource: security_group_02} ]
  # create the scale-out policy
  web_server_scaleout_policy:
    type: FCX::AutoScaling::ScalingPolicy
    properties:
      AdjustmentType: ChangeInCapacity
      AutoScalingGroupName: {get_resource: web_server_group}
      Cooldown: '10'
      ScalingAdjustment: '1'
  # create the scale-in policy
  web_server_scalein_policy:
    type: FCX::AutoScaling::ScalingPolicy
    properties:
      AdjustmentType: ChangeInCapacity
      AutoScalingGroupName: {get_resource: web_server_group}
      Cooldown: '10'
      ScalingAdjustment: '-1'
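  # The policies above can also be exercised by hand: each one exposes a
  # pre-signed webhook via its AlarmUrl attribute (the same URL the alarms
  # below POST to). A manual test sketch, assuming the URL has been surfaced
  # through an outputs section such as the one at the end of this file:
  #   curl -X POST '<scaleout_url value from the stack outputs>'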
  # create the ALARM event which triggers when
  # the servers are overloaded
  cpu_alarm_high:
    type: OS::Ceilometer::Alarm
    properties:
      description: Scale-out if the average CPU > 50% for 1 minute
      meter_name: fcx.compute.cpu_util
      statistic: avg
      period: '60'
      evaluation_periods: '1'
      threshold: '50'
      alarm_actions:
        - {get_attr: [web_server_scaleout_policy, AlarmUrl]}
      matching_metadata: {'metadata.user_metadata.groupname': {get_resource: web_server_group}}
      comparison_operator: gt
  # create the 'reset' ALARM event which triggers when
  # workloads return to normal
  cpu_alarm_low:
    type: OS::Ceilometer::Alarm
    properties:
      description: Scale-in if the average CPU < 15% for 1 minute
      meter_name: fcx.compute.cpu_util
      statistic: avg
      period: '60'
      evaluation_periods: '1'
      threshold: '15'
      alarm_actions:
        - {get_attr: [web_server_scalein_policy, AlarmUrl]}
      matching_metadata: {'metadata.user_metadata.groupname': {get_resource: web_server_group}}
      comparison_operator: lt
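
# Hypothetical convenience additions (not in the original gist): an outputs
# section exposing each policy's AlarmUrl webhook for the manual test noted
# above. get_attr on AlarmUrl is the same lookup the alarms already use.
outputs:
  scaleout_url:
    description: Webhook that triggers a scale-out when POSTed to
    value: {get_attr: [web_server_scaleout_policy, AlarmUrl]}
  scalein_url:
    description: Webhook that triggers a scale-in when POSTed to
    value: {get_attr: [web_server_scalein_policy, AlarmUrl]}

# To drive cpu_alarm_high for real, generate CPU load on a scaled instance
# (a sketch; the key file, user name and address are assumptions):
#   ssh -i LEMP-KP-AZ2.pem ubuntu@<instance-ip> 'yes > /dev/null &'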