@mariash
Created March 31, 2021 22:09
diff --git a/agent/bootstrap.go b/agent/bootstrap.go
index 6e6a97f8..3944aa24 100644
--- a/agent/bootstrap.go
+++ b/agent/bootstrap.go
@@ -4,8 +4,10 @@ import (
"encoding/json"
"errors"
"fmt"
+ "os"
"path"
"path/filepath"
+ "time"
"github.com/cloudfoundry/bosh-agent/agent/applier/applyspec"
boshplatform "github.com/cloudfoundry/bosh-agent/platform"
@@ -106,7 +108,7 @@ func (boot bootstrap) Run() (err error) {
ephemeralDiskPath := boot.platform.GetEphemeralDiskPath(settings.EphemeralDiskSettings())
desiredSwapSizeInBytes := settings.Env.GetSwapSizeInBytes()
- if err = boot.platform.SetupEphemeralDiskWithPath(ephemeralDiskPath, desiredSwapSizeInBytes, settings.AgentID); err != nil {
+ if err = boot.platform.SetupEphemeralDiskWithPath(ephemeralDiskPath, desiredSwapSizeInBytes, ""); err != nil {
return bosherr.WrapError(err, "Setting up ephemeral disk")
}
@@ -122,6 +124,18 @@ func (boot bootstrap) Run() (err error) {
return bosherr.WrapError(err, "Setting up log dir")
}
+ go func() {
+ for {
+ boot.settingsService.LoadSettings()
+ newSettings := boot.settingsService.GetSettings()
+ if newSettings.Networks["default"].Mac != settings.Networks["default"].Mac {
+ boot.logger.Error(boot.logTag, "Detected MAC address change, exiting to restart agent")
+ os.Exit(155)
+ }
+ time.Sleep(5 * time.Second)
+ }
+ }()
+
if err = boot.platform.SetTimeWithNtpServers(settings.GetNtpServers()); err != nil {
return bosherr.WrapError(err, "Setting up NTP servers")
}
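The goroutine added to bootstrap.Run is the agent-side crux of the experiment: after an instant clone the VM comes back with new MAC addresses, so the agent re-reads its settings every five seconds and exits with status 155, letting its supervisor restart it against the refreshed network settings (that the supervisor restarts on a non-zero exit is an assumption, as is the error handling below, which the quick hack omits). A standalone sketch of the same loop:

```go
// Sketch only; names follow the diff (boot.settingsService, boot.logger,
// Networks["default"].Mac). Added here: the LoadSettings error check and
// the map-presence checks the spike skips.
go func() {
	for {
		if err := boot.settingsService.LoadSettings(); err != nil {
			boot.logger.Error(boot.logTag, "Reloading settings: %s", err.Error())
			time.Sleep(5 * time.Second)
			continue
		}
		newSettings := boot.settingsService.GetSettings()
		oldNet, oldOK := settings.Networks["default"]
		newNet, newOK := newSettings.Networks["default"]
		if oldOK && newOK && newNet.Mac != oldNet.Mac {
			boot.logger.Error(boot.logTag, "Detected MAC address change, exiting to restart agent")
			os.Exit(155)
		}
		time.Sleep(5 * time.Second)
	}
}()
```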
diff --git a/platform/disk/ephemeral_device_partitioner.go b/platform/disk/ephemeral_device_partitioner.go
index 50c4b7e9..dddb9d9a 100644
--- a/platform/disk/ephemeral_device_partitioner.go
+++ b/platform/disk/ephemeral_device_partitioner.go
@@ -39,6 +39,7 @@ func (p *EphemeralDevicePartitioner) Partition(devicePath string, partitions []P
return bosherr.WrapErrorf(err, "Getting existing partitions of `%s'", devicePath)
}
+ p.logger.Debug(p.logTag, "Existing partitions: %#v, desired partitions: %#v", existingPartitions, partitions)
if p.matchPartitionNames(existingPartitions, partitions, deviceFullSizeInBytes) {
p.logger.Info(p.logTag, "%s already partitioned as expected, skipping", devicePath)
return nil
@@ -65,7 +66,7 @@ func (p *EphemeralDevicePartitioner) GetPartitions(devicePath string) (partition
return p.partedPartitioner.GetPartitions(devicePath)
}
-func (p *EphemeralDevicePartitioner) matchPartitionNames(existingPartitions []ExistingPartition, desiredPartitions []Partition, deviceSizeInBytes uint64) bool {
+func (p *EphemeralDevicePartitioner) matchPartitionNames(existingPartitions []ExistingPartition, desiredPartitions []Partition, deviceSizeInBytes uint64) bool {
if len(existingPartitions) < len(desiredPartitions) {
return false
}
diff --git a/platform/net/interface_configuration_creator.go b/platform/net/interface_configuration_creator.go
index 48651bcb..ac71b0f5 100644
--- a/platform/net/interface_configuration_creator.go
+++ b/platform/net/interface_configuration_creator.go
@@ -132,27 +132,27 @@ func (creator interfaceConfigurationCreator) CreateInterfaceConfigurations(netwo
// In cases where we only have one network and it has no MAC address (either because the IAAS doesn't give us one or
// it's an old CPI), if we only have one interface, we should map them
- if len(networks) == 1 && len(interfacesByMAC) == 1 {
+ //if len(networks) == 1 && len(interfacesByMAC) == 1 {
networkSettings := creator.getFirstNetwork(networks)
- if networkSettings.Mac == "" {
+ //if networkSettings.Mac == "" {
var ifaceName string
networkSettings.Mac, ifaceName = creator.getFirstInterface(interfacesByMAC)
return creator.createInterfaceConfiguration([]StaticInterfaceConfiguration{}, []DHCPInterfaceConfiguration{}, ifaceName, networkSettings)
- }
- }
+ //}
+ //}
- return creator.createMultipleInterfaceConfigurations(networks, interfacesByMAC)
+ //return creator.createMultipleInterfaceConfigurations(networks, interfacesByMAC)
}
func (creator interfaceConfigurationCreator) createMultipleInterfaceConfigurations(networks boshsettings.Networks, interfacesByMAC map[string]string) ([]StaticInterfaceConfiguration, []DHCPInterfaceConfiguration, error) {
// Validate potential MAC values on networks exist on host
- for name := range networks {
- if mac := networks[name].Mac; mac != "" {
- if _, ok := interfacesByMAC[mac]; !ok {
- return nil, nil, bosherr.Errorf("No device found for network '%s' with MAC address '%s'", name, mac)
- }
- }
- }
+ //for name := range networks {
+ // if mac := networks[name].Mac; mac != "" {
+ // if _, ok := interfacesByMAC[mac]; !ok {
+ // return nil, nil, bosherr.Errorf("No device found for network '%s' with MAC address '%s'", name, mac)
+ // }
+ // }
+ //}
// Configure interfaces with network settings matching MAC address.
// If we cannot find a network setting with a matching MAC address, configure that interface as DHCP
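Context for this hunk (inferred from the surrounding changes): after an instant clone, the interfaces' MAC addresses no longer line up with what the settings-to-interface matching expects, so the experiment force-enables the single-network fallback — map the first network onto the first interface regardless of MAC — and disables the MAC-validation loop entirely rather than fixing the matching.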
diff --git a/platform/net/ubuntu_net_manager.go b/platform/net/ubuntu_net_manager.go
index 1fa78443..604cedff 100644
--- a/platform/net/ubuntu_net_manager.go
+++ b/platform/net/ubuntu_net_manager.go
@@ -106,22 +106,22 @@ func (net UbuntuNetManager) SetupNetworking(networks boshsettings.Networks, errC
}
}
- changed, err := net.writeNetConfigs(dhcpConfigs, staticConfigs, dnsServers, boshsys.ConvergeFileContentsOpts{})
+ _, err = net.writeNetConfigs(dhcpConfigs, staticConfigs, dnsServers, boshsys.ConvergeFileContentsOpts{})
if err != nil {
return bosherr.WrapError(err, "Updating network configs")
}
- if changed {
+ //if changed {
err = net.removeDhcpDNSConfiguration()
if err != nil {
return err
}
- err := net.restartNetworking()
+ err = net.restartNetworking()
if err != nil {
return bosherr.WrapError(err, "Failure restarting networking")
}
- }
+ //}
staticAddresses, dynamicAddresses := net.ifaceAddresses(staticConfigs, dhcpConfigs)
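Same theme in the Ubuntu net manager: the `if changed` guard is bypassed so the DHCP DNS cleanup and the networking restart run unconditionally — apparently because a freshly cloned VM needs its links re-applied even when the rendered config files converge to byte-identical content.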
diff --git a/src/bosh-director/lib/bosh/director.rb b/src/bosh-director/lib/bosh/director.rb
index 63536d86bf..5eaf0437ad 100644
--- a/src/bosh-director/lib/bosh/director.rb
+++ b/src/bosh-director/lib/bosh/director.rb
@@ -110,8 +110,10 @@ require 'bosh/director/instance_deleter'
require 'bosh/director/instance_updater'
require 'bosh/director/instance_updater/instance_state'
require 'bosh/director/instance_updater/recreate_handler'
+require 'bosh/director/instance_updater/recreate_clone_handler'
require 'bosh/director/instance_updater/state_applier'
require 'bosh/director/instance_updater/update_procedure'
+require 'bosh/director/instance_updater/update_with_clone_procedure'
require 'bosh/director/disk_manager'
require 'bosh/director/orphan_disk_manager'
require 'bosh/director/orphan_network_manager'
diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb
index 0c2e802d33..27ce48608f 100644
--- a/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb
+++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb
@@ -9,6 +9,7 @@ require 'bosh/director/deployment_plan/steps/apply_vm_spec_step'
require 'bosh/director/deployment_plan/steps/attach_disk_step'
require 'bosh/director/deployment_plan/steps/attach_instance_disks_step'
require 'bosh/director/deployment_plan/steps/create_vm_step'
+require 'bosh/director/deployment_plan/steps/clone_vm_step'
require 'bosh/director/deployment_plan/steps/commit_instance_network_settings_step'
require 'bosh/director/deployment_plan/steps/delete_vm_step'
require 'bosh/director/deployment_plan/steps/detach_disk_step'
diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/clone_vm_step.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps/clone_vm_step.rb
new file mode 100644
index 0000000000..80dddcc6ba
--- /dev/null
+++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/clone_vm_step.rb
@@ -0,0 +1,194 @@
+module Bosh::Director
+ module DeploymentPlan
+ module Steps
+ class CloneVmStep
+ include PasswordHelper
+
+ def initialize(source_instance, instance_plan, agent_broadcaster, disks, tags = {}, use_existing = false)
+ @source_instance = source_instance
+ @instance_plan = instance_plan
+ @agent_broadcaster = agent_broadcaster
+ @disks = disks
+ @use_existing = use_existing
+ @tags = tags
+ @logger = Config.logger
+ @vm_deleter = VmDeleter.new(@logger, false, Config.enable_virtual_delete_vms)
+ @variables_interpolator = Bosh::Director::ConfigServer::VariablesInterpolator.new
+ @blobstore = App.instance.blobstores.blobstore
+ end
+
+ def perform(report)
+ instance = @instance_plan.instance
+
+ cpi_factory, stemcell_model = choose_factory_and_stemcell_model(@instance_plan, @use_existing)
+
+ instance_model = instance.model
+ @logger.info('Creating VM')
+
+ vm = clone(
+ @source_instance,
+ instance,
+ stemcell_model.cid,
+ instance.cloud_properties,
+ @instance_plan.network_settings_hash,
+ @disks,
+ instance.env,
+ cpi_factory,
+ stemcell_model.api_version,
+ )
+
+ begin
+ report.vm = vm
+ update_metadata(@instance_plan, vm, cpi_factory)
+ rescue Exception => e
+ @logger.error("Failed to create/contact VM #{instance_model.vm_cid}: #{e.inspect}")
+ if Config.keep_unreachable_vms
+ @logger.info('Keeping the VM for debugging')
+ else
+ DeleteVmStep.new.perform(report)
+ end
+ raise e
+ end
+ end
+
+ private
+
+ def update_metadata(instance_plan, vm, factory)
+ instance_model = instance_plan.instance.model
+ MetadataUpdater.build.update_vm_metadata(instance_model, vm, @tags, factory)
+ agent_client = AgentClient.with_agent_id(vm.agent_id, instance_model.name)
+ agent_client.wait_until_ready
+
+ if Config.flush_arp
+ ip_addresses = instance_plan.network_settings_hash.map do |index, network|
+ network['ip']
+ end.compact
+
+ @agent_broadcaster.delete_arp_entries(vm.cid, ip_addresses)
+ end
+ end
+
+ def choose_factory_and_stemcell_model(instance_plan, use_existing)
+ deployment = instance_plan.instance.model.deployment
+
+ if use_existing && !!instance_plan.existing_instance.availability_zone
+ factory = AZCloudFactory.create_from_deployment(deployment)
+ az = instance_plan.existing_instance.availability_zone
+ else
+ factory = AZCloudFactory.create_with_latest_configs(deployment)
+ az = instance_plan.instance.availability_zone_name
+ end
+
+ stemcell_model = instance_plan.instance.stemcell.model_for_az(az, factory)
+ [factory, stemcell_model]
+ end
+
+ def clone(source_instance, instance, stemcell_cid, cloud_properties, network_settings, disks, env, cloud_factory, stemcell_api_version)
+ instance_model = instance.model
+ deployment_name = instance_model.deployment.name
+ parent_id = add_event(deployment_name, instance_model.name, 'create')
+ agent_id = SecureRandom.uuid
+
+ env = @variables_interpolator.interpolate_with_versioning(env, instance.desired_variable_set)
+ cloud_properties = @variables_interpolator.interpolate_with_versioning(cloud_properties, instance.desired_variable_set)
+ network_settings = @variables_interpolator.interpolate_with_versioning(network_settings, instance.desired_variable_set)
+
+ cpi = cloud_factory.get_name_for_az(instance_model.availability_zone)
+
+ vm_options = { instance: instance_model, agent_id: agent_id, cpi: cpi }
+
+ env['bosh'] ||= {}
+ env['bosh'] = Config.agent_env.merge(env['bosh'])
+
+ @blobstore.validate!(env['bosh'].fetch('blobstores', [{}]).first.fetch('options', {}), stemcell_api_version)
+ remove_blobstore_credentials(env) if @blobstore.can_sign_urls?(stemcell_api_version)
+ env['bosh']['tags'] = @tags unless @tags.empty?
+
+ if Config.nats_server_ca
+ env['bosh'] ||= {}
+ env['bosh']['mbus'] ||= {}
+ env['bosh']['mbus']['cert'] ||= {}
+ env['bosh']['mbus']['cert']['ca'] = Config.nats_server_ca
+ cert_generator = NatsClientCertGenerator.new(@logger)
+ agent_cert_key_result = cert_generator.generate_nats_client_certificate "#{agent_id}.agent.bosh-internal"
+ env['bosh']['mbus']['cert']['certificate'] = agent_cert_key_result[:cert].to_pem
+ env['bosh']['mbus']['cert']['private_key'] = agent_cert_key_result[:key].to_pem
+ end
+
+ password = env.fetch('bosh', {}).fetch('password', "")
+ if Config.generate_vm_passwords && password == ""
+ env['bosh'] ||= {}
+ env['bosh']['password'] = sha512_hashed_password
+ end
+
+ if instance_model.job
+ env['bosh'] ||= {}
+ env['bosh']['group'] = Canonicalizer.canonicalize("#{Bosh::Director::Config.name}-#{deployment_name}-#{instance_model.job}")
+ env['bosh']['groups'] = [
+ Bosh::Director::Config.name,
+ deployment_name,
+ instance_model.job,
+ "#{Bosh::Director::Config.name}-#{deployment_name}",
+ "#{deployment_name}-#{instance_model.job}",
+ "#{Bosh::Director::Config.name}-#{deployment_name}-#{instance_model.job}",
+ ]
+ env['bosh']['groups'].map! { |name| Canonicalizer.canonicalize(name) }
+ end
+
+ count = 0
+ begin
+ cloud = cloud_factory.get(vm_options[:cpi], stemcell_api_version)
+ create_vm_obj = cloud.instant_clone_vm(agent_id, source_instance.model.vm_cid, cloud_properties, network_settings, disks, env)
+ vm_cid = create_vm_obj
+ rescue Bosh::Clouds::VMCreationFailed => e
+ count += 1
+ @logger.error("failed to create VM, retrying (#{count})")
+ retry if e.ok_to_retry && count < Config.max_vm_create_tries
+ raise e
+ end
+
+ vm_options[:cid] = vm_cid
+ vm_options[:created_at] = Time.now
+ vm_options[:stemcell_api_version] = stemcell_api_version
+ vm_model = Models::Vm.create(vm_options)
+ vm_model
+ rescue => e
+ @logger.error("error creating vm: #{e.message}")
+ if vm_cid
+ parent_id = add_event(deployment_name, instance_model.name, 'delete', vm_cid)
+ @vm_deleter.delete_vm_by_cid(vm_cid, stemcell_api_version)
+ add_event(deployment_name, instance_model.name, 'delete', vm_cid, parent_id)
+ end
+ raise e
+ ensure
+ add_event(deployment_name, instance_model.name, 'create', vm_cid, parent_id, e)
+ end
+
+ def add_event(deployment_name, instance_name, action, object_name = nil, parent_id = nil, error = nil)
+ event = Config.current_job.event_manager.create_event(
+ {
+ parent_id: parent_id,
+ user: Config.current_job.username,
+ action: action,
+ object_type: 'vm',
+ object_name: object_name,
+ task: Config.current_job.task_id,
+ deployment: deployment_name,
+ instance: instance_name,
+ error: error,
+ }
+ )
+ event.id
+ end
+
+ def remove_blobstore_credentials(env)
+ env['bosh'].fetch('blobstores', [{}]).each do |blobstore|
+ blobstore.fetch('options', {}).reject! do |k, _|
+ @blobstore.redacted_credential_properties_list.include?(k)
+ end
+ end
+ end
+ end
+ end
+ end
+end
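CloneVmStep is a near-copy of the existing CreateVmStep: the same env, NATS-certificate, password, and bosh group wiring, with the create_vm CPI call swapped for instant_clone_vm (which additionally takes the source VM's cid) and the VMCreationFailed retry loop retained.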
diff --git a/src/bosh-director/lib/bosh/director/instance_group_updater.rb b/src/bosh-director/lib/bosh/director/instance_group_updater.rb
index dbb14e45c7..ecc5ed6082 100644
--- a/src/bosh-director/lib/bosh/director/instance_group_updater.rb
+++ b/src/bosh-director/lib/bosh/director/instance_group_updater.rb
@@ -68,7 +68,7 @@ module Bosh::Director
end
def update_instance_group(instance_plans, canaries_updated, event_log_stage)
- @logger.info("Starting to update instance group '#{@instance_group.name}'")
+ @logger.info("Starting to update instance group '#{@instance_group.name}', instance_plans: #{instance_plans.inspect}")
ThreadPool.new(max_threads: @instance_group.update.max_in_flight(instance_plans.size)).wrap do |pool|
unless canaries_updated
@@ -82,7 +82,7 @@ module Bosh::Director
@logger.info('Finished canary update')
end
- @logger.info('Continuing the rest of the update')
+ @logger.info("Continuing the rest of the update: instance_plans: #{instance_plans.inspect}")
update_instances(pool, instance_plans, event_log_stage)
@logger.info('Finished the rest of the update')
end
@@ -171,8 +171,13 @@ module Bosh::Director
end
def update_instances(pool, instance_plans, event_log_stage)
+ return if instance_plans.empty?
+
+ instance_plan = instance_plans.shift
+ source_instance = instance_plan.instance
+ update_instance(instance_plan, event_log_stage)
instance_plans.each do |instance_plan|
- pool.process { update_instance(instance_plan, event_log_stage) }
+ pool.process { update_instance_with_instant_clone(source_instance, instance_plan, event_log_stage) }
end
end
@@ -191,5 +196,21 @@ module Bosh::Director
end
end
end
+
+ def update_instance_with_instant_clone(source_instance, instance_plan, event_log_stage)
+ event_log_stage.advance_and_track(
+ instance_plan.instance.model.to_s,
+ ) do |task|
+ with_thread_name("instance_update_with_instant_clone(#{instance_plan.instance.model})") do
+ InstanceUpdater.new_instance_updater(
+ @ip_provider, @template_blob_cache, @dns_encoder,
+ @link_provider_intents, task
+ ).update_with_instant_clone(source_instance, instance_plan)
+ rescue Exception => e
+ @logger.error("Error updating instance: #{e.inspect}\n#{e.backtrace.join("\n")}")
+ raise
+ end
+ end
+ end
end
end
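The update_instances change above is the director-side heart of the experiment: the first instance plan is updated synchronously in the calling thread (a conventional full update, so a running source VM exists), and only the remaining plans are fanned out to the thread pool as instant clones of that source. One side effect: a single-instance group never takes the clone path. The post-patch method, reassembled from the hunk (variable names mine):

```ruby
def update_instances(pool, instance_plans, event_log_stage)
  return if instance_plans.empty?

  # Seed VM: updated synchronously so a clone source exists first.
  seed_plan = instance_plans.shift
  update_instance(seed_plan, event_log_stage)

  # Everything else is instant-cloned from the seed, in parallel.
  instance_plans.each do |plan|
    pool.process { update_instance_with_instant_clone(seed_plan.instance, plan, event_log_stage) }
  end
end
```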
diff --git a/src/bosh-director/lib/bosh/director/instance_updater.rb b/src/bosh-director/lib/bosh/director/instance_updater.rb
index aa575f01b8..cf600373fc 100644
--- a/src/bosh-director/lib/bosh/director/instance_updater.rb
+++ b/src/bosh-director/lib/bosh/director/instance_updater.rb
@@ -66,6 +66,30 @@ module Bosh::Director
)
end
+ def update_with_instant_clone(source_instance, instance_plan, options = {})
+ instance = instance_plan.instance
+ @links_manager = Bosh::Director::Links::LinksManager.new(instance.deployment_model.links_serial_id)
+ instance_report = DeploymentPlan::Stages::Report.new.tap { |r| r.vm = instance.model.active_vm }
+
+ update_procedure = get_update_with_clone_procedure(source_instance, instance, instance_plan, options, instance_report)
+
+ if instance_plan.changed?
+ parent_id = add_event(instance.deployment_model.name,
+ update_procedure.action,
+ instance.model.name,
+ update_procedure.context)
+ end
+ @logger.info("Updating instance #{instance} with instant clone, changes: #{instance_plan.changes.to_a.join(', ').inspect}")
+
+ InstanceUpdater::InstanceState.with_instance_update_and_event_creation(
+ instance.model,
+ parent_id,
+ instance.deployment_model.name,
+ update_procedure.action,
+ &update_procedure
+ )
+ end
+
def needs_recreate?(instance_plan)
if instance_plan.needs_shutting_down?
@logger.debug('VM needs to be shutdown before it can be updated.')
@@ -96,6 +120,26 @@ module Bosh::Director
)
end
+ def get_update_with_clone_procedure(source_instance, instance, instance_plan, options, instance_report)
+ UpdateWithCloneProcedure.new(
+ source_instance,
+ instance,
+ instance_plan,
+ options,
+ @blobstore,
+ needs_recreate?(instance_plan),
+ instance_report,
+ @disk_manager,
+ @rendered_templates_persistor,
+ @vm_creator,
+ @links_manager,
+ @ip_provider,
+ @dns_state_updater,
+ @logger,
+ @task,
+ )
+ end
+
def add_event(deployment_name,
action, instance_name = nil,
context = nil,
diff --git a/src/bosh-director/lib/bosh/director/instance_updater/recreate_clone_handler.rb b/src/bosh-director/lib/bosh/director/instance_updater/recreate_clone_handler.rb
new file mode 100644
index 0000000000..e35e31dea9
--- /dev/null
+++ b/src/bosh-director/lib/bosh/director/instance_updater/recreate_clone_handler.rb
@@ -0,0 +1,97 @@
+module Bosh::Director
+ class InstanceUpdater
+ class RecreateCloneHandler
+ attr_reader :instance, :instance_plan, :instance_report, :new_vm, :instance_model, :deleted_vm, :deleted_vm_id
+ def initialize(logger, vm_creator, ip_provider, instance_plan, instance_report, instance, source_instance)
+ @logger = logger
+ @vm_creator = vm_creator
+ @ip_provider = ip_provider
+ @instance_plan = instance_plan
+ @instance_report = instance_report
+ @deleted_vm_id = -1
+ @instance_model = instance_plan.instance.model
+ @new_vm = instance_model.most_recent_inactive_vm || instance_model.active_vm
+ @instance = instance
+ @source_instance = source_instance
+ end
+
+ def perform
+ @logger.debug('Failed to update in place. Recreating VM')
+
+ if instance_plan.unresponsive_agent?
+ @deleted_vm_id = instance_report.vm.id
+ delete_vm
+ else
+ detach_disks
+ @delete_vm_first = true
+ end
+
+ any_create_swap_delete_vms? ? create_swap_delete : delete_create
+ end
+
+ private
+
+ def create_swap_delete
+ elect_active_vm
+ orphan_inactive_vms
+
+ attach_disks if instance_plan.needs_disk?
+
+ instance.update_instance_settings
+ end
+
+ def delete_create
+ delete_vm if @delete_vm_first
+ create_vm
+ end
+
+ def delete_vm
+ DeploymentPlan::Steps::DeleteVmStep
+ .new(true, false, Config.enable_virtual_delete_vms)
+ .perform(instance_report)
+ end
+
+ def detach_disks
+ DeploymentPlan::Steps::UnmountInstanceDisksStep.new(instance_model).perform(instance_report)
+ DeploymentPlan::Steps::DetachInstanceDisksStep.new(instance_model).perform(instance_report)
+ end
+
+ def attach_disks
+ DeploymentPlan::Steps::AttachInstanceDisksStep.new(instance_model, instance_plan.tags).perform(instance_report)
+ DeploymentPlan::Steps::MountInstanceDisksStep.new(instance_model).perform(instance_report)
+ end
+
+ def orphan_inactive_vms
+ inactive_vms.each do |inactive_vm|
+ ips = inactive_vm.ip_addresses.map(&:address_str)
+ DeploymentPlan::Steps::OrphanVmStep.new(inactive_vm).perform(instance_report)
+ instance_plan.remove_obsolete_network_plans_for_ips(ips)
+ end
+ end
+
+ def inactive_vms
+ instance_model.vms.reject { |vm| vm.id == new_vm.id || vm.id == deleted_vm_id }
+ end
+
+ def elect_active_vm
+ instance_report.vm = new_vm
+ DeploymentPlan::Steps::ElectActiveVmStep.new.perform(instance_report)
+ instance_report.vm = instance_model.active_vm
+ end
+
+ def create_vm
+ @vm_creator.create_for_instance_plan_with_clone(
+ @source_instance,
+ instance_plan,
+ @ip_provider,
+ instance_model.active_persistent_disk_cids,
+ instance_plan.tags,
+ )
+ end
+
+ def any_create_swap_delete_vms?
+ instance_plan.should_create_swap_delete? && instance_model.vms.count > 1
+ end
+ end
+ end
+end
diff --git a/src/bosh-director/lib/bosh/director/instance_updater/update_with_clone_procedure.rb b/src/bosh-director/lib/bosh/director/instance_updater/update_with_clone_procedure.rb
new file mode 100644
index 0000000000..df029c6999
--- /dev/null
+++ b/src/bosh-director/lib/bosh/director/instance_updater/update_with_clone_procedure.rb
@@ -0,0 +1,234 @@
+module Bosh::Director
+ class InstanceUpdater
+ class UpdateWithCloneProcedure
+ attr_reader :instance, :instance_plan, :options, :instance_report, :action, :context
+
+ def initialize(source_instance,
+ instance,
+ instance_plan,
+ options,
+ blobstore,
+ needs_recreate,
+ instance_report,
+ disk_manager,
+ rendered_templates_persister,
+ vm_creator,
+ links_manager,
+ ip_provider,
+ dns_state_updater,
+ logger,
+ task)
+ @source_instance = source_instance
+ @instance = instance
+ @instance_plan = instance_plan
+ @options = options
+ @blobstore = blobstore
+ @needs_recreate = needs_recreate
+ @instance_report = instance_report
+ @disk_manager = disk_manager
+ @rendered_templates_persister = rendered_templates_persister
+ @vm_creator = vm_creator
+ @links_manager = links_manager
+ @ip_provider = ip_provider
+ @dns_state_updater = dns_state_updater
+ @logger = logger
+ @action = calculate_action
+ @context = calculate_context
+ @task = task
+ end
+
+ def to_proc
+ -> { perform }
+ end
+
+ def perform
+ # Optimization to only update DNS if nothing else changed.
+ @links_manager.bind_links_to_instance(instance)
+ instance.update_variable_set
+
+ unless full_update_required?
+ if instance_plan.changes.include?(:tags)
+ @logger.debug('Minimal update: VM and disk tags')
+ update_vm_disk_metadata
+ end
+
+ if instance_plan.changes.include?(:dns)
+ @logger.debug('Minimal update: DNS configuration')
+ update_dns_if_changed
+ end
+
+ return
+ end
+
+ unless instance_plan.already_detached?
+ handle_not_detached_instance_plan
+
+ # desired state
+ if instance.state == 'stopped'
+ # Command issued: `bosh stop`
+ update_instance
+ return
+ end
+
+ handle_detached_instance_if_detached
+ end
+
+ converge_vm if instance.state != 'detached'
+ update_instance
+ #update_dns_if_changed
+ #update_vm_disk_metadata
+
+ #return if instance.state == 'detached'
+
+ #@rendered_templates_persister.persist(instance_plan)
+ #apply_state
+ end
+
+ private
+
+ def apply_state
+ state_applier = InstanceUpdater::StateApplier.new(
+ instance_plan,
+ agent,
+ RenderedJobTemplatesCleaner.new(instance.model, @blobstore, @logger),
+ @logger,
+ task: @task,
+ canary: options[:canary],
+ )
+ state_applier.apply(instance_plan.desired_instance.instance_group.update)
+ end
+
+ def handle_not_detached_instance_plan
+ # Rendered templates are persisted here, in the case where a vm is already soft stopped
+ # It will update the rendered templates on the VM
+ unless Config.enable_nats_delivered_templates && @needs_recreate
+ @rendered_templates_persister.persist(instance_plan)
+ end
+
+ unless instance_plan.needs_shutting_down? || instance.state == 'detached'
+ DeploymentPlan::Steps::PrepareInstanceStep.new(instance_plan).perform(instance_report)
+ end
+
+ # current state
+ return unless instance.model.state != 'stopped'
+
+ stop
+ take_snapshot
+ end
+
+ def handle_detached_instance_if_detached
+ return unless instance.state == 'detached'
+
+ # Command issued: `bosh stop --hard`
+ @logger.info("Detaching instance #{instance}")
+ instance_model = instance_plan.new? ? instance_plan.instance.model : instance_plan.existing_instance
+ DeploymentPlan::Steps::UnmountInstanceDisksStep.new(instance_model).perform(instance_report)
+ DeploymentPlan::Steps::DetachInstanceDisksStep.new(instance_model).perform(instance_report)
+ DeploymentPlan::Steps::DeleteVmStep.new(true, false, Config.enable_virtual_delete_vms).perform(instance_report)
+ end
+
+ def update_instance
+ instance_plan.release_obsolete_network_plans(@ip_provider)
+ instance.update_state
+ end
+
+ def update_vm_disk_metadata
+ return unless instance_plan.changes.include?(:tags)
+ return if instance_plan.new? || @needs_recreate
+ return if instance.state == 'detached' # disks will get a metadata update when attaching again
+
+ @logger.debug("Updating instance #{instance} VM and disk metadata with tags")
+ tags = instance_plan.tags
+ cloud = CloudFactory.create.get(instance.model.active_vm.cpi)
+ MetadataUpdater.build.update_disk_metadata(cloud, instance.model.managed_persistent_disk, tags) if instance.model.managed_persistent_disk
+ MetadataUpdater.build.update_vm_metadata(instance.model, instance.model.active_vm, tags)
+ end
+
+ def converge_vm
+ recreate = @needs_recreate || (instance_plan.should_create_swap_delete? && instance_plan.instance.model.vms.count > 1)
+
+ RecreateCloneHandler.new(@logger, @vm_creator, @ip_provider, instance_plan, instance_report, instance, @source_instance).perform if recreate
+
+ instance_report.vm = instance_plan.instance.model.active_vm
+ @disk_manager.update_persistent_disk(instance_plan)
+
+ instance.update_instance_settings unless recreate
+ end
+
+ # Full update drains jobs and starts them again
+ def full_update_required?
+ return true if instance_plan.changes.count > 2
+
+ # Only DNS and tag changes do not require a full update
+ return false if instance_plan.changes.sort == %i[dns tags]
+
+ return false if instance_plan.changes.first == :dns || instance_plan.changes.first == :tags
+
+ true
+ end
+
+ def stop
+ stop_intent = deleting_vm? ? :delete_vm : :keep_vm
+ Stopper.stop(intent: stop_intent, instance_plan: instance_plan,
+ target_state: instance.state, logger: @logger, task: @task)
+ end
+
+ def deleting_vm?
+ @needs_recreate || instance_plan.needs_shutting_down? || instance.state == 'detached' ||
+ (instance_plan.should_create_swap_delete? && instance_plan.instance.model.vms.count > 1)
+ end
+
+ def calculate_action
+ if restarting?
+ names = {
+ 'started' => 'start',
+ 'stopped' => 'stop',
+ 'detached' => 'stop',
+ }
+
+ raw_name = instance_plan.instance.virtual_state
+ return names[raw_name] if names.key? raw_name
+
+ return raw_name
+ end
+
+ return 'create' if instance_plan.new?
+
+ @needs_recreate ? 'recreate' : 'update'
+ end
+
+ def restarting?
+ changes.size == 1 && %i[state restart].include?(changes.first)
+ end
+
+ def changes
+ instance_plan.changes
+ end
+
+ def calculate_context
+ return {} if restarting?
+
+ context = {}
+ context['az'] = instance_plan.desired_az_name if instance_plan.desired_az_name
+ unless instance_plan.new?
+ context['changes'] = changes.to_a unless changes.size == 1 && changes.first == :recreate
+ end
+ context
+ end
+
+ def update_dns_if_changed
+ return unless instance_plan.dns_changed?
+
+ @dns_state_updater.update_dns_for_instance(instance_plan, instance_plan.network_settings.dns_record_info)
+ end
+
+ def agent
+ AgentClient.with_agent_id(instance.model.agent_id, instance.model.name)
+ end
+
+ def take_snapshot
+ Api::SnapshotManager.take_snapshot(instance.model, clean: true)
+ end
+ end
+ end
+end
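UpdateWithCloneProcedure mirrors the stock UpdateProcedure but stops after update_instance: the post-converge DNS update, VM/disk metadata update, template persistence, and apply_state calls are commented out rather than deleted — consistent with a spike that relies on the instant clone inheriting the source VM's applied job state.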
diff --git a/src/bosh-director/lib/bosh/director/vm_creator.rb b/src/bosh-director/lib/bosh/director/vm_creator.rb
index 09723d1164..83fafb44a1 100644
--- a/src/bosh-director/lib/bosh/director/vm_creator.rb
+++ b/src/bosh-director/lib/bosh/director/vm_creator.rb
@@ -36,6 +36,12 @@ module Bosh::Director
StepExecutor.new('Creating VM', [agenda], track: false).run
end
+ def create_for_instance_plan_with_clone(source_instance, instance_plan, ip_provider, disks, tags, use_existing = false)
+ agenda = get_clone_agenda_for_instance_plan(source_instance, instance_plan, disks, tags, ip_provider, 1, use_existing)
+
+ StepExecutor.new('Creating VM', [agenda], track: false).run
+ end
+
private
def get_agenda_for_instance_plan(instance_plan, disks, tags, ip_provider, total, use_existing = false)
@@ -90,6 +96,59 @@ module Bosh::Director
agenda
end
+ def get_clone_agenda_for_instance_plan(source_instance, instance_plan, disks, tags, ip_provider, total, use_existing = false)
+ instance_string = instance_plan.instance.model.to_s
+
+ agenda = DeploymentPlan::Stages::Agenda.new.tap do |a|
+ a.report = DeploymentPlan::Stages::Report.new.tap do |r|
+ r.network_plans = instance_plan.network_plans
+ end
+
+ a.thread_name = "clone_missing_vm(#{instance_string}/#{total})"
+ a.info = 'Cloning missing VM'
+ a.task_name = instance_string
+ end
+
+ instance = instance_plan.instance
+ already_had_active_vm = instance.vm_created?
+
+ agenda.steps = [
+ DeploymentPlan::Steps::CloneVmStep.new(
+ source_instance,
+ instance_plan,
+ @agent_broadcaster,
+ disks,
+ tags,
+ use_existing,
+ ),
+ ]
+
+ agenda.steps << DeploymentPlan::Steps::ElectActiveVmStep.new unless already_had_active_vm
+
+ agenda.steps << DeploymentPlan::Steps::CommitInstanceNetworkSettingsStep.new
+
+ agenda.steps << DeploymentPlan::Steps::ReleaseObsoleteNetworksStep.new(ip_provider) unless instance_plan.should_create_swap_delete?
+
+ # TODO(mxu, cdutra): find cleaner way to express when you need to Attach and Mount the disk
+ if instance_plan.needs_disk?
+ if !instance_plan.should_create_swap_delete? || creating_first_create_swap_delete_vm?(instance_plan, already_had_active_vm)
+ agenda.steps << DeploymentPlan::Steps::AttachInstanceDisksStep.new(instance.model, tags)
+ agenda.steps << DeploymentPlan::Steps::MountInstanceDisksStep.new(instance.model)
+ end
+ end
+
+ agenda.steps << DeploymentPlan::Steps::UpdateInstanceSettingsStep.new(instance_plan.instance)
+ agenda.steps << DeploymentPlan::Steps::ApplyVmSpecStep.new(instance_plan)
+ agenda.steps << DeploymentPlan::Steps::RenderInstanceJobTemplatesStep.new(
+ instance_plan,
+ @template_blob_cache,
+ @dns_encoder,
+ @link_provider_intents,
+ )
+
+ agenda
+ end
+
def creating_first_create_swap_delete_vm?(instance_plan, already_had_active_vm)
instance_plan.should_create_swap_delete? && !already_had_active_vm
end
diff --git a/src/bosh-director/lib/cloud/external_cpi.rb b/src/bosh-director/lib/cloud/external_cpi.rb
index 2dc87121e8..f86ee1511b 100644
--- a/src/bosh-director/lib/cloud/external_cpi.rb
+++ b/src/bosh-director/lib/cloud/external_cpi.rb
@@ -54,6 +54,7 @@ module Bosh::Clouds
def create_stemcell(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def delete_stemcell(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def create_vm(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
+ def instant_clone_vm(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def delete_vm(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def create_network(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def delete_network(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
diff --git a/src/bosh-director/lib/cloud/external_cpi_response_wrapper.rb b/src/bosh-director/lib/cloud/external_cpi_response_wrapper.rb
index 7e4c41db84..5bcaa13d8b 100644
--- a/src/bosh-director/lib/cloud/external_cpi_response_wrapper.rb
+++ b/src/bosh-director/lib/cloud/external_cpi_response_wrapper.rb
@@ -12,6 +12,7 @@ module Bosh::Clouds
def create_stemcell(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def delete_stemcell(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def delete_vm(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
+ def instant_clone_vm(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def create_network(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def delete_network(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
def has_vm(*arguments); invoke_cpi_method(__method__.to_s, *arguments); end
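Both CPI wrappers gain a pass-through for the new instant_clone_vm method; the CPI-side implementation (for example a vSphere CPI build exposing VMware's instant clone) is outside this diff and assumed to exist. The call shape, as CloneVmStep issues it:

```ruby
# Assumed CPI contract: create_vm's arguments plus the source VM's cid;
# returns the cid of the cloned VM.
vm_cid = cloud.instant_clone_vm(
  agent_id,                      # fresh UUID for the clone's agent
  source_instance.model.vm_cid,  # running VM to instant-clone from
  cloud_properties,
  network_settings,
  disks,
  env,
)
```

The gist's second file is a small shell helper, presumably run inside a cloned VM: it rebinds each ethernet device's PCI driver so the kernel picks up the post-clone MAC address, then restarts systemd-networkd.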
#!/bin/bash
# For each ethernet NIC, unbind and rebind its PCI driver (presumably so the
# kernel re-reads the device's post-clone MAC address), then restart
# systemd-networkd to reconfigure the links.
for NETDEV in /sys/class/net/e*
do
  DEVICE_LABEL=$(basename "$(readlink -f "$NETDEV/device")")
  DEVICE_DRIVER=$(basename "$(readlink -f "$NETDEV/device/driver")")
  echo "$DEVICE_LABEL" > "/sys/bus/pci/drivers/$DEVICE_DRIVER/unbind"
  echo "$DEVICE_LABEL" > "/sys/bus/pci/drivers/$DEVICE_DRIVER/bind"
done
systemctl restart systemd-networkd