@tobert
Created September 8, 2017 20:37
copilot.pl - homegrown CI from 2004
#!/usr/local/bin/perl
use warnings;
use strict;
use Config::ApacheFormat;
use Getopt::Long;
use File::Path;
use File::Basename;
use Net::SSH::Perl;
use Data::Dumper; # for debugging
die "NEVER RUN THIS AS ROOT!!!" if ( $< == 0 );
our($opt_h, $opt_a, $opt_v, $opt_x, $opt_X, $opt_B,
$opt_u, $opt_c, $opt_s, $opt_t, $opt_e);
Getopt::Long::Configure('bundling');
GetOptions(
"h" => \$opt_h, "help" => \$opt_h,
"a" => \$opt_a, "all" => \$opt_a,
"u" => \$opt_u, "update" => \$opt_u,
"v" => \$opt_v, "verbose" => \$opt_v,
"x" => \$opt_x,
"X" => \$opt_X,
"e" => \$opt_e, "brb" => \$opt_B,
"c=s" => \$opt_c, "config=s" => \$opt_c,
"s=s" => \$opt_s, "site=s" => \$opt_s,
"t=s" => \$opt_t, "target=s" => \$opt_t
);
print_help() unless( $opt_c && !$opt_h && (($opt_s && $opt_t) || $opt_a) );
# set rsync base arguments
#our $rsync_args = '--cvs-exclude --archive --verbose --links --stats';
our $rsync_args = '--cvs-exclude --archive --verbose --links --stats --exclude="docs/"';
if ( $opt_x ) { $rsync_args .= ' --dry-run' }
# switch CVS between update/checkout
our $cvs_command = $opt_u ? 'update' : 'checkout';
# figure out branch names now, so they're consistent even if copilot takes
# a long time to run
my @lt = localtime(time);
# pad the day and month with a zero if we need it
my $day = sprintf("%02d", $lt[3] );
my $month = sprintf("%02d", $lt[4]+1 );
my $branch = join( '_', $lt[5]+1900, $month, $day );
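# e.g. a run on 2004-09-08 yields a $branch of "2004_09_08"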
# define verbose here so it can be used without ()'s
sub verbose {
return unless ( $opt_v );
print STDERR @_, "\n";
}
# read the configuration file
our $config = Config::ApacheFormat->new(
setenv_vars => $opt_e ? undef : 1,
valid_directives => [qw(setenv workdir mappings webhost cvstag vhost zope_vhost
mapdir symlink rsync_remote_args rsync_remote_target
post_install)],
valid_blocks => [qw(mappings map site target)]
);
$config->read($opt_c);
# Don't mess with these settings - they are rm -rf'ed and
# rsync --delete'd often. NEVER RUN AS ROOT
#our $merge_base = $config->get('workdir') . '/merge';
#our $stage_base = $config->get('workdir') . '/stage';
our $merge_base = '/var/copilot/merge';
our $stage_base = '/var/copilot/stage';
# Get all the mapped applications
our %map_hash = make_map_hash($config);
# list of servers to restart
my %restart_list;
# Use perl's map function to get the second element of the
# arrayrefs in the array returned by $config->get('site').
# This ends up being a list of site names.
foreach my $site ( map { $_->[1] } $config->get('site') ) {
next if ( ($opt_s && $site ne $opt_s) || (!$opt_s && !$opt_a) );
verbose "---> current site is \"$site\"";
my $site_block = $config->block( site => $site );
my @mappings = $site_block->get('mappings');
validate_mappings( \%map_hash, \@mappings );
my %symlinks = reverse $site_block->get('symlink');
verbose_hash( 'symlinks', \%symlinks );
# map function usage same as for $config->get('site') above
foreach my $target ( map { $_->[1] } $site_block->get('target') ) {
next if ( ($opt_t && $target ne $opt_t) || (!$opt_t && !$opt_a) );
verbose "------> current site is \"$site\" and target is \"$target\"";
# Clear the merge space.
verbose( "rmtree( \"$merge_base/\", 0, 1 );" );
rmtree( "$merge_base/", 0, 1 ) unless ( $opt_x );
my $tblock = $site_block->block( target => $target );
# skip any targets that use the PROD cvs tag unless we're in BRB mode
next if ( lc($tblock->get('cvstag')) eq 'prod' && !$opt_B );
# now apply all site mappings to the target(s)
foreach my $mapping ( @mappings ) {
for ( my $i=0; $i<@{$map_hash{$mapping}}; $i+=2 ) {
my( $cvs_mod, $dest_dir ) = @{$map_hash{$mapping}}[$i..$i+1];
my $stage_dir = "$stage_base/$site/$target/$i";
#my $merge_dir = $config->get('workdir')."/merge/$site/$dest_dir";
my $merge_dir = "/var/copilot/merge/$site/$dest_dir";
# Big Red Button!
if ( $opt_B ) {
cvs_tag_and_branch( 'STAGE', 'PROD', $cvs_mod );
}
cvs_checkout( $stage_dir, $tblock->get('cvstag'), $map_hash{$mapping}->[$i] )
and # only run local_merge if cvs_checkout is successful
local_merge( "$stage_dir/$map_hash{$mapping}->[$i]", $merge_dir );
}
}
apply_symlinks( "$merge_base/$site", \%symlinks );
# If rsync_remote_args is not set in the target, Config::ApacheFormat
# will set it to the default defined in the globals automatically.
# For rsync_remote_target, it defaults to the target block's name.
my $remote_files_updated = 1;
my @webhost = $tblock->get('webhost');
my @vhost = $tblock->get('vhost');
my @zope_vhost = $tblock->get('zope_vhost');
my $position = 0;
foreach my $web_host_temp (@webhost)
{
remote_merge( "$merge_base/$site/$target",
$tblock->get('rsync_remote_args'),
$web_host_temp,
$tblock->get('rsync_remote_target') || $target,
\$remote_files_updated
);
# only run post_install if some files were updated on the remote host
# and there exists a command to be run.
#verbose "**** RFU: $remote_files_updated VHOST: $vhost[$position] *****";
if ( $remote_files_updated > 0 && defined $vhost[$position] && $vhost[$position] ne '') {
# this assumes that the user that made the config isn't a complete idiot and put a web vhost
# that isn't on the machine, but then again dumber things have happened.
my $temp_key = $vhost[$position].":".$web_host_temp;
$restart_list{$temp_key} = 1;
}
# Zope restart
if ( $remote_files_updated > 0 && defined $zope_vhost[$position] && $zope_vhost[$position] ne '') {
# zope:/web/zope/bin/:molly.internal.priority-health.com
my $temp_zope = $zope_vhost[$position].":".$web_host_temp;
$restart_list{$temp_zope} = 2;
}
$position++;
}
}
}
foreach my $key (keys %restart_list)
{
process_remote_commands($key, $restart_list{$key});
}
exit 0;
# --------------------------------------------------------------------------- #
# FUNCTIONS
# --------------------------------------------------------------------------- #
sub cvs_tag_and_branch {
my( $from, $to, $cvs_mod ) = @_;
verbose 'cvs_tag_and_branch(', join(', ', @_), ')';
chdir('/tmp'); # cvs complains if the cwd doesn't exist
verbose "[cvs_tag_and_branch] `cvs rtag -r $from -F $to $cvs_mod`;";
if ( !$opt_x ) {
my @out = `cvs rtag -r $from -F $to $cvs_mod`;
verbose @out;
}
verbose "[cvs_tag_and_branch] `cvs rtag -r $to -b ${to}_$branch $cvs_mod`;";
if ( !$opt_x ) {
my @out = `cvs rtag -r $to -b ${to}_$branch $cvs_mod`;
verbose @out;
}
}
sub cvs_checkout {
my( $stage_dir, $cvstag, $cvs_module ) = @_;
verbose 'cvs_checkout(', join(', ', @_), ')';
rmtree( "$stage_dir/", 0, 1 ) unless ( $opt_u || $opt_x );
mkpath( $stage_dir, 0, 0775 ) unless ( -e $stage_dir || $opt_x );
smart_chdir($stage_dir);
if ( $opt_u && !-e $cvs_module ) {
$cvs_command = 'checkout';
}
if ( $opt_x && $cvs_command !~ /-n/ ) {
$cvs_command = "-n $cvs_command";
}
# Pull source dirs from CVS into staging directories
# -Q: quiet mode (no longer used)
# -n: dry-run mode (only with $opt_x)
# co: checkout
# -P: prune empty directories
# -r: specify tag $cvstag
# : $cvs_module is the module to check out
verbose "[cvs_checkout()] `cvs $cvs_command -P -r $cvstag $cvs_module`";
my @cvs_out = `cvs $cvs_command -P -r $cvstag $cvs_module 2>&1`;
if ( @cvs_out ) {
chomp $cvs_out[0];
if ( $cvs_out[0] eq "cvs server: warning: new-born $cvs_module has disappeared" ) {
print "no $cvstag tag for $cvs_module!\n";
return undef;
}
verbose '[cvs_checkout()] ', @cvs_out;
}
unless ( -e $cvs_module || $opt_x ) {
print "\"$cvs_module\" not created - system error or tag \"$cvstag\" is not set.\n";
return undef;
}
return 1;
}
# Merge staging directories into final web directory structure
sub local_merge {
my( $staged_cvs_dir, $merge_dir ) = @_;
verbose 'local_merge(', join(', ', @_), ')';
$merge_dir =~ s#/+$##;
mkpath( $merge_dir, 0, 0775 ) unless ( -e $merge_dir || $opt_x );
verbose "[local_merge()] mkpath( $merge_dir, 0, 0775 )";
die "[local_merge()] $staged_cvs_dir does not exist!" unless ( -e $staged_cvs_dir || $opt_x );
smart_chdir( $staged_cvs_dir );
verbose "[local_merge()] `rsync ${rsync_args} . $merge_dir`";
my @rsync_out = `rsync ${rsync_args} . $merge_dir 2>&1`;
verbose '[local_merge()] ', @rsync_out;
}
# uh-glee function to create symlinks
# this could be delicate, so may need some work in the future
sub apply_symlinks {
my( $path, $hash ) = @_;
smart_chdir( $path );
foreach my $to ( keys %$hash ) {
# remove leading / from filenames
$to =~ s#^/##;
$hash->{$to} =~ s#^/##;
# get the directory name for chdir
my $to_dir = dirname( $to );
# get the file name
my $to_fname = basename( $hash->{$to} );
# build a ../../.. for making the link relative
my $dotdot = '';
foreach ( split '/', $to_dir ) { $dotdot .= '../' }
$dotdot =~ s#/$##;
# chdir to the directory the symlink should be in
eval {
chdir( $to_dir ) || die "[apply_symlinks()] chdir($to_dir): $!";
};
# fail gracefully if the directory does not exist
if ( $@ ) {
verbose $@;
next;
}
# execute the command - ln rarely has output, so use system
verbose "[apply_symlinks()] system(\"ln -fns $to_fname $dotdot/$to\");";
system( "ln -fns $to_fname $dotdot/$to" ) unless ( $opt_x );
die "could not create symlink from $hash->{$to} to $to"
unless( $? == 0 );
# return to $path
chdir( $dotdot );
}
}
sub remote_merge {
my( $merge_base, $rsync_remote_args, $webhost, $target, $rfu_ref ) = @_;
verbose 'remote_merge(', join(', ', @_), ')';
die "[remote_merge()] could not determine rsync_remote_args - check your configuration file"
if ( !$rsync_remote_args );
# force rsync --delete OFF for production pushes
if ( $opt_B ) {
$rsync_remote_args =~ s/--delete//;
}
# Everything is built, let's send it to its destination
smart_chdir( dirname($merge_base) );
verbose "[remote_merge()] `rsync $rsync_remote_args ${rsync_args} . ${webhost}::$target`";
unless ( $opt_x || $opt_X ) {
my @rsync_out = `rsync $rsync_remote_args ${rsync_args} . ${webhost}::$target 2>&1`;
verbose '[remote_merge()]', @rsync_out;
# parse rsync output (with --stats) to determine the number of files
# actually updated on the remote server - return via the reference passed in
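# (the --stats summary includes a line like "Number of files transferred: 42")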
foreach my $line ( @rsync_out ) {
if ( $line =~ /Number of files transferred: (\d+)/ ) {
$$rfu_ref = $1;
}
}
}
}
sub process_remote_commands {
my ($key, $type) = @_;
if($type eq '1') # Apache restart
{
my ($vhost, $host) = split(':', $key);
my $cmd = '';
if ($vhost eq "none" )
{ $cmd = "sudo /etc/httpd/bin/apachectl restart"; }
else
{ $cmd = "sudo /etc/httpd/bin/apachectl restart -v $vhost"; }
#verbose 'process_remote_commands('.$host.':'.$vhost.')';
#verbose 'cmd:'.$cmd;
my $ssh;
if ( $opt_X || $opt_x ) {
verbose "[run_script()] Net::SSH::Perl->new( $host, ciphers => '3des-cbc' )";
verbose "[run_script()] \$ssh->login;";
}
else {
$ssh = Net::SSH::Perl->new( $host, ciphers => '3des-cbc' )
|| return undef;
$ssh->login || return undef;
}
if ( $opt_x || $opt_X ) {
verbose "\$ssh->cmd( $cmd )";
}
else {
return undef if ( !$ssh );
my( $out, $err, $exit ) = $ssh->cmd( $cmd );
verbose "[run_script - $cmd] STDOUT: $out" if ( $out );
verbose "[run_script - $cmd] ERROR: $err" if ( $err );
verbose "[run_script - $cmd] exit code: $exit";
}
$ssh->break_client_loop if ( $ssh );
}
elsif($type eq '2') # zope restart
{
my ($user, $path, $host) = split(':', $key);
$path = $path."zopectl";
my $cmd = "sudo -u $user $path restart";
verbose 'process_remote_commands_zope('.$host.')';
verbose 'cmd:'.$cmd;
my $ssh;
if ( $opt_X || $opt_x ) {
verbose "[run_script()] Net::SSH::Perl->new( $host, ciphers => '3des-cbc' )";
verbose "[run_script()] \$ssh->login;";
}
else {
$ssh = Net::SSH::Perl->new( $host, ciphers => '3des-cbc' )
|| return undef;
$ssh->login || return undef;
}
if ( $opt_x || $opt_X ) {
verbose "\$ssh->cmd( $cmd )";
}
else {
return undef if ( !$ssh );
my( $out, $err, $exit ) = $ssh->cmd( $cmd );
verbose "[run_script - $cmd] STDOUT: $out" if ( $out );
verbose "[run_script - $cmd] ERROR: $err" if ( $err );
verbose "[run_script - $cmd] exit code: $exit";
}
$ssh->break_client_loop if ( $ssh );
}
else
{ verbose "I don't know what to do with this type"; }
return 1;
}
sub make_map_hash {
verbose 'make_map_hash(', join(', ', @_), ')';
my $config = shift;
my $block = $config->block('mappings');
my %map_hash = ();
foreach my $map ( $block->get("map") ) {
my $map_block = $block->block($map);
# each map block can have more than one mapping - it must be in pairs though
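# e.g. a pair like ("webapp", "htdocs") would check cvs module "webapp" out
# and merge it into destination directory "htdocs" (names here are invented)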
my @maps = $map_block->get("mapdir");
for ( my $i=0; $i<@maps; $i+=2 ) {
# $i is the source
# $i+1 is the destination
push( @{$map_hash{$map->[1]}}, $maps[$i], $maps[$i+1] );
}
}
verbose_hash( 'map_hash', \%map_hash );
return %map_hash;
}
sub verbose_hash {
my( $name, $hash ) = @_;
if ( $opt_v ) {
print "----------------------- \%$name --------------------------\n",
Dumper( $hash ),
"----------------------- \%$name --------------------------\n";
}
}
sub smart_chdir {
my $dir = shift;
verbose "[smart_chdir()] chdir(\"$dir\");";
if ( -f $dir ) {
chdir( dirname($dir) );
}
else {
chdir( $dir );
}
}
sub validate_mappings {
my( $map_hash, $mappings ) = @_;
verbose 'validate_mappings( $map_hash, [', join(',',@$mappings), '] )';
foreach my $mapping ( @$mappings ) {
die "Mapping \"$mapping\" does not exist in $opt_c"
unless ( $map_hash->{$mapping} );
}
}
sub print_help {
printf "Usage: %s [-havuxXe] [--brb] -c configfile [-s sitename] [-t target]\n", basename $0;
print <<EOT;
-h, --help
print this help message
-a, --all
pull, merge, and push all sites/targets in the configuration file
-v, --verbose
be very verbose about what this program is doing (as well as put cvs/rsync
in verbose mode)
-u, --update
only do minimal operations to get updates from CVS, instead of completely
building the copilot working filesystem from scratch every time
-c, --config=configfile
you should provide the full path to the copilot config file
-s, --site=sitename
you should provide the name of the web site to be created, as defined in
the copilot config file
-t, --target=targetname
you should provide the name of the target for the site that was specified
-x
Don't actually do anything - just show what would be done (dry run).
-X
Run everything but the remote sync.
-e
Ignore SetEnv directives in the configuration.
* handy for using your CVSROOT for testing instead of the config file's
--brb
The big red button. This branches the PROD tag and pushes it out.
EOT
exit 1;
}
=pod
=head1 NAME
copilot.pl - CVS publishing and mangling tool
=head1 DESCRIPTION
Copilot checks code out of CVS, in a controlled manner, with the intent of
publishing that code to development (HEAD), TEST, STAGE, and PROD environments.
It can be configured such that different CVS modules may be woven together and
mapped into a very different filesystem layout than what is in CVS.
Requirements:
cvs access - either pserver or ext:ssh as long as the password is saved or
ssh keys are set up.
filesystem - write permissions and file space in the directories specified
for merge and stage. Also, write permission to the rsync daemons used to
publish merged content.
post_install (remote ssh command execution) - ssh keys must be set up in
advance. Also, the ssh user is limited to the user executing ssh for now.
The command will only be executed if 1 or more files were updated on the
remote host.
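
A rough, untested sketch of what a configuration file might look like (the
directive and block names come from the valid_directives/valid_blocks lists
near the top of the script; every host name, module name, and path here is
invented):

    setenv  CVSROOT :pserver:copilot@cvs.example.com:/cvsroot
    workdir /var/copilot

    <mappings>
        <map webapp>
            mapdir webapp htdocs
        </map>
    </mappings>

    <site www.example.com>
        mappings webapp
        <target test>
            cvstag              TEST
            webhost             web1.example.com
            vhost               www.example.com
            rsync_remote_args   --delete
            rsync_remote_target www-test
        </target>
    </site>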
=head1 SYNOPSIS
copilot.pl [-havuxXe] [--brb] -c configfile [-s sitename] [-t target]
=head1 OPTIONS
=over 4
=item -h, --help
Prints a terse help text.
=item -a, --all
Process all sites and targets in the configuration file. If not specified, both a site
and target must be specified using the -s/--site and -t/--target switches.
=item -v, --verbose
Print out lots of diagnostic information.
=item -u, --update
Only do updates, do not wipe/rebuild staging and merging directories. This also
causes CVS to run with 'update' to save time. The default is to do a full rebuild.
=item -c, --config
Specify the location of the configuration file. Required!
=item -s, --site
Specify a site to update. Not required if -a/--all is specified.
=item -t, --target
Specify a target to update. Not required if -a/--all is specified.
=item -x
Dry-run mode: really only useful in combination with verbose mode. This will show
everything it would have done, including running rsync and CVS in dry-run mode where
possible.
=item -X
Do not perform any remote operations. Everything that's local to the machine running
copilot will still execute. rsync to remote servers and remote commands (post_install)
will not run.
=item -e
Do not process the environment variables set with SetEnv in the configuration file. This
is handy for developers working on this program to use their default CVS credentials instead
of copilot's.
=item --brb
Big Red Button mode. This mode tags all of STAGE as PROD, then copies that out to production.
At this point, PROD is also branched with the current date (year, month, and day).
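
For a hypothetical cvs module named C<webapp>, a --brb run on 2004-09-08 would
execute roughly the following (see cvs_tag_and_branch() above):

    cvs rtag -r STAGE -F PROD webapp
    cvs rtag -r PROD -b PROD_2004_09_08 webapp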
=back
=cut
#!/bin/bash
# COde PILe Over There
# Matt Hahnfeld <matt.hahnfeld@redacted>
VERSION=1.0;
# Load the Console Library
. copilot_console_lib.sh
# function setup_env()
#
# This will set up the cvs environment and make sure the user is in the
# right place...
copilot_setup_env ()
{
dialog \
--backtitle "COde PILe Over There (version $VERSION)" \
--yesno 'To use this tool, you must be in the cvs working directory on Production and you must have permissions to copy files to and from the Production web directories. Are you currently in the cvs working directory on Production?' 0 0
if [ $? -ne 0 ]; then
exit 1
fi
# note: assign first, then export - "export VAR=`cmd`" would clobber $? with
# export's own (always zero) exit status
CVSROOT=`dialog --stdout \
--backtitle "COde PILe Over There (version $VERSION)" \
--inputbox 'Please confirm or re-enter the CVSROOT environment variable:' \
8 75 $CVSROOT`
if [ $? -ne 0 ]; then
exit 1
fi
export CVSROOT
}
# function menu()
#
# This will print a menu. Sets $SELECTION to a menu option (numeric)
# if a valid option is chosen.
copilot_menu ()
{
SELECTION=`\
dialog --stdout \
--backtitle "COde PILe Over There (version $VERSION)" \
--menu 'Select one of the following actions:' 0 65 6 \
'1' 'Branch Current STAGE as PROD_version' \
'2' 'Pull Down PROD_version to Production cvs' \
'3' 'Copy Live Web Directory to Backup Directory' \
'4' 'Copy Production cvs Directory to Live Directory' \
'5' 'Rollback Backup Directory to Live Directory' \
'6' 'Exit'`
}
# function branch()
#
# Branches STAGE to PROD_version.
copilot_branch ()
{
copilot_get_tag
if [ $TAG_NAME ]; then
{
COMMAND="cvs rtag -b -r STAGE PROD_$TAG_NAME";
copilot_confirm
sleep 5
copilot_pause
}
fi
}
# function update_cvs()
#
# Updates Production cvs with a tagged branch.
copilot_update_from_cvs ()
{
copilot_get_tag
if [ $TAG_NAME ]; then
{
COMMAND="cvs update -d -P -C -r PROD_$TAG_NAME";
copilot_confirm
sleep 5
copilot_pause
}
fi
}
# function live2bak()
#
# Copies /web to /web_bak
copilot_live2bak ()
{
COMMAND="rsync -Crptgo /web /web_bak";
copilot_confirm
sleep 5
copilot_pause
}
# function cvs2live()
#
# Copies the Production cvs working directory to /web
copilot_cvs2live ()
{
COMMAND="rsync -Crptgo . /web";
copilot_confirm
sleep 5
copilot_pause
}
# function bak2live()
#
# Copies /web_bak to /web
copilot_bak2live ()
{
COMMAND="rsync -Crptgo /web_bak /web";
copilot_confirm
sleep 5
copilot_pause
}
copilot_setup_env
SELECTION=0
while [ $SELECTION ] && [ $SELECTION -ne 6 ]
do
copilot_menu
case $SELECTION in
1) copilot_branch
;;
2) copilot_update_from_cvs
;;
3) copilot_live2bak
;;
4) copilot_cvs2live
;;
5) copilot_bak2live
;;
esac
done