@jkraemer
Created October 8, 2012 10:48
Rufus Scheduler initialization script
# lib/scheduler.rb
require 'rufus/scheduler'

class Scheduler

  # Starts the scheduler unless it is already running
  def self.start_unless_running(pid_file)
    with_lockfile(File.join(File.dirname(pid_file), 'scheduler.lock')) do
      if File.exist?(pid_file)
        pid = IO.read(pid_file).to_i
        if pid > 0 && process_running?(pid)
          puts "not starting scheduler because it already is running with pid #{pid}"
        else
          puts "Process #{$$} removes stale pid file"
          File.delete pid_file
        end
      end
      if !File.exist?(pid_file)
        # Write the current PID to the file
        (File.new(pid_file, 'w') << $$).close
        puts "scheduler process is: #{$$}"
        # Execute the scheduler
        new.setup_jobs
      end
      true
    end or puts "could not start scheduler - lock not acquired"
  end

  # true if the process with the given PID exists, false otherwise
  def self.process_running?(pid)
    Process.kill(0, pid)
    true
  rescue Exception
    false
  end

  # Executes the given block if the lock can be acquired; otherwise nothing is
  # done and false is returned.
  def self.with_lockfile(lock_file)
    lock = File.new(lock_file, 'w')
    begin
      if lock.flock(File::LOCK_EX | File::LOCK_NB)
        yield
      else
        return false
      end
    ensure
      lock.flock(File::LOCK_UN)
      File.delete lock
    end
  end

  def initialize
    @rufus_scheduler = Rufus::Scheduler.start_new
    # install an exception handler on this scheduler instance to report
    # errors via Airbrake
    @rufus_scheduler.define_singleton_method(:handle_exception) do |job, exception|
      puts "job #{job.job_id} caught exception '#{exception}'"
      Airbrake.notify exception
    end
  end

  #
  # Job definitions go here
  #
  def setup_jobs
    @rufus_scheduler.every('5m') do
      puts "hello from your test job"
    end
    puts 'scheduler initialized.'
  end

end
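
A quick way to sanity-check this class outside of unicorn is to require it and call start_unless_running with a pid file path directly. The sketch below is not part of the original gist; the paths and the trailing sleep are illustrative assumptions.

# Hypothetical standalone smoke test for the Scheduler class above
# (paths are assumptions, not part of the gist).
require_relative 'lib/scheduler'

pid_file = File.expand_path('tmp/pids/scheduler.pid', __dir__)

# The first process to get here acquires the lock, writes the pid file and
# sets up the jobs; a concurrent second call sees the pid file and does nothing.
Scheduler.start_unless_running(pid_file)

# keep the process alive so the rufus scheduler thread can run its jobs
sleep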
# config/unicorn.rb
APP_ROOT = File.expand_path(File.dirname(File.dirname(__FILE__)))
ENV['BUNDLE_GEMFILE'] = File.expand_path('../Gemfile', File.dirname(__FILE__))
require 'bundler/setup'

worker_processes 2
working_directory APP_ROOT

preload_app true
timeout 30

listen APP_ROOT + "/tmp/pids/unicorn.sock", :backlog => 64
pid APP_ROOT + "/tmp/pids/unicorn.pid"
stderr_path APP_ROOT + "/log/unicorn.stderr.log"
stdout_path APP_ROOT + "/log/unicorn.stdout.log"

before_fork do |server, worker|
  defined?(ActiveRecord::Base) && ActiveRecord::Base.connection.disconnect!

  # during a hot upgrade, tell the old master to quit once the new one is up
  old_pid = APP_ROOT + "/tmp/pids/unicorn.pid.oldbin"
  if File.exist?(old_pid) && server.pid != old_pid
    begin
      Process.kill("QUIT", File.read(old_pid).to_i)
    rescue Errno::ENOENT, Errno::ESRCH
      puts "Old master already dead"
    end
  end
end

# load the scheduler init script
require APP_ROOT + '/lib/scheduler'

# path to the scheduler pid file
scheduler_pid_file = File.join(APP_ROOT, "tmp/pids/scheduler.pid").to_s

after_fork do |server, worker|
  defined?(ActiveRecord::Base) && ActiveRecord::Base.establish_connection

  # write a per-worker pid file (used by the init script's kill_worker action)
  child_pid = server.config[:pid].sub('.pid', ".#{worker.nr}.pid")
  system("echo #{Process.pid} > #{child_pid}")

  # run scheduler initialization
  Scheduler::start_unless_running scheduler_pid_file
end
#!/bin/sh
### BEGIN INIT INFO
# Provides:          unicorn
# Required-Start:    $local_fs $remote_fs
# Required-Stop:     $local_fs $remote_fs
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: unicorn initscript
# Description:       unicorn
### END INIT INFO

set -e

# Example init script; this can be used with nginx, too,
# since nginx and unicorn accept the same signals.
# Feel free to change any of the following variables for your app:
TIMEOUT=${TIMEOUT-60}
APP_ROOT=/path/to/your/rails/app/current
PID=$APP_ROOT/tmp/pids/unicorn.pid
ENVIRONMENT=production
PATH="/home/deploy/.rbenv/bin:/home/deploy/.rbenv/shims:$PATH"
CMD="bin/unicorn -E $ENVIRONMENT -D -c $APP_ROOT/config/unicorn.rb"

cd $APP_ROOT

# run unicorn as the deploy user when the script is invoked as somebody else
if [ "deploy" != `whoami` ]; then
  CMD="sudo -u deploy -- env PATH=$PATH $CMD"
fi
echo $CMD

action="$1"
set -u
old_pid="$PID.oldbin"

cd $APP_ROOT || exit 1

sig () {
  test -s "$PID" && kill -$1 `cat $PID`
}

oldsig () {
  test -s $old_pid && kill -$1 `cat $old_pid`
}

workersig () {
  workerpid="$APP_ROOT/tmp/pids/unicorn.$2.pid"
  test -s "$workerpid" && kill -$1 `cat $workerpid`
}

case $action in
start)
  sig 0 && echo >&2 "Already running" && exit 0
  $CMD
  ;;
stop)
  sig QUIT && exit 0
  echo >&2 "Not running"
  ;;
force-stop)
  sig TERM && exit 0
  echo >&2 "Not running"
  ;;
restart|reload)
  sig HUP && echo reloaded OK && exit 0
  echo >&2 "Couldn't reload, starting '$CMD' instead"
  $CMD
  ;;
upgrade)
  if sig USR2 && sleep 20 && sig 0 && oldsig QUIT
  then
    n=$TIMEOUT
    while test -s $old_pid && test $n -ge 0
    do
      printf '.' && sleep 1 && n=$(( $n - 1 ))
    done
    echo

    if test $n -lt 0 && test -s $old_pid
    then
      echo >&2 "$old_pid still exists after $TIMEOUT seconds"
      exit 1
    fi

    # add this line to bring up another worker for the scheduler right after the upgrade is done:
    sig TTIN
    exit 0
  fi
  echo >&2 "Couldn't upgrade, starting '$CMD' instead"
  $CMD
  ;;
kill_worker)
  workersig QUIT $2 && exit 0
  echo >&2 "Worker not running"
  ;;
reopen-logs)
  sig USR1
  ;;
*)
  echo >&2 "Usage: $0 <start|stop|restart|upgrade|force-stop|reopen-logs|kill_worker>"
  exit 1
  ;;
esac
@ghufftesla

I ran into an interesting situation where I had a Rufus::Scheduler.start_new call in an initializer file in an app running under unicorn with 10 workers. I had nothing like this protecting against every worker running the jobs in that scheduler, yet the jobs only ran once. I believe the reason it worked is that unicorn's fork does not clone the scheduler thread, only the main Ruby thread. Did you actually experience all of your workers creating the schedule and running jobs?
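
For anyone wanting to verify that explanation, here is a minimal sketch (plain Ruby on MRI on a Unix-like system, no unicorn or rufus-scheduler involved, and not from the gist) showing that a thread started before fork keeps running only in the parent; in the child, only the thread that called fork survives.

# Demonstrates that background threads are not cloned by fork:
# the ticker thread stays alive in the parent but is dead in the child.
$stdout.sync = true

ticker = Thread.new do
  loop do
    puts "tick from pid #{Process.pid}"
    sleep 1
  end
end

child = fork do
  sleep 2
  # only the thread that called fork exists in the child
  puts "child #{Process.pid}: ticker alive? #{ticker.alive?}"   # => false
end

sleep 3
puts "parent #{Process.pid}: ticker alive? #{ticker.alive?}"    # => true
Process.wait(child)
ticker.kill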
