@spjwebster
Created September 11, 2013 09:23
A basic rq worker that will retry failed jobs before dumping them in the failed queue.
#!/usr/bin/env python
import os, sys
sys.path.append(os.getcwd())

import logging

import rq

MAX_FAILURES = 3

logger = logging.getLogger(__name__)

queues = None

def retry_handler(job, exc_type, exc_value, traceback):
    # Track how many times this job has failed in its meta dict
    job.meta.setdefault('failures', 0)
    job.meta['failures'] += 1

    # Too many failures: persist the updated failure count, then return True
    # so the next exception handler (the default one) moves the job into the
    # failed queue
    if job.meta['failures'] >= MAX_FAILURES:
        logger.warn('job %s: failed too many times - moving to failed queue' % job.id)
        job.save()
        return True

    # Requeue job and stop it from being moved into the failed queue
    logger.warn('job %s: failed %d times - retrying' % (job.id, job.meta['failures']))

    for queue in queues:
        if queue.name == job.origin:
            queue.enqueue_job(job, timeout=job.timeout)
            return False

    # Can't find queue, which should basically never happen as we only work
    # jobs that match the given queue names and queues are transient in rq.
    logger.warn('job %s: cannot find queue %s - moving to failed queue' % (job.id, job.origin))
    return True

with rq.Connection():
    # list() keeps the `or` fallback working on Python 3, where map() returns
    # a lazy iterator that is always truthy
    queues = list(map(rq.Queue, sys.argv[1:])) or [rq.Queue()]

    worker = rq.Worker(queues)
    worker.push_exc_handler(retry_handler)
    worker.work()
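For context, here is a minimal sketch of how a worker like this might be used. The file name retry_worker.py, the module tasks, and its function count_words are hypothetical stand-ins, and it assumes a Redis instance running on localhost:

# Enqueue a job for the worker to pick up. 'tasks.count_words' is a
# hypothetical function assumed to be importable from the worker's
# working directory.
from redis import Redis
from rq import Queue

queue = Queue('default', connection=Redis())
queue.enqueue('tasks.count_words', 'some text to process')

Start the worker on the same queue with: python retry_worker.py default. The design hinges on rq's exception handler chain: returning False from a handler stops the chain, so a requeued job never reaches the default handler that moves jobs to the failed queue, while returning True falls through to it.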
@petervtzand

👍 Awesome!

@fossilet commented Nov 3, 2016

After the max retries, I find the job isn't saved to the failed queue. Is there something wrong?
