Skip to content

Instantly share code, notes, and snippets.

@braoru
Created October 11, 2016 11:08
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save braoru/f48d053a5ccca0d3079c1bcedfb73184 to your computer and use it in GitHub Desktop.
#!/usr/bin/python -i
from concurrent import futures
import logging
import time
import signal
import datetime
import pykka
import pytz
import uuid
import concurrent.futures
import json
import pykka.debug
from queue import Queue, Empty
from enum import Enum, unique
from collections import deque
# Logging
# One basicConfig call configures the root handler, format and level in one
# shot.  (The original called basicConfig twice; the second call was a no-op
# because handlers were already installed, and the extra setLevel calls were
# redundant once the root level is DEBUG.)
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(name)s %(levelname)s %(process)d %(thread)d %(funcName)s() %(threadName)s %(message)s',
    datefmt='%m/%d/%Y %I:%M:%S %p'
)
logger = logging.getLogger("test")
# SIGUSR1 dumps tracebacks of all threads — pykka's debugging aid for
# diagnosing deadlocked actors.
signal.signal(signal.SIGUSR1, pykka.debug.log_thread_tracebacks)
@unique
class DeployableJobs(Enum):
    """Identifiers for the jobs a deployment can schedule.

    NOTE: the original had trailing commas after each value, which made the
    member values one-element tuples ``(1,)`` etc.; plain ints are intended.
    Members are only ever used by identity/name in this file, so the fix is
    safe for all visible callers.
    """
    job_1 = 1
    job_2 = 2
    job_3 = 3
@unique
class DeployableJobsStatus(Enum):
    """Lifecycle states of a deployable job.

    NOTE: the original had trailing commas after ``running`` and ``failed``,
    which made those values one-element tuples ``(1,)`` / ``(2,)``; plain
    ints are intended.  (This enum is not referenced elsewhere in the
    visible code.)
    """
    running = 1
    failed = 2
    idle = 3
# A deliberately ugly demo job
##
def job_1(name=None):
    """Simulate job 1: four one-second work steps; returns the last step index."""
    last_step = 0
    for step in range(1, 5):
        logger.info("Job 1 {name} - processing {y}".format(y=step, name=name))
        last_step = step
        time.sleep(1)
    return last_step
def job_2(name=None):
    """Simulate job 2: three one-second work steps; returns the last step index."""
    last_step = 0
    for step in range(1, 4):
        logger.info("Job 2 {name} - processing {y}".format(y=step, name=name))
        last_step = step
        time.sleep(1)
    return last_step
def job_3(name=None):
    """Simulate job 3: two one-second work steps; returns the last step index."""
    last_step = 0
    for step in range(1, 3):
        logger.info("Job 3 {name} - processing {y}".format(y=step, name=name))
        last_step = step
        time.sleep(1)
    return last_step
# My worker actor
##
class FabricDeployActor(pykka.ThreadingActor):
    """Worker actor that pulls deployments from the master and runs their jobs.

    Each job function executes in a single-worker process pool.  The actor
    drives itself by posting messages to its own proxy (``_in_future``):
    ``deploy`` polls the master, ``schedule`` dequeues the next job, and each
    job method reschedules when done.
    """

    _todo = None              # deque of DeployableJobs left in the current deployment
    _status = None            # status dict being built for the current deployment
    _executor = None          # single-worker ProcessPoolExecutor running job functions
    _monitoring_actor = None  # MonitoringActor proxy for progress notifications
    _master = None            # Master proxy handing out work
    _in_future = None         # proxy to self, used for async self-messaging
    _job_mapping = None       # DeployableJobs member -> async job-method proxy

    def __init__(self, monitoring_actor=None, master=None):
        super(FabricDeployActor, self).__init__()
        self._in_future = self.actor_ref.proxy()
        self._executor = concurrent.futures.ProcessPoolExecutor(max_workers=1)
        self._monitoring_actor = monitoring_actor
        self._master = master
        self._job_mapping = {
            DeployableJobs.job_1: self._in_future.job_1,
            DeployableJobs.job_2: self._in_future.job_2,
            DeployableJobs.job_3: self._in_future.job_3,
        }
        logger.info(self._job_mapping)

    def on_start(self):
        logger.info("Actor started")
        self._in_future.deploy()

    def deploy(self):
        """Poll the master for a deployment; idle-loop (1 s) while none exists."""
        logger.info("In deploy")
        job = self._master.get_job().get()
        if job is None:
            logger.info("No work to do")
            time.sleep(1)
            self._in_future.deploy()
            return
        # Queue jobs to process
        jobs_tuple = job['jobs']
        self._todo = deque(jobs_tuple)
        # Define status.  timezone.utc gives the same isoformat() ("+00:00")
        # as the pytz.utc the original used, without the third-party dep.
        self._status = {
            "name": job['name'],
            "id": job['deploy_id'],
            "job_todo": {todo.name: {} for todo in jobs_tuple},
            "start_time": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
        }
        logger.info("New jobs")
        logger.info(
            json.dumps(
                self._status,
                sort_keys=True,
                indent=4,
                separators=(',', ': ')
            )
        )
        self._in_future.schedule()

    def schedule(self):
        """Run the next queued job, or report the deployment done and re-poll."""
        logger.info(len(self._todo))
        if len(self._todo) == 0:
            self._master.done(self._status)
            self._todo = None
            self._status = None
            self._in_future.deploy()
            return
        # NOTE(review): pop() takes jobs from the RIGHT end, i.e. in reverse
        # of the order they were queued — use popleft() if FIFO is intended.
        next_job = self._todo.pop()
        logger.info("Next job is : {n}".format(n=next_job.name))
        self._monitoring_actor.notification("Processing next job {m}".format(m=next_job.name))
        self._job_mapping[next_job]()

    def _run_job(self, job_key, label, job_fn):
        """Run *job_fn* in the process pool and record its timing/result.

        The result is stored under ``self._status['job_todo'][job_key]``;
        *label* is the human-readable name used in log/notification text.
        Replaces the three copy-pasted job_N bodies of the original.
        """
        logger.info("Starting {l}".format(l=label))
        start_time = datetime.datetime.now(tz=datetime.timezone.utc).isoformat()
        future = self._executor.submit(job_fn, name=self._status['name'])
        # Blocks this actor thread until the subprocess finishes the job.
        result = future.result()
        completion_time = datetime.datetime.now(tz=datetime.timezone.utc).isoformat()
        self._status['job_todo'][job_key] = {
            "start_time": start_time,
            "completion_time": completion_time,
            "last_result": result,
        }
        self._monitoring_actor.notification(
            "{name} {l} end".format(name=self._status['name'], l=label))
        self._in_future.schedule()
        return self._status['job_todo'][job_key]

    def job_1(self):
        # job_1 here resolves to the module-level job function, as in the
        # original executor.submit(job_1, ...) call.
        return self._run_job("job_1", "job 1", job_1)

    def job_2(self):
        return self._run_job("job_2", "job 2", job_2)

    def job_3(self):
        return self._run_job("job_3", "job 3", job_3)
# My monitoring actors
##
class MonitoringActor(pykka.ThreadingActor):
    """Actor that receives progress notifications and writes them to the log."""

    def notification(self, status=None):
        """Log *status* with a 'Monitor : ' prefix."""
        logger.info("Monitor : {s}".format(s=status))
# Master Actor
##
class Master(pykka.ThreadingActor):
    """Coordinator actor: queues deployments and hands them to deploy workers."""

    _deploy_actors = None     # FabricDeployActor proxies pulling from the queue
    _monitoring_actor = None  # shared MonitoringActor proxy
    _task_queue = None        # pending deployments (bounded; oldest silently dropped when full)
    _in_future = None         # proxy to self, handed to workers as their master

    # Job sequences selectable via create_task(n=...).  Replaces the original
    # if-chain, which left ``jobs`` unbound (UnboundLocalError) for any other n.
    _JOB_SETS = {
        5: (DeployableJobs.job_1, DeployableJobs.job_3, DeployableJobs.job_2),
        2: (DeployableJobs.job_2, DeployableJobs.job_3, DeployableJobs.job_1),
        1: (DeployableJobs.job_3, DeployableJobs.job_2, DeployableJobs.job_1),
    }

    def __init__(self):
        super(Master, self).__init__()
        self._monitoring_actor = MonitoringActor().start().proxy()
        self._in_future = self.actor_ref.proxy()
        self._deploy_actors = [
            FabricDeployActor.start(
                master=self._in_future,
                monitoring_actor=self._monitoring_actor
            ).proxy()
            for _ in range(1, 3)  # two workers
        ]
        # maxlen=10: appending to a full deque silently drops the OLDEST task.
        self._task_queue = deque(maxlen=10)
        #self._task_queue = Queue(maxsize=10)

    def create_task(self, n, name):
        """Queue a new deployment named *name* using job set *n*; return its id.

        Raises ValueError for an unknown *n* (the original code would raise
        UnboundLocalError in that case).
        """
        # Create a new deployement
        deploy_id = str(uuid.uuid4())
        logger.info("processing task {id}".format(id=deploy_id))
        if n not in self._JOB_SETS:
            raise ValueError("unknown job set n={n}".format(n=n))
        jobs = self._JOB_SETS[n]
        # Save it
        ##
        deployment = {
            "name": name,
            # timezone.utc isoformat() matches the original pytz.utc output
            "start_time": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
            "deploy_id": deploy_id,
            "jobs": jobs,
        }
        # TODO : Fail or success ? raise exception in case of full stack ? (timeout better)
        self._task_queue.append(deployment)
        logger.info("Queue length : {n}".format(n=len(self._task_queue)))
        return deploy_id

    def get_job(self):
        """Return the oldest queued deployment, or None when the queue is empty."""
        try:
            job = self._task_queue.popleft()
        except IndexError:
            logger.info("no work at disposal")
            return None
        logger.info("job ready")
        return job

    def done(self, status):
        """Log the final *status* dict of a completed deployment."""
        logger.info("DONE")
        logger.info(
            json.dumps(
                status,
                sort_keys=True,
                indent=4,
                separators=(',', ': ')
            )
        )

    def on_stop(self):
        # Tear down workers first, then the monitor they notify.
        for actor in self._deploy_actors:
            actor.stop()
        self._monitoring_actor.stop()
if __name__ == "__main__":
    # Demo driver: enqueue six deployments (two per job set, with a pause
    # after the first pair), then idle until Ctrl-C.
    try:
        master = Master().start().proxy()
        for task_name, job_set in (("A", 5), ("B", 5)):
            task_id = master.create_task(name=task_name, n=job_set).get()
            logger.info("Actor {t} {n} task : {d}".format(t=task_name, n=job_set, d=task_id))
        time.sleep(2)
        for task_name, job_set in (("C", 2), ("D", 2), ("E", 1), ("F", 1)):
            task_id = master.create_task(name=task_name, n=job_set).get()
            logger.info("Actor {t} {n} task : {d}".format(t=task_name, n=job_set, d=task_id))
        logger.info("end of stacking")
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("END")
    finally:
        pykka.ActorRegistry.stop_all()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment