Commit febe8709 authored by Georges Racinet

PAAS Dispatcher: tracking job progress to limit concurrency

To limit concurrency, we have to track job progress once jobs have
been launched.

This is done via a permanent thread that calls the coordinator
API endpoint to retrieve job info by job token. As it turns out,
the endpoint returns a 401 code unless the job is ongoing.
This feels weird, but since we are using the token that the
coordinator itself sent us as part of the job payload, it is
good enough for now. It does, however, make debugging of any
other problem leading to 401 responses really hard.
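
Concretely, the check boils down to the following sketch (same
endpoint, parameter and status values as in
`PaasRunner.is_job_finished` below; error handling elided):

    import requests

    def job_seems_finished(coordinator_url, job_token):
        # GET /api/v4/job returns the job owning the given token; a 401
        # answer means the token is not valid anymore, hence (assuming
        # our call is otherwise correct) that the job is over.
        resp = requests.get(coordinator_url.rstrip('/') + '/api/v4/job',
                            params=dict(job_token=job_token))
        if resp.status_code == 401:
            return True
        resp.raise_for_status()
        return resp.json().get('status') in ('failed', 'success', 'canceled')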

Later versions of the PAAS dispatcher may expose an endpoint on
the provisioned resources in order to follow progress directly
there, which would be less hacky and would alleviate the load on
the coordinator a bit (it is of course heavily queried anyway,
but full job details by token are expected to be heavy to
generate).

The current design of the `PaasDispatcher` class means that
progress is fully interpreted only at the next polling cycle,
because a reporting thread reading the queue is spawned for each
cycle. We should later switch to a permanent reporting thread;
see the sketch below.
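
For illustration, such a permanent thread could be as simple as
this sketch (function and thread names are hypothetical, not part
of this change):

    import threading

    def start_permanent_reporting_thread(reporting_queue, handle_message):
        # Drain the queue forever in a single daemon thread, instead of
        # starting one short-lived reporting thread per polling cycle.
        def loop():
            while True:
                handle_message(reporting_queue.get())

        thread = threading.Thread(target=loop, daemon=True,
                                  name="Permanent launch reporting")
        thread.start()
        return thread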

--HG--
branch : heptapod
parent 202519f0fc8e
@@ -169,7 +169,13 @@ class CleverCloudDockerRunner(PaasRunner):
@overrides
def launch(self, paas_resource, job_json):
with tempfile.TemporaryDirectory() as tmp_path:
build_helper = DockerBuildHelper(tmp_path)
# TODO make Git details configurable
build_helper = DockerBuildHelper(
tmp_path,
git_process_env={},
git_user_name="Heptapod Paas Runner",
git_user_email='paasrunner@heptapod.test',
)
build_helper.write_build_context(self, job_json)
build_helper.git_push(paas_resource.deploy_url)
@@ -70,7 +70,7 @@ class DockerBuildHelper:
def git_push(self, url):
"""Make the Docker context a Git repository, commit and push."""
self.git('init')
self.git('init', '-q')
if self.git_user_name:
self.git('config', 'user.name', self.git_user_name)
if self.git_user_email:
@@ -79,4 +79,4 @@ class DockerBuildHelper:
self.git('add', '.')
self.git('commit', '-m', 'Job definition')
self.git('remote', 'add', 'clever', url)
self.git('push', '--set-upstream', 'clever', 'master')
self.git('push', '-q', '--set-upstream', 'clever', 'master')
# Copyright 2021 Georges Racinet <georges.racinet@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# SPDX-License-Identifier: GPL-3.0-or-later
class JobHandle:
"""Represent a job provided by a coordinator.
- :attr:`job_id` the numeric id, relative to the coordinator
- :attr:`runner_name` the unique name of the Runner instance that
was used to acquire the job
- :attr:`token` authentication token to the coordinator.
This class is meant to be a message-passing payload, hence does not
have heavy attributes.
With the Runner instance (usually retrievable from its unique name)
and :attr:`token`, all job details can be retrieved from the
coordinator. Some of them (status, notably) can change over time.
We could therefore use ``(runner_name, token)`` as a full ID, but
:attr:`token` must stay secret and never be leaked to the logs, which
could otherwise easily happen upon future changes of this class.
"""
def __init__(self, runner_name, job_id, token):
self.runner_name = runner_name
self.job_id = job_id
self.token = token
self.full_id = (runner_name, job_id)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.full_id == other.full_id)
def __hash__(self):
return hash(self.full_id)
def __repr__(self):
return 'JobHandle(runner_name=%r, job_id=%d, token="REDACTED")' % (
self.runner_name, self.job_id)
def __str__(self):
return 'job %d for runner %r' % (self.job_id, self.runner_name)
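# Illustration (assumed usage, not part of this change): the token takes
# no part in equality or hashing, and the repr keeps it out of the logs:
#
#   >>> jh = JobHandle(runner_name='runner1', job_id=12, token='secret')
#   >>> jh
#   JobHandle(runner_name='runner1', job_id=12, token="REDACTED")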
@@ -13,7 +13,12 @@ import threading
import time
import toml
from .runner import runner_classes
from .exceptions import (
GitLabUnavailableError,
GitLabUnexpectedError,
)
from .job import JobHandle
from .runner import PaasRunner
logger = logging.getLogger(__name__)
@@ -21,108 +26,191 @@ logger = logging.getLogger(__name__)
class LauncherThreadStatus(Enum):
STARTING = 0
LAUNCHED = 1
FAILED = 2
LAUNCH_FAILED = 2
FINISHED = 3
POLL_CYCLE_FINISHED = object()
def launch_job(reporting_queue, runner, job_json):
job = json.loads(job_json)
job_id = job['id']
runner_name = runner.unique_name
reporting_queue.put((runner_name, job_id,
LauncherThreadStatus.STARTING))
def launch_job(reporting_queue, runner, job_handle, job_data, job_json):
"""Provision and start job for runner, reporting on queue
Job arguments are very redundant to avoid needless back-and-forth
conversions. These are, from source to most refined and partial:
:param str job_json: raw JSON serialized job payload, as retrieved from
coordinator
:param dict job_data: JSON deserialization of ``job_json``
:param JobHandle job_handle: representation of the job, can be recreated
from ``job_data``.
"""
reporting_queue.put((job_handle, LauncherThreadStatus.STARTING))
try:
app = runner.provision(job_id)
# TODO pass full job_data for flavors etc.
app = runner.provision(job_data)
runner.launch(app, job_json)
except Exception:
logger.exception("Uncatched exception for job %r on runner %r",
job_id, runner_name)
reporting_queue.put((runner_name, job_id,
LauncherThreadStatus.FAILED))
logger.exception("Uncatched exception for %s", job_handle)
reporting_queue.put((job_handle, LauncherThreadStatus.LAUNCH_FAILED))
else:
reporting_queue.put((runner_name, job_id,
LauncherThreadStatus.LAUNCHED))
reporting_queue.put((job_handle, LauncherThreadStatus.LAUNCHED))
class PaasDispatcher:
def __init__(self, config):
self.launch_errors = [] # pairs (Runner.unique_name, job_id)
self.init_runners(config)
self.max_concurrency = config.get('concurrent', 1)
self.reporting_queue = Queue()
self.reporting_threads = []
# tracking info about jobs, all storing JobHandle instances
self.launch_errors = []
self.launched_jobs = set()
self.pending_jobs = set() # jobs acquired and not yet launched
def init_runners(self, config):
"""Return an immutable iterable of Runner instances.
Immutability will be helpful to avoid bugs in the requeueing loop.
"""
self.runners = tuple(
runner_classes[runner_conf['executor']](runner_conf)
for runner_conf in config['runners'])
runners = (PaasRunner.create(conf) for conf in config['runners'])
self.runners = {runner.unique_name: runner for runner in runners}
def wait_all_reporting_threads(self):
for thread in self.reporting_threads:
thread.join(timeout=60)
def potential_concurrency(self):
"""Return the number of parallel jobs if all launches success."""
return len(self.pending_jobs) + len(self.launched_jobs)
def poll_all_launch(self):
"""Poll for all runners and launch jobs.
Each runner is polled until it no longer gets jobs to run.
"""
polling_runners = list(self.runners)
polling_runners = list(self.runners.values())
def report(queue):
def report():
nb_jobs = 0
init = True
while nb_jobs or init:
msg = queue.get()
msg = self.reporting_queue.get()
if msg is POLL_CYCLE_FINISHED:
init = False
continue
runner_name, job_id, status = msg
job_handle, status = msg
if status is LauncherThreadStatus.STARTING:
logger.info("Launching job %d for runner %r",
job_id, runner_name)
logger.info("Launching %s", job_handle)
nb_jobs += 1
elif status is LauncherThreadStatus.LAUNCHED:
logger.info("Successfullly launched job %d for runner %r",
job_id, runner_name)
self.pending_jobs.discard(job_handle)
logger.info("Successfullly launched %s", job_handle)
nb_jobs -= 1
elif status is LauncherThreadStatus.FAILED:
logger.error("Failed to launch job %d for runner %r",
job_id, runner_name)
self.launch_errors.append((runner_name, job_id))
self.launched_jobs.add(job_handle)
elif status is LauncherThreadStatus.LAUNCH_FAILED:
self.pending_jobs.discard(job_handle)
logger.error("Failed to launch %s", job_handle)
self.launch_errors.append(job_handle)
nb_jobs -= 1
elif status is LauncherThreadStatus.FINISHED:
logger.info("Finished %s, according to "
"coordinator.", job_handle)
self.launched_jobs.discard(job_handle)
reporting_queue = Queue()
reporting_thread = threading.Thread(
target=lambda: report(reporting_queue))
reporting_thread = threading.Thread(target=report)
reporting_thread.name = "Launch reporting" # TODO add a number?
reporting_thread.start()
while polling_runners:
next_runners = []
for runner in polling_runners:
if self.potential_concurrency() >= self.max_concurrency:
continue
job_json = runner.request_job()
if job_json is None:
continue
job_data = json.loads(job_json)
job_handle = JobHandle(job_id=job_data['id'],
runner_name=runner.unique_name,
token=job_data['token'])
# need to add to pending jobs immediately. Doing it in a
# thread would make it possible to acquire a new job before
# self.potential_concurrency() takes the present one into
# account, hence to exceed the max concurrency
self.pending_jobs.add(job_handle)
# The GIL is expected to become a bottleneck only in the long
# term, as most of the time is spent in subprocess and HTTP calls.
launcher = threading.Thread(
target=lambda: launch_job(reporting_queue,
runner, job_json))
target=lambda: launch_job(self.reporting_queue,
runner,
job_handle, job_data, job_json))
launcher.start()
# requeue for immediate repolling
next_runners.append(runner)
polling_runners = next_runners
reporting_queue.put(POLL_CYCLE_FINISHED)
self.reporting_queue.put(POLL_CYCLE_FINISHED)
self.reporting_threads.append(reporting_thread)
def poll_launched_jobs_progress_once(self):
"""Call coordinator to enquire about progress of launched jobs.
This is notably useful to track the number of currently running
jobs.
"""
# Normally, only the reporting thread mutates the `launched_jobs`
# attribute, and removals are even done only upon a signal from
# this method (currently launched in a single thread).
# Still, we can be sure of thread safety by copying to an immutable
# structure before iteration.
for job_handle in tuple(self.launched_jobs):
runner_name = job_handle.runner_name
runner = self.runners[runner_name]
try:
finished = runner.is_job_finished(job_handle)
except GitLabUnavailableError as exc:
# warning only because this is likely to be a temporary
# condition
logger.warning("Runner %r, coordinator not available, "
"could not poll job progress "
"(got %r on URL %r)",
runner_name, exc.message, exc.url)
except GitLabUnexpectedError as exc:
logger.error("Runner %r, got HTTP error %d from coordinator "
"while polling job progress. URL was %r, "
"message is %r", runner_name,
exc.status_code, exc.url, exc.message)
except Exception: # the thread must not crash
logger.exception("Unexpected exception while polling "
"coordinator for progress of %s", job_handle)
else:
if finished:
logger.warning("%r is FINISHED", job_handle)
self.reporting_queue.put((job_handle,
LauncherThreadStatus.FINISHED))
def start_launched_jobs_progress_thread(self, poll_interval):
def progress_loop():
logger.info("Thread to poll coordinator about progress of "
"launched jobs started, polling every %d seconds",
poll_interval)
while True:
self.poll_launched_jobs_progress_once()
time.sleep(poll_interval)
thread = self.launched_jobs_progress_thread = threading.Thread(
target=progress_loop, daemon=True)
thread.start()
def main(raw_args=None):
"""Main loop.
@@ -137,6 +225,9 @@ def main(raw_args=None):
parser.add_argument("--poll-interval", type=int, default=3,
help="Time (seconds) to wait after all available jobs "
"are treated before polling coordinators again.")
parser.add_argument("--job-progress-poll-interval", type=int, default=30,
help="Time (seconds) to wait between coordinators "
"polls about progress of successfully launched jobs")
parser.add_argument("--poll-cycles", type=int,
help="Number of times to poll all runners. "
"(useful for testing purposes)")
@@ -154,6 +245,9 @@ def main(raw_args=None):
max_poll_cycles = cl_args.poll_cycles
infinite = max_poll_cycles is None
dispatcher.start_launched_jobs_progress_thread(
cl_args.job_progress_poll_interval)
while infinite or poll_cycles < max_poll_cycles:
dispatcher.poll_all_launch()
# No runner got a job; sleep before polling coordinators again
@@ -163,9 +257,26 @@ def main(raw_args=None):
# This could be fixed by another layer of threading (per
# runner) to handle that, but that will be good enough for now.
# Also, waiting times should be per coordinator
logger.info("No job to process, "
"polling for all runners again in %d seconds",
poll_interval)
# WARNING it is important to call the poll method even if it
# won't actually poll the server because max concurrency is reached:
# it will at least start the reporting thread, which will get
# the FINISHED messages.
# TODO we'll be better off with a permanent reporting thread, now.
potential = dispatcher.potential_concurrency()
if potential >= dispatcher.max_concurrency:
# TODO separate interval setting, with default closer to
# the job progress thread interval.
logger.info("No (more) jobs to process and max concurrency %d "
"is reached with currently %d job(s) running or "
"being launched; will awake again in %d seconds "
"and poll again if concurrency has decreased.",
dispatcher.max_concurrency, potential, poll_interval)
else:
logger.info("No (more) job to process, "
"polling for all runners again in %d seconds",
poll_interval)
poll_cycles += 1
time.sleep(poll_interval)
# TODO catch KeyboardInterrupt and SIGTERM, and use
@@ -4,6 +4,7 @@
# GNU General Public License version 2 or any later version.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import logging
import requests
from requests.exceptions import RequestException
import toml
@@ -14,8 +15,11 @@ from .exceptions import (
)
logger = logging.getLogger(__name__)
class PaasRunner:
"""Abstract base class for one `[runner]` section of the main configuration.
"""Abstract base class for one of the `[[runners]]` of the main config.
Concrete subclasses will be able to provision PAAS resources and
launch the main Heptapod Runner's command to run one job on them.
@@ -32,6 +36,11 @@ class PaasRunner:
def register(cls):
runner_classes[cls.executor] = cls
@staticmethod
def create(config):
"""Instantiate with the appropriate class."""
return runner_classes[config['executor']](config)
def __init__(self, config):
self.config = config
self.gitlab_token = self.config['token']
@@ -107,6 +116,35 @@ class PaasRunner:
return None
return resp.text
def is_job_finished(self, job_handle):
# Using the API endpoint to get a job by its token
url = self.config['url'].rstrip('/') + '/api/v4/job'
try:
resp = requests.get(url, params=dict(job_token=job_handle.token))
except RequestException as exc:
raise GitLabUnavailableError(url=url, message=str(exc))
# TODO raise the usual exceptions, just catch and log from
# the polling method
if resp.status_code == 401:
# of course this is ugly, but assuming we are correct
# in our call (proper token, on a job that was properly
# acquired), getting a 401 means that the job is not
# running anymore. Doing something more natural may require
# changes in the Rails app (or the next main iteration of the Paas
# Runner, with a proper HTTP service)
return True
elif resp.status_code >= 400:
raise GitLabUnexpectedError(status_code=resp.status_code,
params=None,
message=resp.text,
url=url)
# TODO cancel provisioned resource if job is canceled
status = resp.json().get('status')
logger.debug("%s status is %r", job_handle, status)
return status in ('failed', 'success', 'canceled')
def provision(self, job):
"""Provision necessary resources in which to actually run the job.
# Copyright 2021 Georges Racinet <georges.racinet@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# SPDX-License-Identifier: GPL-3.0-or-later
from ..job import JobHandle
def test_job_handle_eq_hash():
jh1 = JobHandle(runner_name='runner1',
job_id=1,
token='should-be-unique')
jh2 = JobHandle(runner_name='runner1',
job_id=2,
token='tok2')
jh1_other_runner = JobHandle(runner_name='runner2',
job_id=1,
token='tok2')
jh1_bis = JobHandle(runner_name='runner1',
job_id=1,
token='should-be-unique')
assert jh1 == jh1
assert jh1 != jh2
assert jh1 != jh1_other_runner
# set operations, thanks to JobHandle being hashable
assert jh2 in {jh1, jh2}
assert {jh1, jh1} == {jh1}
assert len({jh1, jh2}) == 2
assert len({jh1, jh1_other_runner}) == 2
# token doesn't matter for equality and hash
assert jh1 == jh1_bis
assert len({jh1, jh1_bis}) == 1
@@ -7,6 +7,10 @@
import json
from ..testing import RunnerForTests
from ..exceptions import (
GitLabUnavailableError,
GitLabUnexpectedError,
)
from ..paas_dispatcher import (
PaasDispatcher,
main as dispatcher_main,
@@ -32,7 +36,8 @@ class Runner(RunnerForTests):
super(Runner, self).__init__(config)
self.acquirable_jobs = []
self.acquired_jobs = {}
self.launched_jobs = []
self.launched_jobs_resources = []
self.progress_errors = {} # used to trigger errors in job progress
def request_job(self):
if not self.acquirable_jobs:
@@ -44,7 +49,8 @@ class Runner(RunnerForTests):
self.acquired_jobs[job_id] = job
return json.dumps(job)
def provision(self, job_id):
def provision(self, job):
job_id = job['id']
success = self.acquired_jobs[job_id]['provision_ok']
if success:
return ApplicationForTests(self.unique_name, job_id)
@@ -52,11 +58,23 @@ class Runner(RunnerForTests):
raise RuntimeError('TODO precise error')
def launch(self, app, job_json):
success = self.acquired_jobs[app.job_id]['launch_ok']
if not success:
job = self.acquired_jobs[app.job_id]
if not job['launch_ok']:
raise RuntimeError('TODO precise error')
self.launched_jobs.append((app, job_json))
self.launched_jobs_resources.append((app, job_json))
job['status'] = 'launched'
def is_job_finished(self, job_handle):
job_id = job_handle.job_id
error = self.progress_errors.get(job_id)
if error:
raise error
else:
return self.acquired_jobs[job_id]['status'] == 'finished'
def mark_job_finished(self, job_id):
self.acquired_jobs[job_id]['status'] = 'finished'
Runner.register()
@@ -65,35 +83,124 @@ Runner.register()
def test_one_cycle():
dispatcher = PaasDispatcher(
config=dict(
concurrent=4,
runners=[dict(executor=Runner.executor,
token='secret'),
]
))
assert len(dispatcher.runners) == 1
runner = dispatcher.runners[0]
runner_name = runner.unique_name
runner_name, runner = next(iter(dispatcher.runners.items()))
runner.acquirable_jobs.extend((
dict(id=12, provision_ok=True, launch_ok=True),
dict(id=13, provision_ok=False),
dict(id=14, provision_ok=True, launch_ok=False),
dict(id=15, provision_ok=True, launch_ok=True),
dict(id=12, token='jobtok12', provision_ok=True, launch_ok=True),
dict(id=13, token='jobtok13', provision_ok=False),
dict(id=14, token='jobtok14', provision_ok=True, launch_ok=False),
dict(id=15, token='jobtok15', provision_ok=True, launch_ok=True),
))
dispatcher.poll_all_launch()
dispatcher.wait_all_reporting_threads()
# reports about launch attempts can arrive in any order
assert set(dispatcher.launch_errors) == {(runner_name, 13),
(runner_name, 14)
}
launched = runner.launched_jobs
assert set(jh.full_id for jh in dispatcher.launch_errors) == {
(runner_name, 13),
(runner_name, 14),
}
assert set(jh.full_id for jh in dispatcher.launched_jobs) == {
(runner_name, 12),
(runner_name, 15),
}
# testing runner has more details about jobs
launched = runner.launched_jobs_resources
assert len(launched) == 2
assert launched[0][0] == ApplicationForTests(runner.unique_name, 12)
assert launched[1][0] == ApplicationForTests(runner.unique_name, 15)
def test_main(tmpdir):
def test_jobs_progress_max_concurrency():
dispatcher = PaasDispatcher(
config=dict(
concurrent=2,
runners=[dict(executor=Runner.executor,
token='secret'),
]
))
assert len(dispatcher.runners) == 1
runner_name, runner = next(iter(dispatcher.runners.items()))
def launched_jobs():
return set(jh.job_id for jh in dispatcher.launched_jobs)
runner.acquirable_jobs.extend((
dict(id=12, token='jobtok12', provision_ok=True, launch_ok=True),
dict(id=13, token='jobtok13', provision_ok=True, launch_ok=True),
dict(id=14, token='jobtok14', provision_ok=True, launch_ok=True),
))
dispatcher.poll_all_launch()