Commit b038cd92 authored by Georges Racinet

clone-bundles: auto-generation for public projects

We had to replace the call to the "internal" subprocess with our
own, because the fully detached `procutil.runbgcommand` does not
pass `stdin` correctly (systematically seen to be empty). Instead,
we go the environment variable way, which is fine for the
tiny pickle of a descriptor object.
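
In short, the round trip works as in this sketch (names as in the diff
below; `bundle` is the `RequestedBundle` descriptor):

    # parent process: serialize the descriptor into the child environment
    # instead of relying on its stdin
    env[b'HEPTAPOD_HG_BUNDLE_SPEC'] = b64encode(util.pickle.dumps(bundle))
    procutil.runbgcommand(cmd, env)

    # detached child process: decode the descriptor and present it as stdin
    # to the upstream "internal" command, which keeps reading a pickle there
    procutil.stdin = BytesIO(b64decode(ui.environ[b'HEPTAPOD_HG_BUNDLE_SPEC']))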

There were many choices to make, notably:

- the parent dir for the bundles is derived from the namespace
  full path, so that it looks familiar to users, but the project
  slug is replaced by its `GL_REPOSITORY`, typically `project-#ID`,
  so that rename attacks cannot work (when we have private clone
  bundles, this will matter)
- `on-change` implemented for HTTP transactions by expecting Workhorse to
  pass the namespace path and the public boolean in headers
- `on-change` implemented for SSH transactions by expecting Heptapod Shell
  to set the proper environment variables (hence we do not need to pass them
  to the background generation job)
- the refresh command takes the path and the public indication from
  the command line instead of from the environment, because that is
  easier in the first iteration: we will not have to change RHGitaly
  for this.
- the ACL is expected to be set on the bucket, hence the `public-bucket`
  configuration subsubkey (see the example configuration below). We will
  later need a `private-bucket` one.
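
For illustration, the resulting server configuration could look like the
following sketch (hostnames, bucket name and credentials are made up, but
the keys are those read by the new code; a bundle for the project with
id 123 in namespace `group/subgrp` would then land under
`group/subgrp/project-123/` in the bucket):

    [clone-bundles]
    auto-generate.on-change = yes
    auto-generate.formats = zstd-v2

    [heptapod]
    clone-bundles.public-base-url = https://bundles.example.test
    clone-bundles.s3.endpoint = s3.example.test
    clone-bundles.s3.access_key = <access-key>
    clone-bundles.s3.secret_key = <secret-key>
    clone-bundles.s3.region = <region>
    clone-bundles.s3.tls = yes
    clone-bundles.s3.public-bucket = <bucket-name>
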
parent fd8bb2fe
Merge request !117: clone-bundles: auto-generation for public projects
    @@ -79,3 +79,7 @@
    # if there was an ongoing operation. Not very likely to happen, due to
    # Heptapod upgrades typically involving a stop of the Rails app.
    safe-mismatch.source-safe = upgrade-allow
    [clone-bundles]
    auto-generate.on-change = no
    auto-generate.formats = zstd-v2
    @@ -8,6 +8,8 @@
    This extension should enclose all Mercurial modifications and commands
    needed for Heptapod server operations.
    """
    from base64 import b64encode, b64decode
    import collections
    import json
    @@ -11,6 +13,8 @@
    import collections
    import json
    from hgext import clonebundles
    from io import BytesIO
    from mercurial.i18n import _
    from mercurial import (
        cmdutil,
    @@ -14,6 +18,7 @@
    from mercurial.i18n import _
    from mercurial import (
        cmdutil,
        commands,
        config,
        demandimport,
        error,
    @@ -27,6 +32,8 @@
        ui as uimod,
        util,
    )
    from mercurial.utils import procutil
    from minio import Minio, S3Error
    import os
    import sys
    import tarfile
    @@ -41,6 +48,10 @@
    )
    from .branch import DEFAULT_GITLAB_BRANCH_FILE_NAME

    clonebundles.__doc__  # force actual import

    ASYNC_HEPTAPOD_CLONE_BUNDLE_GENERATE = b'hpd::generate-specified-clone-bundle'

    # these have conditional imports and subsequent `None` testing in
    # (urllib3 and/or requests). In other words, hgdemandimport breaks `requests`
    demandimport.IGNORES.update([
    @@ -425,6 +436,41 @@
        handler.force_git_refs_from_gitlab_files()


    @command(
        b'hpd-clone-bundles-refresh',
        [
            (b'', b'project-namespace-full-path', b'',
             b'Full Heptapod applicative path of the namespace of the project',
             ),
            (b'', b'public', False,
             b'Whether the repository is public (can be anonymously cloned)',
             ),
        ],
        b'',
    )
    def clone_bundles_refresh(ui, repo,
                              project_namespace_full_path=None,
                              public=False):
        """Refresh clone bundles and put them in appropriate storage
        """
        # We would need a proper gRPC call to provide correct user feedback
        if ui.config(b'heptapod', b'clone-bundles') == b'disabled':
            repo.ui.note(
                b"Clone bundles are disabled by config for this repository")
            return

        refresh = cmdutil.findcmd(b'admin::clone-bundles-refresh',
                                  commands.table)[1][0]
        environ = repo.ui.environ
        ns_path = project_namespace_full_path
        if ns_path:
            environ[b'HEPTAPOD_PROJECT_NAMESPACE_FULL_PATH'] = ns_path
        if public:
            environ[b'HEPTAPOD_PUBLIC_REPOSITORY'] = b'true'
        refresh(ui, repo)


    def runsystem(orig, ui, cmd, environ, cwd, out):
        heptapod_env = {k: v for k, v in ui.environ.items()
                        if k.startswith(b'HEPTAPOD_')}
    @@ -483,6 +529,154 @@
        wrap_function(subrepo.svnsubrepo, 'get', forbid_subrepo_get)


    def async_generate_clone_bundle(orig, repo, bundle):
        """We need to pass over HGRCPATH and the variables from WSGI env.

        :param bundle: a `RequestedBundle` object.

        Aside from the environment variables (main reason), this override
        is also trimmed down for simplicity to avoid useless user feedback:
        given that Heptapod manages everything, it is pointless writing
        back to the client. Server logs should be preferred.
        """
        src_env = repo.ui.environ
        data = util.pickle.dumps(bundle)
        env = procutil.shellenviron()
        hgrc_path = src_env.get(b'HEPTAPOD_HGRC')
        if hgrc_path is not None:
            # typical of WSGI environment, hence we need to forward to process
            # environment. TODO use one of the wsgi_ vars for detection
            env[b'HGRCPATH'] = hgrc_path
        env.update((k, v) for k, v in src_env.items()
                   if k.startswith(b'HEPTAPOD_'))
        env[b'GL_REPOSITORY'] = src_env[b'GL_REPOSITORY']
        env[b'HEPTAPOD_HG_BUNDLE_SPEC'] = b64encode(data)
        # TODO use the HG environment variable if available, like
        # procutil.hgexecutable() does
        hg = os.path.join(os.path.dirname(sys.executable), 'hg')
        cmd = [os.fsencode(hg),
               b'--cwd', repo.path,
               ASYNC_HEPTAPOD_CLONE_BUNDLE_GENERATE]
        repo.ui.note(b"clone-bundles: starting async bundle generation, "
                     b"type: %r, cmd=%r\n" % (bundle.bundle_type, cmd))
        procutil.runbgcommand(cmd, env)


    @command(ASYNC_HEPTAPOD_CLONE_BUNDLE_GENERATE, [], b'')
    def async_clone_bundle_generate_subprocess(ui, repo):
        # This is meant to be run in a detached child process and to be its
        # only duty, so we do not hesitate to savagely patch procutil.
        # We need to restore it in tests, though, because they do not run
        # this in a subprocess.
        orig_stdin = procutil.stdin
        bundle_data = ui.environ[b'HEPTAPOD_HG_BUNDLE_SPEC']
        procutil.stdin = BytesIO(b64decode(bundle_data))
        refresh = cmdutil.findcmd(clonebundles.INTERNAL_CMD,
                                  commands.table)[1][0]
        try:
            return refresh(ui, repo)
        finally:
            procutil.stdin = orig_stdin


    def clone_bundle_s3_details(repo, basename):
        """return everything needed"""
        env = repo.ui.environ
        # Separating public and private base URLs should help keep access
        # rights simple and secure, especially to avoid discoverability
        # issues. We will also set permissions at the bucket level, hence
        # have a separate bucket for private clone bundles.
        # It also allows more offloading for public URLs, since we will
        # typically have private clone bundles go through Workhorse.
        # TODO something to accept true, ok, yes etc?
        public = env.get(b'HEPTAPOD_PUBLIC_REPOSITORY') == b'true'
        ns_full_path = env.get(b'HEPTAPOD_PROJECT_NAMESPACE_FULL_PATH')
        gl_repo = env.get(b'GL_REPOSITORY')
        bucket_rpath = b'/'.join((ns_full_path, gl_repo, basename))
        client, bucket = s3_client_bucket(repo, public=public)
        return client, public, bucket, bucket_rpath


    def upload_clone_bundle(orig, repo, bundle):
        """Perform the upload directly with the relevant method for Heptapod.

        The clonebundles extension provides the option to use an external
        command, but that is very cumbersome in our context: we would need
        either to make it (very artificially) a Mercurial command, or to
        generate it each time in a temporary file (persisting it is a very
        bad idea, as it would depend on things that can change, such as
        project visibility, etc.)
        """
        filepath = bundle.filepath
        basename = repo.vfs.basename(filepath)
        client, public, bucket, bucket_rpath = clone_bundle_s3_details(
            repo, basename)

        if public:
            url_conf_key = b'clone-bundles.public-base-url'
        else:  # pragma no cover
            raise NotImplementedError("No clonebundles for private repos yet")
        base_url = repo.ui.config(b'heptapod', url_conf_key)

        s3_upload_clone_bundle(client, os.fsdecode(filepath),
                               bucket,
                               bucket_rpath.decode('utf-8'))
        url = b'/'.join((base_url, bucket_rpath))
        return bundle.uploaded(url, basename)


    def delete_clone_bundle(orig, repo, bundle):
        """Delete a bundle from the storage used by Heptapod."""
        assert bundle.ready
        client, _pub, bucket, bucket_rpath = clone_bundle_s3_details(
            repo, bundle.basename)
        try:
            client.remove_object(bucket, bucket_rpath.decode('utf-8'))
        except S3Error as exc:
            # As stated by the doc of the clonebundles extension, removals
            # must not crash if the file does not exist. What we do here is
            # stronger, but it should be ok.
            repo.ui.warn(b"Error removing clone bundle %r: %r" % (
                bundle.basename,
                exc))


    def s3_client_bucket(repo, public=False):

        def s3_conf(subkey, boolean=False):
            get = repo.ui.configbool if boolean else repo.ui.config
            value = get(b'heptapod',
                        b'clone-bundles.s3.' + subkey.encode('ascii'))
            if boolean:
                return value
            elif value is not None:
                return value.decode('ascii')

        bucket_key = 'public-bucket' if public else 'private-bucket'
        return Minio(s3_conf('endpoint'),
                     access_key=s3_conf('access_key'),
                     secret_key=s3_conf('secret_key'),
                     region=s3_conf('region'),
                     secure=s3_conf('tls', boolean=True),
                     ), s3_conf(bucket_key)


    def s3_upload_clone_bundle(client, src_path, bucket, bucket_rpath):
        # The bucket policy is assumed to be appropriate: anonymous
        # read-only for public repos, completely closed for private repos
        # (the latter not implemented yet as of this writing)
        client.fput_object(bucket, bucket_rpath, src_path)


    def extsetup(ui):
        """Tweaks after all extensions went through their `uisetup`
    @@ -493,3 +687,7 @@
        wrap_function(exchange, '_pullbookmarks', bookmarks_op_override)
        wrap_function(exchange, '_pushbookmark', bookmarks_op_override)
        wrap_function(exchange, '_pushb2bookmarkspart', bookmarks_op_override)
        wrap_function(clonebundles, 'upload_bundle', upload_clone_bundle)
        wrap_function(clonebundles, 'delete_bundle', delete_clone_bundle)
        wrap_function(clonebundles, 'start_one_bundle',
                      async_generate_clone_bundle)
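
For reference, the new refresh command would then be invoked like this
(a sketch; the flag spellings are those declared in the command table
above, and the namespace path is just an example):

    hg hpd-clone-bundles-refresh \
        --project-namespace-full-path group/subgrp \
        --public
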
    @@ -7,7 +7,11 @@
    from __future__ import absolute_import

    import json
    from hgext.clonebundles import (
        GeneratedBundle,
        read_auto_gen,
    )
    from mercurial import (
        error,
        scmutil,
    )
    @@ -10,7 +14,9 @@
    from mercurial import (
        error,
        scmutil,
    )
    from mercurial.utils import procutil
    from minio import Minio, S3Error
    import os
    import pytest
    import re
    @@ -23,6 +29,8 @@
        GitLabStateMaintainerFixture,
    )
    from heptapod.testhelpers.git import GitRepo
    from hgext import clonebundles

    from .utils import common_config
    from .. import (
    @@ -30,5 +38,6 @@
        branch as hpd_branch,
        special_ref,
        keep_around,
        ASYNC_HEPTAPOD_CLONE_BUNDLE_GENERATE,
    )
    @@ -33,5 +42,7 @@
    )

    parametrize = pytest.mark.parametrize


    @pytest.fixture
    def wrapper(tmpdir):
    @@ -264,3 +275,160 @@
        assert os.path.exists(git_repo.path)
        # would not work if it were not a Git repo
        assert git_repo.branches() == {}


    class CloneBundlesFixture():

        def __init__(self, parent, monkeypatch):
            self.native_fixture = parent
            wrapper = self.hg_repo_wrapper = parent.hg_repo_wrapper
            self.bucket = 'seau'
            self.bucket_subdir = 'group/subgrp/project-123'
            self.pub_base_url = 'https://pub.hgcb.test'
            self.set_conf('trigger.below-bundled-ratio', 1.0)
            self.set_conf('auto-generate.formats', 'zstd-v2')
            self.set_conf('clone-bundles.public-base-url', self.pub_base_url,
                          section='heptapod')
            self.set_conf('clone-bundles.s3.endpoint', 's3.test',
                          section='heptapod')
            self.set_conf('clone-bundles.s3.public-bucket', self.bucket,
                          section='heptapod')
            wrapper.repo.ui.environ.update((
                (b'GL_REPOSITORY', b'project-123'),
                (b'HEPTAPOD_HGRC', b'/some/heptapod.hgrc'),
            ))

            self.fput_records = []
            self.rm_records = []
            monkeypatch.setattr(
                Minio, 'fput_object',
                lambda client, *a, **kw: self.fput_records.append((a, kw))
            )
            monkeypatch.setattr(
                Minio, 'remove_object',
                lambda client, *a, **kw: self.rm_records.append((a, kw))
            )

        def set_conf(self, k, v, section='clone-bundles'):
            self.hg_repo_wrapper.repo.ui.setconfig(
                section.encode(), k.encode(), str(v).encode())


    @pytest.fixture
    def clone_bundles_fixture(native_fixture, monkeypatch):
        yield CloneBundlesFixture(native_fixture, monkeypatch)


    # with 'disabled', the point is to run everything configured
    # as for 'command' and check that it really does nothing
    @parametrize('trigger', ('command', 'on-change', 'disabled'))
    def test_clone_bundles_refresh(clone_bundles_fixture, monkeypatch, trigger):
        fixture = clone_bundles_fixture
        wrapper = fixture.hg_repo_wrapper
        on_change = trigger == 'on-change'
        if on_change:
            fixture.set_conf('auto-generate.on-change', True)
            wrapper.repo.ui.environ.update((
                (b'HEPTAPOD_PUBLIC_REPOSITORY', b'true'),
                (b'HEPTAPOD_PROJECT_NAMESPACE_FULL_PATH', b'group/subgrp'),
            ))

        bg_commands = []
        monkeypatch.setattr(procutil, 'runbgcommand',
                            lambda *a, **kw: bg_commands.append((a, kw)))
        fput_records = fixture.fput_records

        if trigger == 'disabled':
            wrapper.repo.ui.setconfig(b'heptapod', b'clone-bundles', b'disabled')

        base_ctx = wrapper.commit_file('foo', message='Commit 0')
        # Adding various repo content
        wrapper.command('tag', b'v1.2.3', rev=base_ctx.hex())
        wrapper.commit_file('foo', message='Commit 1')

        if on_change:
            assert len(bg_commands) == 3  # indeed, we had 3 transactions
            (cmd, env), _ = bg_commands[-1]
            assert env[b'GL_REPOSITORY'] == b'project-123'
            assert env[b'HGRCPATH'] == b'/some/heptapod.hgrc'
            assert env.get(b'HEPTAPOD_HG_BUNDLE_SPEC')
            # let us launch it synchronously instead and benefit
            # from our Minio monkey patch
            wrapper.repo.ui.environ.update(env)
            wrapper.command(ASYNC_HEPTAPOD_CLONE_BUNDLE_GENERATE)
        else:
            wrapper.command('hpd-clone-bundles-refresh',
                            project_namespace_full_path=b'group/subgrp',
                            public=True,
                            )

        if trigger == 'disabled':
            assert not fput_records
            return

        fput_args = fput_records[0][0]
        assert fput_args[0] == fixture.bucket
        basename = fput_args[2].rsplit('/', 1)[1]
        assert fput_args[1] == '/'.join((fixture.bucket_subdir, basename))

        bundles = read_auto_gen(wrapper.repo)
        assert len(bundles) == 1
        bundle = bundles[0]
        assert isinstance(bundle, GeneratedBundle)
        assert bundle.ready
        assert bundle.basename == basename.encode()
        assert bundle.revs == 3
        assert bundle.file_url.decode() == '/'.join(
            (fixture.pub_base_url, fixture.bucket_subdir, basename)
        )


    @parametrize('result', ('success', 's3_failure'))
    def test_delete_clone_bundle(clone_bundles_fixture, monkeypatch, result):
        fixture = clone_bundles_fixture
        wrapper = fixture.hg_repo_wrapper
        rm_records = fixture.rm_records

        wrapper.commit_file('foo', message='Commit 0')
        wrapper.command('hpd-clone-bundles-refresh',
                        project_namespace_full_path=b'group/subgrp',
                        public=True,
                        )
        bundles = read_auto_gen(wrapper.repo)
        assert len(bundles) == 1
        bundle = bundles[0]
        assert isinstance(bundle, GeneratedBundle)

        def find_outdated_bundles(repo, bundles):
            return [bundle]

        if result == 's3_failure':
            def rm_obj(*a, **kw):
                raise S3Error(403,
                              message='patched',
                              resource=bundle.basename,
                              request_id='test-req',
                              host_id='some-host',
                              response=403)

            monkeypatch.setattr(Minio, 'remove_object', rm_obj)

        monkeypatch.setattr(clonebundles, 'find_outdated_bundles',
                            find_outdated_bundles)
        clonebundles.collect_garbage(wrapper.repo)

        if result == 'success':
            assert len(rm_records) == 1
            rm_args = rm_records[0][0]
            assert rm_args[0] == fixture.bucket
            basename = bundle.basename.decode('ascii')
            assert rm_args[1] == '/'.join((fixture.bucket_subdir, basename))
        else:
            # no crash and nothing else done
            assert len(rm_records) == 0
    @@ -27,5 +27,6 @@
            topic='',
            evolve='',
            rebase='',
            clonebundles='',
        ),
        heptapod={b'repositories-root': as_bytes(repos_root)})
    @@ -9,3 +9,4 @@
    urllib3<2
    attrs~=23.2.0
    python_jwt~=4.0.0
    minio~=7.2.15