Skip to content

Instantly share code, notes, and snippets.

@cosimoc
Created October 7, 2017 15:42
Show Gist options
  • Save cosimoc/588b07dd42aa9938260cfde9a04f6b7c to your computer and use it in GitHub Desktop.
# eosostree.py - Endless OSTree management library
#
# Copyright (C) 2016 Dan Nicholson <nicholson@endlessm.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Endless OSTree infrastructure module
eosostree is a Python 3 module providing configuration, classes and
functions for managing the Endless OSTree repositories.
The configuration can be accessed through the CONFIG dictionary module
attribute. This object is loaded from a JSON configuration file named
eosconfig.json. See the load_config() documentation for details.
"""
from collections import OrderedDict
import contextlib
import datetime
from debian import debian_support
from enum import IntEnum
import errno
import fcntl
import gi
gi.require_version('OSTree', '1.0')
from gi.repository import Gio, GLib, OSTree
import json
import logging
import os
import shutil
import subprocess
import sys
import time
# Module-level logger for the eosostree package.
logger = logging.getLogger(__name__)
# Absolute directory containing this module; used to locate bundled
# resources (eosconfig.json, the ostree-backup script).
PROGDIR = os.path.dirname(os.path.realpath(__file__))
# Release stages and subdirectories.
# Ordered least to most stable; the order is significant for the
# previous_stage()/next_stage() promotion helpers below.
RELEASE_STAGES = OrderedDict([
    ('dev', 'staging/dev'),
    ('demo', 'staging/demo'),
    ('prod', 'ostree'),
])
# Make sure this is only used on python3
if sys.version_info.major != 3:
    raise RuntimeError('{} can only be used with Python 3'
                       .format(__name__))
class EosOSTreeError(Exception):
    """Errors from the eosostree module.

    Accepts any number of arguments and joins their string
    representations with single spaces, mirroring print()-style usage::

        raise EosOSTreeError('Could not lock', path, 'in', n, 'seconds')
    """
    def __init__(self, *args):
        # Chain up so Exception.args is populated; without this,
        # repr() and pickling lose the original arguments.
        super().__init__(*args)
        self.msg = ' '.join(map(str, args))

    def __str__(self):
        return self.msg
# In OSTree 2017.11, the OSTree.RepoListRefsExtFlags GIR changed from an
# enumeration to a bitfield. Provide our own enumeration until that's
# straightened out upstream.
#
# https://github.com/ostreedev/ostree/issues/1243
class RepoListRefsExtFlags(IntEnum):
    """Compatibility enumeration for OSTree.RepoListRefsExtFlags

    The GIR type changed incompatibly in OSTree 2017.11 (enumeration to
    bitfield), so a local copy keeps us safe across versions.
    """
    NONE = 0             # No flags
    ALIASES = 1          # Only list aliases. Since: 2017.10
    EXCLUDE_REMOTES = 2  # Exclude remote refs. Since: 2017.11
# eosostree configuration. The 'repos' key is a dictionary defining the
# repository configuration. The top level keys are the repository
# basenames and each repository contains default default GPG keys, title
# and branch.
#
# Note: No default branch is specified for the runtimes since they don't
# actually use the eosX major branch, as the apps specify which minor
# series branch (eosX.Y) they need.
#
# GPG key info:
# 9E08D8DABA02FC46: EOS OSTree Signing Key 1 (EOSK1) [expires: 2019-05-13]
# FCF17B17F1F8E157: EOS Flatpak Signing Key 1 (EFSK1) [expires: 2021-06-10]
def read_config(path=None):
    """Read the eosostree configuration

    Load the JSON configuration file and return the resulting
    dictionary merged over built-in defaults. When path is None, the
    file is located by checking, in order of precedence:
    * the EOSOSTREE_CONFIG environment variable
    * ~/.config/ostree/eosconfig.json
    * eosconfig.json in the eosostree module directory

    As a convenience, this is run during module import and sets the
    module level CONFIG attribute. If a custom configuration file is
    needed, call read_config() with its path and use the configuration
    from the returned object rather than the module CONFIG attribute.
    """
    if path is None:
        path = os.environ.get('EOSOSTREE_CONFIG')
        if path is None:
            user_conf = os.path.expanduser('~/.config/ostree/eosconfig.json')
            if os.path.exists(user_conf):
                path = user_conf
            else:
                path = os.path.join(PROGDIR, 'eosconfig.json')
    # Built-in defaults; anything in the file overrides these keys.
    config = {
        'repos': {},
        'internal-repo-base': '/srv/ostree/www',
        'public-repo-base': '/srv/ostree/www',
        'internal-url': 'http://ostree.endlessm-sf.com',
        'public-url': 'https://ostree.endlessm.com',
        'public-dev-url': 'https://endless:kassyicNigNarHon@origin.ostree.endlessm.com',
        'backup-daily-s3-bucket': 'backups.daily.ostree.endlessm.com',
        'backup-daily-s3-region': 'us-west-2',
        'backup-weekly-s3-bucket': 'backups.weekly.ostree.endlessm.com',
        'backup-weekly-s3-region': 'us-west-2',
        'backup-monthly-s3-bucket': 'backups.monthly.ostree.endlessm.com',
        'backup-monthly-s3-region': 'us-west-2',
    }
    logger.info('Loading configuration file %s', path)
    with open(path) as conf_file:
        config.update(json.load(conf_file))
    return config
class EosOSTreeRepo(OSTree.Repo):
    """OSTree.Repo with locking

    This is a small wrapper around OSTree.Repo to manage a repository
    lock file. See the lock, unlock and lockcontext methods.

    Since the OSTree.Repo.new constructor can't be used from this
    subclass, the normal GObject constructor is used. The path keyword
    argument can be used to supply a Gio.File with the repo path. If no
    path is supplied, then one is guessed like OSTree.Repo.new_default.

    When https://bugzilla.gnome.org/show_bug.cgi?id=759442 lands, this
    is likely to go away.
    """
    # Repo lock file name. This intentionally chosen to be different
    # than the name used in the upstream locking work ($repo/lock) so
    # that deadlocks aren't introduced when that's landed and deployed.
    LOCK_FILE = '.eoslock'
    # Wait 30 minutes until locking timeout by default.
    LOCK_TIMEOUT = 30 * 60

    def __init__(self, **kwargs):
        # Emulate new_default by setting path if none given
        if kwargs.get('path') is None:
            # Current directory looks like a repository root
            if os.path.isdir('objects') and os.path.isfile('config'):
                path = '.'
            elif len(os.getenv('OSTREE_REPO', '')) > 0:
                path = os.environ['OSTREE_REPO']
            else:
                # Fall back to the system repository location
                path = '/ostree/repo'
            logger.info('No repo path supplied, using %s', path)
            kwargs['path'] = Gio.File.new_for_path(path)
        super().__init__(**kwargs)
        # File object for the flock; opened in open(), closed in __del__
        self.lock_file = None

    def open(self, cancellable=None):
        """Open the repository and the lock file

        Returns the value from OSTree.Repo.open. The lock file is
        created inside the repository if it does not exist; mode 'w'
        truncates it, which is fine since its contents are unused.
        """
        ret = super().open(cancellable=cancellable)
        lock_path = self.get_path().get_child(self.LOCK_FILE).get_path()
        logger.info('Opening lock file %s', lock_path)
        self.lock_file = open(lock_path, 'w')
        return ret

    def create(self, mode, cancellable=None):
        """Create and open the repository

        This is only needed to ensure that our open override is called
        after a repo is created.
        """
        ret = super().create(mode, cancellable=cancellable)
        if ret:
            ret = self.open(cancellable=cancellable)
        return ret

    def __del__(self):
        # Close the lock file handle; closing the fd also drops any
        # flock held on it. NOTE(review): relies on GC timing — confirm
        # callers don't depend on prompt lock release.
        if self.lock_file is not None:
            self.lock_file.close()
            self.lock_file = None

    def lock(self, exclusive=False, timeout=LOCK_TIMEOUT):
        """Acquire a flock on the repository

        Takes a flock on the repository lock file. By default the lock
        is shared. An exclusive lock can be acquired by setting exclusive
        to True. The lock acquisition will block for timeout seconds.
        If timeout is None, the lock acquisition will block
        indefinitely.

        Raises EosOSTreeError if the lock cannot be acquired within
        timeout seconds.
        """
        lock_path = self.lock_file.name
        logger.info('Locking file %s %s', lock_path,
                    'exclusive' if exclusive else 'shared')
        mode = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
        lock_fd = self.lock_file.fileno()
        if timeout is None:
            # Full blocking lock
            fcntl.flock(lock_fd, mode)
        else:
            # Try non-blocking lock and sleep until timeout exhausted
            mode |= fcntl.LOCK_NB
            wait = timeout
            while True:
                try:
                    fcntl.flock(lock_fd, mode)
                    break
                except IOError as err:
                    # EWOULDBLOCK means another process holds the lock;
                    # anything else is a real error
                    if err.errno != errno.EWOULDBLOCK:
                        raise
                    # Fail if the timeout has been reached
                    if wait <= 0:
                        raise EosOSTreeError('Could not lock', lock_path,
                                             'in', timeout, 'seconds')
                    # Try again in 1 second, logging every 30 seconds
                    if wait % 30 == 0:
                        logger.debug('Could not acquire lock %s, %d second%s '
                                     'until timeout', lock_path, wait,
                                     's' if wait > 1 else '')
                    wait -= 1
                    time.sleep(1)

    def unlock(self):
        """Remove the repository flock"""
        logger.info('Unlocking file %s', self.lock_file.name)
        fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_UN)

    @contextlib.contextmanager
    def lockcontext(self, exclusive=False, timeout=LOCK_TIMEOUT):
        """Context manager for lock()

        Acquires the lock on entry and always releases it on exit.
        """
        self.lock(exclusive=exclusive, timeout=timeout)
        try:
            yield
        finally:
            self.unlock()
def enumerate_deltas(repo, refs=None):
    """Collect static deltas from an OSTree repository

    Returns a dictionary keyed by delta name where each value contains
    the source revision ('from', empty string for a scratch delta), the
    target revision ('to'), the modified base64 delta name ('base64')
    and the repository-relative path ('relpath'). When refs is a
    non-empty sequence, only deltas whose target revision resolves from
    one of those refs are included.
    """
    # Use a None sentinel instead of a mutable default argument.
    if refs is None:
        refs = []
    # Resolve the refs to filter on; missing refs are skipped.
    target_revs = set()
    for ref in refs:
        _, rev = repo.resolve_rev(ref, True)
        if rev:
            target_revs.add(rev)
    if len(refs) > 0 and len(target_revs) == 0:
        # None of the specified refs exist
        return {}
    _, all_deltas = repo.list_static_delta_names(None)
    target_deltas = {}
    for name in all_deltas:
        # If there's a - in the name, then the target revision is the
        # trailing part. Otherwise, it's the entire delta name.
        src, sep, dest = name.partition('-')
        if len(sep) == 0:
            dest = src
            src = ''
        if len(target_revs) > 0 and dest not in target_revs:
            continue
        # Get the modified base64 name
        dest_bytes = OSTree.checksum_to_bytes(dest)
        delta_b64 = OSTree.checksum_b64_from_bytes(dest_bytes)
        if len(src) > 0:
            src_bytes = OSTree.checksum_to_bytes(src)
            src_b64 = OSTree.checksum_b64_from_bytes(src_bytes)
            delta_b64 = src_b64 + '-' + delta_b64
        # Calculate the relative path (first 2 base64 chars are the
        # subdirectory, matching ostree's on-disk delta layout)
        relpath = 'deltas/%s/%s' %(delta_b64[0:2], delta_b64[2:])
        target_deltas[name] = {
            'from': src,
            'to': dest,
            'base64': delta_b64,
            'relpath': relpath,
        }
    return target_deltas
def ref_info(ref):
    """Get info about a reference

    Returns a dictionary of attributes about the reference: ref,
    release, type, name, arch, branch, version, major, minor, micro,
    minor_series, major_branch, minor_branch and tag_prefix. Version
    fields are debian_support.Version instances or None.

    Assume the ref is one of the following forms:
    1. $branch/$platform (deprecated)
    2. os/$product/$branch/$platform (deprecated)
    3. Anything else where the branch is the last part. E.g.,
       runtime/$name/$arch/$branch

    Raises EosOSTreeError for any other shape.
    """
    info = dict(ref=ref)
    parts = ref.split('/')
    # If the ref is already in the release namespace remove that prefix
    if parts[0] == 'release':
        info['release'] = True
        parts.pop(0)
    else:
        info['release'] = False
    if len(parts) == 2:
        if parts[0] == 'appstream':
            info['type'], info['arch'] = parts
            # No name or branch for appstream
            info['name'], info['branch'] = (None, None)
        else:
            # Assume this is old style eos branch/platform
            info['type'] = 'os'
            info['name'] = 'eos'
            info['branch'], info['arch'] = parts
    elif len(parts) == 4:
        info['type'], info['name'] = parts[0:2]
        # Try to detect deprecated branch/platform format
        if parts[0] == 'os' and \
           (parts[2] == 'master' or parts[2].startswith('eos')):
            info['branch'], info['arch'] = parts[2:]
        else:
            # Newer format with branch as last component
            info['arch'], info['branch'] = parts[2:]
    else:
        # Reject everything else for now
        raise EosOSTreeError('Unrecognized ref format', ref)
    # Determine versioning info. This is only done for Endless stable
    # refs, which we assume for release and branches starting with eos.
    info['version'] = info['major'] = info['minor'] = info['micro'] = None
    if info['release']:
        # Branch is a full version
        info['version'] = debian_support.Version(info['branch'])
        # Split out the version components
        vparts = str(info['version']).split('.')
        info['major'] = debian_support.Version(vparts[0])
        if len(vparts) > 1:
            info['minor'] = debian_support.Version(vparts[1])
        if len(vparts) > 2:
            info['micro'] = debian_support.Version(vparts[2])
    elif info['branch'] is not None and info['branch'] != 'master':
        # Strip the eos prefix from our branches
        branch = info['branch'].replace('eos', '', 1)
        vparts = branch.split('.')
        if len(vparts) > 1:
            info['minor'] = debian_support.Version(vparts[1])
        info['major'] = debian_support.Version(vparts[0])
    # Numerical minor series. For release refs, create this only if
    # there's a micro version. For non-release refs, create this if the
    # minor version is known.
    info['minor_series'] = None
    if ((info['release'] and info['micro'] is not None) or
            (not info['release'] and info['minor'] is not None)):
        minor_series_str = '{}.{}'.format(info['major'], info['minor'])
        info['minor_series'] = debian_support.Version(minor_series_str)
    # Major/minor branch names. Most of the time an "eos" prefix is used
    # on the branches, but the new SDK uses simple numerical branches
    # like GNOME.
    branch_prefix = 'eos'
    if info['release']:
        # Release ref. The branch is just the version, so assume eos
        # unless this is a new runtime.
        if (info['type'] == 'runtime' and
                info['name'].startswith('com.endlessm.apps.')):
            branch_prefix = ''
    elif info['branch'] is not None and not info['branch'].startswith('eos'):
        # Non-release ref. Use the eos prefix only if the branch has it.
        branch_prefix = ''
    info['major_branch'] = info['minor_branch'] = None
    if info['major'] is not None:
        info['major_branch'] = '{}{}'.format(branch_prefix,
                                             info['major'])
    if info['minor_series'] is not None:
        info['minor_branch'] = '{}{}'.format(branch_prefix,
                                             info['minor_series'])
    # Release tag prefix, e.g. release/os/eos/amd64; None when any of
    # type/name/arch is unset (appstream refs have no name).
    info['tag_prefix'] = None
    if all([info['type'], info['name'], info['arch']]):
        info['tag_prefix'] = '/'.join(['release', info['type'],
                                       info['name'], info['arch']])
    return info
def update_branch_config_file(src_repo, dest_repo, dry_run=False):
    """Copy the eos-branch config file between OSTree repositories

    Does nothing when the source repo has no eos-branch file. With
    dry_run, only prints what would be copied.
    """
    src = os.path.join(src_repo.get_path().get_path(), 'eos-branch')
    if not os.path.exists(src):
        # Only the OS repos contain the branch file
        return
    dest = os.path.join(dest_repo.get_path().get_path(), 'eos-branch')
    print('== Copying', src, 'to', dest, '==', flush=True)
    if not dry_run:
        shutil.copy2(src, dest)
def link_or_copy(src, dest):
    """Hardlink a file or fallback to copying. This is used as the
    copy_function for shutil.copytree.
    """
    try:
        # Remove first and replace with a link. Use lexists() rather
        # than exists() so a dangling symlink at dest is also removed;
        # exists() reports False for a broken symlink and os.link()
        # would then fail with FileExistsError.
        if os.path.lexists(dest):
            os.unlink(dest)
        os.link(src, dest)
        shutil.copystat(src, dest)
    except OSError as err:
        # If the error was EXDEV (invalid cross device link), fall back
        # to a regular copy. Otherwise, raise again.
        if err.errno == errno.EXDEV:
            shutil.copy2(src, dest)
        else:
            raise
def pull_local_full(src_repo, dest_repo, refs=None, depth=0,
                    dry_run=False):
    """Pull data between local OSTree repositories

    This handles pulling the refs as well as static deltas until
    https://bugzilla.gnome.org/show_bug.cgi?id=765701 is fixed.

    When refs is None or empty, every ref in the source repo is
    pulled. depth is passed straight through to the pull options.
    With dry_run, only the informational messages are printed.
    """
    src_file = src_repo.get_path()
    src_path = src_file.get_path()
    dest_file = dest_repo.get_path()
    dest_path = dest_file.get_path()
    # Copy the eos-branch config file
    update_branch_config_file(src_repo, dest_repo, dry_run=dry_run)
    # Pull all refs if none specified
    if refs is None or len(refs) == 0:
        _, refs_info = src_repo.list_refs(None, None)
        refs = sorted(refs_info.keys())
    # Print some info on updates
    print('== Pulling the following refs from', src_path, 'to',
          dest_path, '==\n ', '\n '.join(refs), flush=True)
    # Pull refs and objects
    pull_args = {
        'flags': GLib.Variant('i', OSTree.RepoPullFlags.NONE),
        'refs': GLib.Variant('as', refs),
        'depth': GLib.Variant('i', depth),
        # Pull objects directly instead of processing deltas
        'disable-static-deltas': GLib.Variant('b', True),
    }
    pull_var = GLib.Variant('a{sv}', pull_args)
    src_uri = src_file.get_uri()
    if not dry_run:
        # In upstream locking, only the transaction is locked, but we
        # don't have access to the transaction from outside the pull.
        # Just take a shared lock for the whole thing.
        #
        # Furthermore, since we have access to the source repo, lock
        # that too so things aren't deleted during the pull.
        with src_repo.lockcontext(), dest_repo.lockcontext():
            dest_repo.pull_with_options(src_uri, pull_var, None, None)
    # Pull doesn't include static deltas, so enumerate the existing
    # deltas and copy any that match the target revision and don't exist
    # in the destination repo. See
    # https://bugzilla.gnome.org/show_bug.cgi?id=765701.
    src_deltas = enumerate_deltas(src_repo, refs)
    dest_deltas = enumerate_deltas(dest_repo, refs)
    pull_deltas = {k: v for k, v in src_deltas.items()
                   if k not in dest_deltas}
    # Copy the deltas with both repos locked so the deltas aren't
    # deleted while copying
    with src_repo.lockcontext(), dest_repo.lockcontext():
        for name, info in pull_deltas.items():
            delta_src_path = os.path.join(src_path, info['relpath'])
            delta_dest_path = os.path.join(dest_path, info['relpath'])
            print('== Copying delta', name, 'from', src_path, 'to',
                  dest_path, '==', flush=True)
            # Copy the delta directory recursively with hardlinks if
            # possible
            if not dry_run:
                shutil.rmtree(delta_dest_path, ignore_errors=True)
                shutil.copytree(delta_src_path, delta_dest_path,
                                symlinks=True, copy_function=link_or_copy)
def is_flatpak_repo(repo):
    """See if the OSTree repository contains flatpak apps or runtimes

    A repository counts as a flatpak repo when it has at least one ref
    under either the 'app' or the 'runtime' namespace.
    """
    for namespace in ('app', 'runtime'):
        _, refs = repo.list_refs(namespace, None)
        if refs:
            return True
    return False
def update_repo(repo, gpg_keys=None, title=None, default_branch=None, dry_run=False):
    """Update OSTree repository metadata

    For standard OSTree repos, this will update the summary file. For
    flatpak repos, this will also update the appstream branch and add a
    title and/or set a default branch to the summary if supplied.

    If gpg_keys or title are None, defaults will be used. If gpg_keys is
    a list, those keys will be used for signing.

    Defaults come from CONFIG['repos'][<repo basename>]; raises
    KeyError when the repo isn't present in the configuration.
    """
    repo_path = repo.get_path().get_path()
    repo_name = os.path.basename(repo_path)
    repo_defaults = CONFIG['repos'][repo_name]
    if gpg_keys is None:
        gpg_keys = repo_defaults['gpg-keys']
    if title is None:
        title = repo_defaults['title']
    if default_branch is None:
        default_branch = repo_defaults['default-branch']
    start = datetime.datetime.now()
    if is_flatpak_repo(repo):
        # Build flatpak build-update-repo command since the appstream
        # regeneration is not provided to libflatpak.
        cmd = ['flatpak', 'build-update-repo']
        if title is not None:
            cmd.append('--title=' + title)
        if default_branch is not None:
            cmd.append('--default-branch=' + default_branch)
        cmd.extend(['--gpg-sign=' + key for key in gpg_keys])
        cmd.append(repo_path)
        print('== Updating', repo_path, 'flatpak metadata using',
              ' '.join(cmd), '==', flush=True)
        if not dry_run:
            # Lock the repo exclusively while updating the summary so
            # refs aren't deleted and 2 processes can't race signing the
            # summary file
            with repo.lockcontext(exclusive=True):
                subprocess.check_call(cmd)
    else:
        print('== Updating', repo_path, 'summary file with keys',
              ' '.join(gpg_keys), '==', flush=True)
        if not dry_run:
            # Lock the repo exclusively while updating the summary so
            # refs aren't deleted and 2 processes can't race signing the
            # summary file
            with repo.lockcontext(exclusive=True):
                repo.regenerate_summary(None, None)
                if len(gpg_keys) > 0:
                    repo.add_gpg_signature_summary(gpg_keys, None, None)
    if not dry_run:
        elapsed = datetime.datetime.now() - start
        # Drop the microseconds from the elapsed time for display
        print('Repo update completed in', str(elapsed).rsplit('.')[0],
              flush=True)
def previous_stage(stage):
    """Get the name of the previous release stage

    Stages are ordered as in RELEASE_STAGES; raises EosOSTreeError
    when stage is already the first one.
    """
    names = list(RELEASE_STAGES)
    index = names.index(stage)
    if index == 0:
        raise EosOSTreeError('No stages prior to', stage)
    return names[index - 1]
def next_stage(stage):
    """Get the name of the next release stage

    Stages are ordered as in RELEASE_STAGES; raises EosOSTreeError
    when stage is already the last one.
    """
    names = list(RELEASE_STAGES)
    index = names.index(stage)
    if index == len(names) - 1:
        raise EosOSTreeError('No stages after', stage)
    return names[index + 1]
def staged_repo_path(name, base=None, stage='dev'):
    """Get the full path to a repository based on its stage

    Defaults to the configured public repo base directory when base is
    not supplied.
    """
    root = CONFIG['public-repo-base'] if base is None else base
    return os.path.join(root, RELEASE_STAGES[stage], name)
def staged_repo_url(name, public=True, stage='dev'):
    """Get the full URL to the repository based on its stage

    Only dev-stage repositories exist on the internal server; any
    other stage with public=False raises EosOSTreeError.
    """
    if not public and stage != 'dev':
        raise EosOSTreeError('Only dev repositories on internal server')
    if not public:
        server = CONFIG['internal-url']
    elif stage == 'dev':
        server = CONFIG['public-dev-url']
    else:
        server = CONFIG['public-url']
    return '%s/%s/%s' %(server, RELEASE_STAGES[stage], name)
class OSTreeBackup(object):
    """S3 backup settings

    Thin wrapper around the external ostree-backup script, which must
    live in the same directory as this module.
    """
    def __init__(self, stage, repo_base=None, bucket=None, region=None,
                 dry_run=False):
        self.stage = stage
        self.repo_base = repo_base or CONFIG['public-repo-base']
        self.bucket = bucket
        self.region = region
        self.dry_run = dry_run
        # Make sure the ostree-backup script is here
        self.script = os.path.join(PROGDIR, 'ostree-backup')
        if not os.path.exists(self.script):
            raise EosOSTreeError('Cannot find backup script at',
                                 self.script)

    def backup(self, repo):
        """Run the ostree-backup script for a single repository."""
        cmd = [self.script, '--root', self.repo_base,
               '--stage', self.stage]
        if self.dry_run:
            cmd.append('--dry-run')
        if self.bucket is not None:
            cmd.extend(['--bucket', self.bucket])
        if self.region is not None:
            cmd.extend(['--region', self.region])
        cmd.append(repo)
        print('Running:', ' '.join(cmd), flush=True)
        subprocess.check_call(cmd)
class OSTreeReleaseRepo(object):
    """Promote refs from a source repo to a destination release repo.

    Handles pulling the branches, tagging the released revisions in
    both repositories, creating compatibility symlinks and branches,
    and regenerating repository metadata. release() runs the whole
    sequence; the individual steps are also public methods.
    """
    def __init__(self, src_path, dest_path, branches, version, depth=0,
                 gpg_keys=None, compat_repos=[], compat_branches=[],
                 dry_run=False):
        # NOTE(review): compat_repos/compat_branches use mutable
        # defaults; harmless here since they're never mutated, but
        # worth cleaning up.
        self.src_path = src_path
        self.dest_path = dest_path
        self.branches = branches
        self.version = version
        self.depth = depth
        self.gpg_keys = gpg_keys
        self.compat_repos = compat_repos
        self.compat_branches = compat_branches
        self.dry_run = dry_run
        print('=== Releasing repo', self.src_path, 'to', self.dest_path,
              'for', self.version, '===', flush=True)
        # Check that the source exists
        if not os.path.exists(self.src_path):
            raise EosOSTreeError('Source repository "%s" does not exist'
                                 % self.src_path)
        # Open the source repo
        src_repo_file = Gio.File.new_for_path(self.src_path)
        self.src_repo = EosOSTreeRepo(path=src_repo_file)
        self.src_repo.open(None)
        # Open the dest repo and initialize if necessary
        dest_repo_file = Gio.File.new_for_path(self.dest_path)
        self.dest_repo = EosOSTreeRepo(path=dest_repo_file)
        try:
            self.dest_repo.open(None)
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt); narrowing to GLib.Error would be safer.
        except:
            print('== Initializing ostree repository', self.dest_path,
                  '==', flush=True)
            os.makedirs(self.dest_path, exist_ok=True)
            self.dest_repo.create(OSTree.RepoMode.ARCHIVE_Z2, None)

    def rev_parse(self, repo, ref, missing_ok=False):
        """Resolve ref to a revision; None when missing_ok and absent."""
        ret, rev = repo.resolve_rev(ref, missing_ok)
        return rev

    def write_ref(self, repo, ref, spec):
        """Point ref at the revision spec resolves to; returns the rev."""
        rev = self.rev_parse(repo, spec)
        repo.set_ref_immediate(None, ref, rev, None)
        return rev

    def promote(self):
        """Pull the configured branches into the destination repo."""
        # Show some information on what will be changed
        for branch in self.branches:
            print('== Promoting branch', branch, 'from', self.src_path,
                  'to', self.dest_path, '==')
            # Make sure the branch exists in the source repo
            src_rev = self.rev_parse(self.src_repo, branch)
            # Try to get the current branch ref
            dest_rev = self.rev_parse(self.dest_repo, branch,
                                      missing_ok=True)
            if dest_rev:
                print('== Updating', self.dest_path, branch, 'from',
                      dest_rev, 'to', src_rev, '==')
            else:
                print('== Creating new branch', branch, 'in',
                      self.dest_path, 'from', src_rev, '==')
            sys.stdout.flush()
        # Pull the data
        pull_local_full(self.src_repo,
                        self.dest_repo,
                        refs=self.branches,
                        depth=self.depth,
                        dry_run=self.dry_run)
        # Make sure all the refs got updated appropriately
        if not self.dry_run:
            for branch in self.branches:
                src_rev = self.rev_parse(self.src_repo, branch)
                dest_rev = self.rev_parse(self.dest_repo, branch)
                if dest_rev == src_rev:
                    print(self.dest_path, branch, 'updated to', dest_rev)
                else:
                    raise EosOSTreeError(self.dest_path, branch,
                                         'not updated to', src_rev)
                sys.stdout.flush()

    def tag_name(self, branch):
        """Build the release tag ref for branch at this version."""
        # Determine the tag name based on the branch and version.
        prefix = ref_info(branch)['tag_prefix']
        if prefix is None:
            raise EosOSTreeError('Could not determine tag prefix for',
                                 branch)
        return '%s/%s' %(prefix, self.version)

    def check_tags(self):
        """Fail early if an existing tag points at a different rev."""
        # Ensure that if the tags already exist that they point to the
        # right revision.
        for branch in self.branches:
            # Get the tag name and revision from the source repo branch
            tag_name = self.tag_name(branch)
            tag_revision = self.rev_parse(self.src_repo, branch)
            for path, repo in [(self.src_path, self.src_repo),
                               (self.dest_path, self.dest_repo)]:
                print('== Checking for existing tag', tag_name, 'in',
                      path, '==')
                current_tag = self.rev_parse(repo, tag_name,
                                             missing_ok=True)
                if current_tag is not None and \
                   current_tag != tag_revision:
                    raise EosOSTreeError(path, 'tag', tag_name,
                                         'does not match', branch)
            sys.stdout.flush()

    def tag(self):
        """Create the release tags in both repos (idempotent)."""
        # If necessary, add a tag pointing to the released ref in both
        # the source and dest repositories while under lock.
        with self.src_repo.lockcontext(), self.dest_repo.lockcontext():
            for branch in self.branches:
                tag_name = self.tag_name(branch)
                for path, repo in [(self.src_path, self.src_repo),
                                   (self.dest_path, self.dest_repo)]:
                    current_tag = self.rev_parse(repo, tag_name,
                                                 missing_ok=True)
                    if current_tag is not None:
                        print('== Tag', tag_name, 'already exists in',
                              path, '==')
                        continue
                    print('== Creating', path, 'tag', tag_name,
                          'pointing', 'to', branch, '==')
                    if not self.dry_run:
                        self.write_ref(repo, tag_name, branch)
                sys.stdout.flush()

    def compat(self):
        """Create compatibility repo symlinks and branch aliases."""
        # Create repo compat symlinks
        for link in self.compat_repos:
            # NOTE(review): self.repo is never assigned anywhere in this
            # class, so this loop raises AttributeError whenever
            # compat_repos is non-empty. Possibly
            # os.path.basename(self.dest_path) was intended — confirm
            # before relying on compat_repos.
            print('== Creating compat repo link', link, 'to',
                  self.repo, '==')
            dest = os.path.join(os.path.dirname(self.dest_path), link)
            os.symlink(self.repo, dest)
            sys.stdout.flush()
        # Create repo compat branches while under lock
        with self.dest_repo.lockcontext():
            for src, dest in self.compat_branches:
                if src not in self.branches:
                    continue
                print('== Updating', self.dest_path, 'branch', dest, 'to',
                      src, '==')
                if not self.dry_run:
                    self.write_ref(self.dest_repo, dest, src)
                sys.stdout.flush()

    def update_repo_metadata(self):
        """Regenerate and re-sign both repos' summary metadata."""
        # Update source and destination repo's metadata
        update_repo(self.src_repo,
                    gpg_keys=self.gpg_keys,
                    dry_run=self.dry_run)
        update_repo(self.dest_repo,
                    gpg_keys=self.gpg_keys,
                    dry_run=self.dry_run)

    def release(self):
        """Run the whole release process in order."""
        # Do the whole release process
        self.check_tags()
        self.promote()
        self.tag()
        self.compat()
        self.update_repo_metadata()
class OSTreeDeltaGenerator(object):
    """Generate static deltas for a set of refs in a repository.

    For each ref, delta sources are chosen first from previous release
    tags (_get_ref_release_sources) and then from the commit's parents
    (_get_ref_parent_sources), up to num_deltas sources per ref.
    """
    # Default number of deltas
    NUM_DELTAS = 2

    def __init__(self, repo, refs, num_deltas=NUM_DELTAS,
                 upgrade_deltas=False, dry_run=False):
        if num_deltas <= 0:
            raise EosOSTreeError('Must generate at least 1 delta')
        self.repo = repo
        self.path = self.repo.get_path().get_path()
        self.refs = refs
        self.num_deltas = num_deltas
        # When True, allow deltas from an older major/minor series
        # (upgrade deltas) instead of only within the same series.
        self.upgrade_deltas = upgrade_deltas
        self.dry_run = dry_run
        # Cache the list of existing deltas
        _, deltas_list = self.repo.list_static_delta_names(None)
        self.existing_deltas = set(deltas_list)

    def _get_ref_release_sources(self, info, rev, sources):
        """Find previous releases to use as delta sources

        The sources list is appended to with tuples of version, ref and
        revision. At most num_deltas - len(sources) entries are added.
        """
        num_needed = self.num_deltas - len(sources)
        if num_needed <= 0:
            return
        if info['major'] is None:
            # Only look for releases on stable refs (not master)
            return
        if info['type'] == 'os' and info['minor_series'] is None:
            # For OS refs, only make deltas against minor version
            # branches (e.g., eos3.2). We can't determine the correct
            # versions to make deltas to on the major version branches
            # (e.g., eos3), and they point to some other minor series
            # ref, anyway.
            return
        # Get a list of release tags for this ref
        if info['tag_prefix'] is None:
            print('Ref', info['ref'], 'has no release tag prefix')
            return
        flags = RepoListRefsExtFlags.NONE
        _, tags = self.repo.list_refs_ext(info['tag_prefix'], flags, None)
        if len(tags) == 0:
            print('Could not find any version tags for', info['ref'])
            return
        # See if this ref is the same as one of the tags
        this_version = None
        for tag, tag_rev in tags.items():
            tag_version = ref_info(tag)['version']
            if tag_version is None:
                raise EosOSTreeError('Could not determine version for '
                                     'tag ref', tag)
            if tag_rev == rev:
                this_version = tag_version
                break
        # Enumerate the tags into versions
        previous_versions = []
        for tag, tag_rev in tags.items():
            # Don't make a delta to itself
            if tag_rev == rev:
                continue
            tag_info = ref_info(tag)
            tag_version = tag_info['version']
            tag_major = tag_info['major']
            tag_minor_series = tag_info['minor_series']
            # Skip newer versions
            if this_version is not None and tag_version >= this_version:
                continue
            # Look at the minor series if it's known, otherwise the
            # major version
            if info['minor_series'] is None:
                # Skip newer major versions
                if tag_major > info['major']:
                    continue
                if self.upgrade_deltas:
                    # Only create deltas to an older series
                    if tag_major >= info['major']:
                        continue
                else:
                    # Only create deltas to the same series
                    if tag_major != info['major']:
                        continue
            else:
                # Skip newer minor series
                if tag_minor_series > info['minor_series']:
                    continue
                if self.upgrade_deltas:
                    # Only create deltas to an older series
                    if tag_minor_series >= info['minor_series']:
                        continue
                else:
                    # Only create deltas to the same series
                    if tag_minor_series != info['minor_series']:
                        continue
            previous_versions.append((tag_version, tag, tag_rev))
        # Use the newest tags as sources for the deltas
        sources += sorted(previous_versions)[-num_needed:]

    def _get_ref_parent_sources(self, info, rev, sources):
        """Find parent commits to use as delta sources

        The sources list is appended to with tuples of version, ref and
        revision. The version element is always None for parents.
        """
        num_needed = self.num_deltas - len(sources)
        if num_needed <= 0:
            return
        # Use the commit's parents as delta sources.
        for i in range(1, num_needed + 1):
            # Use the ^ ref notation to get parent revision
            parent = info['ref'] + i * '^'
            # Unfortunately, ostree returns an error even if allow_noent
            # is True when using ^ refspec notation
            try:
                _, parent_rev = self.repo.resolve_rev(parent, True)
                if parent_rev is None:
                    # No more parents
                    break
                # Make sure we actually have the commit and it's full
                _, commit, state = self.repo.load_commit(parent_rev)
                if state == OSTree.RepoCommitState.REPO_COMMIT_STATE_PARTIAL:
                    print('Skipping partial source', parent)
                    break
            except GLib.Error as err:
                if err.matches(Gio.io_error_quark(),
                               Gio.IOErrorEnum.FAILED):
                    # No more parents
                    break
                elif err.matches(Gio.io_error_quark(),
                                 Gio.IOErrorEnum.NOT_FOUND):
                    # Parent commit is pruned
                    break
                else:
                    raise
            sources.append((None, parent, parent_rev))

    def _get_ref_delta_sources(self, ref, rev):
        """Get a list of refs to use as static delta sources for ref

        The values in the list are a tuple of version, ref and revision.
        """
        info = ref_info(ref)
        sources = []
        self._get_ref_release_sources(info, rev, sources)
        self._get_ref_parent_sources(info, rev, sources)
        return sources

    def generate_deltas(self, verbose=False):
        """Create static deltas for all refs

        Returns the list of newly generated delta names. Already
        existing deltas and partial/missing commits are skipped.
        """
        new_deltas = []
        for ref in self.refs:
            # Get the revision for this ref and make sure the commit
            # fully exists
            _, this_rev = self.repo.resolve_rev(ref, False)
            try:
                _, commit, state = self.repo.load_commit(this_rev)
                if state == OSTree.RepoCommitState.REPO_COMMIT_STATE_PARTIAL:
                    print('Skipping partial target', ref)
                    continue
            except GLib.Error as err:
                if err.matches(Gio.io_error_quark(),
                               Gio.IOErrorEnum.NOT_FOUND):
                    # Ref points to a missing commit
                    print('WARNING:', ref, 'commit', this_rev,
                          'is missing')
                    continue
                else:
                    raise
            # Get the sources for deltas
            delta_sources = self._get_ref_delta_sources(ref, this_rev)
            # Create the deltas
            for _, tag, tag_rev in delta_sources:
                delta_name = '-'.join([tag_rev, this_rev])
                # Don't generate a delta that already exists
                if delta_name in self.existing_deltas:
                    print('Delta from', tag, 'to', ref, 'already',
                          'exists, skipping')
                    continue
                print('Creating', self.path, 'static delta from',
                      tag, 'to', ref, flush=True)
                if not self.dry_run:
                    delta_args = {
                        'verbose': GLib.Variant('b', verbose),
                    }
                    start = datetime.datetime.now()
                    # Lock the repo so the revisions aren't pruned out
                    # from under the delta generation
                    with self.repo.lockcontext():
                        self.repo.static_delta_generate(
                            OSTree.StaticDeltaGenerateOpt.MAJOR,
                            tag_rev, this_rev, None,
                            GLib.Variant('a{sv}', delta_args))
                    elapsed = datetime.datetime.now() - start
                    print('Delta completed in',
                          str(elapsed).rsplit('.')[0], flush=True)
                self.existing_deltas.add(delta_name)
                new_deltas.append(delta_name)
        return new_deltas
# Load the configuration now in the module level CONFIG attribute.
# See read_config() for the configuration file lookup order.
CONFIG = read_config()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment