Skip to content

Instantly share code, notes, and snippets.

@adiroiban
Last active September 1, 2022 12:54
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save adiroiban/2cb52f30b187c24e71692dc4a8043b90 to your computer and use it in GitHub Desktop.
GitHub Actions poor man's replacement for the awesome Buildbot try tool.
#
# A workflow which is only available for manual trigger.
#
# Poor man's replacement for the Buildbot try tool: it applies a base64
# encoded diff on top of the pushed branch and runs the tests.
#
name: Try-Patch

on:
  workflow_dispatch:
    inputs:
      tests:
        description: Run selected tests
        default: ""
        required: false
      job:
        description: Specific job to execute (windows | linux)
        default: ""
        required: false
      diff:
        description: Diff in base64
        default: ""
        required: false

# Only one try run per branch; a newer trigger cancels the one in progress.
concurrency:
  group: try-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  linux:
    # The type of runner that the job will run on
    runs-on: ubuntu-20.04
    # Run when no explicit job was requested, or when this job was requested.
    if: github.event.inputs.job == '' || github.event.inputs.job == 'linux'

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2
          clean: false

      - name: Unpack diff
        if: ${{ github.event.inputs.diff }}
        # SECURITY: pass the workflow_dispatch input through an environment
        # variable rather than interpolating it into the script text, so a
        # crafted input can't inject shell commands (GitHub Actions script
        # injection hardening).
        env:
          DIFF_B64: ${{ github.event.inputs.diff }}
        run: |
          echo "$DIFF_B64" | base64 -d > patch.diff
          cat patch.diff
          git apply -v patch.diff

      - name: Deps
        run: ./brink.sh deps

      - uses: chevah/python-info-action@v1
        with:
          python-path: build-brink/*/python

      - name: Test
        # Same hardening as above: keep the user-supplied test selection out
        # of the script text. $TESTS is deliberately unquoted so multiple
        # test names are passed as separate arguments.
        env:
          TESTS: ${{ github.event.inputs.tests }}
        run: ./brink.sh test_ci $TESTS

  windows:
    # The type of runner that the job will run on
    runs-on: nt-10
    if: github.event.inputs.job == '' || github.event.inputs.job == 'windows'

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2
          clean: false

      - name: Set Windows path
        if: runner.os == 'Windows'
        shell: powershell
        run: |
          echo "C:/Program Files/Git/usr/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append

      - name: Unpack diff
        if: ${{ github.event.inputs.diff }}
        env:
          DIFF_B64: ${{ github.event.inputs.diff }}
        run: |
          echo "$DIFF_B64" | base64 -d > patch.diff
          cat patch.diff
          git apply -v patch.diff

      - name: Deps
        run: ./brink.sh deps

      - name: Test
        env:
          TESTS: ${{ github.event.inputs.tests }}
        run: ./brink.sh test_ci $TESTS
@task
@cmdopts([
    ('workflow=', 'w', 'Name of workflow for which to execute the actions.'),
    ('job=', 'j', 'Execute a specific job'),
    ('tests=', 't', 'Tests to execute'),
    ('step=', 's', 'Show output only for step'),
    ('trigger', '', 'Only trigger and don\'t wait for run completion'),
    ('debug', 'd', 'Show debug output'),
])
def actions_try(options):
    """
    Manual trigger of workflow based on current branch uncommitted diff.

    Make sure to stage/add any new files.

    It will automatically push the local branch to make sure remote and local
    are on the same base.
    """
    try:
        target = options.actions_try.workflow
    except AttributeError:
        print('--workflow is required.')
        sys.exit(1)

    command_start = datetime.now()
    trigger = options.actions_try.get('trigger', False)
    tests = options.actions_try.get('tests', '')
    job = options.actions_try.get('job', '')
    debug = options.actions_try.get('debug', False)
    target_step = options.actions_try.get('step', '')

    branch = pave.git.branch_name
    diff = pave.git.diff(ref=None)
    if debug:
        print(diff)
    # workflow_dispatch inputs are plain strings, so the diff travels base64.
    diff = b64encode(diff.encode('utf-8'))

    # Push the latest changes to remote repo, as otherwise the diff will
    # not be valid.
    print('Pushing all branch commits...')
    pave.git.push()

    payload = {
        'ref': branch,
        'inputs': {
            'tests': tests,
            'diff': diff.decode('ascii'),
            'job': job,
            },
        }

    # Triggering the run will not give us any positive feedback.
    url = '/actions/workflows/{}/dispatches'.format(target)
    result, response = _github_api(url, method='POST', json=payload)
    if response.status_code != 204:
        print("Failed to dispatch action: {}".format(result))
        sys.exit(1)

    # We need to pool the status to see if we get our run ID.
    # It will pool every 1 second but print status every 5 seconds.
    sleep = 1
    in_progress = []
    for i in range(30):
        time.sleep(sleep)
        # FIX: the query previously ended with a stray comma
        # ("event=workflow_dispatch,") which corrupted the `event` filter
        # value sent to the GitHub API.
        url = '/actions/runs?branch={}&event=workflow_dispatch'.format(branch)
        result, _ = _github_api(url)
        in_progress = []
        for run in result['workflow_runs']:
            if run['status'] in ['in_progress', 'queued']:
                in_progress.append(run)
                if debug:
                    print(' Found run {}: {} - {}'.format(
                        run['id'], run['status'], run['conclusion']))
        if in_progress:
            break
        if i % 5 == 0:
            # Reduce the output noise.
            print('Run not found in the queue. Retrying...')

    if not in_progress:
        print('Failed to get the triggered run.')
        sys.exit(1)

    if len(in_progress) > 1:
        print(
            '!!!WARNING!!! Multiple pending runs found. Trying last one.')

    # NOTE(review): the API lists runs newest-first, so index 0 is presumably
    # the run triggered most recently — confirm against the API ordering.
    run = in_progress[0]

    if trigger:
        # Fire-and-forget mode: don't wait for the run to finish.
        print('Queued run {}. See: {}'.format(run['id'], run['html_url']))
        return

    # Pool for run completion.
    sleep = 2
    completed = None
    for i in range(300):
        time.sleep(sleep)
        url = '/actions/runs/%s' % (run['id'])
        result, _ = _github_api(url)
        if debug:
            print(' Current run status: {}'.format(result['status']))
        if result['status'] in ['in_progress', 'queued']:
            if i % 5 == 0:
                # Reduce the output noise.
                print('Waiting for run to end... %s' % (result['html_url']))
            continue
        completed = result
        break

    if not completed:
        print('ERROR: Run not completed in the timeout time.')
        print('Do a manual check at:')
        print(run['html_url'])
        sys.exit(1)

    print('Run done with: %s' % (completed['conclusion']))

    # Run done. Get logs
    # This will redirect to the logs zip file.
    url = '/actions/runs/{}/logs'.format(completed['id'])
    result, response = _github_api(url)
    if response.status_code != 200:
        print('Failed to get run logs. %s' % (response.text))
        sys.exit(1)
    if response.headers['Content-Type'] != 'application/zip':
        print('Run logs are not ZIP.')
        sys.exit(1)

    # The archive will contain a TXT for each job inside the run,
    # and separate directories for each job with separate step output.
    archive = ZipFile(BytesIO(response.content))
    members = archive.namelist()

    target_logs = []
    if target_step:
        target_name = '_{}.txt'.format(target_step)
        # Show output only for a step.
        for member in members:
            if not member.lower().endswith(target_name):
                continue
            target_logs.append(member)

    if not target_logs:
        # Show output for all steps from each job.
        for member in members:
            if '/' in member:
                continue
            if member.endswith(').txt'):
                continue
            target_logs.append(member)

    for log in target_logs:
        with archive.open(log) as stream:
            content = stream.read()
            print(content.decode('utf-8'))

    # Summarize per-job and per-step conclusions and durations.
    result, _ = _github_api(completed['jobs_url'], absolute=True)
    print('-' * 72)
    for job in result['jobs']:
        print('Job: {} - {}'.format(job['name'], job['conclusion']))
        start = _parse_datetime(job['started_at'])
        end = _parse_datetime(job['completed_at'])
        duration = end - start
        print('Duration: {}'.format(duration))
        for step in job['steps']:
            start = _parse_datetime(step['started_at'])
            end = _parse_datetime(step['completed_at'])
            duration = end - start
            # Colorize the conclusion for quick visual scanning.
            color = ''
            cend = ''
            if step['conclusion'] == 'success':
                color = TC.GREEN
                cend = TC.END
            elif step['conclusion'] == 'failure':
                color = TC.RED
                cend = TC.END
            elif step['conclusion'] == 'skipped':
                color = TC.YELLOW
                cend = TC.END
            print(' {}{}{}({}): {}'.format(
                color, step['conclusion'], cend, duration, step['name']))
        print('-' * 72)

    print('Total duration: {}'.format(datetime.now() - command_start))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment