Last active
December 17, 2020 21:27
-
-
Save mvxt/f8b9be2e4941f5c4c33bef8c31c1cedb to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import base64 | |
from datetime import datetime, timedelta | |
import http.client | |
import json | |
import os | |
import requests | |
import sys | |
###################
# MODIFY AS NEEDED
###################
vcs = 'gh'                  # gh or bb
org = 'mvxt'                # organization name
project = 'circleci-demo'   # name of project
artifact_jobs = ['test']    # add more job names you expect artifacts from
timeline = 1000             # how many minutes to look back for jobs
base_path = '/api/v2'       # Current API version. Leave at v2
###################
####################
# DON'T TOUCH THESE
####################
# One shared connection, reused by every request in this script.
conn = http.client.HTTPSConnection("circleci.com")

# The personal API token comes from the environment; bail out early without it.
token = os.environ.get('CIRCLE_TOKEN')
if not token:
    sys.exit('CIRCLE_TOKEN not set')

# CircleCI Basic auth: token as username, empty password, base64-encoded.
auth = base64.b64encode((token + ':').encode('ascii'))
headers = {
    'Authorization': 'Basic ' + auth.decode('ascii'),
    'Content-Type': 'application/json',
    'Accept': 'application/json'
}
def get_data(method: str, url: str) -> dict:
    """Send a request on the shared CircleCI connection and decode the JSON reply.

    Uses the module-level ``conn`` and ``headers``; returns the parsed body.
    """
    conn.request(method, url, headers=headers)
    body = conn.getresponse().read()
    return json.loads(body)
def pretty_print(obj: dict) -> None:
    """Print *obj* to stdout as 2-space-indented JSON."""
    print(json.dumps(obj, indent=2))
####################
# (1a) Fetch pipelines from project
pipelines_url = "{base}/project/{vcs}/{org}/{project}/pipeline".format(
    base=base_path, vcs=vcs, org=org, project=project)
pipelines = get_data("GET", pipelines_url)

# (1b) Keep only pipelines updated within the last `timeline` minutes
filtered = []
cutoff = datetime.utcnow() - timedelta(minutes=timeline)
for pipeline in pipelines['items']:
    # API timestamps are ISO-8601 with a trailing 'Z'; strip it so
    # fromisoformat() can parse (naive datetimes, but both sides are UTC).
    updated = datetime.fromisoformat(pipeline['updated_at'].replace('Z', ''))
    # If the pipeline was updated after the cutoff, remember its ID
    if updated > cutoff:
        filtered.append(pipeline['id'])

# (2) For each pipeline ID, fetch its workflows
for pipeline_id in filtered:
    pipeline_workflows_url = "{base}/pipeline/{id}/workflow".format(base=base_path, id=pipeline_id)
    workflows = get_data("GET", pipeline_workflows_url)
    # (3) Get all jobs for each workflow
    for workflow in workflows['items']:
        jobs_url = "{base}/workflow/{id}/job".format(base=base_path, id=workflow['id'])
        jobs = get_data("GET", jobs_url)
        # (4a) Keep only finished jobs whose names we expect artifacts from
        filtered_jobs = [job for job in jobs['items']
                         if job['name'] in artifact_jobs and job['status'] == 'success']
        # (4b) For each job, get its artifacts
        for job in filtered_jobs:
            artifacts_url = "{base}/project/{vcs}/{org}/{project}/{job_num}/artifacts".format(
                base=base_path, vcs=vcs, org=org, project=project, job_num=job['job_number'])
            artifacts = get_data("GET", artifacts_url)
            # (5) For each artifact, get URL, request, and save to filesystem
            for artifact in artifacts['items']:
                filename = artifact['path']
                url = artifact['url']
                # BUG FIX: the original format string had a literal placeholder
                # and never interpolated `filename`; `{filename}` now resolves.
                print('filename: {filename}, url: {url}'.format(filename=filename, url=url))
                # Switched to using requests (r) here because http.request had some issues
                r = requests.get(url)
                # BUG FIX: the original race-condition guard compared against
                # errno.EEXIST without ever importing `errno` (NameError if the
                # branch fired); exist_ok=True handles the race directly.
                dirname = os.path.dirname(filename)
                if dirname:
                    os.makedirs(dirname, exist_ok=True)
                with open(filename, 'wb') as f:
                    f.write(r.content)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Scrappy Python script for downloading artifacts from recent jobs via CircleCI's v2 API.