@jimmo · Created May 4, 2018
Simple script that uses the GitHub API to generate a CSV of a repository's open issues for import into Pivotal Tracker.
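Usage (a sketch; the filename is arbitrary): fill in TOKEN and REPO below, save the script as e.g. github_issues_to_csv.py, and run python3 github_issues_to_csv.py. It writes issues.csv to the current directory, ready for Pivotal Tracker's CSV import.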
import csv
import dateutil.parser  # pip install python-dateutil
import requests         # pip install requests
import sys
# Get a personal access token from https://github.com/settings/tokens.
# (For private repos the token needs the 'repo' scope.)
USER = 'jimmo'
TOKEN = ''
REPO = ''  # In 'owner/name' form.

# Note: the GitHub API query parameter is 'state', not 'status'.
URL = f'https://api.github.com/repos/{REPO}/issues?state=open'
AUTH = (USER, TOKEN)
NAMES = {
    # Map of GitHub username to Pivotal Tracker full name.
    'jimmo': 'Jim Mussared',
}

stories = []
def get_name(name):
    # Map a GitHub username to a Pivotal Tracker full name, if known.
    return NAMES.get(name, name)

def get_date(date):
    # Convert an ISO-8601 timestamp to Pivotal's date format,
    # e.g. '2018-05-04T05:19:00Z' -> 'May 4, 2018'.
    # (date.day, not date.month, gives the day of the month without a
    # leading zero.)
    date = dateutil.parser.parse(date)
    return f'{date.strftime("%b")} {date.day}, {date.year}'
def process_issue(issue):
    # Progress output goes to stderr so it doesn't mix with other output.
    print(issue['number'], '-', issue['title'], file=sys.stderr)
    story = {
        'id': issue['number'],
        'title': issue['title'],
        'description': get_name(issue['user']['login']) + ' - ' + get_date(issue['created_at']) + '\n' + issue['body'],
        'created_at': get_date(issue['created_at']),
        'type': 'bug',  # Import everything as a bug.
        'comments': [],
        'labels': ','.join(label['name'] for label in issue['labels']),
        'requested_by': get_name(issue['user']['login']),
    }
    # Fetch the issue's comments, each prefixed and suffixed with its
    # author and date.
    for comment in requests.get(issue['comments_url'], auth=AUTH).json():
        name = get_name(comment['user']['login'])
        date = get_date(comment['updated_at'])
        story['comments'].append(f'{name} - {date}' + '\n' + comment['body'] + f' ({name} - {date})')
    stories.append(story)
def fetch_issues(url):
    r = requests.get(url, auth=AUTH)
    for issue in r.json():
        process_issue(issue)
    # Follow pagination via the Link header, which looks like:
    # Link: <https://api.github.com/repositories/___/issues?state=open&page=2>; rel="next", <https://api.github.com/repositories/___/issues?state=open&page=8>; rel="last"
    links = {}
    for link in r.headers.get('link', '').split(','):
        if ';' not in link:
            continue  # No Link header, i.e. a single page of results.
        link_url, rel = link.split(';')
        link_url = link_url.split('<')[1].split('>')[0]
        rel = rel.split('"')[1]
        links[rel] = link_url
    print(links, file=sys.stderr)
    if 'last' in links and links['last'] == url:
        return  # Already on the last page.
    if 'next' in links:
        fetch_issues(links['next'])

fetch_issues(URL)
# Pivotal's CSV import takes one 'Comment' column per comment, so pad the
# header out to the most-commented story.
max_comments = max(len(story['comments']) for story in stories)

# newline='' is the documented way to open a file for csv.writer.
with open('issues.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Id', 'Title', 'Labels', 'Type', 'Created at', 'Requested By', 'Description'] + ['Comment'] * max_comments)
    for story in stories:
        writer.writerow([story['id'], story['title'], story['labels'], story['type'], story['created_at'], story['requested_by'], story['description']] + story['comments'])
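# A small addition (not in the original gist): report what was written, as a
# quick sanity check before importing into Pivotal Tracker.
print(f'Wrote {len(stories)} stories to issues.csv', file=sys.stderr)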