Skip to content

Instantly share code, notes, and snippets.

@jimmo
Created June 29, 2018 05:43
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save jimmo/8e95863c8f31ff4b0ccc680844367102 to your computer and use it in GitHub Desktop.
Simple script to use the GitHub API to generate CSV for import into Jira
import csv
import datetime
import dateutil.parser
import json
import re
import requests
import sys
from collections import defaultdict
from functools import partial
# Get a token from https://github.com/settings/tokens
USER=''
TOKEN=''
ORG=''
# Repository in "owner/name" form; fill in the repo name after the slash.
REPO=f'{ORG}/...'
# All issues (open and closed); further pages follow via the Link header.
ISSUES_URL=f'https://api.github.com/repos/{REPO}/issues?state=all'
# Organisation-level (classic) project boards.
PROJECTS_URL=f'https://api.github.com/orgs/{ORG}/projects'
# HTTP basic-auth pair passed to every requests call.
AUTH=(USER, TOKEN,)
NAMES = {
# Map of GitHub username to Jira name.
'github': 'jira',
}
# issue number -> extra Jira labels (project:... / project-column:... tags);
# populated by fetch_cards and consumed by process_issue.
issue_extra_tags = defaultdict(list)
def jira_tag(v):
    """Normalise *v* into a Jira-friendly label.

    Lower-cases the input, collapses every run of characters outside
    [a-z:] into a single hyphen, and trims hyphens from both ends.
    """
    lowered = v.lower()
    hyphenated = re.sub('[^a-z:]+', '-', lowered)
    return hyphenated.strip('-')
def continue_page(r, url, fn):
    """Follow GitHub's Link-header pagination, calling *fn* on the next page URL.

    The header looks like:
    Link: <https://api.github.com/repositories/___/issues?state=open&page=2>; rel="next", <https://api.github.com/repositories/___/issues?state=open&page=8>; rel="last"
    """
    rel_to_url = {}
    for entry in r.headers.get('link', '').split(','):
        if not entry:
            continue
        target, rel_part = entry.split(';')
        # Strip the <...> wrapper and the "..." quoting.
        target = target.split('<')[1].split('>')[0]
        rel_name = rel_part.split('"')[1]
        rel_to_url[rel_name] = target
    # Already on the final page: stop recursing.
    if 'last' in rel_to_url and rel_to_url['last'] == url:
        return
    if 'next' in rel_to_url:
        fn(rel_to_url['next'])
def fetch_cards(project, column, url):
    """Tag every issue referenced by a card in *column* of *project*.

    Appends 'project:...' and 'project-column:...' labels to
    issue_extra_tags, keyed by issue number, then follows pagination.
    """
    tag_project = jira_tag(project['name'])
    tag_column = jira_tag(column['name'])
    resp = requests.get(url, auth=AUTH,
                        headers={'Accept': 'application/vnd.github.inertia-preview+json'})
    for card in resp.json():
        # Cards that are free-form notes have no content_url; skip them.
        if 'content_url' not in card:
            continue
        issue_number = int(card['content_url'].split('/')[-1])
        print(tag_project, tag_column, issue_number)
        issue_extra_tags[issue_number].append(f'project:{tag_project}')
        issue_extra_tags[issue_number].append(f'project-column:{tag_column}')
    continue_page(resp, url, partial(fetch_cards, project, column))
def fetch_columns(project, url):
    """Fetch one page of *project*'s board columns and harvest each column's cards.

    Bug fix: the pagination continuation used to be
    partial(fetch_issues, project) — fetch_issues takes a single URL
    argument, so any project with more than one page of columns raised
    TypeError. The continuation must recurse into fetch_columns itself.
    """
    r = requests.get(url, auth=AUTH,
                     headers={'Accept': 'application/vnd.github.inertia-preview+json'})
    for column in r.json():
        fetch_cards(project, column, column['cards_url'])
    continue_page(r, url, partial(fetch_columns, project))
def fetch_projects(url):
    """Fetch one page of organisation projects and walk each project's columns.

    Bug fix: the pagination continuation used to be fetch_issues, which
    would have processed project-list pages as issue lists; subsequent
    project pages must be handled by fetch_projects itself.
    """
    r = requests.get(url, auth=AUTH,
                     headers={'Accept': 'application/vnd.github.inertia-preview+json'})
    for project in r.json():
        fetch_columns(project, project['columns_url'])
    continue_page(r, url, fetch_projects)
# Accumulated story dicts, one per GitHub issue; written out as CSV at the end.
stories = []
def get_name(name):
    """Translate a GitHub login into the Jira user name, or echo it back."""
    key = name.lower()
    return NAMES.get(key, name)
def get_date(date):
    """Parse a GitHub timestamp string and reformat it for the Jira importer."""
    parsed = dateutil.parser.parse(date)
    return parsed.strftime("%Y-%m-%d %H:%M")
def issue_body(issue):
    """Build the Jira description: the issue body plus an import-provenance line.

    To fix up more of Jira's weird markdown style:
    cat issues.csv | sed 's/!\[.*\](\(.*\))/!\1!/g' > issues_images.csv
    cat issues_images.csv | sed 's/\[\(.*\)\](\(.*\))/[\1|\2]/g' > issues_links.csv
    cat issues_links.csv | sed 's/```/{noformat}/g' > issues_code.csv
    """
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    template = '{}.\n\nImported from [GitHub {}#{}|{}] on {}.'
    return template.format(issue['body'], REPO, issue['number'],
                           issue['html_url'], today)
def get_status(issue):
    """Map a GitHub issue state onto the Jira workflow status."""
    return 'To Do' if issue['state'] == 'open' else 'Done'
def process_issue(issue):
    """Convert one GitHub issue dict into a story record and append it to stories.

    Also fetches the issue's full comment thread from the API, one
    request per issue.
    """
    number = issue['number']
    print(number, '-', issue['title'], file=sys.stderr)
    milestone = issue['milestone']
    label_tags = [jira_tag(label['name']) for label in issue['labels']]
    story = {
        'id': 'GI-' + str(number),
        'title': issue['title'],
        'description': issue_body(issue),
        'created_at': get_date(issue['created_at']),
        'type': 'Bug',
        'status': get_status(issue),
        'milestone': milestone['title'] if milestone else '',
        'comments': [],
        # Board-derived tags collected earlier by fetch_cards.
        'labels': label_tags + issue_extra_tags[number],
        'requested_by': get_name(issue['user']['login']),
    }
    for comment in requests.get(issue['comments_url'], auth=AUTH).json():
        author = get_name(comment['user']['login'])
        stamp = get_date(comment['updated_at'])
        story['comments'].append(f'{stamp}; {author}' + '; ' + comment['body'])
    stories.append(story)
def fetch_issues(url):
    """Download one page of issues, convert each, then recurse to the next page."""
    resp = requests.get(url, auth=AUTH)
    for issue in resp.json():
        process_issue(issue)
    continue_page(resp, url, fetch_issues)
# Harvest project-board tags first so issues pick them up, then the issues.
fetch_projects(PROJECTS_URL)
fetch_issues(ISSUES_URL)

# Jira's CSV importer needs a fixed-width header, so pad every row out to the
# widest comment/label count seen. default=0 keeps this from raising
# ValueError on a repository with no issues at all.
max_comments = max((len(story['comments']) for story in stories), default=0)
max_labels = max((len(story['labels']) for story in stories), default=0)

# Output is CSV. newline='' is required when handing a file to csv.writer
# (otherwise extra blank lines appear on Windows), and utf-8 avoids
# UnicodeEncodeError on non-ASCII issue bodies under non-UTF-8 locales.
with open('issues.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(
        ['Issue Key', 'Summary', 'Issue Type', 'Fix Version', 'Status',
         'Date Created', 'Reporter', 'Description',]
        + ['Comment Body'] * max_comments
        + ['Labels'] * max_labels)
    for story in stories:
        writer.writerow(
            [story['id'], story['title'], story['type'], story['milestone'],
             story['status'], story['created_at'], story['requested_by'],
             story['description'],]
            + story['comments'] + [''] * (max_comments - len(story['comments']))
            + story['labels'] + [''] * (max_labels - len(story['labels'])))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment