Compute Jira Cycle Times from a Jira Issue Export
  1. Download compute-jira-cycle-times.py to your local machine
  2. Create a credentials JSON file on your local machine:
    1. If you do not have an API token for Jira, generate one from your Atlassian account's security settings
    2. Create a JSON file on your local machine like {"subdomain": "<your_domain>", "username": "<your_login_email>", "api_token": "<your_api_token>"}. Name it anything you want (see the example after this list).
  3. Run an export in Jira to get the issues you want. You only need the columns listed in csv_headers at the top of the script.
  4. Run compute-jira-cycle-times.py <path_to_credential_file> <path_to_export>
  5. Open the generated jira-with-cycle-times.csv in Google Sheets, Excel, or Numbers to create some charts and work with the data.
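For example, assuming the credentials file is saved as jira-credentials.json and the export as export.csv (both filenames are arbitrary), the credentials file would contain something like:

{"subdomain": "mycompany", "username": "me@example.com", "api_token": "<your_api_token>"}

and the invocation would be:

python3 compute-jira-cycle-times.py jira-credentials.json export.csv

The script writes its results to jira-with-cycle-times.csv in the current directory.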
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import csv
import json
import os
import sys

import requests

from datetime import datetime, timedelta

credentials = {}

if len(sys.argv) < 3:
    print('Usage: compute-jira-cycle-times.py <path_to_credentials> <path_to_jira_export>')
    exit()

with open(sys.argv[1], 'r') as _file:
    credentials = json.loads(_file.read())

BASE_URL = 'https://{}.atlassian.net/rest/api/2/'.format(credentials['subdomain'])
# BASE_URL = 'https://api.atlassian.com/ex/jira/{}/rest/api/3/'.format(credentials['cloud_id'])

# Local cache for fetched changelogs; create the directory if it does not exist
CACHE_DIR = 'jira-data'
os.makedirs(CACHE_DIR, exist_ok=True)
csv_headers = [
    # These should match the headings exported out of Jira
    'Issue Type',
    'Issue key',
    'Issue id',
    'Summary',
    'Assignee',
    'Reporter',
    'Created',
    'Story Points',
    'Resolved',
    # These are going to be added by this script
    'First In Progress',
    'Last In Progress',
    'Done',
    'Cycle Time (F)',
    'Cycle Time (L)',
]
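
# Jira Cloud accepts HTTP basic auth with your login email as the username
# and an API token as the password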
session = requests.Session()
session.auth = (credentials['username'], credentials['api_token'])
session.headers.update({
    'Accept': 'application/json',
})
def get_issue_list_from_csv():
    issues = []
    with open(sys.argv[2], 'r') as _file:
        csv_file = csv.DictReader(_file)
        for row in csv_file:
            issues.append(row)
    return issues
def get_issue_changelog(issue_key):
    # Read the changelog from the local cache, falling back to the API
    changelog = None
    try:
        with open('{}/{}.json'.format(CACHE_DIR, issue_key), 'r') as _file:
            changelog = json.loads(_file.read())
    except (OSError, ValueError):
        print('No cache for {}, fetching from API'.format(issue_key))
        changelog = get_issue_changelog_from_api(issue_key)
        if changelog is not None:
            with open('{}/{}.json'.format(CACHE_DIR, issue_key), 'w') as _file:
                _file.write(json.dumps(changelog, indent=4, sort_keys=True))
    return changelog
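
# Note: cached changelogs never expire; delete the jira-data directory (or a
# single <issue_key>.json file in it) to force a refetch from the API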
def get_issue_changelog_from_api(issue_key):
    changelog = None
    res = session.get(BASE_URL + f'issue/{issue_key}/changelog', params={'maxResults': 1000})
    if res.status_code == 200:
        changelog = json.loads(res.content)
        if changelog.get('isLast') is False:
            print('{} has more than 1000 changelogs'.format(issue_key))
    else:
        print('Failed to fetch {}. Reason: {} {}'.format(issue_key, res.status_code, res.content))
    return changelog
def get_date(date_str):
    return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%f%z')
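
# Example (illustrative timestamp): get_date('2021-06-09T09:50:00.000-0500')
# returns an aware datetime at the UTC-05:00 offset, matching the format
# string above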
def parse_start_and_end_dates(issue_changelog):
    first_in_progress = None
    last_in_progress = None
    done_date = None
    for val in issue_changelog.get('values', []):
        for item in val.get('items', []):
            if item['field'] == 'status':
                cur_date = get_date(val['created'])
                if item['fromString'] == 'To Do' and item['toString'] in ['In Progress', 'In Review', 'Done']:
                    if not first_in_progress:
                        first_in_progress = cur_date
                    if not last_in_progress:
                        last_in_progress = cur_date
                    if cur_date < first_in_progress:
                        first_in_progress = cur_date
                    if cur_date > last_in_progress:
                        last_in_progress = cur_date
                if item['toString'] == 'Done':
                    if not done_date:
                        done_date = cur_date
                    if cur_date > done_date:
                        done_date = cur_date
    return {
        'First In Progress': first_in_progress,
        'Last In Progress': last_in_progress,
        'Done': done_date,
    }
def compute_cycle_time(start, end):
    # Borrowed from https://stackoverflow.com/questions/3615375/number-of-days-between-2-dates-excluding-weekends
    day_generator = (start + timedelta(x + 1) for x in range((end - start).days + 1))
    days = sum(1 for day in day_generator if day.weekday() < 5)
    return days
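
# Worked example: for start on Monday 2021-06-07 and end on Friday 2021-06-11,
# the generator yields Tuesday through Saturday; the weekday() < 5 filter
# drops Saturday, so the cycle time is 4 business days.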
issues = get_issue_list_from_csv()

for issue in issues:
    changelog = get_issue_changelog(issue['Issue key'])
    if not changelog:
        continue
    dates = parse_start_and_end_dates(changelog)
    issue.update(dates)
    # Only compute cycle times for issues whose changelog contained the dates
    if dates['Done']:
        issue.update({
            'Cycle Time (F)': compute_cycle_time(dates['First In Progress'], dates['Done']) if dates['First In Progress'] else None,
            'Cycle Time (L)': compute_cycle_time(dates['Last In Progress'], dates['Done']) if dates['Last In Progress'] else None,
        })

with open('jira-with-cycle-times.csv', 'w', newline='') as _file:
    # extrasaction='ignore' skips export columns (e.g. Status) not in csv_headers
    csv_file = csv.DictWriter(_file, fieldnames=csv_headers, extrasaction='ignore')
    csv_file.writeheader()
    csv_file.writerows(issues)
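
As a quick sanity check, here is a minimal sketch of the input parse_start_and_end_dates works over. The payload below is a trimmed, hypothetical version of what the Jira changelog endpoint returns, containing only the fields the script actually reads:

# Hypothetical changelog payload with only the fields the script reads
sample_changelog = {
    'isLast': True,
    'values': [
        {
            'created': '2021-06-09T09:55:00.000-0500',
            'items': [{'field': 'status', 'fromString': 'To Do', 'toString': 'In Progress'}],
        },
        {
            'created': '2021-06-09T14:46:00.000-0500',
            'items': [{'field': 'status', 'fromString': 'In Progress', 'toString': 'Done'}],
        },
    ],
}

dates = parse_start_and_end_dates(sample_changelog)
# dates['First In Progress'] -> 2021-06-09 09:55 -05:00
# dates['Done']              -> 2021-06-09 14:46 -05:00
# compute_cycle_time(dates['First In Progress'], dates['Done']) -> 1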
Example Jira export (CSV):

Issue Type,Issue key,Summary,Assignee,Reporter,Status,Resolution,Created,Story Points,Resolved
Task,AP-123,Improve query response time,Alice Alcorn,Alice Alcorn,Done,Done,09/Jun/21 9:50 AM,1.0,09/Jun/21 2:46 PM
Task,AP-456,Add metric to track size of incoming webhooks,Bob Bowls,Bob Bowls,Done,Done,09/Jun/21 9:46 AM,1.0,09/Jun/21 2:44 PM