Skip to content

Instantly share code, notes, and snippets.

@chaosphere2112
Created April 2, 2015 22:30
Show Gist options
  • Save chaosphere2112/b752608f93b931b7b5a7 to your computer and use it in GitHub Desktop.
Save chaosphere2112/b752608f93b931b7b5a7 to your computer and use it in GitHub Desktop.
Basic changelog
import git

# Open the local uvcdat checkout.  NOTE(review): hard-coded path — only
# works on the original author's machine.
r = git.Repo("/Users/fries2/projects/uvcdat")

# Find the most recently committed tag; its target commit is the point
# where the changelog traversal stops.
t = None
for tag in r.tags:
    if tag.tag:
        # Annotated tag: unwrap the TagReference to the TagObject so the
        # committed_date comparison below goes through .object uniformly.
        tag = tag.tag
    if t is None or tag.object.committed_date > t.object.committed_date:
        t = tag

# Binary SHA of the commit the newest tag points at.
# NOTE(review): if the repo has no tags, t is None and this raises.
end = t.object.binsha
commit = r.head.commit
prs = []  # merge commits for pull requests newer than the last tag
def traverse_commits(c, end, pull_requests):
    """Walk history from commit *c* back toward the commit whose binsha
    equals *end*, collecting GitHub merge commits along the way.

    Parameters:
        c: a commit object exposing .binsha, .message and .parents.
        end: binary SHA (bytes) of the stopping commit.
        pull_requests: list mutated in place; every commit whose message
            starts with "Merge pull request" is appended.

    Returns True as soon as some parent chain from *c* reaches *end*,
    False if no ancestor path does.  Traversal is depth-first in parent
    order, matching git's first-parent-first convention.
    """
    if c.binsha == end:
        return True
    # GitHub merge commits always begin with this fixed prefix.
    if c.message.startswith("Merge pull request"):
        pull_requests.append(c)
    for parent in c.parents:
        if traverse_commits(parent, end, pull_requests):
            return True
    return False
# Collect all merge-PR commits between HEAD and the latest tag.
traverse_commits(commit, end, prs)

import os
import re

import requests

# Issue references embedded in PR titles look like a bare 3+ digit number.
# Raw string: "\d" in a plain literal is an invalid escape sequence.
fix_commit_re = re.compile(r"\d{3,}")

# Personal access token must be supplied via the environment.
key = os.environ["CHANGELOG_GITHUB_TOKEN"]
headers = {
    "Authorization": "token %s" % key,
    "Accept": "application/vnd.github.v3+json",
    "User-Agent": "chaosphere2112-uvcdat-changelogger",
}

# URL templates for the GitHub REST v3 API.
api_base = "https://api.github.com/%s"
pr_base = api_base % "repos/UV-CDAT/uvcdat/pulls/%s"
user_base = api_base % "users/%s"
issue_base = api_base % "repos/UV-CDAT/uvcdat/issues/%s"
pull_data = []  # NOTE(review): never used below — kept for compatibility
# For each merge commit, look up the PR on GitHub, attribute commits to
# authors, and print one markdown changelog bullet per PR.
for pr in prs:
    # Message format: "Merge pull request #1234 from user/branch" —
    # chars 19+ start at the "#1234" token.
    pr_id = pr.message[19:].split()[0]
    # Drop the leading "#" to get the numeric ID.
    pr_id = pr_id[1:]
    response = requests.get(pr_base % pr_id, headers=headers)
    if response.status_code == 404:
        continue
    pr_info = response.json()
    data = {}
    data["title"] = pr_info["title"]
    match = fix_commit_re.search(data["title"])
    if match is not None:
        # The title mentions a 3+ digit number; try resolving it as an
        # issue so the bullet can link to it.
        issue_response = requests.get(issue_base % match.group(0), headers=headers)
        if issue_response.status_code != 404:
            issue_data = issue_response.json()
            data["issue"] = issue_data["html_url"]
            data["issue_title"] = issue_data["title"]
    data["url"] = pr_info["html_url"]
    authors = set()
    author_commits = {}
    # Page through the PR's commit list (Link: rel="next" pagination).
    pr_commits = requests.get(pr_info["commits_url"], headers=headers)
    commits = pr_commits.json()
    while "next" in pr_commits.links:
        pr_commits = requests.get(pr_commits.links["next"]["url"], headers=headers)
        commits.extend(pr_commits.json())
    # Tally commits per author name.
    for commit in commits:
        author = commit["commit"]["author"]["name"]
        if author not in authors:
            authors.add(author)
            author_commits[author] = 1
        else:
            author_commits[author] += 1
    # Check if author is a github user, deduplicate names
    for author in authors:
        if len(author.split()) == 1:
            # Might be a user, let's check
            user_data = requests.get(user_base % author, headers=headers)
            if user_data.status_code == 404:
                continue
            user = user_data.json()
            user_real_name = user["name"]
            # BUGFIX: if the profile has no real name (None) or the real
            # name equals the login, the old code double-counted the
            # author and then deleted the entry entirely.
            if not user_real_name or user_real_name == author:
                continue
            if user_real_name in author_commits:
                author_commits[user_real_name] += author_commits[author]
            else:
                author_commits[user_real_name] = author_commits[author]
            del author_commits[author]
    # Sort authors by commit count, most commits first.
    authors = sorted(author_commits, key=lambda x: author_commits[x], reverse=True)
    data["authors"] = ", ".join(authors)
    # print(...) with a single argument behaves identically on Py2 and Py3.
    if "issue" in data:
        print("* [{title}]({url}) by {authors} ([{issue_title}]({issue}))".format(**data))
    else:
        print("* [{title}]({url}) by {authors}".format(**data))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment