@teopost
Created May 27, 2022 06:04
#!/usr/bin/python3
# https://stackoverflow.com/questions/64991055/can-i-get-the-most-recent-commit-of-a-repository-with-bitbucket-api
import requests
from requests.auth import HTTPBasicAuth
# Login credentials
username = '<<bitbucket user>>'
password = '<<app password>>'
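# Assumption: the password is a Bitbucket Cloud app password with repository read
# permission (Personal settings > App passwords), not the account password.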
team = 'teclait'
fields = 'next,values.links.clone.href,values.slug,values.project.name,values.project.key,values.project.links,values.links.html,values.workspace.name,values.size,values.updated_on'
full_repo_list = []
# Request 10 repositories per page (and only the fields listed above), plus the next page URL
next_page_url = 'https://api.bitbucket.org/2.0/repositories/%s?pagelen=10&fields=%s' % (team, fields)
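# Each response page is JSON shaped roughly like
#   {"values": [{...repo fields...}, ...], "next": "<URL of the next page, absent on the last>"}
# (shape inferred from the parsing below, trimmed down by the `fields` filter above)

# CSV header row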
print('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' % ('reponame','repohttp','repogit','projectname','projectkey','projecturl','repourl','workspacename','reposize','updatedon'))
# Keep fetching pages while there's a page to fetch
while next_page_url is not None:
    response = requests.get(next_page_url, auth=HTTPBasicAuth(username, password))
    response.raise_for_status()  # fail fast on authentication or API errors
    page_json = response.json()
    # Parse repositories from the JSON
    for repo in page_json['values']:
        reponame = repo['slug']
        # Clone links are assumed ordered [https, ssh], as the API returns in practice.
        # Strip the 'username@' prefix that the API embeds in the HTTPS URL.
        repohttp = repo['links']['clone'][0]['href'].replace(username + '@', '')
        repogit = repo['links']['clone'][1]['href']
        projectname = repo['project']['name']
        projectkey = repo['project']['key']
        projecturl = repo['project']['links']['html']['href']
        repourl = repo['links']['html']['href']
        workspacename = repo['workspace']['name']
        reposize = repo['size']
        updatedon = repo['updated_on']
        print('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' % (reponame, repohttp, repogit, projectname, projectkey, projecturl, repourl, workspacename, reposize, updatedon))
        full_repo_list.append(repo['slug'])
    # Get the next page URL, if present.
    # It includes the same query parameters, so there is no need to append them again.
    next_page_url = page_json.get('next', None)
# Result length will be equal to the `size` field returned on any page
print("Result:", len(full_repo_list))
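Note: the %-formatted prints above do not quote fields, so a project or workspace name containing a comma would produce a malformed CSV row. A minimal sketch of the same output using Python's standard csv module (the writer setup is an illustrative assumption; the row variables are the ones bound in the loop above):

import csv
import sys

writer = csv.writer(sys.stdout)
# Header row, matching the columns the script prints
writer.writerow(['reponame', 'repohttp', 'repogit', 'projectname', 'projectkey',
                 'projecturl', 'repourl', 'workspacename', 'reposize', 'updatedon'])
# Inside the `for repo in page_json['values']:` loop, emit each repository row;
# csv.writer quotes any field that contains a comma
writer.writerow([reponame, repohttp, repogit, projectname, projectkey,
                 projecturl, repourl, workspacename, reposize, updatedon])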