Skip to content

Instantly share code, notes, and snippets.

@HaxxonHax
Created October 21, 2020 17:39
Show Gist options
  • Star 4 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save HaxxonHax/58e74fb1adb6a9672ba5a6c0e0b1c1f3 to your computer and use it in GitHub Desktop.
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
# All scopes share the Drive OAuth base URL; only the suffix varies.
_SCOPE_BASE = 'https://www.googleapis.com/auth/drive'
SCOPES = [_SCOPE_BASE + suffix for suffix in (
    '',
    '.file',
    '.readonly',
    '.metadata.readonly',
    '.appdata',
    '.metadata',
    '.photos.readonly',
)]
def delete_file(request_id, response, exception):
    """Batch-request callback invoked once per queued Drive API request.

    Args:
        request_id: Identifier assigned to the request within the batch.
        response: Deserialized response body on success, otherwise None.
        exception: The error raised for this request (e.g. an HttpError
            from the API client) if it failed, otherwise None.
    """
    if exception is not None:
        # Surface per-request failures instead of silently swallowing
        # them, so a partially failed batch is visible to the operator.
        print('Batch request %s failed: %s' % (request_id, exception))
def main():
    """Find Drive files named 'AVIProjectX*avi' and move them to the trash.

    Authenticates with OAuth (caching credentials in token.pickle), pages
    through Drive search results, queues one files().update(trashed=True)
    call per matching file into a batch request, then executes the batch.
    """
    creds = None
    # token.pickle stores the user's access and refresh tokens; it is
    # created automatically when the authorization flow first completes.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run.
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('drive', 'v3', credentials=creds)
    page_token = None
    count = 0
    # Google API by default limits 1000 queries per 100s, so cap the
    # number of result pages fetched in one run.
    max_pages = 9
    batch = service.new_batch_http_request(callback=delete_file)
    while count < max_pages:
        response = service.files().list(
            q="name contains 'AVIProjectX' and trashed = false",
            spaces='drive',
            fields='nextPageToken, files(id, name, trashed)',
            pageToken=page_token).execute()
        for file in response.get('files', []):
            file_id = file.get('id')
            file_name = file.get('name')
            # NOTE(review): endswith('avi') also matches names like
            # 'fooavi'; '.avi' may be intended -- confirm before
            # tightening, as that would change which files are trashed.
            if file_name.startswith('AVIProjectX') and file_name.endswith('avi'):
                print('Trashing file: %s (%s)' % (file_name, file_id))
                # Move to trash rather than deleting permanently.
                batch.add(service.files().update(fileId=file_id,
                                                 body={'trashed': True}))
                # To delete permanently (bypassing the trash) instead:
                # batch.add(service.files().delete(fileId=file_id))
        count += 1
        page_token = response.get('nextPageToken', None)
        if page_token is None:
            break
    print('Executing Batch')
    batch.execute()
    service.close()


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment