@FoobarProtocol
Created December 29, 2023 05:08
OpenAI Skeleton for Those Looking to Iteratively Employ ChatGPT
#!/usr/bin/env python3
import json
import openai
import time
import os
import logging
from openai.error import InvalidRequestError, RateLimitError
from concurrent.futures import ThreadPoolExecutor

# Set up logging
logging.basicConfig(filename='placeholder.log', level=logging.DEBUG)

file_path = "~/placeholder.json"
output_path = "placeholder.json"
assert file_path != output_path

model = 'gpt-4-1106-preview'
# One API key per concurrent worker; executor.map iterates over this list,
# so it must be a list (a bare string would be iterated character by character).
api_keys = ["sk-placeholder"]

prompt_template = """
[placeholder]
"""
system_prompt = """
[placeholder]
"""

# Read the JSON file (expanduser resolves the leading "~" in the path)
logging.info('Reading JSON file')
print('Reading JSON file')
with open(os.path.expanduser(file_path), 'r') as f:
    data = json.load(f)

# Open the output file in append mode
logging.info('Opening output file in append mode')
print('Opening output file in append mode')
with open(output_path, 'a') as f:
    # Iterate over the items in the JSON data
    logging.info('Iterating over JSON data')
    print('Iterating over JSON data')
    for item in data:
        # Format the prompt with this item's source code
        logging.info('Formatting prompt')
        print('Formatting prompt')
        prompt = prompt_template.format(source_code=item["source_code"])

        # Send the prompt to the OpenAI API
        def send_prompt(api_key):
            logging.info('Sending prompt to OpenAI API')
            print('Sending prompt to OpenAI API')
            # Note: in openai<1.0 the key is module-global, so concurrent
            # workers with different keys will race on this assignment.
            openai.api_key = api_key
            try:
                logging.info('Creating chat completion')
                print('Creating chat completion')
                chat_completion = openai.ChatCompletion.create(
                    model=model,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": prompt}
                    ]
                )
            except InvalidRequestError as e:
                logging.error(f"InvalidRequestError: {e}")
                print("\nError: ", e)
                print()
                time.sleep(10)
                return
            except RateLimitError as e:
                logging.error(f"RateLimitError: {e}")
                print("\nError: ", e)
                print()
                time.sleep(10)
                return
            # Write the prompt and the model's reply to the output file,
            # one JSON object per line
            logging.info('Writing response to output file')
            print('Writing response to output file')
            f.write(json.dumps({
                "prompt": prompt,
                "response": chat_completion["choices"][0]["message"]["content"]
            }))
            f.write('\n')  # Newline character to separate responses

        # Use ThreadPoolExecutor to send one request per API key concurrently
        logging.info('Sending concurrent requests')
        print('Sending concurrent requests')
        with ThreadPoolExecutor() as executor:
            executor.map(send_prompt, api_keys)

        # Sleep for a while to avoid hitting the rate limit
        logging.info('Sleeping to avoid hitting rate limit')
        print('Sleeping to avoid hitting rate limit')
        time.sleep(10)
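
The script assumes the input JSON file is a list of objects, each carrying a "source_code" field that is substituted into the prompt template; any other fields are ignored. A minimal sketch of how such an input file might be produced (the file name and sample contents here are placeholders, not part of the original gist):

#!/usr/bin/env python3
import json

# Hypothetical sample items matching the shape the script expects
sample_items = [
    {"source_code": "def add(a, b):\n    return a + b"},
    {"source_code": "print('hello world')"},
]

# Write them to a placeholder input file; point file_path at this file
with open("placeholder_input.json", "w") as f:
    json.dump(sample_items, f, indent=2)

Each line of the output file is then a standalone JSON object with "prompt" and "response" keys, so the results can be read back line by line (JSON Lines style) rather than as a single JSON document.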