@dmanexe
Created May 10, 2023 22:48
Unlimited ChatGPT input script: splits an arbitrarily large text file into chunks and sends each chunk to the OpenAI chat API in parallel.
import openai
from concurrent.futures import ThreadPoolExecutor
# Add your own OpenAI API key
openai.api_key = "sk-XXXXXXXXXXXXXXXXXXXXX"
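# Optional hardening (a sketch, not part of the original gist): read the key
# from the OPENAI_API_KEY environment variable when it is set, rather than
# shipping a hardcoded key; falls back to the placeholder above otherwise.
import os
openai.api_key = os.getenv("OPENAI_API_KEY", openai.api_key)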
def load_text(file_path):
    with open(file_path, 'r') as file:
        return file.read()
def save_to_file(responses, output_file):
    with open(output_file, 'w') as file:
        for response in responses:
            file.write(response + '\n')
# Change your OpenAI chat model accordingly
def call_openai_api(chunk):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "PASS IN ANY ARBITRARY SYSTEM VALUE TO GIVE THE AI AN IDENTITY"},
            {"role": "user", "content": f"OPTIONAL PREPROMPTING FOLLOWED BY YOUR DATA TO PASS IN: {chunk}."},
        ],
        max_tokens=1750,
        n=1,
        stop=None,
        temperature=0.5,
    )
    return response.choices[0].message.content.strip()
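# Optional (a sketch, not in the original gist): wrap the call in a simple
# retry, since firing many parallel requests makes rate limits likely.
# Assumes the pre-1.0 openai SDK, where openai.error.RateLimitError is defined.
import time
def call_openai_api_with_retry(chunk, retries=3, backoff=2.0):
    for attempt in range(retries):
        try:
            return call_openai_api(chunk)
        except openai.error.RateLimitError:
            if attempt == retries - 1:
                raise
            time.sleep(backoff * (attempt + 1))  # wait a little longer each attempt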
# Note: this splits on whitespace and treats each word as roughly one token;
# it is an approximation, not an exact token count.
def split_into_chunks(text, tokens=1500):
    words = text.split()
    chunks = [' '.join(words[i:i + tokens]) for i in range(0, len(words), tokens)]
    return chunks
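# Alternative (a sketch, not in the original gist): count real tokens with the
# tiktoken package instead of approximating one token per word. Assumes
# tiktoken is installed (pip install tiktoken).
def split_into_token_chunks(text, max_tokens=1500):
    import tiktoken
    encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
    token_ids = encoding.encode(text)
    return [encoding.decode(token_ids[i:i + max_tokens])
            for i in range(0, len(token_ids), max_tokens)]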
def process_chunks(input_file, output_file):
    text = load_text(input_file)
    chunks = split_into_chunks(text)
    # Processes chunks in parallel; executor.map preserves input order, so
    # responses line up with their source chunks.
    with ThreadPoolExecutor() as executor:
        responses = list(executor.map(call_openai_api, chunks))
    save_to_file(responses, output_file)
# Specify your input and output files
if __name__ == "__main__":
    input_file = "your_input_here.txt"
    output_file = "your_output_here.txt"
    process_chunks(input_file, output_file)

# Can take up to a few minutes to run depending on the size of your data input