Skip to content

Instantly share code, notes, and snippets.

@jmsdnns
Last active May 24, 2024 01:08
Show Gist options
  • Save jmsdnns/5f6b30e35e1b2d4d6a3ce2b329e6f77c to your computer and use it in GitHub Desktop.
Save jmsdnns/5f6b30e35e1b2d4d6a3ce2b329e6f77c to your computer and use it in GitHub Desktop.
Don't need Copilot when you can use this instead.
# Only two requirements
# - openai
# - python-dotenv
#
# Assumes you have $OPENAI_API_KEY in your environment or it is defined
# in a .env file
#
# Usage:
#
# $ nopilot hello world in erlang
#
# -------
#
# io:format("Hello world~n", []).
#
# $ echo "hello world in haskell" | nopilot
#
# -------
#
# main = putStrLn "Hello, world!"
#
# $ nopilot -e # opens a tempfile in $EDITOR and reads prompt from that
#
# -------
#
# #include <iostream>
# using namespace std;
#
# int main() {
# cout << "Hello world" << endl;
# return 0;
# }
import os
import sys
import tempfile
import subprocess
from dotenv import load_dotenv
from openai import OpenAI
# MODEL = "gpt-4-turbo"
# Chat model used for completions; swap in the commented line above for GPT-4.
MODEL = "gpt-3.5-turbo-0125"
# System prompt: forces the model to reply with bare code only — no prose,
# no Markdown fences — and to pick a sensible solution rather than asking
# follow-up questions.
ASSISTANT_DESC = """Provide only code as output without any description.
Provide only code in plain text format without Markdown formatting.
Do not include symbols such as ``` or ```python.
If there is a lack of details, provide most logical solution.
You are not allowed to ask for more details.
For example if the prompt is "Hello world Python", you should return "print('Hello world')".
"""
# Editor launched by the -e flag; falls back to vim when $EDITOR is unset.
EDITOR = os.environ.get("EDITOR", "vim")
def prompt_editor():
    """Open $EDITOR on a temp file and return its stripped contents as the prompt.

    Returns:
        str: the text the user saved in the editor, trailing whitespace removed.
    """
    # Create the file and close our handle BEFORE launching the editor.
    # Reading back through a still-open NamedTemporaryFile handle is
    # unreliable: editors such as vim typically save via write-to-temp +
    # rename, so the original inode (the one our handle points at) keeps
    # its stale, empty content.
    fd, path = tempfile.mkstemp(suffix=".txt")
    os.close(fd)
    try:
        subprocess.call([EDITOR, path])
        with open(path, "r", encoding="utf-8") as f:
            return f.read().rstrip()
    finally:
        # The original used delete=False and never removed the file,
        # leaking one temp file per invocation.
        os.remove(path)
def read_input():
    """Collect the prompt from stdin, the -e editor flag, or CLI arguments.

    Precedence: piped stdin first, then `-e` (compose the prompt in $EDITOR),
    then all command-line arguments joined into a single string.

    Returns:
        str: the prompt text.

    Exits:
        Status 1 with a message on stderr when no prompt is available.
    """
    # piped input, e.g. `echo "hello world in haskell" | nopilot`
    if not sys.stdin.isatty():
        return sys.stdin.read().strip()
    # -e: open a temp file in $EDITOR and use its contents
    elif len(sys.argv) == 2 and sys.argv[1] == "-e":
        return prompt_editor()
    # everything on the command line is the prompt
    elif len(sys.argv) > 1:
        return " ".join(sys.argv[1:])
    else:
        # Newline added so the message doesn't run into the shell prompt.
        sys.stderr.write("No prompt given\n")
        # Conventional failure status; the original's exit(-1) surfaced as 255.
        sys.exit(1)
def init_client():
    """Load environment variables from a .env file (if present) and build
    an OpenAI client keyed from $OPENAI_API_KEY."""
    load_dotenv()
    return OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
def gen_code(client, prompt):
    """Send *prompt* to the chat model and return the generated code text.

    The system message (ASSISTANT_DESC) instructs the model to answer with
    bare code only, so the returned string is printed verbatim.
    """
    conversation = [
        {"role": "system", "content": ASSISTANT_DESC},
        {"role": "user", "content": prompt},
    ]
    completion = client.chat.completions.create(model=MODEL, messages=conversation)
    return completion.choices[0].message.content
def main():
    """Script entry point: build the client, read the prompt, print the code."""
    client = init_client()
    prompt = read_input()
    text = gen_code(client, prompt)
    print("\n-------\n")
    print(text)


# Guarded so importing this module doesn't fire an API call as a side effect.
if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment