Skip to content

Instantly share code, notes, and snippets.

@ddrscott
Last active August 29, 2023 21:55
Show Gist options
  • Save ddrscott/08fb103fc3e85bcb53fb7882837066f3 to your computer and use it in GitHub Desktop.
#!/usr/bin/env python3
"""
Requirements:
pip install click langchain openai
"""
import sys
import click
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
class CustomHandler(StreamingStdOutCallbackHandler):
    """Streaming callback that echoes each LLM token to stdout via click."""

    def on_llm_start(self, serialized, prompts, **_) -> None:
        # Intentionally suppress the parent class's start-of-run output.
        pass

    def on_llm_new_token(self, token: str, **_) -> None:
        # Print every streamed token immediately, with no trailing newline,
        # so the output reads as one continuous stream.
        click.echo(token, nl=False)

    def on_llm_end(self, response, **kwargs) -> None:
        # Finish the stream with a newline-terminated blank line.
        click.echo('\n')
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
def auto_lint(data, model):
    """Stream a "repaired" version of *data* to stdout.

    Sends the source text to an OpenAI chat model with a repair-bot
    system prompt; the streamed reply is printed token-by-token by
    CustomHandler.

    Args:
        data: The Python source text to be fixed.
        model: Name of the OpenAI chat model to use.
    """
    chat = ChatOpenAI(
        client=None,
        model=model,
        temperature=0.1,  # low temperature: we want faithful fixes, not creativity
        verbose=True,
        streaming=True,
        callbacks=[CustomHandler()],
    )
    # NOTE: the prompt's continuation lines are deliberately unindented so the
    # string content sent to the model is unchanged.
    instructions = """You are a Python script repair bot.
You are given Python scripts and your job is to perfect them.
You add Google style docstrings to functions, correct spelling mistakes, and cleanup whitespace.
You follow PEP8 style guidelines and use Black to format the code.
You also add TODO comments for smelly code and FIXME comments for known bugs.
You never explain yourself, just fix the code."""
    chat([
        SystemMessage(content=instructions),
        HumanMessage(content=data + "\n\n"),
    ])
@click.command()
@click.option('--model', '-m', default='gpt-3.5-turbo-16k-0613')
@click.argument('src',
                type=click.File('r'),
                default=sys.stdin)
def my_command(model, src):
    """Read Python source from SRC (a file path or stdin) and stream a fix.

    Args:
        model: OpenAI chat model name (``--model`` / ``-m``).
        src: Readable file handle opened by click; defaults to stdin.
    """
    # Fix: dropped the redundant `data = None` initialization that was
    # immediately overwritten. click.File opens the handle; the context
    # manager guarantees it is closed before the model call runs.
    with src:
        data = src.read()
    auto_lint(data, model)
if __name__ == '__main__':
    # Script entry point: hand control to the click command.
    my_command()
@ddrscott
Copy link
Author

ddrscott commented Aug 29, 2023

Example Use:

python auto.py file_to_fix.py

Example output when used on itself:

#!/usr/bin/env python3
"""
Requirements:
pip install click langchain openai
"""
import sys
import click
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage


class CustomHandler(StreamingStdOutCallbackHandler):
    def on_llm_start(self, serialized, prompts, **_):
        pass

    def on_llm_new_token(self, token: str, **_):
        click.echo(token, nl=False)

    def on_llm_end(self, response, **kwargs):
        click.echo('\n')


def auto_lint(data, model):
    """
    Auto lints the given data using the specified model.
    """
    llm = ChatOpenAI(
        client=None,
        model=model,
        temperature=0.1,
        verbose=True,
        callbacks=[CustomHandler()],
        streaming=True,
    )
    messages = [
        SystemMessage(
            content="""You are a Python script repair bot.
            You are given Python scripts and your job is to perfect them.
            You add Google style docstrings to functions, correct spelling mistakes, and cleanup whitespace.
            You follow PEP8 style guidelines and use Black to format the code.
            You also add TODO comments for smelly code and FIXME comments for known bugs.
            You never explain yourself, just fix the code."""
        ),
        HumanMessage(content=data + "\n\n"),
    ]
    llm(messages)


@click.command()
@click.option('--model', '-m', default='gpt-3.5-turbo-16k-0613')
@click.argument('src', type=click.File('r'), default=sys.stdin)
def my_command(model, src):
    """
    Command line interface for auto_lint function.
    """
    data = None
    with src:
        data = src.read()
        auto_lint(data, model)


if __name__ == '__main__':
    my_command()

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment