Skip to content

Instantly share code, notes, and snippets.

@rymate1234
Created May 26, 2023 13:10
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save rymate1234/d8cbcf54b5fe47317283584235240379 to your computer and use it in GitHub Desktop.
import asyncio
import datetime
import json
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional

import openai
from rich.console import Console
from rich.markdown import Markdown
# Rich console pinned to 80 columns so logged requests/responses wrap sanely.
console = Console(
width=80,
)
# WARNING(review): hard-coded (empty) API key — should come from an
# environment variable (e.g. OPENAI_API_KEY), never from source control.
openai.api_key = ""
# System prompt handed to the model: it role-plays a plain HTTP/1.1 web
# server hosting a search engine. {time}/{date} are filled at request time.
template = """
You are a web server. The application you are hosting is a functional Search Engine.
You listen to and respond to HTTP 1.1 messages. It does not support GZIP or chunked responses.
Any web page will have valid HTML and CSS styles. Feel free to make up content for the page.
Further messages will be in HTTP format and you must respond with a valid HTTP response.
The time is {time} on {date}.
"""
# Timestamped debug-log name.
# NOTE(review): defined but never used below — `ChatGPT.execute()` writes to
# the literal "test.json" instead; this looks like the intended target. Confirm.
filename = f"debug-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.json"
@dataclass
class ChatGPT:
    """An asyncio TCP server whose HTTP responses are written by OpenAI chat models.

    Each raw HTTP request is appended to a running chat transcript; the model's
    reply text is sent back verbatim as the HTTP response. Long transcripts are
    summarised to keep the context window small.
    """

    # NOTE(review): `system` and `user_start` are accepted but never read by any
    # method below — kept only so existing construction sites keep working.
    system: Optional[str] = None
    messages: List[dict] = field(default_factory=list)
    token_total: int = 0          # running total of billed tokens across API calls
    user_start: bool = True
    temperature: float = 1.0

    async def __call__(self):
        """Seed the persona prompt, then serve on 0.0.0.0:8888 forever."""
        self.messages.append({
            "role": "system",
            "content": template.format(
                time=datetime.now().strftime("%H:%M:%S"),
                date=datetime.now().strftime("%d/%m/%Y"),
            ),
        })
        server = await asyncio.start_server(self.handle, '0.0.0.0', 8888)
        addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)
        print(f'Serving on {addrs}')
        async with server:
            await server.serve_forever()

    async def handle(self, reader, writer):
        """Serve one connection: read a single request, write the model's reply.

        Request bodies are ignored — only the header block up to the blank
        line is read, which is fine for GET-style traffic.
        """
        try:
            data = await reader.readuntil(b"\r\n\r\n")
            self.user_act(data.decode("utf-8"))
            res = await self.assistant_act()
            writer.write(res.encode("utf-8"))
            await writer.drain()
        except asyncio.IncompleteReadError:
            # Client hung up before sending a complete header block; the
            # original code raised here and leaked the writer. Just drop it.
            pass
        finally:
            # Always release the connection, even if the API call failed.
            writer.close()
            await writer.wait_closed()

    def user_act(self, user_input=None):
        """Log an incoming HTTP request and record it as a user message."""
        console.print(f"[bold blue]REQ[/bold blue]: {user_input}")
        self.messages.append({"role": "user", "content": user_input})

    def system_act(self, system_input):
        """Log an outgoing HTTP response and record it as a system message."""
        console.print(f"[bold green]RES[/bold green]: {system_input}")
        self.messages.append({"role": "system", "content": system_input})

    async def assistant_act(self):
        """Generate a response, record it in the transcript, and return it."""
        result = await self.execute()
        self.system_act(result)
        return result

    async def generate_history(self):
        """Return the message list to send to the model.

        Sessions shorter than six messages go verbatim; longer ones are
        collapsed to a GPT-4 summary, a fresh persona prompt, and the four
        most recent messages.
        """
        if len(self.messages) < 6:
            return self.messages
        completion = await openai.ChatCompletion.acreate(
            model="gpt-4",
            messages=[
                {"role": "user",
                 "content": f"Summarise the HTTP session so far: {json.dumps(self.messages)}"},
            ],
            temperature=self.temperature,
        )
        self.token_total += completion["usage"]["total_tokens"]
        messages = [
            {"role": "system",
             "content": completion['choices'][0]['message']['content']},
            {
                "role": "user",
                "content": template.format(
                    time=datetime.now().strftime("%H:%M:%S"),
                    date=datetime.now().strftime("%d/%m/%Y"),
                ),
            },
        ]
        # Keep the four most recent messages verbatim (was four manual appends,
        # which raised IndexError on exactly-5-message histories of prior runs).
        messages.extend(self.messages[-4:])
        return messages

    async def execute(self):
        """Call gpt-3.5-turbo with the (possibly summarised) history."""
        messages = await self.generate_history()
        # Debug dump of the outgoing payload; `with` guarantees the handle is
        # closed even if serialization fails (original open/close leaked on error).
        # NOTE(review): module-level `filename` looks like the intended target
        # here instead of the literal "test.json" — confirm before changing.
        with open("test.json", "w") as f:
            json.dump(messages, f, indent=4)
        completion = await openai.ChatCompletion.acreate(
            model="gpt-3.5-turbo", messages=messages,
            temperature=self.temperature,
        )
        self.token_total += completion["usage"]["total_tokens"]
        return completion["choices"][0]["message"]["content"]
if __name__ == "__main__":
    # Entry guard so importing this module doesn't start the server.
    assistant = ChatGPT(user_start=True, temperature=0.5,
                        system="Assistant")
    asyncio.run(assistant())
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment