@akdeb
Last active February 21, 2024 16:12
Streaming JSON objects returned by an LLM stream (OpenAI, Anthropic, Mistral, etc.) in Next.js / React
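Two pieces below: a client component that reads the streamed response and pulls each complete JSON object out of a rolling buffer with a regex, and an edge API route that streams an OpenAI chat completion back using OpenAIStream / StreamingTextResponse from Vercel's ai package. The model is prompted to emit a bare JSON array, e.g. [{"topic":"..."},{"topic":"..."}], so the client can parse each element as soon as its closing brace and trailing comma arrive, rather than waiting for the whole response to finish.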
"use client";
// generated this with copilot
const YourClient: React.FC<{}> = (props) => {
function processBuffer(
buffer: string,
onObjectParsed: (object: any) => void,
isFinal: boolean = false
): string {
// Pattern to find the start and end of a JSON object/array in the buffer
let pattern = isFinal
? /(\{.*?\})|(\[.*?\])/gs
: /(\{.*?\})|(\[.*?\]),/gs;
let match: RegExpExecArray | null;
let newBuffer = buffer;
while ((match = pattern.exec(buffer)) !== null) {
try {
const jsonObject = JSON.parse(match[0]);
onObjectParsed(jsonObject);
newBuffer = buffer.substring(pattern.lastIndex);
} catch (error) {
// If we're on the final chunk, attempt to parse the remainder of the buffer
if (isFinal) {
console.error("Error parsing JSON:", error);
}
break; // Break on first failed parse in non-final chunks
}
}
return newBuffer; // Return the unprocessed part of the buffer
}
async function readStream(
response: Response,
onObjectParsed: (object: any) => void
): Promise<void> {
const reader = response.body?.getReader();
if (!reader) {
throw new Error("Failed to get stream reader");
}
let buffer = "";
const decoder = new TextDecoder();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
// Process buffer each time we get new data
buffer = processBuffer(buffer, onObjectParsed);
}
// Final processing to catch any remaining data
processBuffer(buffer, onObjectParsed, true);
} catch (error) {
console.error("Stream reading failed:", error);
}
}
const onRunClick = async () => {
const response = await fetch("/api/suggest-emails", {
method: "POST",
body: JSON.stringify({
prompt: form.getValues().body_prompt,
}),
headers: {
"Content-Type": "application/json",
},
});
// here we are expecting the LLM to return an array of objects. The methods processBuffer and readStream will parse the data as a JSON
await readStream(response, (parsedObject: any) => {
// This function is called for each parsed object from the stream
console.log("Parsed Object:", parsedObject);
// You can update your state or variable here
});
};
return (
<button onClick={onRunClick}>click me!</button>
);
};
export default YourClient;
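A quick way to sanity-check processBuffer (assuming you lift it out of the component to module scope) is to feed it the kind of chunks a stream might deliver; the values here are illustrative, not from the gist:

// Illustrative check, assuming processBuffer is lifted to module scope.
// Simulates a stream that delivers the array across three chunks.
const chunks = ['[{"topic":"Subject lines"},', '{"topic":"Call to ac', 'tion"}]'];
let buf = "";
chunks.forEach((chunk, i) => {
  buf += chunk;
  buf = processBuffer(buf, (obj) => console.log("parsed:", obj), i === chunks.length - 1);
});
// parsed: { topic: 'Subject lines' }
// parsed: { topic: 'Call to action' }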
// API route (file name assumed: app/api/suggest-emails/route.ts)
import OpenAI from "openai";
import { OpenAIStream, StreamingTextResponse } from "ai";
import { NextResponse } from "next/server";
import { SuggestEmailsPrompt } from "@/lib/utils";

export const runtime = "edge";

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
});

export async function POST(req: Request) {
  // Extract the `prompt` from the body of the request
  const { prompt } = await req.json();

  // Ask the OpenAI API for a streamed completion based on the prompt
  const response = await openai.chat.completions.create({
    model: "gpt-4-turbo-preview",
    stream: true,
    // A precise prompt is important so the model replies with exactly the
    // bare JSON array the client-side parser expects
    messages: [
      {
        role: "system",
        content: SuggestEmailsPrompt(),
      },
      {
        role: "user",
        content: `From user: ${prompt}. Generate an array of topics/lessons on this topic.`,
      },
    ],
    max_tokens: 500,
    temperature: 0.7, // lower this toward 0 for more deterministic output
    top_p: 1,
    frequency_penalty: 1,
    presence_penalty: 1,
    // response_format: { type: "json_object" }, // optional: force JSON mode
  });

  // Non-streaming alternative (only works with stream: false above):
  // return NextResponse.json(
  //   { message: response.choices[0].message.content },
  //   { status: 200 }
  // );

  const stream = OpenAIStream(response);
  return new StreamingTextResponse(stream);
}
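SuggestEmailsPrompt is imported from @/lib/utils but not included in the gist. A minimal sketch of what it could look like, given that the client-side parser needs a bare, flat JSON array (hypothetical; the wording and field names are assumptions):

// lib/utils.ts (hypothetical sketch; this file is not part of the gist)
export function SuggestEmailsPrompt(): string {
  // The client parser expects a flat JSON array streamed as plain text,
  // so the prompt forbids surrounding prose and markdown fences
  return [
    "You suggest email topics/lessons for the user's request.",
    'Respond with ONLY a JSON array of flat objects, e.g. [{"topic":"...","summary":"..."}].',
    "Do not wrap the array in markdown code fences or add any commentary.",
  ].join(" ");
}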
akdeb commented Feb 21, 2024

post here if you find a better way ;)
