Skip to content

Instantly share code, notes, and snippets.

@iceener
Last active May 31, 2024 19:28
Show Gist options
  • Save iceener/d02201f2404ac7a8481813dde9287193 to your computer and use it in GitHub Desktop.
heyalice.app local server template
import express from 'express';
import { OpenAI } from 'openai';
// TODO: add your own OPENAI_API_KEY to the .env file.
// Builds an OpenAI client; the key is read from the environment at call time.
const initializeOpenAI = () => {
  const apiKey = process.env.OPENAI_API_KEY;
  return new OpenAI({ apiKey });
};
// Creates the Express app with JSON body parsing enabled for all routes.
const initializeServer = () => {
  const server = express();
  // Parse application/json request bodies into req.body.
  server.use(express.json());
  return server;
};
// Module-level singletons shared by the route handlers below.
const app = initializeServer();
const openai = initializeOpenAI();
// Returns the current time as an ISO-8601 string (e.g. "2024-05-31T19:28:00.000Z").
const getFormattedTimestamp = () => {
  const now = new Date();
  return now.toISOString();
};
/**
 * Relays an OpenAI streaming response to the client as NDJSON lines.
 *
 * @param {AsyncIterable<object>} stream - async iterable of chat-completion chunks.
 * @param {import('http').ServerResponse} res - response to write NDJSON lines to.
 */
const processStream = async (stream, res) => {
  for await (const chunk of stream) {
    const choice = chunk.choices[0];
    const response = {
      // Prefer the model the API actually reports; fall back to the requested one.
      model: chunk.model ?? "gpt-4",
      created_at: new Date().toISOString(),
      message: { content: choice?.delta?.content || '' },
      // The final streaming chunk carries a non-null finish_reason ("stop",
      // "length", …). The previous check (`delta?.index === null`) was always
      // false because deltas have no `index` field, so done was never true.
      done: choice?.finish_reason != null,
    };
    res.write(JSON.stringify(response) + '\n');
  }
};
/**
 * Logs an upstream failure and terminates the HTTP response.
 *
 * If streaming already started, the 200 status line has been flushed and
 * `writeHead(500)` would throw ERR_HTTP_HEADERS_SENT — so only set the
 * status when headers have not been sent yet.
 *
 * @param {unknown} error - the error to log.
 * @param {import('http').ServerResponse} res - response to finalize.
 */
const respondWithError = (error, res) => {
  // Log first so the failure is recorded even if finishing the response fails.
  console.error('Error processing OpenAI API request:', error);
  if (!res.headersSent) {
    res.writeHead(500, { 'Content-Type': 'application/json' });
    // Give the client a body instead of an empty 500.
    res.write(JSON.stringify({ error: 'Internal server error' }));
  }
  res.end();
};
/**
 * Returns an Express handler that streams chat completions as NDJSON.
 *
 * Fixes over the original:
 *  - The OpenAI request is started *before* the 200 status is written, so a
 *    failed request can still be answered with a proper 500 (the original
 *    committed the 200 first, making the error status impossible to send).
 *  - `res.end()` moved into the success path; the error path delegates to
 *    respondWithError, which ends the response itself (no double end()).
 *
 * @param {OpenAI} openai - configured OpenAI client.
 * @returns {(req: import('express').Request, res: import('express').Response) => Promise<void>}
 */
const chatEndpoint = (openai) => async (req, res) => {
  try {
    const stream = await openai.chat.completions.create({
      model: 'gpt-4',
      messages: req.body.messages,
      stream: true,
    });
    // Upstream accepted the request — commit to a streaming NDJSON response.
    res.writeHead(200, {
      'Content-Type': 'application/x-ndjson',
      'Transfer-Encoding': 'chunked'
    });
    await processStream(stream, res);
    res.end();
  } catch (error) {
    respondWithError(error, res);
  }
};
// Chat endpoint consumed by the heyalice.app client.
app.post('/api/chat', chatEndpoint(openai));

// Allow overriding the port via the environment; the default stays 3005.
const PORT = process.env.PORT ?? 3005;
app.listen(PORT, () => {
  console.log(`Server running on http://localhost:${PORT}`);
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment