Skip to content

Instantly share code, notes, and snippets.

@farhadjaman
Created February 5, 2024 08:43
Show Gist options
  • Save farhadjaman/2dcb657501d25948be98e0b6ee7a831b to your computer and use it in GitHub Desktop.
import { OpenAI } from 'langchain/llms/openai'
import { PromptTemplate } from 'langchain/prompts'
import { loadQARefineChain } from 'langchain/chains'
import { MemoryVectorStore } from 'langchain/vectorstores/memory'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
import {
StructuredOutputParser,
OutputFixingParser,
} from 'langchain/output_parsers'
import { Document } from 'langchain/document'
import { z } from 'zod'
// Structured-output parser backed by a Zod schema: tells the model to emit
// JSON with the entry's mood and subject, and validates the model's reply.
const schema = z.object({
  mood: z.string().describe('the mood of the person who wrote the journal entry.'),
  subject: z.string().describe('the subject of the journal entry.'),
})
const parser = StructuredOutputParser.fromZodSchema(schema)
/**
 * Build the fully-formatted prompt for analyzing one journal entry.
 *
 * Combines the parser's JSON format instructions (injected as a partial
 * variable) with the raw entry text.
 *
 * @param {string} content - the journal entry's text.
 * @returns {Promise<string>} the rendered prompt string to send to the model.
 */
const getPrompt = async (content) => {
  const format_instructions = parser.getFormatInstructions()
  const prompt = new PromptTemplate({
    // FIX: "intrusctions" → "instructions" — the typo was sent verbatim to
    // the LLM and weakened the formatting directive.
    template:
      'Analyze the following journal entry. Follow the instructions and format your response to match the format instructions, no matter what! \n{format_instructions}\n{entry}',
    inputVariables: ['entry'],
    // format_instructions is fixed per-parser, so it is partial-applied here.
    partialVariables: { format_instructions },
  })
  return prompt.format({ entry: content })
}
/**
 * Analyze a journal entry with the LLM and return its structured summary.
 *
 * @param {{ content: string }} entry - journal entry record; only `content` is read.
 * @returns {Promise<{ mood: string, subject: string }>} parsed mood/subject.
 */
export const analyzeEntry = async (entry) => {
  const input = await getPrompt(entry.content)
  const model = new OpenAI({ temperature: 0, modelName: 'gpt-3.5-turbo' })
  const output = await model.call(input)
  try {
    // FIX: `await` is required here — parser.parse() is async, and returning
    // the un-awaited promise let rejections escape this try/catch, so the
    // OutputFixingParser fallback below was never reached.
    return await parser.parse(output)
  } catch (e) {
    // Malformed model output: ask a second LLM pass to repair it so it
    // conforms to the parser's schema.
    const fixParser = OutputFixingParser.fromLLM(
      new OpenAI({ temperature: 0, modelName: 'gpt-3.5-turbo' }),
      parser
    )
    return fixParser.parse(output)
  }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment