Skip to content

Instantly share code, notes, and snippets.

@waptik
Last active April 29, 2023 10:42
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save waptik/191e2a38da8dd08bce55f42705f0b1a4 to your computer and use it in GitHub Desktop.
Save waptik/191e2a38da8dd08bce55f42705f0b1a4 to your computer and use it in GitHub Desktop.
A custom class that uses LangChain agents with an OpenAI chat model.
import config from "$config/mod.ts";
import {
initializeAgentExecutorWithOptions,
} from "langchain/agents";
import { BaseLanguageModel } from "langchain/base_language";
import { CallbackManager, ConsoleCallbackHandler } from "langchain/callbacks";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { Calculator } from "langchain/tools/calculator";
import { BaseChatMessage } from "langchain/schema";
import { assert } from "asserts";
import {
BasePromptTemplate,
} from "langchain/prompts";
import { YouTubeSearchAPI } from "$utils/langchain/tools/youtubesearchapi.ts";
import { BufferMemory } from "langchain/memory";
import { Tool } from "langchain/tools";
/** Model identifiers this wrapper accepts for the underlying OpenAI client. */
type CHATGPT_MODELS = "gpt-3.5-turbo" | "gpt-4" | "text-davinci-003";

/** Persona definitions pulled from app config; keys are persona ids. */
const CHAT_PERSONAS = config.personas;
type CHAT_PERSONAS_TYPE = typeof CHAT_PERSONAS;
type KEY_OF_CHAT_PERSONAS = keyof CHAT_PERSONAS_TYPE;

/** Options accepted by {@link LangchainOpenAI.generate}. */
type GenerateReplyOptions = {
  /** Language model the agent executor runs against. */
  llm: BaseLanguageModel;
  /** Which configured persona's system prompt to use. */
  persona: KEY_OF_CHAT_PERSONAS;
  /** User's message text forwarded to the executor. */
  input?: string;
  /** Prior messages — accepted but currently only passed through on retry. */
  conversation?: BaseChatMessage[];
  /** Whether the calling user is an admin (currently unused by generate). */
  isAdmin?: boolean;
  /** Extra API keys (currently unused by generate). */
  keys?: Record<string, string>;
  callbackManager?: CallbackManager;
  prompt?: BasePromptTemplate;
  /** Internal retry counter; callers normally omit it. */
  retries?: number;
};

// Base sampling options for the OpenAI model.
// NOTE(review): deliberately left mutable — sendMessageGenerator adjusts
// maxTokens per call; do not convert to `as const` without fixing that site.
const OPENAI_COMPLETION_OPTIONS = {
  temperature: 0.7,
  maxTokens: 400,
};
/**
 * Chat helper that wires a LangChain conversational agent to an OpenAI
 * chat model, with a fixed toolset (calculator + YouTube search) and a
 * persona-specific system prompt taken from app config.
 */
class LangchainOpenAI {
  /** Model identifier forwarded to ChatOpenAI. */
  private modelName: string;
  // private cache: RedisCache;
  /** Tools exposed to the agent; YouTube search is appended in the ctor. */
  private tools: Tool[] = [new Calculator()];
  /** Callback manager shared with the agent; set by sendMessageGenerator. */
  private callbackManager?: CallbackManager;

  constructor(
    readonly chatModelName: CHATGPT_MODELS = "gpt-3.5-turbo"
  ) {
    this.modelName = chatModelName;
    this.tools = [...this.tools, new YouTubeSearchAPI()];
  }

  /**
   * Runs the conversational agent once and returns its string output.
   * On failure, retries up to two more times before returning an
   * `Error: <message>` string instead of throwing.
   *
   * @param options - See {@link GenerateReplyOptions}; only `llm`,
   *   `persona`, `input` and `retries` are consumed directly here.
   * @returns The agent's textual reply, or an error message string.
   */
  async generate({
    llm,
    keys,
    isAdmin,
    retries = 0,
    persona,
    conversation,
    input,
  }: GenerateReplyOptions): Promise<string> {
    this.callbackManager?.addHandler(new ConsoleCallbackHandler());
    const tools = [...this.tools]; //.concat(new YouTubeSearchAPI());
    const executor = await initializeAgentExecutorWithOptions(tools, llm, {
      agentType: "chat-conversational-react-description",
      verbose: true,
      agentArgs: {
        callbacks: this.callbackManager,
        // assumes each persona entry has a `prompt_start` system prompt —
        // TODO(review): confirm against config schema.
        systemMessage: CHAT_PERSONAS[persona].prompt_start,
      },
      // Executor-owned memory; note it starts empty on every call, so the
      // `conversation` option is not replayed into it here.
      memory: new BufferMemory({
        returnMessages: true,
        memoryKey: "chat_history",
        inputKey: "input",
      }),
    });
    try {
      const response = await executor.call({ input });
      console.log("response", response);
      assert(typeof response.output === "string");
      return response.output;
    } catch (error) {
      // Retry up to two additional attempts before surfacing the error.
      if (retries < 2) {
        return this.generate({
          llm,
          conversation,
          keys,
          input,
          isAdmin,
          retries: retries + 1,
          persona,
        });
      }
      console.error("Error when generating agent message", error);
      assert(error instanceof Error);
      return `Error: ${error.message}`;
    }
  }

  /**
   * Builds a streaming ChatOpenAI client (routed through the Helicone
   * caching proxy) and kicks off `generate` for the given user message.
   * Fire-and-forget: the reply is logged, not returned.
   *
   * @param input - The user's message.
   * @param persona - Persona key; must exist in config (throws otherwise).
   * @param userId - Telegram/user id; forwarded to OpenAI as `user` and
   *   used for the admin check.
   * @throws Error if `persona` is not a configured chat mode.
   */
  async sendMessageGenerator(
    input: string,
    persona: KEY_OF_CHAT_PERSONAS = "assistant",
    userId: number
  ) {
    if (!(persona in CHAT_PERSONAS)) {
      throw new Error(`Chat mode ${persona} is not supported`);
    }
    // FIX: budget extra tokens for long inputs in a per-call copy.
    // The previous `OPENAI_COMPLETION_OPTIONS.maxTokens += input.length`
    // mutated the shared module-level object, so maxTokens grew without
    // bound across successive calls.
    const completionOptions = {
      ...OPENAI_COMPLETION_OPTIONS,
      maxTokens: OPENAI_COMPLETION_OPTIONS.maxTokens + input.length,
    };
    const callbackManager = CallbackManager.fromHandlers({
      handleLLMNewToken: async (token) => {
        console.log("handleLLMNewToken", { token });
        token = token.trim();
      },
      handleLLMError: async (e) => {
        console.error("handleLLMError", e);
      },
    });
    const llm = new ChatOpenAI(
      {
        modelName: this.modelName,
        cache: true,
        // maxConcurrency: 5,
        callbackManager,
        modelKwargs: {
          user: userId.toString(),
        },
        verbose: true,
        streaming: true,
        ...completionOptions,
      },
      {
        basePath: "https://oai.hconeai.com/v1",
        baseOptions: {
          headers: {
            "Helicone-Cache-Enabled": "true",
          },
        },
      }
    ); // llm definition
    this.callbackManager = callbackManager;
    this.generate({
      llm,
      input,
      persona,
      isAdmin: config.admins.includes(userId),
    })
      .then((response) => {
        console.log("Output:", response);
      })
      .catch((error) => {
        console.error("Error when generating agent message", error);
      });
  } // sendMessageGenerator
} // end class LangchainOpenAI

export default LangchainOpenAI;
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment