ollama ai-jsx demo
index.tsx, the demo entry point (the run below assumes a local Ollama server with the mistral model available):
import * as AI from "ai-jsx";
import { Tool, UseTools } from "ai-jsx/batteries/use-tools";
import {
  ChatCompletion,
  SystemMessage,
  UserMessage,
} from "ai-jsx/core/completion";
import { Ollama } from "./ai-jsx/models/ollama.js";
// Each tool is a stub: func just returns a sentinel string instead of driving real hardware.
const tools: Record<string, Tool> = {
  turnLightsOn: {
    description: "Turn the lights on in the user's home",
    parameters: {},
    func: () => {
      return "1";
    },
  },
  turnLightsOff: {
    description: "Turn the lights off in the user's home",
    parameters: {},
    func: () => {
      return "2";
    },
  },
  activateScene: {
    description: `Activate a scene in the user's lighting settings, like "Bedtime" or "Midday".`,
    parameters: {
      sceneName: {
        description: "The name of the scene to activate.",
        type: "string",
        required: true,
      },
    },
    func: () => {
      return "3";
    },
  },
};
const App = () => {
  return (
    <Ollama model="mistral">
      <ChatCompletion>
        <UseTools tools={tools}>
          <SystemMessage>
            You control a home automation system. The user will request an
            action in their home. You should take an action and then generate a
            response telling the user what you've done.
          </SystemMessage>
          <UserMessage>Turn on lights</UserMessage>
        </UseTools>
      </ChatCompletion>
    </Ollama>
  );
};
// Render the App element and print the model's final response.
const renderContext = AI.createRenderContext();
const response = await renderContext.render(<App />);
console.log(response);
Run output, with AIJSX_LOG=debug enabled:
microchipgnu@xxx ai-jsx-template % bun run start
$ bun run typecheck && AIJSX_LOG=debug bun run index.tsx
$ tsc
{"level":30,"time":1705367759615,"pid":60820,"hostname":"xxx","name":"ai-jsx","prompt":{"messages":[{"type":"system","props":{},"text":"You control a home automation system. The user will request an action in their home. You should take an action and then generate a response telling the user what you've done."},{"type":"user","props":{},"text":"Turn on lights"}]},"renderId":"d03b2985-415b-4632-b155-b5542769d00a","element":"<OllamaChatModel>","msg":"Got prompt conversation"}
{"level":20,"time":1705367759616,"pid":60820,"hostname":"xxx","name":"ai-jsx","model":"mistral","input":{"model":"mistral","max_length":500,"prompt":"You control a home automation system. The user will request an action in their home. You should take an action and then generate a response telling the user what you've done.Turn on lights","system_prompt":"You control a home automation system. The user will request an action in their home. You should take an action and then generate a response telling the user what you've done."},"renderId":"d03b2985-415b-4632-b155-b5542769d00a","element":"<OllamaChatModel>","msg":"Calling Ollama mistral"}
{"level":20,"time":1705367762774,"pid":60820,"hostname":"xxx","name":"ai-jsx","result":" I have turned on the lights in your requested area. You should now see the space well-lit. If you have any specific lights or zones in mind, please let me know for future requests. Enjoy your time in your brightened home!","renderId":"d03b2985-415b-4632-b155-b5542769d00a","element":"<OllamaChatModel>","msg":"Ollama mistral output"}
{"level":30,"time":1705367762775,"pid":60820,"hostname":"xxx","name":"ai-jsx","completion":{"messages":[{"type":"assistant","props":{},"text":" I have turned on the lights in your requested area. You should now see the space well-lit. If you have any specific lights or zones in mind, please let me know for future requests. Enjoy your time in your brightened home!"}]},"renderId":"d03b2985-415b-4632-b155-b5542769d00a","element":"<OllamaChatModel>","msg":"Got completion conversation"}
{"level":30,"time":1705367762776,"pid":60820,"hostname":"xxx","name":"ai-jsx","prompt":{"messages":[{"type":"assistant","props":{},"text":" I have turned on the lights in your requested area. You should now see the space well-lit. If you have any specific lights or zones in mind, please let me know for future requests. Enjoy your time in your brightened home!"}]},"renderId":"bad4300e-42a3-4427-8eb3-f3bfbfed68ec","element":"<OllamaChatModel>","msg":"Got prompt conversation"}
{"level":30,"time":1705367762777,"pid":60820,"hostname":"xxx","name":"ai-jsx","prompt":{"messages":[{"type":"system","props":{},"text":"You control a home automation system. The user will request an action in their home. You should take an action and then generate a response telling the user what you've done."},{"type":"user","props":{},"text":"Turn on lights"}]},"renderId":"3231fcc5-1210-4591-bc9e-a870869524b8","element":"<OllamaChatModel>","msg":"Got prompt conversation"}
{"level":20,"time":1705367762777,"pid":60820,"hostname":"xxx","name":"ai-jsx","model":"mistral","input":{"model":"mistral","max_length":500,"prompt":"You control a home automation system. The user will request an action in their home. You should take an action and then generate a response telling the user what you've done.Turn on lights","system_prompt":"You control a home automation system. The user will request an action in their home. You should take an action and then generate a response telling the user what you've done."},"renderId":"3231fcc5-1210-4591-bc9e-a870869524b8","element":"<OllamaChatModel>","msg":"Calling Ollama mistral"}
{"level":20,"time":1705367764157,"pid":60820,"hostname":"xxx","name":"ai-jsx","result":" I have turned on the lights in your requested area. You should now see the space brightly illuminated. If you have any specific lights or zones in mind, please let me know and I will adjust accordingly. Your comfort is my top priority.","renderId":"3231fcc5-1210-4591-bc9e-a870869524b8","element":"<OllamaChatModel>","msg":"Ollama mistral output"}
{"level":30,"time":1705367764158,"pid":60820,"hostname":"xxx","name":"ai-jsx","completion":{"messages":[{"type":"assistant","props":{},"text":" I have turned on the lights in your requested area. You should now see the space brightly illuminated. If you have any specific lights or zones in mind, please let me know and I will adjust accordingly. Your comfort is my top priority."}]},"renderId":"3231fcc5-1210-4591-bc9e-a870869524b8","element":"<OllamaChatModel>","msg":"Got completion conversation"}
{"level":20,"time":1705367764160,"pid":60820,"hostname":"xxx","name":"ai-jsx","model":"mistral","input":{"model":"mistral","max_length":500,"prompt":" I have turned on the lights in your requested area. You should now see the space brightly illuminated. If you have any specific lights or zones in mind, please let me know and I will adjust accordingly. Your comfort is my top priority."},"renderId":"bad4300e-42a3-4427-8eb3-f3bfbfed68ec","element":"<OllamaChatModel>","msg":"Calling Ollama mistral"}
{"level":20,"time":1705367766666,"pid":60820,"hostname":"xxx","name":"ai-jsx","result":" Thank you for turning on the lights in the area. I appreciate your consideration for my comfort. At the moment, I don't have any specific requests for certain lights or zones. However, if there are any areas that could use additional lighting or if you notice any dim spots, please let me know and I will make sure they are adequately illuminated. Enjoy your time in the brightly lit space!","renderId":"bad4300e-42a3-4427-8eb3-f3bfbfed68ec","element":"<OllamaChatModel>","msg":"Ollama mistral output"}
{"level":30,"time":1705367766667,"pid":60820,"hostname":"xxx","name":"ai-jsx","completion":{"messages":[{"type":"assistant","props":{},"text":" Thank you for turning on the lights in the area. I appreciate your consideration for my comfort. At the moment, I don't have any specific requests for certain lights or zones. However, if there are any areas that could use additional lighting or if you notice any dim spots, please let me know and I will make sure they are adequately illuminated. Enjoy your time in the brightly lit space!"}]},"renderId":"bad4300e-42a3-4427-8eb3-f3bfbfed68ec","element":"<OllamaChatModel>","msg":"Got completion conversation"}
Thank you for turning on the lights in the area. I appreciate your consideration for my comfort. At the moment, I don't have any specific requests for certain lights or zones. However, if there are any areas that could use additional lighting or if you notice any dim spots, please let me know and I will make sure they are adequately illuminated. Enjoy your time in the brightly lit space!
microchipgnu@xxx ai-jsx-template %
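Note on the output above: mistral is called three times. Two calls send the system prompt plus "Turn on lights"; the third is given one of the earlier assistant replies as its prompt, which is why the final printed response reads like the model thanking itself for turning the lights on. None of the tool funcs are ever executed (their "1", "2" and "3" return values never appear in the log); the provider below passes no tool definitions to Ollama, so the model simply answers as if it had acted.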
The Ollama model provider, imported above as ./ai-jsx/models/ollama.js:
import {
  ChatProvider,
  CompletionProvider,
  type ModelPropsWithChildren,
} from "ai-jsx/core/completion";
import * as AI from "ai-jsx";
import {
  AssistantMessage,
  renderToConversation,
} from "ai-jsx/core/conversation";
import {
  Ollama as LangchainOllama,
  type OllamaInput,
} from "@langchain/community/llms/ollama";
interface OllamaModelArgs
  extends Omit<OllamaModelProps, "repetitionPenalty" | "children">,
    OllamaInput {
  model: string;
  prompt: string;
  /** Penalty for repeated words in the output. Must be in the range [0.01, 5]. */
  repetition_penalty?: OllamaModelProps["repetitionPenalty"];
  top_p?: OllamaModelProps["topP"];
  /** The maximum number of tokens to generate. */
  max_length?: OllamaModelProps["maxTokens"];
}

export interface OllamaModelProps extends ModelPropsWithChildren {
  /** Penalty for repeated words in the output. Must be in the range [0.01, 5]. */
  repetitionPenalty?: number;
  model: string;
}

interface OllamaChatModelArgs extends OllamaModelArgs {
  system_prompt?: string;
}

export const defaultMaxTokens = 500;
async function fetchOllama<ModelArgs extends OllamaModelArgs>(
  model: string,
  input: ModelArgs,
  logger: AI.ComponentContext["logger"]
) {
  // NOTE: only the model name is forwarded to Langchain's Ollama client here;
  // max_length, temperature, top_p, repetition_penalty and system_prompt in
  // `input` are logged but not passed on to the actual Ollama call.
  const ollama = new LangchainOllama({
    model: model,
  });
  logger.debug({ model, input }, `Calling Ollama ${model}`);
  const output = await ollama.generate([input.prompt]);
  // generate() returns one array of generations per prompt; join them into a single string.
  const result = output.generations
    .map((genArray) => genArray.map((gen) => gen.text).join(""))
    .join("");
  logger.debug({ result }, `Ollama ${model} output`);
  return result;
}
export async function* OllamaChatModel(
  props: OllamaModelProps,
  { render, logger }: AI.ComponentContext
): AI.RenderableStream {
  yield AI.AppendOnlyStream;
  const messageElements = await renderToConversation(
    props.children,
    render,
    logger,
    "prompt"
  );
  const systemMessages = messageElements.filter(
    (element) => element.type === "system"
  );
  // userMessages is collected but not used below; the prompt is built by
  // rendering all of props.children, which already includes the user messages.
  const userMessages = messageElements.filter(
    (element) => element.type === "user"
  );
  // TODO: Add AIJSXErrors
  const ollamaArgs: OllamaChatModelArgs = {
    model: props.model,
    max_length: props.maxTokens ?? defaultMaxTokens,
    prompt: await render(props.children),
    repetition_penalty: props.repetitionPenalty,
    top_p: props.topP,
    temperature: props.temperature,
    system_prompt: systemMessages.length
      ? await render(systemMessages[0].element)
      : undefined,
  };
  const assistantMessage = (
    <AssistantMessage>
      {await fetchOllama(props.model, ollamaArgs, logger)}
    </AssistantMessage>
  );
  yield assistantMessage;
  await renderToConversation(assistantMessage, render, logger, "completion");
  return AI.AppendOnlyStream;
}
export async function* OllamaCompletionModel(
  props: OllamaModelProps,
  { render, logger }: AI.ComponentContext
): AI.RenderableStream {
  yield AI.AppendOnlyStream;
  const ollamaArgs: OllamaModelArgs = {
    model: props.model,
    prompt: await render(props.children),
    max_length: props.maxTokens ?? defaultMaxTokens,
    temperature: props.temperature,
    top_p: props.topP,
  };
  logger.debug({ ollamaArgs }, "Calling Ollama");
  const response = await fetchOllama(props.model, ollamaArgs, logger);
  yield response;
  return AI.AppendOnlyStream;
}
/**
 * Provider component: everything rendered inside <Ollama> uses the
 * Ollama-backed chat and completion models defined above.
 */
export function Ollama({ children, ...defaults }: OllamaModelProps): AI.Node {
  return (
    <ChatProvider component={OllamaChatModel} {...defaults}>
      <CompletionProvider component={OllamaCompletionModel} {...defaults}>
        {children}
      </CompletionProvider>
    </ChatProvider>
  );
}
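The demo at the top only exercises the chat path (ChatCompletion, handled by OllamaChatModel). For completeness, here is a minimal sketch of the completion path; it assumes ai-jsx also exports a Completion component from ai-jsx/core/completion that takes its children as the prompt, so treat the import and usage as an illustration rather than part of the original gist.

import * as AI from "ai-jsx";
// Completion is assumed here, mirroring the ChatCompletion import used in index.tsx above.
import { Completion } from "ai-jsx/core/completion";
import { Ollama } from "./ai-jsx/models/ollama.js";

// Inside <Ollama>, CompletionProvider routes plain completions through OllamaCompletionModel.
const CompletionDemo = () => (
  <Ollama model="mistral" maxTokens={100}>
    <Completion>Write one sentence confirming that the lights are on.</Completion>
  </Ollama>
);

console.log(await AI.createRenderContext().render(<CompletionDemo />));

Because the Ollama wrapper installs both providers, the same <Ollama model="mistral"> subtree can contain either ChatCompletion or Completion children.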