human (Aug 27, 2024, 02:43 PM)
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GoogleGenerativeAI } from "@google/generative-ai";
import fs from "fs";
import { dirname } from "path";
import { fileURLToPath } from "url";
const __dirname = dirname(fileURLToPath(import.meta.url));
const mediaPath = __dirname + "/media";
async function chat() {
// [START chat]
// Make sure to include these imports:
// import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
let result = await chat.sendMessage("I have 2 dogs in my house.");
console.log(result.response.text());
result = await chat.sendMessage("How many paws are in my house?");
console.log(result.response.text());
// [END chat]
}
async function chatStreaming() {
// [START chat_streaming]
// Make sure to include these imports:
// import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
let result = await chat.sendMessageStream("I have 2 dogs in my house.");
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
result = await chat.sendMessageStream("How many paws are in my house?");
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
// [END chat_streaming]
}
async function chatStreamingWithImages() {
// [START chat_streaming_with_images]
// Make sure to include these imports:
// import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat();
let result = await chat.sendMessageStream("Hello, I'm designing inventions. Can I show you one?");
process.stdout.write('\n\nmodel:\n');
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
result = await chat.sendMessageStream(["What do you think about this design?", {
inlineData: {
data: Buffer.from(fs.readFileSync(`${mediaPath}/jetpack.jpg`)).toString("base64"),
mimeType: "image/jpeg",
},
}]);
process.stdout.write('\n\nmodel:\n');
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
// [END chat_streaming_with_images]
}
async function runAll() {
// Comment out or delete any sample cases you don't want to run.
await chat();
await chatStreaming();
await chatStreamingWithImages();
}
runAll();
This code uses Node.js and google/generative-ai - but that library can also be loaded in the browser like this:
import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai";
import { marked } from "https://esm.run/marked";
function getApiKey() {
let apiKey = localStorage.getItem("GEMINI_API_KEY");
if (!apiKey) {
apiKey = prompt("Please enter your Gemini API key:");
if (apiKey) {
localStorage.setItem("GEMINI_API_KEY", apiKey);
}
}
return apiKey;
}
async function getGenerativeModel(params) {
const API_KEY = getApiKey();
const genAI = new GoogleGenerativeAI(API_KEY);
return genAI.getGenerativeModel(params);
}
Build me a JavaScript app (no react) that I can use to chat with the Gemini model, using the above strategy for API key usage
assistant (Aug 27, 2024, 02:43 PM)
Certainly! I'll create a simple JavaScript app that allows you to chat with the Gemini model using the strategy you provided for API key usage. This app will use vanilla JavaScript and HTML, without any frameworks like React.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Gemini Chat App</title>
<script type="module">
import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai";
import { marked } from "https://esm.run/marked";
function getApiKey() {
let apiKey = localStorage.getItem("GEMINI_API_KEY");
if (!apiKey) {
apiKey = prompt("Please enter your Gemini API key:");
if (apiKey) {
localStorage.setItem("GEMINI_API_KEY", apiKey);
}
}
return apiKey;
}
async function getGenerativeModel(params) {
const API_KEY = getApiKey();
const genAI = new GoogleGenerativeAI(API_KEY);
return genAI.getGenerativeModel(params);
}
let chat;
async function initChat() {
const model = await getGenerativeModel({ model: "gemini-1.5-pro" });
chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Hello! How can I assist you today?" }],
},
],
});
displayMessage("Model", "Hello! How can I assist you today?");
}
async function sendMessage() {
const userInput = document.getElementById("user-input");
const message = userInput.value.trim();
if (message) {
displayMessage("You", message);
userInput.value = "";
try {
const result = await chat.sendMessageStream(message);
let fullResponse = "";
for await (const chunk of result.stream) {
const chunkText = chunk.text();
fullResponse += chunkText;
updateModelResponse(fullResponse);
}
} catch (error) {
console.error("Error:", error);
displayMessage("Error", "An error occurred while processing your request.");
}
}
}
function displayMessage(sender, message) {
const chatMessages = document.getElementById("chat-messages");
const messageElement = document.createElement("div");
messageElement.innerHTML = `<strong>${sender}:</strong> ${marked.parse(message)}`;
chatMessages.appendChild(messageElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function updateModelResponse(response) {
const chatMessages = document.getElementById("chat-messages");
let modelResponse = chatMessages.lastElementChild;
if (!modelResponse || !modelResponse.querySelector("strong")?.textContent.includes("Model")) {
modelResponse = document.createElement("div");
modelResponse.innerHTML = "<strong>Model:</strong> ";
chatMessages.appendChild(modelResponse);
}
modelResponse.innerHTML = `<strong>Model:</strong> ${marked.parse(response)}`;
chatMessages.scrollTop = chatMessages.scrollHeight;
}
window.onload = () => {
initChat();
document.getElementById("send-button").addEventListener("click", sendMessage);
document.getElementById("user-input").addEventListener("keypress", (e) => {
if (e.key === "Enter") sendMessage();
});
};
</script>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
#chat-messages {
height: 400px;
overflow-y: auto;
border: 1px solid #ccc;
padding: 10px;
margin-bottom: 10px;
}
#user-input {
width: calc(100% - 70px);
padding: 5px;
}
#send-button {
width: 60px;
padding: 5px;
}
</style>
</head>
<body>
<h1>Gemini Chat App</h1>
<div id="chat-messages"></div>
<input type="text" id="user-input" placeholder="Type your message...">
<button id="send-button">Send</button>
</body>
</html>
This HTML file contains a complete Gemini chat application. Here's a breakdown of its key components:
- It imports the necessary libraries (GoogleGenerativeAI and marked) from CDNs.
- It implements the API key strategy you provided, storing the key in localStorage.
- The chat interface includes a message display area, an input field, and a send button.
- Messages are sent to the Gemini model using the chat functionality.
- The model's responses are streamed and updated in real-time.
- Markdown in the responses is rendered using the marked library.
To use this application:
- Save the content as an HTML file (e.g.,
gemini-chat-app.html
). - Open the file in a web browser.
- When prompted, enter your Gemini API key.
- Start chatting with the Gemini model!
The app will remember your API key for future sessions. If you need to change the key, you can clear your browser's localStorage for the page.
This implementation uses the gemini-1.5-pro
model. You can change this to gemini-1.5-flash
or another available model by modifying the model
parameter in the getGenerativeModel
function call.
Would you like me to explain any part of the code in more detail?
human (Aug 27, 2024, 02:47 PM)
that's great, modify it to catch any exceptions and show them as red text
assistant (Aug 27, 2024, 02:47 PM)
Certainly! I'll modify the code to catch exceptions and display them as red text in the chat interface. This will help users understand when there's an error, such as an invalid API key or network issues.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Gemini Chat App</title>
<script type="module">
import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai";
import { marked } from "https://esm.run/marked";
function getApiKey() {
let apiKey = localStorage.getItem("GEMINI_API_KEY");
if (!apiKey) {
apiKey = prompt("Please enter your Gemini API key:");
if (apiKey) {
localStorage.setItem("GEMINI_API_KEY", apiKey);
}
}
return apiKey;
}
async function getGenerativeModel(params) {
const API_KEY = getApiKey();
const genAI = new GoogleGenerativeAI(API_KEY);
return genAI.getGenerativeModel(params);
}
let chat;
async function initChat() {
try {
const model = await getGenerativeModel({ model: "gemini-1.5-pro" });
chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Hello! How can I assist you today?" }],
},
],
});
displayMessage("Model", "Hello! How can I assist you today?");
} catch (error) {
displayError("Failed to initialize chat: " + error.message);
}
}
async function sendMessage() {
const userInput = document.getElementById("user-input");
const message = userInput.value.trim();
if (message) {
displayMessage("You", message);
userInput.value = "";
try {
const result = await chat.sendMessageStream(message);
let fullResponse = "";
for await (const chunk of result.stream) {
const chunkText = chunk.text();
fullResponse += chunkText;
updateModelResponse(fullResponse);
}
} catch (error) {
displayError("Error: " + error.message);
}
}
}
function displayMessage(sender, message) {
const chatMessages = document.getElementById("chat-messages");
const messageElement = document.createElement("div");
messageElement.innerHTML = `<strong>${sender}:</strong> ${marked.parse(message)}`;
chatMessages.appendChild(messageElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function displayError(message) {
const chatMessages = document.getElementById("chat-messages");
const errorElement = document.createElement("div");
errorElement.innerHTML = `<strong style="color: red;">Error:</strong> <span style="color: red;">${message}</span>`;
chatMessages.appendChild(errorElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function updateModelResponse(response) {
const chatMessages = document.getElementById("chat-messages");
let modelResponse = chatMessages.lastElementChild;
if (!modelResponse || !modelResponse.querySelector("strong")?.textContent.includes("Model")) {
modelResponse = document.createElement("div");
modelResponse.innerHTML = "<strong>Model:</strong> ";
chatMessages.appendChild(modelResponse);
}
modelResponse.innerHTML = `<strong>Model:</strong> ${marked.parse(response)}`;
chatMessages.scrollTop = chatMessages.scrollHeight;
}
window.onload = () => {
initChat();
document.getElementById("send-button").addEventListener("click", sendMessage);
document.getElementById("user-input").addEventListener("keypress", (e) => {
if (e.key === "Enter") sendMessage();
});
};
</script>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
#chat-messages {
height: 400px;
overflow-y: auto;
border: 1px solid #ccc;
padding: 10px;
margin-bottom: 10px;
}
#user-input {
width: calc(100% - 70px);
padding: 5px;
}
#send-button {
width: 60px;
padding: 5px;
}
</style>
</head>
<body>
<h1>Gemini Chat App</h1>
<div id="chat-messages"></div>
<input type="text" id="user-input" placeholder="Type your message...">
<button id="send-button">Send</button>
</body>
</html>
I've made the following changes to improve error handling and display:
-
Added a new
displayError
function:function displayError(message) { const chatMessages = document.getElementById("chat-messages"); const errorElement = document.createElement("div"); errorElement.innerHTML = `<strong style="color: red;">Error:</strong> <span style="color: red;">${message}</span>`; chatMessages.appendChild(errorElement); chatMessages.scrollTop = chatMessages.scrollHeight; }
This function creates a new element in the chat display with red text for errors.
-
Modified the
initChat
function to catch and display errors:async function initChat() { try { // ... existing initialization code ... } catch (error) { displayError("Failed to initialize chat: " + error.message); } }
-
Updated the
sendMessage
function to use the new error display:async function sendMessage() { // ... existing code ... try { // ... existing message sending code ... } catch (error) { displayError("Error: " + error.message); } }
These changes will catch and display any errors that occur during chat initialization or message sending. The errors will appear in red text within the chat interface, making them easily noticeable to the user.
Common errors that might be caught and displayed include:
- Invalid API key
- Network connectivity issues
- Rate limiting or quota exceeded errors
- Any unexpected errors from the Gemini API
This improved error handling will help users understand when something goes wrong and potentially how to fix it (e.g., by providing a new API key if the current one is invalid).
Is there anything else you'd like me to modify or explain about this chat application?
human (Aug 27, 2024, 02:48 PM)
add a model selection UI with these values gemini-1.5-pro (default) gemini-1.5-pro-exp-0827 gemini-1.5-flash-exp-0827 gemini-1.5-flash-8b-exp-0827
assistant (Aug 27, 2024, 02:48 PM)
Certainly! I'll add a model selection UI with the options you've provided. This will allow users to choose different Gemini models for their chat experience.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Gemini Chat App</title>
<script type="module">
import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai";
import { marked } from "https://esm.run/marked";
let chat;
let currentModel = "gemini-1.5-pro";
function getApiKey() {
let apiKey = localStorage.getItem("GEMINI_API_KEY");
if (!apiKey) {
apiKey = prompt("Please enter your Gemini API key:");
if (apiKey) {
localStorage.setItem("GEMINI_API_KEY", apiKey);
}
}
return apiKey;
}
async function getGenerativeModel(params) {
const API_KEY = getApiKey();
const genAI = new GoogleGenerativeAI(API_KEY);
return genAI.getGenerativeModel(params);
}
async function initChat() {
try {
const model = await getGenerativeModel({ model: currentModel });
chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Hello! How can I assist you today?" }],
},
],
});
displayMessage("Model", `Chat initialized with ${currentModel}. How can I assist you today?`);
} catch (error) {
displayError("Failed to initialize chat: " + error.message);
}
}
async function sendMessage() {
const userInput = document.getElementById("user-input");
const message = userInput.value.trim();
if (message) {
displayMessage("You", message);
userInput.value = "";
try {
const result = await chat.sendMessageStream(message);
let fullResponse = "";
for await (const chunk of result.stream) {
const chunkText = chunk.text();
fullResponse += chunkText;
updateModelResponse(fullResponse);
}
} catch (error) {
displayError("Error: " + error.message);
}
}
}
function displayMessage(sender, message) {
const chatMessages = document.getElementById("chat-messages");
const messageElement = document.createElement("div");
messageElement.innerHTML = `<strong>${sender}:</strong> ${marked.parse(message)}`;
chatMessages.appendChild(messageElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function displayError(message) {
const chatMessages = document.getElementById("chat-messages");
const errorElement = document.createElement("div");
errorElement.innerHTML = `<strong style="color: red;">Error:</strong> <span style="color: red;">${message}</span>`;
chatMessages.appendChild(errorElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function updateModelResponse(response) {
const chatMessages = document.getElementById("chat-messages");
let modelResponse = chatMessages.lastElementChild;
if (!modelResponse || !modelResponse.querySelector("strong")?.textContent.includes("Model")) {
modelResponse = document.createElement("div");
modelResponse.innerHTML = "<strong>Model:</strong> ";
chatMessages.appendChild(modelResponse);
}
modelResponse.innerHTML = `<strong>Model:</strong> ${marked.parse(response)}`;
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function changeModel() {
const modelSelect = document.getElementById("model-select");
currentModel = modelSelect.value;
displayMessage("System", `Changing model to ${currentModel}. Reinitializing chat...`);
initChat();
}
window.onload = () => {
initChat();
document.getElementById("send-button").addEventListener("click", sendMessage);
document.getElementById("user-input").addEventListener("keypress", (e) => {
if (e.key === "Enter") sendMessage();
});
document.getElementById("model-select").addEventListener("change", changeModel);
};
</script>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
#chat-messages {
height: 400px;
overflow-y: auto;
border: 1px solid #ccc;
padding: 10px;
margin-bottom: 10px;
}
#user-input {
width: calc(100% - 70px);
padding: 5px;
}
#send-button {
width: 60px;
padding: 5px;
}
#model-select {
margin-bottom: 10px;
}
</style>
</head>
<body>
<h1>Gemini Chat App</h1>
<select id="model-select">
<option value="gemini-1.5-pro">gemini-1.5-pro (default)</option>
<option value="gemini-1.5-pro-exp-0827">gemini-1.5-pro-exp-0827</option>
<option value="gemini-1.5-flash-exp-0827">gemini-1.5-flash-exp-0827</option>
<option value="gemini-1.5-flash-8b-exp-0827">gemini-1.5-flash-8b-exp-0827</option>
</select>
<div id="chat-messages"></div>
<input type="text" id="user-input" placeholder="Type your message...">
<button id="send-button">Send</button>
</body>
</html>
I've made the following changes to add the model selection feature:
-
Added a
<select>
element in the HTML body with the model options you provided:<select id="model-select"> <option value="gemini-1.5-pro">gemini-1.5-pro (default)</option> <option value="gemini-1.5-pro-exp-0827">gemini-1.5-pro-exp-0827</option> <option value="gemini-1.5-flash-exp-0827">gemini-1.5-flash-exp-0827</option> <option value="gemini-1.5-flash-8b-exp-0827">gemini-1.5-flash-8b-exp-0827</option> </select>
-
Added a
currentModel
variable to keep track of the selected model:let currentModel = "gemini-1.5-pro";
-
Modified the
initChat
function to use thecurrentModel
:async function initChat() { try { const model = await getGenerativeModel({ model: currentModel }); // ... rest of the function ... displayMessage("Model", `Chat initialized with ${currentModel}. How can I assist you today?`); } catch (error) { displayError("Failed to initialize chat: " + error.message); } }
-
Added a
changeModel
function to handle model changes:function changeModel() { const modelSelect = document.getElementById("model-select"); currentModel = modelSelect.value; displayMessage("System", `Changing model to ${currentModel}. Reinitializing chat...`); initChat(); }
-
Added an event listener for the model selection change:
window.onload = () => { // ... existing event listeners ... document.getElementById("model-select").addEventListener("change", changeModel); };
-
Added some CSS to style the model select dropdown:
#model-select { margin-bottom: 10px; }
These changes allow users to select different Gemini models from the dropdown menu. When a new model is selected, the chat is reinitialized with the chosen model.
This enhancement provides more flexibility for users to experiment with different Gemini models and compare their performance or capabilities.
Is there anything else you'd like me to add or modify in this chat application?
human (Aug 27, 2024, 02:50 PM)
do history properly - so it starts out empty and then accumulates messages and responses over time
When a message has finished looping in sendMessage dump out result
usitg console.log
assistant (Aug 27, 2024, 02:50 PM)
Certainly! I'll modify the code to handle the chat history properly, starting with an empty history and accumulating messages and responses over time. I'll also add a console.log to dump the result
after a message has finished processing in the sendMessage
function.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Gemini Chat App</title>
<script type="module">
import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai";
import { marked } from "https://esm.run/marked";
let chat;
let currentModel = "gemini-1.5-pro";
let chatHistory = [];
function getApiKey() {
let apiKey = localStorage.getItem("GEMINI_API_KEY");
if (!apiKey) {
apiKey = prompt("Please enter your Gemini API key:");
if (apiKey) {
localStorage.setItem("GEMINI_API_KEY", apiKey);
}
}
return apiKey;
}
async function getGenerativeModel(params) {
const API_KEY = getApiKey();
const genAI = new GoogleGenerativeAI(API_KEY);
return genAI.getGenerativeModel(params);
}
async function initChat() {
try {
const model = await getGenerativeModel({ model: currentModel });
chat = model.startChat({
history: chatHistory,
});
displayMessage("System", `Chat initialized with ${currentModel}. How can I assist you today?`);
} catch (error) {
displayError("Failed to initialize chat: " + error.message);
}
}
async function sendMessage() {
const userInput = document.getElementById("user-input");
const message = userInput.value.trim();
if (message) {
displayMessage("You", message);
userInput.value = "";
try {
chatHistory.push({ role: "user", parts: [{ text: message }] });
const result = await chat.sendMessageStream(message);
let fullResponse = "";
for await (const chunk of result.stream) {
const chunkText = chunk.text();
fullResponse += chunkText;
updateModelResponse(fullResponse);
}
chatHistory.push({ role: "model", parts: [{ text: fullResponse }] });
console.log("Full result:", result);
} catch (error) {
displayError("Error: " + error.message);
}
}
}
function displayMessage(sender, message) {
const chatMessages = document.getElementById("chat-messages");
const messageElement = document.createElement("div");
messageElement.innerHTML = `<strong>${sender}:</strong> ${marked.parse(message)}`;
chatMessages.appendChild(messageElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function displayError(message) {
const chatMessages = document.getElementById("chat-messages");
const errorElement = document.createElement("div");
errorElement.innerHTML = `<strong style="color: red;">Error:</strong> <span style="color: red;">${message}</span>`;
chatMessages.appendChild(errorElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function updateModelResponse(response) {
const chatMessages = document.getElementById("chat-messages");
let modelResponse = chatMessages.lastElementChild;
if (!modelResponse || !modelResponse.querySelector("strong")?.textContent.includes("Model")) {
modelResponse = document.createElement("div");
modelResponse.innerHTML = "<strong>Model:</strong> ";
chatMessages.appendChild(modelResponse);
}
modelResponse.innerHTML = `<strong>Model:</strong> ${marked.parse(response)}`;
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function changeModel() {
const modelSelect = document.getElementById("model-select");
currentModel = modelSelect.value;
displayMessage("System", `Changing model to ${currentModel}. Reinitializing chat...`);
chatHistory = []; // Clear history when changing models
initChat();
}
window.onload = () => {
initChat();
document.getElementById("send-button").addEventListener("click", sendMessage);
document.getElementById("user-input").addEventListener("keypress", (e) => {
if (e.key === "Enter") sendMessage();
});
document.getElementById("model-select").addEventListener("change", changeModel);
};
</script>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
#chat-messages {
height: 400px;
overflow-y: auto;
border: 1px solid #ccc;
padding: 10px;
margin-bottom: 10px;
}
#user-input {
width: calc(100% - 70px);
padding: 5px;
}
#send-button {
width: 60px;
padding: 5px;
}
#model-select {
margin-bottom: 10px;
}
</style>
</head>
<body>
<h1>Gemini Chat App</h1>
<select id="model-select">
<option value="gemini-1.5-pro">gemini-1.5-pro (default)</option>
<option value="gemini-1.5-pro-exp-0827">gemini-1.5-pro-exp-0827</option>
<option value="gemini-1.5-flash-exp-0827">gemini-1.5-flash-exp-0827</option>
<option value="gemini-1.5-flash-8b-exp-0827">gemini-1.5-flash-8b-exp-0827</option>
</select>
<div id="chat-messages"></div>
<input type="text" id="user-input" placeholder="Type your message...">
<button id="send-button">Send</button>
</body>
</html>
I've made the following changes to improve history handling and add logging:
-
Added a
chatHistory
array to store the conversation history:let chatHistory = [];
-
Modified the
initChat
function to use thechatHistory
:async function initChat() { try { const model = await getGenerativeModel({ model: currentModel }); chat = model.startChat({ history: chatHistory, }); displayMessage("System", `Chat initialized with ${currentModel}. How can I assist you today?`); } catch (error) { displayError("Failed to initialize chat: " + error.message); } }
-
Updated the
sendMessage
function to accumulate history and log the full result:async function sendMessage() { // ... existing code ... try { chatHistory.push({ role: "user", parts: [{ text: message }] }); const result = await chat.sendMessageStream(message); let fullResponse = ""; for await (const chunk of result.stream) { const chunkText = chunk.text(); fullResponse += chunkText; updateModelResponse(fullResponse); } chatHistory.push({ role: "model", parts: [{ text: fullResponse }] }); console.log("Full result:", result); } catch (error) { displayError("Error: " + error.message); } }
-
Modified the
changeModel
function to clear the history when changing models:function changeModel() { const modelSelect = document.getElementById("model-select"); currentModel = modelSelect.value; displayMessage("System", `Changing model to ${currentModel}. Reinitializing chat...`); chatHistory = []; // Clear history when changing models initChat(); }
These changes ensure that:
- The chat starts with an empty history.
- User messages and model responses are accumulated in the
chatHistory
array. - The full conversation history is maintained and used when sending new messages.
- The history is cleared when changing models to start fresh.
- The full
result
object is logged to the console after each message is processed.
With these modifications, the chat application now properly handles the conversation history, allowing for more context-aware responses from the model. The console log will provide detailed information about each interaction, which can be useful for debugging or analysis.
Is there anything else you'd like me to add or modify in this chat application?
human (Aug 27, 2024, 02:53 PM)
When a response comes back log result.usageMetadata to a <pre>
below the chat
Add a "clear" button that resets everything
Start a timer when a prompt is sent to the API and end it when the API response has finished, then show a message below the chat with the duration in seconds to two decimal places of the last prompt
assistant (Aug 27, 2024, 02:53 PM)
Certainly! I'll add these features to the chat application. We'll log the usage metadata, add a clear button, and display the duration of each API call.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Gemini Chat App</title>
<script type="module">
import { GoogleGenerativeAI } from "https://esm.run/@google/generative-ai";
import { marked } from "https://esm.run/marked";
let chat;
let currentModel = "gemini-1.5-pro";
let chatHistory = [];
function getApiKey() {
let apiKey = localStorage.getItem("GEMINI_API_KEY");
if (!apiKey) {
apiKey = prompt("Please enter your Gemini API key:");
if (apiKey) {
localStorage.setItem("GEMINI_API_KEY", apiKey);
}
}
return apiKey;
}
async function getGenerativeModel(params) {
const API_KEY = getApiKey();
const genAI = new GoogleGenerativeAI(API_KEY);
return genAI.getGenerativeModel(params);
}
async function initChat() {
try {
const model = await getGenerativeModel({ model: currentModel });
chat = model.startChat({
history: chatHistory,
});
displayMessage("System", `Chat initialized with ${currentModel}. How can I assist you today?`);
} catch (error) {
displayError("Failed to initialize chat: " + error.message);
}
}
async function sendMessage() {
const userInput = document.getElementById("user-input");
const message = userInput.value.trim();
if (message) {
displayMessage("You", message);
userInput.value = "";
try {
chatHistory.push({ role: "user", parts: [{ text: message }] });
const startTime = performance.now();
const result = await chat.sendMessageStream(message);
let fullResponse = "";
for await (const chunk of result.stream) {
const chunkText = chunk.text();
fullResponse += chunkText;
updateModelResponse(fullResponse);
}
const endTime = performance.now();
const duration = ((endTime - startTime) / 1000).toFixed(2);
chatHistory.push({ role: "model", parts: [{ text: fullResponse }] });
console.log("Full result:", result);
updateUsageMetadata(result.usageMetadata);
updateDuration(duration);
} catch (error) {
displayError("Error: " + error.message);
}
}
}
function displayMessage(sender, message) {
const chatMessages = document.getElementById("chat-messages");
const messageElement = document.createElement("div");
messageElement.innerHTML = `<strong>${sender}:</strong> ${marked.parse(message)}`;
chatMessages.appendChild(messageElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function displayError(message) {
const chatMessages = document.getElementById("chat-messages");
const errorElement = document.createElement("div");
errorElement.innerHTML = `<strong style="color: red;">Error:</strong> <span style="color: red;">${message}</span>`;
chatMessages.appendChild(errorElement);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function updateModelResponse(response) {
const chatMessages = document.getElementById("chat-messages");
let modelResponse = chatMessages.lastElementChild;
if (!modelResponse || !modelResponse.querySelector("strong")?.textContent.includes("Model")) {
modelResponse = document.createElement("div");
modelResponse.innerHTML = "<strong>Model:</strong> ";
chatMessages.appendChild(modelResponse);
}
modelResponse.innerHTML = `<strong>Model:</strong> ${marked.parse(response)}`;
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function updateUsageMetadata(metadata) {
const usageMetadataElement = document.getElementById("usage-metadata");
usageMetadataElement.textContent = JSON.stringify(metadata, null, 2);
}
function updateDuration(duration) {
const durationElement = document.getElementById("api-duration");
durationElement.textContent = `Last API call duration: ${duration} seconds`;
}
function changeModel() {
const modelSelect = document.getElementById("model-select");
currentModel = modelSelect.value;
displayMessage("System", `Changing model to ${currentModel}. Reinitializing chat...`);
chatHistory = []; // Clear history when changing models
initChat();
}
function clearChat() {
chatHistory = [];
document.getElementById("chat-messages").innerHTML = "";
document.getElementById("usage-metadata").textContent = "";
document.getElementById("api-duration").textContent = "";
initChat();
}
window.onload = () => {
initChat();
document.getElementById("send-button").addEventListener("click", sendMessage);
document.getElementById("user-input").addEventListener("keypress", (e) => {
if (e.key === "Enter") sendMessage();
});
document.getElementById("model-select").addEventListener("change", changeModel);
document.getElementById("clear-button").addEventListener("click", clearChat);
};
</script>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
#chat-messages {
height: 400px;
overflow-y: auto;
border: 1px solid #ccc;
padding: 10px;
margin-bottom: 10px;
}
#user-input {
width: calc(100% - 140px);
padding: 5px;
}
#send-button, #clear-button {
width: 60px;
padding: 5px;
}
#model-select {
margin-bottom: 10px;
}
#usage-metadata {
white-space: pre-wrap;
font-family: monospace;
background-color: #f0f0f0;
padding: 10px;
margin-top: 10px;
}
#api-duration {
margin-top: 10px;
font-weight: bold;
}
</style>
</head>
<body>
<h1>Gemini Chat App</h1>
<select id="model-select">
<option value="gemini-1.5-pro">gemini-1.5-pro (default)</option>
<option value="gemini-1.5-pro-exp-0827">gemini-1.5-pro-exp-0827</option>
<option value="gemini-1.5-flash-exp-0827">gemini-1.5-flash-exp-0827</option>
<option value="gemini-1.5-flash-8b-exp-0827">gemini-1.5-flash-8b-exp-0827</option>
</select>
<div id="chat-messages"></div>
<input type="text" id="user-input" placeholder="Type your message...">
<button id="send-button">Send</button>
<button id="clear-button">Clear</button>
<div id="api-duration"></div>
<pre id="usage-metadata"></pre>
</body>
</html>
I've made the following changes to add the requested features:
-
Added usage metadata logging:
- Created a new
<pre>
element with id "usage-metadata" to display the metadata. - Added a
updateUsageMetadata
function to update the metadata display:function updateUsageMetadata(metadata) { const usageMetadataElement = document.getElementById("usage-metadata"); usageMetadataElement.textContent = JSON.stringify(metadata, null, 2); }
- Called
updateUsageMetadata(result.usageMetadata)
in thesendMessage
function after receiving the response.
- Created a new
-
Added a clear button:
- Added a new button in the HTML:
<button id="clear-button">Clear</button>
- Implemented a
clearChat
function:function clearChat() { chatHistory = []; document.getElementById("chat-messages").innerHTML = ""; document.getElementById("usage-metadata").textContent = ""; document.getElementById("api-duration").textContent = ""; initChat(); }
- Added an event listener for the clear button in the
window.onload
function.
- Added a new button in the HTML:
-
Added API call duration tracking:
- Created a new
<div>
element with id "api-duration" to display the duration. - Added a
updateDuration
function to update the duration display:function updateDuration(duration) { const durationElement = document.getElementById("api-duration"); durationElement.textContent = `Last API call duration: ${duration} seconds`; }
- Modified the
sendMessage
function to measure and display the duration:const startTime = performance.now(); // ... existing code ... const endTime = performance.now(); const duration = ((endTime - startTime) / 1000).toFixed(2); updateDuration(duration);
- Created a new
-
Updated the CSS to style the new elements and adjust the layout.
These changes add the following functionality to the chat application:
- Usage metadata is displayed in a formatted
<pre>
element below the chat. - A "Clear" button resets the chat history, messages display, metadata, and duration information.
- The duration of each API call is measured and displayed below the chat in seconds, with two decimal places.
The application now provides more detailed information about each interaction with the Gemini API and allows users to easily reset the chat when needed.
Is there anything else you'd like me to add or modify in this chat application?