Created
February 20, 2024 09:46
-
-
Save tuhinpal/228f294fe993bd63ba5e839834a808f0 to your computer and use it in GitHub Desktop.
Groq Unofficial Wrapper
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * Usage:
 *
 * const groq = new Groq({ initialMessages: [], systemPrompt: "Always give helpful and polite responses." });
 * const response = await groq.sendMessage("Hello"); // response.message
 */
import axios from "axios"; | |
const groqApi = axios.create({ | |
baseURL: `https://api.groq.com/v1`, | |
headers: { | |
"Content-Type": "application/json", | |
"User-Agent": "Groq/1.0", | |
origin: "https://groq.com", | |
referer: "https://groq.com", | |
}, | |
}); | |
type ChatResponse = { | |
requestId: string; | |
performance: { | |
timeGenerated: number; | |
tokensGenerated: number; | |
timeProcessed: number; | |
tokensProcessed: number; | |
}; | |
message: string; | |
}; | |
export default class Groq { | |
private _token: string = ""; | |
private _systemPrompt: string = ""; | |
private _messages: { | |
user: string; | |
bot: string; | |
}[] = []; | |
private _configuration: { | |
max_input_tokens: number; | |
max_tokens: number; | |
model_id: "mixtral-8x7b-32768"; | |
seed: number; // random seed | |
temperature: number; // 0.8 is a good value, larger is more random | |
top_k: number; // top k sampling, 40 is a good value | |
top_p: number; // nucleus sampling, 0.8 is a good value | |
} = { | |
max_input_tokens: 21845, | |
max_tokens: 32768, | |
model_id: "mixtral-8x7b-32768", | |
seed: 10, | |
temperature: 0.5, | |
top_k: 40, | |
top_p: 0.8, | |
}; | |
constructor({ | |
systemPrompt, | |
initialMessages, | |
configuration, | |
}: { | |
systemPrompt: string; | |
initialMessages: typeof Groq.prototype._messages; | |
configuration?: typeof Groq.prototype._configuration; | |
}) { | |
this._systemPrompt = systemPrompt; | |
this._messages = initialMessages; | |
if (configuration) { | |
this._configuration = configuration; | |
} | |
} | |
private async generateToken() { | |
try { | |
console.log("Generating token"); | |
const tokenResponse = await groqApi.get("/auth/anon_token"); | |
this._token = tokenResponse.data.access_token; | |
} catch (error) { | |
throw new Error("Error generating token"); | |
} | |
} | |
private jsonLParser(data: string): any[] { | |
const jsonLUnprsed = data.split("\n"); | |
const jsonL = jsonLUnprsed | |
.filter(Boolean) | |
.map((d: string) => { | |
try { | |
return JSON.parse(d).result; | |
} catch (_) {} | |
}) | |
.filter(Boolean); | |
return jsonL; | |
} | |
private async sendMessageWrapper(message: string): Promise<{ | |
code: "success" | "token_expired" | "error"; | |
message: string; | |
data?: ChatResponse; | |
}> { | |
try { | |
if (!this._token) { | |
await this.generateToken(); | |
} | |
const lastMessages = this._messages.slice(-10); // It's better not to send all messages as context | |
const response = await groqApi.post( | |
"/request_manager/text_completion", | |
{ | |
...this._configuration, | |
system_prompt: this._systemPrompt, | |
user_prompt: message, | |
history: lastMessages.map((message) => ({ | |
user_prompt: message.user, | |
assistant_response: message.bot, | |
})), | |
}, | |
{ | |
headers: { | |
Authorization: `Bearer ${this._token}`, | |
}, | |
} | |
); | |
const jsonL = this.jsonLParser(response.data); | |
const requestId = jsonL.shift(); | |
const performance = jsonL.pop(); | |
const botMessage = jsonL.map((d: any) => d.content).join(""); | |
this._messages.push({ | |
user: message, | |
bot: botMessage, | |
}); | |
return { | |
code: "success", | |
message: `Successfull generated message`, | |
data: { | |
requestId: requestId.requestId, | |
performance: performance.stats, | |
message: botMessage, | |
}, | |
}; | |
} catch (error) { | |
if ( | |
axios.isAxiosError(error) && | |
error.response?.status === 400 && | |
error.response.data && | |
error.response.data.error?.code === 3 | |
) { | |
this._token = ""; | |
return { | |
code: "token_expired", | |
message: "Token expired", | |
}; | |
} | |
return { | |
code: "error", | |
message: (error as Error).message, | |
}; | |
} | |
} | |
public async sendMessage(message: string): Promise<ChatResponse> { | |
let finalResponse: ChatResponse | null = null; | |
// Try 3 times to generate message | |
for (let i = 0; i < 3; i++) { | |
const response = await this.sendMessageWrapper(message); | |
if (response.code === "success") { | |
finalResponse = response.data as ChatResponse; | |
break; | |
} | |
if (response.code === "token_expired") { | |
continue; | |
} | |
if (response.code === "error") { | |
throw new Error(response.message); | |
} | |
} | |
if (!finalResponse) { | |
throw new Error("Failed to generate message"); | |
} | |
return finalResponse; | |
} | |
} |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.
For educational purposes only!
Team Groq, Respect 💪💪