@Slach
Created April 15, 2024 05:36
ai-oracle
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/Slach/b03c5f203b4c32b62ac974a6e10ff900/ai-oracle.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"## AI-Oracle, by [Matt Shumer](https://twitter.com/mattshumer_)"
],
"metadata": {
"id": "_E_K_PU3qsqx"
}
},
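{
"cell_type": "markdown",
"source": [
"The notebook fans one question out to three models (Claude 3 Opus, GPT-4, and Perplexity's pplx-70b-online) at low temperature, then has Claude 3 Opus merge the three drafts into a single final answer at a higher temperature."
],
"metadata": {}
},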
{
"cell_type": "code",
"source": [
"!pip install openai"
],
"metadata": {
"id": "AZikgJ2tdYvd"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"ANTHROPIC_API_KEY = \"ANTHROPIC_API_KEY\" # Replace with your Anthropic API key\n",
"OPENAI_API_KEY = \"OPENAI_API_KEY\" # Replace with your OpenAI API key\n",
"PPLX_API_KEY = \"PERPLEXITY_API_KEY\" # Replace with your Perplexity API key"
],
"metadata": {
"id": "x48QfV6jdLdz"
},
"execution_count": null,
"outputs": []
},
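{
"cell_type": "markdown",
"source": [
"Optionally, instead of hard-coding the keys above, the cell below reads them interactively with Python's standard `getpass` module. This is a minimal sketch using only the standard library; you will be prompted again whenever the runtime restarts."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Optional: prompt for the keys at runtime instead of storing them in the notebook.\n",
"# Overrides the placeholder values assigned above.\n",
"from getpass import getpass\n",
"\n",
"ANTHROPIC_API_KEY = getpass(\"Anthropic API key: \")\n",
"OPENAI_API_KEY = getpass(\"OpenAI API key: \")\n",
"PPLX_API_KEY = getpass(\"Perplexity API key: \")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},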
{
"cell_type": "code",
"source": [
"import requests\n",
"from openai import OpenAI\n",
"client = OpenAI(api_key=OPENAI_API_KEY)"
],
"metadata": {
"id": "BGjpZBCYcUlt"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"def generate_claude_step_1(prompt, model=\"claude-3-opus-20240229\", max_tokens=2000, temperature=0.1): # using 0.1 for precision during the pre-answer phase\n",
" headers = {\n",
" \"x-api-key\": ANTHROPIC_API_KEY,\n",
" \"anthropic-version\": \"2023-06-01\",\n",
" \"content-type\": \"application/json\"\n",
" }\n",
" data = {\n",
" \"model\": model,\n",
" \"max_tokens\": max_tokens,\n",
" \"temperature\": temperature,\n",
" \"system\": \"You are a world-class expert across every topic. Answer with incredibly accurate and useful responses.\",\n",
" \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
" }\n",
" response = requests.post(\"https://api.anthropic.com/v1/messages\", headers=headers, json=data)\n",
" print(response.json())\n",
" return response.json()['content'][0]['text']\n",
"\n",
"\n",
"\n",
"def generate_gpt_step_1(prompt, model=\"gpt-4\", max_tokens=2000, temperature=0.1): # using 0.1 for precision during the pre-answer phase\n",
" response = client.chat.completions.create(\n",
" model=model,\n",
" messages=[\n",
" {\n",
" \"role\": \"system\",\n",
" \"content\": \"You are a world-class expert across every topic. Answer with incredibly accurate and useful responses.\"\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": prompt,\n",
" }\n",
" ],\n",
" max_tokens=max_tokens,\n",
" temperature=temperature,\n",
" )\n",
"\n",
" return response.choices[0].message.content\n",
"\n",
"\n",
"\n",
"def generate_perplexity_step_1(prompt, model=\"pplx-70b-online\", max_tokens=2000, temperature=0.1): # using 0.1 for precision during the pre-answer phase\n",
" payload = {\n",
" \"model\": model,\n",
" \"messages\": [\n",
" {\n",
" \"role\": \"system\",\n",
" \"content\": \"Be precise.\"\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": prompt,\n",
" }\n",
" ]\n",
" }\n",
" headers = {\n",
" \"accept\": \"application/json\",\n",
" \"content-type\": \"application/json\",\n",
" \"Authorization\": f\"Bearer {PPLX_API_KEY}\"\n",
" }\n",
"\n",
" response = requests.post(\"https://api.perplexity.ai/chat/completions\", json=payload, headers=headers)\n",
" print(response.json())\n",
"\n",
" return response.json()['choices'][0]['message']['content']\n",
"\n",
"\n",
"def generate_claude_step_2(question, claude_answer, gpt_answer, pplx_answer, model=\"claude-3-opus-20240229\", max_tokens=2000, temperature=0.6): # using 0.6 for flowiness during the answer phase\n",
" prompt = f\"\"\"Here is the user's question:\n",
"<user_question>\n",
"{question}\n",
"</user_question>\n",
"\n",
"<ai_answers_to_inform_your_response>\n",
"GPT-4, which is strong at reasoning, responded with:\n",
"<gpt_4_response>\n",
"{gpt_answer}\n",
"</gpt_4_response>\n",
"\n",
"Claude 3, which is strong at reasoning and has a great personality, responded with:\n",
"<claude_3_response>\n",
"{claude_answer}\n",
"</claude_3_response>\n",
"\n",
"PPLX AI, which is weaker at reasoning but has access to up-to-date information from the internet, responded with:\n",
"<pplx_response>\n",
"{pplx_answer}\n",
"</pplx_response>\n",
"</ai_answers_to_inform_your_response>\n",
"\n",
"Again, the user's question is:\n",
"<user_question>\n",
"{question}\n",
"</user_question>\n",
"\n",
"Use all of these AI answers to inform your final answer. Now, answer the user's question perfectly.\"\"\"\n",
"\n",
" headers = {\n",
" \"x-api-key\": ANTHROPIC_API_KEY,\n",
" \"anthropic-version\": \"2023-06-01\",\n",
" \"content-type\": \"application/json\"\n",
" }\n",
" data = {\n",
" \"model\": model,\n",
" \"max_tokens\": max_tokens,\n",
" \"temperature\": temperature,\n",
" \"system\": \"The user has asked a question, and we've asked three different LLMs to response. You will take all of their outputs, and combine their strengths and mitigate their mistakes into a final, perfect answer.\",\n",
" \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
" }\n",
" response = requests.post(\"https://api.anthropic.com/v1/messages\", headers=headers, json=data)\n",
" print(response.json())\n",
" return response.json()['content'][0]['text']\n",
"\n",
"def generate_response(question):\n",
"\n",
" claude = generate_claude_step_1(question)\n",
" gpt = generate_gpt_step_1(question)\n",
" pplx = generate_perplexity_step_1(question)\n",
"\n",
" final = generate_claude_step_2(question, claude, gpt, pplx)\n",
"\n",
" return final, claude, gpt, pplx"
],
"metadata": {
"id": "fouhWkGPcb2U"
},
"execution_count": null,
"outputs": []
},
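{
"cell_type": "markdown",
"source": [
"`generate_response` returns a 4-tuple: the synthesized final answer, followed by the individual Claude, GPT-4, and Perplexity drafts, so the cells below can unpack and compare them."
],
"metadata": {}
},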
{
"cell_type": "code",
"source": [
"question = \"What is today's news?\"\n",
"\n",
"response = generate_response(question)\n",
"\n",
"print('\\n\\n', response[0])"
],
"metadata": {
"id": "hLwXQ04Ldg2c"
},
"execution_count": null,
"outputs": []
}
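,
{
"cell_type": "code",
"source": [
"# Optional: compare each model's draft with the synthesized answer printed above.\n",
"print('--- Claude 3 draft ---\\n', claude)\n",
"print('--- GPT-4 draft ---\\n', gpt)\n",
"print('--- Perplexity draft ---\\n', pplx)"
],
"metadata": {},
"execution_count": null,
"outputs": []
}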
]
}