Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save svrc/fa46102fbcb49f8419fdcf7bf3622286 to your computer and use it in GitHub Desktop.
#!/usr/bin/env bash
# Minimal CLI client for an OpenAI-compatible chat-completions endpoint.
# Usage: ./chat.sh "Your prompt here"
# Each setting may be overridden from the environment; the value here is the default.
API_KEY="${API_KEY:-}"
API_BASE="${API_BASE:-https://genai-proxy.sys.tas.vmtanzu.com/v1}"
# BUG FIX: the original had no space between the closing quote and '#', so the
# "comment" was parsed as a command ('this') run with MODEL=...# in its env,
# and MODEL was never set in the script itself.
MODEL="${MODEL:-text-embedding-ada-002}" # this is actually vicuna 1.5 7b on my TAS
TEMPERATURE="${TEMPERATURE:-0}"
MAX_TOKENS="${MAX_TOKENS:-1024}"
#######################################
# Send a single-turn chat prompt to the model and print only the reply text.
# Globals:   API_BASE, API_KEY, MODEL, TEMPERATURE, MAX_TOKENS (read)
# Arguments: $1 - the user prompt
# Outputs:   model reply to stdout; curl/jq diagnostics to stderr
# Returns:   non-zero if JSON construction, the HTTP call, or parsing fails
#######################################
chat_with_model() {
  local prompt=$1
  local request_body response

  # Build the payload with jq so the prompt is safely JSON-escaped.
  # Declaration and assignment are split so a jq failure isn't masked by 'local'.
  request_body=$(jq -n \
    --arg model "$MODEL" \
    --arg prompt "$prompt" \
    --argjson temperature "$TEMPERATURE" \
    --argjson max_tokens "$MAX_TOKENS" \
    '{model: $model, messages: [{"content": $prompt, role: "user"}], temperature: $temperature, max_tokens: $max_tokens}') || return

  # -sS: no progress bar but keep error messages; --fail: non-2xx -> non-zero.
  # NOTE(review): -k disables TLS verification — kept from the original, which
  # targets a proxy with a private CA; remove once a trusted cert is in place.
  response=$(curl -sS --fail -k "${API_BASE}/chat/completions" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer ${API_KEY}" \
    -d "$request_body") || return

  # BUG FIX: chat completions return the reply at .choices[].message.content;
  # the original read .choices[].text, which is the legacy /completions shape
  # and yields null here. Debug echoes of the raw request/response that
  # polluted stdout were also removed.
  printf '%s\n' "$response" | jq -r '.choices[].message.content'
}
# A prompt argument is required; print usage and bail out when it is missing.
if [[ -z "${1-}" ]]; then
  printf 'Usage: %s "Your prompt here"\n' "$0"
  exit 1
fi

# Hand the prompt to the model.
chat_with_model "$1"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment