This tutorial is based on this AWS tutorial. In it, we will install the Nvidia driver on an AWS EC2 instance, then compile and run llama.cpp on it.
Here we use a g5.4xlarge
instance with the Ubuntu 22.04 AMI, which uses an Nvidia A10G GPU.
# Use ',,' (instead of fzf's default '**') as the fuzzy-completion trigger.
# (Removed ' | |' table-extraction artifact that broke the statement.)
export FZF_COMPLETION_TRIGGER=',,'
# fzf completion for autojump's `j` command: feed the autojump database
# entries to fzf as candidates.
# (Removed ' | |' table-extraction artifacts that broke the syntax.)
_fzf_complete_j() {
  _fzf_complete --prompt="autojump> " -- "$@" < <(
    # `j -s` prints weight<TAB>path lines; keep the path column, stop at the
    # '___' separator, and reverse so the best match sits nearest the prompt.
    j -s | awk -F $'\t' '/^___/{exit} {print $2}' | tac
  )
}
# Post-process the entry selected by fzf: wrap it in double quotes so
# paths containing spaces survive insertion on the command line.
# (Removed ' | |' table-extraction artifacts that broke the syntax.)
_fzf_complete_j_post() {
  awk '{printf "\"%s\"", $0}'
}
# Search PChome 24h shopping for the given keywords and print, per product,
# "$price<TAB>name:::product-URL:::image-URL".
# NOTE(review): this fragment appears truncated — the trailing '\' on the
# final jq line continues beyond this chunk, and the closing '}' is not
# visible. The ' | |' markers are table-extraction artifacts; code is left
# byte-identical here.
function pchome { | |
echo "在 PChome 24 上搜尋 $* ..." | |
# Buffer the API response in a temporary file.
local tempfile=$(mktemp) | |
# Query the PChome search API, sorted by sales count descending.
curl -s --get \ | |
--data-urlencode "q=$*" \ | |
--data-urlencode 'sort=sale/dc' \ | |
'https://ecshweb.pchome.com.tw/search/v3.3/all/results' > "$tempfile" | |
# Extract price, name, product URL, and product image URL from each result.
jq -r '.prods[] | "$" + (.price | tostring) + "\t" + .name + | |
":::" + "https://24h.pchome.com.tw/prod/" + .Id + | |
":::" + "https://cs-e.ecimg.tw" + .picB' "$tempfile" \ |
#!/bin/bash | |
# Iterate over every Jekyll post under _posts/.
# NOTE(review): this fragment appears truncated — the loop body and its
# closing 'done' continue beyond this chunk. The ' | |' markers are
# table-extraction artifacts; code is left byte-identical here.
# Abort on any command failure and on use of unset variables.
set -e | |
set -u | |
for post in _posts/*.md | |
do | |
echo "Processing $post" | |
# Get the filename without the extension |
#!/usr/bin/env python3
"""Module bootstrap: imports and logging configuration for an OpenAI client script.

(Removed ' | |' table-extraction artifacts that made the file syntactically
invalid Python.)
"""
import json
import logging

from openai import OpenAI

# One structured log line per record: timestamp, level, logger name, message.
logging.basicConfig(format="[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Download and unpack a pinned wasi-sdk release into the current directory,
# then export WASI_SDK_PATH for later build steps.
export WASI_VERSION=20
export WASI_VERSION_FULL="${WASI_VERSION}.0"
# Fetch the prebuilt Linux tarball from the official wasi-sdk releases.
wget "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_VERSION}/wasi-sdk-${WASI_VERSION_FULL}-linux.tar.gz"
tar xvf "wasi-sdk-${WASI_VERSION_FULL}-linux.tar.gz"
# $(...) instead of backticks; quoted so a working directory containing
# spaces does not break the path.
export WASI_SDK_PATH="$(pwd)/wasi-sdk-${WASI_VERSION_FULL}"
I hereby claim:
To claim this, I am signing this object:
{
"100": "1f4af",
"1234": "1f522",
"interrobang": "2049",
"tm": "2122",
"information_source": "2139",
"left_right_arrow": "2194",
"arrow_up_down": "2195",
"arrow_upper_left": "2196",
"arrow_upper_right": "2197",
I hereby claim:
To claim this, I am signing this object:
pf's note (too old)