- try adding ggml-metal.metal to python executable directory
- llama-cpp-python[server] FAILS
##################################
# figure out python executable directory
python -V
> Python 3.9.16
which python
> /Users/xxxx/miniconda3-mac-silicon/envs/llama/bin/python
##################################
# create 'pypath' environment variable
# Derive the directory holding the python executable and export it as
# 'pypath' (with trailing slash), then print it.
# NOTE: the original used ${pp::-6} to strip the literal "python" suffix;
# negative-length substring expansion is a bash 4.2+ feature and fails on
# macOS's stock bash 3.2, and it also errors if $pp is empty. Using
# dirname is portable and length-independent.
pp=$(command -v python)
export pypath="$(dirname "$pp")/"
echo "$pypath"
> /Users/xxxx/miniconda3-mac-silicon/envs/llama/bin/
##################################
# copy ggml-metal.metal to python executable directory
cp vendor/llama.cpp/ggml-metal.metal $pypath
ls $pypath | grep ggml
> **ggml**-metal.metal
##################################
# rebuild the pip package
# FORCE
export FORCE_CMAKE=1
pip install -e .
pip install -e '.[server]'
##################################
# check pip list
pip list | grep llama
> **llama**-cpp-python 0.1.59 /Users/...
##################################
# configure your ggml model path
# make sure it is ggml v3
# make sure it is q4_0
export MODEL=[path to your llama.cpp ggml models]/[ggml-model-name]-q4_0.bin
##################################
# TEST
python3 -m llama_cpp.server --model $MODEL
##################################
# errors 😞
Traceback (most recent call last):
File "/Users/ianscrivener/miniconda3-mac-silicon/envs/llama/lib/python3.9/runpy.py", line 188, in _run_module_as_main
mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
File "/Users/ianscrivener/miniconda3-mac-silicon/envs/llama/lib/python3.9/runpy.py", line 111, in _get_module_details
__import__(pkg_name)
File "/Users/ianscrivener/_AI/lcp4/llama_cpp/__init__.py", line 1, in <module>
from .llama_cpp import *
File "/Users/ianscrivener/_AI/lcp4/llama_cpp/llama_cpp.py", line 77, in <module>
_lib = _load_shared_library(_lib_base_name)
File "/Users/ianscrivener/_AI/lcp4/llama_cpp/llama_cpp.py", line 68, in _load_shared_library
raise FileNotFoundError(
FileNotFoundError: Shared library with base name 'llama' not found