Skip to content

Instantly share code, notes, and snippets.

@hasnocool
Last active May 20, 2024 20:07
Show Gist options
  • Save hasnocool/9b59bdf578ed07639b9ac7a575e11875 to your computer and use it in GitHub Desktop.
Flask Routes For Ollama API
# Flask application exposing Ollama-style REST endpoints.
from flask import Flask, request, jsonify
# WSGI application instance; every route below registers against it.
app = Flask(__name__)
# ---------------------------------------------------------------------------
# Placeholder backend functions.
# Each stub returns Ellipsis (``...``) until wired to a real model backend.
# Written as ``def`` statements rather than lambdas assigned to names
# (PEP 8 E731) so they carry docstrings and useful traceback names.
# ---------------------------------------------------------------------------

def generate_completion_from_model(model, prompt, images, options):
    """Return a completion for *prompt* from *model* (stub)."""
    return ...


def generate_chat_completion_from_model(model, messages, options):
    """Return a chat completion for *messages* from *model* (stub)."""
    return ...


def create_model_from_modelfile(name, modelfile, path=None):
    """Create model *name* from *modelfile* (stub).

    *path* is accepted (default ``None``) because the ``/api/create``
    route also reads a ``path`` field from its request body.
    """
    return ...


def list_local_models():
    """Return the locally available models (stub)."""
    return ...


def get_model_info(name):
    """Return details for the model *name* (stub)."""
    return ...


def copy_model(source, destination):
    """Copy model *source* to *destination* (stub)."""
    return ...


def delete_model(name):
    """Delete the model *name* (stub)."""
    return ...


def pull_model_from_library(name, insecure):
    """Download model *name* from the library (stub)."""
    return ...


def push_model_to_library(name, insecure):
    """Upload model *name* to the library (stub)."""
    return ...


def generate_embeddings_from_model(model, prompt, options):
    """Return embeddings for *prompt* from *model* (stub)."""
    return ...
# 1. Generate a completion
@app.route('/api/generate', methods=['POST'])
def generate_completion():
    """POST /api/generate — generate a completion for a prompt.

    Expects a JSON body with ``model`` and ``prompt``.  The remaining
    Ollama-style fields are accepted, but only ``images`` and
    ``options`` are currently forwarded to the backend helper.
    """
    # ``silent=True`` + ``or {}`` so a missing/invalid JSON body yields an
    # empty dict instead of crashing with AttributeError on ``None``.
    data = request.get_json(silent=True) or {}
    model = data.get('model')
    prompt = data.get('prompt')
    images = data.get('images', [])
    # Advanced parameters
    # Renamed from ``format`` to avoid shadowing the builtin.
    response_format = data.get('format', 'text')
    options = data.get('options', {})
    system = data.get('system')
    template = data.get('template')
    context = data.get('context')
    stream = data.get('stream', True)
    raw = data.get('raw', False)
    keep_alive = data.get('keep_alive', '5m')
    # NOTE(review): system/template/context/stream/raw/keep_alive are read
    # but never forwarded — confirm whether the backend should receive them.
    completion_data = generate_completion_from_model(model, prompt, images, options)
    # Return the generated completion or relevant data
    return jsonify(completion_data)
# 2. Generate a chat completion
@app.route('/api/chat', methods=['POST'])
def generate_chat_completion():
    """POST /api/chat — generate a chat completion for a message list.

    Expects a JSON body with ``model`` and ``messages``.  ``format``,
    ``stream`` and ``keep_alive`` are accepted but only ``options`` is
    currently forwarded to the backend helper.
    """
    # Tolerate a missing/invalid JSON body instead of crashing on ``None``.
    data = request.get_json(silent=True) or {}
    model = data.get('model')
    messages = data.get('messages')
    # Advanced parameters
    # Renamed from ``format`` to avoid shadowing the builtin.
    response_format = data.get('format', 'text')
    options = data.get('options', {})
    stream = data.get('stream', True)
    keep_alive = data.get('keep_alive', '5m')
    # NOTE(review): stream/keep_alive are read but never forwarded —
    # confirm whether the backend should receive them.
    chat_completion_data = generate_chat_completion_from_model(model, messages, options)
    # Return the generated chat completion or relevant data
    return jsonify(chat_completion_data)
# 3. Create a new model
@app.route('/api/create', methods=['POST'])
def create_model():
    """POST /api/create — create a model from a modelfile.

    Bug fix: the backend helper is declared with two parameters
    ``(name, modelfile)``, but the original call passed a third ``path``
    argument, which would raise ``TypeError`` on every request.  The
    call now passes only the supported arguments.
    """
    data = request.get_json(silent=True) or {}
    name = data.get('name')
    modelfile = data.get('modelfile')
    # ``path`` is accepted per the request schema but not forwarded —
    # TODO: support path-based creation in the backend helper.
    path = data.get('path')
    model_created = create_model_from_modelfile(name, modelfile)
    # Return a response indicating the model creation status
    return jsonify(model_created)
# 4. List locally available models
@app.route('/api/tags', methods=['GET'])
def list_local_models_route():
    """GET /api/tags — list locally available models.

    Bug fix: the view function was previously named
    ``list_local_models``, shadowing the backend helper of the same
    name; its internal call then invoked itself in infinite recursion.
    Renaming the view restores the call to the helper.  The URL is
    unchanged, so HTTP clients are unaffected.
    """
    local_models_list = list_local_models()
    # Return a list of locally available models
    return jsonify(local_models_list)
# 5. Show model information
@app.route('/api/show', methods=['POST'])
def show_model_info():
    """POST /api/show — return details for the model named in the JSON body."""
    payload = request.get_json()
    model_name = payload.get('name')
    # Look up the model's metadata and serialise it for the client.
    return jsonify(get_model_info(model_name))
# 6. Copy an existing model
@app.route('/api/copy', methods=['POST'])
def copy_model_route():
    """POST /api/copy — copy a model from ``source`` to ``destination``.

    Bug fix: the view function was previously named ``copy_model``,
    shadowing the backend helper of the same name; its internal call
    then invoked itself in infinite recursion.  Renaming the view
    restores the call to the helper.  The URL is unchanged.
    """
    data = request.get_json(silent=True) or {}
    source = data.get('source')
    destination = data.get('destination')
    copy_status = copy_model(source, destination)
    # Return a response indicating the copy status
    return jsonify(copy_status)
# 7. Delete a model
@app.route('/api/delete', methods=['DELETE'])
def delete_model_route():
    """DELETE /api/delete — delete the model named in the JSON body.

    Bug fix: the view function was previously named ``delete_model``,
    shadowing the backend helper of the same name; its internal call
    then invoked itself in infinite recursion.  Renaming the view
    restores the call to the helper.  The URL is unchanged.
    """
    data = request.get_json(silent=True) or {}
    name = data.get('name')
    deletion_status = delete_model(name)
    # Return a response indicating the deletion status
    return jsonify(deletion_status)
# 8. Pull a model from the library
@app.route('/api/pull', methods=['POST'])
def pull_model():
    """POST /api/pull — download a model from the library."""
    body = request.get_json()
    model_name = body.get('name')
    allow_insecure = body.get('insecure', False)
    # Delegate the download to the backend and report its status.
    status = pull_model_from_library(model_name, allow_insecure)
    return jsonify(status)
# 9. Push a model to the library
@app.route('/api/push', methods=['POST'])
def push_model():
    """POST /api/push — upload a model to the library."""
    body = request.get_json()
    model_name = body.get('name')
    allow_insecure = body.get('insecure', False)
    # Delegate the upload to the backend and report its status.
    status = push_model_to_library(model_name, allow_insecure)
    return jsonify(status)
# 10. Generate embeddings from a model
@app.route('/api/embeddings', methods=['POST'])
def generate_embeddings():
    """POST /api/embeddings — compute embeddings for a prompt."""
    body = request.get_json()
    model = body.get('model')
    prompt = body.get('prompt')
    # Advanced parameters (read to match the request schema; only
    # ``options`` is forwarded to the backend helper).
    options = body.get('options', {})
    keep_alive = body.get('keep_alive', '5m')
    result = generate_embeddings_from_model(model, prompt, options)
    # Hand the embeddings back to the client as JSON.
    return jsonify(result)
# Run the Flask development server when this module is executed directly.
if __name__ == '__main__':
    app.run()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment