envoy — an Envoy front proxy load-balancing two TensorFlow Serving instances via docker-compose (gist by @Arnold1, last active March 16, 2020)
docker-compose.yml

version: "3.7"
services:
  front-envoy:
    build:
      context: ./
      dockerfile: Dockerfile-frontenvoy
    volumes:
      - ./front-envoy.yaml:/etc/front-envoy.yaml
    networks:
      - envoymesh
    expose:
      # Expose ports 8080 (general traffic) and 8001 (Envoy admin server)
      - "8080"
      - "8001"
    ports:
      # Map host port 8080 to container port 8080, and host port 8001 to container port 8001
      - "8080:8080"
      - "8001:8001"
  server1:
    image: tensorflow/serving:latest
    volumes:
      - "../../../docker/models:/models"
      - "../../../docker/models.conf:/serving/models.conf"
      - "../../../docker/batching_parameters.txt:/serving/batching_parameters.txt"
      - "../../../docker/monitoring.conf:/serving/monitoring.conf"
    networks:
      envoymesh:
        aliases:
          - server1
    # gRPC on 8500, REST API on 8501; model config, batching, and monitoring files mounted above
    command: --port=8500 --rest_api_port=8501 --model_config_file=/serving/models.conf --enable_batching=true --batching_parameters_file=/serving/batching_parameters.txt --monitoring_config_file=/serving/monitoring.conf
    ports:
      - "8500:8500"
      - "8501:8501"
  server2:
    image: tensorflow/serving:latest
    volumes:
      - "../../../docker/models:/models"
      - "../../../docker/models.conf:/serving/models.conf"
      - "../../../docker/batching_parameters.txt:/serving/batching_parameters.txt"
      - "../../../docker/monitoring.conf:/serving/monitoring.conf"
    networks:
      envoymesh:
        aliases:
          - server2
    # Same flags as server1, but gRPC on 8600 and REST on 8601 so both servers can publish to the host
    command: --port=8600 --rest_api_port=8601 --model_config_file=/serving/models.conf --enable_batching=true --batching_parameters_file=/serving/batching_parameters.txt --monitoring_config_file=/serving/monitoring.conf
    ports:
      - "8600:8600"
      - "8601:8601"
  rest-client:
    image: ellerbrock/alpine-bash-curl-ssl:latest
    volumes:
      - "../../../predict.json:/predict.json"
    # Wait for the stack to come up, then send 10 predict requests through the front proxy
    command: bash -c "echo \"start test...\"; sleep 15; for i in `seq 1 10`; do curl -X POST -H X-Model-Partition:1 --data @/predict.json front-envoy:8080/v1/models/tfs:predict; done"
    networks:
      envoymesh:
        aliases:
          - rest-client
    depends_on:
      - front-envoy
      - server1
      - server2

networks:
  envoymesh: {}
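The compose file mounts four files that are not included in the gist. As a rough sketch of what they could contain (all names, values, and paths below are illustrative assumptions, not the author's actual files; the formats are TensorFlow Serving's standard text protos, and the model name tfs matches the route path in front-envoy.yaml further down):

models.conf

model_config_list {
  config {
    name: "tfs"
    base_path: "/models/tfs"
    model_platform: "tensorflow"
  }
}

batching_parameters.txt

# server-side batching knobs; tune for the model and hardware
max_batch_size { value: 32 }
batch_timeout_micros { value: 1000 }
num_batch_threads { value: 4 }
max_enqueued_batches { value: 100 }

monitoring.conf

prometheus_config {
  enable: true
  path: "/monitoring/prometheus/metrics"
}

predict.json

{"instances": [[1.0, 2.0, 3.0, 4.0]]}

The request body shape depends entirely on the served model's signature; {"instances": [...]} is just the generic TF Serving REST predict format.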
Dockerfile-frontenvoy

FROM envoyproxy/envoy:latest
RUN apt-get update && apt-get -q install -y \
    curl
CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy
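With the files sketched above in place, a minimal way to bring the stack up and exercise the routed endpoint from the host (this assumes the ../../../docker and ../../../predict.json paths resolve relative to this directory):

docker-compose up --build -d

# Only requests carrying X-Model-Partition: 1 match the route below;
# Envoy then splits them 50/50 between server1 and server2.
curl -X POST -H "X-Model-Partition: 1" \
     --data @predict.json \
     http://localhost:8080/v1/models/tfs:predict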
front-envoy.yaml

static_resources:
  listeners:
  - address:
      socket_address:
        address: 0.0.0.0
        port_value: 8080
    filter_chains:
    - filters:
      - name: envoy.http_connection_manager
        config:
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              # Route predict requests tagged with X-Model-Partition: 1,
              # split 50/50 across the two TF Serving clusters
              - match:
                  prefix: "/v1/models/tfs:predict"
                  headers:
                  - name: "X-Model-Partition"
                    exact_match: "1"
                route:
                  weighted_clusters:
                    clusters:
                    - name: server1
                      weight: 50
                    - name: server2
                      weight: 50
          http_filters:
          - name: envoy.router
            config: {}
  clusters:
  # Upstreams are the TF Serving REST ports, which speak HTTP/1.1, so
  # http2_protocol_options is omitted (it would make Envoy speak HTTP/2
  # upstream and is only appropriate for the gRPC ports 8500/8600)
  - name: server1
    connect_timeout: 0.25s
    type: strict_dns
    lb_policy: round_robin
    hosts:
    - socket_address:
        address: server1
        port_value: 8501
  - name: server2
    connect_timeout: 0.25s
    type: strict_dns
    lb_policy: round_robin
    hosts:
    - socket_address:
        address: server2
        port_value: 8601
admin:
  access_log_path: "/dev/null"
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 8001
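To confirm traffic is actually being split, Envoy's admin server (published on host port 8001 above) exposes per-cluster counters. After the rest-client has run, something like:

# upstream requests seen by each TF Serving cluster
curl -s http://localhost:8001/stats | grep upstream_rq_total

# cluster membership and health as Envoy sees it
curl -s http://localhost:8001/clusters

/stats and /clusters are standard Envoy admin endpoints; the cluster.server1.* and cluster.server2.* counters should each end up near half of the requests sent.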