Skip to content

Instantly share code, notes, and snippets.

cluster_name: repro-issue-13090
min_workers: 0
max_workers: 0
docker:
image: anyscale/ray-ml:latest
container_name: ray_container
pull_before_run: True
+-------------------------------------------+------------+-------+--------+------------------+---------+----------+----------------------+----------------------+--------------------+
| Trial name | status | loc | iter | total time (s) | ts | reward | episode_reward_max | episode_reward_min | episode_len_mean |
|-------------------------------------------+------------+-------+--------+------------------+---------+----------+----------------------+----------------------+--------------------|
| A2C_BreakoutNoFrameskip-v4_ed940_00000 | TERMINATED | | 354 | 3601.99 | 3929000 | 142.56 | 330 | 24 | 5552.71 |
| A2C_BreakoutNoFrameskip-v4_ed940_00001 | TERMINATED | | 354 | 3609.69 | 3975000 | 164.16 | 396 | 26 | 5796.55 |
| APEX_BreakoutNoFrameskip-v4_ed940_00002 | TERMINATED | | 92 | 3616.25 | 6282880 | 12
####################################################################
# All nodes in this cluster will auto-terminate in 1 hour
####################################################################
# A unique identifier for the head node and workers of this cluster.
cluster_name: autoscaler-stress-test-1.0.0
# The minimum number of worker nodes to launch in addition to the head
# node. This number should be >= 0.
min_workers: 100
Iteration 1023:
- Iteration time: 9.326589822769165.
- Absolute time: 1607723877.3213854.
- Total elapsed time: 7786.904662370682.
2020-12-11 21:58:03,954 WARNING services.py:1640 -- WARNING: The object store is using /tmp instead of /dev/shm because /dev/shm has only 67108864 bytes available. This may slow down performance! You may be able to free up space by deleting files in /dev/shm or terminating any running plasma_store_server processes. If you are inside a Docker container, you may need to pass an argument with the flag '--shm-size' to 'docker run'.
(pid=raylet) E1211 21:58:05.133839404 73164 server_chttp2.cc:40] {"created":"@1607723885.133752839","description":"No address added out of total 1 resolved","file":"external/com_github_grpc_grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc","file_line":394,"referenced_errors":[{"created":"@1607723885.133750775","description":"Failed to add any wildcard listeners","file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_pos
import ray
import streamlit as st
def fetch_actor():
    """Return a handle to the named actor "streamlitactor".

    Retries the lookup (via retry_until_success) until the actor has been
    registered on the Ray cluster, so callers can start before the actor does.
    """
    return retry_until_success(lambda: ray.get_actor("streamlitactor"))
def main():
# Attach to an already-running Ray cluster; ignore_reinit_error allows the
# Streamlit script to be re-executed in the same process without raising.
ray.init(address="auto", ignore_reinit_error=True)
# NOTE(review): snippet appears truncated here — the rest of main()'s body
# (what is done with `actor`) is not visible in this paste.
actor = fetch_actor()
import streamlit as st
import time
import session_state
from datetime import datetime, timedelta
from util import get_db_connection
from PIL import Image
import ray
def retry_until_success(f, timeout=15):
# Deadline after which retrying should stop (timeout is in seconds).
# NOTE(review): the retry loop itself is truncated in this snippet — only the
# deadline computation is visible.
end = datetime.now() + timedelta(seconds=timeout)
import uuid
import ray
from ray import serve
from util import ImpressionStore, choose_ensemble_results
# Ray actor wrapping a composed/ensemble model — presumably uses the
# ImpressionStore / choose_ensemble_results helpers imported above.
# NOTE(review): the class body is truncated in this snippet.
@ray.remote
class ComposedModel:
import ray
import os
import time
from datetime import datetime, timedelta
import streamlit as st
def main():
# Attach to an already-running Ray cluster (do not start a new one).
ray.init(address="auto")
# NOTE(review): `Optional` is not imported in the visible snippet — needs
# `from typing import Optional` to run as written.
actor_name: Optional[str] = os.getenv("ACTOR_NAME")
# NOTE(review): snippet is truncated — the body of this branch (handling a
# missing ACTOR_NAME environment variable) is not visible.
if not actor_name:
import ray
import os
import time
from datetime import datetime, timedelta
import streamlit as st
def main():
# Attach to an already-running Ray cluster (do not start a new one).
ray.init(address="auto")
# NOTE(review): `Optional` is not imported in the visible snippet — needs
# `from typing import Optional` to run as written.
actor_name: Optional[str] = os.getenv("ACTOR_NAME")
# NOTE(review): snippet is truncated — the body of this branch (handling a
# missing ACTOR_NAME environment variable) is not visible.
if not actor_name:
import ray
import os
import time
from datetime import datetime, timedelta
import streamlit as st
def main():
# Attach to an already-running Ray cluster (do not start a new one).
ray.init(address="auto")
# NOTE(review): `Optional` is not imported in the visible snippet — needs
# `from typing import Optional` to run as written.
actor_name: Optional[str] = os.getenv("ACTOR_NAME")
# NOTE(review): snippet is truncated — the body of this branch (handling a
# missing ACTOR_NAME environment variable) is not visible.
if not actor_name: