Here is a Docker Compose file that starts Redis, Mongo, Cassandra, and Neo4j containers for demonstration purposes.
To start them all at once:
docker-compose up
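A minimal sketch of what such a compose file can look like (the image tags, port mappings, and auth setting are assumptions; pin versions for anything beyond a demo):

version: "3"
services:
  redis:
    image: redis
    ports:
      - "6379:6379"
  mongo:
    image: mongo
    ports:
      - "27017:27017"
  cassandra:
    image: cassandra
    ports:
      - "9042:9042"
  neo4j:
    image: neo4j
    environment:
      # Disable auth, for local demos only.
      - NEO4J_AUTH=none
    ports:
      - "7474:7474"
      - "7687:7687"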
package utils

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row}
import org.specs2.matcher.{Expectable, Matcher}
import org.specs2.mutable.Specification

/**
 * Utility class to compare DataFrames and Rows inside unit tests.
 */
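The excerpt above stops at the imports; a minimal sketch of what such a specs2 matcher can look like follows (the class name, helper object, and set-based comparison are assumptions, not the original implementation — it builds on the imports above):

class DataFrameMatcher(expected: DataFrame) extends Matcher[DataFrame] {
  // Compare collected rows as sets, ignoring ordering (and duplicate counts).
  def apply[S <: DataFrame](actual: Expectable[S]) =
    result(
      expected.collect().toSet == actual.value.collect().toSet,
      "DataFrames contain the same rows",
      "DataFrames differ",
      actual)
}

object DataFrameMatchers {
  // Helper so a Specification can write: actualDf must matchDataFrame(expectedDf)
  def matchDataFrame(expected: DataFrame) = new DataFrameMatcher(expected)
}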
Run the Kaggle Python image with the current directory mounted as the working directory and Jupyter exposed on port 8888:

docker run -v $PWD:/tmp/working -w=/tmp/working --rm -it -p 8888:8888 \
  kaggle/python \
  jupyter notebook --no-browser --ip=* --allow-root \
  --NotebookApp.token="" --notebook-dir=/tmp/working
conda create -n productivity python=3.7
conda activate productivity
conda install ffmpeg
# conda install nginx
# conda install nodejs
# conda install rust
pip install -r requirements.txt

# to regenerate requirements.txt
pip install pip-tools
pip-compile requirements.in
conda create -n analytics python=3.7
conda activate analytics
# conda install pytorch-cpu torchvision-cpu torchtext -c pytorch

# if pip-compile doesn't work you can just run: pip install -r requirements.in
pip install pip-tools
pip-compile requirements.in
pip install -r requirements.txt
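For context, requirements.in lists only the top-level dependencies; pip-compile resolves them and pins the whole transitive tree into requirements.txt. A hypothetical example (the package names are placeholders):

# requirements.in
pandas
streamlit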
# Jupyter lab extensions
[alias]
lg1 = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold green)(%ar)%C(reset) %C(white)%s%C(reset) %C(dim white)- %an%C(reset)%C(bold yellow)%d%C(reset)' --all
lg2 = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold cyan)%aD%C(reset) %C(bold green)(%ar)%C(reset)%C(bold yellow)%d%C(reset)%n'' %C(white)%s%C(reset) %C(dim white)- %an%C(reset)' --all
lg = !"git lg1"
import altair as alt
import streamlit as st
from vega_datasets import data

chart = None


@st.cache
def get_data():
    return data.gapminder_health_income()
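The snippet above is only the beginning of the app; a minimal continuation can look like this (the encoding choices and the st.altair_chart call are assumptions about where the code was heading, not the original source):

source = get_data()

# Scatter plot of health vs. income, sized by population, log-scale x axis.
chart = alt.Chart(source).mark_circle().encode(
    alt.X("income:Q", scale=alt.Scale(type="log")),
    alt.Y("health:Q"),
    size="population:Q",
    tooltip=["country"],
)

st.altair_chart(chart, use_container_width=True)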
I started a Vega datasets explorer as a Streamlit app. Feel free to reuse it.
pip install altair matplotlib pandas streamlit scikit-learn vega_datasets
streamlit run vega_datasets_explorer.py
The following hack for reading request headers comes from streamlit/streamlit#1083 (comment):
from streamlit.server.Server import Server
import streamlit as st


def get_headers():
    # Hack to get the session object from Streamlit.
    current_server = Server.get_current()
You can use your own conda environments as kernels within JupyterHub. Make sure the library ipykernel is installed in your environment; if not, install it before you proceed: conda install ipykernel

# Activate your environment
conda activate yourEnvName
# Register the environment as a Jupyter kernel
python -m ipykernel install --user --name yourEnvName
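To verify that the new kernel is registered, list the installed kernelspecs:

jupyter kernelspec list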