Patryk Oleniuk (patryk-oleniuk)

patryk-oleniuk / artifact_viewer_filetypes.py
Last active May 11, 2020 03:47
Creating a bunch of test files, and then logging them into an mlflow experiment
import json
import plotly.express as px
import mlflow
import requests
### prepare sample files to log
# test data
df = px.data.iris()
# sample CSV file
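The gist preview is truncated at this point. A minimal sketch of how the remaining file creation and logging might continue, reusing the sample file names that appear in the folder-logging gist below; the JSON file name, the image URL, and the run name are illustrative placeholders, not part of the original gist:

df.to_csv("data_sample.csv", index=False)

# sample HTML file (interactive plotly chart)
fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species")
fig.write_html("data_sample.html")

# sample JSON file (file name is a placeholder)
with open("data_sample.json", "w") as f:
    json.dump(df.head().to_dict(), f)

# sample PNG downloaded with requests (placeholder URL, replace with any image)
resp = requests.get("https://example.com/sample.png")
with open("image_sample.png", "wb") as f:
    f.write(resp.content)

# log all sample files into one mlflow run (run name is a placeholder)
with mlflow.start_run(experiment_id=1, run_name="artifact_filetypes") as run:
    for path in ["data_sample.csv", "data_sample.html", "data_sample.json", "image_sample.png"]:
        mlflow.log_artifact(path)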
patryk-oleniuk / artifact_viewer_web_embed.py
Created May 10, 2020 06:14
How to embed a website in the mlflow artifact viewer
website_embed = '''<!DOCTYPE html>
<html>
<iframe src="https://en.wikipedia.org/wiki/Machine_learning" style='width: 700px; height: 450px' sandbox='allow-same-origin allow-scripts'>
</iframe>
</html>'''
with mlflow.start_run(experiment_id=1, run_name="website_embedding") as run:
    with open("output.html", "w") as f:
        f.write(website_embed)
    mlflow.log_artifact("output.html")
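The same pattern can be wrapped in a small helper so any URL can be embedded; the function name and its defaults below are illustrative, not part of the original gist:

import mlflow

def log_embedded_website(url, run_name="website_embedding", experiment_id=1):
    """Wrap `url` in an iframe and log the resulting HTML page as an mlflow artifact."""
    html = (
        "<!DOCTYPE html><html>"
        f"<iframe src='{url}' style='width: 700px; height: 450px' "
        "sandbox='allow-same-origin allow-scripts'></iframe>"
        "</html>"
    )
    with mlflow.start_run(experiment_id=experiment_id, run_name=run_name):
        with open("output.html", "w") as f:
            f.write(html)
        mlflow.log_artifact("output.html")

log_embedded_website("https://en.wikipedia.org/wiki/Machine_learning")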
patryk-oleniuk / artifact_viewer_folders.py
Created May 10, 2020 09:44
Showing how to log a folder structure in mlflow
"""
the output directory is prepared before logging:
├── output
│   ├── data
│   │   ├── data_sample.csv
│   │   └── data_sample.html
│   ├── images
│   │   ├── gif_sample.gif
│   │   └── image_sample.png
│   ├── maps
"""
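The preview stops before the logging call itself; a minimal sketch of that call, assuming the output/ directory above has already been created (the run name is a placeholder):

import mlflow

with mlflow.start_run(experiment_id=1, run_name="folder_logging") as run:
    # log_artifacts walks the local directory and keeps its structure in the artifact viewer
    mlflow.log_artifacts("output")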
# nested runs: a top-level run with a child run and two grandchild runs
with mlflow.start_run(experiment_id=1, run_name="top_level_run") as run:
    with mlflow.start_run(experiment_id=1, run_name="subrun1", nested=True) as subrun1:
        mlflow.log_param("p1", "red")
        mlflow.log_metric("m1", 5.1)
        with mlflow.start_run(experiment_id=1, run_name="subsubrun1", nested=True) as subsubrun1:
            mlflow.log_param("p3", "green")
            mlflow.log_metric("m3", 5.24)
        with mlflow.start_run(experiment_id=1, run_name="subsubrun2", nested=True) as subsubrun2:
            mlflow.log_param("p4", "blue")
            mlflow.log_metric("m5", 3.25)
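To inspect the hierarchy afterwards, the runs can be listed with mlflow.search_runs; mlflow records each child's parent in the mlflow.parentRunId tag. This sketch assumes it runs right after the block above, so run still refers to the top-level run:

# all runs of the experiment, nested ones included, as a pandas DataFrame
all_runs = mlflow.search_runs(experiment_ids=["1"])

# direct children of the top-level run, filtered on the parent-run tag column
children = all_runs[all_runs["tags.mlflow.parentRunId"] == run.info.run_id]
print(children["run_id"].tolist())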
patryk-oleniuk / correct_run.py
Last active May 10, 2020 10:47
Correcting an existing mlflow run
# To correct a parameter, metric or artifact of an existing run, pass run_id
# instead of experiment_id to the mlflow.start_run function.
with mlflow.start_run(run_id="your_run_id") as run:
    mlflow.log_param("p1", "your_corrected_value")
    mlflow.log_metric("m1", 42.0)              # your corrected metric
    mlflow.log_artifact("data_sample.html")    # your corrected artifact file
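To double-check that the correction landed, the run can be read back; a short sketch using MlflowClient.get_run, with the same placeholder run id as above:

from mlflow.tracking import MlflowClient

client = MlflowClient()
corrected = client.get_run("your_run_id")
print(corrected.data.params["p1"])   # 'your_corrected_value'
print(corrected.data.metrics["m1"])  # 42.0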