Skip to content

Instantly share code, notes, and snippets.

View JuhaKiili's full-sized avatar

Juha Kiili JuhaKiili

  • Valohai Ltd
  • Finland
View GitHub Profile
# This code works for:
# 1. Single image
# 2. List of images
# 3. An archive with images
# 4. List of archives
# 5. Mix of everything above
# Defined by list of:
# 1. Public URIs
import valohai
def main(old_config):
    """Assemble the train-superbai Valohai pipeline from an existing config.

    Creates the pipeline and registers its three execution nodes; edges
    between the nodes are presumably wired up later (outside this view).
    """
    pipeline = valohai.Pipeline(name="train-superbai", config=old_config)

    # Define nodes.
    # FIX(review): the original called `papi.execution(...)`, but `papi` is
    # never defined — the Pipeline instance created above is the only object
    # in scope that exposes .execution(), so use it.
    convert = pipeline.execution("convert-superbai")
    weights = pipeline.execution("weights")
    train = pipeline.execution("train")
import numpy as np
from yolov3_tf2.models import YoloV3
from yolov3_tf2.utils import load_darknet_weights
import tensorflow as tf
import valohai
# Parameters handed to the execution.
# weights_num_classes: class count of the pretrained Darknet weights (COCO = 80).
params = dict(weights_num_classes=80)
@JuhaKiili
JuhaKiili / with-valohai-utils.py
Last active May 27, 2021 17:22
Example of image resize step supporting archived inputs and outputs using valohai-utils
import os
from PIL import Image
import valohai
# Target dimensions (in pixels) for the image-resize step.
parameters = dict(width=640, height=480)
inputs = {
"images": [
@JuhaKiili
JuhaKiili / without-valohai-utils.py
Last active May 28, 2021 05:49
Example of image resize step with support for archived inputs and outputs without using valohai-utils
import glob
import os
import argparse
import json
import shutil
import tempfile
import zipfile
from PIL import Image
parser = argparse.ArgumentParser()
from enums import *
import random
import tensorflow as tf
import numpy as np
class DeepGambler:
def __init__(self, learning_rate=0.5, discount=0.95, exploration_rate=1.0, iterations=10000):
self.learning_rate = learning_rate
self.discount = discount # How much we appreciate future reward over current
@JuhaKiili
JuhaKiili / deep_gambler.py
Last active April 18, 2019 09:15 — forked from joannapurosto/deep_gambler.py
Q-learning tutorial part 3 and Deep Learning
from enums import *
import random
import tensorflow as tf
import numpy as np
class DeepGambler:
def __init__(self, learning_rate=0.1, discount=0.95, exploration_rate=1.0, iterations=10000):
self.learning_rate = learning_rate
self.discount = discount # How much we appreciate future reward over current
self.exploration_rate = 1.0 # Initial exploration rate
from enums import *
import random
import tensorflow as tf
import numpy as np
class DeepGambler:
def __init__(self, learning_rate=0.1, discount=0.95, exploration_rate=1.0, iterations=10000):
self.learning_rate = learning_rate
self.discount = discount # How much we appreciate future reward over current
self.exploration_rate = 1.0 # Initial exploration rate