# DuckDNS add-on configuration (e.g. for Home Assistant): keeps the DuckDNS
# record up to date and obtains a Let's Encrypt certificate for the domain
domains:
  - <domain-you-setup-in-duckdns>.duckdns.org
token: <duckdns-token>
aliases: []
lets_encrypt:
  accept_terms: true
  algo: secp384r1
  certfile: fullchain.pem
  keyfile: privkey.pem
seconds: 300
@mrafayaleem
mrafayaleem / nonSingletonRateLimiterResilience4j.java
Last active May 7, 2020 19:32
Getting a non-singleton rate limiter in resilience4j
// Getting a non-singleton rate limiter in resilience4j.
// This effectively creates as many rate limiters as wanted; for example, a rate limiter
// instance owned by each thread vs. a global rate limiter for the whole application
// can be achieved through the following in a Spring Boot application.

// Configuration retrieved from application.yml in Spring Boot
Optional<RateLimiterConfig> config = rateLimiterRegistry.getConfiguration("extendLock");

// RateLimiter.of calls new AtomicRateLimiter under the hood, so the name doesn't have to be
// unique. This bypasses the registry's one-instance-per-name (singleton) behaviour.
RateLimiter rateLimiter = RateLimiter.of("uniqueNameDoesNotMatter", config.orElseThrow());
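// Usage sketch (assumption, standard resilience4j decorator API; extendLock() is hypothetical):
// Supplier<String> guarded = RateLimiter.decorateSupplier(rateLimiter, this::extendLock);
// guarded.get(); // waits for a permit or fails fast, per the "extendLock" configuration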
@mrafayaleem
mrafayaleem / docker-wwi.sh
Created April 21, 2020 01:35 — forked from alextercete/docker-wwi.sh
Create a docker container for the WideWorldImporters database
#!/bin/bash
# See: https://docs.microsoft.com/en-us/sql/linux/tutorial-restore-backup-in-sql-server-container
SA_PASSWORD='<YourStrong!Passw0rd>'  # quoted so the placeholder's <, > and ! don't break the shell

function show_info {
    tput setaf 6; echo "$1"; tput sgr0
}

show_info 'Pulling the container image...'
# Install BigDL and Analytics Zoo from PyPI
pip install BigDL
pip install analytics-zoo
# Test dataframe
testDF = NNImageReader.readImages(test_path, sc, resizeH=300, resizeW=300, image_codec=1)
testDF = testDF.withColumn('filename', getFileName('image')).withColumn('label', getLabel('image'))
testPredDF = antbeeModel.transform(testDF).cache()
row = testPredDF.first().asDict()
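As a small sketch (assuming the evaluator defined in the training snippet further down), the same accuracy metric can be computed on the test predictions:

# Sketch (not in the original): score the test predictions with the evaluator defined below
testAccuracy = evaluator.evaluate(testPredDF)
print("Test accuracy: {:.3f}".format(testAccuracy))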
import matplotlib.pyplot as plt
from PIL import Image

# showImage function
def showImage(row):
    # Open the file; row['image'][0] is the image URI, and [5:] strips the leading 'file:' scheme
    plt.imshow(Image.open(row['image'][0][5:]))
    plt.show()
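For instance, the row fetched from the test predictions above can be rendered directly:

showImage(row)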
# Chain the pre-trained embedding model and the classifier into a Spark ML pipeline
pipeline = Pipeline(stages=[preTrainedNNModel, classifier])

# Train the model and get predictions on the validation set
antbeeModel = pipeline.fit(trainingDF)
predictionDF = antbeeModel.transform(validationDF).cache()
predictionDF.sample(False, 0.1).show()

# Evaluate predictions; the expected error is less than 10%
evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictionDF)
# Create a last layer with input dimension of 1000 that outputs the 2 classes, ants and bees
# Epochs are set to 25 and the optimizer is SGD
lrModel = Sequential().add(Linear(1000, 2)).add(LogSoftMax())
classifier = NNClassifier(lrModel, ClassNLLCriterion(), SeqToTensor([1000])) \
    .setOptimMethod(SGD(learningrate=0.001, momentum=0.9)) \
    .setBatchSize(4) \
    .setMaxEpoch(25) \
    .setFeaturesCol("embedding") \
    .setCachingSample(False)
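The imports gist at the end also pulls in BigDL's Adam optimizer, although the snippet above uses SGD. As a hedged sketch, swapping it in would look like this (assuming the same NNClassifier API):

# Alternative (assumption): use the Adam optimizer imported below instead of SGD
classifier.setOptimMethod(Adam(learningrate=0.001))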
# Create a chained transformer that resizes, crops and normalizes each image in the dataframe
# (123.0, 117.0 and 104.0 are the standard per-channel ImageNet means)
transformer = ChainedPreprocessing(
    [RowToImageFeature(), ImageResize(256, 256), ImageCenterCrop(224, 224),
     ImageChannelNormalize(123.0, 117.0, 104.0), ImageMatToTensor(), ImageFeatureToTensor()])
# Load the pre-trained ResNet-50 that was downloaded earlier and give the column to pick features from
preTrainedNNModel = NNModel(Model.loadModel(model_path), transformer) \
    .setFeaturesCol("image") \
    .setPredictionCol("embedding")
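model_path is assumed by this snippet; a hypothetical value, pointing at a BigDL-format ResNet-50 from the Analytics Zoo model zoo, could be:

# Hypothetical path (assumption): a BigDL-format ResNet-50 downloaded beforehand
model_path = "/tmp/models/analytics-zoo_resnet-50_imagenet_0.1.0.model"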
# Define udfs to extract the filename and generate labels as floats
# (BigDL's ClassNLLCriterion expects 1-based class labels, hence 1.0 and 2.0)
getFileName = udf(lambda row: os.path.basename(row[0]), StringType())
getLabel = udf(lambda row: 1.0 if 'ants' in row[0] else 2.0, DoubleType())
# Construct training dataframe
trainingDF = NNImageReader.readImages(train_path, sc, resizeH=300, resizeW=300, image_codec=1)
trainingDF = trainingDF.withColumn('filename', getFileName('image')).withColumn('label', getLabel('image'))
# Construct validation dataframe (the label column is needed by the evaluator above)
validationDF = NNImageReader.readImages(val_path, sc, resizeH=300, resizeW=300, image_codec=1)
validationDF = validationDF.withColumn('filename', getFileName('image')).withColumn('label', getLabel('image'))
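The train_path, val_path and test_path variables are assumed throughout these snippets; with the usual ants-and-bees (hymenoptera) folder layout they could hypothetically be:

# Hypothetical paths (assumption): the ants/bees dataset split into per-set folders
train_path = "hymenoptera_data/train"
val_path = "hymenoptera_data/val"
test_path = "hymenoptera_data/test"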
import os

from bigdl.nn.criterion import *
from bigdl.nn.layer import *
from bigdl.optim.optimizer import Adam, SGD  # the classifier snippet uses SGD
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql.functions import col, udf
from pyspark.sql.types import DoubleType, StringType

# Analytics Zoo imports used by the snippets above
from zoo.feature.common import ChainedPreprocessing, SeqToTensor
from zoo.feature.image import *
from zoo.pipeline.nnframes import NNClassifier, NNModel, NNImageReader
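The snippets above also pass a SparkContext sc to NNImageReader; a minimal sketch of obtaining it with Analytics Zoo:

# Minimal sketch: create the SparkContext that NNImageReader expects
from zoo.common.nncontext import init_nncontext
sc = init_nncontext()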