library(ggplot2)

cstrike <- read.csv("CSrandom2.csv")
csgo <- read.csv("CSgopolar.csv")

# Hexbin density plot of shot placement within CS 1.6's square spread box
p <- ggplot(data = cstrike, aes(x = x, y = y)) +
  stat_binhex(aes(fill = ..density.. * 100), bins = 8) +
  scale_fill_gradientn(colours = c("Gray97", "Firebrick3"),
                       name = "Percentage of shots") +
  theme_bw(base_size = 20) +
  theme(axis.title.y = element_text(angle = 0, hjust = 0)) +
  ggtitle("Distribution of shots in CS 1.6's spread box")
p
library(ggplot2)

cstrike <- read.csv("CSrandom2.csv")
csgo <- read.csv("CSgopolar.csv")

# Scatter plot of 100 000 simulated CS 1.6 shots; low alpha reveals density
p <- ggplot(data = cstrike, aes(x = x, y = y)) +
  geom_point(alpha = 0.05, colour = "Blue") +
  theme_bw(base_size = 20) +
  theme(axis.title.y = element_text(angle = 0, hjust = 0)) +
  ggtitle("Spread/Inaccuracy in CS 1.6 (100 000 shots)")
p
Lauler / csspread.py
Created December 14, 2015 18:41
Generate a CSV file with CS 1.6's and CS:GO's different methods for calculating spread
import random
import math

def cs16():
    # Write 100 000 simulated CS 1.6 shots (x, y) to a CSV file
    with open('CS16spread.csv', 'w') as file:
        i = 0
        file.write("x" + "," + "y" + "\n")
        while i < 100000:
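
A minimal sketch of the two sampling schemes the description contrasts: CS 1.6's square "spread box" (two independent uniform draws) versus a CS:GO-style polar draw (random angle and radius, matching the CSgopolar.csv file name). The spread constant, output file name and shot count below are placeholders, not values taken from the gists.

import random
import math

SPREAD = 1.0  # placeholder half-width/radius, not taken from the gists

def box_shot():
    # CS 1.6-style: x and y drawn independently, so shots fill a square box
    return random.uniform(-SPREAD, SPREAD), random.uniform(-SPREAD, SPREAD)

def polar_shot():
    # CS:GO-style: a random angle and radius, so shots fall inside a circle
    theta = random.uniform(0, 2 * math.pi)
    r = random.uniform(0, SPREAD)
    return r * math.cos(theta), r * math.sin(theta)

with open('spread_sketch.csv', 'w') as file:
    file.write("x,y\n")
    for _ in range(100000):
        x, y = box_shot()
        file.write(f"{x},{y}\n")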
Lauler / csgoinacc.py
Last active December 29, 2015 19:04
import math

tickrate = 100   # server ticks per second
firerate = 0.1   # AK-47 fire rate (seconds between shots)
seconds = 2      # number of seconds to run the spraying simulation

def inacc():
    with open('CSinaccuracypre2.csv', 'w') as file:
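
Given tickrate, firerate and seconds above, one way such a per-tick simulation could proceed is sketched below; the growth-per-shot and decay-per-tick constants, and the way inaccuracy accumulates, are illustrative assumptions and not taken from the gist.

# Hypothetical spray model reusing tickrate, firerate and seconds from above:
# inaccuracy jumps by a fixed amount per shot and decays back toward a base
# value each tick. All constants here are assumptions.
base_inacc = 1.0
growth_per_shot = 0.5
decay_per_tick = 0.02

def inacc_sketch():
    with open('inaccuracy_sketch.csv', 'w') as file:
        file.write("time,inaccuracy\n")
        inaccuracy = base_inacc
        ticks_per_shot = int(firerate * tickrate)
        for tick in range(int(seconds * tickrate)):
            if tick % ticks_per_shot == 0:  # a shot is fired on this tick
                inaccuracy += growth_per_shot
            inaccuracy = max(base_inacc, inaccuracy - decay_per_tick)
            file.write(f"{tick / tickrate},{inaccuracy}\n")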
# Read yearly stock returns and inspect the first rows
stocks <- read.table("returns.csv")
head(stocks, 200)

# One row per year: 5th, 25th, 50th, 75th and 95th percentiles of returns
# (quantile levels implied by the column names)
stocks_quantiles <- data.frame(year = 1:40, five = NA, twofive = NA,
                               median = NA, sevenfive = NA, ninefive = NA)
probs <- c(0.05, 0.25, 0.50, 0.75, 0.95)
for (i in 1:40) {
  for (j in 2:6) {
    stocks_quantiles[i, j] <- quantile(stocks$multip[stocks$year == i],
                                       probs = probs[j - 1])
  }
}
library(jsonlite)

faton <- fromJSON("https://api.opendota.com/api/players/36933/matches")    # my matches and basic stats
fatonmatch <- fromJSON("https://api.opendota.com/api/matches/2863026979")  # one of my parsed matches

# Need to make an SQL call to https://api.opendota.com/api/explorer
# in order to get a full list of professional matches
promatches <- fromJSON("https://api.opendota.com/api/proMatches")

faton$match_id  # match ids of all my matches
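
Following the comment above, a minimal Python sketch of what a call to the explorer endpoint could look like; the `sql` query parameter, the table and column names, and the shape of the response (`rows`) are assumptions for illustration and not taken from the gist.

# Hypothetical sketch: fetch professional match ids via OpenDota's SQL explorer.
# Endpoint usage, table/column names and response shape are assumptions.
import requests

sql = "SELECT match_id, start_time FROM matches ORDER BY start_time DESC LIMIT 100"
resp = requests.get("https://api.opendota.com/api/explorer", params={"sql": sql})
resp.raise_for_status()
rows = resp.json().get("rows", [])
pro_match_ids = [row["match_id"] for row in rows]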
import os
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from efficientnet.tfkeras import EfficientNetB5, EfficientNetB3, preprocess_input
from tensorflow.keras.applications import DenseNet201
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

import os
import numpy as np
import scipy as sp
# from tensorflow.contrib.saved_model import load_keras_model
# from tensorflow import keras
from tensorflow.keras.models import load_model
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import backend
from tensorflow.keras.preprocessing.image import ImageDataGenerator
Lauler / preprocess_srt.py
Created August 15, 2023 06:09
Preprocess .srt subtitle files and bucket them into ~30-second chunks
import numpy as np
import os
import pandas as pd
import pysrt
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
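
A sketch of the bucketing idea the description mentions: read an .srt file with pysrt and merge consecutive subtitles into chunks covering at most ~30 seconds of subtitle time. Function and variable names here are illustrative, not taken from the gist.

# Hypothetical bucketing of subtitles into ~30 s chunks using pysrt
import pysrt

def bucket_srt(path, max_seconds=30):
    subs = pysrt.open(path)
    chunks, current, chunk_start = [], [], None
    for sub in subs:
        if chunk_start is None:
            chunk_start = sub.start
        # Close the chunk once it would exceed ~30 seconds of subtitle time
        if current and (sub.end.ordinal - chunk_start.ordinal) / 1000 > max_seconds:
            chunks.append(" ".join(current))
            current, chunk_start = [], sub.start
        current.append(sub.text.replace("\n", " "))
    if current:
        chunks.append(" ".join(current))
    return chunks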