monogenea / 2-grad.R
Last active December 11, 2021 10:02
# Sun Nov 14 20:13:12 2021 ------------------------------
# Logistic regression mtcars
library(RColorBrewer)
data("mtcars")
# Determine number of iterations
niter <- 100
# Determine learning rate / step size
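The preview cuts off at the step size; below is a minimal Python sketch of the same technique, batch gradient descent for logistic regression, on synthetic stand-in data (mtcars itself is an R dataset, so the values here are illustrative only).

import numpy as np

# Toy stand-in for mtcars: x ~ weight, y ~ transmission type (0/1)
rng = np.random.default_rng(999)
x = rng.normal(3, 1, 32)
y = (x + rng.normal(0, .5, 32) < 3).astype(float)

X = np.column_stack([np.ones_like(x), x])   # prepend intercept column
beta = np.zeros(2)                          # initial coefficients
alpha, niter = 0.1, 100                     # step size and iteration count

for _ in range(niter):
    p = 1 / (1 + np.exp(-X @ beta))         # sigmoid gives P(y = 1 | x)
    grad = X.T @ (p - y) / len(y)           # gradient of the mean log-loss
    beta -= alpha * grad                    # descend along the gradient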
# Sun Nov 14 19:58:50 2021 ------------------------------
# Regression Oranges
library(RColorBrewer)
data("Orange")
# Determine number of iterations
niter <- 25
# Determine learning rate / step size
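The Orange fragment is the linear-regression counterpart, also truncated at the step size; a matching Python sketch, quoting the first tree's measurements from R's built-in Orange data, with age standardized so one step size suits both coefficients:

import numpy as np

# Tree 1 from R's Orange dataset: age (days) vs. trunk circumference (mm)
age = np.array([118, 484, 664, 1004, 1231, 1372, 1582], dtype=float)
circ = np.array([30, 58, 87, 115, 120, 142, 145], dtype=float)
x = (age - age.mean()) / age.std()          # standardize the predictor

b0, b1 = 0.0, 0.0
alpha, niter = 0.1, 25                      # matches the gist's 25 iterations
for _ in range(niter):
    resid = b0 + b1 * x - circ              # residuals of the current fit
    b0 -= alpha * resid.mean()              # gradient step for the intercept
    b1 -= alpha * (resid * x).mean()        # gradient step for the slope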
#%% Initiate processing
# Init count
count = 0
# Create new window
cv2.namedWindow('stream')
while vid.isOpened():
    # Perform detection every 60 frames
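    # NOTE: the gist preview truncates inside this loop; the body below is a
    # hedged reconstruction of a standard OpenCV-DNN detection loop, not the
    # original code (the 416x416 blob size is an assumption).
    ok, frame = vid.read()
    if not ok:
        break
    if count % 60 == 0:
        blob = cv2.dnn.blobFromImage(frame, 1/255, (416, 416),
                                     swapRB=True, crop=False)
        net.setInput(blob)
        outputs = net.forward(net.getUnconnectedOutLayersNames())
        bboxes, probs, class_ids = where_is_it(frame, outputs)
        # Non-maximum suppression with the thresholds defined above
        keep = cv2.dnn.NMSBoxes(bboxes, probs, P_THRESH, NMS_THRESH)
        for i in np.array(keep).flatten():
            x, y, w, h = bboxes[i]
            color = [int(c) for c in colors[class_ids[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
    out.write(frame)
    cv2.imshow('stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):   # quit on 'q'
        break
    count += 1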
#%% Load video capture and init VideoWriter
vid = cv2.VideoCapture('input/input.mp4')
# Frame width and height (properties 3 and 4, i.e. CAP_PROP_FRAME_WIDTH/HEIGHT)
vid_w, vid_h = int(vid.get(3)), int(vid.get(4))
# Write MP4 output at the input frame rate and size
out = cv2.VideoWriter('output/output.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
                      vid.get(cv2.CAP_PROP_FPS), (vid_w, vid_h))
# Check if capture started successfully
assert vid.isOpened()
#%% Define function to extract object coordinates if successful in detection
def where_is_it(frame, outputs):
    frame_h = frame.shape[0]
    frame_w = frame.shape[1]
    bboxes, probs, class_ids = [], [], []
    for preds in outputs:  # different detection scales
        hits = np.any(preds[:, 5:] > P_THRESH, axis=1) & (preds[:, 4] > OBJ_THRESH)
        # Save prob and bbox coordinates if both objectness and probability pass respective thresholds
        for i in np.where(hits)[0]:
            pred = preds[i, :]
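            # NOTE: the preview truncates here; the rest is a hedged sketch of
            # the standard YOLOv3 decoding (center and size come back as
            # fractions of the frame).
            class_id = int(np.argmax(pred[5:]))
            probs.append(float(pred[5 + class_id]))
            class_ids.append(class_id)
            w, h = int(pred[2] * frame_w), int(pred[3] * frame_h)
            x = int(pred[0] * frame_w - w / 2)   # top-left corner from center
            y = int(pred[1] * frame_h - h / 2)
            bboxes.append([x, y, w, h])
    return bboxes, probs, class_ids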
#%% Load YOLOv3 COCO weights, configs and class IDs
# Import class names
with open('yolov3/coco.names', 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')
# One random RGB color per class for drawing boxes
colors = np.random.randint(0, 255, (len(classes), 3))
# Give the configuration and weight files for the model and load the network using them
cfg = 'yolov3/yolov3.cfg'
weights = 'yolov3/yolov3.weights'
# Load model
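# NOTE: the preview ends at the load step; with OpenCV's DNN module this would
# plausibly read:
net = cv2.dnn.readNetFromDarknet(cfg, weights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)   # CPU inference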
#%% Imports and constants
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Define objectness, prob and NMS thresholds
OBJ_THRESH = .6
P_THRESH = .6
NMS_THRESH = .5
#!/bin/bash
# Create subdirs (-p: no error if they already exist)
mkdir -p yolov3 input output
# Convert video (first script argument) to a 720-px-wide MP4 without audio
echo "Converting $1 to MP4..."
ffmpeg -i "$1" -vcodec h264 -vf scale=720:-2,setsar=1:1 -an input/input.mp4
# Get yolo dependencies (NOTE: the preview truncates here; these are the
# canonical Darknet sources for the files the Python script expects under yolov3/)
wget -P yolov3 https://pjreddie.com/media/files/yolov3.weights
wget -P yolov3 https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg
wget -P yolov3 https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names
# Define UMAP
brain_umap = umap.UMAP(random_state=999, n_neighbors=30, min_dist=.25)
# Fit UMAP and extract latent vars 1-2
embedding = pd.DataFrame(brain_umap.fit_transform(matrix), columns = ['UMAP1','UMAP2'])
# Produce sns.scatterplot and pass metadata.subclasses as color
sns_plot = sns.scatterplot(x='UMAP1', y='UMAP2', data=embedding,
                           hue=metadata.subclass_label.to_list(),
                           alpha=.1, linewidth=0, s=1)
# Remove expression features with > 50% zero-valued expression levels
is_expressed = np.apply_along_axis(lambda x: np.mean(x == 0) < .5, arr=matrix, axis=0)
matrix = matrix.loc[:, is_expressed]   # .loc: matrix is a DataFrame here, not an array
# Log2-transform (the +1 keeps zero counts finite)
matrix = np.log2(matrix.to_numpy() + 1)
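These two fragments arrive without their imports and in reverse order on the page (the filtering and log-transform precede the UMAP fit); a minimal end-to-end sketch with toy stand-in data, since the original expression matrix and metadata are not shown here:

import numpy as np
import pandas as pd
import seaborn as sns
import umap

# Toy stand-in data: 500 cells x 200 genes, three fake subclasses
rng = np.random.default_rng(999)
matrix = pd.DataFrame(rng.poisson(1, (500, 200)).astype(float))
metadata = pd.DataFrame({'subclass_label': rng.choice(['A', 'B', 'C'], 500)})

# Drop genes with >= 50% zero counts, then log2(x + 1)-transform the rest
is_expressed = (matrix == 0).mean(axis=0) < .5
matrix = np.log2(matrix.loc[:, is_expressed].to_numpy() + 1)

# Fit UMAP and plot the two latent dimensions, colored by subclass
embedding = pd.DataFrame(
    umap.UMAP(random_state=999, n_neighbors=30, min_dist=.25).fit_transform(matrix),
    columns=['UMAP1', 'UMAP2'])
ax = sns.scatterplot(x='UMAP1', y='UMAP2', data=embedding,
                     hue=metadata.subclass_label.to_list(),
                     alpha=.1, linewidth=0, s=1)
ax.figure.savefig('umap.png', dpi=300)   # hypothetical output path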