Ali Waqas (aliwaqas333)

🎯
Focusing
View GitHub Profile
aliwaqas333 / rledata.json
Created November 7, 2022 02:19
sample rle format
"value": {
"format": "rle",
"rle": [
0, 18, 192, 0, 57, 27, 255, 255, 255, 0, 255, 255, 224, 31, 255,
252, 3, 255, 255, 128, 113, 224, 112, 8, 212, 35, 207, 159, 254,
58, 248, 211, 248, 79, 24, 4, 100, 24, 95, 254, 61, 248, 223,
227, 16, 225, 58, 224, 17, 168, 97, 255, 248, 251, 227, 80, 225,
58, 96, 17, 168, 98, 127, 248, 239, 227, 64, 225, 57, 96, 17,
144, 98, 255, 248, 239, 227, 100, 140, 255, 132, 225, 128, 70,
161, 30, 134, 51, 255, 143, 190, 52, 14, 19, 142, 1, 27, 4, 120,
aliwaqas333 / install.sh
Created June 2, 2022 18:36 — forked from HoKim98/install.sh
[Ubuntu] OpenVSLAM automatic installation script
#!/bin/bash
cd ~/Desktop
# ---------------------------
## 0. Requirements
# ---------------------------
sudo apt-get update
sudo apt-get install -y build-essential git pkg-config cmake make \
gcc curl wget unzip \
aliwaqas333 / image_test.py
Created June 19, 2020 10:37
functions to test custom images
def singleImage(path, label=None, show=False):
    img = cv2.imread(path)  # OpenCV reads the file as a BGR uint8 array
    assert img is not None, "Image wasn't read properly"
    img = cv2.resize(img, (100, 100))
    img = torch.from_numpy(img)
    img = img.permute((2, 0, 1))  # model expects image to be of shape [3, 100, 100]
    img = img.unsqueeze(dim=0).float()  # convert single image to batch [1, 3, 100, 100]
    img = img.to('cuda')  # Using the same device as the model
    pred = model(img)
    _, preds = torch.max(pred, dim=1)  # index of the highest-scoring class

@torch.no_grad()
def evaluate(model, val_loader):
    model.eval()
    outputs = [model.validation_step(batch) for batch in val_loader]
    return model.validation_epoch_end(outputs)

def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
    history = []
    optimizer = opt_func(model.parameters(), lr=lr, momentum=0.9)  # use the lr and optimizer passed in (momentum assumes SGD)
    for epoch in range(epochs):
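        # (The gist preview cuts off here. A typical epoch loop, assumed rather
        #  than quoted from the original: train on each batch, then evaluate on
        #  the validation set and record the result.)
        model.train()
        for batch in train_loader:
            loss = model.training_step(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        result = evaluate(model, val_loader)
        history.append(result)
    return history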
aliwaqas333 / tocuda.py
Created June 19, 2020 10:28
Basic helper functions for using the GPU in PyTorch
def get_default_device():
    """Pick GPU if available, else CPU"""
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')

device = get_default_device()

def to_device(data, device):
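    # (The preview cuts off at the signature. A common body, assumed rather than
    #  quoted from the gist: recursively move tensors, and lists/tuples of
    #  tensors, onto the chosen device.)
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)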
aliwaqas333 / imageClassification.py
Created June 19, 2020 10:14
Image Classification base class
class ImageClassificationBase(nn.Module):
    def training_step(self, batch):
        images, labels = batch
        out = self(images)  # Generate predictions
        loss = F.cross_entropy(out, labels.long())  # Calculate loss
        return loss

    def validation_step(self, batch):
        images, labels = batch
        out = self(images)  # Generate predictions
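        # (The preview cuts off here. A typical continuation, assumed rather than
        #  quoted from the gist: report per-batch loss and accuracy so an epoch
        #  hook can average them over the validation set.)
        loss = F.cross_entropy(out, labels.long())
        _, preds = torch.max(out, dim=1)
        acc = torch.tensor(torch.sum(preds == labels).item() / len(preds))
        return {'val_loss': loss.detach(), 'val_acc': acc}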
aliwaqas333 / model.py
Created June 19, 2020 09:46
model creation
class MaskDetection(ImageClassificationBase):
    def __init__(self):
        super().__init__()
        self.network = nn.Sequential(
            nn.Conv2d(3, 100, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(100, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # output: 128 x 50 x 50 for the 100x100 inputs used above
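            # (The preview cuts off inside nn.Sequential. A plausible ending,
            #  assumed rather than quoted from the gist: flatten and map to the
            #  two classes, mask and no mask.)
            nn.Flatten(),
            nn.Linear(128 * 50 * 50, 2))

    def forward(self, xb):
        return self.network(xb)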
aliwaqas333 / data_loader_creation.py
Created June 19, 2020 09:27
To create a dataloader in PyTorch
from torch.utils.data.dataloader import DataLoader
from torchvision.utils import make_grid
batch_size = 32
train_dl = DataLoader(train_ds, batch_size*2, shuffle=True)
val_dl = DataLoader(val_ds, batch_size*2)
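make_grid is imported above but never used in the preview. A small sketch of its typical use, assuming matplotlib is available and that batches come out of train_dl as (images, labels) pairs:
import matplotlib.pyplot as plt

def show_batch(dl):
    """Plot one batch of images from a DataLoader (illustrative sketch)."""
    for images, labels in dl:
        grid = make_grid(images, nrow=8)   # tile the batch into one image
        plt.imshow(grid.permute(1, 2, 0))  # CHW -> HWC for matplotlib
        plt.axis('off')
        plt.show()
        break

show_batch(train_dl)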
aliwaqas333 / dataset_creation.py
Last active June 19, 2020 09:24
We will use the PyTorch Dataset class to create our dataset
from torch.utils.data.dataset import Dataset
from torchvision.transforms import Compose, ToTensor
class MaskDataset(Dataset):
    """Masked faces dataset
    0 = 'no mask'
    1 = 'mask'
    """
    def __init__(self, train_data):
        self.train_data = train_data
        self.transformations = Compose([
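            # (The preview cuts off inside Compose. A minimal continuation, assumed
            #  rather than quoted from the gist: convert the numpy image to a
            #  tensor, and implement the two methods every Dataset needs.)
            ToTensor()])

    def __len__(self):
        return len(self.train_data)

    def __getitem__(self, idx):
        image, label = self.train_data[idx]
        return self.transformations(image), label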
aliwaqas333 / data_extraction.py
Created June 19, 2020 09:08
This class is used to extract images from various directories
REBUILD_DATA = True

if REBUILD_DATA:  # if we are running it for the first time
    data_path = Path('F:/FILES/AI/face-mask-dataset/')
    maskPath = data_path/'dataset1/AFDB_masked_face_dataset'
    maskPath2 = data_path/'dataset2/webface_masked'
    nonMaskPath = data_path/'dataset1/AFDB_face_dataset'
    path_dirs = [[maskPath, 1], [nonMaskPath, 0]]  # path and label
    if not os.path.exists(data_path):
        raise Exception("The data path doesn't exist")