from simplegan.gan import Pix2Pix
## Create an object
gan = Pix2Pix() ## Customize the model by specifying parameters for Pix2Pix object
## Load the training and testing data
train_ds, test_ds = gan.load_data(use_edges2handbags = True, batch_size = 32)
## Get samples from training data
train_samples = gan.get_sample(data= train_ds, n_samples = 2)
## Get samples from testing data
test_samples = gan.get_sample(data = test_ds, n_samples = 2)
## train the model
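## A hedged sketch of the training call: simplegan models expose a fit() method,
## but the exact keyword arguments and epoch count used here are assumptions.
gan.fit(train_ds = train_ds, epochs = 100)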
from simplegan.autoencoder import ConvolutionalAutoencoder
## Create an object
autoenc = ConvolutionalAutoencoder() ## Modify the architecture of the model by specifying parameters
## Load the MNIST data
train_ds, test_ds = autoenc.load_data(use_mnist = True)
## Get samples from the loaded training data to view them
train_samples = autoenc.get_sample(data = train_ds, n_samples = 2)
## Get samples from the loaded testing data to view them
test_samples = autoenc.get_sample(data = test_ds, n_samples = 2)
## Train the model
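## A hedged sketch of the training call: simplegan autoencoders also expose fit(),
## but the exact keyword arguments and epoch count used here are assumptions.
autoenc.fit(train_ds = train_ds, epochs = 10)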
import pyaudio
import wave
from keras.models import load_model
import librosa
import numpy as np
import warnings
import osascript
import webbrowser
import os
import cv2
cam = cv2.VideoCapture(0)
cv2.namedWindow("take a picture")
img_counter = 0
while True:
    ret, frame = cam.read()
    if not ret:
        ## stop when the camera fails to return a frame
        break
    cv2.imshow("take a picture", frame)
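    ## A hedged sketch of the rest of the capture loop: the gist is truncated here,
    ## so the key handling and the frame_<n>.png filename below are assumptions,
    ## following the common OpenCV pattern that img_counter suggests.
    k = cv2.waitKey(1) & 0xFF
    if k == 27:   ## ESC quits
        break
    if k == 32:   ## SPACE saves the current frame
        cv2.imwrite("frame_{}.png".format(img_counter), frame)
        img_counter += 1
cam.release()
cv2.destroyAllWindows()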
import os
## Launch the system process monitor from Python
os.system('top')
import webbrowser
## Open Google in the default browser
webbrowser.open('http://google.com')
import osascript
## Read the current macOS output volume and raise it by 20, capped at 100
vol = osascript.osascript('get volume settings')
cur_vol = int(vol[1].split(':')[1].split(',')[0])
cur_vol = cur_vol + 20
if cur_vol > 100:
    cur_vol = 100
osascript.osascript("set volume output volume " + str(cur_vol))
import osascript
## Read the current macOS output volume and lower it by 20, floored at 0
vol = osascript.osascript('get volume settings')
cur_vol = int(vol[1].split(':')[1].split(',')[0])
cur_vol = cur_vol - 20
if cur_vol < 0:
    cur_vol = 0
osascript.osascript("set volume output volume " + str(cur_vol))
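## The two volume snippets above differ only in the sign of the step; a hedged
## refactor into a single helper (the function name is illustrative, not from the gist):
def change_volume(delta):
    vol = osascript.osascript('get volume settings')
    cur_vol = int(vol[1].split(':')[1].split(',')[0])
    cur_vol = max(0, min(100, cur_vol + delta))
    osascript.osascript("set volume output volume " + str(cur_vol))

change_volume(20)   ## volume up
change_volume(-20)  ## volume down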
@tf.function
def train_step(images, labels):
    ## Record the forward pass and loss on the gradient tape
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    ## Backpropagate and update the model weights
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    ## Accumulate the running loss and accuracy metrics
    train_loss(loss)
    train_accuracy(labels, predictions)
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from tensorflow.keras import Model
# Create training and testing datasets from tensors
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(1)
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(1)
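## train_step above refers to model, loss_object, optimizer, train_loss and
## train_accuracy defined elsewhere in the gist; a minimal sketch of those
## definitions, assuming the standard tf.keras losses, optimizer and metrics:
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name = 'train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name = 'train_accuracy')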
# CNN Model
class Command(Model):
    def __init__(self):