Skip to content

Instantly share code, notes, and snippets.

View SP2203's full-sized avatar
🏠
Working from home

Swaraj Patil SP2203

🏠
Working from home
View GitHub Profile
@SP2203
SP2203 / Lenet5.py
Last active May 23, 2021 11:12
LeNet architecture implementation using the Keras functional API
# Imports
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.layers import Dense, Flatten, Input
from keras.layers import Conv2D, MaxPooling2D
@SP2203
SP2203 / onnx_check.py
Last active April 19, 2022 17:00
Onnx Version Check
import onnx

# Report which version of the onnx package is installed.
onnx_version = onnx.__version__
print(onnx_version)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Input, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Input, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Input, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
# Load the pretrained sentence-embedding model and its tokenizer from the
# Hugging Face hub.
from transformers import AutoModel, AutoTokenizer

model_name = "sentence-transformers/all-MiniLM-L6-v2"
# do_lower_case=True: lowercase input text before tokenization.
tokenizer = AutoTokenizer.from_pretrained(model_name, do_lower_case=True)
model = AutoModel.from_pretrained(model_name)
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
# Load only the 'Overview' text column of the IMDB top-1000 dataset;
# these overviews are the texts fed to the model below.
df = pd.read_csv('./imdb_top_1000.csv', usecols=['Overview'])
total_samples = len(df)  # number of rows available for the benchmark
import onnxruntime
import numpy as np

# Build an ONNX Runtime inference session restricted to the CPU backend.
# NOTE(review): `export_model_path` is defined elsewhere in the original
# script — it must point at the exported .onnx model file.
sess_options = onnxruntime.SessionOptions()
cpu_providers = ['CPUExecutionProvider']
session = onnxruntime.InferenceSession(
    export_model_path,
    sess_options,
    providers=cpu_providers,
)

# Per-sample inference times, filled in by the timing loop that follows.
latency = []
# NOTE(review): `torch` is not imported in this visible chunk — presumably
# imported earlier in the full script; verify before running standalone.
device = torch.device("cuda")
# Set model to inference mode, which is required before exporting
# the model because some operators behave differently in
# inference and training mode.
model.eval()
# Move the model's parameters and buffers onto the GPU.
model.to(device)
# Row count of the previously loaded DataFrame `df`.
total_samples = len(df)
import onnxruntime
import numpy as np

# Build an ONNX Runtime inference session that executes on the GPU.
# NOTE(review): `export_model_path` is defined elsewhere in the original
# script — it must point at the exported .onnx model file.
sess_options = onnxruntime.SessionOptions()
gpu_providers = ['CUDAExecutionProvider']
session = onnxruntime.InferenceSession(
    export_model_path,
    sess_options,
    providers=gpu_providers,
)

# Per-sample inference times, filled in by the timing loop that follows.
latency = []