Quang Nguyen (quangnhat185)
from openai import OpenAI
import os
import base64

api_key = os.getenv('OPENAI_API_KEY')
client = OpenAI(api_key=api_key)

# Function to encode an image file as a base64 string
# (the gist is cut off after this comment; the body below is the standard completion)
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
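The gist is truncated before any API call; a minimal sketch of how the client and encoder above are typically combined for a vision request (the model name and image path are assumptions, not from the gist):

base64_image = encode_image("sample.jpg")  # hypothetical image path
response = client.chat.completions.create(
    model="gpt-4o",  # assumed vision-capable model, not from the gist
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
        ],
    }],
)
print(response.choices[0].message.content)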
quangnhat185 / infer.py
Created August 25, 2023 08:49
trt_infer
def infer(self, img):
    # Copy the flattened image into the pinned host input buffer
    self.inputs[0]['host'] = np.ravel(img)
    # Transfer input data to the GPU
    for inp in self.inputs:
        cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream)
    # Run inference; the call is cut off in this view, so the remaining
    # arguments below follow the standard TensorRT async-execution pattern
    self.context.execute_async(batch_size=1,
                               bindings=self.bindings,
                               stream_handle=self.stream.handle)
    # Transfer predictions back to the host and wait for the stream
    for out in self.outputs:
        cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream)
    self.stream.synchronize()
    return [out['host'] for out in self.outputs]
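A brief usage sketch (the wrapper name and preprocessing are assumptions; a TensorRT engine expects exactly the input shape it was built with):

# img: numpy array already resized and normalized to the engine's input shape
# trt_model: hypothetical instance of the class that owns infer()
outputs = trt_model.infer(img)  # list of flattened output arrays, one per binding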
quangnhat185 / pi_save_system.py
Created August 25, 2021 13:59
This script extracts memory and CPU usage on a Raspberry Pi
import psutil
import time
import logging
import os
import numpy as np

try:
    # Clear previous log files if desired (left commented out in the gist)
    ## os.remove("./Data/Pi_ML")
    ## os.remove("./Data/Pi_CV")
    pass
except OSError:
    # The gist is cut off here; an except clause is required for the try block to parse
    pass
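The sampling loop itself is cut off in this view; a minimal sketch of how psutil can log memory and CPU usage (the log path, duration, and interval are assumptions, not from the gist):

LOG_PATH = "./Data/pi_usage.log"  # assumed path
for _ in range(60):  # assumed duration: 60 one-second samples
    cpu = psutil.cpu_percent(interval=1.0)   # CPU utilisation over the last second, in percent
    mem = psutil.virtual_memory().percent    # RAM in use, in percent
    with open(LOG_PATH, "a") as f:
        f.write("%.0f,%.1f,%.1f\n" % (time.time(), cpu, mem))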
# Return the heading angle from the machine learning model
angle = np.radians(max(model.predict(image)))
'''
speed_slider: lowest speed of each motor
angle: heading angle corresponding to the current frame
angle_last: heading angle corresponding to the previous frame
kP, kD, kI: P-I-D coefficients defined by the user
steering_bias_slider: bias coefficient between the two motors
'''
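The update that consumes these parameters is truncated here; a sketch of the kind of PID steering computation the docstring describes (variable names beyond the docstring, and the motor clamping, are assumptions):

# PID steering update (a sketch, not the gist's exact code)
angle_sum += angle  # accumulated error for the integral term
pid = angle * kP + (angle - angle_last) * kD + angle_sum * kI
angle_last = angle
# Split the correction across the two motors, clamped to [0, 1]
left_motor = max(min(speed_slider + pid + steering_bias_slider, 1.0), 0.0)
right_motor = max(min(speed_slider - pid - steering_bias_slider, 1.0), 0.0)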
# Pre-process the input image and predict with the model
def predict_from_model(image, model, labels):
    image = cv2.resize(image, (80, 80))
    image = np.stack((image,) * 3, axis=-1)  # replicate the grayscale channel to three channels
    prediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis, :]))])
    return prediction
fig = plt.figure(figsize=(15, 3))
cols = len(crop_characters)
grid = gridspec.GridSpec(ncols=cols, nrows=1, figure=fig)
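The loop that fills this grid is cut off in this view; a sketch of how each cropped character could be drawn with its predicted label (assumes crop_characters is a list of grayscale character crops and reuses predict_from_model above):

for i, character in enumerate(crop_characters):
    fig.add_subplot(grid[i])
    title = np.array2string(predict_from_model(character, model, labels))
    plt.title(title.strip("'[]"), fontsize=20)
    plt.axis(False)
    plt.imshow(character, cmap='gray')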
# Load the model architecture, weights, and labels
with open('MobileNets_character_recognition.json', 'r') as json_file:
    loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
model.load_weights("License_character_recognition_weight.h5")
print("[INFO] Model loaded successfully...")

labels = LabelEncoder()
labels.classes_ = np.load('license_character_classes.npy')
# Save the model architecture as a JSON file
model_json = model.to_json()
with open("MobileNets_character_recognition.json", "w") as json_file:
    json_file.write(model_json)
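The matching weights file is loaded in the snippet above but its save is not shown here; a one-line sketch using that same filename (the save call itself is an assumption):

model.save_weights("License_character_recognition_weight.h5")  # filename taken from the loading snippet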
BATCH_SIZE = 64
my_checkpointer = [
    EarlyStopping(monitor='val_loss', patience=5, verbose=0),
    ModelCheckpoint(filepath="License_character_recognition.h5", verbose=1, save_weights_only=True)
]

result = model.fit(image_gen.flow(trainX, trainY, batch_size=BATCH_SIZE),
                   steps_per_epoch=len(trainX) // BATCH_SIZE,
                   validation_data=(testX, testY),
                   # the call is cut off in this view; the epoch count below is an assumption
                   epochs=30,
                   callbacks=my_checkpointer)
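A common follow-up (a sketch, not from the gist) is to plot the loss curves from the History object returned by fit:

plt.figure(figsize=(8, 4))
plt.plot(result.history['loss'], label='train loss')
plt.plot(result.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.legend()
plt.show()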
# Create our model with the pre-trained MobileNetV2 architecture from ImageNet
def create_model(lr=1e-4, decay=1e-4/25, training=False, output_shape=y.shape[1]):
    baseModel = MobileNetV2(weights="imagenet",
                            include_top=False,
                            input_tensor=Input(shape=(80, 80, 3)))
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(3, 3))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(128, activation="relu")(headModel)
    # The gist is cut off here; the rest is a standard classification head
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(output_shape, activation="softmax")(headModel)
    model = Model(inputs=baseModel.input, outputs=headModel)
    if training:
        model.compile(loss="categorical_crossentropy",
                      optimizer=Adam(lr=lr, decay=decay),
                      metrics=["accuracy"])
    return model
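A brief usage sketch tying this to the training snippet above (the flag values are assumptions):

model = create_model(training=True, output_shape=y.shape[1])
# then train with model.fit(...) as in the checkpointing snippet above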