@sachinkmohan
Last active March 3, 2022 13:21
Incorrect shape after inference in TF and TensorRT
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
from keras_loss_function.keras_ssd_loss_tf2 import SSDLoss # added for TF2.0
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
## imports used for pruning
import tensorflow_model_optimization as tfmot
import numpy as np
import cv2
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print(physical_devices)
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
img_height = 300 # Height of the input images
img_width = 480 # Width of the input images
img_channels = 3 # Number of color channels of the input images
intensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
intensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
n_classes = 5 # Number of positive classes
scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.
aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes
two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1
steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended
offsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled
normalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size
# TODO: Set the path to the `.h5` file of the model to be loaded.
model_path = './saved_models/ssd7_base_epoch-30_loss-2.0457_val_loss-2.2370.h5'
# An SSDLoss object needs to be created so it can be passed to the model loader.
adam = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
                                               'compute_loss': ssd_loss.compute_loss})
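# Sanity check before inference: the loaded SSD7 model should report an output
# shape of (None, 11692, 18), matching the prediction shape printed below.
print(model.output_shape)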
# Read a dummy image
im2 = cv2.imread('./1478899365487445082.jpg')
#im2 = image.img_to_array(im2)
# Add a batch dimension so the array matches the model's (batch, H, W, C) input
im3 = np.expand_dims(im2, axis=0)
print(im3.shape)
# Make a prediction
y_pred = model.predict(im3)
print(y_pred.shape) # (1, 11692, 18)
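# The raw (1, 11692, 18) output still contains every anchor box. A minimal
# decoding sketch using the `decode_detections` import above; the threshold
# values here are illustrative assumptions, not taken from the original gist:
y_pred_decoded = decode_detections(y_pred,
                                   confidence_thresh=0.5,
                                   iou_threshold=0.45,
                                   top_k=200,
                                   normalize_coords=normalize_coords,
                                   img_height=img_height,
                                   img_width=img_width)
print(y_pred_decoded[0])  # per-image list of [class_id, confidence, xmin, ymin, xmax, ymax]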
# --- TensorRT inference (run as a separate script/notebook) ---
import numpy as np
import cv2
BATCH_SIZE = 1
PRECISION = np.float32
# If this cell errors, restart the notebook (possibly the whole machine) and
# do not run anything that imports or uses TensorFlow/PyTorch alongside TensorRT.
from onnx_helper import ONNXClassifierWrapper
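# NOTE: [BATCH_SIZE, 1000] below is the output size from NVIDIA's ResNet
# quick-start example that ships with ONNXClassifierWrapper; the wrapper
# allocates its host output buffer from this argument, so the prediction
# comes back as (1, 1000) even though the SSD7 engine actually produces
# (1, 11692, 18). This is the shape mismatch this gist demonstrates.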
trt_model = ONNXClassifierWrapper("ssd7keras_od.trt", [BATCH_SIZE, 1000], target_dtype=PRECISION)
im = cv2.imread('1478899365487445082.jpg')
# Add a batch dimension, as in the TF script above
im3 = np.expand_dims(im, axis=0)
print(im3.shape) # (1, 300, 480, 3)
y_pred=trt_model.predict(im3)
print(y_pred.shape) # (1, 1000)
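# A minimal fix sketch: size the output buffer to match the SSD7 engine's real
# output, (1, 11692, 18), and feed float input to match target_dtype. This
# assumes the .trt engine preserves the Keras output shape, e.g. one built
# along the lines of tf2onnx + trtexec (paths here are illustrative):
#   python -m tf2onnx.convert --saved-model ssd7_saved_model --output ssd7keras_od.onnx
#   trtexec --onnx=ssd7keras_od.onnx --saveEngine=ssd7keras_od.trt
trt_model_fixed = ONNXClassifierWrapper("ssd7keras_od.trt",
                                        [BATCH_SIZE, 11692, 18],
                                        target_dtype=PRECISION)
y_pred_fixed = trt_model_fixed.predict(im3.astype(PRECISION))
print(y_pred_fixed.shape)  # expected: (1, 11692, 18)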