TensorFlow Everywhere North America - scripts and snippets
@leigh-johnson · Last active February 24, 2021 05:12
###
# Load AutoML evaluation metrics via Python API client
###
from google.cloud import automl
from google.protobuf.json_format import MessageToDict
import pandas as pd
project_id = "your-project-id"
model_id = "your-model-id" # look for modelId= in the GCP console url
# Initialize AutoMl API Client
client = automl.AutoMlClient()
# Get the full path of the model
model_full_id = client.model_path(project_id, "us-central1", model_id)
# Get all evaluation metrics for model
eval_metrics = client.list_model_evaluations(parent=model_full_id, filter="")
# Deserialize from protobuf to dict
eval_metrics = [MessageToDict(e._pb) for e in eval_metrics]
# Initialize DataFrame
df = pd.DataFrame(eval_metrics)
df
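A hypothetical follow-up sketch for object-detection models: the per-IoU-threshold metrics sit in a nested field of each evaluation row. The field names below (imageObjectDetectionEvaluationMetrics, boundingBoxMetricsEntries, iouThreshold, meanAveragePrecision) are an assumption based on the camelCase keys MessageToDict produces; inspect df.columns first for other model types.
# flatten the nested per-threshold detection metrics (assumes an
# object-detection model; field names vary by model type)
detection_metrics = df["imageObjectDetectionEvaluationMetrics"].dropna()
entries = pd.json_normalize(detection_metrics.iloc[0].get("boundingBoxMetricsEntries", []))
print(entries[["iouThreshold", "meanAveragePrecision"]])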
###
# Calculate what fraction of each detection box falls inside an area of interest
###
import numpy as np
import octoprint_nanny.types

def percent_intersection(
    self,
    prediction: octoprint_nanny.types.BoundingBoxPrediction,
    area_of_interest: np.ndarray,
) -> np.ndarray:
    """
    Returns the fraction of each detection box's area that intersects
    area_of_interest, normalized between 0 and 1
    https://stackoverflow.com/questions/25349178/calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation
    """
    detection_boxes = prediction.detection_boxes
    # initialize array of zeroes, one entry per detection box
    aou = np.zeros(len(detection_boxes))
    # for each bounding box, calculate the intersection over the box's area
    for i, box in enumerate(detection_boxes):
        # determine the coordinates of the intersection rectangle
        x_left = max(area_of_interest[0], box[0])
        y_top = max(area_of_interest[1], box[1])
        x_right = min(area_of_interest[2], box[2])
        y_bottom = min(area_of_interest[3], box[3])
        # boxes do not intersect, area is 0
        if x_right < x_left or y_bottom < y_top:
            aou[i] = 0.0
            continue
        # the intersection of two axis-aligned bounding boxes is always an
        # axis-aligned bounding box
        intersection_area = (x_right - x_left) * (y_bottom - y_top)
        # compute the area of the detection box
        box_area = (box[2] - box[0]) * (box[3] - box[1])
        aou[i] = intersection_area / box_area
    return aou
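A minimal usage sketch with hypothetical coordinates; SimpleNamespace stands in for octoprint_nanny.types.BoundingBoxPrediction, and since self is unused by the function, None is passed for it.
from types import SimpleNamespace

# area of interest and detection boxes as [x1, y1, x2, y2], hypothetical values
area_of_interest = np.array([0.25, 0.25, 0.75, 0.75])
prediction = SimpleNamespace(
    detection_boxes=np.array([
        [0.30, 0.30, 0.60, 0.60],  # fully inside the area of interest
        [0.80, 0.80, 0.95, 0.95],  # fully outside
    ])
)
print(percent_intersection(None, prediction, area_of_interest))  # [1. 0.]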
diff --git a/src/providers/activeLearning/objectDetection.ts b/src/providers/activeLearning/objectDetection.ts
index 196db45..a8dff06 100755
--- a/src/providers/activeLearning/objectDetection.ts
+++ b/src/providers/activeLearning/objectDetection.ts
@@ -151,6 +151,8 @@ export class ObjectDetection {
const batched = tf.tidy(() => {
if (!(img instanceof tf.Tensor)) {
img = tf.browser.fromPixels(img);
+ // model requires float32 input
+ img = tf.cast(img, 'float32');
}
// Reshape to a single-element batch so we can pass it to executeAsync.
return img.expandDims(0);
@@ -166,7 +168,8 @@ export class ObjectDetection {
const result = await this.model.executeAsync(batched) as tf.Tensor[];
const scores = result[0].dataSync() as Float32Array;
- const boxes = result[1].dataSync() as Float32Array;
+ // tf.image.nonMaxSuppressionAsync() expects tf.Tensor as input
+ const boxes = result[1].dataSync();
// clean the webgl tensors
batched.dispose();
@@ -177,10 +180,8 @@ export class ObjectDetection {
const prevBackend = tf.getBackend();
// run post process in cpu
tf.setBackend("cpu");
- const indexTensor = tf.tidy(() => {
- const boxes2 = tf.tensor2d(boxes, [result[1].shape[1], result[1].shape[3]]);
- return tf.image.nonMaxSuppression(boxes2, maxScores, maxNumBoxes, 0.5, 0.5);
- });
+ const boxes2d = tf.tensor2d(boxes, [result[1].shape[0], result[1].shape[1]]);
+ const indexTensor = await tf.image.nonMaxSuppressionAsync(boxes2d, maxScores, maxNumBoxes, 0.5, 0.5);
const indexes = indexTensor.dataSync() as Float32Array;
indexTensor.dispose();
@@ -188,7 +189,9 @@ export class ObjectDetection {
// restore previous backend
tf.setBackend(prevBackend);
- return this.buildDetectedObjects(width, height, boxes, maxScores, indexes, classes);
+ // this.buildDetectedObjects() expects Float32Array input
+ const fboxes = boxes as Float32Array;
+ return this.buildDetectedObjects(width, height, fboxes, maxScores, indexes, classes);
}
###
# Convert a VoTT CSV export to the format expected by AutoML Vision
###
import cv2
import pandas as pd

base_path = '/path/to/vott-csv-export/'
LOG_INTERVAL = 2000

# load the VoTT CSV export (the filename below is a placeholder)
df = pd.read_csv(f'{base_path}your-vott-export.csv')

# convert absolute pixel coordinates to relative coordinates in [0, 1] range
for index, row in df.iterrows():
    if index % LOG_INTERVAL == 0:
        print(f'finished {index} / {len(df)}')
    filename = row['image_path'].split('/')[-1]
    img = cv2.imread(f'{base_path}{filename}')
    height, width, channels = img.shape
    df.at[index, 'x1_n'] = row['x1'] / width
    df.at[index, 'x2_n'] = row['x2'] / width
    df.at[index, 'y1_n'] = row['y1'] / height
    df.at[index, 'y2_n'] = row['y2'] / height

# replace relative image paths with a Google Storage bucket path
df['set'] = 'UNASSIGNED'
df['gs_path'] = 'gs://bucket-name/path/to/upload/' + df['image_path'].str.split('/').str[-1]

# write a headerless CSV with the columns expected by AutoML Vision
# the empty "none" columns are required padding for boxes defined by 2 vertices
df['none'] = ''
df.to_csv('/path/to/normalized-export.csv',
    columns=['set', 'gs_path', 'label', 'x1_n', 'y1_n', 'none', 'none', 'x2_n', 'y2_n', 'none', 'none'],
    header=False,
    index=False,
)
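For reference, each row of the normalized CSV should look roughly like this (illustrative bucket path, filename, and label; the empty fields are the unused vertex slots):
UNASSIGNED,gs://bucket-name/path/to/upload/frame_0001.jpg,failure,0.12,0.34,,,0.56,0.78,,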