MOSES OLAFENWA OlafenwaMoses

💭 Coding the future....
OlafenwaMoses / ip_camera_with_deepstack.py
Created September 29, 2021 09:59
Sample code to detect objects in an IP camera stream with DeepStack
import cv2
# Import DeepStack's Python SDK
from deepstack_sdk import ServerConfig, Detection

# Function to draw detections and object names on camera frames
def draw_detections(img, detections):
    for detection in detections:
        output_font_scale = 0.8e-3 * img.shape[0]
        label = detection.label
        # Draw the bounding box and the label above it
        cv2.rectangle(img, (detection.x_min, detection.y_min),
                      (detection.x_max, detection.y_max), (0, 255, 0), 2)
        cv2.putText(img, label, (detection.x_min, detection.y_min - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, output_font_scale, (0, 255, 0), 2)
    return img
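The gist preview cuts off the part that actually reads frames from the camera and runs detection. Below is a minimal sketch of how that loop could look, assuming DeepStack runs locally on port 80 (as in the other snippets on this page) and reusing the detectObject(image=..., output=...) call shown in the fedex.jpg example further down; the camera URL is a placeholder, and writing each frame to disk is just one simple way to hand it to the SDK.

# Connect to the local DeepStack server (assumed to be at http://localhost:80,
# as in the other snippets on this page)
config = ServerConfig("http://localhost:80")
detector = Detection(config=config)

# Open the IP camera stream (placeholder URL, substitute your camera's address)
camera = cv2.VideoCapture("rtsp://camera-ip/stream")

while True:
    ret, frame = camera.read()
    if not ret:
        break
    # Hand the frame to DeepStack and draw the returned detections on it
    cv2.imwrite("frame.jpg", frame)
    detections = detector.detectObject(image="frame.jpg", output="frame_detected.jpg")
    frame = draw_detections(frame, detections)
    cv2.imshow("IP Camera", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()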
Sample output from the logo-detection snippet below (openlogo model on fedex.jpg):
Name: fedex
Confidence: 0.83026457
x_min: 385
x_max: 524
y_min: 135
y_max: 183
-----------------------
Name: fedex
Confidence: 0.8716001
x_min: 278
from deepstack_sdk import Detection, ServerConfig
import os

config = ServerConfig("http://localhost:80")
detector = Detection(config=config, name="openlogo")

detections = detector.detectObject(image="fedex.jpg", output="fedex_new.jpg")

for detection in detections:
    print("Name: {}".format(detection.label))
OlafenwaMoses / feature_mapping_orb_opencv.py
Created August 31, 2019 01:05
Feature Mapping using OpenCV's ORB detector and Brute-force Matching
import cv2

image1 = cv2.imread("1.jpg")  # Load the first image in full color
image2 = cv2.imread("2.jpg")  # Load the second image in full color

image1_gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)  # Grayscale version of the first image
image2_gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)  # Grayscale version of the second image
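The preview ends before the feature detection itself. Here is a sketch of how the ORB detection and brute-force matching named in the gist title typically continue with OpenCV; the number of matches drawn and the output filename are arbitrary choices.

# Detect ORB keypoints and compute descriptors on the grayscale images
orb = cv2.ORB_create()
keypoints1, descriptors1 = orb.detectAndCompute(image1_gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(image2_gray, None)

# Brute-force matching with Hamming distance, the metric ORB descriptors use
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(descriptors1, descriptors2), key=lambda m: m.distance)

# Draw the best matches between the two color images and save the result
result = cv2.drawMatches(image1, keypoints1, image2, keypoints2, matches[:30], None, flags=2)
cv2.imwrite("matches.jpg", result)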
from imageai.Detection.Custom import CustomObjectDetection
import os
execution_path = os.getcwd()
detector = CustomObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(detection_model_path="detection_model-ex-028--loss-8.723.h5")
detector.setJsonPath(configuration_json="detection_config.json")
detector.loadModel()
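This snippet ends right after loadModel(). With ImageAI's CustomObjectDetection, detection itself usually follows along these lines; the filenames are placeholders and the probability threshold is an arbitrary choice.

# Detect the custom objects in an image and save an annotated copy
detections = detector.detectObjectsFromImage(input_image="image.jpg",
                                             output_image_path="image-detected.jpg",
                                             minimum_percentage_probability=30)

# Each detection is a dict holding the object name, confidence and box coordinates
for detection in detections:
    print(detection["name"], " : ", detection["percentage_probability"], " : ", detection["box_points"])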
from imageai.Detection.Custom import DetectionModelTrainer
trainer = DetectionModelTrainer()
trainer.setModelTypeAsYOLOv3()
trainer.setDataDirectory(data_directory="apple_dataset")
trainer.setTrainConfig(object_names_array=["apple", "damaged_apple"], batch_size=8, num_experiments=50, train_from_pretrained_model="pretrained-yolov3.h5")
trainer.trainModel()
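For context on what setDataDirectory expects: ImageAI's YOLOv3 trainer reads a Pascal VOC style dataset split into train and validation folders of images and XML annotations. The layout sketched below follows ImageAI's documentation; the helper that creates it is only illustrative.

import os

# Expected layout for data_directory="apple_dataset":
#   apple_dataset/train/images            training images
#   apple_dataset/train/annotations       matching Pascal VOC .xml files
#   apple_dataset/validation/images       validation images
#   apple_dataset/validation/annotations  matching Pascal VOC .xml files
for split in ("train", "validation"):
    for subfolder in ("images", "annotations"):
        os.makedirs(os.path.join("apple_dataset", split, subfolder), exist_ok=True)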
Sample JSON response from DeepStack's /v1/vision/detection endpoint, as returned to the C#, Node.js, and Python client snippets below (truncated in the preview):
{'success': True,
'predictions': [{'y_min': 197, 'x_max': 396, 'x_min': 254, 'label': 'person', 'y_max': 394, 'confidence': 0.9963659},
{'y_min': 253, 'x_max': 349, 'x_min': 47, 'label': 'person', 'y_max': 563, 'confidence': 0.9960499},
{'y_min': 205, 'x_max': 764, 'x_min': 617, 'label': 'person', 'y_max': 381, 'confidence': 0.9950463},
{'y_min': 269, 'x_max': 1001, 'x_min': 647, 'label': 'person', 'y_max': 559, 'confidence': 0.9885887},
{'y_min': 233, 'x_max': 346, 'x_min': 184, 'label': 'person', 'y_max': 454, 'confidence': 0.97972304},
{'y_min': 194, 'x_max': 1001, 'x_min': 894, 'label': 'person', 'y_max': 285, 'confidence': 0.9065306},
{'y_min': 74, 'x_max': 278, 'x_min': 229, 'label': 'person', 'y_max': 243, 'confidence': 0.6799101},
{'y_min': 224, 'x_max': 897, 'x_min': 636, 'label': 'person', 'y_max': 455, 'confidence': 0.5133961},
{'y_min': 501, 'x_max': 353,
using System;
using System.IO;
using System.Net.Http;
using System.Threading.Tasks;
using Newtonsoft.Json;
namespace appone
{
const request = require("request")
const fs = require("fs")
image_stream = fs.createReadStream("test-image.jpg")
var form = {"image": image_stream}
// Post the image to the detection endpoint and print the parsed JSON response
request.post({url:"http://localhost:80/v1/vision/detection", formData:form}, function(err, res, body){
    response = JSON.parse(body)
    console.log(response)
})
import requests
image_data = open("test-image.jpg","rb").read()
response = requests.post("http://localhost:80/v1/vision/detection",files={"image":image_data}).json()
print(response)
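Building on the Python client above, here is a short sketch of how the predictions in the response could be consumed; the field names (label, confidence, x_min, y_min, x_max, y_max) come from the sample response shown earlier.

import requests

image_data = open("test-image.jpg", "rb").read()
response = requests.post("http://localhost:80/v1/vision/detection",
                         files={"image": image_data}).json()

# Loop over the returned predictions and print one line per detected object
for prediction in response["predictions"]:
    print("{} ({:.2f}): ({}, {}) to ({}, {})".format(
        prediction["label"], prediction["confidence"],
        prediction["x_min"], prediction["y_min"],
        prediction["x_max"], prediction["y_max"]))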