Considering that:
- https://github.com/whoever/whatever.git is the address of the original repo
- https://github.com/youraccount/whatever.git is the address of your fork
clone your fork if you haven't already (a minimal sketch follows).
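A minimal shell sketch of that setup, assuming the two URLs above; the remote name "upstream" is only the usual convention, not something Git requires:

git clone https://github.com/youraccount/whatever.git   # clone your fork
cd whatever
git remote add upstream https://github.com/whoever/whatever.git   # register the original repo as "upstream"
git remote -v   # verify that origin (your fork) and upstream (the original repo) are both configured

From here, keeping the fork in sync is a matter of fetching upstream and merging or rebasing its default branch into yours.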
#!/bin/bash
###################################################################################################
# Bash script to import CMake project from Github and create local Eclipse CDT compatible project #
# #
# The CMake Eclipse CDT strategy was taken from here: https://stackoverflow.com/a/9663686/3055724 #
# After running this script you need to import the project in Eclipse: #
# File -> Import -> C/C++ -> Existing Code as Makefile Project -> Next #
# Tested in Eclipse Oxygen and Luna #
###################################################################################################
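The body of the script is not reproduced above, so the following is only a sketch of the kind of steps such a script might perform, under the assumption that it clones the repository and runs CMake with the plain Unix Makefiles generator so the result can then be imported via File -> Import -> C/C++ -> Existing Code as Makefile Project. REPO_URL and PROJECT_DIR are placeholders; the linked Stack Overflow answer describes the underlying CMake/Eclipse strategy itself.

set -e

# placeholders: the CMake project to import and the directory to clone it into
REPO_URL="https://github.com/whoever/whatever.git"
PROJECT_DIR="whatever"

# fetch the sources from GitHub
git clone "$REPO_URL" "$PROJECT_DIR"

# generate Makefiles in a build/ subdirectory so Eclipse can drive the build
mkdir -p "$PROJECT_DIR/build"
cd "$PROJECT_DIR/build"
cmake -G "Unix Makefiles" ..

# afterwards, in Eclipse: File -> Import -> C/C++ -> Existing Code as Makefile Project,
# pointing at the cloned $PROJECT_DIR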
// inverse kinematics
// helper function, calculates angle theta1 (for the YZ-plane)
// e, f, re, rf and pi are assumed to be defined elsewhere in the sketch (delta robot geometry constants)
int delta_calcAngleYZ(float x0, float y0, float z0, float &theta) {
  float y1 = -0.5 * 0.57735 * f;   // f/2 * tan(30 deg)
  y0 -= 0.5 * 0.57735 * e;         // shift center to edge
  // z = a + b*y
  float a = (x0*x0 + y0*y0 + z0*z0 + rf*rf - re*re - y1*y1) / (2*z0);
  float b = (y1 - y0) / z0;
  // discriminant
  float d = -(a + b*y1)*(a + b*y1) + rf*(b*b*rf + rf);
  if (d < 0) return -1;                           // non-existing point
  float yj = (y1 - a*b - sqrt(d)) / (b*b + 1);    // choose the outer intersection point
  float zj = a + b*yj;
  theta = 180.0 * atan(-zj / (y1 - yj)) / pi + ((yj > y1) ? 180.0 : 0.0);
  return 0;
}
void setup() {
  Serial.begin(9600);
}

void loop() {
  // 4-byte frames for a serial relay module: 0xA0, relay number, state, checksum (sum of the first three bytes)
  byte close[] = {0xA0, 0x01, 0x01, 0xA2};   // switch relay 1 on
  Serial.write(close, sizeof(close));
  delay(2000);
  byte open[] = {0xA0, 0x01, 0x00, 0xA1};    // switch relay 1 off
  Serial.write(open, sizeof(open));
  delay(2000);
}
import cv2
import numpy as np

def format_yolov5(source):
    # pad the image into a square big enough to hold it
    row, col, _ = source.shape          # shape is (rows, cols, channels)
    _max = max(col, row)
    resized = np.zeros((_max, _max, 3), np.uint8)
    resized[0:row, 0:col] = source
    # resize to 640x640, normalize to [0, 1[ and swap the Red and Blue channels
    result = cv2.dnn.blobFromImage(resized, 1/255.0, (640, 640), swapRB=True)
    return result
#include <opencv2/opencv.hpp>

int main(int, char **)
{
    // load the YOLOv5s model exported to ONNX with OpenCV's DNN module
    auto net = cv::dnn::readNet("yolov5s.onnx");
    return 0;
}
import cv2

net = cv2.dnn.readNet('yolov5s.onnx')
// run a forward pass and keep the first output blob
// (assumes the input blob was already set with net.setInput)
std::vector<cv::Mat> predictions;
net.forward(predictions, net.getUnconnectedOutLayersNames());
const cv::Mat &output = predictions[0];
# run a forward pass and take the first element (the single image in the batch)
# (assumes the blob from format_yolov5 was already set with net.setInput)
predictions = net.forward()
output = predictions[0]
def unwrap_detection(input_image, output_data):
    class_ids = []
    confidences = []
    boxes = []

    rows = output_data.shape[0]

    # scale factors to map the 640x640 network coordinates back to the original image
    image_height, image_width, _ = input_image.shape   # shape is (rows, cols, channels)
    x_factor = image_width / 640
    y_factor = image_height / 640