Skip to content

Instantly share code, notes, and snippets.

@funart23
Last active July 17, 2020 20:38
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save funart23/3f8708c6119f70539be7de941717e1a4 to your computer and use it in GitHub Desktop.
//converted from: https://www.pyimagesearch.com/2014/11/24/detecting-barcodes-images-python-opencv/
// some defines
// Number of elements in a C array (only valid on true arrays, not pointers).
#define TABLE_SIZE( X ) ( sizeof( (X) ) / sizeof( (X)[0] ) )
// Round-to-nearest by adding 0.5 then truncating.
// NOTE(review): this is only correct for non-negative F — for negative values
// truncation rounds toward zero, so e.g. ROUNDINT(-0.7) yields 0, not -1.
// Prefer std::lround / cvRound where negative coordinates are possible.
#define ROUNDINT( F ) static_cast<int>( 0.5 + (F) )
// Helper Struct
// Comparison helpers for ordering contours by absolute area.
// Fix: cv::contourArea accepts a std::vector<cv::Point> directly via
// InputArray, so the original per-comparison cv::Mat temporaries were
// unnecessary overhead inside a sort comparator that runs O(n log n) times.
struct CompareContourAreas
{
    /// Ascending: true when contour1 encloses a smaller area than contour2.
    static bool Asc( const std::vector< cv::Point >& contour1,
                     const std::vector< cv::Point >& contour2 )
    {
        // contourArea with oriented=false (the default) is already non-negative;
        // fabs is kept for defensive parity with the original Python port.
        const double area1 = std::fabs( cv::contourArea( contour1 ) );
        const double area2 = std::fabs( cv::contourArea( contour2 ) );
        return area1 < area2;
    }

    /// Descending: delegates to Asc with the arguments swapped.
    static bool Desc( const std::vector< cv::Point >& contour1,
                      const std::vector< cv::Point >& contour2 )
    {
        return Asc( contour2, contour1 );
    }
};
// Locate the most prominent horizontally-oriented barcode region in a BGR
// image and draw its rotated bounding box (green, 3 px) onto a copy of the
// input. Port of:
//   https://www.pyimagesearch.com/2014/11/24/detecting-barcodes-images-python-opencv/
//
// @param _matOrigin  input BGR image (read-only).
// @param _matOutput  on success, a clone of _matOrigin with the box drawn;
//                    released (empty) on failure.
// @return true when at least one candidate contour was found, else false.
static bool _HorzBarcodeFilter( const cv::Mat& _matOrigin, cv::Mat& _matOutput )
{
    cv::Mat gray;
    cv::cvtColor( _matOrigin, gray, cv::COLOR_BGR2GRAY );

    // Scharr gradients (ksize = -1 selects the 3x3 Scharr kernel) in x and y.
    // Barcodes have high horizontal and low vertical gradient energy.
    const int ddepth = CV_32F;
    const int ksize  = -1;
    cv::Mat grad_x, grad_y;
    cv::Sobel( gray, grad_x, ddepth, 1, 0, ksize ); // defaults: scale=1, delta=0, BORDER_DEFAULT
    cv::Sobel( gray, grad_y, ddepth, 0, 1, ksize );

    // Subtract y-gradient from x-gradient: keeps regions with strong
    // horizontal and weak vertical gradients, then take the 8-bit magnitude.
    cv::Mat gradient;
    {
        cv::Mat diff;
        cv::subtract( grad_x, grad_y, diff );
        cv::convertScaleAbs( diff, gradient );
    }

    // Blur + hard threshold to suppress gradient noise outside the barcode.
    cv::Mat blurred;
    cv::blur( gradient, blurred, cv::Size( 9, 9 ) );
    cv::Mat thresh;
    cv::threshold( blurred, thresh, 225, 255, cv::THRESH_BINARY );

    // Close with a wide-but-short rectangular kernel to bridge the gaps
    // between the vertical bars of the barcode.
    const cv::Mat kernel = cv::getStructuringElement( cv::MORPH_RECT, cv::Size( 21, 7 ) );
    cv::Mat closed;
    cv::morphologyEx( thresh, closed, cv::MORPH_CLOSE, kernel );

    // A series of erosions and dilations removes small blobs that are not
    // part of the barcode. erode/dilate support in-place operation, so the
    // original's closed1/closed2/closed3 chain collapses to one buffer.
    const int iterations = 4;
    cv::erode ( closed, closed, cv::noArray(), cv::Point( -1, -1 ), iterations );
    cv::dilate( closed, closed, cv::noArray(), cv::Point( -1, -1 ), iterations );

    // External contours of the surviving regions; clone kept defensively in
    // case this builds against an OpenCV version whose findContours still
    // modifies its input.
    std::vector< std::vector< cv::Point > > contours;
    cv::findContours( closed.clone(), contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE );

    if ( contours.empty() )
    {
        // error occurred: nothing barcode-like found
        _matOutput.release();
        return false;
    }

    // Fix: only the largest contour is needed, so std::max_element (O(n)
    // comparisons) replaces the original full std::sort (O(n log n), with
    // two contourArea evaluations per comparison). Also drops the unused
    // local 'iSize' the original computed.
    const auto itBiggest =
        std::max_element( contours.begin(), contours.end(), CompareContourAreas::Asc );

    // Rotated bounding box of the largest contour. minAreaRect accepts the
    // Point vector directly; rect.points() yields the 4 float vertices.
    const cv::RotatedRect rect = cv::minAreaRect( *itBiggest );
    cv::Point2f vertices[4];
    rect.points( vertices );

    // drawContours wants integer points. Fix: std::lround rounds correctly
    // for negative coordinates too (a rotated box may extend past the image
    // border), unlike the original ROUNDINT( 0.5 + F ) truncation.
    std::vector< std::vector< cv::Point > > boxContours( 1 );
    boxContours[0].reserve( 4 );
    for ( const cv::Point2f& pf : vertices )
    {
        boxContours[0].push_back( cv::Point( static_cast<int>( std::lround( pf.x ) ),
                                             static_cast<int>( std::lround( pf.y ) ) ) );
    }

    // contourIdx = -1 draws all contours (here: just the one box).
    cv::Mat matOutput = _matOrigin.clone();
    cv::drawContours( matOutput, boxContours, -1, cv::Scalar( 0, 255, 0 ), 3 );
    _matOutput = matOutput;
    return true;
} //
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment