Skip to content

Instantly share code, notes, and snippets.

@petebankhead
Created September 26, 2021 15:57
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save petebankhead/4a94ab1e9cd3ffae706e41f3f7bc844c to your computer and use it in GitHub Desktop.
Save petebankhead/4a94ab1e9cd3ffae706e41f3f7bc844c to your computer and use it in GitHub Desktop.
QuPath script to extract and unwarp manually-annotated rectangular regions from an image
/**
* Helper script to extract manually-annotated photos from an image,
* e.g. to help with digitising a photo album based upon scanning each page.
*
* The main thing this will do is to use the annotated polygons and perform
* a perspective transform to unwarp the photos.
*
* It can be applied in batch to many images.
*
* To use it
* - Add images containing the photos to a project
* - Optionally add an 'album' metadata value to group exported images
* - For each image, manually annotate photos using the polygon tool -
* clicking 4 points (clockwise, starting from the top left)
* - Optionally assign one of the following classifications to each photo:
* "Rotate -90", "Rotate 90", "Rotate 180"
* If available, the corresponding rotation will be applied during export.
* - Choose 'Run -> Run for project (without save)' to export the photos
*
* Should work for any annotated rectangular regions in an image... it was just written
* initially to help digitise photos quickly.
*
* @author Pete Bankhead
*/
import ij.ImagePlus
import org.bytedeco.javacpp.PointerScope
import qupath.lib.images.servers.ImageServer
import qupath.lib.regions.RegionRequest
import qupath.lib.roi.interfaces.ROI
import qupath.opencv.tools.OpenCVTools
import org.bytedeco.opencv.opencv_core.*
import org.bytedeco.opencv.global.opencv_imgproc
import org.bytedeco.opencv.global.opencv_core
import java.awt.image.BufferedImage
import java.nio.FloatBuffer
import static qupath.lib.gui.scripting.QPEx.*
// Output directory (PROJECT_BASE_DIR/export) and export file format
String path = buildFilePath(PROJECT_BASE_DIR, 'export')
String ext = ".jpg"
// If true, show extracted images in ImageJ windows instead of writing files
boolean showOnly = false
// Optional contrast enhancement; uncomment the second line to enable CLAHE
def clahe = null
//def clahe = opencv_imgproc.createCLAHE(1.5, new Size(8, 8))
// Use the current selection if there is one, otherwise all area annotations
def annotations = getSelectedObjects()
if (!annotations)
annotations = getAnnotationObjects().findAll {it.getROI().isArea()}
def server = getCurrentServer()
def name = server.getMetadata().getName()
def entry = getProjectEntry()
if (entry) {
// Prefer the project entry's image name; reuse 'entry' rather than
// calling getProjectEntry() a second time
name = entry.getImageName()
// Optionally group exported images into a subdirectory per 'album' metadata value
def album = entry.getMetadataValue('album')
if (album)
path = buildFilePath(path, album)
}
name = GeneralTools.getNameWithoutExtension(name)
println "Extracting ${annotations.size()} photos from ${name}"
// Only create the output directory when we will actually write files
if (!showOnly && path)
mkdirs(path)
// Counter used to build unique output names per annotation
def count = 0
// PointerScope acts as try-with-resources for native memory: every OpenCV Mat
// allocated inside is released automatically when the scope closes
try (def scope = new PointerScope()) {
// Get the entire image as an OpenCV Mat
def img = server.readBufferedImage(RegionRequest.createInstance(server))
def mat = OpenCVTools.imageToMat(img)
// 4x2 float matrix of the annotated quadrilateral's corners (transform source);
// reused across annotations via its FloatBuffer view
def matROI = new Mat(4, 2, opencv_core.CV_32FC1, Scalar.ZERO)
FloatBuffer bufROI = matROI.createBuffer()
// 4x2 float matrix of the unwarped rectangle's corners (transform destination)
def matROIOutput = new Mat(4, 2, opencv_core.CV_32FC1, Scalar.ZERO)
FloatBuffer bufROIOutput = matROIOutput.createBuffer()
for (annotation in annotations) {
count++
def roi = annotation.getROI()
def points = roi.getAllPoints()
// Try to simplify points (remove duplicate/nearby points, easily created when clicking)
if (points.size() != 4) {
qupath.lib.roi.ShapeSimplifier.simplifyPolygonPoints(points, 10.0)
}
// A perspective transform needs exactly 4 corners; skip anything else
if (points.size() != 4) {
println ("WARN: Unable to unwarp $annotation, require 4 points but found ${points.size()}")
continue
}
// Copy the 4 (x, y) corners into the source matrix, row-major
for (int i = 0; i < points.size(); i++) {
bufROI.put(i*2, points[i].x as float)
bufROI.put(i*2+1, points[i].y as float)
}
// Output size from the first two edge lengths; assumes corners were clicked
// clockwise starting from the top left, as the header docs instruct
float width = points[1].distance(points[0]) as float
float height = points[2].distance(points[1]) as float
// Destination corners: axis-aligned rectangle from (0,0) to (width,height)
bufROIOutput.put(0, [0, 0, width, 0, width, height, 0, height] as float[])
def matTransform = opencv_imgproc.getPerspectiveTransform(matROI, matROIOutput)
def matOutput = new Mat((int)height, (int)width, mat.type())
// Unwarp the photo with cubic interpolation; out-of-bounds pixels become black
opencv_imgproc.warpPerspective(mat, matOutput, matTransform, matOutput.size(),
opencv_imgproc.INTER_CUBIC,// | opencv_imgproc.WARP_INVERSE_MAP,
opencv_core.BORDER_CONSTANT,
Scalar.ZERO)
// Rotate if necessary, based on the annotation's classification
def pathClass = annotation.getPathClass()
if (pathClass == getPathClass('Rotate -90'))
opencv_core.rotate(matOutput, matOutput, opencv_core.ROTATE_90_COUNTERCLOCKWISE)
else if (pathClass == getPathClass('Rotate 90'))
opencv_core.rotate(matOutput, matOutput, opencv_core.ROTATE_90_CLOCKWISE)
else if (pathClass == getPathClass('Rotate 180'))
opencv_core.rotate(matOutput, matOutput, opencv_core.ROTATE_180)
// Optional CLAHE: apply contrast enhancement to the luma (Y) channel only,
// round-tripping through YCrCb so color is preserved
if (clahe) {
opencv_imgproc.cvtColor(matOutput, matOutput, opencv_imgproc.COLOR_RGB2YCrCb)
def channels = OpenCVTools.splitChannels(matOutput)
clahe.apply(channels[0], channels[0])
OpenCVTools.mergeChannels(channels, matOutput)
opencv_imgproc.cvtColor(matOutput, matOutput, opencv_imgproc.COLOR_YCrCb2RGB)
}
// Export the image (or just display it when showOnly is set)
def outputName = "${name}-${count}"
def imgOutput = OpenCVTools.matToBufferedImage(matOutput)
if (showOnly)
new ImagePlus(outputName, imgOutput).show()
else {
def pathOutput = buildFilePath(path, outputName + ext)
writeImage(imgOutput, pathOutput)
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment