Created
September 27, 2022 22:44
-
-
Save trbritt/01b0d43e8c4259d5b18b7ebb467498f1 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#Libraries | |
from pyueye import ueye | |
import numpy as np | |
import pyqtgraph as pg | |
from scipy.signal import savgol_filter | |
import warnings | |
from contextlib import contextmanager | |
class uEye_camera:
    """
    Minimal wrapper around an IDS uEye camera (via pyueye).

    Connects to the camera, selects a colour mode matching the sensor,
    allocates a live-video image buffer, and exposes the current frame as a
    numpy array.  Driver errors are reported on stdout (mirroring the IDS
    example code) rather than raised.
    """

    def __init__(self, HID=0):
        """
        Open the camera and start free-run capture.

        Parameters
        ----------
        HID : int
            0: first available camera; 1-254: the camera with that camera ID.
        """
        # ----- driver handles and buffers -----------------------------------
        self.hCam = ueye.HIDS(HID)
        self.sInfo = ueye.SENSORINFO()
        self.cInfo = ueye.CAMINFO()
        self.pcImageMemory = ueye.c_mem_p()
        self.MemID = ueye.int()
        self.rectAOI = ueye.IS_RECT()
        self.pitch = ueye.INT()
        self.nBitsPerPixel = ueye.INT(24)  # 24 bpp for colour; 8 bpp for monochrome
        self.channels = 3                  # 3 channels for colour (RGB); 1 for monochrome
        self.m_nColorMode = ueye.INT()     # Y8/RGB16/RGB24/REG32
        self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
        # --------------------------------------------------------------------
        print("START")
        print()

        # Starts the driver and establishes the connection to the camera
        self.nRet = ueye.is_InitCamera(self.hCam, None)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_InitCamera ERROR")

        # Reads out the data hard-coded in the non-volatile camera memory and
        # writes it to the data structure that cInfo points to
        self.nRet = ueye.is_GetCameraInfo(self.hCam, self.cInfo)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_GetCameraInfo ERROR")

        # Query additional information about the sensor type used in the camera
        self.nRet = ueye.is_GetSensorInfo(self.hCam, self.sInfo)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_GetSensorInfo ERROR")

        self.nRet = ueye.is_ResetToDefault(self.hCam)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_ResetToDefault ERROR")

        # Set display mode to DIB
        self.nRet = ueye.is_SetDisplayMode(self.hCam, ueye.IS_SET_DM_DIB)

        # Pick the colour mode matching the sensor; nColorMode is a byte string,
        # so decode it once into an int before comparing.
        sensor_color_mode = int.from_bytes(self.sInfo.nColorMode.value, byteorder='big')
        if sensor_color_mode == ueye.IS_COLORMODE_BAYER:
            # setup the color depth to the current windows setting
            ueye.is_GetColorDepth(self.hCam, self.nBitsPerPixel, self.m_nColorMode)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            self._print_color_mode("IS_COLORMODE_BAYER: ")
        elif sensor_color_mode == ueye.IS_COLORMODE_CBYCRY:
            # for color camera models use RGB32 mode
            self.m_nColorMode = ueye.IS_CM_BGRA8_PACKED
            self.nBitsPerPixel = ueye.INT(32)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            self._print_color_mode("IS_COLORMODE_CBYCRY: ")
        elif sensor_color_mode == ueye.IS_COLORMODE_MONOCHROME:
            # for monochrome camera models use Y8 mode
            self.m_nColorMode = ueye.IS_CM_MONO8
            self.nBitsPerPixel = ueye.INT(8)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            self._print_color_mode("IS_COLORMODE_MONOCHROME: ")
        else:
            # fallback: monochrome Y8 mode
            self.m_nColorMode = ueye.IS_CM_MONO8
            self.nBitsPerPixel = ueye.INT(8)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            print("else")

        # Query the size and position of the "area of interest" (AOI) within the image
        self.nRet = ueye.is_AOI(self.hCam, ueye.IS_AOI_IMAGE_GET_AOI, self.rectAOI, ueye.sizeof(self.rectAOI))
        if self.nRet != ueye.IS_SUCCESS:
            print("is_AOI ERROR")
        self.width = self.rectAOI.s32Width
        self.height = self.rectAOI.s32Height

        # Prints out some information about the camera and the sensor
        print("Camera model:\t\t", self.sInfo.strSensorName.decode('utf-8'))
        print("Camera serial no.:\t", self.cInfo.SerNo.decode('utf-8'))
        print("Maximum image width:\t", self.width)
        print("Maximum image height:\t", self.height)

        self.allocate_image_memory()

    def _print_color_mode(self, label):
        # Echo the selected colour-mode configuration (same output as the IDS example).
        print(label)
        print("\tm_nColorMode: \t\t", self.m_nColorMode)
        print("\tnBitsPerPixel: \t\t", self.nBitsPerPixel)
        print("\tbytes_per_pixel: \t\t", self.bytes_per_pixel)
        print()

    def allocate_image_memory(self):
        """
        Allocate/activate the image memory, start live (free-run) capture and
        query the line pitch needed to reassemble frames.
        """
        # Allocates an image memory for an image having its dimensions defined by
        # width and height and its color depth defined by nBitsPerPixel
        self.nRet = ueye.is_AllocImageMem(self.hCam, self.width, self.height, self.nBitsPerPixel, self.pcImageMemory, self.MemID)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_AllocImageMem ERROR")
        else:
            # Makes the specified image memory the active memory
            self.nRet = ueye.is_SetImageMem(self.hCam, self.pcImageMemory, self.MemID)
            if self.nRet != ueye.IS_SUCCESS:
                print("is_SetImageMem ERROR")
            else:
                # Set the desired color mode
                self.nRet = ueye.is_SetColorMode(self.hCam, self.m_nColorMode)

        # Activates the camera's live video mode (free run mode)
        self.nRet = ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_CaptureVideo ERROR")

        # Enables the queue mode for existing image memory sequences
        self.nRet = ueye.is_InquireImageMem(self.hCam, self.pcImageMemory, self.MemID, self.width, self.height, self.nBitsPerPixel, self.pitch)
        if self.nRet != ueye.IS_SUCCESS:
            print("is_InquireImageMem ERROR")
        else:
            print("Press q to leave the programm")

    def get_image(self):
        """
        Grab the current frame from the active image memory.

        Returns
        -------
        numpy.ndarray
            Array of shape (height, width, bytes_per_pixel); also cached on
            ``self.frame``.
        """
        array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)
        self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
        # ...reshape the flat buffer into a numpy array...
        self.frame = np.reshape(array, (self.height.value, self.width.value, self.bytes_per_pixel))
        return self.frame

    def get_centroid(self):
        """
        Return the (row, col) position of the brightest pixel of the last frame.

        Channels are summed before taking the argmax so the flat index maps
        onto the (height, width) grid.  The previous version ran argmax over
        the raw (H, W, C) array and unravelled with shape (H, W), which gave
        wrong coordinates (or a spurious ValueError) for multi-channel frames.
        On failure, (nan, nan) is returned so callers can skip the overlay.
        """
        try:
            m, n, _ = self.frame.shape
            # Collapse channels: np.sum promotes uint8 to a wide int, no overflow.
            intensity = self.frame.sum(axis=2)
            self.centroid = np.unravel_index(np.argmax(intensity), (m, n))
        except (RuntimeWarning, ValueError):
            # e.g. empty/malformed frame: signal "no centroid"
            self.centroid = np.array([np.nan, np.nan])
        return self.centroid

    def __del__(self):
        """Release the image memory and close the camera handle."""
        try:
            name = self.sInfo.strSensorName.decode('utf-8')
            # Releases an image memory that was allocated using is_AllocImageMem()
            # and removes it from the driver management
            ueye.is_FreeImageMem(self.hCam, self.pcImageMemory, self.MemID)
            # Disables the hCam camera handle and releases the data structures
            # and memory areas taken up by the uEye camera
            ueye.is_ExitCamera(self.hCam)
            print(f'Camera {name} closed')
        except AttributeError:
            # Partially-initialised instance (e.g. __init__ failed early):
            # nothing to release.
            pass
if __name__ == "__main__":
    @contextmanager
    def rowmajor_axisorder():
        """
        Context manager that sets the PyQtGraph image axis order to row-major.

        The environment is reset to the initial value after context close —
        including when the managed body raises, which the previous version
        (no try/finally around the yield) did not guarantee.
        """
        old_image_axis_order = pg.getConfigOption("imageAxisOrder")
        pg.setConfigOptions(imageAxisOrder="row-major")
        try:
            yield
        finally:
            # Always restore the caller's configuration.
            pg.setConfigOptions(imageAxisOrder=old_image_axis_order)
    class Diffshow(pg.QtGui.QWidget):
        """
        Widget containing a main viewer, plus some cursor information.

        Shows live frames from the first available uEye camera in a
        pg.ImageView, overlays smoothed row/column intensity profiles (red)
        and cross-hair lines through the brightest pixel (green), and displays
        the pixel value under the mouse cursor in a label below the image.

        Parameters
        ----------
        image : ndarray
        """
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.viewer = pg.ImageView()
            # self.viewer.ui.histogram.hide()
            # HID=0: connect to the first available camera.
            self.camera = uEye_camera(HID=0)
            self.getUpdatedImage()
            # Overlay items from the previous update(), removed before redrawing.
            self.current_hists = []
            self.current_centroid_lines = []
            with warnings.catch_warnings():
                # Pesky FutureWarning from PyQtGraph
                warnings.simplefilter("ignore")
                self.viewer.setImage(self.frame, autoLevels=False, autoRange=False, autoHistogramRange=False)
            self.cursor_info = pg.QtGui.QLabel("")
            self.cursor_info.setAlignment(pg.QtCore.Qt.AlignCenter)
            # Rate-limited proxy: mouse-move events update the label at most 60x/s.
            self.__cursor_proxy = pg.SignalProxy(
                self.viewer.scene.sigMouseMoved,
                rateLimit=60,
                slot=self.update_cursor_info,
            )
            self.timer = pg.QtCore.QTimer()
            self.timer.timeout.connect(self.update)
            self.timer.start(int(1e2))  # fires every 100ms (frame rate 10 Hz, max 14 Hz)
            self.setWindowTitle(f"uEye Image Viewer - {self.camera.sInfo.strSensorName.decode('utf-8')}")
            layout = pg.QtGui.QVBoxLayout()
            layout.addWidget(self.viewer)
            layout.addWidget(self.cursor_info)
            self.setLayout(layout)

        def update_cursor_info(self, event):
            """Determine cursor information from mouse event."""
            mouse_point = self.viewer.getView().mapSceneToView(event[0])
            i, j = int(mouse_point.y()), int(mouse_point.x())
            try:
                # [0] takes the first channel of the pixel at (row i, col j).
                val = self.viewer.getImageItem().image[i, j][0]
            except IndexError:
                # Cursor is outside the image area.
                val = 0
            self.cursor_info.setText(
                f"Position: ({i},{j}) | Pixel value: {val:.2f} cnts"
            )

        def getUpdatedImage(self):
            # Grab the newest camera frame and cache it on the widget.
            self.frame = self.camera.get_image()

        def update(self):
            # NOTE(review): this overrides QWidget.update(); it is driven by
            # self.timer every 100 ms rather than by Qt's repaint machinery.
            self.getUpdatedImage()
            self.viewer.setImage(self.frame, autoLevels=False, autoRange=False, autoHistogramRange=False)
            # Clip display levels at the 99th percentile to suppress hot pixels.
            self.viewer.setLevels(min=0, max=np.percentile(self.frame, 99))
            # histograms: smoothed (Savitzky-Golay) mean intensity profiles,
            # negated and offset so they hug the top/right image edges.
            # NOTE(review): reshape(-1,) assumes a single-channel frame; for a
            # C-channel frame it yields W*C (resp. H*C) samples, which would not
            # match the arange() below — confirm the camera runs monochrome.
            self.histogram_viewer = self.viewer.getView()
            for hist in self.current_hists: self.histogram_viewer.removeItem(hist)
            HIST_X = -savgol_filter(0.3*self.camera.height.value*np.average(self.frame, axis=0).reshape(-1,), 3, 1)
            HIST_Y = -savgol_filter(0.3*self.camera.width.value*np.average(self.frame, axis=1).reshape(-1,), 3, 1)
            self.current_hist_x = pg.PlotCurveItem(
                x=np.arange(0, self.camera.width.value, 1),
                y=HIST_X-HIST_X.max()+self.camera.height.value,
                pen=pg.mkPen('r')
            )
            self.current_hist_y = pg.PlotCurveItem(
                y=np.arange(0, self.camera.height.value, 1),
                x=HIST_Y-HIST_Y.max()+self.camera.width.value,
                pen=pg.mkPen('r')
            )
            self.current_hists = [self.current_hist_x, self.current_hist_y]
            for hist in self.current_hists: self.histogram_viewer.addItem(hist)
            # centroids: green vertical/horizontal lines through the brightest pixel
            self.centroid_viewer = self.viewer.getView()
            for line in self.current_centroid_lines: self.centroid_viewer.removeItem(line)
            y, x = self.camera.get_centroid()
            self.current_x_centroid = pg.PlotCurveItem(
                x = x*np.ones((100,)), y = np.linspace(0, self.camera.height.value, 100), pen=pg.mkPen('g')
            )
            self.current_y_centroid = pg.PlotCurveItem(
                x = np.linspace(0, self.camera.width.value, 100), y = y*np.ones((100,)), pen=pg.mkPen('g')
            )
            self.current_centroid_lines = [self.current_x_centroid, self.current_y_centroid]
            for line in self.current_centroid_lines: self.centroid_viewer.addItem(line)
with rowmajor_axisorder(): | |
app = pg.QtGui.QApplication([]) | |
viewer = Diffshow() | |
viewer.show() | |
app.exec_() |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment