Created
September 10, 2019 08:59
-
-
Save pavlinb/ae5405b6c3c3b068ac4b8cb4601053a9 to your computer and use it in GitHub Desktop.
live capture recognition_2.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# https://github.com/jsbain/objc_hacks/blob/master/live_camera_view.py | |
# https://gist.github.com/omz/a7c5f310e1c8b829a5a613cd556863d4 | |
from objc_util import * | |
import os | |
from time import sleep | |
import ui | |
# Configuration (change URL and filename if you want to use a different model): | |
#MODEL_URL = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel' | |
MODEL_FILENAME = 'mobilenet.mlmodel' | |
# Use a local path for caching the model file (no need to sync this with iCloud): | |
MODEL_PATH = os.path.join(os.path.expanduser('~/Documents'), MODEL_FILENAME) | |
# Declare/import ObjC classes: | |
MLModel = ObjCClass('MLModel') | |
VNCoreMLModel = ObjCClass('VNCoreMLModel') | |
VNCoreMLRequest = ObjCClass('VNCoreMLRequest') | |
VNImageRequestHandler = ObjCClass('VNImageRequestHandler') | |
VNDetectTextRectanglesRequest = ObjCClass('VNDetectTextRectanglesRequest').alloc() | |
AVCaptureStillImageOutput=ObjCClass('AVCaptureStillImageOutput') | |
def load_model(): | |
'''Helper method for downloading/caching the mlmodel file''' | |
if not os.path.exists(MODEL_PATH): | |
print(f'Downloading model: {MODEL_FILENAME}...') | |
r = requests.get(MODEL_URL, stream=True) | |
file_size = int(r.headers['content-length']) | |
with open(MODEL_PATH, 'wb') as f: | |
bytes_written = 0 | |
for chunk in r.iter_content(1024*100): | |
f.write(chunk) | |
print(f'{bytes_written/file_size*100:.2f}% downloaded') | |
bytes_written += len(chunk) | |
print('Download finished') | |
ml_model_url = nsurl(MODEL_PATH) | |
# Compile the model: | |
c_model_url = MLModel.compileModelAtURL_error_(ml_model_url, None) | |
# Load model from the compiled model file: | |
ml_model = MLModel.modelWithContentsOfURL_error_(c_model_url, None) | |
# Create a VNCoreMLModel from the MLModel for use with the Vision framework: | |
vn_model = VNCoreMLModel.modelForMLModel_error_(ml_model, None) | |
return vn_model | |
def _classify_img_data(img_data): | |
global vn_model | |
# Create and perform the recognition request: | |
req = VNCoreMLRequest.alloc().initWithModel_(vn_model).autorelease() | |
handler = VNImageRequestHandler.alloc().initWithData_options_(img_data, None).autorelease() | |
success = handler.performRequests_error_([req], None) | |
if success: | |
best_result = req.results()[0] | |
label = str(best_result.identifier()) | |
confidence = best_result.confidence() | |
ret = {'label': label, 'confidence': confidence} | |
else: | |
ret = None | |
del req | |
del handler | |
return ret | |
class LiveCameraView(ui.View): | |
''' device=1== front, device=2==back | |
''' | |
def __init__(self,device=0, *args, **kwargs): | |
ui.View.__init__(self,*args,**kwargs) | |
self._session=ObjCClass('AVCaptureSession').alloc().init() | |
self._session.setSessionPreset_('AVCaptureSessionPresetHigh'); | |
inputDevices=ObjCClass('AVCaptureDevice').devices() | |
self._inputDevice=inputDevices[device] | |
deviceInput=ObjCClass('AVCaptureDeviceInput').deviceInputWithDevice_error_(self._inputDevice, None); | |
if self._session.canAddInput_(deviceInput): | |
self._session.addInput_(deviceInput) | |
#stillSettings = [AVVideoCodecJPEG:AVVideoCodecKey] | |
self._stillImageOutput=AVCaptureStillImageOutput.new() | |
# .outputSettings = stillSettings | |
if(self._session.canAddOutput_(self._stillImageOutput)): | |
self._session.addOutput_(self._stillImageOutput) | |
self._previewLayer=ObjCClass('AVCaptureVideoPreviewLayer').alloc().initWithSession_(self._session) | |
self._previewLayer.setVideoGravity_( | |
'AVLayerVideoGravityResizeAspectFill') | |
rootLayer=ObjCInstance(self).layer() | |
rootLayer.setMasksToBounds_(True) | |
self._previewLayer.setFrame_( | |
CGRect(CGPoint(0, 0), CGSize(self.width,self.height))) | |
rootLayer.insertSublayer_atIndex_(self._previewLayer,0) | |
self._previewLayer.connection().videoOrientation= ObjCClass('UIDevice').currentDevice().orientation() | |
self._session.startRunning() | |
label = ui.Label(frame=(0, 0, 400, 30), flex='W', name='label') | |
label.background_color = (0, 0, 0, 0.5) | |
label.text_color = 'white' | |
label.text = 'Nothing scanned yet' | |
label.alignment = ui.ALIGN_CENTER | |
self.add_subview(label) | |
b = ui.ButtonItem(image=ui.Image.named('iow:ios7_camera_outline_32')) | |
b.action = self.go_action | |
self.right_button_items = (b,) | |
self._handler_blk=ObjCBlock(self.handler,restype=None,argtypes=[c_void_p, c_void_p, c_void_p]) | |
self.active = True | |
def go_action(self,sender): | |
self.capture() | |
def will_close(self): | |
self.active = False | |
sleep(1) # let last capture handler finish | |
self._session.stopRunning() | |
print('closed') | |
def layout(self): | |
#self._previewLayer.connection().videoOrientation= ObjCClass('UIDevice').currentDevice().orientation() | |
self._previewLayer.setFrame_( | |
CGRect(CGPoint(0, 0), CGSize(self.width,self.height))) | |
if not self._session.isRunning(): | |
self._session.startRunning() | |
def handler(self,_blk,buff,err): | |
data=ObjCInstance(buff) | |
img_data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation_(data) | |
lc = _classify_img_data(img_data) | |
print(lc) | |
self.subviews[0].text = lc['label'] | |
del img_data | |
if self.active: | |
self.capture() | |
def capture(self): | |
connection = self._stillImageOutput.connectionWithMediaType('vide') | |
self.handler_active = True | |
self._stillImageOutput.captureStillImageAsynchronouslyFromConnection( | |
connection, | |
completionHandler=self._handler_blk | |
) | |
def main(): | |
global vn_model | |
vn_model = load_model() | |
camera=LiveCameraView(frame=(0,0,400,400)) | |
camera.present('sheet') | |
camera.wait_modal() | |
if __name__ == '__main__': | |
main() |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment