Skip to content

Instantly share code, notes, and snippets.

Working from home

Anupam Chugh anupamchugh

Working from home
View GitHub Profile
View UIImage-from-color.swift
extension UIImage {
class func imageFromColor(color: UIColor, size: CGSize=CGSize(width: 1, height: 1), scale: CGFloat) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(size, false, scale)
UIRectFill(CGRect(origin: .zero, size: size))
let image = UIGraphicsGetImageFromCurrentImageContext()
return image
View resizedImage-UIImage+Extension.swift
func resizedImage(for size: CGSize) -> UIImage? {
let image = self.cgImage
let context = CGContext(data: nil,
width: Int(size.width),
height: Int(size.height),
bitsPerComponent: image!.bitsPerComponent,
bytesPerRow: Int(size.width),
space: image?.colorSpace ?? CGColorSpace(name: CGColorSpace.sRGB)!,
bitmapInfo: image!.bitmapInfo.rawValue)
context?.interpolationQuality = .high
View ios13-animal-classifier-vision-request.swift
import UIKit
import Vision
class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var textView: UITextView!
var animalRecognitionRequest = VNRecognizeAnimalsRequest(completionHandler: nil)
View ios-cat-vs-dog-coreml-keras.swift
import UIKit
import CoreML
enum Animal {
case cat
case dog
class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
View coreml2tools-modelconversion.swift
import coremltools
coreml_model = coremltools.converters.keras.convert('model.h5', input_names=['image'], output_names=['output'], image_input_names='image')
coreml_model.author = 'Anupam Chugh'
coreml_model.short_description = 'Cat Dog Classifier converted from a Keras model'
coreml_model.input_description['image'] = 'Takes as input an image'
coreml_model.output_description['output'] = 'Prediction as cat or dog'
View vision-doc-sanner.swift
import UIKit
import Vision
import VisionKit
class ViewController: UIViewController, VNDocumentCameraViewControllerDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var textView: UITextView!
var textRecognitionRequest = VNRecognizeTextRequest(completionHandler: nil)
View create-vision-request.swift
func createVisionRequest(image: UIImage){
currentImage = image
guard let cgImage = image.cgImage else {
let requestHandler = VNImageRequestHandler(cgImage: cgImage, orientation: image.cgImageOrientation, options: [:])
let vnRequests = [vnTextDetectionRequest]
DispatchQueue.global(qos: .background).async {
View imagePickerController.swift
extension ViewController: UIImagePickerControllerDelegate, UINavigationControllerDelegate {
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
picker.dismiss(animated: true)
guard let uiImage = info[UIImagePickerController.InfoKey.originalImage] as? UIImage else {
imageView.image = uiImage
createVisionRequest(image: uiImage)
View UIImagePickerController-present.swift
guard UIImagePickerController.isSourceTypeAvailable(.camera) else {
presentPhotoPicker(sourceType: .photoLibrary)
let photoSourcePicker = UIAlertController()
let takePhoto = UIAlertAction(title: "Camera", style: .default) { [unowned self] _ in
self.presentPhotoPicker(sourceType: .camera)
let choosePhoto = UIAlertAction(title: "Photos Library", style: .default) { [unowned self] _ in
self.presentPhotoPicker(sourceType: .photoLibrary)
View imageclassifier-coreml2-vision.swift
func imageClassifier(image: UIImage, wordNumber: Int, characterNumber: Int, currentObservation : VNTextObservation){
let request = VNCoreMLRequest(model: model) { [weak self] request, error in
guard let results = request.results as? [VNClassificationObservation],
let topResult = results.first else {
fatalError("Unexpected result type from VNCoreMLRequest")
let result = topResult.identifier
let classificationInfo: [String: Any] = ["wordNumber" : wordNumber,
"characterNumber" : characterNumber,
"class" : result]