Skip to content

Instantly share code, notes, and snippets.

View sgl0v's full-sized avatar

Max sgl0v

View GitHub Profile
@sgl0v
sgl0v / install_ruby_with_rbenv.md
Created June 15, 2021 11:16 — forked from stonehippo/install_ruby_with_rbenv.md
Installing a new Ruby with rbenv on Mac OS

Install a new Ruby with rbenv on Mac OS (and make yourself a superhero)

If you're doing stuff with Ruby on a Mac, e.g. installing Jekyll or something, by default you'll end up having to use the sudo command to do stuff, since the permission to modify the default config is not available to your user account.

This sucks and should be avoided. Here's how to fix that.

Installing a new Ruby

To make this better, we are going to install a new, custom Ruby. This used to be a big, scary thing, but thanks to the awesome tools Homebrew and rbenv, it's a snap.*

A word of warning: you will have to use Terminal to install this stuff. If you are uncomfortable with text, words, and doing stuff with your computer beyond pointing and hoping, this may not work well for you. But if that's the case, I'm not sure why you were trying to use Ruby in the first place.

// NOTE(review): this snippet is truncated by the page capture — the method and
// extension bodies are cut off below; the remainder is not visible here.
extension ImageScanner {
// Crops `inputImage` to the rectangle reported by Vision, failing with an
// internal error when the detected rectangle falls outside the image bounds.
private func cropImage(_ inputImage: CIImage, with detectedRectangle: VNRectangleObservation) -> Result<UIImage, Error> {
let imageSize = inputImage.extent.size
// Scale the observation's normalized (0...1) coordinates into pixel space —
// the scale-by-image-size transform below implies normalized input.
let transform = CGAffineTransform.identity.scaledBy(x: imageSize.width, y: imageSize.height)
let boundingBox = detectedRectangle.boundingBox.applying(transform)
// Reject observations that lie (partly) outside the image extent.
guard inputImage.extent.contains(boundingBox) else {
return .failure(ImageProviderError.internalError)
}
let topLeft = detectedRectangle.topLeft.applying(transform)
// (truncated: the remaining corner transforms and the perspective-corrected crop are not shown)
// NOTE(review): snippet truncated by the page capture — assignment of `result`,
// semaphore signal/wait, and execution of the request are not visible below.
extension ImageScanner {
// Runs a synchronous Vision rectangle-detection pass on `image` and returns
// the first observation, or an internal error when nothing is detected.
private func detectRectangle(on image: CIImage, orientation: CGImagePropertyOrientation) -> Result<VNRectangleObservation, Error> {
// Default to failure; presumably overwritten inside the request callback on success.
var result: Result<VNRectangleObservation, Error> = .failure(ImageProviderError.internalError)
let semaphore = DispatchSemaphore(value: 1) // ➊
// NOTE(review): a semaphore created with value 1 will NOT block the first wait();
// if the intent is to block until the async Vision callback fires, the initial
// value should likely be 0 — confirm against the full listing.
let rectanglesRequest = VNDetectRectanglesRequest { request, error in // ➋
// Bail out of the callback (leaving `result` as failure) on any error,
// unexpected result type, or empty observation list.
guard error == nil,
let observations = request.results as? [VNRectangleObservation],
let detectedRectangle = observations.first else {
return
}
// NOTE(review): snippet truncated by the page capture — the closing braces of
// the method and extension are not visible below.
extension ImageScanner {
// Detects a rectangle in the captured photo and crops to it, composing the two
// Result-returning steps with flatMap.
private func postProcessImage(_ image: UIImage) -> Result<UIImage, Error> {
// Fail fast when the UIImage cannot be bridged to CIImage or its orientation
// raw value does not map onto CGImagePropertyOrientation.
guard let ciImage = CIImage(image: image),
let orientation = CGImagePropertyOrientation(rawValue: UInt32(image.imageOrientation.rawValue)) else {
return .failure(ImageProviderError.internalError)
}
// Bake the EXIF orientation into the pixel data before cropping.
let inputImage = ciImage.oriented(forExifOrientation: Int32(orientation.rawValue))
// NOTE(review): detection runs on the *unoriented* `ciImage` while the crop is
// applied to the oriented `inputImage` — verify the observation's coordinates
// still line up, or run detection on `inputImage` instead.
return detectRectangle(on: ciImage, orientation: orientation).flatMap {detectedRectangle in // ➊
self.cropImage(inputImage, with: detectedRectangle) // ➋
}
// NOTE(review): snippet truncated by the page capture — dismissal of the picker
// with `result` presumably follows, but is not visible below.
extension ImageScanner: UIImagePickerControllerDelegate {
// Picker callback: extracts the captured photo and post-processes it off the
// main queue, dismissing with an internal error when no image is present.
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
guard let uiImage = info[UIImagePickerController.InfoKey.originalImage] as? UIImage else {
dismissController(picker, with: .failure(ImageProviderError.internalError))
return
}
// Heavy Vision/CoreImage work runs on a background queue
// (`DispatchQueue.background` is a project-defined extension — not stdlib).
DispatchQueue.background.async {
let result = self.postProcessImage(uiImage)
// Wraps UIImagePickerController to capture a photo and report it asynchronously.
// NOTE(review): snippet truncated by the page capture — picker presentation and
// the rest of the class body are not visible below.
final class ImageScanner: NSObject {
// Held while a scan is in flight so the async picker flow can report back;
// also serves as the "already running" flag in the guard below.
private var completion: Completion?
// Starts a camera capture; fails fast with `startFailure` when the camera is
// unavailable or another request is still pending.
func image(with completion: @escaping Completion) {
guard self.completion == nil, UIImagePickerController.isSourceTypeAvailable(.camera) else {
completion(.failure(ImageProviderError.startFailure))
return
}
self.completion = completion
// NOTE(review): snippet truncated by the page capture — the `catch` body and
// closing braces are not visible below.
extension ImageColorizer {
// Full colorization pipeline: pre-process to a LAB representation, run the
// Core ML model, merge the prediction with the input, and post-process back
// to a UIImage. Helpers (`preProcess`, `colorizerInput`, `imageLab`,
// `postProcess`) are defined elsewhere in the project.
private func colorize(image inputImage: UIImage) -> Result<UIImage, Error> {
do {
let inputImageLab = try preProcess(inputImage: inputImage) // ➊ convert input to LAB form
let input = try colorizerInput(from: inputImageLab) // ➋ wrap as the model's input type
let output = try coremlColorizer(configuration: MLModelConfiguration()).prediction(input: input) // ➌ run Core ML inference
let outputImageLab = imageLab(from: output, inputImageLab: inputImageLab) // ➍ combine prediction with input LAB
let resultImage = try postProcess(outputLAB: outputImageLab, inputImage: inputImage) // ➎ convert back to UIImage
return .success(resultImage)
} catch {
final class ImageColorizer {

    /// Runs the (private) colorization pipeline on a background queue and
    /// delivers the outcome to `completion` on the main queue.
    /// - Parameters:
    ///   - inputImage: the photo to colorize.
    ///   - completion: invoked on the main queue with the colorized image or an error.
    func colorize(image inputImage: UIImage, completion: @escaping (Result<UIImage, Error>) -> Void) {
        DispatchQueue.background.async {
            // Heavy work off the main thread; hop back for the callback.
            let outcome = self.colorize(image: inputImage)
            DispatchQueue.main.async {
                completion(outcome)
            }
        }
    }
}
/// Class for model loading and prediction.
/// NOTE(review): this matches the shape of an Xcode auto-generated Core ML model
/// wrapper — confirm; the snippet is truncated by the page capture and the
/// method signature following this doc comment is not visible.
@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *)
class coremlColorizer {
/**
Make a prediction using the structured interface
- parameters:
- input: the input to the prediction as coremlColorizerInput
- throws: an NSError object that describes the problem
# Python (coremltools) snippet: sanity-checks the converted Core ML colorizer.
# NOTE(review): fragment of a larger script — `ct`, `opt`, `load_img`,
# `preprocess_img`, `postprocess_tens`, and the torch import are defined elsewhere.
colorizer_coreml = ct.models.MLModel('coremlColorizer.mlmodel')  # ➊ load the converted model
img = load_img(opt.img_path)
# ➋ presumably returns the original-resolution L channel plus a 256x256 resized
# L tensor for the model input — TODO confirm against preprocess_img's definition.
(tens_l_orig, tens_l_rs) = preprocess_img(img, HW=(256,256)) # ➋
# ➌ run prediction; '796' is the model's output feature name (auto-assigned by conversion).
tens_ab_rs = colorizer_coreml.predict({'input1': tens_l_rs.numpy()})['796'] # ➌
# ➍ reassemble images: a grayscale baseline (zero ab channels) and the colorized result.
img_bw = postprocess_tens(tens_l_orig, torch.cat((0*tens_l_orig,0*tens_l_orig),dim=1))
out_img_coreml = postprocess_tens(tens_l_orig, torch.from_numpy(tens_ab_rs))