Main view for the Tomtelizer written in RubyMotion
# Main controller for the Tomtelizer
#
class TomtelizerViewController < UIViewController
  # Load the view
  #
  def loadView
    self.view = UIImageView.alloc.init
    @debug_face = false
  end

  # Load the sample images and register the swipe gestures
  #
  def viewDidLoad
    @images = %w{matz guido jmccolor}.reverse.map { |name|
      UIImage.imageNamed "#{name}.jpg"
    }

    view.image = @images.first
    view.contentMode = UIViewContentModeScaleAspectFit
    view.userInteractionEnabled = true

    add_gesture('swipe_previous:', UISwipeGestureRecognizerDirectionLeft)
    add_gesture('swipe_next:', UISwipeGestureRecognizerDirectionRight)
  end

  # Attach a swipe gesture recognizer to the view
  #
  def add_gesture(action, direction)
    gesture = UISwipeGestureRecognizer.alloc.
                initWithTarget(self, action: action)
    gesture.direction = direction
    view.addGestureRecognizer(gesture)
  end

  def viewDidAppear(animated)
    hatify
  end

  # Detect faces in the current image and place a santa hat on each of them
  #
  def hatify
    # Remove previous hats.
    view.subviews.each { |v| v.removeFromSuperview }

    # CoreImage uses a coordinate system which is flipped on the Y axis
    # compared to UIKit. Also, a UIImageView can return an image larger than
    # itself. To properly translate points, we use an affine transform.
    transform = CGAffineTransformMakeScale(
      view.bounds.size.width / view.image.size.width,
      -1 * (view.bounds.size.height / view.image.size.height))
    transform = CGAffineTransformTranslate(transform, 0, -view.image.size.height)
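
    # For example (hypothetical sizes, not taken from the gist): a 640x960
    # pixel image shown in a 320x480 point view gives a scale of (0.5, -0.5),
    # so the CoreImage point (0, 0) at the bottom-left of the image is mapped
    # to the UIKit point (0, 480) at the bottom-left of the view.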

    image = CIImage.imageWithCGImage(view.image.CGImage)

    # The face detector is created once and then reused on every call.
    @detector ||= CIDetector.detectorOfType(CIDetectorTypeFace,
      context: nil,
      options: { CIDetectorAccuracy: CIDetectorAccuracyHigh })

    @detector.featuresInImage(image).each do |feature|
      # We need the mouth and eyes positions to determine
      # where the hat should be added.
      next unless feature.hasMouthPosition \
              and feature.hasLeftEyePosition \
              and feature.hasRightEyePosition

      # Create the tomtelizer view.
      hatView = UIImageView.alloc.init
      hatView.image = UIImage.imageNamed('santa-hat.png')
      hatView.contentMode = UIViewContentModeScaleAspectFit

      # Compute its location and size, based on the position of the eyes and
      # mouth.
      w = feature.bounds.size.width * 1.6
      h = feature.bounds.size.height * 1.2
      x = (feature.mouthPosition.x +
           (feature.leftEyePosition.x + feature.rightEyePosition.x) / 2) / 2 - w / 2
      y = ((feature.rightEyePosition.y + feature.leftEyePosition.y) / 2) - w / 11

      hatView.frame = CGRectApplyAffineTransform([[x, y], [w, h]], transform)

      # Apply a rotation on the hat, based on the face inclination.
      hatAngle = Math.atan2(feature.leftEyePosition.x -
                            feature.rightEyePosition.x,
                            feature.leftEyePosition.y -
                            feature.rightEyePosition.y) + Math::PI / 2
      hatView.transform = CGAffineTransformMakeRotation(hatAngle)

      view.addSubview(hatView)
    end
  end

  # Show the previous image (wrapping around to the last one)
  #
  def swipe_previous(gesture)
    idx = @images.index(view.image)
    view.image = (idx == 0) ? @images.last : @images[idx - 1]
    hatify
  end

  # Show the next image (wrapping around to the first one)
  #
  def swipe_next(gesture)
    idx = @images.index(view.image)
    view.image = (idx == @images.size - 1) ? @images.first : @images[idx + 1]
    hatify
  end
end
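
# --- Usage sketch (not part of the original gist) ---
# A minimal RubyMotion app delegate showing how this controller could be
# installed as the window's root view controller. The AppDelegate class and
# the overall app setup here are assumptions for illustration only, not
# taken from the Tomtelizer source.
class AppDelegate
  def application(application, didFinishLaunchingWithOptions: launchOptions)
    @window = UIWindow.alloc.initWithFrame(UIScreen.mainScreen.bounds)
    @window.rootViewController = TomtelizerViewController.alloc.init
    @window.makeKeyAndVisible
    true
  end
end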