Google Vision test with Haskell
#!/usr/bin/env stack
-- stack --resolver nightly --install-ghc runghc --package wreq --package lens --package lens-aeson --package base64-bytestring --package ReadArgs --package formatting
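--
-- Usage (argument order follows the readArgs pattern in main; the script
-- file name here is hypothetical):
--   ./google-vision.hs <imageFile> <apiKey> <featureType> [maxResults]
-- e.g.
--   ./google-vision.hs cat.jpg <YOUR_API_KEY> LABEL_DETECTION 5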
-- Make it easy to write literal ByteString and Text values.
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE RecordWildCards #-}
import ReadArgs (readArgs)
import Data.Maybe
import Formatting
import Formatting.Clock
import System.Clock
import Network.Wreq as HTTP
import Control.Lens hiding ((.=))
import Data.ByteString.Lazy as BL hiding (putStrLn)
import Data.ByteString.Base64.Lazy as Base64
import Data.ByteString.Lazy.Char8 as BL hiding (readFile)
import Data.Text as T
import Data.Text.IO as T
import Data.Foldable (traverse_)
import Data.Text.Lazy.Encoding as TE
import Data.Aeson as JSON
import Data.Aeson.Lens (key, nth)
data VisionRequest = VisionRequest
  { image    :: ByteString      -- ^ Raw image bytes; base64-encoded on serialization.
  , features :: [VisionFeature]
  } deriving (Show)
data VisionFeature = VisionFeature
  { featureType :: VisionFeatureType
  , maxResults  :: Int
  } deriving (Show)
data VisionFeatureType
  = TYPE_UNSPECIFIED      -- ^ Unspecified feature type.
  | FACE_DETECTION        -- ^ Run face detection.
  | LANDMARK_DETECTION    -- ^ Run landmark detection.
  | LOGO_DETECTION        -- ^ Run logo detection.
  | LABEL_DETECTION       -- ^ Run label detection.
  | TEXT_DETECTION        -- ^ Run OCR.
  | SAFE_SEARCH_DETECTION -- ^ Run computer vision models to compute image safe-search properties.
  | IMAGE_PROPERTIES      -- ^ Compute a set of properties about the image (such as the image's dominant colors).
  deriving (Show, Read)
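-- Note: the derived Read instance is what lets readArgs parse the feature
-- type straight from the command line, e.g. passing LABEL_DETECTION as the
-- third argument.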
main :: IO ()
main = do
  -- Parse positional arguments: image file, API key, feature type, and an
  -- optional result limit.
  ( imageFile :: FilePath
    , apiKey  :: Text
    , mode    :: VisionFeatureType
    , results :: Maybe Int ) <- readArgs
  let visionAPIOptions k = defaults & params .~ [("key", k)]
  visionRequest <- VisionRequest <$> BL.readFile imageFile
                                 <*> pure [VisionFeature mode (fromMaybe 1 results)]
  startT   <- getTime Realtime
  response <- HTTP.postWith (visionAPIOptions apiKey)
                            "https://vision.googleapis.com/v1/images:annotate"
                            (JSON.object ["requests" .= [visionRequest]])
  endT     <- getTime Realtime
  -- Print the raw JSON response body, then the elapsed request time.
  traverse_ BL.putStrLn $ response ^? responseBody
  fprint ("Request took: " % timeSpecs % "\n") startT endT
-- * JSON Instances
instance ToJSON VisionRequest where
  toJSON VisionRequest{..} =
    JSON.object
      [ "image"    .= JSON.object ["content" .= TE.decodeUtf8 (Base64.encode image)]
      , "features" .= toJSON features
      ]

instance ToJSON VisionFeature where
  toJSON VisionFeature{..} =
    JSON.object
      [ "type"       .= toJSON featureType
      , "maxResults" .= toJSON maxResults
      ]

instance ToJSON VisionFeatureType where
  toJSON = JSON.String . T.pack . show
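-- For reference, the instances above (wrapped in the "requests" object built
-- in main) produce a request body of this shape, base64 payload elided:
--
-- { "requests":
--     [ { "image":    { "content": "<base64-encoded image>" }
--       , "features": [ { "type": "LABEL_DETECTION", "maxResults": 1 } ]
--       }
--     ]
-- }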