import requests
import xml.etree.ElementTree as ET
from StringIO import StringIO
from PIL import Image

def get_pano(pano_id, api_key="your_api_key_here"):
    url_request = "https://maps.googleapis.com/maps/api/streetview?size=320x320&pano=" + pano_id + "&fov=120&heading=0&pitch=90&key=" + api_key
    response = requests.get(url_request)
    img = Image.open(StringIO(response.content))
    return img
# undocumented API lets us get more pano metadata
def outdoors(lat, lon):
    # sketch of a body: query the unofficial cbk XML endpoint for pano
    # metadata (hence the ElementTree import). The 'scene' attribute check
    # is an assumption -- verify against an actual response.
    url = "http://maps.google.com/cbk?output=xml&ll=" + str(lat) + "," + str(lon)
    root = ET.fromstring(requests.get(url).content)
    data = root.find('data_properties')
    return data is not None and data.get('scene') != 'indoor'
import shutil
import os.path

# assign each item in pairs to a dataset split; pairs, train_set, test_set
# and val_set are built earlier in the script
for i in range(len(pairs)):
    if i in train_set:
        s = 'train'
    elif i in test_set:
        s = 'test'
    elif i in val_set:
        s = 'val'
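    # (sketch) the loop presumably goes on to file each pair into its
    # split directory; the path layout here is an assumption
    src = pairs[i]
    dst = os.path.join(s, os.path.basename(src))
    shutil.copy(src, dst)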
function defineG(input_nc, output_nc, ngf)
    local netG = nil
    if opt.which_model_netG == "encoder_decoder" then netG = defineG_encoder_decoder(input_nc, output_nc, ngf)
    elseif opt.which_model_netG == "unet" then netG = defineG_unet(input_nc, output_nc, ngf)
    elseif opt.which_model_netG == "unet_128" then netG = defineG_unet_128(input_nc, output_nc, ngf)
    elseif opt.which_model_netG == "unet_512" then netG = defineG_unet_512(input_nc, output_nc, ngf)
    else error("unsupported netG model")
    end
    netG:apply(weights_init)
    return netG
end
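-- usage sketch (values are illustrative, not from the original script):
-- opt = {which_model_netG = "unet_512"}
-- local netG = defineG(3, 3, 64)  -- RGB input/output, ngf = 64 filters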
function defineG_unet_512(input_nc, output_nc, ngf)
    -- two layers more than the default unet to handle 512x512 input
    local netG = nil
    -- input is (nc) x 512 x 512
    local e1 = - nn.SpatialConvolution(input_nc, ngf, 4, 4, 2, 2, 1, 1)
    -- input is (ngf) x 256 x 256
    local e2 = e1 - nn.LeakyReLU(0.2, true) - nn.SpatialConvolution(ngf, ngf * 2, 4, 4, 2, 2, 1, 1) - nn.SpatialBatchNormalization(ngf * 2)
    -- input is (ngf * 2) x 128 x 128
    local e3 = e2 - nn.LeakyReLU(0.2, true) - nn.SpatialConvolution(ngf * 2, ngf * 4, 4, 4, 2, 2, 1, 1) - nn.SpatialBatchNormalization(ngf * 4)
    -- input is (ngf * 4) x 64 x 64
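    -- the remaining encoder stages follow the same pattern as pix2pix's
    -- defineG_unet (each stage halves the resolution and doubles the filter
    -- count up to ngf * 8); e.g. the next stage would be:
    -- local e4 = e3 - nn.LeakyReLU(0.2, true) - nn.SpatialConvolution(ngf * 4, ngf * 8, 4, 4, 2, 2, 1, 1) - nn.SpatialBatchNormalization(ngf * 8)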
from StringIO import StringIO
import requests
from PIL import Image

def get_image(lat, lon, api_key="your_api_key_here"):
    url_request = "https://maps.googleapis.com/maps/api/streetview?size=320x320&location=" + str(lat) + "," + str(lon) + "&fov=120&heading=0&pitch=90&key=" + api_key
    response = requests.get(url_request)
    img = Image.open(StringIO(response.content))
    return img
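# usage sketch: fetch one tile near the Midtown bounds used below
# (coordinates and filename are illustrative; replace the key placeholder)
img = get_image(40.7611, -73.9922, api_key="your_api_key_here")
img.save('pano_sample.png')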
-- usage: DATA_ROOT=/path/to/data/ name=expt1 which_direction=BtoA th hires_generate.lua
--
-- code derived from https://github.com/soumith/dcgan.torch
--
require 'image'
require 'nn'
require 'nngraph'
util = paths.dofile('util/util.lua')
import numpy as np
from PIL import Image

# makes a grid_size x grid_size mosaic
grid_size = 100
# lat lon bounds (min lon, min lat, max lon, max lat)
bounds = [-73.9922010899, 40.7610814675, -73.9758396149, 40.7722462525]
composite = Image.new('RGB', (320*grid_size, 320*grid_size), (0, 0, 0))
lats = np.linspace(bounds[1], bounds[3], grid_size)
lons = np.linspace(bounds[2], bounds[0], grid_size)
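# (sketch) tile the composite by fetching one Street View image per grid
# cell with get_image above; the row/column orientation is an assumption
for row, lat in enumerate(lats):
    for col, lon in enumerate(lons):
        tile = get_image(lat, lon)
        composite.paste(tile, (320*col, 320*row))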
{
  "origin": [
    "#howwhat.capitalize# did you feel #beforeafter# you #took# #this# #picture#?",
    ...
  ],
  "howwhat": ["how", "what"],
  "beforeafter": ["before", "after", "just before", "just after"],
  "took": ["took", "captured", "photographed", "shot", "recorded"],
  "picture": ["picture", "photo", "image", "snapshot", "pic", "photograph", "shot"],
  ...
}
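One way to sanity-check the grammar is to expand it with the pytracery module; this sketch assumes the rules above (including the elided ones) are loaded into a dict named rules:

import tracery
from tracery.modifiers import base_english

grammar = tracery.Grammar(rules)     # rules = the grammar dict above
grammar.add_modifiers(base_english)  # supplies the .capitalize modifier
print(grammar.flatten("#origin#"))
# -> e.g. "How did you feel just before you captured this snapshot?"
#    (assuming the elided #this# rule expands to "this")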
features
  areas
    building : building
    playa : natural = mud
    national park : protection_title = "National Park"
    national preserve : protection_title = "National Preserve"
    national monument : protection_title = "National Monument"
    military : landuse = military
    wilderness : protection_title = "Wilderness Area"
    lake : natural = water
let calculateOpacity = (pos) => {
  // pos 0.0 - 0.1: linear ramp opacity 0.0 -> 0.8
  // pos 0.1 - 0.9: opacity 0.8
  // pos 0.9 - 1.0: linear ramp opacity 0.8 -> 0.0
  let opacity;
  if (pos < 0.1) {
    opacity = pos * 8;
  } else if (pos >= 0.9) {
    opacity = (1.0 - pos) * 8;
  } else {
    opacity = 0.8;
  }
  return opacity;
};
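// usage sketch: fade an element in and out over its normalized lifetime
// (el, elapsed and duration are hypothetical names; e.g.
// calculateOpacity(0.05) -> 0.4, 0.5 -> 0.8, 0.95 -> 0.4)
const fade = (el, elapsed, duration) => {
  el.style.opacity = calculateOpacity(elapsed / duration);
};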