@mrcljx
Created December 30, 2010 16:13
require 'aws/s3'
require 'mime/types'

class StoredFile < ActiveRecord::Base
  include AWS::S3

  after_destroy :delete_on_s3!

  validates_presence_of :filename, :size, :content_type
  validates_numericality_of :size, :greater_than => 0
  def extension
    ext = File.extname(filename)
    ext.blank? ? nil : ext
  end

  def upload_done?
    !uploaded_at.nil?
  end
  def easy_setup(key, content_type_hint = "")
    # dup so the in-place gsub! calls below don't mutate the caller's string
    sanitized_key = key.dup
    sanitized_key.gsub!(/\s+/, '-')
    sanitized_key.gsub!(/[^A-Za-z0-9\._-]/, '')
    sanitized_key = "unnamed" if sanitized_key.blank?
    self.filename = sanitized_key
    self.content_type = detect_content_type(key, content_type_hint)
  end
  def upload_done!
    return if upload_done?
    raise "Does not exist on S3" unless exists_on_s3?
    self.uploaded_at = Time.now
    save!
  end

  def authenticated_s3_url(*args)
    options = args.extract_options!
    options[:expires_in] = options[:expires_in].to_i if options[:expires_in]
    S3Object.url_for(base_path, bucket_name, options)
  end

  def s3_object_value(&block)
    S3Object.find(base_path, bucket_name).value(&block)
  end

  def s3_object_stream(&block)
    S3Object.stream(base_path, bucket_name, &block)
  end

  def s3_url
    if use_vanity_style?
      File.join(s3_protocol + bucket_name + "." + s3_hostname + s3_port_string, base_path)
    else
      File.join(s3_protocol + s3_hostname + s3_port_string, bucket_name, base_path)
    end
  end
  protected

  def delete_on_s3!
    S3Object.delete(base_path, bucket_name, {})
  end

  def use_vanity_style?
    true
  end

  def base_path
    File.join("documents", self.id.to_s)
  end

  def bucket_name
    S3Config.bucket
  end

  def exists_on_s3?
    @s3_object ||= S3Object.find(base_path, bucket_name)
  rescue NoSuchKey
    false
  end
  def s3_protocol
    use_s3_ssl? ? "https://" : "http://"
  end

  def s3_hostname
    AWS::S3::DEFAULT_HOST
  end

  def s3_port_string
    # must be a string, since s3_url concatenates it onto the hostname
    use_s3_ssl? ? ":443" : ":80"
  end

  def use_s3_ssl?
    false
  end
  def detect_content_type(filename, hint = '')
    types = MIME::Types[hint]
    types = MIME::Types.type_for(filename) if types.blank?
    content_type = types.blank? ? "application/octet-stream" : types.first
    content_type.to_s
  end
end
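Neither the model above nor the controller below defines the S3Config object they call (bucket, access_key_id, secret_access_key, acl, server). A minimal sketch of what it could look like, purely to make the gist self-contained: the attribute names come from the gist, while the file location and values here are made up.

# e.g. config/initializers/s3_config.rb (hypothetical path and values)
module S3Config
  class << self
    def bucket;            "my-upload-bucket";           end
    def access_key_id;     ENV["S3_ACCESS_KEY_ID"];      end
    def secret_access_key; ENV["S3_SECRET_ACCESS_KEY"];  end
    def acl;               "private";                    end
    # host the browser POSTs to; authorize_write_to_s3 builds the form URL from it
    def server;            "#{bucket}.s3.amazonaws.com"; end
  end
end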
// You might need to change the lines that use js-routes (the Router helper).
// The code also assumes a global variable `authenticity_token` is defined in the
// page (e.g. from Rails' form_authenticity_token); see the routes sketch after
// this function for the server side of these assumptions.
function createUploader(buttonId, containerId, customCallbacks) {
  var defaultCallbacks = {
    ready: function() {
      var $btn = $('#uploadContainer .btn').removeClass('disabled');
      $('#uploadContainer .btn span.text').text($btn.attr('ready'));
    },
    newFile: $.noop,
    fileRemoved: $.noop,
    preparationStarted: $.noop,
    uploadStarted: $.noop,
    uploadProgress: $.noop,
    uploadDone: $.noop,
    uploadDoneNotified: $.noop,
    error: $.noop
  };
  var callbacks = $.extend({}, defaultCallbacks, customCallbacks);

  var uploader = new plupload.Uploader({
    runtimes: 'flash',
    multipart: true,
    browse_button: buttonId,
    container: containerId,
    url: Router.authorize_upload_path(),
    flash_swf_url: '/flash/flex/plupload.swf',
    silverlight_xap_url: '/javascripts/plupload/plupload.silverlight.xap'
  });
  uploader.bind('Init', function(up, params) {
    up.authorizedQueueState = "INACTIVE";
    callbacks.ready();
  });

  uploader.bind('FilesAdded', function(up, files) {
    $.each(files, function(i, file) {
      callbacks.newFile(file);
    });
    setTimeout(function() {
      up.trigger('StartAuthorizedQueue');
    }, 0);
  });

  uploader.bind('PrepareUploadFile', function(up, file) {
    if (file.cancelled) {
      return true;
    }
    callbacks.preparationStarted(file);
    $.ajax({
      type: 'GET',
      url: Router.authorize_upload_path(),
      dataType: 'json',
      data: {
        authenticity_token: authenticity_token,
        file_size: file.size,
        file_name: file.name
      },
      success: function(data) {
        if (!data.ok) {
          up.trigger('Error', {
            message: data.errors.join(', '),
            file: file
          });
          up.trigger('ContinueAuthorizedQueue');
          return;
        }
        if (file.cancelled) {
          up.trigger('ContinueAuthorizedQueue');
          return;
        }
        file.headers = data.headers;
        file.params = data.params;
        file.uploadTo = data.url;
        file.handle = data.file;
        setTimeout(function() {
          up.settings.headers = file.headers;
          up.settings.multipart_params = file.params;
          up.settings.url = file.uploadTo;
          up.trigger('UploadFile', file);
        }, 0);
      },
      error: function() {
        up.trigger('Error', {
          message: "Request failed.",
          file: file
        });
        up.trigger('ContinueAuthorizedQueue');
      }
    });
  });
  uploader.bind('StartAuthorizedQueue', function(up) {
    if (up.authorizedQueueState === "ACTIVE") {
      return;
    }
    if (typeof(up.fileIndex) === "undefined") {
      up.fileIndex = 0;
    }
    up.trigger('ContinueAuthorizedQueue');
  });

  uploader.bind('ContinueAuthorizedQueue', function(up) {
    if (up.fileIndex > up.files.length) {
      up.fileIndex = up.files.length;
    }
    if (up.fileIndex < up.files.length) {
      up.authorizedQueueState = "ACTIVE";
      up.trigger('PrepareUploadFile', up.files[up.fileIndex++]);
    } else {
      up.authorizedQueueState = "INACTIVE";
    }
  });

  uploader.bind('FilesRemoved', function(up, files) {
    $.each(files, function(i, file) {
      file.cancelled = true;
      callbacks.fileRemoved(file);
    });
  });
  uploader.bind('FileUploaded', function(up, file, response) {
    callbacks.uploadDone(file);
    $.ajax({
      url: Router.finalize_upload_path(),
      type: 'POST',
      dataType: 'xml',
      data: {
        authenticity_token: authenticity_token,
        file: file.handle
      },
      success: function(data) {
        file.notificationResponse = data;
        callbacks.uploadDoneNotified(file, data);
        up.trigger('ContinueAuthorizedQueue');
      }
    });
  });

  uploader.bind('UploadFile', function(up, file) {
    callbacks.uploadStarted(file);
  });

  uploader.bind('UploadProgress', function(up, file) {
    if (file.status === plupload.UPLOADING) {
      callbacks.uploadProgress(file);
    }
  });

  uploader.bind('Error', function(up, err) {
    callbacks.error(err, err.file);
  });

  uploader.init();
  return uploader;
}
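Router.authorize_upload_path() and Router.finalize_upload_path() above are js-routes helpers, so the Rails app needs matching named routes. A rough sketch of what those routes might look like: only the helper names and the controller actions appear in the gist, while the paths, HTTP verbs, and application name here are assumptions.

# config/routes.rb (sketch for a Rails 3 app; adjust names and paths to yours)
MyApp::Application.routes.draw do
  # js-routes exposes these as Router.authorize_upload_path() and
  # Router.finalize_upload_path() on the client side.
  get  'uploads/authorize', :to => 'uploads#authorize', :as => :authorize_upload
  post 'uploads/finalize',  :to => 'uploads#finalize',  :as => :finalize_upload
end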
class UploadsController < ApplicationController
  def finalize
    @file = StoredFile.find(params[:file])
    @file.upload_done!
  end

  def authorize
    authorize_write_to_s3 do |stored_file|
      success = stored_file.save
      errors = success ? [] : stored_file.errors.full_messages
      [success, errors]
    end
  end
  protected

  def authorize_write_to_s3(https = false)
    raise "no block given" unless block_given?

    bucket = S3Config.bucket
    access_key_id = S3Config.access_key_id
    acl = S3Config.acl

    content_type = params[:content_type].to_s
    file_size = params[:file_size].to_i
    unchecked_key = params[:file_name].to_s

    stored_file = StoredFile.new do |file|
      file.easy_setup(unchecked_key, content_type)
      file.size = file_size
    end

    success, errors = yield stored_file

    if success
      content_type = stored_file.content_type
      full_key = stored_file.base_path
      expiration_date = 2.hours.from_now.utc.strftime('%Y-%m-%dT%H:%M:%S.000Z')

      # ['starts-with', '$Filename', ''],
      policy = Base64.encode64(
        "{
          'expiration': '#{expiration_date}',
          'conditions': [
            {'bucket': '#{bucket}'},
            {'key': '#{full_key}'},
            {'acl': '#{acl}'},
            {'Content-Type': '#{content_type}'},
            {'success_action_status': '201'},
            ['content-length-range', #{file_size}, #{file_size}],
            ['starts-with', '$filename', ''],
          ]
        }").gsub(/\n|\r/, '')

      signature = self.sign(policy, S3Config.secret_access_key)
      full_url = "#{https ? 'https' : 'http'}://#{S3Config.server}/"
      respond_to do |wants|
        wants.json do
          render :json => {
            :ok => true,
            :url => full_url,
            :file => stored_file.id.to_s,
            :headers => [],
            :params => ActiveSupport::OrderedHash[*([
              ["key", full_key],
              ["AWSAccessKeyId", access_key_id],
              ["acl", acl],
              ["Content-Type", content_type],
              ["success_action_status", "201"],
              ["policy", policy],
              ["signature", signature],
              ["Filename", ""]
            ].flatten)]
          }
        end
      end
    else
      errors = [errors] unless errors.is_a? Array
      errors = ["Unknown Error"] if errors.blank?
      respond_to do |wants|
        wants.json do
          # the uploader JS reads data.errors, so the key is :errors (not :alerts)
          render :json => {
            :ok => false,
            :errors => errors
          }
        end
      end
    end
  end
  def sign(message, secret)
    raise 'missing secret' unless secret
    hmac = HMAC::SHA1.new(secret)
    hmac.update(message)
    hmaced = hmac.digest.to_s
    Base64.encode64(hmaced).chomp.gsub(/\n/, '')
  end
end
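The sign method relies on HMAC::SHA1 from the ruby-hmac gem. If you would rather avoid that dependency, the same Base64-encoded signature can be produced with Ruby's bundled OpenSSL library; a drop-in sketch, not part of the original gist:

require 'openssl'
require 'base64'

# Equivalent to the sign method above, using the OpenSSL stdlib instead of ruby-hmac.
def sign(message, secret)
  raise 'missing secret' unless secret
  Base64.encode64(OpenSSL::HMAC.digest('sha1', secret, message)).chomp
end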