author     David Heinemeier Hansson <david@loudthinking.com>  2017-07-02 16:47:28 +0200
committer  David Heinemeier Hansson <david@loudthinking.com>  2017-07-02 16:47:28 +0200
commit     cc2c5f428ae0606fe37050772c248bafafd187f0 (patch)
tree       7133eec001a140fdafa4e2cdc1cec9d1eb50cc0e /lib/active_file/sites/s3_site.rb
parent     27c2516f4868863ea27b85f94885641f61add700 (diff)
Start on S3 site
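
A rough sketch of how the site is meant to be used. The credential
values, region, bucket name, and the `file` handle below are
placeholders for illustration, not part of this diff:

    site = ActiveFile::Sites::S3Site.new(
      access_key_id: "...", secret_access_key: "...",
      region: "us-east-1", bucket: "user-uploads")

    site.upload "profile-picture", file.read
    site.exists?("profile-picture")    # => true
    site.byte_size("profile-picture")  # => size in bytes

    site.download "profile-picture"                                # whole object
    site.download("profile-picture") { |chunk| file.write(chunk) } # 5 MB chunks
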
Diffstat (limited to 'lib/active_file/sites/s3_site.rb')
-rw-r--r--  lib/active_file/sites/s3_site.rb  77
1 file changed, 77 insertions, 0 deletions
diff --git a/lib/active_file/sites/s3_site.rb b/lib/active_file/sites/s3_site.rb
new file mode 100644
index 0000000000..46c409405a
--- /dev/null
+++ b/lib/active_file/sites/s3_site.rb
@@ -0,0 +1,77 @@
+require "aws-sdk"
+
+class ActiveFile::Sites::S3Site < ActiveFile::Site
+ attr_reader :client, :bucket
+
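+  # Sets up an S3 client and the bucket all operations are performed against.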
+ def initialize(access_key_id:, secret_access_key:, region:, bucket:)
+ @client = Aws::S3::Resource.new(access_key_id: access_key_id, secret_access_key: secret_access_key, region: region)
+ @bucket = @client.bucket(bucket)
+ end
+
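+  # Uploads the data, an IO or String, to S3 under the given key.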
+ def upload(key, data)
+ object_for(key).put(body: data)
+ end
+
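+  # Returns the object's data, or yields it in chunks when a block is given.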
+  def download(key, &block)
+    if block_given?
+      stream(key, &block)
+    else
+      object_for(key).get.body.read
+    end
+  end
+
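+  # Deletes the object stored at the given key.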
+ def delete(key)
+ object_for(key).delete
+ end
+
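+  # Returns true if an object exists at the given key.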
+ def exists?(key)
+ object_for(key).exists?
+ end
+
+
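+  # Returns the size of the object in bytes.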
+ def byte_size(key)
+    object_for(key).content_length
+ end
+
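+  # Returns the MD5 digest for the key, if one can be determined.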
+  def checksum(key)
+    object = object_for(key)
+
+    # If the etag has no dashes, it's the MD5 of a single-part upload
+    if !object.etag.include?("-")
+      object.etag.gsub('"', '')
+    # Multipart uploads don't get an MD5 etag, so check the metadata for one
+    elsif md5sum = object.metadata["md5sum"]
+      md5sum
+    # Otherwise, we don't have a digest yet for this key
+    else
+      nil
+    end
+  end
+
+
+ private
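+    # Returns the Aws::S3::Object wrapping the given key in the bucket.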
+ def object_for(key)
+ bucket.object(key)
+ end
+
+ # Reads the object for the given key in chunks, yielding each to the block.
+    def stream(key, options = {})
+ object = object_for(key)
+
+ chunk_size = 5242880 # 5 megabytes
+ offset = 0
+
+ while offset < object.content_length
+      yield object.get(options.merge(range: "bytes=#{offset}-#{offset + chunk_size - 1}")).body.read
+ offset += chunk_size
+ end
+ end
+end