From 6d3962461fb8d35fc9538d685fee96267663acf2 Mon Sep 17 00:00:00 2001 From: Jeremy Daer Date: Tue, 11 Jul 2017 14:09:39 -0600 Subject: S3: slim down service implementation (#40) * Use simple core API for duck-type compat with other clients * initialize: accept an existing client * initialize: accept arbitrary client args instead of a fixed, required set * download: use native get_object streaming, no need to implement range requests * exists?: use head_object (which returns immediately) rather than waiting for existence --- lib/active_storage/service/s3_service.rb | 60 +++++++++++++------------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/lib/active_storage/service/s3_service.rb b/lib/active_storage/service/s3_service.rb index c3b6688bb9..ad55db0dc0 100644 --- a/lib/active_storage/service/s3_service.rb +++ b/lib/active_storage/service/s3_service.rb @@ -4,84 +4,72 @@ require "active_support/core_ext/numeric/bytes" class ActiveStorage::Service::S3Service < ActiveStorage::Service attr_reader :client, :bucket - def initialize(access_key_id:, secret_access_key:, region:, bucket:) - @client = Aws::S3::Resource.new(access_key_id: access_key_id, secret_access_key: secret_access_key, region: region) - @bucket = @client.bucket(bucket) + def initialize(bucket:, client: nil, **client_options) + @bucket = bucket + @client = client || Aws::S3::Client.new(client_options) end def upload(key, io, checksum: nil) instrument :upload, key, checksum: checksum do begin - object_for(key).put(body: io, content_md5: checksum) + client.put_object bucket: bucket, key: key, body: io, content_md5: checksum rescue Aws::S3::Errors::BadDigest raise ActiveStorage::IntegrityError end end end - def download(key) + def download(key, &block) if block_given? 
instrument :streaming_download, key do - stream(key, &block) + client.get_object bucket: bucket, key: key, &block end else instrument :download, key do - object_for(key).get.body.read.force_encoding(Encoding::BINARY) + "".b.tap do |data| + client.get_object bucket: bucket, key: key, response_target: data + end end end end def delete(key) instrument :delete, key do - object_for(key).delete + client.delete_object bucket: bucket, key: key end end def exist?(key) instrument :exist, key do |payload| - answer = object_for(key).exists? - payload[:exist] = answer - answer + payload[:exist] = + begin + client.head_object bucket: bucket, key: key + rescue Aws::S3::Errors::NoSuchKey + false + else + true + end end end def url(key, expires_in:, disposition:, filename:) instrument :url, key do |payload| - generated_url = object_for(key).presigned_url :get, expires_in: expires_in, + payload[:url] = presigner.presigned_url :get_object, + bucket: bucket, key: key, expires_in: expires_in, response_content_disposition: "#{disposition}; filename=\"#{filename}\"" - - payload[:url] = generated_url - - generated_url end end def url_for_direct_upload(key, expires_in:, content_type:, content_length:) instrument :url, key do |payload| - generated_url = object_for(key).presigned_url :put, expires_in: expires_in, + payload[:url] = presigner.presigned_url :put_object, + bucket: bucket, key: key, expires_in: expires_in, content_type: content_type, content_length: content_length - - payload[:url] = generated_url - - generated_url end end private - def object_for(key) - bucket.object(key) - end - - # Reads the object for the given key in chunks, yielding each to the block. 
- def stream(key, options = {}, &block) - object = object_for(key) - - chunk_size = 5.megabytes - offset = 0 - - while offset < object.content_length - yield object.read(options.merge(range: "bytes=#{offset}-#{offset + chunk_size - 1}")) - offset += chunk_size - end + def presigner + @presigner ||= Aws::S3::Presigner.new client: client end end -- cgit v1.2.3