private bucket #20

Open · wants to merge 5 commits into master
9 changes: 4 additions & 5 deletions lib/fog/backblaze/storage/models/file.rb
@@ -53,10 +53,9 @@ def public_url
       service.get_object_url(directory.key, key)
     end

-    # TODO
-    #def url(expires)
-    #  requires :key
-    #  collection.get_https_url(key, expires)
-    #end
+    def url(attr)
+      requires :directory, :key
+      service.get_public_object_url(directory.key, key, {})
+    end

  end
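For context, a minimal usage sketch of the new `url` method through fog's model API. This example is not part of the PR: the provider name and credential keys follow fog-backblaze's usual configuration, and the bucket and object names are placeholders.

```ruby
require 'fog/backblaze'

# Credential keys as documented by fog-backblaze; adjust to your setup.
storage = Fog::Storage.new(
  provider: 'backblaze',
  b2_key_id: ENV['B2_KEY_ID'],
  b2_key_token: ENV['B2_KEY_TOKEN']
)

file = storage.directories.get('my-private-bucket').files.get('path/to/file.txt')

# The expiry argument is part of fog's File#url contract but is ignored by
# this implementation, which delegates to service.get_public_object_url.
puts file.url(Time.now + 3600)
```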
196 changes: 140 additions & 56 deletions lib/fog/backblaze/storage/real.rb
@@ -88,8 +88,8 @@ def list_buckets(options = {})
     def get_bucket(bucket_name)
       response = list_buckets(bucketName: bucket_name)

-      bucket = response.json['buckets'].detect do |bucket|
-        bucket['bucketName'] == bucket_name
+      bucket = response.json['buckets'].detect do |bucket_data|
+        bucket_data['bucketName'] == bucket_name
       end

       unless bucket
@@ -124,7 +124,7 @@ def delete_bucket(bucket_name, options = {})
         raise Fog::Errors::Error, "Failed delete_bucket, status = #{response.status} #{response.body}"
       end

-      if cached = @token_cache.buckets
+      if @token_cache.buckets
         #cached.delete(bucket_name)
         #@token_cache.buckets = cached
         @token_cache.buckets = nil
@@ -173,57 +173,14 @@ def head_object(bucket_name, file_path, options = {})
     # * last_modified - time object or number of milliseconds
     # * content_disposition
     # * extra_headers - hash, list of custom headers
-    def put_object(bucket_name, file_path, content, options = {})
-      upload_url = @token_cache.fetch("upload_url/#{bucket_name}") do
-        bucket_id = _get_bucket_id!(bucket_name)
-        result = b2_command(:b2_get_upload_url, body: {bucketId: bucket_id})
-        result.json
-      end
-
-      if content.is_a?(IO)
-        content = content.read
-      end
-
-      extra_headers = {}
-      if options[:content_type]
-        extra_headers['Content-Type'] = options[:content_type]
-      end
-
-      if options[:last_modified]
-        value = if options[:last_modified].is_a?(::Time)
-          (options[:last_modified].to_f * 1000).round
-        else
-          value
-        end
-        extra_headers['X-Bz-Info-src_last_modified_millis'] = value
-      end
-
-      if options[:content_disposition]
-        extra_headers['X-Bz-Info-b2-content-disposition'] = options[:content_disposition]
-      end
-
-      if options[:extra_headers]
-        options[:extra_headers].each do |key, value|
-          extra_headers["X-Bz-Info-#{key}"] = value
-        end
-      end
-
-      response = b2_command(nil,
-        url: upload_url['uploadUrl'],
-        body: content,
-        headers: {
-          'Authorization': upload_url['authorizationToken'],
-          'Content-Type': 'b2/x-auto',
-          'X-Bz-File-Name': "#{_esc_file(file_path)}",
-          'X-Bz-Content-Sha1': Digest::SHA1.hexdigest(content)
-        }.merge(extra_headers)
-      )
-
-      if response.json['fileId'] == nil
-        raise Fog::Errors::Error, "Failed put_object, status = #{response.status} #{response.body}"
-      end
-
-      response
-    end
+    def put_object(bucket_name, file_path, local_file_path, options = {})
+      bucket_id = _get_bucket_id!(bucket_name)
+
+      if local_file_path.size / 1024 / 1024 > 50
+        handle_large_object_upload(bucket_id, bucket_name, file_path, local_file_path, options)
+      else
+        handle_small_object_upload(bucket_id, bucket_name, file_path, local_file_path, options)
+      end
+    end

     # generates url regardless if bucket is private or not
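A hedged sketch of calling the reworked `put_object` with the options documented above, assuming `service` is a connected `Fog::Backblaze::Storage::Real` instance; the bucket and object names are placeholders, not taken from the PR.

```ruby
# Hypothetical call exercising the documented options.
service.put_object(
  'my-private-bucket',
  'backups/db.dump',
  ::File.open('/tmp/db.dump', 'rb'),        # read as content by the small-object path
  content_type: 'application/octet-stream',
  last_modified: Time.now,                  # sent as X-Bz-Info-src_last_modified_millis
  extra_headers: { quality: 'high' }        # sent as X-Bz-Info-quality
)
```

Note the asymmetry in the third argument: the branch test calls `.size` on it and `handle_small_object_upload` treats it as content (IO or String), while `handle_large_object_upload` passes it to `File.stat` and `File.read` as a filesystem path, so the two branches currently disagree about what the caller should supply.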
@@ -311,7 +268,7 @@ def create_key(name, capabilities: nil, bucket_id: nil, name_prefix: nil)
         'deleteFiles'
       ]

-      response = b2_command(:b2_create_key,
+      b2_command(:b2_create_key,
         body: {
           accountId: b2_account_id,
           keyName: name,
@@ -341,8 +298,8 @@ def list_keys
     def _get_object_version_ids(bucket_name, file_name)
       response = b2_command(:b2_list_file_versions,
         body: {
-          startFileName: file_name,
-          prefix: file_name,
+          startFileName: File.basename(file_name),
+          prefix: File.dirname(file_name),
           bucketId: _get_bucket_id!(bucket_name),
           maxFileCount: 1000
         }
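To illustrate the changed listing parameters, the object key is now split into a prefix and a start name. A quick console check of what Ruby's stdlib returns (not from the PR):

```ruby
File.basename('photos/2019/cat.jpg')  # => "cat.jpg"     (startFileName)
File.dirname('photos/2019/cat.jpg')   # => "photos/2019" (prefix)
File.dirname('cat.jpg')               # => "."           (prefix for a top-level key)
```

The last case is worth noting: for keys without a slash, `File.dirname` returns `"."`, which is unlikely to match any B2 prefix.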
@@ -437,6 +394,133 @@ def auth_response
       @auth_response.json
     end

+    def handle_large_object_upload(bucket_id, bucket_name, file_path, local_file_path, options = {})
+      # Start large file upload
+      start_large_file_response = b2_command(
+        :b2_start_large_file,
+        body: {
+          bucketId: bucket_id,
+          fileName: file_path,
+          contentType: options[:content_type]
+        }
+      )
+
+      # Get upload part url
+      file_id = start_large_file_response.json["fileId"]
+      get_upload_part_url_response = b2_command(
+        :b2_get_upload_part_url,
+        body: {
+          fileId: file_id
+        }
+      )
+
+      upload_url = get_upload_part_url_response.json["uploadUrl"]
+      minimum_part_size_bytes = @token_cache.auth_response["recommendedPartSize"]
+      upload_authorization_token = get_upload_part_url_response.json["authorizationToken"]
+      local_file_size = File.stat(local_file_path).size
+      total_bytes_sent = 0
+      bytes_sent_for_part = minimum_part_size_bytes
+      # SHA1 of each uploaded part.
+      # You will need to save these because you will need them in b2_finish_large_file
+      sha1_of_parts = Array.new
+      sha1_part_index = 1
+      while total_bytes_sent < local_file_size do
+        # The last part carries whatever remains and may be smaller
+        if (local_file_size - total_bytes_sent) < minimum_part_size_bytes
+          bytes_sent_for_part = local_file_size - total_bytes_sent
+        end
+
+        # Read the part into memory and calculate its SHA1
+        file_part_data = File.read(local_file_path, bytes_sent_for_part, total_bytes_sent, mode: "rb")
+        sha1_of_parts.push(Digest::SHA1.hexdigest(file_part_data))
+        hex_digest_of_part = sha1_of_parts[sha1_part_index - 1]
+
+        # Send it over the wire
+        uri = URI(upload_url)
+        req = Net::HTTP::Post.new(uri)
+        req.add_field("Authorization", upload_authorization_token)
+        req.add_field("X-Bz-Part-Number", sha1_part_index)
+        req.add_field("X-Bz-Content-Sha1", hex_digest_of_part)
+        req.add_field("Content-Length", bytes_sent_for_part)
+        req.body = file_part_data
+        http = Net::HTTP.new(req.uri.host, req.uri.port)
+        http.use_ssl = (req.uri.scheme == 'https')
+        res = http.start { |_http| _http.request(req) }
+        unless res.is_a?(Net::HTTPSuccess)
+          raise Fog::Errors::Error, "Failed to upload part #{sha1_part_index}, status = #{res.code} #{res.body}"
+        end
+
+        # Prepare for the next iteration of the loop
+        total_bytes_sent += bytes_sent_for_part
+        sha1_part_index += 1
+      end
+
+      b2_command(
+        :b2_finish_large_file,
+        body: {
+          fileId: file_id,
+          partSha1Array: sha1_of_parts
+        }
+      )
+    end
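To make the chunking loop above concrete: every part except the last is `recommendedPartSize` bytes, and the last part carries the remainder. A standalone sketch of the same arithmetic, with illustrative sizes (the real part size comes from the B2 auth response):

```ruby
part_size = 100 * 1024 * 1024   # e.g. a 100 MB recommendedPartSize
file_size = 250 * 1024 * 1024   # a 250 MB local file

sent = 0
parts = []
while sent < file_size
  n = [part_size, file_size - sent].min   # shrinks only for the final part
  parts << n
  sent += n
end

parts.map { |bytes| bytes / 1024 / 1024 } # => [100, 100, 50]
```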

+    def handle_small_object_upload(bucket_id, bucket_name, file_path, local_file_path, options = {})
+      upload_url = @token_cache.fetch("upload_url/#{bucket_name}") do
+        result = b2_command(:b2_get_upload_url, body: {bucketId: bucket_id})
+        result.json
+      end
+
+      if local_file_path.is_a?(IO)
+        local_file_path = local_file_path.read
+      end
+
+      extra_headers = {}
+      if options[:content_type]
+        extra_headers['Content-Type'] = options[:content_type]
+      end
+
+      if options[:last_modified]
+        value = if options[:last_modified].is_a?(::Time)
+          (options[:last_modified].to_f * 1000).round
+        else
+          options[:last_modified]
+        end
+        extra_headers['X-Bz-Info-src_last_modified_millis'] = value
+      end
+
+      if options[:content_disposition]
+        extra_headers['X-Bz-Info-b2-content-disposition'] = options[:content_disposition]
+      end
+
+      if options[:extra_headers]
+        options[:extra_headers].each do |header_key, header_value|
+          extra_headers["X-Bz-Info-#{header_key}"] = header_value
+        end
+      end
+
+      response = b2_command(nil,
+        url: upload_url['uploadUrl'],
+        body: local_file_path,
+        headers: {
+          'Authorization': upload_url['authorizationToken'],
+          'Content-Type': 'b2/x-auto',
+          'X-Bz-File-Name': "#{_esc_file(file_path)}",
+          'X-Bz-Content-Sha1': Digest::SHA1.hexdigest(local_file_path)
+        }.merge(extra_headers)
+      )
+
+      if response.json['fileId'] == nil
+        raise Fog::Errors::Error, "Failed put_object, status = #{response.status} #{response.body}"
+      end
+
+      response
+    end
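B2 verifies every upload against the `X-Bz-Content-Sha1` header, which is why both upload paths above hexdigest the exact bytes they send: the whole body for small objects, each part for large ones. For reference:

```ruby
require 'digest'

Digest::SHA1.hexdigest('hello')  # => "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"
```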

     def b2_command(command, options = {})
       auth_response = self.auth_response
       options[:headers] ||= {}