Class: Fog::AWS::Storage::Mock

Inherits:
Object
Includes:
CredentialFetcher::ConnectionMethods, DeleteObjectUrl, GetObjectHttpUrl, GetObjectHttpsUrl, GetObjectUrl, HeadObjectUrl, PostObjectHiddenFields, PutObjectUrl, SharedMockMethods, Utils
Defined in:
lib/fog/aws/requests/storage/get_bucket.rb,
lib/fog/aws/storage.rb,
lib/fog/aws/requests/storage/get_object.rb,
lib/fog/aws/requests/storage/put_bucket.rb,
lib/fog/aws/requests/storage/put_object.rb,
lib/fog/aws/requests/storage/sync_clock.rb,
lib/fog/aws/requests/storage/copy_object.rb,
lib/fog/aws/requests/storage/get_service.rb,
lib/fog/aws/requests/storage/head_bucket.rb,
lib/fog/aws/requests/storage/head_object.rb,
lib/fog/aws/requests/storage/upload_part.rb,
lib/fog/aws/requests/storage/delete_bucket.rb,
lib/fog/aws/requests/storage/delete_object.rb,
lib/fog/aws/requests/storage/get_bucket_acl.rb,
lib/fog/aws/requests/storage/get_object_acl.rb,
lib/fog/aws/requests/storage/get_object_url.rb,
lib/fog/aws/requests/storage/put_bucket_acl.rb,
lib/fog/aws/requests/storage/put_object_acl.rb,
lib/fog/aws/requests/storage/put_object_url.rb,
lib/fog/aws/requests/storage/get_bucket_cors.rb,
lib/fog/aws/requests/storage/head_object_url.rb,
lib/fog/aws/requests/storage/put_bucket_cors.rb,
lib/fog/aws/requests/storage/upload_part_copy.rb,
lib/fog/aws/requests/storage/delete_object_url.rb,
lib/fog/aws/requests/storage/put_bucket_policy.rb,
lib/fog/aws/requests/storage/get_bucket_tagging.rb,
lib/fog/aws/requests/storage/put_bucket_tagging.rb,
lib/fog/aws/requests/storage/put_bucket_website.rb,
lib/fog/aws/requests/storage/get_bucket_location.rb,
lib/fog/aws/requests/storage/get_object_http_url.rb,
lib/fog/aws/requests/storage/get_request_payment.rb,
lib/fog/aws/requests/storage/post_object_restore.rb,
lib/fog/aws/requests/storage/put_request_payment.rb,
lib/fog/aws/requests/storage/delete_bucket_policy.rb,
lib/fog/aws/requests/storage/get_object_https_url.rb,
lib/fog/aws/requests/storage/delete_bucket_tagging.rb,
lib/fog/aws/requests/storage/get_bucket_versioning.rb,
lib/fog/aws/requests/storage/put_bucket_versioning.rb,
lib/fog/aws/requests/storage/abort_multipart_upload.rb,
lib/fog/aws/requests/storage/delete_multiple_objects.rb,
lib/fog/aws/requests/storage/get_bucket_notification.rb,
lib/fog/aws/requests/storage/put_bucket_notification.rb,
lib/fog/aws/requests/storage/complete_multipart_upload.rb,
lib/fog/aws/requests/storage/initiate_multipart_upload.rb,
lib/fog/aws/requests/storage/post_object_hidden_fields.rb,
lib/fog/aws/requests/storage/get_bucket_object_versions.rb

Overview

Real

Instance Attribute Summary

Attributes included from Utils

#disable_content_md5_validation, #max_copy_chunk_size, #max_put_chunk_size, #region

Class Method Summary

Instance Method Summary

Methods included from PostObjectHiddenFields

#post_object_hidden_fields

Methods included from GetObjectHttpsUrl

#get_object_https_url

Methods included from GetObjectHttpUrl

#get_object_http_url

Methods included from DeleteObjectUrl

#delete_object_url

Methods included from HeadObjectUrl

#head_object_url

Methods included from PutObjectUrl

#put_object_url

Methods included from GetObjectUrl

#get_object_url

Methods included from SharedMockMethods

#define_mock_acl, #get_upload_info, #parse_mock_data, #store_mock_object, #verify_mock_bucket_exists

Methods included from CredentialFetcher::ConnectionMethods

#refresh_credentials_if_expired

Methods included from Utils

#cdn, #http_url, #https_url, #request_url, #require_mime_types, #signed_url, #url, #validate_chunk_size

Constructor Details

#initialize(options = {}) ⇒ Mock

Returns a new instance of Mock.



# File 'lib/fog/aws/storage.rb', line 471

def initialize(options={})
  require_mime_types

  @use_iam_profile = options[:use_iam_profile]

  @region = options[:region] || DEFAULT_REGION

  if @endpoint = options[:endpoint]
    endpoint = URI.parse(@endpoint)
    @host = endpoint.host
    @scheme = endpoint.scheme
    @port = endpoint.port
  else
    @host       = options[:host]        || region_to_host(@region)
    @scheme     = options[:scheme]      || DEFAULT_SCHEME
    @port       = options[:port]        || DEFAULT_SCHEME_PORT[@scheme]
  end


  @path_style = options[:path_style] || false

  init_max_put_chunk_size!(options)
  init_max_copy_chunk_size!(options)

  @disable_content_md5_validation = options[:disable_content_md5_validation] || false

  @signature_version = options.fetch(:aws_signature_version, 4)
  validate_signature_version!
  setup_credentials(options)
end
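
Example (illustrative sketch, not generated from the source): with mocking enabled, the service class returns this Mock in place of Real; the credentials, region and names below are placeholders.

require 'fog/aws'

Fog.mock!

storage = Fog::AWS::Storage.new(
  :aws_access_key_id     => 'fake_access_key_id',     # placeholders; never validated in mock mode
  :aws_secret_access_key => 'fake_secret_access_key',
  :region                => 'eu-west-1'
)
storage.class # => Fog::AWS::Storage::Mock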

Class Method Details

.acls(type) ⇒ Object



# File 'lib/fog/aws/storage.rb', line 386

def self.acls(type)
  case type
  when 'private'
    {
      "AccessControlList" => [
        {
          "Permission" => "FULL_CONTROL",
          "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
        }
      ],
      "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
    }
  when 'public-read'
    {
      "AccessControlList" => [
        {
          "Permission" => "FULL_CONTROL",
          "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
        },
        {
          "Permission" => "READ",
          "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AllUsers"}
        }
      ],
      "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
    }
  when 'public-read-write'
    {
      "AccessControlList" => [
        {
          "Permission" => "FULL_CONTROL",
          "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
        },
        {
          "Permission" => "READ",
          "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AllUsers"}
        },
        {
          "Permission" => "WRITE",
          "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AllUsers"}
        }
      ],
      "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
    }
  when 'authenticated-read'
    {
      "AccessControlList" => [
        {
          "Permission" => "FULL_CONTROL",
          "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
        },
        {
          "Permission" => "READ",
          "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"}
        }
      ],
      "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"}
    }
  end
end
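
Example (illustrative sketch): inspecting the canned grants the mock returns for one of the supported canned ACL names.

# Returns the stubbed ACL hash shown above for 'public-read'.
acl = Fog::AWS::Storage::Mock.acls('public-read')
acl['AccessControlList'].map { |grant| grant['Permission'] } # => ["FULL_CONTROL", "READ"]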

.data ⇒ Object



# File 'lib/fog/aws/storage.rb', line 447

def self.data
  @data ||= Hash.new do |hash, region|
    hash[region] = Hash.new do |region_hash, key|
      region_hash[key] = {
        :acls => {
          :bucket => {},
          :object => {}
        },
        :buckets => {},
        :cors => {
          :bucket => {}
        },
        :bucket_notifications => {},
        :bucket_tagging => {},
        :multipart_uploads => {}
      }
    end
  end
end

.reset ⇒ Object



# File 'lib/fog/aws/storage.rb', line 467

def self.reset
  @data = nil
end
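
Example (illustrative sketch): mock state lives in class-level storage shared by every mocked connection, so test suites typically clear it between runs.

# Drop every mocked bucket, object, ACL and multipart upload across all regions/accounts.
Fog::AWS::Storage::Mock.reset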

Instance Method Details

#abort_multipart_upload(bucket_name, object_name, upload_id) ⇒ Object



# File 'lib/fog/aws/requests/storage/abort_multipart_upload.rb', line 30

def abort_multipart_upload(bucket_name, object_name, upload_id)
  verify_mock_bucket_exists(bucket_name)
  upload_info = get_upload_info(bucket_name, upload_id, true)
  response = Excon::Response.new
  if upload_info
    response.status = 204
    response
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 204}, response))
  end
end

#byte_range(range, size) ⇒ Object



# File 'lib/fog/aws/requests/storage/upload_part_copy.rb', line 101

def byte_range(range, size)
  matches = range.match(/bytes=(\d*)-(\d*)/)

  return nil unless matches

  end_pos = [matches[2].to_i, size].min

  [matches[1].to_i, end_pos]
end

#complete_multipart_upload(bucket_name, object_name, upload_id, parts) ⇒ Object



# File 'lib/fog/aws/requests/storage/complete_multipart_upload.rb', line 53

def complete_multipart_upload(bucket_name, object_name, upload_id, parts)
  bucket = verify_mock_bucket_exists(bucket_name)
  upload_info = get_upload_info(bucket_name, upload_id, true)
  body = parts.map { |pid| upload_info[:parts][pid.to_i] }.join
  object = store_mock_object(bucket, object_name, body, upload_info[:options])

  response = Excon::Response.new
  response.status = 200
  response.body = {
    'Location' => "http://#{bucket_name}.s3.amazonaws.com/#{object_name}",
    'Bucket'   => bucket_name,
    'Key'      => object_name,
    'ETag'     => object['ETag'],
  }
  response.headers['x-amz-version-id'] = object['VersionId'] if object['VersionId'] != 'null'
  response
end
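
Example (illustrative sketch): a full multipart round trip against the mock; `storage` is a mocked connection as in the #initialize example and the bucket/object names are placeholders.

storage.put_bucket('fog-demo')
upload_id = storage.initiate_multipart_upload('fog-demo', 'big-file').body['UploadId']

# The mock uses the part number as the ETag for each uploaded part.
etag1 = storage.upload_part('fog-demo', 'big-file', upload_id, 1, 'hello ').headers['ETag']
etag2 = storage.upload_part('fog-demo', 'big-file', upload_id, 2, 'world').headers['ETag']

storage.complete_multipart_upload('fog-demo', 'big-file', upload_id, [etag1, etag2])
storage.get_object('fog-demo', 'big-file').body # => "hello world"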

#copy_object(source_bucket_name, source_object_name, target_bucket_name, target_object_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/copy_object.rb', line 45

def copy_object(source_bucket_name, source_object_name, target_bucket_name, target_object_name, options = {})
  response = Excon::Response.new
  source_bucket = self.data[:buckets][source_bucket_name]
  source_object = source_bucket && source_bucket[:objects][source_object_name] && source_bucket[:objects][source_object_name].first
  target_bucket = self.data[:buckets][target_bucket_name]

  acl = options['x-amz-acl'] || 'private'
  if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl)
    raise Excon::Errors::BadRequest.new('invalid x-amz-acl')
  else
    self.data[:acls][:object][target_bucket_name] ||= {}
    self.data[:acls][:object][target_bucket_name][target_object_name] = self.class.acls(acl)
  end

  if source_object && target_bucket
    response.status = 200
    target_object = source_object.dup
    target_object.merge!({'Key' => target_object_name})
    target_bucket[:objects][target_object_name] = [target_object]
    response.body = {
      'ETag'          => target_object['ETag'],
      'LastModified'  => Time.parse(target_object['Last-Modified'])
    }
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end

  response
end
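
Example (illustrative sketch): copying between two mocked buckets; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('source-bucket')
storage.put_bucket('target-bucket')
storage.put_object('source-bucket', 'report.txt', 'contents')

storage.copy_object('source-bucket', 'report.txt', 'target-bucket', 'report-copy.txt')
storage.get_object('target-bucket', 'report-copy.txt').body # => "contents"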

#data ⇒ Object



# File 'lib/fog/aws/storage.rb', line 502

def data
  self.class.data[@region][@aws_access_key_id]
end

#delete_bucket(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/delete_bucket.rb', line 25

def delete_bucket(bucket_name)
  response = Excon::Response.new
  if self.data[:buckets][bucket_name].nil?
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 204}, response))
  elsif self.data[:buckets][bucket_name] && !self.data[:buckets][bucket_name][:objects].empty?
    response.status = 409
    raise(Excon::Errors.status_error({:expects => 204}, response))
  else
    self.data[:buckets].delete(bucket_name)
    response.status = 204
  end
  response
end

#delete_bucket_policy(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/delete_bucket_policy.rb', line 26

def delete_bucket_policy(bucket_name)
  if bucket = data[:buckets][bucket_name]
    bucket[:policy] = nil

    Excon::Response.new.tap do |response|
      response.body = { 'RequestId' => Fog::AWS::Mock.request_id }
      response.status = 200
    end
  else
    response = Excon::Response.new
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
end

#delete_bucket_tagging(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/delete_bucket_tagging.rb', line 26

def delete_bucket_tagging(bucket_name)
  response = Excon::Response.new
  if self.data[:buckets][bucket_name]
    self.data[:bucket_tagging].delete(bucket_name)
    response.status = 204
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 204}, response))
  end

  response
end

#delete_multiple_objects(bucket_name, object_names, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/delete_multiple_objects.rb', line 73

def delete_multiple_objects(bucket_name, object_names, options = {})
  headers = options.dup
  headers.delete(:quiet)
  response = Excon::Response.new
  if bucket = self.data[:buckets][bucket_name]
    response.status = 200
    response.body = { 'DeleteResult' => [] }
    version_ids = headers.delete('versionId')
    object_names.each do |object_name|
      object_version = version_ids.nil? ? [nil] : version_ids[object_name]
      object_version = object_version.is_a?(String) ? [object_version] : object_version
      object_version.each do |version_id|
        response.body['DeleteResult'] << delete_object_helper(bucket,
                                                          object_name,
                                                          version_id)
      end
    end
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#delete_object(bucket_name, object_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/delete_object.rb', line 36

def delete_object(bucket_name, object_name, options = {})
  response = Excon::Response.new
  if bucket = self.data[:buckets][bucket_name]
    response.status = 204

    version_id = options.delete('versionId')

    if bucket[:versioning]
      bucket[:objects][object_name] ||= []

      if version_id
        version = bucket[:objects][object_name].find { |object| object['VersionId'] == version_id}

        # S3 special cases the 'null' value to not error out if no such version exists.
        if version || (version_id == 'null')
          bucket[:objects][object_name].delete(version)
          bucket[:objects].delete(object_name) if bucket[:objects][object_name].empty?

          response.headers['x-amz-delete-marker'] = 'true' if version[:delete_marker]
          response.headers['x-amz-version-id'] = version_id
        else
          response.status = 400
          response.body = invalid_version_id_payload(version_id)
          raise(Excon::Errors.status_error({:expects => 200}, response))
        end
      else
        delete_marker = {
          :delete_marker    => true,
          'Key'             => object_name,
          'VersionId'       => bucket[:versioning] == 'Enabled' ? Fog::Mock.random_base64(32) : 'null',
          'Last-Modified'   => Fog::Time.now.to_date_header
        }

        # When versioning is suspended, a delete marker is placed if the last object ID is not the value 'null',
        # otherwise the last object is replaced.
        if bucket[:versioning] == 'Suspended' && bucket[:objects][object_name].first['VersionId'] == 'null'
          bucket[:objects][object_name].shift
        end

        bucket[:objects][object_name].unshift(delete_marker)

        response.headers['x-amz-delete-marker'] = 'true'
        response.headers['x-amz-version-id'] = delete_marker['VersionId']
      end
    else
      if version_id && version_id != 'null'
        response.status = 400
        response.body = invalid_version_id_payload(version_id)
        raise(Excon::Errors.status_error({:expects => 200}, response))
      else
        bucket[:objects].delete(object_name)

        response.headers['x-amz-version-id'] = 'null'
      end
    end
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 204}, response))
  end
  response
end
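
Example (illustrative sketch): deleting from an unversioned mocked bucket; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-demo')
storage.put_object('fog-demo', 'tmp.txt', 'scratch')

storage.delete_object('fog-demo', 'tmp.txt').status # => 204

begin
  storage.get_object('fog-demo', 'tmp.txt')
rescue Excon::Errors::NotFound
  # the key is gone; the mock raises a 404 status error
end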

#get_bucket(bucket_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket.rb', line 56

def get_bucket(bucket_name, options = {})
  prefix, marker, delimiter, max_keys = \
    options['prefix'], options['marker'], options['delimiter'], options['max-keys']
  common_prefixes = []

  unless bucket_name
    raise ArgumentError.new('bucket_name is required')
  end
  response = Excon::Response.new
  if bucket = self.data[:buckets][bucket_name]
    contents = bucket[:objects].values.map(&:first).sort {|x,y| x['Key'] <=> y['Key']}.reject do |object|
        (prefix    && object['Key'][0...prefix.length] != prefix) ||
        (marker    && object['Key'] <= marker) ||
        (delimiter && object['Key'][(prefix ? prefix.length : 0)..-1].include?(delimiter) \
                   && common_prefixes << object['Key'].sub(/^(#{prefix}[^#{delimiter}]+.).*/, '\1')) ||
        object.key?(:delete_marker)
      end.map do |object|
        data = object.reject {|key, value| !['ETag', 'Key', 'StorageClass'].include?(key)}
        data.merge!({
          'LastModified' => Time.parse(object['Last-Modified']),
          'Owner'        => bucket['Owner'],
          'Size'         => object['Content-Length'].to_i
        })
      data
    end
    max_keys = max_keys || 1000
    size = [max_keys, 1000].min
    truncated_contents = contents[0...size]

    response.status = 200
    response.body = {
      'CommonPrefixes'  => common_prefixes.uniq,
      'Contents'        => truncated_contents,
      'IsTruncated'     => truncated_contents.size != contents.size,
      'Marker'          => marker,
      'MaxKeys'         => max_keys,
      'Name'            => bucket['Name'],
      'Prefix'          => prefix
    }
    if max_keys && max_keys < response.body['Contents'].length
        response.body['IsTruncated'] = true
        response.body['Contents'] = response.body['Contents'][0...max_keys]
    end
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end
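
Example (illustrative sketch): listing mocked keys under a prefix; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-demo')
storage.put_object('fog-demo', 'logs/2024-01-01.txt', 'a')
storage.put_object('fog-demo', 'logs/2024-01-02.txt', 'b')
storage.put_object('fog-demo', 'readme.txt', 'c')

listing = storage.get_bucket('fog-demo', 'prefix' => 'logs/')
listing.body['Contents'].map { |object| object['Key'] }
# => ["logs/2024-01-01.txt", "logs/2024-01-02.txt"]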

#get_bucket_acl(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_acl.rb', line 47

def get_bucket_acl(bucket_name)
  response = Excon::Response.new
  if acl = self.data[:acls][:bucket][bucket_name]
    response.status = 200
    if acl.is_a?(String)
      response.body = Fog::AWS::Storage.acl_to_hash(acl)
    else
      response.body = acl
    end
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_bucket_cors(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_cors.rb', line 43

def get_bucket_cors(bucket_name)
  response = Excon::Response.new
  if cors = self.data[:cors][:bucket][bucket_name]
    response.status = 200
    if cors.is_a?(String)
      response.body = Fog::AWS::Storage.cors_to_hash(cors)
    else
      response.body = cors
    end
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_bucket_location(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_location.rb', line 31

def get_bucket_location(bucket_name)
  response = Excon::Response.new
  if bucket = self.data[:buckets][bucket_name]
    location_constraint = case bucket['LocationConstraint']
    when 'us-east-1'
      nil
    when 'eu-east-1'
      'EU'
    else
      bucket['LocationConstraint']
    end

    response.status = 200
    response.body = {'LocationConstraint' => location_constraint }
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_bucket_notification(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_notification.rb', line 45

def get_bucket_notification(bucket_name)
  response = Excon::Response.new

  if self.data[:buckets][bucket_name] && self.data[:bucket_notifications][bucket_name]
    response.status = 200
    response.body = self.data[:bucket_notifications][bucket_name]
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_bucket_object_versions(bucket_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_object_versions.rb', line 66

def get_bucket_object_versions(bucket_name, options = {})
  delimiter, key_marker, max_keys, prefix, version_id_marker = \
    options['delimiter'], options['key-marker'], options['max-keys'], options['prefix'], options['version-id-marker']
  common_prefixes = []

  unless bucket_name
    raise ArgumentError.new('bucket_name is required')
  end

  response = Excon::Response.new

  # Invalid arguments.
  if version_id_marker && !key_marker
    response.status = 400
    response.body = {
      'Error' => {
        'Code' => 'InvalidArgument',
        'Message' => 'A version-id marker cannot be specified without a key marker.',
        'ArgumentValue' => version_id_marker,
        'RequestId' => Fog::Mock.random_hex(16),
        'HostId' => Fog::Mock.random_base64(65)
      }
    }
    raise(Excon::Errors.status_error({:expects => 200}, response))

  # Valid case.
  # TODO: (nirvdrum 12/15/11) It's not clear to me how to actually use version-id-marker, so I didn't implement it below.
  elsif bucket = self.data[:buckets][bucket_name]
    # We need to order results by S3 key, but since our data store is key => [versions], we want to ensure the integrity
    # of the versions as well.  So, sort the keys, then fetch the versions, and then combine them all as a sorted list by
    # flattening the results.
    contents = bucket[:objects].keys.sort.map { |key| bucket[:objects][key] }.flatten.reject do |object|
        (prefix      && object['Key'][0...prefix.length] != prefix) ||
        (key_marker  && object['Key'] <= key_marker) ||
        (delimiter   && object['Key'][(prefix ? prefix.length : 0)..-1].include?(delimiter) \
                     && common_prefixes << object['Key'].sub(/^(#{prefix}[^#{delimiter}]+.).*/, '\1'))
      end.map do |object|
        if object.key?(:delete_marker)
          tag_name = 'DeleteMarker'
          extracted_attrs = ['Key', 'VersionId']
        else
          tag_name = 'Version'
          extracted_attrs = ['ETag', 'Key', 'StorageClass', 'VersionId']
        end

        data = {}
        data[tag_name] = object.reject { |key, value| !extracted_attrs.include?(key) }
        data[tag_name].merge!({
          'LastModified' => Time.parse(object['Last-Modified']),
          'Owner'        => bucket['Owner'],
          'IsLatest'     => object == bucket[:objects][object['Key']].first
        })

        data[tag_name]['Size'] = object['Content-Length'].to_i if tag_name == 'Version'
      data
    end

    max_keys = max_keys || 1000
    size = [max_keys, 1000].min
    truncated_contents = contents[0...size]

    response.status = 200
    response.body = {
      'Versions'        => truncated_contents,
      'IsTruncated'     => truncated_contents.size != contents.size,
      'KeyMarker'       => key_marker,
      'VersionIdMarker' => version_id_marker,
      'MaxKeys'         => max_keys,
      'Name'            => bucket['Name'],
      'Prefix'          => prefix
    }
    if max_keys && max_keys < response.body['Versions'].length
        response.body['IsTruncated'] = true
        response.body['Versions'] = response.body['Versions'][0...max_keys]
    end

  # Missing bucket case.
  else
    response.status = 404
    response.body = {
      'Error' => {
        'Code' => 'NoSuchBucket',
        'Message' => 'The specified bucket does not exist',
        'BucketName' => bucket_name,
        'RequestId' => Fog::Mock.random_hex(16),
        'HostId' => Fog::Mock.random_base64(65)
      }
    }

    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_bucket_tagging(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_tagging.rb', line 35

def get_bucket_tagging(bucket_name)
  response = Excon::Response.new
  if self.data[:buckets][bucket_name] && self.data[:bucket_tagging][bucket_name]
    response.status = 200
    response.body = {'BucketTagging' => self.data[:bucket_tagging][bucket_name]}
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_bucket_versioning(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_bucket_versioning.rb', line 35

def get_bucket_versioning(bucket_name)
  response = Excon::Response.new
  bucket = self.data[:buckets][bucket_name]

  if bucket
    response.status = 200

    if bucket[:versioning]
      response.body = { 'VersioningConfiguration' => { 'Status' => bucket[:versioning] } }
    else
      response.body = { 'VersioningConfiguration' => {} }
    end

  else
    response.status = 404
    response.body = {
      'Error' => {
        'Code' => 'NoSuchBucket',
        'Message' => 'The specified bucket does not exist',
        'BucketName' => bucket_name,
        'RequestId' => Fog::Mock.random_hex(16),
        'HostId' => Fog::Mock.random_base64(65)
      }
    }

    raise(Excon::Errors.status_error({:expects => 200}, response))
  end

  response
end

#get_object(bucket_name, object_name, options = {}, &block) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_object.rb', line 68

def get_object(bucket_name, object_name, options = {}, &block)
  version_id = options.delete('versionId')

  unless bucket_name
    raise ArgumentError.new('bucket_name is required')
  end

  unless object_name
    raise ArgumentError.new('object_name is required')
  end

  response = Excon::Response.new
  if (bucket = self.data[:buckets][bucket_name])
    object = nil
    if bucket[:objects].key?(object_name)
      object = version_id ? bucket[:objects][object_name].find { |object| object['VersionId'] == version_id} : bucket[:objects][object_name].first
    end

    if (object && !object[:delete_marker])
      if options['If-Match'] && options['If-Match'] != object['ETag']
        response.status = 412
        raise(Excon::Errors.status_error({:expects => 200}, response))
      elsif options['If-Modified-Since'] && options['If-Modified-Since'] >= Time.parse(object['Last-Modified'])
        response.status = 304
        raise(Excon::Errors.status_error({:expects => 200}, response))
      elsif options['If-None-Match'] && options['If-None-Match'] == object['ETag']
        response.status = 304
        raise(Excon::Errors.status_error({:expects => 200}, response))
      elsif options['If-Unmodified-Since'] && options['If-Unmodified-Since'] < Time.parse(object['Last-Modified'])
        response.status = 412
        raise(Excon::Errors.status_error({:expects => 200}, response))
      else
        response.status = 200
        for key, value in object
          case key
          when 'Cache-Control', 'Content-Disposition', 'Content-Encoding', 'Content-Length', 'Content-MD5', 'Content-Type', 'ETag', 'Expires', 'Last-Modified', /^x-amz-meta-/
            response.headers[key] = value
          end
        end

        response.headers['x-amz-version-id'] = object['VersionId'] if bucket[:versioning]

        body = object[:body]
        if options['Range']
          # since AWS S3 itself does not support multiple range headers, we will use only the first
          ranges = byte_ranges(options['Range'], body.size)
          unless ranges.nil? || ranges.empty?
            response.status = 206
            body = body[ranges.first]
          end
        end

        unless block_given?
          response.body = body
        else
          data = StringIO.new(body)
          remaining = total_bytes = data.length
          while remaining > 0
            chunk = data.read([remaining, Excon::CHUNK_SIZE].min)
            block.call(chunk, remaining, total_bytes)
            remaining -= Excon::CHUNK_SIZE
          end
        end
      end
    elsif version_id && !object
      response.status = 400
      response.body = {
        'Error' => {
          'Code' => 'InvalidArgument',
          'Message' => 'Invalid version id specified',
          'ArgumentValue' => version_id,
          'ArgumentName' => 'versionId',
          'RequestId' => Fog::Mock.random_hex(16),
          'HostId' => Fog::Mock.random_base64(65)
        }
      }

      raise(Excon::Errors.status_error({:expects => 200}, response))
    else
      response.status = 404
      response.body = "...<Code>NoSuchKey<\/Code>..."
      raise(Excon::Errors.status_error({:expects => 200}, response))
    end
  else
    response.status = 404
    response.body = "...<Code>NoSuchBucket</Code>..."
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end
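
Example (illustrative sketch): fetching a mocked object, either whole or streamed in chunks via a block; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-demo')
storage.put_object('fog-demo', 'data.bin', 'abcdefgh')

storage.get_object('fog-demo', 'data.bin').body # => "abcdefgh"

chunks = []
storage.get_object('fog-demo', 'data.bin') do |chunk, remaining, total|
  chunks << chunk
end
chunks.join # => "abcdefgh"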

#get_object_acl(bucket_name, object_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_object_acl.rb', line 58

def get_object_acl(bucket_name, object_name, options = {})
  response = Excon::Response.new
  if acl = self.data[:acls][:object][bucket_name] && self.data[:acls][:object][bucket_name][object_name]
    response.status = 200
    if acl.is_a?(String)
      response.body = Fog::AWS::Storage.acl_to_hash(acl)
    else
      response.body = acl
    end
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_request_payment(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/get_request_payment.rb', line 31

def get_request_payment(bucket_name)
  response = Excon::Response.new
  if bucket = self.data[:buckets][bucket_name]
    response.status = 200
    response.body = { 'Payer' => bucket['Payer'] }
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#get_service ⇒ Object



# File 'lib/fog/aws/requests/storage/get_service.rb', line 33

def get_service
  response = Excon::Response.new
  response.headers['Status'] = 200
  buckets = self.data[:buckets].values.map do |bucket|
    bucket.reject do |key, value|
      !['CreationDate', 'Name'].include?(key)
    end
  end
  response.body = {
    'Buckets' => buckets,
    'Owner'   => { 'DisplayName' => 'owner', 'ID' => 'some_id'}
  }
  response
end

#head_bucket(bucket_name) ⇒ Object



# File 'lib/fog/aws/requests/storage/head_bucket.rb', line 30

def head_bucket(bucket_name)
  response = get_bucket(bucket_name)
  response.body = nil
  response
end

#head_object(bucket_name, object_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/head_object.rb', line 54

def head_object(bucket_name, object_name, options = {})
  response = get_object(bucket_name, object_name, options)
  response.body = nil
  response
end

#initiate_multipart_upload(bucket_name, object_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/initiate_multipart_upload.rb', line 45

def initiate_multipart_upload(bucket_name, object_name, options = {})
  verify_mock_bucket_exists(bucket_name)
  upload_id = UUID.uuid
  self.data[:multipart_uploads][bucket_name] ||= {}
  self.data[:multipart_uploads][bucket_name][upload_id] = {
    :parts => {},
    :options => options,
  }

  response = Excon::Response.new
  response.status = 200
  response.body = {
    "Bucket" => bucket_name,
    "Key" => object_name,
    "UploadId" => upload_id,
  }
  response
end

#post_object_restore(bucket_name, object_name, days = 100000) ⇒ Object



# File 'lib/fog/aws/requests/storage/post_object_restore.rb', line 42

def post_object_restore(bucket_name, object_name, days = 100000)
  response = get_object(bucket_name, object_name)
  response.body = nil
  response
end

#put_bucket(bucket_name, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket.rb', line 40

def put_bucket(bucket_name, options = {})
  acl = options['x-amz-acl'] || 'private'
  if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl)
    raise Excon::Errors::BadRequest.new('invalid x-amz-acl')
  else
    self.data[:acls][:bucket][bucket_name] = self.class.acls(acl)
  end

  response = Excon::Response.new
  response.status = 200
  bucket = {
    :objects        => {},
    'Name'          => bucket_name,
    'CreationDate'  => Time.now,
    'Owner'         => { 'DisplayName' => 'owner', 'ID' => 'some_id'},
    'Payer'         => 'BucketOwner'
  }
  if options['LocationConstraint']
    bucket['LocationConstraint'] = options['LocationConstraint']
  else
    bucket['LocationConstraint'] = nil
  end
  if !self.data[:buckets][bucket_name]
    self.data[:buckets][bucket_name] = bucket
  end
  response
end
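
Example (illustrative sketch): creating a mocked bucket with a region constraint and a canned ACL; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-eu-demo',
  'LocationConstraint' => 'eu-west-1',
  'x-amz-acl'          => 'public-read')

storage.get_bucket_location('fog-eu-demo').body # => {"LocationConstraint"=>"eu-west-1"}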

#put_bucket_acl(bucket_name, acl) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket_acl.rb', line 56

def put_bucket_acl(bucket_name, acl)
  if acl.is_a?(Hash)
    self.data[:acls][:bucket][bucket_name] = Fog::AWS::Storage.hash_to_acl(acl)
  else
    if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl)
      raise Excon::Errors::BadRequest.new('invalid x-amz-acl')
    end
    self.data[:acls][:bucket][bucket_name] = acl
  end
end

#put_bucket_cors(bucket_name, cors) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket_cors.rb', line 41

def put_bucket_cors(bucket_name, cors)
  self.data[:cors][:bucket][bucket_name] = Fog::AWS::Storage.hash_to_cors(cors)
end

#put_bucket_notification(bucket_name, notification) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket_notification.rb', line 67

def put_bucket_notification(bucket_name, notification)
  response = Excon::Response.new

  if self.data[:buckets][bucket_name]
    self.data[:bucket_notifications][bucket_name] = notification
    response.status = 204
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 204}, response))
  end

  response
end

#put_bucket_policy(bucket_name, policy) ⇒ Object

FIXME: You can’t actually use the credentials for anything elsewhere in Fog.
FIXME: Doesn’t do any validation on the policy.



# File 'lib/fog/aws/requests/storage/put_bucket_policy.rb', line 27

def put_bucket_policy(bucket_name, policy)
  if bucket = data[:buckets][bucket_name]
    bucket[:policy] = policy

    Excon::Response.new.tap do |response|
      response.body = { 'RequestId' => Fog::AWS::Mock.request_id }
      response.status = 200
    end
  else
    raise Fog::AWS::IAM::NotFound.new("The bucket with name #{bucket_name} cannot be found.")
  end
end
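
Example (illustrative sketch): storing and removing a bucket policy; the mock keeps the policy verbatim and does not validate it. `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-demo')

policy = {
  'Version'   => '2012-10-17',
  'Statement' => [{
    'Effect'    => 'Allow',
    'Principal' => '*',
    'Action'    => 's3:GetObject',
    'Resource'  => 'arn:aws:s3:::fog-demo/*'
  }]
}

storage.put_bucket_policy('fog-demo', policy)
storage.delete_bucket_policy('fog-demo')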

#put_bucket_tagging(bucket_name, tags) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket_tagging.rb', line 39

def put_bucket_tagging(bucket_name, tags)
  response = Excon::Response.new

  if self.data[:buckets][bucket_name]
    self.data[:bucket_tagging][bucket_name] = tags
    response.status = 204
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 204}, response))
  end

  response
end

#put_bucket_versioning(bucket_name, status) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket_versioning.rb', line 32

def put_bucket_versioning(bucket_name, status)
  response = Excon::Response.new
  bucket = self.data[:buckets][bucket_name]

  if bucket
    if ['Enabled', 'Suspended'].include?(status)
      bucket[:versioning] = status

      response.status = 200
    else
      response.status = 400
      response.body = {
        'Error' => {
          'Code' => 'MalformedXML',
          'Message' => 'The XML you provided was not well-formed or did not validate against our published schema',
          'RequestId' => Fog::Mock.random_hex(16),
          'HostId' => Fog::Mock.random_base64(65)
        }
      }

      raise(Excon::Errors.status_error({:expects => 200}, response))
    end
  else
    response.status = 404
    response.body = {
      'Error' => {
        'Code' => 'NoSuchBucket',
        'Message' => 'The specified bucket does not exist',
        'BucketName' => bucket_name,
        'RequestId' => Fog::Mock.random_hex(16),
        'HostId' => Fog::Mock.random_base64(65)
      }
    }

    raise(Excon::Errors.status_error({:expects => 200}, response))
  end

  response
end
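
Example (illustrative sketch): enabling versioning on a mocked bucket and reading back an older version; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-demo')
storage.put_bucket_versioning('fog-demo', 'Enabled')

v1 = storage.put_object('fog-demo', 'note.txt', 'first').headers['x-amz-version-id']
storage.put_object('fog-demo', 'note.txt', 'second')

storage.get_object('fog-demo', 'note.txt').body                     # => "second"
storage.get_object('fog-demo', 'note.txt', 'versionId' => v1).body  # => "first"
storage.get_bucket_versioning('fog-demo').body
# => {"VersioningConfiguration"=>{"Status"=>"Enabled"}}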

#put_bucket_website(bucket_name, suffix, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_bucket_website.rb', line 73

def put_bucket_website(bucket_name, suffix, options = {})
  response = Excon::Response.new
  if self.data[:buckets][bucket_name]
    response.status = 200
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end

  response
end

#put_object(bucket_name, object_name, data, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_object.rb', line 64

def put_object(bucket_name, object_name, data, options = {})
  define_mock_acl(bucket_name, object_name, options)

  data = parse_mock_data(data)
  headers = data[:headers].merge!(options)
  Fog::AWS::Storage::Real.conforming_to_us_ascii! headers.keys.grep(/^x-amz-meta-/), headers
  bucket = verify_mock_bucket_exists(bucket_name)

  options['Content-Type'] ||= data[:headers]['Content-Type']
  options['Content-Length'] ||= data[:headers]['Content-Length']
  object = store_mock_object(bucket, object_name, data[:body], options)

  response = Excon::Response.new
  response.status = 200

  response.headers = {
    'Content-Length'   => object['Content-Length'],
    'Content-Type'     => object['Content-Type'],
    'ETag'             => object['ETag'],
    'Last-Modified'    => object['Last-Modified'],
  }

  response.headers['x-amz-version-id'] = object['VersionId'] if object['VersionId'] != 'null'
  response
end
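
Example (illustrative sketch): writing a mocked object with an explicit content type; `storage` is a mocked connection as in the #initialize example.

storage.put_bucket('fog-demo')

response = storage.put_object('fog-demo', 'hello.txt', 'Hello, world!',
  'Content-Type' => 'text/plain')

response.headers['Content-Type'] # => "text/plain"
response.headers['ETag']         # content-derived ETag assigned by the mock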

#put_object_acl(bucket_name, object_name, acl, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_object_acl.rb', line 65

def put_object_acl(bucket_name, object_name, acl, options = {})
  if acl.is_a?(Hash)
    self.data[:acls][:object][bucket_name][object_name] = Fog::AWS::Storage.hash_to_acl(acl)
  else
    if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl)
      raise Excon::Errors::BadRequest.new('invalid x-amz-acl')
    end
    self.data[:acls][:object][bucket_name][object_name] = acl
  end
end

#put_request_payment(bucket_name, payer) ⇒ Object



# File 'lib/fog/aws/requests/storage/put_request_payment.rb', line 31

def put_request_payment(bucket_name, payer)
  response = Excon::Response.new
  if bucket = self.data[:buckets][bucket_name]
    response.status = 200
    bucket['Payer'] = payer
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
  response
end

#reset_data ⇒ Object



# File 'lib/fog/aws/storage.rb', line 506

def reset_data
  self.class.data[@region].delete(@aws_access_key_id)
end

#setup_credentials(options) ⇒ Object



# File 'lib/fog/aws/storage.rb', line 510

def setup_credentials(options)
  @aws_credentials_refresh_threshold_seconds = options[:aws_credentials_refresh_threshold_seconds]

  @aws_access_key_id = options[:aws_access_key_id]
  @aws_secret_access_key = options[:aws_secret_access_key]
  @aws_session_token     = options[:aws_session_token]
  @aws_credentials_expire_at = options[:aws_credentials_expire_at]

  @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 's3')
end

#signature_v2(params, expires) ⇒ Object



# File 'lib/fog/aws/storage.rb', line 521

def signature_v2(params, expires)
  'foo'
end

#sync_clock ⇒ Object




# File 'lib/fog/aws/requests/storage/sync_clock.rb', line 26

def sync_clock
  true
end

#upload_part(bucket_name, object_name, upload_id, part_number, data, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/upload_part.rb', line 42

def upload_part(bucket_name, object_name, upload_id, part_number, data, options = {})
  data = parse_mock_data(data)
  verify_mock_bucket_exists(bucket_name)
  upload_info = get_upload_info(bucket_name, upload_id)
  upload_info[:parts][part_number] = data[:body]

  response = Excon::Response.new
  response.status = 200
  # just use the part number as the ETag, for simplicity
  response.headers["ETag"] = part_number.to_s
  response
end

#upload_part_copy(target_bucket_name, target_object_name, upload_id, part_number, options = {}) ⇒ Object



# File 'lib/fog/aws/requests/storage/upload_part_copy.rb', line 66

def upload_part_copy(target_bucket_name, target_object_name, upload_id, part_number, options = {})
  validate_options!(options)

  copy_source = options['x-amz-copy-source']
  copy_range = options['x-amz-copy-source-range']

  raise 'No x-amz-copy-source header provided' unless copy_source
  raise 'No x-amz-copy-source-range header provided' unless copy_range

  source_bucket_name, source_object_name = copy_source.split('/', 2)
  verify_mock_bucket_exists(source_bucket_name)

  source_bucket = self.data[:buckets][source_bucket_name]
  source_object = source_bucket && source_bucket[:objects][source_object_name] && source_bucket[:objects][source_object_name].first
  upload_info = get_upload_info(target_bucket_name, upload_id)

  response = Excon::Response.new

  if source_object
    start_pos, end_pos = byte_range(copy_range, source_object[:body].length)
    upload_info[:parts][part_number] = source_object[:body][start_pos..end_pos]

    response.status = 200
    response.body = {
      # just use the part number as the ETag, for simplicity
      'ETag'          => part_number.to_i,
      'LastModified'  => Time.parse(source_object['Last-Modified'])
    }
    response
  else
    response.status = 404
    raise(Excon::Errors.status_error({:expects => 200}, response))
  end
end

#validate_options!(options) ⇒ Object



# File 'lib/fog/aws/requests/storage/upload_part_copy.rb', line 111

def validate_options!(options)
  options.keys.each do |key|
    raise "Invalid UploadPart option: #{key}" unless ::Fog::AWS::Storage::ALLOWED_UPLOAD_PART_OPTIONS.include?(key.to_sym)
  end
end