Module: Bosh::OpenStackCloud::Helpers
- Included in:
- Cloud, Network, NetworkConfigurator
- Defined in:
- lib/cloud/openstack/helpers.rb
Constant Summary collapse
- DEFAULT_STATE_TIMEOUT =
Default timeout for target state (in seconds)
300
- MAX_RETRIES =
Max number of retries
10
- DEFAULT_RETRY_TIMEOUT =
Default timeout before retrying a call (in seconds)
3
Instance Method Summary collapse
-
#cloud_error(message, exception = nil) ⇒ Object
Raises CloudError exception.
-
#parse_openstack_response(response, *keys) ⇒ Hash
Parses and looks up keys in an OpenStack response.
- #task_checkpoint ⇒ Object
-
#wait_resource(resource, target_state, state_method = :status, allow_notfound = false) ⇒ Object
Waits for a resource to reach a target state.
- #with_openstack ⇒ Object
Instance Method Details
#cloud_error(message, exception = nil) ⇒ Object
Raises CloudError exception
17 18 19 20 21 |
# File 'lib/cloud/openstack/helpers.rb', line 17 def cloud_error(, exception = nil) @logger.error() if @logger @logger.error(exception) if @logger && exception raise Bosh::Clouds::CloudError, end |
#parse_openstack_response(response, *keys) ⇒ Hash
Parses and looks up keys in an OpenStack response.
61 62 63 64 65 66 67 68 69 70 71 72 |
# File 'lib/cloud/openstack/helpers.rb', line 61 def parse_openstack_response(response, *keys) unless response.body.empty? begin body = JSON.parse(response.body) key = keys.detect { |k| body.has_key?(k)} return body[key] if key rescue JSON::ParserError # do nothing end end nil end |
#task_checkpoint ⇒ Object
132 133 134 |
# File 'lib/cloud/openstack/helpers.rb', line 132 def task_checkpoint Bosh::Clouds::Config.task_checkpoint end |
#wait_resource(resource, target_state, state_method = :status, allow_notfound = false) ⇒ Object
Waits for a resource to reach a target state.
81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
# File 'lib/cloud/openstack/helpers.rb', line 81 def wait_resource(resource, target_state, state_method = :status, allow_notfound = false) started_at = Time.now desc = resource.class.name.split("::").last.to_s + " `" + resource.id.to_s + "'" target_state = Array(target_state) state_timeout = @state_timeout || DEFAULT_STATE_TIMEOUT loop do task_checkpoint duration = Time.now - started_at if duration > state_timeout cloud_error("Timed out waiting for #{desc} to be #{target_state.join(", ")}") end if @logger @logger.debug("Waiting for #{desc} to be #{target_state.join(", ")} (#{duration}s)") end # If resource reload is nil, perhaps it's because resource went away # (ie: a destroy operation). Don't raise an exception if this is # expected (allow_notfound). if with_openstack { resource.reload.nil? } break if allow_notfound cloud_error("#{desc}: Resource not found") else state = with_openstack { resource.send(state_method).downcase.to_sym } end # This is not a very strong convention, but some resources # have 'error', 'failed' and 'killed' states, we probably don't want to keep # waiting if we're in these states. Alternatively we could introduce a # set of 'loop breaker' states but that doesn't seem very helpful # at the moment if state == :error || state == :failed || state == :killed cloud_error("#{desc} state is #{state}, expected #{target_state.join(", ")}") end break if target_state.include?(state) sleep(@wait_resource_poll_interval) end if @logger total = Time.now - started_at @logger.info("#{desc} is now #{target_state.join(", ")}, took #{total}s") end end |
#with_openstack ⇒ Object
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
# File 'lib/cloud/openstack/helpers.rb', line 23 def with_openstack retries = 0 begin yield rescue Excon::Errors::RequestEntityTooLarge => e # If we find a rate limit error, parse message, wait, and retry overlimit = parse_openstack_response(e.response, "overLimit", "overLimitFault") unless overlimit.nil? || retries >= MAX_RETRIES task_checkpoint wait_time = overlimit["retryAfter"] || e.response.headers["Retry-After"] || DEFAULT_RETRY_TIMEOUT details = "#{overlimit["message"]} - #{overlimit["details"]}" @logger.debug("OpenStack API Over Limit (#{details}), waiting #{wait_time} seconds before retrying") if @logger sleep(wait_time.to_i) retries += 1 retry end cloud_error("OpenStack API Request Entity Too Large error. Check task debug log for details.", e) rescue Excon::Errors::BadRequest => e badrequest = parse_openstack_response(e.response, "badRequest") details = badrequest.nil? ? "" : " (#{badrequest["message"]})" cloud_error("OpenStack API Bad Request#{details}. Check task debug log for details.", e) rescue Excon::Errors::InternalServerError => e unless retries >= MAX_RETRIES retries += 1 @logger.debug("OpenStack API Internal Server error, retrying (#{retries})") if @logger sleep(DEFAULT_RETRY_TIMEOUT) retry end cloud_error("OpenStack API Internal Server error. Check task debug log for details.", e) end end |