Module: Dapp::Kube::Dapp::Command::Deploy
- Included in: Dapp
- Defined in: lib/dapp/kube/dapp/command/deploy.rb
Instance Method Summary
- #init_kube_deploy_start_time ⇒ Object
- #kube_all_jobs_names ⇒ Object
- #kube_create_helm_auto_purge_trigger_file(release_name) ⇒ Object
- #kube_delete_helm_auto_purge_trigger_file(release_name) ⇒ Object
- #kube_delete_job!(job_name, all_pods_specs) ⇒ Object
- #kube_deploy ⇒ Object
- #kube_deploy_old ⇒ Object
- #kube_deploy_start_time ⇒ Object
- #kube_flush_hooks_jobs(release) ⇒ Object
- #kube_helm_auto_purge_trigger_file_path(release_name) ⇒ Object
- #kube_run_deploy(release) ⇒ Object
Instance Method Details
#init_kube_deploy_start_time ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 69

def init_kube_deploy_start_time
  @kube_deploy_start_time ||= Time.now.to_datetime.rfc3339
end
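The start time is memoized as an RFC 3339 string, so repeated calls within one deploy return the same timestamp (it is later passed to the watchers as logsFromTime). A minimal standalone sketch of the same idiom, outside of dapp:

require 'date'

def init_start_time
  # ||= assigns only on the first call; later calls reuse the stored string
  @start_time ||= Time.now.to_datetime.rfc3339
end

first = init_start_time   # e.g. "2018-06-05T12:34:56+03:00"
init_start_time == first  # => true, the memoized value is returned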
#kube_all_jobs_names ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 93

def kube_all_jobs_names
  kubernetes.job_list['items'].map { |i| i['metadata']['name'] }
end
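kubernetes.job_list returns the raw Kubernetes API response; the method projects it down to plain job names. A sketch of that projection over a hypothetical response hash:

job_list = {
  'items' => [
    { 'metadata' => { 'name' => 'dapp-hook-job-1' } },
    { 'metadata' => { 'name' => 'dapp-hook-job-2' } },
  ]
}

job_list['items'].map { |i| i['metadata']['name'] }
# => ["dapp-hook-job-1", "dapp-hook-job-2"]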
#kube_create_helm_auto_purge_trigger_file(release_name) ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 126

def kube_create_helm_auto_purge_trigger_file(release_name)
  FileUtils.mkdir_p File.dirname(kube_helm_auto_purge_trigger_file_path(release_name))
  FileUtils.touch kube_helm_auto_purge_trigger_file_path(release_name)
end
#kube_delete_helm_auto_purge_trigger_file(release_name) ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 131

def kube_delete_helm_auto_purge_trigger_file(release_name)
  if File.exists? kube_helm_auto_purge_trigger_file_path(release_name)
    FileUtils.rm_rf kube_helm_auto_purge_trigger_file_path(release_name)
  end
end
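Together with #kube_create_helm_auto_purge_trigger_file and #kube_helm_auto_purge_trigger_file_path, this implements a simple file-based flag: the file's presence means "purge this release on the next deploy". A standalone sketch of the pattern (the path below is hypothetical, not dapp's real home dir):

require 'fileutils'
require 'tmpdir'

trigger = File.join(Dir.tmpdir, 'helm-example', 'myrelease', 'auto_purge_failed_release_on_next_deploy')

# set the flag, as kube_create_helm_auto_purge_trigger_file does
FileUtils.mkdir_p(File.dirname(trigger))
FileUtils.touch(trigger)

File.exist?(trigger)  # => true: the next deploy would purge the release

# clear the flag, as kube_delete_helm_auto_purge_trigger_file does
FileUtils.rm_rf(trigger) if File.exist?(trigger)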
#kube_delete_job!(job_name, all_pods_specs) ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 97

def kube_delete_job!(job_name, all_pods_specs)
  job_spec = Kubernetes::Client::Resource::Pod.new kubernetes.job(job_name)

  job_pods_specs = all_pods_specs
    .select do |pod|
      Array(pod.metadata['ownerReferences']).any? do |owner_reference|
        owner_reference['uid'] == job_spec.metadata['uid']
      end
    end

  job_pods_specs.each do |job_pod_spec|
    kubernetes.delete_pod!(job_pod_spec.name)
  end

  # FIXME: https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/
  # FIXME: orphanDependents is deprecated, should be propagationPolicy=Orphan. But it does not work.
  # FIXME: Also, kubectl uses the same: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/delete.go#L388
  # FIXME: https://github.com/kubernetes/kubernetes/issues/46659
  kubernetes.delete_job!(job_name, orphanDependents: false)
  loop do
    break unless kubernetes.job?(job_name)
    sleep 1
  end
end
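Pods are tied to their job through the standard Kubernetes metadata.ownerReferences link, so deleting a job's pods means selecting the pods whose owner uid matches the job's uid. A sketch of that selection over hypothetical spec hashes:

job_uid = 'abc-123'

pods = [
  { 'metadata' => { 'name' => 'pod-a', 'ownerReferences' => [{ 'uid' => 'abc-123' }] } },
  { 'metadata' => { 'name' => 'pod-b', 'ownerReferences' => [{ 'uid' => 'zzz-999' }] } },
  { 'metadata' => { 'name' => 'pod-c' } },  # no owner at all
]

owned = pods.select do |pod|
  # Array() turns a missing ownerReferences (nil) into []
  Array(pod['metadata']['ownerReferences']).any? { |ref| ref['uid'] == job_uid }
end

owned.map { |pod| pod['metadata']['name'] }  # => ["pod-a"]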
#kube_deploy ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 6

def kube_deploy
  # TODO: move setup_ssh_agent to golang
  setup_ssh_agent

  command = "deploy"

  # TODO: move option_tags logic to golang
  tag = begin
    raise ::Dapp::Error::Command, code: :expected_only_one_tag, data: { tags: option_tags.join(', ') } if option_tags.count > 1
    option_tags.first
  end

  # TODO: move project name logic to golang
  project_name = name.to_s

  # TODO: move project dir logic to golang
  project_dir = path.to_s

  # TODO: move release name logic to golang
  release_name = kube_release_name

  # TODO: move repo logic to golang
  repo = option_repo

  dimgs = self.build_configs.map do |config|
    d = self.dimg(config: config, ignore_signature_auto_calculation: true)
    {"Name" => d.name, "ImageTag" => tag, "Repo" => repo}
  end.uniq do |dimg|
    dimg["Name"]
  end

  res = ruby2go_deploy(
    "command" => command,
    "projectName" => project_name,
    "projectDir" => project_dir,
    "tag" => tag,
    "releaseName" => release_name,
    "repo" => repo,
    "dimgs" => JSON.dump(dimgs),
    "rubyCliOptions" => JSON.dump(self.options),
    options: { host_docker_config_dir: self.class.host_docker_config_dir },
  )

  raise ::Dapp::Error::Command, code: :ruby2go_deploy_command_failed, data: { command: command, message: res["error"] } unless res["error"].nil?
end
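Before handing off to the Go deployer, the dimg list is deduplicated by name, so two configs producing the same image entry are sent only once. The dedup step in isolation, with hypothetical dimgs:

dimgs = [
  { "Name" => "app",    "ImageTag" => "v1.0", "Repo" => "registry.example.com/app" },
  { "Name" => "app",    "ImageTag" => "v1.0", "Repo" => "registry.example.com/app" },
  { "Name" => "worker", "ImageTag" => "v1.0", "Repo" => "registry.example.com/app" },
]

dimgs.uniq { |d| d["Name"] }.map { |d| d["Name"] }
# => ["app", "worker"]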
#kube_deploy_old ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 52

def kube_deploy_old
  setup_ssh_agent

  helm_release do |release|
    do_deploy = proc do
      init_kube_deploy_start_time
      kube_run_deploy(release)
    end

    if dry_run?
      do_deploy.call
    else
      lock_helm_release &do_deploy
    end
  end
end
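Wrapping the deploy body in a proc lets the same code run either directly (dry run) or under the release lock. The same pattern in isolation, with a stand-in lock helper:

def with_lock
  puts "lock acquired"
  yield
ensure
  puts "lock released"
end

dry_run = false
do_deploy = proc { puts "deploying..." }

if dry_run
  do_deploy.call        # dry run skips locking entirely
else
  with_lock(&do_deploy) # real run executes under the lock
end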
#kube_deploy_start_time ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 73

def kube_deploy_start_time
  @kube_deploy_start_time
end
#kube_flush_hooks_jobs(release) ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 77

def kube_flush_hooks_jobs(release)
  all_jobs_names = kube_all_jobs_names

  all_pods_specs = kubernetes.pod_list["items"]
    .map {|spec| Kubernetes::Client::Resource::Pod.new(spec)}

  release.hooks.values
    .reject { |job| ['0', 'false'].include? job.annotations["dapp/recreate"].to_s }
    .select { |job| all_jobs_names.include? job.name }
    .each do |job|
      log_process("Delete hook job `#{job.name}` (dapp/recreate)", short: true) do
        kube_delete_job!(job.name, all_pods_specs) unless dry_run?
      end
    end
end
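A hook is recreated unless its dapp/recreate annotation is '0' or 'false', and only if a job with its name actually exists in the cluster. The filter chain in isolation, with hypothetical hooks:

Hook = Struct.new(:name, :annotations)

hooks = [
  Hook.new('migrate', { 'dapp/recreate' => 'true' }),
  Hook.new('seed',    { 'dapp/recreate' => 'false' }),  # opted out of recreation
  Hook.new('cleanup', {}),                              # job absent from the cluster
]
all_jobs_names = ['migrate', 'seed']

hooks
  .reject { |job| ['0', 'false'].include? job.annotations['dapp/recreate'].to_s }
  .select { |job| all_jobs_names.include? job.name }
  .map(&:name)
# => ["migrate"]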
#kube_helm_auto_purge_trigger_file_path(release_name) ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 122

def kube_helm_auto_purge_trigger_file_path(release_name)
  File.join(self.class.home_dir, "helm", release_name, "auto_purge_failed_release_on_next_deploy")
end
#kube_run_deploy(release) ⇒ Object
# File 'lib/dapp/kube/dapp/command/deploy.rb', line 137

def kube_run_deploy(release)
  log_process("Deploy release #{release.name}") do
    helm_status_res = shellout([
      "helm",
      ("--kube-context #{custom_kube_context}" if custom_kube_context),
      "status",
      release.name,
    ].compact.join(" "))

    release_status = nil
    if helm_status_res.status.success?
      status_line = helm_status_res.stdout.lines.find {|l| l.start_with? "STATUS: "}
      release_status = status_line.partition(": ")[2].strip if status_line
    end

    release_exists = nil
    if not helm_status_res.status.success?
      # Helm release does not exist.
      release_exists = false

      # Create purge-trigger for the next run.
      kube_create_helm_auto_purge_trigger_file(release.name)
    elsif ["FAILED", "PENDING_INSTALL"].include? release_status
      release_exists = true

      if File.exists? kube_helm_auto_purge_trigger_file_path(release.name)
        log_process("Purge helm release #{release.name}") do
          shellout!([
            "helm",
            ("--kube-context #{custom_kube_context}" if custom_kube_context),
            "delete",
            "--purge #{release.name}",
          ].compact.join(" "))
        end

        # Purge-trigger file remains in place
        release_exists = false
      end
    else
      if File.exists? kube_helm_auto_purge_trigger_file_path(release.name)
        log_warning "[WARN] Will not purge helm release #{release.name}: expected FAILED or PENDING_INSTALL release status, got #{release_status}"
      end

      release_exists = true
      kube_delete_helm_auto_purge_trigger_file(release.name)
    end

    kube_flush_hooks_jobs(release)

    watch_hooks_by_type = release.jobs.values
      .reduce({}) do |res, job|
        if job.annotations['dapp/watch-logs'].to_s == 'true'
          job.annotations['helm.sh/hook'].to_s.split(',').each do |hook_type|
            res[hook_type] ||= []
            res[hook_type] << job
          end
        end
        res
      end
      .tap do |res|
        res.values.each do |jobs|
          jobs.sort_by! {|job| job.annotations['helm.sh/hook-weight'].to_i}
        end
      end

    watch_hooks = if release_exists
      watch_hooks_by_type['pre-upgrade'].to_a + watch_hooks_by_type['post-upgrade'].to_a
    else
      watch_hooks_by_type['pre-install'].to_a + watch_hooks_by_type['post-install'].to_a
    end

    watch_hooks_thr = nil
    watch_hooks_condition_mutex = ::Mutex.new
    watch_hooks_condition = ::ConditionVariable.new
    deploy_has_began = false
    unless dry_run? and watch_hooks.any?
      watch_hooks_thr = Thread.new do
        watch_hooks_condition_mutex.synchronize do
          while not deploy_has_began do
            watch_hooks_condition.wait(watch_hooks_condition_mutex)
          end
        end

        begin
          watch_hooks.each do |job|
            begin
              if ENV["KUBEDOG"] != "0"
                timeout = self.options[:timeout] || 300

                tmp_dir = Dir.mktmpdir('dapp-ruby2go-', tmp_base_dir)

                res = nil
                begin
                  res = ruby2go_deploy_watcher(
                    {
                      "action" => "watch job",
                      "resourceName" => job.name,
                      "namespace" => release.namespace,
                      "timeout" => timeout,
                      "logsFromTime" => kube_deploy_start_time,
                      "kubeContext" => custom_kube_context,
                    },
                    tmp_dir: tmp_dir,
                  )
                rescue ::Dapp::Dapp::Ruby2Go::Error => err
                  # ignore interrupt
                  if err.net_status[:data][:status_code] == 17
                    res = {}
                  else
                    raise
                  end
                ensure
                  FileUtils.rmtree(tmp_dir)
                end

                if res["error"]
                  $stderr.puts(::Dapp::Dapp.paint_string(res["error"], :warning))
                end
              else
                Kubernetes::Manager::Job.new(self, job.name).watch_till_done!
              end
            rescue ::Exception => e
              raise
            end
          end # watch_hooks.each
        rescue Kubernetes::Error::Default => e
          # A Default error is a user-facing error: bin/dapp catches and
          # displays it, and then dapp exits with an error.
          # The parent thread must not be killed on a Default error,
          # because helm is most likely running in it at that moment,
          # and the helm deploy process should not be interrupted.
          # So catch the error and simply show it to the user for
          # information, without terminating dapp.
          $stderr.puts(::Dapp::Dapp.paint_string(::Dapp::Helper::NetStatus.message(e), :warning))
        end
      end # Thread
    end # unless

    deployment_managers = release.deployments.values
      .map {|deployment| Kubernetes::Manager::Deployment.new(self, deployment.name)}

    deployment_managers.each(&:before_deploy)

    log_process("#{release_exists ? "Upgrade" : "Install"} helm release #{release.name}") do
      watch_hooks_condition_mutex.synchronize do
        deploy_has_began = true
        # In fact this only guarantees that the log_process message is
        # printed before any output from the watch_thr thread
        watch_hooks_condition.signal
      end

      cmd_res = if release_exists
        release.upgrade_helm_release
      else
        release.install_helm_release
      end

      if cmd_res.error?
        if cmd_res.stderr.end_with? "has no deployed releases\n"
          log_warning "[WARN] Helm release #{release.name} is in improper state: #{cmd_res.stderr}"
          log_warning "[WARN] Helm release #{release.name} will be removed with `helm delete --purge` on the next run of `dapp kube deploy`"
          kube_create_helm_auto_purge_trigger_file(release.name)
        end

        watch_hooks_thr.kill if !dry_run? && watch_hooks_thr && watch_hooks_thr.alive?
        raise ::Dapp::Error::Command, code: :kube_helm_failed, data: {output: (cmd_res.stdout + cmd_res.stderr).strip}
      else
        kube_delete_helm_auto_purge_trigger_file(release.name)
        log_info((cmd_res.stdout + cmd_res.stderr).strip)
        watch_hooks_thr.join if !dry_run? && watch_hooks_thr && watch_hooks_thr.alive?
      end
    end

    deployment_managers.each(&:after_deploy)

    unless dry_run?
      begin
        timeout = self.options[:timeout] || 300
        ::Timeout::timeout(timeout) do
          deployment_managers.each {|deployment_manager|
            if deployment_manager.should_watch?
              if ENV["KUBEDOG"] != "0"
                res = ruby2go_deploy_watcher(
                  "action" => "watch deployment",
                  "resourceName" => deployment_manager.name,
                  "namespace" => release.namespace,
                  "timeout" => timeout,
                  "logsFromTime" => kube_deploy_start_time,
                  "kubeContext" => custom_kube_context,
                )

                if res["error"]
                  raise ::Dapp::Kube::Kubernetes::Error::Default.new data: {message: res["error"]}
                end
              else
                deployment_manager.watch_till_ready!
              end
            end
          }
        end
      rescue ::Timeout::Error
        raise ::Dapp::Error::Command, code: :kube_deploy_timeout
      end
    end
  end
end
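Hook jobs that request log watching are grouped by their helm.sh/hook types and ordered by helm.sh/hook-weight, mirroring helm's own hook ordering. The grouping and sorting step in isolation, with hypothetical jobs:

Job = Struct.new(:name, :annotations)

jobs = [
  Job.new('late',  { 'dapp/watch-logs' => 'true', 'helm.sh/hook' => 'pre-install',              'helm.sh/hook-weight' => '5' }),
  Job.new('early', { 'dapp/watch-logs' => 'true', 'helm.sh/hook' => 'pre-install,post-install', 'helm.sh/hook-weight' => '1' }),
  Job.new('quiet', { 'helm.sh/hook' => 'pre-install' }),  # no watch-logs, excluded
]

by_type = jobs.reduce({}) do |res, job|
  if job.annotations['dapp/watch-logs'].to_s == 'true'
    job.annotations['helm.sh/hook'].to_s.split(',').each do |hook_type|
      (res[hook_type] ||= []) << job
    end
  end
  res
end

by_type.each_value { |js| js.sort_by! { |j| j.annotations['helm.sh/hook-weight'].to_i } }

by_type['pre-install'].map(&:name)   # => ["early", "late"]
by_type['post-install'].map(&:name)  # => ["early"]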