Class: Dcmgr::Rpc::HvaHandler
- Inherits:
-
EndpointBuilder
- Object
- EndpointBuilder
- Dcmgr::Rpc::HvaHandler
- Includes:
- Helpers::CliHelper, Helpers::NicHelper, Logger
- Defined in:
- lib/dcmgr/rpc/hva_handler.rb
Instance Method Summary collapse
- #attach_volume_to_host ⇒ Object
- #check_interface ⇒ Object
- #detach_volume_from_host ⇒ Object
- #event ⇒ Object
- #get_linux_dev_path ⇒ Object
- #jobreq ⇒ Object
- #pararell_curl(url, output_path) ⇒ Object
- #rpc ⇒ Object
- #select_hypervisor ⇒ Object
- #setup_metadata_drive ⇒ Object
- #terminate_instance(state_update = false) ⇒ Object
- #update_instance_state(opts, ev) ⇒ Object
- #update_volume_state(opts, ev) ⇒ Object
- #update_volume_state_to_available ⇒ Object
Methods included from Helpers::NicHelper
#clean_mac, #find_nic, #is_natted?, #nic_state, #valid_nic?
Methods included from Helpers::CliHelper
Methods included from Logger
create, default_logdev, included
Instance Method Details
#attach_volume_to_host ⇒ Object
17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 17 def attach_volume_to_host # check under until the dev file is created. # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0 get_linux_dev_path tryagain do next true if File.exist?(@os_devpath) sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_node][:ipaddr]]) sh("iscsiadm -m node -l -T '%s' --portal '%s'", [@vol[:transport_information][:iqn], @vol[:storage_node][:ipaddr]]) # wait udev queue sh("/sbin/udevadm settle") end rpc.request('sta-collector', 'update_volume', @vol_id, { :state=>:attaching, :attached_at => nil, :instance_id => @inst[:id], # needed after cleanup :host_device_name => @os_devpath}) end |
#check_interface ⇒ Object
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 87

# Ensure host-side network plumbing exists for every vNIC of the instance:
# an optional 802.1q VLAN sub-interface on the physical NIC, and the bridge
# the vNIC will be attached to.
#
# BUGFIX: the original guarded VLAN creation with `valid_nic?(vlan_if)`, but
# no `vlan_if` local exists anywhere in this method — it would raise
# NameError the first time a VLAN-tagged network was processed. The
# interface being created is `fwd_if` ("#{phy_if}.#{vlan_id}"), so that is
# the name that must be checked.
def check_interface
  @inst[:instance_nics].each { |vnic|
    network = rpc.request('hva-collector', 'get_network', vnic[:network_id])

    # fwd_if: interface enslaved to the bridge (VLAN sub-if when tagged).
    fwd_if = phy_if = network[:physical_network][:interface]
    bridge_if = network[:link_interface]

    if network[:vlan_id].to_i > 0 && phy_if
      fwd_if = "#{phy_if}.#{network[:vlan_id]}"
      unless valid_nic?(fwd_if)
        sh("/sbin/vconfig add #{phy_if} #{network[:vlan_id]}")
        sh("/sbin/ip link set %s up", [fwd_if])
        sh("/sbin/ip link set %s promisc on", [fwd_if])
      end
    end

    unless valid_nic?(bridge_if)
      sh("/usr/sbin/brctl addbr %s", [bridge_if])
      sh("/usr/sbin/brctl setfd %s 0", [bridge_if])
      # There is null case for the forward interface to create closed bridge network.
      if fwd_if
        sh("/usr/sbin/brctl addif %s %s", [bridge_if, fwd_if])
      end
    end
  }
  # Give the kernel a moment to bring the new interfaces up.
  sleep 1
end
#detach_volume_from_host ⇒ Object
39 40 41 42 43 44 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 39 def detach_volume_from_host # iscsi logout sh("iscsiadm -m node -T '%s' --logout", [@vol[:transport_information][:iqn]]) # wait udev queue sh("/sbin/udevadm settle") end |
#event ⇒ Object
487 488 489 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 487

# Lazily-built event channel bound to this handler's node.
def event
  return @event if @event
  @event = Isono::NodeModules::EventChannel.new(@node)
end
#get_linux_dev_path ⇒ Object
116 117 118 119 120 121 122 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 116

# Compute the udev by-path device node for the iSCSI LUN backing @vol and
# store it in @os_devpath.
# e.g. /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
def get_linux_dev_path
  portal = "#{@vol[:storage_node][:ipaddr]}:3260"
  transport = @vol[:transport_information]
  @os_devpath = sprintf("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d",
                        portal, transport[:iqn], transport[:lun])
end
#jobreq ⇒ Object
483 484 485 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 483

# Lazily-built job channel bound to this handler's node.
def jobreq
  return @jobreq if @jobreq
  @jobreq = Isono::NodeModules::JobChannel.new(@node)
end
#pararell_curl(url, output_path) ⇒ Object
491 492 493 494 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 491

# Download +url+ to +output_path+ via the bundled pararell-curl.sh helper
# shipped in the gem's script/ directory.
#
# NOTE(review): the extracted source read `File.('../../../../',__FILE__)` —
# the method name was lost; restored as File.expand_path, which resolves
# four directories up from lib/dcmgr/rpc/ to the gem root. Confirm against
# the repository.
#
# SECURITY: url and output_path are interpolated directly into a shell
# command line; callers must not pass untrusted values here.
def pararell_curl(url, output_path)
  script_root_path = File.join(File.expand_path('../../../../', __FILE__), 'script')
  sh("#{script_root_path}/pararell-curl.sh --url=#{url} --output_path=#{output_path}")
end
#rpc ⇒ Object
479 480 481 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 479

# Lazily-built RPC channel bound to this handler's node.
def rpc
  return @rpc if @rpc
  @rpc = Isono::NodeModules::RpcChannel.new(@node)
end
#select_hypervisor ⇒ Object
13 14 15 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 13

# Pick the hypervisor driver matching the instance spec and cache it in @hv.
def select_hypervisor
  spec = @inst[:instance_spec]
  @hv = Dcmgr::Drivers::Hypervisor.select_hypervisor(spec[:hypervisor])
end
#setup_metadata_drive ⇒ Object
124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 124

# Build the EC2-style metadata drive image for the instance: create a small
# sparse image, format it vfat, loop-mount it, populate the meta-data
# directory tree plus user-data, then unmount and release the loop device.
#
# NOTE(review): the extracted source lost several identifiers in this method
# (the method name after `def`, the @hva_ctx attribute holding the image
# path, the name of the metadata hash, and the receiver of `.message` on
# $!). They are restored below as `setup_metadata_drive`,
# `@hva_ctx.metadata_img_path`, `metadata_items` and `$!.message` — verify
# against the repository before merging.
def setup_metadata_drive
  logger.info("Setting up metadata drive image for :#{@hva_ctx.inst_id}")
  # truncate creates sparsed file.
  sh("/usr/bin/truncate -s 10m '#{@hva_ctx.metadata_img_path}'; sync;")
  # TODO: need to lock loop device not to use same device from
  # another thread/process.
  lodev = `/sbin/losetup -f`.chomp
  sh("/sbin/losetup #{lodev} '#{@hva_ctx.metadata_img_path}'")
  sh("mkfs.vfat -n METADATA '#{@hva_ctx.metadata_img_path}'")
  Dir.mkdir("#{@hva_ctx.inst_data_dir}/tmp") unless File.exists?("#{@hva_ctx.inst_data_dir}/tmp")
  sh("/bin/mount -t vfat #{lodev} '#{@hva_ctx.inst_data_dir}/tmp'")

  vnic = @inst[:instance_nics].first || {}
  # Appendix B: Metadata Categories
  # http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?AESDG-chapter-instancedata.html
  metadata_items = {
    'ami-id' => @inst[:image][:uuid],
    'ami-launch-index' => 0,
    'ami-manifest-path' => nil,
    'ancestor-ami-ids' => nil,
    'block-device-mapping/root' => '/dev/sda',
    'hostname' => @inst[:hostname],
    'instance-action' => @inst[:state],
    'instance-id' => @inst[:uuid],
    'instance-type' => @inst[:instance_spec][:uuid],
    'kernel-id' => nil,
    'local-hostname' => @inst[:hostname],
    'local-ipv4' => @inst[:ips].first,
    'mac' => vnic[:mac_addr].unpack('A2'*6).join(':'),
    'placement/availability-zone' => nil,
    'product-codes' => nil,
    'public-hostname' => @inst[:hostname],
    'public-ipv4' => @inst[:nat_ips].first,
    'ramdisk-id' => nil,
    'reservation-id' => nil,
    'security-groups' => @inst[:security_groups].join(' '),
  }

  # Renamed block param from `vnic` to avoid shadowing the outer local.
  @inst[:vif].each { |vif_nic|
    netaddr = IPAddress::IPv4.new("#{vif_nic[:ipv4][:network][:ipv4_network]}/#{vif_nic[:ipv4][:network][:prefix]}")

    # vfat doesn't allow folder name including ":".
    # folder name including mac address replaces "-" to ":".
    mac = vif_nic[:mac_addr].unpack('A2'*6).join('-')
    metadata_items.merge!({
      "network/interfaces/macs/#{mac}/local-hostname" => @inst[:hostname],
      "network/interfaces/macs/#{mac}/local-ipv4s" => vif_nic[:ipv4][:address],
      "network/interfaces/macs/#{mac}/mac" => vif_nic[:mac_addr].unpack('A2'*6).join(':'),
      "network/interfaces/macs/#{mac}/public-hostname" => @inst[:hostname],
      "network/interfaces/macs/#{mac}/public-ipv4s" => vif_nic[:ipv4][:nat_address],
      "network/interfaces/macs/#{mac}/security-groups" => @inst[:security_groups].join(' '),
      # wakame-vdc extension items.
      # TODO: need an iface index number?
      "network/interfaces/macs/#{mac}/x-gateway" => vif_nic[:ipv4][:network][:ipv4_gw],
      "network/interfaces/macs/#{mac}/x-netmask" => netaddr.prefix.to_ip,
      "network/interfaces/macs/#{mac}/x-network" => vif_nic[:ipv4][:network][:ipv4_network],
      "network/interfaces/macs/#{mac}/x-broadcast" => netaddr.broadcast,
      "network/interfaces/macs/#{mac}/x-metric" => vif_nic[:ipv4][:network][:metric],
    })
  }

  if @inst[:ssh_key_data]
    metadata_items.merge!({
      "public-keys/0=#{@inst[:ssh_key_data][:name]}" => @inst[:ssh_key_data][:public_key],
      'public-keys/0/openssh-key' => @inst[:ssh_key_data][:public_key],
    })
  else
    metadata_items.merge!({'public-keys/'=>nil})
  end

  # build metadata directory tree
  metadata_base_dir = File.expand_path("meta-data", "#{@hva_ctx.inst_data_dir}/tmp")
  FileUtils.mkdir_p(metadata_base_dir)

  metadata_items.each { |k, v|
    if k[-1,1] == '/' && v.nil?
      # just create empty folder
      FileUtils.mkdir_p(File.expand_path(k, metadata_base_dir))
      next
    end

    dir = File.dirname(k)
    if dir != '.'
      FileUtils.mkdir_p(File.expand_path(dir, metadata_base_dir))
    end
    File.open(File.expand_path(k, metadata_base_dir), 'w') { |f| f.puts(v.to_s) }
  }

  # user-data
  File.open(File.expand_path('user-data', "#{@hva_ctx.inst_data_dir}/tmp"), 'w') { |f|
    f.puts(@inst[:user_data])
  }
ensure
  # ignore any errors from cleanup work.
  sh("/bin/umount -f '#{@hva_ctx.inst_data_dir}/tmp'") rescue logger.warn($!.message)
  sh("/sbin/losetup -d #{lodev}") rescue logger.warn($!.message)
end
#terminate_instance(state_update = false) ⇒ Object
56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 56

# Shut down the guest via the hypervisor driver, best-effort detach every
# attached volume (errors are logged, never raised, so termination always
# completes), then delete the instance's data directory.
#
# NOTE(review): the extracted source read `File.("#{@inst_id}", ...)` — the
# method name was lost; restored as File.expand_path. Confirm against the
# repository.
#
# @param state_update [Boolean] when true, also mark each detached volume
#   :available in the sta-collector.
def terminate_instance(state_update=false)
  @hv.terminate_instance(HvaContext.new(self))

  unless @inst[:volume].nil?
    @inst[:volume].each { |volid, v|
      @vol_id = volid
      @vol = v
      # force to continue detaching volumes during termination.
      detach_volume_from_host rescue logger.error($!)
      if state_update
        update_volume_state_to_available rescue logger.error($!)
      end
    }
  end

  # cleanup vm data folder
  FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
end
#update_instance_state(opts, ev) ⇒ Object
75 76 77 78 79 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 75

# Push an instance state change to the hva-collector and broadcast the
# event +ev+ with the instance id as its argument.
def update_instance_state(opts, ev)
  if @inst_id.nil?
    raise "Can't update instance info without setting @inst_id"
  end
  rpc.request('hva-collector', 'update_instance', @inst_id, opts)
  event.publish(ev, :args => [@inst_id])
end
#update_volume_state(opts, ev) ⇒ Object
81 82 83 84 85 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 81

# Push a volume state change to the sta-collector and broadcast the event
# +ev+ with the volume id as its argument.
def update_volume_state(opts, ev)
  if @vol_id.nil?
    raise "Can't update volume info without setting @vol_id"
  end
  rpc.request('sta-collector', 'update_volume', @vol_id, opts)
  event.publish(ev, :args => [@vol_id])
end
#update_volume_state_to_available ⇒ Object
46 47 48 49 50 51 52 53 54 |
# File 'lib/dcmgr/rpc/hva_handler.rb', line 46

# Mark @vol_id detached: report it :available (cleared host device and
# instance, stamped with the detach time) and publish the detach event.
def update_volume_state_to_available
  detach_attrs = {
    :state => :available,
    :host_device_name => nil,
    :instance_id => nil,
    :detached_at => Time.now.utc,
  }
  rpc.request('sta-collector', 'update_volume', @vol_id, detach_attrs)
  event.publish('hva/volume_detached', :args => [@inst_id, @vol_id])
end