Module: TensorStream::Utils
- Included in: TensorStream, Images, Train::LearningRateDecay, Train::SlotCreator
- Defined in: lib/tensor_stream/utils.rb
Instance Method Summary
- #__v_scope_name ⇒ Object
- #apply_data_type_coercion(*args) ⇒ Object
  Auto-casts Ruby constant data types to match the tensor types of the other operands.
- #assign(ref, value, name: nil) ⇒ Object
- #check_allowed_types(input, types) ⇒ Object
- #check_data_types(*args) ⇒ Object
- #check_if_dense(value, expected_shape = nil) ⇒ Object
  Checks that the passed array is dense.
- #colocate_with(op, ignore_existing: false) ⇒ Object
- #constant(value, dtype: nil, shape: nil, internal: false, name: "Const") ⇒ Object
- #control_dependencies(control_inputs, &block) ⇒ Object
- #convert_to_tensor(value, dtype: nil, name: nil) ⇒ Object
- #device(device_uri, &block) ⇒ Object
- #disable_eager_execution ⇒ Object
- #dynamic_stitch(indices, data, name: nil) ⇒ Object
- #enable_eager_execution ⇒ Object
- #executing_eagerly? ⇒ Boolean
- #float32 ⇒ Object
- #get_collection(name, options = {}) ⇒ Object
- #get_default_graph ⇒ Object
- #get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false) ⇒ Object
- #get_variable_scope ⇒ Object
- #global_variables_initializer ⇒ Object
- #graph ⇒ Object
- #group(inputs, name: nil) ⇒ Object
- #image ⇒ Object
- #layers ⇒ Object
- #list_local_devices ⇒ Object
  Lists the available evaluators and devices in the current local environment. Returns an array containing the names of those devices.
- #math ⇒ Object
- #name_scope(name, default_name = nil, default: nil, values: nil) ⇒ Object
- #placeholder(dtype, shape: nil, name: nil) ⇒ Object
  Inserts a placeholder for a tensor that will always be fed.
- #program {|_self| ... } ⇒ Object
- #reset_default_graph ⇒ Object
- #session(evaluator = nil, thread_pool_class: Concurrent::ImmediateExecutor, log_device_placement: false, profile_enabled: false) {|session| ... } ⇒ Object
  Creates a session context where operations can be executed.
- #set_random_seed(seed) ⇒ Object
  Sets the random seed to use for the default graph.
- #train ⇒ Object
- #trainable_variables ⇒ Object
- #variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true) ⇒ Object
  Creates a variable. A variable maintains state across sessions.
- #variable_scope(scope = nil, default_name = nil, reuse: nil, initializer: nil) ⇒ Object
  Defines a variable context manager.
Instance Method Details
#__v_scope_name ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 126

def __v_scope_name
  Thread.current[:tensor_stream_variable_scope].map(&:name).compact.reject(&:empty?).join("/")
end
#apply_data_type_coercion(*args) ⇒ Object
Auto-casts Ruby constant data types to match the tensor types of the other operands.
# File 'lib/tensor_stream/utils.rb', line 299

def apply_data_type_coercion(*args)
  coerced_type = check_data_types(*args)
  args.map { |a| a.is_a?(Tensor) ? a : convert_to_tensor(a, dtype: coerced_type) }
end
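A minimal usage sketch: mixing a plain Ruby number with an existing tensor, assuming the module functions are reachable through the TensorStream namespace and that dtype coercion wraps the Ruby value as a tensor.

require "tensor_stream"

ts = TensorStream

a = ts.constant(1.0)                      # float32 tensor
a, b = ts.apply_data_type_coercion(a, 2)  # the Ruby integer 2 is wrapped as a float32 tensor
puts b.data_type # => :float32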
#assign(ref, value, name: nil) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 200

def assign(ref, value, name: nil)
  raise "#{ref.name} not a variable" unless ref.is_a?(Variable)

  ref.assign(value, name: name)
end
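A minimal sketch of assigning a new value to a variable, assuming Session#run and the usual arithmetic operator overloads behave as they do in TensorFlow.

require "tensor_stream"

ts = TensorStream

counter   = ts.variable(0, name: "counter")
increment = ts.assign(counter, counter + 1)

ts.session do |sess|
  sess.run(ts.global_variables_initializer)
  sess.run(increment)
  sess.run(increment)
  puts sess.run(counter) # => 2
end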
#check_allowed_types(input, types) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 279

def check_allowed_types(input, types)
  return input unless input.is_a?(Tensor)
  return input if input.data_type.nil?

  raise "#{input.source}: Parameter data type #{input.data_type} passed not in #{types.join(",")}" unless types.include?(input.data_type.to_sym)
end
#check_data_types(*args) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 286

def check_data_types(*args)
  unique_types = args.select { |a| a.is_a?(Tensor) }.
    map { |a| DataTypeUtils.norm_dtype(a.data_type) }.uniq

  if unique_types.size > 1
    raise TensorStream::ValueError, "Value Error: Tensor conversion requested dtypes are different -> #{unique_types}"
  end

  unique_types.first
end
#check_if_dense(value, expected_shape = nil) ⇒ Object
Checks that the passed array is dense.
# File 'lib/tensor_stream/utils.rb', line 263

def check_if_dense(value, expected_shape = nil)
  return unless value.is_a?(Array)
  return if value.empty?

  expected_shape ||= shape_eval(value)

  s = expected_shape.shift
  raise TensorStream::ValueError, "Argument must be a dense tensor: #{value}, expected size #{s} got #{value.size}" if value.size != s

  return if expected_shape.empty?

  value.each do |item|
    check_if_dense(item, expected_shape.dup)
  end
end
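A sketch of what this check catches: a ragged (non-rectangular) nested array should raise TensorStream::ValueError, while a rectangular one passes silently.

require "tensor_stream"

ts = TensorStream

ts.check_if_dense([[1, 2], [3, 4]]) # rectangular, returns without raising

begin
  ts.check_if_dense([[1, 2], [3]])  # second row is shorter than the inferred shape
rescue TensorStream::ValueError => e
  puts e.message
end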
#colocate_with(op, ignore_existing: false) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 147

def colocate_with(op, ignore_existing: false)
  # noop for now
  yield
end
#constant(value, dtype: nil, shape: nil, internal: false, name: "Const") ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 160

def constant(value, dtype: nil, shape: nil, internal: false, name: "Const")
  options = {const: true, value: value, name: name, internal: internal}
  if value.is_a?(Float)
    TensorStream::Constant.new(dtype || :float32, 0, shape || [], options)
  elsif value.is_a?(Integer)
    TensorStream::Constant.new(dtype || :int32, 0, shape || [], options)
  elsif value.is_a?(String)
    TensorStream::Constant.new(dtype || :string, 0, shape || [], options)
  elsif !!value == value
    TensorStream::Constant.new(dtype || :boolean, 0, shape || [], options)
  elsif value.is_a?(Array)
    dimension = shape || shape_eval(value)
    rank = dimension.size
    TensorStream.check_if_dense(value)

    cur_dtype = dtype || Tensor.detect_type(value.flatten.last)
    value = Tensor.cast_dtype(value, cur_dtype) unless dtype.nil?
    options[:value] = value
    TensorStream::Constant.new(cur_dtype, rank, dimension, options)
  end
end
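A short sketch of constant creation: dtypes are inferred from the Ruby value unless dtype: is given, and Session#run is assumed for evaluation.

require "tensor_stream"

ts = TensorStream

a = ts.constant(3.0)                 # scalar, inferred :float32
b = ts.constant([[1, 2], [3, 4]])    # 2x2 array, inferred :int32
c = ts.constant(1, dtype: :float32)  # explicit dtype

ts.session do |sess|
  puts sess.run(a + c) # => 4.0
end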
#control_dependencies(control_inputs, &block) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 237

def control_dependencies(control_inputs, &block)
  TensorStream.get_default_graph.control_dependencies(control_inputs, &block)
end
#convert_to_tensor(value, dtype: nil, name: nil) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 241

def convert_to_tensor(value, dtype: nil, name: nil)
  return value if value.is_a?(Tensor)
  return convert_to_tensor(value.call) if value.is_a?(Proc)
  # raise "Invalid tensor value" if value.nil?

  if value.is_a?(Array) && value.detect { |v| v.is_a?(Tensor) }
    return TensorStream.stack(value) if value.size > 1

    return TensorStream.expand_dims(value[0], 0)
  end

  if value.is_a?(TensorShape)
    value = value.shape
  end

  check_if_dense(value)
  i_cons(value, dtype: dtype || Tensor.detect_type(value), name: name)
end
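A minimal sketch: wrapping a plain Ruby array so it can participate in graph operations, assuming Session#run for evaluation.

require "tensor_stream"

ts = TensorStream

t = ts.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
puts t.data_type # => :float32

ts.session do |sess|
  puts sess.run(t).inspect # => [[1.0, 2.0], [3.0, 4.0]]
end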
#device(device_uri, &block) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 101

def device(device_uri, &block)
  get_default_graph.device(device_uri, &block)
end
#disable_eager_execution ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 23

def disable_eager_execution
  TensorStream::Graph.get_default_graph.disable_eager_execution
end
#dynamic_stitch(indices, data, name: nil) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 188

def dynamic_stitch(indices, data, name: nil)
  TensorStream::DynamicStitch.new(:dynamic_stitch, [indices, data], name: name)
end
#enable_eager_execution ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 19

def enable_eager_execution
  TensorStream::Graph.get_default_graph.enable_eager_execution
end
#executing_eagerly? ⇒ Boolean
# File 'lib/tensor_stream/utils.rb', line 27

def executing_eagerly?
  TensorStream::Graph.get_default_graph.executing_eagerly?
end
#get_collection(name, options = {}) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 196

def get_collection(name, options = {})
  Graph.get_default_graph.get_collection(name, options)
end
#get_default_graph ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 11

def get_default_graph
  TensorStream::Graph.get_default_graph
end
#get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 192

def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false)
  get_variable_scope.get_variable(name, dtype: dtype, shape: shape, initializer: initializer, trainable: trainable, collections: collections)
end
#get_variable_scope ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 116

def get_variable_scope
  unless Thread.current[:tensor_stream_variable_scope]
    variable_scope = VariableScope.new
    Thread.current[:tensor_stream_variable_scope] = [variable_scope]
    return variable_scope
  end

  Thread.current[:tensor_stream_variable_scope].last
end
#global_variables_initializer ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 211

def global_variables_initializer
  TensorStream::Variable.global_variables_initializer
end
#graph ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 7

def graph
  TensorStream::Graph.new
end
#group(inputs, name: nil) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 184

def group(inputs, name: nil)
  TensorStream::ControlFlow.new(:group, inputs, nil, name: name)
end
#image ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 223

def image
  TensorStream::Images
end
#layers ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 156

def layers
  TensorStream::Layers
end
#list_local_devices ⇒ Object
Lists the available evaluators and devices in the current local environment.

Returns:
- An array containing the names of those devices.
# File 'lib/tensor_stream/utils.rb', line 35

def list_local_devices
  local_name = "job:localhost"
  TensorStream::Evaluator.evaluators.collect { |k, v|
    v[:class].query_supported_devices.collect do |device_str|
      [local_name, "ts:#{k}:#{device_str.name}"].join("/")
    end
  }.flatten
end
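A minimal sketch; the exact strings depend on which evaluator gems are installed, so the sample output below is illustrative only.

require "tensor_stream"

ts = TensorStream

ts.list_local_devices.each { |name| puts name }
# sample output (pure-Ruby evaluator only): job:localhost/ts:ruby:cpu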
#math ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 219

def math
  TensorStream::Maths
end
#name_scope(name, default_name = nil, default: nil, values: nil) ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 105

def name_scope(name, default_name = nil, default: nil, values: nil)
  if values
    graph_count = values.select { |v| v.is_a?(Tensor) }.map(&:graph).map(&:object_id).uniq.size
    raise "values are not on the same graph" if graph_count > 1
  end

  get_default_graph.name_scope(name || default_name || default) do |scope|
    yield scope if block_given?
  end
end
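A minimal sketch of grouping ops under a common name prefix; the exact prefixed names shown in the comments are illustrative.

require "tensor_stream"

ts = TensorStream

ts.name_scope("layer1") do |scope|
  w = ts.constant(1.0, name: "weight")
  puts scope  # current scope, e.g. "layer1"
  puts w.name # prefixed with the scope, e.g. "layer1/weight"
end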
#placeholder(dtype, shape: nil, name: nil) ⇒ Object
Inserts a placeholder for a tensor that will always be fed.
# File 'lib/tensor_stream/utils.rb', line 207

def placeholder(dtype, shape: nil, name: nil)
  TensorStream::Placeholder.new(dtype, nil, shape, name: name)
end
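A minimal sketch of feeding a placeholder at run time, assuming Session#run accepts a feed_dict option as in TensorFlow.

require "tensor_stream"

ts = TensorStream

x = ts.placeholder(:float32, shape: [nil, 2], name: "x")
y = x * 2.0

ts.session do |sess|
  puts sess.run(y, feed_dict: {x => [[1.0, 2.0], [3.0, 4.0]]}).inspect
end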
#program {|_self| ... } ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 152

def program
  yield self
end
#reset_default_graph ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 15

def reset_default_graph
  TensorStream::Graph.get_default_graph.reset
end
#session(evaluator = nil, thread_pool_class: Concurrent::ImmediateExecutor, log_device_placement: false, profile_enabled: false) {|session| ... } ⇒ Object
Creates a session context where operations can be executed.

Args:
- evaluator: Specific evaluator to use; otherwise the best available evaluator is determined automatically.

Options:
- thread_pool_class: Class used to manage thread pooling.
- log_device_placement: Show the assigned device/evaluator for each tensor op.
- profile_enabled: Log performance metrics for each operation.
# File 'lib/tensor_stream/utils.rb', line 140

def session(evaluator = nil, thread_pool_class: Concurrent::ImmediateExecutor, log_device_placement: false, profile_enabled: false)
  session = TensorStream::Session.new(evaluator, thread_pool_class: thread_pool_class, log_device_placement: log_device_placement, profile_enabled: profile_enabled)
  yield session if block_given?

  session
end
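A minimal sketch of the block form; :ruby_evaluator is assumed here to be the name of the bundled pure-Ruby evaluator.

require "tensor_stream"

ts = TensorStream

a = ts.constant(2.0)
b = ts.constant(3.0)

# let the best available evaluator be chosen automatically
ts.session do |sess|
  puts sess.run(a * b) # => 6.0
end

# or request a specific evaluator
ts.session(:ruby_evaluator) do |sess|
  puts sess.run(a + b) # => 5.0
end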
#set_random_seed(seed) ⇒ Object
Sets the random seed to use for the default graph.
# File 'lib/tensor_stream/utils.rb', line 233

def set_random_seed(seed)
  TensorStream.get_default_graph.random_seed = seed
end
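A minimal sketch, assuming a random_uniform op is available; with the graph-level seed set, random ops should produce repeatable values across program runs.

require "tensor_stream"

ts = TensorStream

ts.set_random_seed(1234)
r = ts.random_uniform([2])

ts.session do |sess|
  puts sess.run(r).inspect # repeatable across runs once the seed is fixed
end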
#train ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 215

def train
  TensorStream::Trainer
end
#trainable_variables ⇒ Object
# File 'lib/tensor_stream/utils.rb', line 227

def trainable_variables
  TensorStream.get_default_graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
end
#variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true) ⇒ Object
Creates a variable. A variable maintains state across sessions.
# File 'lib/tensor_stream/utils.rb', line 47

def variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true)
  options = {
    initializer: TensorStream.convert_to_tensor(initializer || value),
    name: name,
    graph: graph,
    dtype: dtype,
    trainable: trainable,
  }
  tensor = if value.is_a?(String)
    i_var(dtype || :string, 0, [], get_variable_scope, options)
  elsif value.is_a?(Integer)
    i_var(dtype || :int32, 0, [], get_variable_scope, options)
  elsif value.is_a?(Float)
    i_var(dtype || :float32, 0, [], get_variable_scope, options)
  else
    i_var(dtype || :float32, 0, nil, get_variable_scope, options)
  end
  tensor
end
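A minimal sketch; random_uniform is assumed to be available as an initializer op, and variables must be initialized before they are read.

require "tensor_stream"

ts = TensorStream

weights = ts.variable(ts.random_uniform([2, 2]), name: "weights")
bias    = ts.variable(0.0, name: "bias")

ts.session do |sess|
  sess.run(ts.global_variables_initializer) # initialize before reading
  puts sess.run(weights).inspect
  puts sess.run(bias) # => 0.0
end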
#variable_scope(scope = nil, default_name = nil, reuse: nil, initializer: nil) ⇒ Object
Defines a variable context manager.
# File 'lib/tensor_stream/utils.rb', line 69

def variable_scope(scope = nil, default_name = nil, reuse: nil, initializer: nil)
  Thread.current[:tensor_stream_variable_scope] ||= [VariableScope.new]

  # uniquenifier
  if scope.nil? && default_name
    same_names = get_variable_scope.used_names.select { |s| s.start_with?(default_name) }
    new_name = default_name
    index = 1
    while same_names.include?(new_name)
      new_name = "#{default_name}_#{index}"
      index += 1
    end
    scope = new_name
  end

  variable_scope = VariableScope.new(name: scope, reuse: reuse, initializer: initializer)
  get_variable_scope.register_name(scope || "")
  Thread.current[:tensor_stream_variable_scope] << variable_scope
  scope_name = __v_scope_name
  if block_given?
    begin
      TensorStream.get_default_graph.name_scope(scope) do
        yield(scope_name)
      end
    ensure
      Thread.current[:tensor_stream_variable_scope].pop
    end
  else
    variable_scope
  end
end
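A minimal sketch of scoping variables created with get_variable; zeros_initializer is assumed to be available, and the printed names are illustrative.

require "tensor_stream"

ts = TensorStream

ts.variable_scope("encoder") do |scope_name|
  w = ts.get_variable("w", shape: [3, 3], initializer: ts.zeros_initializer)
  puts scope_name # => "encoder"
  puts w.name     # scoped under "encoder", e.g. "encoder/w"
end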