Module: Torch

Defined in:
lib/torch/inspector.rb,
lib/torch.rb,
lib/torch/hub.rb,
lib/torch/device.rb,
lib/torch/nn/elu.rb,
lib/torch/nn/gru.rb,
lib/torch/nn/rnn.rb,
lib/torch/tensor.rb,
lib/torch/nn/fold.rb,
lib/torch/nn/gelu.rb,
lib/torch/nn/init.rb,
lib/torch/nn/loss.rb,
lib/torch/nn/lstm.rb,
lib/torch/nn/relu.rb,
lib/torch/nn/tanh.rb,
lib/torch/version.rb,
lib/torch/nn/prelu.rb,
lib/torch/nn/utils.rb,
lib/torch/nn/conv1d.rb,
lib/torch/nn/conv2d.rb,
lib/torch/nn/conv3d.rb,
lib/torch/nn/convnd.rb,
lib/torch/nn/linear.rb,
lib/torch/nn/module.rb,
lib/torch/nn/unfold.rb,
lib/torch/optim/sgd.rb,
lib/torch/nn/dropout.rb,
lib/torch/nn/l1_loss.rb,
lib/torch/nn/sigmoid.rb,
lib/torch/nn/softmax.rb,
lib/torch/nn/softmin.rb,
lib/torch/optim/adam.rb,
lib/torch/optim/asgd.rb,
lib/torch/utils/data.rb,
lib/torch/nn/bce_loss.rb,
lib/torch/nn/bilinear.rb,
lib/torch/nn/ctc_loss.rb,
lib/torch/nn/identity.rb,
lib/torch/nn/mse_loss.rb,
lib/torch/nn/nll_loss.rb,
lib/torch/nn/rnn_base.rb,
lib/torch/nn/softplus.rb,
lib/torch/nn/softsign.rb,
lib/torch/nn/upsample.rb,
lib/torch/optim/adamw.rb,
lib/torch/optim/rprop.rb,
lib/torch/nn/dropout2d.rb,
lib/torch/nn/dropout3d.rb,
lib/torch/nn/dropoutnd.rb,
lib/torch/nn/embedding.rb,
lib/torch/nn/lp_pool1d.rb,
lib/torch/nn/lp_pool2d.rb,
lib/torch/nn/lp_poolnd.rb,
lib/torch/nn/parameter.rb,
lib/torch/nn/softmax2d.rb,
lib/torch/optim/adamax.rb,
lib/torch/nn/avg_pool1d.rb,
lib/torch/nn/avg_pool2d.rb,
lib/torch/nn/avg_pool3d.rb,
lib/torch/nn/avg_poolnd.rb,
lib/torch/nn/batch_norm.rb,
lib/torch/nn/functional.rb,
lib/torch/nn/group_norm.rb,
lib/torch/nn/hardshrink.rb,
lib/torch/nn/layer_norm.rb,
lib/torch/nn/leaky_relu.rb,
lib/torch/nn/max_pool1d.rb,
lib/torch/nn/max_pool2d.rb,
lib/torch/nn/max_pool3d.rb,
lib/torch/nn/max_poolnd.rb,
lib/torch/nn/sequential.rb,
lib/torch/nn/softshrink.rb,
lib/torch/nn/tanhshrink.rb,
lib/torch/nn/zero_pad2d.rb,
lib/torch/optim/adagrad.rb,
lib/torch/optim/rmsprop.rb,
lib/torch/nn/kl_div_loss.rb,
lib/torch/nn/log_sigmoid.rb,
lib/torch/nn/log_softmax.rb,
lib/torch/nn/module_list.rb,
lib/torch/nn/transformer.rb,
lib/torch/optim/adadelta.rb,
lib/torch/nn/batch_norm1d.rb,
lib/torch/nn/batch_norm2d.rb,
lib/torch/nn/batch_norm3d.rb,
lib/torch/nn/max_unpool1d.rb,
lib/torch/nn/max_unpool2d.rb,
lib/torch/nn/max_unpool3d.rb,
lib/torch/nn/max_unpoolnd.rb,
lib/torch/optim/optimizer.rb,
lib/torch/nn/alpha_dropout.rb,
lib/torch/nn/embedding_bag.rb,
lib/torch/nn/instance_norm.rb,
lib/torch/nn/weighted_loss.rb,
lib/torch/nn/constant_pad1d.rb,
lib/torch/nn/constant_pad2d.rb,
lib/torch/nn/constant_pad3d.rb,
lib/torch/nn/constant_padnd.rb,
lib/torch/nn/parameter_list.rb,
lib/torch/nn/smooth_l1_loss.rb,
lib/torch/utils/data/subset.rb,
lib/torch/nn/instance_norm1d.rb,
lib/torch/nn/instance_norm2d.rb,
lib/torch/nn/instance_norm3d.rb,
lib/torch/utils/data/dataset.rb,
lib/torch/nn/poisson_nll_loss.rb,
lib/torch/nn/reflection_pad1d.rb,
lib/torch/nn/reflection_pad2d.rb,
lib/torch/nn/reflection_padnd.rb,
lib/torch/nn/soft_margin_loss.rb,
lib/torch/nn/cosine_similarity.rb,
lib/torch/nn/multi_margin_loss.rb,
lib/torch/nn/pairwise_distance.rb,
lib/torch/nn/replication_pad1d.rb,
lib/torch/nn/replication_pad2d.rb,
lib/torch/nn/replication_pad3d.rb,
lib/torch/nn/replication_padnd.rb,
lib/torch/nn/cross_entropy_loss.rb,
lib/torch/nn/adaptive_avg_pool1d.rb,
lib/torch/nn/adaptive_avg_pool2d.rb,
lib/torch/nn/adaptive_avg_pool3d.rb,
lib/torch/nn/adaptive_avg_poolnd.rb,
lib/torch/nn/adaptive_max_pool1d.rb,
lib/torch/nn/adaptive_max_pool2d.rb,
lib/torch/nn/adaptive_max_pool3d.rb,
lib/torch/nn/adaptive_max_poolnd.rb,
lib/torch/nn/local_response_norm.rb,
lib/torch/nn/margin_ranking_loss.rb,
lib/torch/nn/multihead_attention.rb,
lib/torch/nn/transformer_decoder.rb,
lib/torch/nn/transformer_encoder.rb,
lib/torch/nn/triplet_margin_loss.rb,
lib/torch/utils/data/data_loader.rb,
lib/torch/nn/bce_with_logits_loss.rb,
lib/torch/nn/functional_attention.rb,
lib/torch/nn/hinge_embedding_loss.rb,
lib/torch/nn/cosine_embedding_loss.rb,
lib/torch/nn/feature_alpha_dropout.rb,
lib/torch/utils/data/tensor_dataset.rb,
lib/torch/nn/multi_label_margin_loss.rb,
lib/torch/optim/lr_scheduler/step_lr.rb,
lib/torch/utils/data/iterable_dataset.rb,
lib/torch/nn/transformer_decoder_layer.rb,
lib/torch/nn/transformer_encoder_layer.rb,
lib/torch/optim/lr_scheduler/lambda_lr.rb,
lib/torch/nn/multi_label_soft_margin_loss.rb,
lib/torch/optim/lr_scheduler/lr_scheduler.rb,
lib/torch/optim/lr_scheduler/multi_step_lr.rb,
lib/torch/optim/lr_scheduler/exponential_lr.rb,
lib/torch/optim/lr_scheduler/multiplicative_lr.rb,
lib/torch/utils/data/data_pipes/iter_data_pipe.rb,
lib/torch/optim/lr_scheduler/cosine_annealing_lr.rb,
lib/torch/utils/data/data_pipes/iter/file_lister.rb,
lib/torch/utils/data/data_pipes/iter/file_opener.rb,
lib/torch/utils/data/data_pipes/iter/stream_wrapper.rb,
lib/torch/utils/data/data_pipes/filter_iter_data_pipe.rb,
lib/torch/utils/data/data_pipes/iter/iterable_wrapper.rb

Overview

Defined Under Namespace

Modules: Autograd, Hub, Inspector, NN, Optim, Utils

Classes: ByteStorage, Device, Error, NotImplementedYet, Tensor

Constant Summary

DTYPE_TO_ENUM =
{
  uint8: 0,
  int8: 1,
  short: 2,
  int16: 2,
  int: 3,
  int32: 3,
  long: 4,
  int64: 4,
  half: 5,
  float16: 5,
  float: 6,
  float32: 6,
  double: 7,
  float64: 7,
  complex_half: 8,
  complex32: 8,
  complex_float: 9,
  complex64: 9,
  complex_double: 10,
  cdouble: 10,
  complex128: 10,
  bool: 11,
  qint8: 12,
  quint8: 13,
  qint32: 14,
  bfloat16: 15
}
ENUM_TO_DTYPE =
DTYPE_TO_ENUM.map(&:reverse).to_h
TENSOR_TYPE_CLASSES =
[]
DTYPE_TO_CLASS =
{
  float32: "FloatTensor",
  float64: "DoubleTensor",
  float16: "HalfTensor",
  uint8: "ByteTensor",
  int8: "CharTensor",
  int16: "ShortTensor",
  int32: "IntTensor",
  int64: "LongTensor",
  bool: "BoolTensor"
}
VERSION =
"0.19.1"

Class Method Summary

Class Method Details

._dtype_to_numo ⇒ Object

Private method for cases when Numo is not available, or only becomes available after Torch is loaded.

Raises:

  • (Error)

# File 'lib/torch.rb', line 352

def _dtype_to_numo
  raise Error, "Numo not found" unless defined?(Numo::NArray)

  {
    uint8: Numo::UInt8,
    int8: Numo::Int8,
    int16: Numo::Int16,
    int32: Numo::Int32,
    int64: Numo::Int64,
    float32: Numo::SFloat,
    float64: Numo::DFloat
  }
end
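
As an example, the returned hash can be used to look up the Numo class matching a Torch dtype (a minimal sketch of a private API; assumes the numo-narray gem is installed and loaded first):

require "numo/narray"
require "torch"

Torch._dtype_to_numo[:float32]  # => Numo::SFloat
Torch._dtype_to_numo[:int64]    # => Numo::Int64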

._from_blob_ref(data, size, options) ⇒ Object

Private. TODO: use keepAlive in Rice (currently segfaults).


# File 'lib/torch.rb', line 340

def _from_blob_ref(data, size, options)
  tensor = _from_blob(data, size, options)
  # from_blob does not own the data, so we need to keep
  # a reference to it for the duration of the tensor
  # (can remove when passing the pointer directly)
  tensor.instance_variable_set("@_numo_data", data)
  tensor
end
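
Since _from_blob does not copy, the packed string must stay alive as long as the tensor; the instance variable above pins it. A usage sketch of this private API, assuming TensorOptions is resolvable as Torch::TensorOptions, as it is used inside _make_tensor_class below:

# raw float32 bytes for [1.0, 2.0, 3.0]
data = [1.0, 2.0, 3.0].pack("f*")
options = Torch::TensorOptions.new.dtype(Torch::DTYPE_TO_ENUM[:float32])
t = Torch._from_blob_ref(data, [3], options)
# t keeps +data+ referenced via @_numo_data until t is collected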

._make_tensor_class(dtype, cuda = false) ⇒ Object


# File 'lib/torch.rb', line 274

def self._make_tensor_class(dtype, cuda = false)
  cls = Class.new
  device = cuda ? "cuda" : "cpu"
  cls.define_singleton_method("new") do |*args|
    if args.size == 1 && args.first.is_a?(Tensor)
      args.first.send(dtype).to(device)
    elsif args.size == 1 && args.first.is_a?(ByteStorage) && dtype == :uint8
      bytes = args.first.bytes
      Torch._from_blob_ref(bytes, [bytes.bytesize], TensorOptions.new.dtype(DTYPE_TO_ENUM[dtype]))
    elsif args.size == 1 && args.first.is_a?(Array)
      Torch.tensor(args.first, dtype: dtype, device: device)
    elsif args.size == 0
      Torch.empty(0, dtype: dtype, device: device)
    else
      Torch.empty(*args, dtype: dtype, device: device)
    end
  end
  TENSOR_TYPE_CLASSES << cls
  cls
end
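
These generated classes back the classic tensor-type constructors named in DTYPE_TO_CLASS (Torch::FloatTensor and friends). A sketch of the construction styles the branches above support, assuming the classes are installed under those names:

Torch::FloatTensor.new([1, 2, 3])  # from an array: float32 on the CPU
Torch::LongTensor.new(2, 3)        # uninitialized 2x3 int64 tensor (via Torch.empty)
Torch::FloatTensor.new             # empty zero-element tensor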

.device(str) ⇒ Object


# File 'lib/torch.rb', line 385

def device(str)
  if str.is_a?(Device)
    str
  else
    Device.new(str)
  end
end
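
Both strings and existing Device objects are accepted, so callers can normalize either form. For example:

device = Torch.device("cpu")         # builds a Device from a string
Torch.device(device).equal?(device)  # => true; an existing Device passes through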

.enable_grad(&block) ⇒ Object


# File 'lib/torch.rb', line 370

def enable_grad(&block)
  grad_enabled(true, &block)
end

.from_numo(ndarray) ⇒ Object

Raises:

  • (Error)

# File 'lib/torch.rb', line 330

def from_numo(ndarray)
  dtype = _dtype_to_numo.find { |k, v| ndarray.is_a?(v) }
  raise Error, "Cannot convert #{ndarray.class.name} to tensor" unless dtype
  options = tensor_options(device: "cpu", dtype: dtype[0])
  # TODO pass pointer to array instead of creating string
  _from_blob_ref(ndarray.to_string, ndarray.shape, options)
end
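
A round-trip example (assuming the numo-narray gem is installed):

require "numo/narray"

a = Numo::SFloat[[1, 2], [3, 4]]
t = Torch.from_numo(a)
t.shape  # => [2, 2]
t.dtype  # => :float32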

.grad_enabled(value) ⇒ Object
Also known as: set_grad_enabled


# File 'lib/torch.rb', line 374

def grad_enabled(value)
  previous_value = grad_enabled?
  begin
    _set_grad_enabled(value)
    yield
  ensure
    _set_grad_enabled(previous_value)
  end
end
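
The block form restores the previous setting even if the block raises (note the ensure above); enable_grad (above) and no_grad (below) are thin wrappers over this method. A short example, assuming requires_grad: is accepted by Torch.tensor as in the Python API:

x = Torch.tensor([1.0, 2.0], requires_grad: true)

Torch.no_grad do
  (x * 2).requires_grad  # => false; tracking is off inside the block

  Torch.enable_grad do
    (x * 2).requires_grad  # => true; re-enabled for the inner block
  end
end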

.load(filename) ⇒ Object


# File 'lib/torch.rb', line 397

def load(filename)
  # open the file first so a missing path raises a Ruby error
  # (kept for backwards compatibility)
  File.open(filename, "rb") { |f| f.read(1) }

  to_ruby(_load(filename))
end

.no_grad(&block) ⇒ Object


# File 'lib/torch.rb', line 366

def no_grad(&block)
  grad_enabled(false, &block)
end

.save(obj, f) ⇒ Object


# File 'lib/torch.rb', line 393

def save(obj, f)
  File.binwrite(f, _save(to_ivalue(obj)))
end
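
save and load round-trip objects through the native serializer, so a tensor written with save can be read back with load (documented above). A minimal sketch:

t = Torch.tensor([[1, 2], [3, 4]])
Torch.save(t, "tensor.pt")

t2 = Torch.load("tensor.pt")
t2.shape  # => [2, 2]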

.tensor(data, **options) ⇒ Object

Raises:

  • (Error)

# File 'lib/torch.rb', line 404

def tensor(data, **options)
  if options[:dtype].nil? && defined?(Numo::NArray) && data.is_a?(Numo::NArray)
    numo_to_dtype = _dtype_to_numo.map(&:reverse).to_h
    options[:dtype] = numo_to_dtype[data.class]
  end

  size = []
  if data.respond_to?(:to_a)
    data = data.to_a
    d = data
    while d.is_a?(Array)
      size << d.size
      d = d.first
    end
    data = data.flatten
  else
    data = [data].compact
  end

  if options[:dtype].nil?
    if data.all? { |v| v.is_a?(Integer) }
      options[:dtype] = :int64
    elsif data.all? { |v| v == true || v == false }
      options[:dtype] = :bool
    elsif data.any? { |v| v.is_a?(Complex) }
      options[:dtype] = :complex64
    end
  end

  # TODO check each dimension for consistency in the future
  raise Error, "Inconsistent dimensions" if data.size != size.inject(1, :*)

  # TODO move to C++
  data = data.map { |v| v ? 1 : 0 } if options[:dtype] == :bool

  _tensor(data, size, tensor_options(**options))
end
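
The dtype-inference branches above are visible in practice: all-Integer data becomes int64, all-boolean data becomes bool, any Complex element yields complex64, and plain floats fall through to the library default (float32). For example:

Torch.tensor([1, 2, 3]).dtype          # => :int64
Torch.tensor([true, false]).dtype      # => :bool
Torch.tensor([1.5, 2.5]).dtype         # => :float32
Torch.tensor([Complex(1, 2)]).dtype    # => :complex64
Torch.tensor([1, 2], dtype: :float64)  # an explicit dtype wins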

.tensor?(obj) ⇒ Boolean

Returns:

  • (Boolean)

# File 'lib/torch.rb', line 326

def tensor?(obj)
  obj.is_a?(Tensor)
end
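
For example:

Torch.tensor?(Torch.tensor([1]))  # => true
Torch.tensor?([1, 2, 3])          # => false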