Module: Torch

Defined in:
lib/torch/optim/sgd.rb,
lib/torch.rb,
lib/torch/nn/rnn.rb,
lib/torch/random.rb,
lib/torch/tensor.rb,
lib/torch/nn/init.rb,
lib/torch/nn/loss.rb,
lib/torch/nn/relu.rb,
lib/torch/version.rb,
lib/torch/nn/prelu.rb,
lib/torch/inspector.rb,
lib/torch/nn/conv2d.rb,
lib/torch/nn/convnd.rb,
lib/torch/nn/linear.rb,
lib/torch/nn/module.rb,
lib/torch/nn/dropout.rb,
lib/torch/nn/l1_loss.rb,
lib/torch/nn/sigmoid.rb,
lib/torch/nn/softmax.rb,
lib/torch/nn/softmin.rb,
lib/torch/optim/adam.rb,
lib/torch/optim/asgd.rb,
lib/torch/nn/bce_loss.rb,
lib/torch/nn/bilinear.rb,
lib/torch/nn/ctc_loss.rb,
lib/torch/nn/identity.rb,
lib/torch/nn/mse_loss.rb,
lib/torch/nn/nll_loss.rb,
lib/torch/nn/rnn_base.rb,
lib/torch/nn/softplus.rb,
lib/torch/optim/adamw.rb,
lib/torch/optim/rprop.rb,
lib/torch/nn/dropout2d.rb,
lib/torch/nn/dropout3d.rb,
lib/torch/nn/dropoutnd.rb,
lib/torch/nn/embedding.rb,
lib/torch/nn/parameter.rb,
lib/torch/nn/softmax2d.rb,
lib/torch/optim/adamax.rb,
lib/torch/native/parser.rb,
lib/torch/nn/avg_pool2d.rb,
lib/torch/nn/avg_poolnd.rb,
lib/torch/nn/functional.rb,
lib/torch/nn/leaky_relu.rb,
lib/torch/nn/max_pool2d.rb,
lib/torch/nn/max_poolnd.rb,
lib/torch/nn/sequential.rb,
lib/torch/optim/adagrad.rb,
lib/torch/optim/rmsprop.rb,
lib/torch/nn/kl_div_loss.rb,
lib/torch/nn/log_softmax.rb,
lib/torch/optim/adadelta.rb,
lib/torch/native/function.rb,
lib/torch/optim/optimizer.rb,
lib/torch/native/generator.rb,
lib/torch/nn/alpha_dropout.rb,
lib/torch/nn/embedding_bag.rb,
lib/torch/nn/weighted_loss.rb,
lib/torch/native/dispatcher.rb,
lib/torch/nn/smooth_l1_loss.rb,
lib/torch/nn/poisson_nll_loss.rb,
lib/torch/nn/soft_margin_loss.rb,
lib/torch/nn/cosine_similarity.rb,
lib/torch/nn/multi_margin_loss.rb,
lib/torch/nn/pairwise_distance.rb,
lib/torch/nn/cross_entropy_loss.rb,
lib/torch/nn/margin_ranking_loss.rb,
lib/torch/nn/triplet_margin_loss.rb,
lib/torch/utils/data/data_loader.rb,
lib/torch/nn/bce_with_logits_loss.rb,
lib/torch/nn/hinge_embedding_loss.rb,
lib/torch/nn/cosine_embedding_loss.rb,
lib/torch/nn/feature_alpha_dropout.rb,
lib/torch/utils/data/tensor_dataset.rb,
lib/torch/nn/multi_label_margin_loss.rb,
lib/torch/optim/lr_scheduler/step_lr.rb,
lib/torch/nn/multi_label_soft_margin_loss.rb,
lib/torch/optim/lr_scheduler/lr_scheduler.rb

Overview

We use a generic interface for methods (*args, **options) and this class to determine the C++ method to call.

This is needed since LibTorch uses function overloading, which isn't available in Ruby or Python.

PyTorch takes the same approach, but its parser/dispatcher is written in C++.

We could generate Ruby methods directly, but an advantage of this approach is that positional and keyword arguments can be used interchangeably, as in Python, making it easier to port code.
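
A hypothetical sketch of the payoff (it assumes Torch.add is generated through this parser and that its schema names the second argument "other", as in PyTorch):

x = Torch.tensor([1, 2, 3])
y = Torch.tensor([10, 20, 30])

Torch.add(x, y)        # positional
Torch.add(x, other: y) # the same overload, selected via a keyword argument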

Defined Under Namespace

Modules: Inspector, NN, Native, Optim, Random, Utils

Classes: Error, NotImplementedYet, Tensor

Constant Summary

DTYPE_TO_ENUM =
{
  uint8: 0,
  int8: 1,
  short: 2,
  int16: 2,
  int: 3,
  int32: 3,
  long: 4,
  int64: 4,
  half: 5,
  float16: 5,
  float: 6,
  float32: 6,
  double: 7,
  float64: 7,
  complex_half: 8,
  complex_float: 9,
  complex_double: 10,
  bool: 11,
  qint8: 12,
  quint8: 13,
  qint32: 14,
  bfloat16: 15
}
ENUM_TO_DTYPE =
DTYPE_TO_ENUM.map(&:reverse).to_h
FloatTensor =
_make_tensor_class(:float32)
DoubleTensor =
_make_tensor_class(:float64)
HalfTensor =
_make_tensor_class(:float16)
ByteTensor =
_make_tensor_class(:uint8)
CharTensor =
_make_tensor_class(:int8)
ShortTensor =
_make_tensor_class(:int16)
IntTensor =
_make_tensor_class(:int32)
LongTensor =
_make_tensor_class(:int64)
BoolTensor =
_make_tensor_class(:bool)
VERSION =
"0.1.5"

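A quick sketch of how these constants fit together (the tensor class behavior follows from _make_tensor_class below):

Torch::DTYPE_TO_ENUM[:float32] # => 6
Torch::ENUM_TO_DTYPE[6]        # => :float32 (later keys win when enums collide)

t = Torch::LongTensor.new([1, 2, 3]) # same as Torch.tensor([1, 2, 3], dtype: :int64)
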
Class Method Summary

Class Method Details

._dtype_to_numo ⇒ Object

private - a method is used here (rather than a constant) to handle cases where Numo is not available, or only becomes available after Torch is loaded

Raises:

  • (Error)

# File 'lib/torch.rb', line 224

def _dtype_to_numo
  raise Error, "Numo not found" unless defined?(Numo::NArray)

  {
    uint8: Numo::UInt8,
    int8: Numo::Int8,
    int16: Numo::Int16,
    int32: Numo::Int32,
    int64: Numo::Int64,
    float32: Numo::SFloat,
    float64: Numo::DFloat
  }
end

._make_tensor_class(dtype, cuda = false) ⇒ Object



# File 'lib/torch.rb', line 154

def self._make_tensor_class(dtype, cuda = false)
  cls = Class.new
  device = cuda ? "cuda" : "cpu"
  cls.define_singleton_method("new") do |*args|
    if args.size == 1 && args.first.is_a?(Tensor)
      args.first.send(dtype).to(device)
    elsif args.size == 1 && args.first.is_a?(Array)
      Torch.tensor(args.first, dtype: dtype, device: device)
    else
      Torch.empty(*args, dtype: dtype, device: device)
    end
  end
  cls
end
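
Each generated class dispatches on its arguments, matching the branches above:

Torch::FloatTensor.new(2, 3)          # sizes -> uninitialized tensor via Torch.empty
Torch::FloatTensor.new([1.0, 2.0])    # array -> data tensor via Torch.tensor
Torch::FloatTensor.new(Torch.ones(2)) # tensor -> cast to float32 (and moved to the device)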

.arange(start, finish = nil, step = 1, **options) ⇒ Object

— begin tensor creation: pytorch.org/cppdocs/notes/tensor_creation.html



# File 'lib/torch.rb', line 254

def arange(start, finish = nil, step = 1, **options)
  # ruby doesn't support start = 0, finish, step = 1, ...
  if finish.nil?
    finish = start
    start = 0
  end
  _arange(start, finish, step, tensor_options(**options))
end
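
Usage mirrors Python's range-style defaults (the end point is exclusive):

Torch.arange(5)        # 0, 1, 2, 3, 4
Torch.arange(1, 10, 3) # 1, 4, 7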

.device(str) ⇒ Object



# File 'lib/torch.rb', line 248

def device(str)
  Device.new(str)
end

.empty(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 263

def empty(*size, **options)
  _empty(tensor_size(size), tensor_options(**options))
end

.empty_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 338

def empty_like(input, **options)
  empty(input.size, like_options(input, options))
end

.eye(n, m = nil, **options) ⇒ Object



# File 'lib/torch.rb', line 267

def eye(n, m = nil, **options)
  _eye(n, m || n, tensor_options(**options))
end
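
For example:

Torch.eye(2)    # 2x2 identity matrix
Torch.eye(2, 3) # 2x3 matrix with ones on the main diagonal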

.from_numo(ndarray) ⇒ Object

Raises:

  • (Error)

# File 'lib/torch.rb', line 207

def from_numo(ndarray)
  dtype = _dtype_to_numo.find { |k, v| ndarray.is_a?(v) }
  raise Error, "Cannot convert #{ndarray.class.name} to tensor" unless dtype
  options = tensor_options(device: "cpu", dtype: dtype[0])
  # TODO pass pointer to array instead of creating string
  str = ndarray.to_string
  tensor = _from_blob(str, ndarray.shape, options)
  # from_blob does not own the data, so we need to keep
  # a reference to it for duration of tensor
  # can remove when passing pointer directly
  tensor.instance_variable_set("@_numo_str", str)
  tensor
end
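
A round-trip sketch, assuming the numo-narray gem is installed:

require "numo/narray"
require "torch"

narray = Numo::DFloat[[1, 2], [3, 4]]
tensor = Torch.from_numo(narray) # float64 tensor, per the mapping in _dtype_to_numo
tensor.size # => [2, 2]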

.full(size, fill_value, **options) ⇒ Object



# File 'lib/torch.rb', line 271

def full(size, fill_value, **options)
  _full(size, fill_value, tensor_options(**options))
end

.full_like(input, fill_value, **options) ⇒ Object



# File 'lib/torch.rb', line 342

def full_like(input, fill_value, **options)
  full(input.size, fill_value, like_options(input, options))
end

.linspace(start, finish, steps = 100, **options) ⇒ Object



# File 'lib/torch.rb', line 275

def linspace(start, finish, steps = 100, **options)
  _linspace(start, finish, steps, tensor_options(**options))
end
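
For example:

Torch.linspace(0, 1, 5) # 0.0, 0.25, 0.5, 0.75, 1.0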

.log_softmax(input, dim) ⇒ Object

TODO make dim keyword argument



# File 'lib/torch.rb', line 401

def log_softmax(input, dim)
  _log_softmax(input, dim)
end

.logspace(start, finish, steps = 100, base = 10.0, **options) ⇒ Object



# File 'lib/torch.rb', line 279

def logspace(start, finish, steps = 100, base = 10.0, **options)
  _logspace(start, finish, steps, base, tensor_options(**options))
end

.max(input, dim = nil, keepdim: false, out: nil) ⇒ Object



# File 'lib/torch.rb', line 391

def max(input, dim = nil, keepdim: false, out: nil)
  if dim
    raise NotImplementedYet unless out
    _max_out(out[0], out[1], input, dim, keepdim)
  else
    _max(input)
  end
end
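
Without dim, this reduces over all elements. The dim form is only implemented when preallocated out tensors are supplied; a sketch (the expected out dtypes are assumed here):

x = Torch.tensor([[1, 5], [4, 2]])
Torch.max(x) # tensor holding 5

values = Torch.empty(2, dtype: :int64)  # same dtype as x
indices = Torch.empty(2, dtype: :int64)
Torch.max(x, 1, out: [values, indices]) # values: 5, 4; indices: 1, 0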

.mean(input, dim = nil, keepdim: false) ⇒ Object

TODO support out



# File 'lib/torch.rb', line 370

def mean(input, dim = nil, keepdim: false)
  if dim
    _mean_dim(input, dim, keepdim)
  else
    _mean(input)
  end
end

.no_grad ⇒ Object



# File 'lib/torch.rb', line 238

def no_grad
  previous_value = grad_enabled?
  begin
    _set_grad_enabled(false)
    yield
  ensure
    _set_grad_enabled(previous_value)
  end
end
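
For example, gradient tracking is disabled inside the block and grad_enabled? is restored afterwards (assuming tensor_options accepts requires_grad, as in PyTorch):

w = Torch.ones(3, requires_grad: true)

Torch.no_grad do
  w * 2 # not tracked for autograd
end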

.ones(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 283

def ones(*size, **options)
  _ones(tensor_size(size), tensor_options(**options))
end

.ones_like(input, **options) ⇒ Object

— begin like —



# File 'lib/torch.rb', line 334

def ones_like(input, **options)
  ones(input.size, like_options(input, options))
end

.rand(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 287

def rand(*size, **options)
  _rand(tensor_size(size), tensor_options(**options))
end

.rand_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 346

def rand_like(input, **options)
  rand(input.size, like_options(input, options))
end

.randint(low = 0, high, size, **options) ⇒ Object



# File 'lib/torch.rb', line 291

def randint(low = 0, high, size, **options)
  _randint(low, high, size, tensor_options(**options))
end

.randint_like(input, low, high = nil, **options) ⇒ Object



# File 'lib/torch.rb', line 350

def randint_like(input, low, high = nil, **options)
  # ruby doesn't support input, low = 0, high, ...
  if high.nil?
    high = low
    low = 0
  end
  randint(low, high, input.size, like_options(input, options))
end
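
For example:

x = Torch.rand(2, 3)
Torch.randint_like(x, 10)    # values in [0, 10) (dtype follows x via like_options)
Torch.randint_like(x, 5, 10) # values in [5, 10)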

.randn(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 295

def randn(*size, **options)
  _randn(tensor_size(size), tensor_options(**options))
end

.randn_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 359

def randn_like(input, **options)
  randn(input.size, like_options(input, options))
end

.randperm(n, **options) ⇒ Object



# File 'lib/torch.rb', line 299

def randperm(n, **options)
  _randperm(n, tensor_options(**options))
end

.softmax(input, dim: nil) ⇒ Object



# File 'lib/torch.rb', line 405

def softmax(input, dim: nil)
  _softmax(input, dim)
end
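
For example:

t = Torch.tensor([[1.0, 2.0, 3.0]])
Torch.softmax(t, dim: 1) # each row sums to 1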

.sum(input, dim = nil, keepdim: false) ⇒ Object

TODO support dtype



# File 'lib/torch.rb', line 379

def sum(input, dim = nil, keepdim: false)
  if dim
    _sum_dim(input, dim, keepdim)
  else
    _sum(input)
  end
end
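
For example:

x = Torch.tensor([[1, 2], [3, 4]])
Torch.sum(x)                   # 10
Torch.sum(x, 0)                # column sums: 4, 6
Torch.sum(x, 1, keepdim: true) # row sums with shape [2, 1]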

.tensor(data, **options) ⇒ Object



# File 'lib/torch.rb', line 307

def tensor(data, **options)
  size = []
  if data.respond_to?(:to_a)
    data = data.to_a
    d = data
    while d.is_a?(Array)
      size << d.size
      d = d.first
    end
    data = data.flatten
  else
    data = [data].compact
  end

  if options[:dtype].nil?
    if data.all? { |v| v.is_a?(Integer) }
      options[:dtype] = :int64
    elsif data.all? { |v| v == true || v == false }
      options[:dtype] = :bool
    end
  end

  _tensor(data, size, tensor_options(**options))
end
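
The dtype is inferred when not given: all-Integer data becomes int64, all-boolean data becomes bool, and anything else falls through to the default (float32 in LibTorch):

Torch.tensor([1, 2, 3])        # int64
Torch.tensor([true, false])    # bool
Torch.tensor([1.5, 2.5])       # default float dtype
Torch.tensor([[1, 2], [3, 4]]) # nested arrays give size [2, 2]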

.tensor?(obj) ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/torch.rb', line 203

def tensor?(obj)
  obj.is_a?(Tensor)
end

.topk(input, k) ⇒ Object



# File 'lib/torch.rb', line 387

def topk(input, k)
  _topk(input, k)
end
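
A sketch, assuming the binding returns values and indices like PyTorch's topk:

x = Torch.tensor([1, 9, 4, 7])
values, indices = Torch.topk(x, 2) # values: 9, 7; indices: 1, 3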

.zeros(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 303

def zeros(*size, **options)
  _zeros(tensor_size(size), tensor_options(**options))
end

.zeros_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 363

def zeros_like(input, **options)
  zeros(input.size, like_options(input, options))
end