Class: Torch::Inspector::Formatter
- Inherits: Object
- Defined in: lib/torch/inspector.rb
Instance Method Summary
- #format(value) ⇒ Object
- #initialize(tensor) ⇒ Formatter (constructor)
  A new instance of Formatter.
- #tensor_totype(t) ⇒ Object
- #width ⇒ Object
Constructor Details
#initialize(tensor) ⇒ Formatter
Returns a new instance of Formatter.
# File 'lib/torch/inspector.rb', line 13

def initialize(tensor)
  @floating_dtype = tensor.floating_point?
  @complex_dtype = tensor.complex?
  @int_mode = true
  @sci_mode = false
  @max_width = 1

  tensor_view = Torch.no_grad { tensor.reshape(-1) }

  if !@floating_dtype
    tensor_view.each do |value|
      value_str = value.item.to_s
      @max_width = [@max_width, value_str.length].max
    end
  else
    nonzero_finite_vals = Torch.masked_select(tensor_view, Torch.isfinite(tensor_view) & tensor_view.ne(0))

    # no valid number, do nothing
    return if nonzero_finite_vals.numel == 0

    # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
    nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs)
    nonzero_finite_min = tensor_totype(nonzero_finite_abs.min)
    nonzero_finite_max = tensor_totype(nonzero_finite_abs.max)

    nonzero_finite_vals.each do |value|
      if value.item != value.item.ceil
        @int_mode = false
        break
      end
    end

    if @int_mode
      # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
      # to indicate that the tensor is of floating type. add 1 to the len to account for this.
      if nonzero_finite_max / nonzero_finite_min > 1000.0 || nonzero_finite_max > 1.0e8
        @sci_mode = true
        nonzero_finite_vals.each do |value|
          value_str = "%.#{PRINT_OPTS[:precision]}e" % value.item
          @max_width = [@max_width, value_str.length].max
        end
      else
        nonzero_finite_vals.each do |value|
          value_str = "%.0f" % value.item
          @max_width = [@max_width, value_str.length + 1].max
        end
      end
    else
      # Check if scientific representation should be used.
      if nonzero_finite_max / nonzero_finite_min > 1000.0 || nonzero_finite_max > 1.0e8 || nonzero_finite_min < 1.0e-4
        @sci_mode = true
        nonzero_finite_vals.each do |value|
          value_str = "%.#{PRINT_OPTS[:precision]}e" % value.item
          @max_width = [@max_width, value_str.length].max
        end
      else
        nonzero_finite_vals.each do |value|
          value_str = "%.#{PRINT_OPTS[:precision]}f" % value.item
          @max_width = [@max_width, value_str.length].max
        end
      end
    end
  end

  @sci_mode = PRINT_OPTS[:sci_mode] unless PRINT_OPTS[:sci_mode].nil?
end
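The mode selection above can be seen with a few representative inputs. The sketch below is illustrative only (it assumes Torch.tensor is available and that PRINT_OPTS uses its default precision); Formatter is normally constructed internally by the inspector rather than by user code.

require "torch"

# Whole-valued floats keep int_mode: each element later prints as "1.", "2.", ...
Torch::Inspector::Formatter.new(Torch.tensor([1.0, 2.0, 3.0]))

# A max/min magnitude ratio above 1000 (or any magnitude below 1e-4)
# enables sci_mode, so elements print in %e notation.
Torch::Inspector::Formatter.new(Torch.tensor([0.00001, 2.0]))

# Integer tensors skip the mode logic; the width is simply the longest
# to_s of any element, here 3 characters for 300.
Torch::Inspector::Formatter.new(Torch.tensor([1, 2, 300]))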
Instance Method Details
#format(value) ⇒ Object
# File 'lib/torch/inspector.rb', line 84

def format(value)
  value = value.item

  if @floating_dtype
    if @sci_mode
      ret = "%#{@max_width}.#{PRINT_OPTS[:precision]}e" % value
    elsif @int_mode
      ret = String.new("%.0f" % value)
      unless value.infinite? || value.nan?
        ret += "."
      end
    else
      ret = "%.#{PRINT_OPTS[:precision]}f" % value
    end
  elsif @complex_dtype
    # TODO use float formatter for each part
    precision = PRINT_OPTS[:precision]
    imag = value.imag
    sign = imag >= 0 ? "+" : "-"
    ret = "%.#{precision}f#{sign}%.#{precision}fi" % [value.real, value.imag.abs]
  else
    ret = value.to_s
  end

  # Ruby throws error when negative, Python doesn't
  " " * [@max_width - ret.size, 0].max + ret
end
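A hedged usage sketch (assuming Torch.tensor and the default precision of 4): format is called once per element while a tensor is printed, and each result is left-padded (right-aligned) so that all elements share the width computed in the constructor.

require "torch"

fmt = Torch::Inspector::Formatter.new(Torch.tensor([0.5, 100.0]))
fmt.format(Torch.tensor(0.5))    # => "  0.5000" (padded to the widest element)
fmt.format(Torch.tensor(100.0))  # => "100.0000"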
#tensor_totype(t) ⇒ Object
# File 'lib/torch/inspector.rb', line 111

def tensor_totype(t)
  dtype = t.mps? ? :float : :double
  t.to(dtype: dtype)
end
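This helper widens values before the constructor compares magnitudes. A small illustration, assuming the :half dtype alias is available (the exact return dtype depends on the device):

require "torch"

# Half precision overflows near 1e8 and has no div on the CPU, so magnitudes
# are widened to double (or to float on MPS, which lacks float64).
half = Torch.tensor([1.0, 60000.0], dtype: :half)
fmt = Torch::Inspector::Formatter.new(half)
fmt.tensor_totype(half.abs)  # double-precision copy on CPU, float on MPS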
#width ⇒ Object
# File 'lib/torch/inspector.rb', line 80

def width
  @max_width
end
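A short sketch (same assumptions as above): width reports the character count of the widest formatted element, which the inspector uses to right-align every printed value.

require "torch"

fmt = Torch::Inspector::Formatter.new(Torch.tensor([3.0, -14.0]))
fmt.width                      # => 4, wide enough for "-14."
fmt.format(Torch.tensor(3.0))  # => "  3.", padded to the same width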