Class: TensorStream::NN
- Inherits: Object
- Extended by: EmbeddingLookup, Maths::MathFunctions, OpHelper
- Defined in: lib/tensor_stream/nn/nn_ops.rb
Overview
High level machine learning functions
Class Method Summary
- .bias_add(value, bias, data_format: nil, name: nil) ⇒ Object
  Adds bias to value.
- .conv2d(input, filter, strides, padding, name: nil) ⇒ Object
- .dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil) ⇒ Object
  Computes dropout.
- .log_softmax(logits, axis: -1, name: nil) ⇒ Object
  Computes log softmax activations.
- .relu(features, name: nil) ⇒ Object
- .relu6(features, name: nil) ⇒ Object
- .sigmoid(input, name: nil) ⇒ Object
- .sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object
- .softmax(logits, axis: nil, name: nil) ⇒ Object
- .softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object
- .softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil) ⇒ Object
- .sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object
Methods included from OpHelper
_op, cons, format_source, fp_type?, i_cons, i_op, i_var, int_type?, reduced_shape, shape_eval, shape_full_specified, shapes_fully_specified_and_equal
Methods included from EmbeddingLookup
_clip, _embedding_lookup_and_transform, _rank, embedding_lookup
Methods included from PyPorts
Methods included from Maths::MathFunctions
Class Method Details
.bias_add(value, bias, data_format: nil, name: nil) ⇒ Object
Adds bias to value.
This is a narrow version of tf.add where the bias is restricted to 1-D only.
# File 'lib/tensor_stream/nn/nn_ops.rb', line 148

def bias_add(value, bias, data_format: nil, name: nil)
  value = TensorStream.convert_to_tensor(value, name: "input")
  bias = TensorStream.convert_to_tensor(bias, dtype: value.dtype, name: "bias")
  raise TensorStreamError, "value must be at least rank 2" if value.shape.known? && value.shape.ndims < 2

  _op(:bias_add, value, bias, data_format: data_format, name: name)
end
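Example (illustrative sketch, not from the library's docs; it assumes the gem's TensorFlow-style TensorStream.constant and TensorStream.session helpers, and that this module is exposed as TensorStream.nn):

ts = TensorStream
value = ts.constant([[1.0, 2.0], [3.0, 4.0]]) # rank 2, as required above
bias  = ts.constant([0.5, -0.5])              # 1-D, one entry per last-dimension slot

result = ts.nn.bias_add(value, bias)
ts.session.run(result) # => [[1.5, 1.5], [3.5, 3.5]] (bias broadcast across rows)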
.conv2d(input, filter, strides, padding, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 140

def conv2d(input, filter, strides, padding, name: nil)
  _op(:conv2d, input, filter, strides: strides, padding: padding, name: name)
end
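The signature above does not document the expected layouts; the sketch below assumes the TensorFlow conventions this gem mirrors (NHWC input [batch, height, width, channels], filter [height, width, in_channels, out_channels], 'SAME'/'VALID' padding strings) plus the session API assumed in the earlier example:

ts = TensorStream
input  = ts.constant([[[[1.0], [2.0]], [[3.0], [4.0]]]])       # [1, 2, 2, 1] NHWC
filter = ts.constant([[[[1.0]], [[0.0]]], [[[0.0]], [[1.0]]]]) # [2, 2, 1, 1]

# This filter adds each pixel to its lower-right neighbour; 'SAME' zero-pads the border.
out = ts.nn.conv2d(input, filter, [1, 1, 1, 1], 'SAME')
ts.session.run(out) # => [[[[5.0], [2.0]], [[3.0], [4.0]]]]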
.dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil) ⇒ Object
Computes dropout.
With probability keep_prob, outputs the input element scaled up by 1 / keep_prob, otherwise outputs 0. The scaling is so that the expected sum is unchanged.
# File 'lib/tensor_stream/nn/nn_ops.rb', line 29

def dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil)
  TensorStream.name_scope(name, "dropout", values: [x]) do
    x = TensorStream.convert_to_tensor(x, name: "x")
    raise TensorStream::ValueError, "x has to be a floating point tensor since it's going to be scaled. Got a #{x.data_type} tensor instead." unless fp_type?(x.data_type)
    raise TensorStream::ValueError, "keep_prob must be a scalar tensor or a float in the range (0, 1], got #{keep_prob}" if keep_prob.is_a?(Float) && !(keep_prob > 0 && keep_prob <= 1)

    return x if keep_prob.is_a?(Float) && keep_prob.to_f == 1.0

    keep_prob = TensorStream.convert_to_tensor(keep_prob, dtype: x.dtype, name: "keep_prob")
    return x if keep_prob.value == 1.0

    noise_shape = if noise_shape.nil?
      TensorStream.shape(x)
    else
      noise_shape
    end

    random_tensor = keep_prob
    random_tensor += TensorStream.random_uniform(noise_shape, seed: seed, dtype: x.dtype)

    binary_tensor = TensorStream.floor(random_tensor)
    TensorStream.div(x, keep_prob) * binary_tensor
  end
end
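A sketch of the behaviour described above (same assumed TensorStream.constant / TensorStream.session / TensorStream.nn helpers as the earlier examples): with keep_prob = 0.5, each surviving element is scaled by 1 / 0.5 = 2.0, so the expected sum matches the input's sum.

ts = TensorStream
x = ts.constant([1.0, 2.0, 3.0, 4.0])

dropped = ts.nn.dropout(x, 0.5, seed: 42) # seed fixed only for repeatability
ts.session.run(dropped) # each element is either 0.0 or twice its input value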
.log_softmax(logits, axis: -1, name: nil) ⇒ Object
Computes log softmax activations.
# File 'lib/tensor_stream/nn/nn_ops.rb', line 121

def log_softmax(logits, axis: -1, name: nil)
  _op(:log_softmax, logits, axis: axis, name: name)
end
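Numerically, log_softmax(x) = x - log(sum(exp(x))) along the chosen axis. A small sketch (same assumed helper API as the earlier examples):

ts = TensorStream
out = ts.nn.log_softmax(ts.constant([[2.0, 1.0, 0.1]]))
ts.session.run(out) # => approximately [[-0.417, -1.417, -2.317]]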
.relu(features, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 14

def relu(features, name: nil)
  TensorStream.max(features, 0, name: "relu_#{name}")
end
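As the implementation shows, relu is just an element-wise max(features, 0). For instance (same assumed helper API as above):

ts = TensorStream
ts.session.run(ts.nn.relu(ts.constant([-2.0, 0.0, 3.0]))) # => [0.0, 0.0, 3.0]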
.relu6(features, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 18

def relu6(features, name: nil)
  TensorStream.name_scope(name, "Relu6", values: [features]) do
    features = TensorStream.convert_to_tensor(features, name: "features")
    _op(:relu6, features, name: name)
  end
end
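relu6 additionally clips activations at 6, i.e. min(max(features, 0), 6). Illustrative values (same assumed helper API):

ts = TensorStream
ts.session.run(ts.nn.relu6(ts.constant([-1.0, 4.0, 9.0]))) # => [0.0, 4.0, 6.0]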
.sigmoid(input, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 54

def sigmoid(input, name: nil)
  TensorStream.sigmoid(input, name: name)
end
.sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 125

def sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "logistic_loss", values: [logits, labels]) do |_name|
    tf = TensorStream
    logits = tf.convert_to_tensor(logits, name: "logits")
    labels = tf.convert_to_tensor(labels, name: "labels")
    zeros = tf.zeros_like(logits, dtype: logits.dtype)
    cond = (logits >= zeros)
    relu_logits = tf.where(cond, logits, zeros)
    neg_abs_logits = tf.where(cond, -logits, logits)
    tf.add(relu_logits - logits * labels, tf.log1p(tf.exp(neg_abs_logits)), name: name)
  end
end
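The body above is the numerically stable form max(x, 0) - x * z + log(1 + exp(-|x|)) of the per-element logistic loss, for logits x and labels z. A sketch with hand-checked values (same assumed helper API as the earlier examples):

ts = TensorStream
loss = ts.nn.sigmoid_cross_entropy_with_logits(
  labels: ts.constant([1.0, 0.0]),
  logits: ts.constant([2.0, -1.0])
)
ts.session.run(loss) # => approximately [0.127, 0.313], i.e. log(1 + e**-2) and log(1 + e**-1)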
.softmax(logits, axis: nil, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 10

def softmax(logits, axis: nil, name: nil)
  _op(:softmax, logits, axis: axis, name: name)
end
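A quick sketch (same assumed helper API); each row of the result sums to 1:

ts = TensorStream
out = ts.nn.softmax(ts.constant([[2.0, 1.0, 0.1]]))
ts.session.run(out) # => approximately [[0.659, 0.242, 0.099]]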
.softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 58

def softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits, name: name)
end
.softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 62

def softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "softmax_cross_entropy_with_logits", values: [logits, labels]) do
    ts = TensorStream
    logits = ts.convert_to_tensor(logits, name: "logits")
    labels = ts.convert_to_tensor(labels, name: "labels")
    labels = ts.cast(labels, logits.dtype)

    output = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
    output[0]
  end
end
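A sketch with a one-hot label row (same assumed helper API); the per-example loss is -log of the probability softmax assigns to the true class:

ts = TensorStream
loss = ts.nn.softmax_cross_entropy_with_logits_v2(
  labels: ts.constant([[0.0, 1.0, 0.0]]),
  logits: ts.constant([[2.0, 1.0, 0.1]])
)
ts.session.run(loss) # => approximately [1.417], i.e. -log(softmax(logits)[1])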
.sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object
# File 'lib/tensor_stream/nn/nn_ops.rb', line 74

def sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "SparseSoftmaxCrossEntropyWithLogits", values: [logits, labels]) do
    tf = TensorStream
    labels = tf.convert_to_tensor(labels)
    logits = tf.convert_to_tensor(logits)
    precise_logits = logits.data_type == :float16 ? tf.cast(logits, :float32) : logits

    labels_static_shape = labels.shape
    labels_shape = tf.shape(labels)
    static_shapes_fully_defined = labels_static_shape.known? && logits.shape.known?

    raise TensorStream::ValueError, "Logits cannot be scalars - received shape #{logits.shape.shape}." if logits.shape.known? && logits.shape.scalar?

    if logits.shape.known? && (labels_static_shape.known? && labels_static_shape.ndims != logits.shape.ndims - 1)
      raise TensorStream::ValueError, "Rank mismatch: Rank of labels (received #{labels_static_shape.ndims}) " \
        "should equal rank of logits minus 1 (received #{logits.shape.ndims})."
    end

    if logits.shape.ndims == 2
      cost = _op(:sparse_softmax_cross_entropy_with_logits, precise_logits, labels, name: name)
      if logits.data_type == :float16
        return tf.cast(cost[0], :float16)
      else
        return cost[0]
      end
    end

    shape_checks = []
    shape_checks << tf.assert_equal(tf.rank(labels), tf.rank(logits) - 1) unless static_shapes_fully_defined

    tf.control_dependencies(shape_checks) do
      num_classes = tf.shape(logits)[tf.rank(logits) - 1]
      precise_logits = tf.reshape(precise_logits, [-1, num_classes])
      labels = tf.reshape(labels, [-1])
      cost = _op(:sparse_softmax_cross_entropy_with_logits, precise_logits, labels, name: name)
      cost = tf.reshape(cost[0], labels_shape)

      if logits.data_type == :float16
        tf.cast(cost, :float16)
      else
        cost
      end
    end
  end
end
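Unlike the dense versions above, labels here are integer class ids with one fewer dimension than logits, as the rank check in the body enforces. A sketch (same assumed helper API as the earlier examples):

ts = TensorStream
loss = ts.nn.sparse_softmax_cross_entropy_with_logits(
  labels: ts.constant([1]),              # one class index per example
  logits: ts.constant([[2.0, 1.0, 0.1]])
)
ts.session.run(loss) # => approximately [1.417], matching the one-hot example above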