Class: DNN::Models::Model

Inherits:
Chain
  • Object
show all
Defined in:
lib/dnn/core/models.rb

Overview

This class deals with the model of the network.

Direct Known Subclasses

FixedModel, Sequential

Instance Attribute Summary collapse

Class Method Summary collapse

Instance Method Summary collapse

Methods inherited from Chain

#forward, #layers, #load_hash, #to_hash

Constructor Details

#initialize ⇒ Model



125
126
127
128
129
130
131
132
133
# File 'lib/dnn/core/models.rb', line 125

# Build an empty, un-configured model.
# The optimizer and loss function are attached later via #setup, and
# @built flips to true on the first forward #call.
def initialize
  super
  @optimizer = @loss_func = @loss_weights = nil
  @built = false
  @callbacks = []
  @last_log = {}
end

Instance Attribute Details

#last_log ⇒ Object (readonly)

Returns the value of attribute last_log.



113
114
115
# File 'lib/dnn/core/models.rb', line 113

# @return [Object] Log of the most recently recorded metrics (read-only).
def last_log; @last_log; end

#loss_weights ⇒ Object

Returns the value of attribute loss_weights.



112
113
114
# File 'lib/dnn/core/models.rb', line 112

# @return [Object] Per-output loss weights (nil unless set via #setup).
def loss_weights; @loss_weights; end

#optimizer ⇒ Object

Returns the value of attribute optimizer.



111
112
113
# File 'lib/dnn/core/models.rb', line 111

# @return [Object] Optimizer attached via #setup (nil before setup).
def optimizer; @optimizer; end

Class Method Details

.load(file_name) ⇒ DNN::Models::Model

Load marshal model.



118
119
120
121
122
123
# File 'lib/dnn/core/models.rb', line 118

# Load marshal model.
# Allocates a bare instance (bypassing #initialize) and lets the loader
# restore its state from the file.
# @param [String] file_name File name of marshal model to load.
# @return [DNN::Models::Model] Return the loaded model.
def self.load(file_name)
  model = allocate
  Loaders::MarshalLoader.new(model).load(file_name)
  model
end

Instance Method Details

#add_callback(callback) ⇒ Object

Add callback function.



466
467
468
469
# File 'lib/dnn/core/models.rb', line 466

# Add callback function.
# Points the callback back at this model, then registers it.
# @param [Object] callback Callback object to register (must respond to #model=).
def add_callback(callback)
  callback.model = self
  @callbacks.push(callback)
end

#built? ⇒ Boolean



518
519
520
# File 'lib/dnn/core/models.rb', line 518

# @return [Boolean] Whether the model graph has been built by a first forward call.
def built?; @built; end

#call(input_tensors) ⇒ Object



135
136
137
138
139
# File 'lib/dnn/core/models.rb', line 135

# Run one forward pass and mark the model as built.
# @param input_tensors Input tensor or array of tensors handed to #forward.
# @return Output tensor(s) produced by #forward.
def call(input_tensors)
  result = forward(input_tensors)
  @built ||= true
  result
end

#clean_layers ⇒ Object

Clean all layers.



523
524
525
526
527
# File 'lib/dnn/core/models.rb', line 523

# Clean all layers.
# Resets each layer's state, the loss function's state, and drops the
# cached layer list so it is rebuilt on next access.
def clean_layers
  layers.each { |layer| layer.clean }
  @loss_func.clean
  @layers_cache = nil
end

#clear_callbacks ⇒ Object

Clear the callback function registered for each event.



472
473
474
# File 'lib/dnn/core/models.rb', line 472

# Clear the callback function registered for each event.
# Replaces the callback list with a fresh empty array.
def clear_callbacks
  @callbacks = Array.new
end

#copy ⇒ DNN::Models::Model



498
499
500
# File 'lib/dnn/core/models.rb', line 498

# Deep-copy the model via a Marshal round-trip.
# @return [DNN::Models::Model] Return the copy model.
def copy
  serialized = Marshal.dump(self)
  Marshal.load(serialized)
end

#evaluate(x, y, batch_size: 100, accuracy: true) ⇒ Array

Evaluate model and get accuracy and loss of test data.



329
330
331
332
# File 'lib/dnn/core/models.rb', line 329

# Evaluate model and get accuracy and loss of test data.
# Wraps the data in a non-shuffling iterator and delegates to
# #evaluate_by_iterator.
# @param x Input test data.
# @param y Output (target) test data.
# @param [Integer] batch_size Batch size used for one test.
# @param [Boolean] accuracy Whether to compute accuracy as well as loss.
# @return [Array] [accuracy, mean_loss]
def evaluate(x, y, batch_size: 100, accuracy: true)
  check_xy_type(x, y)
  iterator = Iterator.new(x, y, random: false)
  evaluate_by_iterator(iterator, batch_size: batch_size, accuracy: accuracy)
end

#evaluate_by_iterator(test_iterator, batch_size: 100, accuracy: true) ⇒ Array

Evaluate model by iterator.



339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
# File 'lib/dnn/core/models.rb', line 339

# Evaluate model by iterator.
# Accumulates per-batch results from #test_on_batch and returns
# [accuracy, mean_loss]; both are per-output arrays when the model has
# multiple loss functions. Results are also stored in @last_log under
# :test_loss and :test_accuracy.
# @param test_iterator Iterator over test data — must respond to #num_datas and #foreach.
# @param [Integer] batch_size Batch size used for one test.
# @param [Boolean] accuracy Whether to compute accuracy; when false, the returned accuracy is nil.
# @return [Array] [accuracy, mean_loss]
def evaluate_by_iterator(test_iterator, batch_size: 100, accuracy: true)
  num_test_datas = test_iterator.num_datas
  # Never use a batch larger than the data set itself.
  batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
  if @loss_func.is_a?(Array)
    # Multi-output model: keep one accumulator per loss function.
    total_correct = Array.new(@loss_func.length, 0)
    sum_loss = Array.new(@loss_func.length, 0)
  else
    total_correct = 0
    sum_loss = 0
  end
  max_steps = (num_test_datas.to_f / batch_size).ceil
  test_iterator.foreach(batch_size) do |x_batch, y_batch|
    correct, loss_value = test_on_batch(x_batch, y_batch, accuracy: accuracy)
    if @loss_func.is_a?(Array)
      @loss_func.each_index do |i|
        total_correct[i] += correct[i] if accuracy
        sum_loss[i] += loss_value[i]
      end
    else
      total_correct += correct if accuracy
      sum_loss += loss_value
    end
  end
  # acc stays nil when accuracy: false.
  acc = nil
  if @loss_func.is_a?(Array)
    mean_loss = Array.new(@loss_func.length, 0)
    acc = Array.new(@loss_func.length, 0) if accuracy
    @loss_func.each_index do |i|
      mean_loss[i] += sum_loss[i] / max_steps
      acc[i] += total_correct[i].to_f / num_test_datas if accuracy
    end
  else
    mean_loss = sum_loss / max_steps
    acc = total_correct.to_f / num_test_datas if accuracy
  end
  @last_log[:test_loss] = mean_loss
  @last_log[:test_accuracy] = acc
  [acc, mean_loss]
end

#get_all_params_data ⇒ Array

Get parameter data of all layers.



531
532
533
534
535
536
537
# File 'lib/dnn/core/models.rb', line 531

# Get parameter data of all layers.
# @return [Array] One hash per trainable layer, mapping each param key to
#   its raw data.
def get_all_params_data
  trainable_layers.map do |layer|
    layer.get_params.each_with_object({}) do |(key, param), data|
      data[key] = param.data
    end
  end
end

#get_layer(name) ⇒ DNN::Layers::Layer

Get the layer that the model has.



511
512
513
514
515
# File 'lib/dnn/core/models.rb', line 511

# Get the layer that the model has.
# Looks up the instance variable "@#{name}" and returns it only when it is
# a layer-like object.
# @param name Name of the layer-holding instance variable (without the @).
# @return [DNN::Layers::Layer, Chain, LayersList, nil] The layer, or nil when
#   absent or not a layer-like object.
def get_layer(name)
  candidate = instance_variable_get("@#{name}")
  case candidate
  when Layers::Layer, Chain, LayersList
    candidate
  else
    nil
  end
end

#load_params(file_name) ⇒ Object

Load marshal params.



478
479
480
481
# File 'lib/dnn/core/models.rb', line 478

# Load marshal params.
# @param [String] file_name File name of marshal params to load.
def load_params(file_name)
  Loaders::MarshalLoader.new(self).load(file_name)
end

#loss_func ⇒ Object



157
158
159
# File 'lib/dnn/core/models.rb', line 157

# @return [Object] Loss function (or Array of loss functions) set via #setup.
def loss_func; @loss_func; end

#loss_func=(lfs) ⇒ Object



161
162
163
164
165
166
167
168
169
170
171
172
173
# File 'lib/dnn/core/models.rb', line 161

# Set the loss function(s) for the model.
# Arrays are validated element by element; each entry must be a
# Losses::Loss. Non-array values are stored as-is (validation happens in
# #setup).
# @param lfs A single loss function, or one loss function per model output.
# @raise [TypeError] If an array element is not a DNN::Losses::Loss.
def loss_func=(lfs)
  if lfs.is_a?(Array)
    @loss_func = []
    lfs.each_with_index do |lf, i|
      unless lf.is_a?(Losses::Loss)
        raise TypeError, "loss_func[#{i}]:#{lf.class} is not an instance of DNN::Losses::Loss class."
      end
      @loss_func.push(lf)
    end
  else
    @loss_func = lfs
  end
end

#predict(x, use_loss_activation: true) ⇒ Object

Predict data.



430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
# File 'lib/dnn/core/models.rb', line 430

# Predict data.
# Runs a forward pass in inference mode; when the loss class exposes an
# activation, it is applied to the raw output.
# @param x Input data.
# @param [Boolean] use_loss_activation Apply the loss function's activation
#   (when its class responds to :activation) to each output.
# @return Predicted data — an Array when the model has multiple outputs.
def predict(x, use_loss_activation: true)
  check_xy_type(x)
  DNN.learning_phase = false
  output_tensors = call(Tensor.convert(x))
  multiple = output_tensors.is_a?(Array)
  lfs = multiple ? @loss_func : [@loss_func]
  outs = multiple ? output_tensors : [output_tensors]
  ys = outs.map.with_index do |out, i|
    data = out.data
    if use_loss_activation && lfs[i].class.respond_to?(:activation)
      data = lfs[i].class.activation(data)
    end
    data
  end
  multiple ? ys : ys.first
end

#predict1(x, use_loss_activation: true) ⇒ Object

Predict one data.



454
455
456
457
458
459
460
461
462
# File 'lib/dnn/core/models.rb', line 454

# Predict one data.
# Prepends a batch dimension of 1 to the input(s), delegates to #predict,
# and strips the batch dimension from the result with [0, false].
# @param x A single input sample (or an Array of samples for multi-input models).
# @param [Boolean] use_loss_activation Apply the loss function's activation to the output.
def predict1(x, use_loss_activation: true)
  check_xy_type(x)
  batched = x.is_a?(Array) ? x.map { |v| v.reshape(1, *v.shape) } : x.reshape(1, *x.shape)
  predict(batched, use_loss_activation: use_loss_activation)[0, false]
end

#save(file_name) ⇒ Object

Save the model in marshal format.



485
486
487
488
# File 'lib/dnn/core/models.rb', line 485

# Save the model in marshal format.
# @param [String] file_name Name of marshal model file.
def save(file_name)
  Savers::MarshalSaver.new(self, include_model: true).save(file_name)
end

#save_params(file_name) ⇒ Object

Save the params in marshal format.



492
493
494
495
# File 'lib/dnn/core/models.rb', line 492

# Save the params in marshal format.
# @param [String] file_name Name of marshal params file.
def save_params(file_name)
  Savers::MarshalSaver.new(self, include_model: false).save(file_name)
end

#set_all_params_data(params_data) ⇒ Object

Set parameter data of all layers.



541
542
543
544
545
546
547
# File 'lib/dnn/core/models.rb', line 541

# Set parameter data of all layers.
# Mirrors #get_all_params_data: the i-th hash is written into the i-th
# trainable layer's params.
# @param [Array] params_data One hash per trainable layer, mapping param
#   key to the data to assign.
def set_all_params_data(params_data)
  trainable_layers.each_with_index do |layer, i|
    params_data[i].each do |key, data|
      layer.get_params[key].data = data
    end
  end
end

#setup(optimizer, loss_func, loss_weights: nil) ⇒ Object

Set optimizer and loss_func to model.



145
146
147
148
149
150
151
152
153
154
155
# File 'lib/dnn/core/models.rb', line 145

# Set optimizer and loss_func to model.
# @param [DNN::Optimizers::Optimizer] optimizer Optimizer to use for training.
# @param loss_func A Losses::Loss, or an Array of losses for multi-output models.
# @param loss_weights Optional per-output loss weights (nil by default).
# @raise [TypeError] When optimizer or loss_func has the wrong type.
def setup(optimizer, loss_func, loss_weights: nil)
  raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class." unless optimizer.is_a?(Optimizers::Optimizer)
  raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss or Array class." unless loss_func.is_a?(Losses::Loss) || loss_func.is_a?(Array)
  @optimizer = optimizer
  self.loss_func = loss_func
  @loss_weights = loss_weights
end

#test_on_batch(x, y, accuracy: true) ⇒ Array

Evaluate once.



384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
# File 'lib/dnn/core/models.rb', line 384

# Evaluate once.
# Runs one inference-mode forward pass and computes correctness and loss
# for a single batch, firing the before/after test callbacks around it.
# @param x Input test data for one batch.
# @param y Output (target) test data for one batch.
# @param [Boolean] accuracy Whether to compute the correct count; when false, correct stays nil.
# @return [Array] [correct, loss_data]; both are per-output arrays for multi-output models.
def test_on_batch(x, y, accuracy: true)
  call_callbacks(:before_test_on_batch)
  DNN.learning_phase = false
  output_tensors = call(Tensor.convert(x))
  correct = nil
  if output_tensors.is_a?(Array)
    # Multi-output model: one loss function and one target per output.
    correct = [] if accuracy
    loss_data = []
    output_tensors.each.with_index do |out, i|
      # accuracy(...) with parens calls the method, not the keyword local.
      correct << accuracy(out.data, y[i]) if accuracy
      loss = @loss_func[i].(out, Tensor.convert(y[i]))
      loss_data << loss.data.to_f
    end
  else
    out = output_tensors
    correct = accuracy(out.data, y) if accuracy
    loss = @loss_func.(out, Tensor.convert(y))
    loss_data = loss.data.to_f
  end
  call_callbacks(:after_test_on_batch)
  [correct, loss_data]
end

#train(x, y, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true, accuracy: true) ⇒ Object Also known as: fit

Start training. Setup the model before use this method.



186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
# File 'lib/dnn/core/models.rb', line 186

# Start training. Setup the model before use this method.
# Wraps the data in a (shuffling) Iterator and delegates to
# #train_by_iterator with the same options.
# @param x Input training data.
# @param y Output (target) training data.
# @param [Integer] epochs Number of training epochs.
# @param [Integer] batch_size Batch size used for one training.
# @param [Integer] initial_epoch Epoch number to start from (useful when resuming).
# @param test Either an [x, y] Array or an iterator used for per-epoch evaluation; nil disables it.
# @param [Boolean] verbose Set true to display the log.
# @param [Boolean] accuracy Whether to compute accuracy during evaluation.
def train(x, y, epochs,
          batch_size: 1,
          initial_epoch: 1,
          test: nil,
          verbose: true,
          accuracy: true)
  check_xy_type(x, y)
  train_by_iterator(Iterator.new(x, y), epochs,
                    batch_size: batch_size,
                    initial_epoch: initial_epoch,
                    test: test,
                    verbose: verbose,
                    accuracy: accuracy)
end

#train_by_iterator(train_iterator, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true, accuracy: true) ⇒ Object Also known as: fit_by_iterator

Start training by iterator. Setup the model before use this method.

Raises:



214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
# File 'lib/dnn/core/models.rb', line 214

# Start training by iterator. Setup the model before use this method.
# Runs epochs from initial_epoch to epochs, printing a 40-character
# progress bar per batch when verbose, and evaluating on test data after
# each epoch when test is given. Callbacks may `throw :stop, message` to
# abort training early.
# @param train_iterator Iterator yielding (x_batch, y_batch, index) — must
#   respond to #num_datas, #last_round_down and #foreach.
# @param [Integer] epochs Number of training epochs.
# @param [Integer] batch_size Batch size used for one training.
# @param [Integer] initial_epoch Epoch number to start counting from.
# @param test Either an [x, y] Array or an iterator for per-epoch evaluation; nil disables it.
# @param [Boolean] verbose Set true to display the log.
# @param [Boolean] accuracy Whether to compute accuracy during evaluation.
# @raise [DNNError] If optimizer or loss function has not been set up.
def train_by_iterator(train_iterator, epochs,
                      batch_size: 1,
                      initial_epoch: 1,
                      test: nil,
                      verbose: true,
                      accuracy: true)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func

  num_train_datas = train_iterator.num_datas
  # When the iterator drops the last partial batch, count only full batches.
  num_train_datas = num_train_datas / batch_size * batch_size if train_iterator.last_round_down

  # catch(:stop) returns the thrown message, or nil when training finishes normally.
  stopped = catch(:stop) do
    (initial_epoch..epochs).each do |epoch|
      @last_log[:epoch] = epoch
      call_callbacks(:before_epoch)
      puts "【 epoch #{epoch}/#{epochs} 】" if verbose

      train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
        train_step_met = train_step(x_batch, y_batch)
        num_trained_datas = (index + 1) * batch_size
        # Clamp so the counter never exceeds the total on the last batch.
        num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
        # Build a "\r====>____" style progress bar, 40 columns wide.
        log = "\r"
        40.times do |i|
          if i < num_trained_datas * 40 / num_train_datas
            log << "="
          elsif i == num_trained_datas * 40 / num_train_datas
            log << ">"
          else
            log << "_"
          end
        end

        log << "  #{num_trained_datas}/#{num_train_datas} "
        log << metrics_to_str(train_step_met)
        print log if verbose
      end

      if test
        acc, loss = if test.is_a?(Array)
                      evaluate(test[0], test[1], batch_size: batch_size, accuracy: accuracy)
                    else
                      evaluate_by_iterator(test, batch_size: batch_size, accuracy: accuracy)
                    end
        if verbose
          metrics = if accuracy
                      { accuracy: acc, test_loss: loss }
                    else
                      { test_loss: loss }
                    end
          print "  " + metrics_to_str(metrics)
        end
      end
      puts "" if verbose
      call_callbacks(:after_epoch)
    end
    nil
  end

  if stopped
    puts "\n#{stopped}" if verbose
  end
end

#train_on_batch(x, y) ⇒ Float | Numo::SFloat

Training once. Setup the model before use this method.

Raises:



294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
# File 'lib/dnn/core/models.rb', line 294

# Training once. Setup the model before use this method.
# Performs one forward pass, one backward pass, and one optimizer update
# on a single batch, firing the before/after train callbacks.
# @param x Input training data for one batch.
# @param y Output (target) training data for one batch.
# @return [Float | Numo::SFloat] Loss of the batch (an Array for multi-output models).
# @raise [DNNError] If optimizer or loss function has not been set up.
def train_on_batch(x, y)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func
  check_xy_type(x, y)
  call_callbacks(:before_train_on_batch)
  DNN.learning_phase = true
  output_tensors = call(Tensor.convert(x))
  if output_tensors.is_a?(Array)
    # Multi-output model: one loss per output.
    loss_data = []
    output_tensors.each.with_index do |out, i|
      loss_opt = {}
      # layers passed only for the first loss — presumably so layer-based
      # penalty terms are added once; confirm in the loss implementation.
      loss_opt[:layers] = layers if i == 0
      loss_opt[:loss_weight] = @loss_weights[i] if @loss_weights
      loss = @loss_func[i].loss(out, Tensor.convert(y[i]), **loss_opt)
      loss_data << loss.data.to_f
      # Seed the backward pass with a ones gradient shaped (batch_size, 1).
      loss.link.backward(Xumo::SFloat.ones(y[i][0...1, false].shape[0], 1))
    end
  else
    out = output_tensors
    loss = @loss_func.loss(out, Tensor.convert(y), layers: layers)
    loss_data = loss.data.to_f
    loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
  end
  @optimizer.update(get_all_trainable_params)
  @last_log[:train_loss] = loss_data
  call_callbacks(:after_train_on_batch)
  loss_data
end

#trainable_layers ⇒ Array

Get the all trainable layers.



504
505
506
# File 'lib/dnn/core/models.rb', line 504

# Get the all trainable layers.
# @return [Array] All layers that are Layers::TrainableLayer instances
#   (grep filters with Class#===, i.e. an is_a? check).
def trainable_layers
  layers.grep(Layers::TrainableLayer)
end