Class: CNTK::Learner
- Inherits: Object
- Defined in: lib/cntk/learner.rb, ext/cntk/cntk_wrap.cxx
Constant Summary
- LearningRateSchedule = MomentumSchedule
- MinibatchSizeSchedule = TrainingParameterPerSampleSchedule
Class Method Summary
- .adagrad(parameters, lr, multiplier: true, unit_gain: CNTK.default_unit_gain_value(), l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
- .adam_sgd(parameters, lr, momentum, unit_gain: CNTK.default_unit_gain_value(), variance_momentum: momentum_as_time_constant_schedule(720000), low_memory: true, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
- .momentum_as_time_constant_schedule(schedule, epoch_size = nil) ⇒ MomentumAsTimeConstantSchedule
- .momentum_schedule(schedule, unit = :minibatch, epoch_size = nil) ⇒ TrainingParameterPerSampleSchedule, TrainingParameterPerMinibatchSchedule
- .momentum_sgd(parameters, lr, momentum, unit_gain: CNTK.default_unit_gain_value(), l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
- .nesterov(parameters, lr, momentum, unit_gain: CNTK.default_unit_gain_value(), l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
- .rmsprop(parameters, lr, gamma, inc, dec, max, min, multiplier: true, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
- .sgd(parameters, lr, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
- .training_parameter_schedule(schedule, unit, epoch_size = nil) ⇒ TrainingParameterPerSampleSchedule, TrainingParameterPerMinibatchSchedule
Instance Method Summary
- #create_checkpoint(*args) ⇒ Object
- #learning_rate(*args) ⇒ Object
- #parameters(*args) ⇒ Object
- #reset_learning_rate(*args) ⇒ Object
- #reset_smoothed_gradients(*args) ⇒ Object
- #restore_from_checkpoint(*args) ⇒ Object
- #total_number_of_samples_seen(*args) ⇒ Object
- #update(*args) ⇒ Object
Class Method Details
.adagrad(parameters, lr, multiplier: true, unit_gain: CNTK.default_unit_gain_value(), l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
# File 'lib/cntk/learner.rb', line 143

def adagrad(parameters, lr, multiplier: true,
            unit_gain: CNTK.default_unit_gain_value(),
            l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0,
            threshold: Float::INFINITY, truncation: true)
  ga  = training_parameter_schedule(std_dev, :minibatch)
  opt = create_opt(l1_weight, l2_weight, ga, threshold, truncation)
  CNTK.__ada_grad_learner__(parameters, lr, multiplier, unit_gain, opt)
end
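A minimal usage sketch. `model` and the hyperparameter values are illustrative assumptions, not part of this API; `lr` is expected to already be a schedule object, such as one built with .training_parameter_schedule:

  lr = CNTK::Learner.training_parameter_schedule(0.01, :sample)
  learner = CNTK::Learner.adagrad(model.parameters, lr, l2_weight: 0.0001)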
.adam_sgd(parameters, lr, momentum, unit_gain: CNTK.default_unit_gain_value(), variance_momentum: momentum_as_time_constant_schedule(720000), low_memory: true, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
# File 'lib/cntk/learner.rb', line 162

def adam_sgd(parameters, lr, momentum,
             unit_gain: CNTK.default_unit_gain_value(),
             variance_momentum: momentum_as_time_constant_schedule(720000),
             low_memory: true,
             l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0,
             threshold: Float::INFINITY, truncation: true)
  ga  = training_parameter_schedule(std_dev, :minibatch)
  opt = create_opt(l1_weight, l2_weight, ga, threshold, truncation)
  CNTK.__adam_learner__(parameters, lr, momentum, unit_gain,
                        variance_momentum, low_memory, opt)
end
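A hedged construction sketch; `model` and the rates are assumptions for illustration. The momentum argument is expected to be a schedule, e.g. from .momentum_schedule:

  lr = CNTK::Learner.training_parameter_schedule(0.001, :sample)
  momentum = CNTK::Learner.momentum_schedule(0.9)
  learner = CNTK::Learner.adam_sgd(model.parameters, lr, momentum)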
.momentum_as_time_constant_schedule(schedule, epoch_size = nil) ⇒ MomentumAsTimeConstantSchedule
# File 'lib/cntk/learner.rb', line 68

def momentum_as_time_constant_schedule(schedule, epoch_size = nil)
  klass = MomentumAsTimeConstantSchedule
  if schedule.is_a?(Numeric)
    unless epoch_size.nil?
      raise "epoch_size can't be given when schedule is Numeric."
    end
    klass.new(schedule)
  else
    if epoch_size.nil?
      klass.new(schedule)
    else
      klass.new(schedule, epoch_size)
    end
  end
end
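A sketch of both call shapes (values illustrative): a Numeric builds a constant schedule and must not be accompanied by an epoch_size, while an Array builds a piecewise schedule whose entries advance every epoch_size samples:

  vm = CNTK::Learner.momentum_as_time_constant_schedule(1100)
  vm = CNTK::Learner.momentum_as_time_constant_schedule([600, 1100], 1000)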
.momentum_schedule(schedule, unit = :minibatch, epoch_size = nil) ⇒ TrainingParameterPerSampleSchedule, TrainingParameterPerMinibatchSchedule
# File 'lib/cntk/learner.rb', line 61

def momentum_schedule(schedule, unit = :minibatch, epoch_size = nil)
  training_parameter_schedule(schedule, unit, epoch_size)
end
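Since this simply delegates to .training_parameter_schedule, it accepts the same forms (values illustrative):

  m = CNTK::Learner.momentum_schedule(0.9)                          # constant, per minibatch
  m = CNTK::Learner.momentum_schedule([0.99, 0.9], :minibatch, 500) # piecewise; entries advance every epoch_size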
.momentum_sgd(parameters, lr, momentum, unit_gain: CNTK.default_unit_gain_value(), l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
# File 'lib/cntk/learner.rb', line 109

def momentum_sgd(parameters, lr, momentum,
                 unit_gain: CNTK.default_unit_gain_value(),
                 l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0,
                 threshold: Float::INFINITY, truncation: true)
  ga  = training_parameter_schedule(std_dev, :minibatch)
  opt = create_opt(l1_weight, l2_weight, ga, threshold, truncation)
  CNTK.__momentum_sgd_learner__(parameters, lr, momentum, unit_gain, opt)
end
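A hedged sketch, assuming `model` is a CNTK::Function and the values shown are placeholders:

  lr = CNTK::Learner.training_parameter_schedule(0.01, :sample)
  m  = CNTK::Learner.momentum_schedule(0.9)
  learner = CNTK::Learner.momentum_sgd(model.parameters, lr, m)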
.nesterov(parameters, lr, momentum, unit_gain: CNTK.default_unit_gain_value(), l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
# File 'lib/cntk/learner.rb', line 126

def nesterov(parameters, lr, momentum,
             unit_gain: CNTK.default_unit_gain_value(),
             l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0,
             threshold: Float::INFINITY, truncation: true)
  ga  = training_parameter_schedule(std_dev, :minibatch)
  opt = create_opt(l1_weight, l2_weight, ga, threshold, truncation)
  CNTK.__nesterov_learner__(parameters, lr, momentum, unit_gain, opt)
end
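The call shape matches .momentum_sgd; only the underlying update rule (Nesterov-accelerated momentum) differs. A hedged sketch with assumed `model` and placeholder values:

  lr = CNTK::Learner.training_parameter_schedule(0.01, :sample)
  m  = CNTK::Learner.momentum_schedule(0.9)
  learner = CNTK::Learner.nesterov(model.parameters, lr, m)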
.rmsprop(parameters, lr, gamma, inc, dec, max, min, multiplier: true, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
# File 'lib/cntk/learner.rb', line 185

def rmsprop(parameters, lr, gamma, inc, dec, max, min, multiplier: true,
            l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0,
            threshold: Float::INFINITY, truncation: true)
  ga  = training_parameter_schedule(std_dev, :minibatch)
  opt = create_opt(l1_weight, l2_weight, ga, threshold, truncation)
  CNTK.__rmsprop_learner__(parameters, lr, gamma, inc, dec, max, min, multiplier, opt)
end
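The positional scalars are passed straight through to CNTK's RMSPropLearner, where (per the upstream CNTK documentation) gamma is the smoothing factor and inc/dec/max/min adjust and clamp the per-parameter step sizes. A hedged sketch; `model` and all values are illustrative assumptions:

  lr = CNTK::Learner.training_parameter_schedule(0.01, :sample)
  learner = CNTK::Learner.rmsprop(model.parameters, lr,
                                  0.95,  # gamma
                                  1.2,   # inc
                                  0.7,   # dec
                                  10.0,  # max
                                  0.1)   # min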
.sgd(parameters, lr, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0, threshold: Float::INFINITY, truncation: true) ⇒ Learner
# File 'lib/cntk/learner.rb', line 93

def sgd(parameters, lr, l1_weight: 0.0, l2_weight: 0.0, std_dev: 0.0,
        threshold: Float::INFINITY, truncation: true)
  ga  = training_parameter_schedule(std_dev, :minibatch)
  opt = create_opt(l1_weight, l2_weight, ga, threshold, truncation)
  CNTK.__sgdlearner__(parameters, lr, opt)
end
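A minimal sketch, assuming `model` is a CNTK::Function whose parameters are to be trained, and noting that `lr` must be a schedule object rather than a bare number:

  lr = CNTK::Learner.training_parameter_schedule(0.01, :sample)
  learner = CNTK::Learner.sgd(model.parameters, lr)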
.training_parameter_schedule(schedule, unit, epoch_size = nil) ⇒ TrainingParameterPerSampleSchedule, TrainingParameterPerMinibatchSchedule
# File 'lib/cntk/learner.rb', line 31

def training_parameter_schedule(schedule, unit, epoch_size = nil)
  case unit
  when :sample
    klass = TrainingParameterPerSampleSchedule
  when :minibatch
    klass = TrainingParameterPerMinibatchSchedule
  else
    raise "unknown unit"
  end
  if schedule.is_a?(Numeric)
    unless epoch_size.nil?
      raise "epoch_size can't be given when schedule is Numeric."
    end
    klass.new(schedule)
  else
    if epoch_size.nil?
      klass.new(schedule)
    else
      klass.new(schedule, epoch_size)
    end
  end
end
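A sketch of the two accepted forms (values illustrative): a Numeric yields a constant schedule, an Array yields a piecewise schedule whose entries advance every epoch_size units:

  lr = CNTK::Learner.training_parameter_schedule(0.01, :sample)
  lr = CNTK::Learner.training_parameter_schedule([0.1, 0.01], :sample, 1000)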
Instance Method Details
#create_checkpoint(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55624
SWIGINTERN VALUE
_wrap_Learner_create_checkpoint(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  CNTK::Dictionary result;
  VALUE vresult = Qnil;

  if ((argc < 0) || (argc > 0)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","CreateCheckpoint", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  {
    try {
      result = (arg1)->CreateCheckpoint();
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  vresult = SWIG_NewPointerObj((new CNTK::Dictionary(static_cast< const CNTK::Dictionary& >(result))), SWIGTYPE_p_CNTK__Dictionary, SWIG_POINTER_OWN | 0 );
  return vresult;
fail:
  return Qnil;
}
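A hedged round-trip sketch pairing this method with #restore_from_checkpoint; it assumes `learner` was constructed over the same parameters in both sessions:

  state = learner.create_checkpoint          # returns a CNTK::Dictionary
  # ... later, possibly after rebuilding the learner ...
  learner.restore_from_checkpoint(state)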
#learning_rate(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55860
SWIGINTERN VALUE
_wrap_Learner_learning_rate(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  double result;
  VALUE vresult = Qnil;

  if ((argc < 0) || (argc > 0)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","LearningRate", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  {
    try {
      result = (double)(arg1)->LearningRate();
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  vresult = SWIG_From_double(static_cast< double >(result));
  return vresult;
fail:
  return Qnil;
}
#parameters(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55570
SWIGINTERN VALUE
_wrap_Learner_parameters(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  std::vector< CNTK::Parameter,std::allocator< CNTK::Parameter > > *result = 0 ;
  VALUE vresult = Qnil;

  if ((argc < 0) || (argc > 0)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","Parameters", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  {
    try {
      result = (std::vector< CNTK::Parameter,std::allocator< CNTK::Parameter > > *) &(arg1)->Parameters();
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  vresult = swig::from(static_cast< std::vector< CNTK::Parameter,std::allocator< CNTK::Parameter > > >(*result));
  return vresult;
fail:
  return Qnil;
}
#reset_learning_rate(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55747
SWIGINTERN VALUE
_wrap_Learner_reset_learning_rate(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  CNTK::LearningRateSchedule *arg2 = 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  void *argp2 ;
  int res2 = 0 ;

  if ((argc < 1) || (argc > 1)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 1)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","ResetLearningRate", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  res2 = SWIG_ConvertPtr(argv[0], &argp2, SWIGTYPE_p_CNTK__TrainingParameterScheduleT_double_t, 0 );
  if (!SWIG_IsOK(res2)) {
    SWIG_exception_fail(SWIG_ArgError(res2), Ruby_Format_TypeError( "", "CNTK::LearningRateSchedule const &","ResetLearningRate", 2, argv[0] ));
  }
  if (!argp2) {
    SWIG_exception_fail(SWIG_ValueError, Ruby_Format_TypeError("invalid null reference ", "CNTK::LearningRateSchedule const &","ResetLearningRate", 2, argv[0]));
  }
  arg2 = reinterpret_cast< CNTK::LearningRateSchedule * >(argp2);
  {
    try {
      (arg1)->ResetLearningRate((CNTK::LearningRateSchedule const &)*arg2);
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  return Qnil;
fail:
  return Qnil;
}
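A hedged sketch pairing #learning_rate with a reset; the new rate must be a schedule object, and the value shown is illustrative:

  puts learner.learning_rate                 # current rate as a Float
  slower = CNTK::Learner.training_parameter_schedule(0.001, :sample)
  learner.reset_learning_rate(slower)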
#reset_smoothed_gradients(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55809
SWIGINTERN VALUE
_wrap_Learner_reset_smoothed_gradients(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;

  if ((argc < 0) || (argc > 0)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","ResetSmoothedGradients", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  {
    try {
      (arg1)->ResetSmoothedGradients();
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  return Qnil;
fail:
  return Qnil;
}
#restore_from_checkpoint(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55678
SWIGINTERN VALUE
_wrap_Learner_restore_from_checkpoint(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  CNTK::Dictionary *arg2 = 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  void *argp2 ;
  int res2 = 0 ;

  if ((argc < 1) || (argc > 1)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 1)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","RestoreFromCheckpoint", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  res2 = SWIG_ConvertPtr(argv[0], &argp2, SWIGTYPE_p_CNTK__Dictionary, 0 );
  if (!SWIG_IsOK(res2)) {
    SWIG_exception_fail(SWIG_ArgError(res2), Ruby_Format_TypeError( "", "CNTK::Dictionary const &","RestoreFromCheckpoint", 2, argv[0] ));
  }
  if (!argp2) {
    SWIG_exception_fail(SWIG_ValueError, Ruby_Format_TypeError("invalid null reference ", "CNTK::Dictionary const &","RestoreFromCheckpoint", 2, argv[0]));
  }
  arg2 = reinterpret_cast< CNTK::Dictionary * >(argp2);
  {
    try {
      (arg1)->RestoreFromCheckpoint((CNTK::Dictionary const &)*arg2);
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  return Qnil;
fail:
  return Qnil;
}
#total_number_of_samples_seen(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55914
SWIGINTERN VALUE
_wrap_Learner_total_number_of_samples_seen(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  size_t result;
  VALUE vresult = Qnil;

  if ((argc < 0) || (argc > 0)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","TotalNumberOfSamplesSeen", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  {
    try {
      result = (arg1)->TotalNumberOfSamplesSeen();
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  vresult = SWIG_From_size_t(static_cast< size_t >(result));
  return vresult;
fail:
  return Qnil;
}
#update(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 55497
SWIGINTERN VALUE
_wrap_Learner_update(int argc, VALUE *argv, VALUE self) {
  CNTK::Learner *arg1 = (CNTK::Learner *) 0 ;
  std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > *arg2 = 0 ;
  size_t arg3 ;
  void *argp1 = 0 ;
  int res1 = 0 ;
  std::shared_ptr< CNTK::Learner > tempshared1 ;
  std::shared_ptr< CNTK::Learner > *smartarg1 = 0 ;
  void *argp2 = 0 ;
  int res2 = 0 ;
  size_t val3 ;
  int ecode3 = 0 ;
  bool result;
  VALUE vresult = Qnil;

  if ((argc < 2) || (argc > 2)) {
    rb_raise(rb_eArgError, "wrong # of arguments(%d for 2)",argc); SWIG_fail;
  }
  {
    swig_ruby_owntype newmem = {0, 0};
    res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t, 0 | 0 , &newmem);
    if (!SWIG_IsOK(res1)) {
      SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::Learner *","Update", 1, self ));
    }
    if (newmem.own & SWIG_CAST_NEW_MEMORY) {
      tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      delete reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >(tempshared1.get());
    } else {
      smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::Learner > * >(argp1);
      arg1 = const_cast< CNTK::Learner * >((smartarg1 ? smartarg1->get() : 0));
    }
  }
  res2 = SWIG_ConvertPtr(argv[0], &argp2, SWIGTYPE_p_std__unordered_mapT_CNTK__Parameter_std__shared_ptrT_CNTK__NDArrayView_t_std__hashT_CNTK__Parameter_t_std__equal_toT_CNTK__Parameter_t_std__allocatorT_std__pairT_CNTK__Parameter_const_std__shared_ptrT_CNTK__NDArrayView_t_t_t_t, 0 );
  if (!SWIG_IsOK(res2)) {
    SWIG_exception_fail(SWIG_ArgError(res2), Ruby_Format_TypeError( "", "std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > &","Update", 2, argv[0] ));
  }
  if (!argp2) {
    SWIG_exception_fail(SWIG_ValueError, Ruby_Format_TypeError("invalid null reference ", "std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > &","Update", 2, argv[0]));
  }
  arg2 = reinterpret_cast< std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > * >(argp2);
  ecode3 = SWIG_AsVal_size_t(argv[1], &val3);
  if (!SWIG_IsOK(ecode3)) {
    SWIG_exception_fail(SWIG_ArgError(ecode3), Ruby_Format_TypeError( "", "size_t","Update", 3, argv[1] ));
  }
  arg3 = static_cast< size_t >(val3);
  {
    try {
      result = (bool)(arg1)->Update(*arg2,arg3);
    }
    catch (const std::runtime_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (const std::invalid_argument &e) {
      SWIG_exception(SWIG_ValueError,e.what());
    }
    catch (const std::logic_error &e) {
      SWIG_exception(SWIG_RuntimeError,e.what());
    }
    catch (...) {
      SWIG_exception(SWIG_UnknownError,"Runtime exception");
    }
  }
  vresult = SWIG_From_bool(static_cast< bool >(result));
  return vresult;
fail:
  return Qnil;
}
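In normal training a Trainer drives this method. A hedged sketch of direct use, assuming `gradients` maps each CNTK::Parameter to an NDArrayView holding its gradient for the current minibatch (and that the SWIG typemap accepts a Ruby Hash for the unordered_map argument):

  keep_training = learner.update(gradients, 32)  # 32 = samples in this minibatch
  puts learner.total_number_of_samples_seen      # running sample count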