Class: NekonekoGen::TextClassifierGenerator

Inherits: Object
Defined in:
lib/nekoneko_gen/text_classifier_generator.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(filename, files, options = {}) ⇒ TextClassifierGenerator

Returns a new instance of TextClassifierGenerator.



# File 'lib/nekoneko_gen/text_classifier_generator.rb', line 11

def initialize(filename, files, options = {})
  @quiet = false
  @options = options
  @filename = filename
  @files = files
  @word2id = {}
  @id2word = {}
  @classifier = nil
  @k = files.size
  @name = safe_name(@filename).split("_").map(&:capitalize).join
  @labels = files.map {|file| "#{safe_name(file).upcase}"}
  @idf = {}
end
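
A minimal construction sketch, assuming the gem's top-level file is loadable with require 'nekoneko_gen' and using made-up training files; the output file name drives the generated class name via safe_name plus capitalization, so "cat_or_dog.rb" becomes CatOrDog:

require 'nekoneko_gen'

# Hypothetical setup: one UTF-8 text file per class, one document per line.
gen = NekonekoGen::TextClassifierGenerator.new('cat_or_dog.rb',
                                               ['cat.txt', 'dog.txt'])
# #train and #generate (see the examples below) do the actual work.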

Instance Attribute Details

#quiet ⇒ Object

Returns the value of attribute quiet.



# File 'lib/nekoneko_gen/text_classifier_generator.rb', line 10

def quiet
  @quiet
end

Instance Method Details

#generate(lang = :ruby) ⇒ Object



# File 'lib/nekoneko_gen/text_classifier_generator.rb', line 99

def generate(lang = :ruby)
  lang ||= :ruby
  case lang
  when :ruby
    generate_ruby_code
  else
    raise NotImplementedError
  end
  @name
end
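
Note that #generate only renders what #train has already learned; @classifier is nil until #train runs, so call that first. Only :ruby is implemented (a nil lang falls back to it), and the return value is the generated class name. A hedged continuation of the sketch above:

gen.train                     # fit the classifier (see #train below)
class_name = gen.generate     # same as gen.generate(:ruby); writes cat_or_dog.rb
# class_name == "CatOrDog" for that output file name
# gen.generate(:python)       # would raise NotImplementedError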

#generate_ruby_code ⇒ Object



# File 'lib/nekoneko_gen/text_classifier_generator.rb', line 109

def generate_ruby_code
  labels = @labels.each_with_index.map{|v, i| "  #{v} = #{i}"}.join("\n")
  File.open(@filename, "w") do |f|
    f.write <<MODEL
# -*- coding: utf-8 -*-
require 'rubygems'
require 'json'
require 'nkf'
require 'bimyou_segmenter'

class #{@name}
  def self.k
K
  end
  def self.predict(text)
classify(fv(NKF::nkf('-wZX', text).downcase))
  end
#{labels}
  LABELS = #{@labels.inspect}
  K = #{@classifier.k}
  private
  MATH_LOG2_INV = 1.0 / Math.log(2.0)
  def self.fv(text)
prev = nil
svec = BimyouSegmenter.segment(text,
                               :white_space => true,
                               :symbol => true).map do |word|
  if (prev)
    if (NGRAM_TARGET =~ word)
      nword = [prev + word, word]
      prev = word
      nword
    else
      prev = nil
      word
    end
  else
    if (NGRAM_TARGET =~ word)
      prev = word
    end
    word
  end
end.flatten.map{|word| WORD_INDEX[word]}.compact.reduce(Hash.new(0)) {|h,k| h[k] += 1; h }
unless (svec.empty?)
  if (svec.size >= 2)
    r = 1.0 / (Math.log(svec.size) * MATH_LOG2_INV)
  else
    r = 1.0
  end
  svec.each do |k, freq|
    if (idf = IDF[k])
      svec[k] = Math.log(freq + 1.0) * MATH_LOG2_INV * r * idf
    else
      svec[k] = 0.0
    end
  end
  normalize(svec)
else
  svec
end
  end
  def self.normalize(svec)
norm = Math.sqrt(svec.values.map{|v| v * v }.reduce(0.0, :+))
if (norm > 0.0)
  s = 1.0 / norm
  svec.each do |k, v|
    svec[k] = v * s
  end
end
svec
  end
#{@classifier.classify_method_code(:ruby)}
  NGRAM_TARGET = Regexp.new('(^[ァ-ヾ]+$)|(^[a-zA-Z\\-_a-zA-Z‐_0-90-9]+$)|' +
                     '(^[々〇ヵヶ' + [0x3400].pack('U') + '-' + [0x9FFF].pack('U') +
                     [0xF900].pack('U') + '-' + [0xFAFF].pack('U') +
                        [0x20000].pack('U') + '-' + [0x2FFFF].pack('U') + ']+$)')
  IDF = JSON.load(#{@idf.to_json.inspect})
  WORD_INDEX = JSON.load(#{@word2id.to_json.inspect})
#{@classifier.parameter_code(:ruby)}
end
MODEL
  end
end
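
The emitted file is self-contained: it requires json, nkf and bimyou_segmenter, defines one integer constant per label plus LABELS and K, and exposes predict, which normalizes the text with NKF, segments it, builds the n-gram/TF-IDF feature vector in fv, and hands that to the classifier-specific classify method. Assuming predict returns the winning label index (which is how LABELS and the per-label constants appear to be meant to be used), a hypothetical session with a model generated as cat_or_dog.rb looks like:

require './cat_or_dog'

label = CatOrDog.predict('some text to classify')
puts CatOrDog::LABELS[label]      # e.g. "CAT" or "DOG"
label == CatOrDog::CAT            # label constants are the indices 0, 1, ...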

#train(iteration = nil) ⇒ Object



# File 'lib/nekoneko_gen/text_classifier_generator.rb', line 24

def train(iteration = nil)
  data = []
  word_count = Hash.new(0)
  @k.times do |i|
    t = Time.now
    data[i] = []
    print "loading #{@files[i]}... "
    
    content = nil
    File.open(@files[i]) do |f|
      content = f.read
    end
    content = NKF.nkf('-wZX', content).downcase
    content.lines do |line|
      vec = fv(line.chomp)
      if (vec.size > 0)
        data[i] << normalize(vec)
      end
    end
    puts sprintf("%.4fs", Time.now - t)
  end
  data_min = data.map{|v| v.size}.min
  data.each do |cd|
    w = data_min / cd.size.to_f
    cd.each do |vec|
      vec.keys.each do |k|
        word_count[k] += w
      end
    end
  end
  document_count = data_min * data.size
  @idf = Array.new(0, 0)
  word_count.each{|k, freq|
    @idf[k] = Math.log(document_count / freq) * MATH_LOG2_INV + 1.0
  }
  data.each do |cdata|
    cdata.each do |vec|
      if (vec.size >= 2)
        r = 1.0 / (Math.log(vec.size) * MATH_LOG2_INV)
      else
        r = 1.0
      end
      vec.each do |k, freq|
        vec[k] = Math.log(freq + 1.0) * MATH_LOG2_INV * r * @idf[k]
      end
      normalize(vec)
    end
  end
  
  @classifier = ClassifierFactory.create(@k, @word2id.size, @options)
  iteration ||= @classifier.default_iteration
  iteration.times do |step|
    loss = 0.0
    c = 0
    t = Time.now
    print sprintf("step %3d...", step)
    
    @classifier.k.times.map do |i|
      sampling(data[i], data_min).map {|vec| [vec, i] }
    end.flatten(1).shuffle!.each do |v|
      loss += @classifier.update(v[0], v[1])
      c += 1
    end
    print sprintf(" %.6f, %.4fs\n", 1.0 - loss / c.to_f, Time.now - t)
  end
  
  if (@classifier.k > 2)
    @classifier.k.times do |i|
      puts "#{@labels[i]} : #{@classifier.features(i)} features"
    end
  else
    puts "#{@labels[0]}, #{@labels[1]} : #{@classifier.features(0)} features"
  end
  puts "done nyan! "
end
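
To make the weighting above (and the matching code in the generated fv) concrete, here is a standalone numeric sketch of a single term weight before L2 normalization; every number is made up. In train, document_count is data_min * data.size, and per-class document frequencies are scaled by data_min / cd.size so each class contributes equally:

MATH_LOG2_INV = 1.0 / Math.log(2.0)          # natural log -> log2

document_count = 1000.0                      # data_min * number of classes
df             = 50.0                        # weighted count of documents containing the word
idf            = Math.log(document_count / df) * MATH_LOG2_INV + 1.0   # ~= 5.32

freq = 3.0                                   # occurrences of the word in one document
r    = 1.0 / (Math.log(10) * MATH_LOG2_INV)  # the document has 10 distinct indexed words
w    = Math.log(freq + 1.0) * MATH_LOG2_INV * r * idf                  # ~= 3.20

# normalize() then rescales the whole document vector to unit Euclidean length.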