Class: NTFS::DataRun
- Inherits: Object
- Ancestry: Object → NTFS::DataRun
- Defined in:
- lib/fs/ntfs/data_run.rb
Constant Summary collapse
# When true, every cluster/byte read is logged through $log (very verbose;
# intended for debugging only).
DEBUG_TRACE_READS = false
# Sign-extension data for run offsets, keyed by the byte width of the
# stored offset field: [mask that fills the high-order bytes, pack code
# (unsigned), unpack code (signed)]. Used to reinterpret a little-endian
# offset as a signed value.
#
# Fix: frozen -- a mutable module constant can be altered by any caller.
OffsetCode = {
  1 => [0xffffff00,         'L', 'l'],
  2 => [0xffff0000,         'L', 'l'],
  3 => [0xff000000,         'L', 'l'],
  4 => [0x00000000,         'L', 'l'],
  5 => [0xffffff0000000000, 'Q', 'q'],
  6 => [0xffff000000000000, 'Q', 'q'],
  7 => [0xff00000000000000, 'Q', 'q'],
  8 => [0x0000000000000000, 'Q', 'q']
}.freeze
Instance Attribute Summary collapse
-
#boot_sector ⇒ Object
readonly
Returns the value of attribute boot_sector.
-
#length ⇒ Object
readonly
Returns the value of attribute length.
-
#pos ⇒ Object
readonly
Returns the value of attribute pos.
-
#runSpec ⇒ Object
readonly
Returns the value of attribute runSpec.
Instance Method Summary collapse
- #[](offset, len) ⇒ Object
- #addClusterCache(start_vcn, data) ⇒ Object
- #addRun(r) ⇒ Object
- #dump ⇒ Object
- #dumpCacheRanges ⇒ Object
- #dumpRunList ⇒ Object
- #findCachedClusters(start_vcn, end_vcn) ⇒ Object
- #getCacheInfo(vcn) ⇒ Object
- #getClusters(start_vcn, end_vcn = nil) ⇒ Object
- #getLCNs(start_vcn, num) ⇒ Object
-
#initialize(bs, buf, header) ⇒ DataRun
constructor
A new instance of DataRun.
-
#isUnitCompr?(unit = 0) ⇒ Boolean
Return true if a particular compression unit is compressed.
-
#mkComprUnits ⇒ Object
Organize run list into compression units.
- #read(bytes = @length) ⇒ Object
- #readCachedClusters(vcn, num) ⇒ Object
- #readRawClusters(vcn, num) ⇒ Object
- #rewind ⇒ Object
- #seek(offset, method = IO::SEEK_SET) ⇒ Object
- #seekToVcn(vcn) ⇒ Object
- #suckBytes(buf) ⇒ Object
- #to_s ⇒ Object
Constructor Details
#initialize(bs, buf, header) ⇒ DataRun
Returns a new instance of DataRun.
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 |
# Parse an NTFS run list ("data run") out of +buf+.
#
# bs     - boot sector object; supplies bytesPerCluster and the raw stream.
# buf    - binary String containing the packed run-list specification.
# header - attribute header; header.specific['data_size'] gives the
#          logical length of the data stream.
#
# Each entry starts with a header byte: the high nibble is the byte-width
# of the offset field, the low nibble the byte-width of the length field.
# A zero header byte terminates the list.
def initialize(bs, buf, header)
  raise "MIQ(NTFS::DataRun.initialize) Nil boot sector" if bs.nil?
  raise "MIQ(NTFS::DataRun.initialize) Nil buffer" if buf.nil?

  @boot_sector     = bs
  @bytesPerCluster = bs.bytesPerCluster
  @header          = header
  @runSpec         = []
  rewind

  last_lcn       = 0
  spec_pos       = 0
  total_clusters = 0

  # Walk the specification until the terminating 0 byte.
  while buf[spec_pos, 1].ord != 0
    size_of_offset, size_of_length = buf[spec_pos, 1].ord.divmod(16)
    spec_pos += 1

    # Number of clusters in this run.
    run_length = suckBytes(buf[spec_pos, size_of_length])
    spec_pos  += size_of_length

    # Cluster offset, relative to the previous run's starting LCN.
    run_offset = suckBytes(buf[spec_pos, size_of_offset])
    spec_pos  += size_of_offset

    # The offset is signed little-endian; sign-extend when the high bit of
    # the stored field is set. A zero-width offset denotes a sparse run.
    if size_of_offset > 0
      sign_bit = 1 << (8 * size_of_offset - 1)
      if run_offset & sign_bit == sign_bit
        mask, pack_code, unpack_code = OffsetCode[size_of_offset]
        run_offset = [run_offset | mask].pack(pack_code).unpack(unpack_code)[0]
      end
    end

    # Sparse runs carry no LCN (nil); real runs chain off the previous LCN.
    if size_of_offset > 0
      last_lcn += run_offset
      lcn = last_lcn
    else
      lcn = nil
    end

    total_clusters += run_length
    @runSpec << lcn << run_length
  end

  # Logical stream length; fall back to the run total when the header
  # reports a zero data_size.
  @length = header.specific['data_size']
  @length = total_clusters * @bytesPerCluster if @length == 0

  # Cluster cache: VCN => data String (at the entry's first VCN) or an
  # Integer back-pointer to that first VCN.
  @clusters = {}
end
Instance Attribute Details
#boot_sector ⇒ Object (readonly)
Returns the value of attribute boot_sector.
18 19 20 |
# Read-only accessor: the boot sector this run was constructed with.
def boot_sector
  @boot_sector
end
#length ⇒ Object (readonly)
Returns the value of attribute length.
18 19 20 |
# Read-only accessor: logical length of the data stream in bytes.
def length
  @length
end
#pos ⇒ Object (readonly)
Returns the value of attribute pos.
18 19 20 |
# Read-only accessor: current byte position within the stream.
def pos
  @pos
end
#runSpec ⇒ Object (readonly)
Returns the value of attribute runSpec.
18 19 20 |
# Read-only accessor: flat run list, alternating [lcn, run_length, ...]
# (lcn is nil for sparse runs).
def runSpec
  @runSpec
end
Instance Method Details
#[](offset, len) ⇒ Object
97 98 99 100 |
# Random-access read: position the stream at +offset+ and return +len+ bytes.
def [](offset, len)
  seek(offset)
  read(len)
end
#addClusterCache(start_vcn, data) ⇒ Object
203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 |
# Insert +data+ (whole clusters starting at +start_vcn+) into the cluster
# cache, trimming any previously cached entries it overlaps. Cache layout:
# the starting VCN maps to the data String; every other covered VCN maps
# to the starting VCN (an Integer back-pointer).
def addClusterCache(start_vcn, data)
  end_vcn = start_vcn + (data.length / @bytesPerCluster) - 1

  has_start = @clusters.key?(start_vcn)
  start_data, start_data_vcn, = getCacheInfo(start_vcn) if has_start

  has_end = @clusters.key?(end_vcn)
  end_data, end_data_vcn, end_data_end_vcn, _end_data_len, end_offset = getCacheInfo(end_vcn) if has_end

  # Range already fully covered by a single existing cache entry.
  return if has_start && has_end && start_data_vcn == end_data_vcn

  # Partial overlap at the front: re-cache only the clusters before start_vcn.
  if has_start && start_data_vcn != start_vcn
    leftover_len = start_vcn - start_data_vcn
    @clusters[start_data_vcn] = start_data[0, leftover_len * @bytesPerCluster]
  end

  # Partial overlap at the back: re-cache only the clusters after end_vcn.
  if has_end && end_data_end_vcn != end_vcn
    leftover_start_vcn = end_vcn + 1
    leftover = end_data[(end_offset + 1) * @bytesPerCluster..-1]
    @clusters[leftover_start_vcn] = leftover
    (leftover_start_vcn + 1..end_data_end_vcn).each { |i| @clusters[i] = leftover_start_vcn }
  end

  # Install the new entry and its back-pointers.
  @clusters[start_vcn] = data
  (start_vcn + 1..end_vcn).each { |i| @clusters[i] = start_vcn }
end
#addRun(r) ⇒ Object
92 93 94 95 |
# Append another DataRun's run list and byte length onto this one.
def addRun(r)
  @runSpec += r.runSpec
  @length  += r.length
end
#dump ⇒ Object
320 321 322 323 324 325 326 327 328 |
# Multi-line, human-readable description of this data run (for debugging).
def dump
  ["\#<#{self.class}:0x#{'%08x' % object_id}>\n",
   " Length : #{@length}\n",
   " Position : #{@pos}\n",
   " Run List : #{@runSpec.inspect}\n",
   " Cache Ranges : #{dumpCacheRanges}\n",
   "---\n"].join
end
#dumpCacheRanges ⇒ Object
334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 |
# Render the cache layout as a String of VCN ranges (for debugging).
# Ranges marked "*(...)" indicate back-pointer entries whose target is
# shown in parentheses; "N^" flags a non-contiguous key.
def dumpCacheRanges
  out = []
  range_str = k_start = k_prev = nil
  invalid = true

  # Emit the range accumulated so far (skipped before the first entry).
  flush = lambda do
    out << "#{invalid ? "*(#{range_str}) " : ''}#{k_start}..#{k_prev}" unless k_start.nil?
  end

  @clusters.keys.sort.each do |k|
    v = @clusters[k]
    if v.kind_of?(String)
      flush.call
      range_str = k
      k_start = k
      invalid = false
    elsif v != range_str
      flush.call
      range_str = v
      k_start = k
      invalid = true
    elsif k != k_prev + 1
      out << "#{k}^"
    end
    k_prev = k
  end
  flush.call
  out.inspect
end
#dumpRunList ⇒ Object
330 331 332 |
# Print one "lcn = X, len = Y" line per run to stdout (sparse runs print 0).
def dumpRunList
  @runSpec.each_slice(2) do |lcn, len|
    puts "lcn = #{lcn.nil? ? 0 : lcn}, len = #{len}"
  end
end
#findCachedClusters(start_vcn, end_vcn) ⇒ Object
165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 |
# Partition start_vcn..end_vcn into maximal runs of cached / uncached
# clusters. Returns a flat Array of [first_vcn, run_length, cached?]
# triples, in order.
def findCachedClusters(start_vcn, end_vcn)
  spans = []
  span_start = span_len = in_cache = nil

  (start_vcn..end_vcn).each do |vcn|
    cached = @clusters.key?(vcn)
    if in_cache == cached
      span_len += 1
    else
      # Cache state flipped: close out the previous span (none on the
      # first iteration, when in_cache is still nil).
      spans << span_start << span_len << in_cache unless in_cache.nil?
      in_cache = cached
      span_start = vcn
      span_len = 1
    end
  end

  spans << span_start << span_len << in_cache
  spans
end
#getCacheInfo(vcn) ⇒ Object
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 |
# Resolve the cache entry covering +vcn+.
# Returns [data String, first VCN of the entry, last VCN of the entry,
# entry length in clusters, offset of +vcn+ within the entry].
def getCacheInfo(vcn)
  entry = @clusters[vcn]
  offset = 0
  start_vcn = vcn

  # Integer entries are back-pointers to the VCN holding the data String.
  if entry.kind_of?(Integer)
    start_vcn = entry
    offset = vcn - start_vcn
    entry = @clusters[start_vcn]
  end

  len = entry.length / @bytesPerCluster
  [entry, start_vcn, start_vcn + len - 1, len, offset]
end
#getClusters(start_vcn, end_vcn = nil) ⇒ Object
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 |
# Return the data for virtual clusters start_vcn..end_vcn as one buffer,
# satisfying as much as possible from the cluster cache and reading the
# rest from disk. The assembled buffer is added back into the cache.
def getClusters(start_vcn, end_vcn = nil)
  end_vcn = start_vcn if end_vcn.nil?

  # Fast path: a single, already-cached cluster.
  if start_vcn == end_vcn && @clusters.key?(start_vcn)
    $log.info "#{self.class} #{object_id}: Reading clusters [#{start_vcn}, 1, true]" if DEBUG_TRACE_READS
    return readCachedClusters(start_vcn, 1)[0]
  end

  num = end_vcn - start_vcn + 1
  ret = MemoryBuffer.create(num * @bytesPerCluster)
  offset = 0

  # [vcn, run_length, cached?] triples covering the requested range.
  to_read = findCachedClusters(start_vcn, end_vcn)
  $log.info "#{self.class} #{object_id}: Reading clusters #{to_read.inspect}" if DEBUG_TRACE_READS

  to_read.each_slice(3) do |vcn, len, cached|
    chunks = cached ? readCachedClusters(vcn, len) : readRawClusters(vcn, len)
    chunks.each do |chunk|
      ret[offset, chunk.length] = chunk
      offset += chunk.length
    end
  end

  addClusterCache(start_vcn, ret)
  ret
end
#getLCNs(start_vcn, num) ⇒ Object
277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 |
# Map +num+ virtual clusters starting at +start_vcn+ onto logical cluster
# extents. Returns a flat Array of [lcn, count] pairs; a trailing
# [nil, count] covers any portion not backed by an allocated run.
def getLCNs(start_vcn, num)
  lcns = []
  end_vcn = start_vcn + num - 1
  vcn = start_vcn
  total_clusters = 0

  @runSpec.each_slice(2) do |lcn, len|
    total_clusters += len
    # Skip sparse runs and runs entirely before the requested range.
    next unless lcn && len && total_clusters > start_vcn

    # LCN of the first wanted cluster inside this run.
    start = lcn + (vcn - (total_clusters - len))
    count = len - (start - lcn)
    count = num if count >= num

    lcns << start << count
    vcn += count
    num -= count
    break if num <= 0
  end

  # Anything left over is unallocated.
  lcns << nil << end_vcn - vcn + 1 if vcn <= end_vcn
  lcns
end
#isUnitCompr?(unit = 0) ⇒ Boolean
Return true if a particular compression unit is compressed.
310 311 312 313 314 |
# Return true if compression unit +unit+ is compressed.
#
# Fix: the original called a bare +header+, but this class defines no
# header reader (only boot_sector/length/pos/runSpec) -- the attribute
# header is stored in @header by initialize, so the bare call would raise
# NameError. (If a private reader exists elsewhere, this change is
# behaviorally identical.)
#
# NOTE(review): @header.isCompressd? keeps the original (misspelled?)
# method name -- confirm against the attribute-header class before
# renaming. Also, mkComprUnits is currently an empty stub, so
# @compr_units stays nil and the lookup below will fail for compressed
# files; verify intended behavior.
def isUnitCompr?(unit = 0)
  return false unless @header.isCompressd?
  mkComprUnits if @compr_units.nil?
  @compr_units[unit].isCompressed?
end
#mkComprUnits ⇒ Object
Organize run list into compression units.
317 318 |
# Organize the run list into compression units.
# NOTE(review): currently an empty stub -- it never assigns @compr_units,
# so isUnitCompr? will fail for compressed files; confirm whether the
# implementation was intentionally removed.
def mkComprUnits
end
#read(bytes = @length) ⇒ Object
121 122 123 124 125 126 127 128 129 130 131 132 133 |
# Read +bytes+ bytes from the current position, advancing @pos.
# Returns nil once the position is at or past the logical end of the
# stream.
#
# Fix: clamp the request to the logical stream length. Previously a read
# that crossed @length returned extra bytes taken from the slack space of
# the final cluster(s) instead of stopping at end-of-stream.
def read(bytes = @length)
  return nil if @pos >= @length

  # Never hand back data beyond the logical end of the stream.
  bytes = @length - @pos if @pos + bytes > @length

  $log.info "#{self.class} #{object_id}: Reading #{bytes} bytes @ #{@pos}" if DEBUG_TRACE_READS

  startCluster, startOffset = @pos.divmod(@bytesPerCluster)
  endCluster,   endOffset   = (@pos + (bytes - 1)).divmod(@bytesPerCluster)

  # Fetch whole clusters, then trim to the requested byte range;
  # (endOffset - @bytesPerCluster) is a negative index from the buffer end.
  ret = getClusters(startCluster, endCluster)
  ret = ret[startOffset..endOffset - @bytesPerCluster]

  @pos += ret.length
  ret
end
#readCachedClusters(vcn, num) ⇒ Object
185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 |
# Read +num+ clusters starting at +vcn+ entirely from the cache.
# Returns an Array of binary Strings, one per cache entry touched.
# Caller guarantees the whole range is cached.
def readCachedClusters(vcn, num)
  chunks = []
  while num > 0
    data, _start_vcn, _end_vcn, data_len, offset = getCacheInfo(vcn)
    take = [data_len - offset, num].min
    chunks << data[offset * @bytesPerCluster, take * @bytesPerCluster]
    num -= take
    vcn += take
  end
  chunks
end
#readRawClusters(vcn, num) ⇒ Object
255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 |
# Read +num+ clusters starting at virtual cluster +vcn+ straight from the
# volume, bypassing the cache. Sparse extents (nil LCN) come back as
# zero-filled buffers. Returns an Array of binary Strings, one per extent.
def readRawClusters(vcn, num)
  chunks = []
  getLCNs(vcn, num).each_slice(2) do |lcn, count|
    byte_count = count * @bytesPerCluster
    chunks << if lcn.nil?
      # Unallocated/sparse extent -- synthesize zeroed clusters.
      MemoryBuffer.create(byte_count)
    else
      @boot_sector.stream.seek(@boot_sector.lcn2abs(lcn))
      @boot_sector.stream.read(byte_count)
    end
  end
  chunks
end
#rewind ⇒ Object
102 103 104 |
# Reset the read position to the start of the stream. Returns 0.
def rewind
  @pos = 0
end
#seek(offset, method = IO::SEEK_SET) ⇒ Object
106 107 108 109 110 111 112 113 114 115 |
# Move the read position, IO#seek style. SEEK_SET is absolute, SEEK_CUR is
# relative to the current position, SEEK_END is +offset+ bytes back from
# the end. The result is pinned to 0..@length. Returns the new position.
def seek(offset, method = IO::SEEK_SET)
  new_pos = case method
            when IO::SEEK_SET then offset
            when IO::SEEK_CUR then @pos + offset
            when IO::SEEK_END then @length - offset
            end
  new_pos = 0       if new_pos < 0
  new_pos = @length if new_pos > @length
  @pos = new_pos
end
#seekToVcn(vcn) ⇒ Object
117 118 119 |
# Position the stream at the first byte of virtual cluster +vcn+.
def seekToVcn(vcn)
  seek(vcn * @bytesPerCluster)
end
#suckBytes(buf) ⇒ Object
302 303 304 305 306 307 |
# Decode +buf+ as an unsigned little-endian integer of arbitrary width
# (run-list length/offset fields are 0..8 bytes). An empty buffer yields 0.
def suckBytes(buf)
  return buf[0, 1].ord if buf.size == 1
  buf.each_byte.to_a.reverse.inject(0) { |val, byte| (val << 8) | byte }
end
#to_s ⇒ Object
88 89 90 |
# File 'lib/fs/ntfs/data_run.rb', line 88 def to_s # @current_run end |