Module: RelatonGb::SecScrapper
- Extended by:
- Scrapper
- Defined in:
- lib/relaton_gb/sec_scrapper.rb
Overview
Sector standard scrapper
Constant Summary
Constants included from Scrapper
Class Method Summary collapse
Methods included from Scrapper
fetch_structuredidentifier, get_contributors, get_docid, get_status, get_titles, get_type, org, scrapped_data
Class Method Details
.scrape_doc(hit) ⇒ RelatonGb::GbBibliographicItem
41 42 43 44 45 46 47 48 49 50 |
# File 'lib/relaton_gb/sec_scrapper.rb', line 41

# Fetch the detail page for a search hit and build a bibliographic item.
#
# @param hit [RelatonGb::Hit] hit carrying the document's +pid+
# @return [RelatonGb::GbBibliographicItem]
# @raise [RelatonBib::RequestError] if the detail page cannot be reached
def scrape_doc(hit)
  src = "https://hbba.sacinfo.org.cn/stdDetail/#{hit.pid}"
  html = Net::HTTP.get URI(src)
  page = Nokogiri::HTML html
  GbBibliographicItem.new(**scrapped_data(page, src, hit))
rescue SocketError, Timeout::Error, Errno::EINVAL, Errno::ECONNRESET,
       EOFError, Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError,
       Net::ProtocolError, OpenSSL::SSL::SSLError, Errno::ETIMEDOUT,
       Net::OpenTimeout
  raise RelatonBib::RequestError, "Cannot access #{src}"
end
.scrape_page(text) ⇒ RelatonGb::HitCollection
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
# File 'lib/relaton_gb/sec_scrapper.rb', line 20

# Query the SAC sector-standard service for documents matching +text+.
#
# @param text [String] reference or keyword to search for
# @return [RelatonGb::HitCollection] hits built from the service's JSON records
# @raise [RelatonBib::RequestError] if the query endpoint cannot be reached
def scrape_page(text)
  uri = URI "https://hbba.sacinfo.org.cn/stdQueryList"
  resp = Net::HTTP.post uri, URI.encode_www_form({ key: text })
  json = JSON.parse resp.body
  # Guard against a response without a "records" key so an empty result
  # set yields an empty collection instead of raising NoMethodError.
  hits = (json["records"] || []).map do |h|
    Hit.new pid: h["pk"], docref: h["code"], status: h["status"],
            scrapper: self
  end
  HitCollection.new hits
rescue SocketError, Timeout::Error, Errno::EINVAL, Errno::ECONNRESET,
       EOFError, Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError,
       Net::ProtocolError, OpenSSL::SSL::SSLError, Errno::ETIMEDOUT,
       Net::OpenTimeout
  raise RelatonBib::RequestError, "Cannot access #{uri}"
end