# frozen_string_literal: true
module RetrieveTitle
  # Seconds FinalDestination is allowed to spend fetching the page.
  CRAWL_TIMEOUT = 1

  # Crawl +url+ and return the page title, or nil when anything goes wrong.
  # A missing title is never fatal, so errors are deliberately swallowed.
  #
  # @param url [String] the URL to fetch
  # @return [String, nil] extracted title, or nil on any error
  def self.crawl(url)
    fetch_title(url)
  rescue StandardError
    # If there was a connection error, do nothing.
    # (Was `rescue Exception`, which also trapped SignalException/SystemExit/
    # NoMemoryError; StandardError still covers network, timeout and parse
    # failures while letting process-level signals propagate.)
  end

  # Extract a title from an HTML string.
  #
  # Tries, in order: the <title> element, YouTube's `document.title = "..."`
  # inline-script hack, then the og:title meta tag. Whitespace in the result
  # is collapsed to single spaces.
  #
  # @param html [String] a (possibly truncated) HTML document
  # @param encoding [String, nil] charset name to hand to Nokogiri
  # @return [String, nil] cleaned-up title, or nil if none found
  def self.extract_title(html, encoding = nil)
    title = nil

    # A <title> that was opened but not yet closed means we are looking at a
    # truncated chunk; wait for more data instead of returning a partial title.
    if html =~ /<title>/ && html !~ /<\/title>/
      return nil
    end

    if doc = Nokogiri::HTML5(html, nil, encoding)
      title = doc.at('title')&.inner_text

      # A horrible hack - YouTube uses `document.title` to populate the title
      # for some reason. For any other site than YouTube this wouldn't be worth it.
      if title == "YouTube" && html =~ /document\.title *= *"(.*)";/
        title = Regexp.last_match[1].sub(/ - YouTube$/, '')
      end

      # Fall back to Open Graph metadata when there is no usable <title>.
      if !title && node = doc.at('meta[property="og:title"]')
        title = node['content']
      end
    end

    if title.present?
      title.gsub!(/\n/, ' ')
      title.gsub!(/ +/, ' ')
      title.strip!
      return title
    end

    nil
  end

  private

  # NOTE(review): `private` does not affect `def self.` singleton methods, so
  # the helpers below remain publicly callable; `private_class_method` would
  # be needed to actually hide them (left unchanged to avoid breaking callers).

  # How many KiB of the response body to download before giving up on finding
  # a title. Some large sites emit the <title> element very late.
  #
  # @param uri [URI] the URI currently being fetched
  # @return [Integer] chunk budget in KiB
  def self.max_chunk_size(uri)
    # Amazon and YouTube leave the title until very late. Exceptions are bad
    # but these are large sites.
    return 500 if uri.host =~ /amazon\.(com|ca|co\.uk|es|fr|de|it|com\.au|com\.br|cn|in|co\.jp|com\.mx)$/
    # Fixed: was /youtu.be/ — unescaped dot and unanchored, so it matched any
    # host containing "youtu<any char>be"; now escaped and anchored like the
    # youtube.com pattern above.
    return 300 if uri.host =~ /youtube\.com$/ || uri.host =~ /youtu\.be$/

    # default is 20k
    20
  end

  # Fetch the beginning of a HTML document at a url
  #
  # Streams the response via FinalDestination, accumulating body chunks and
  # re-running extract_title until a title appears or the per-site byte
  # budget (max_chunk_size) is exhausted.
  #
  # @param url [String] the URL to fetch
  # @return [String, nil] the title, or nil if none was found in the budget
  def self.fetch_title(url)
    fd = FinalDestination.new(url, timeout: CRAWL_TIMEOUT)

    current = nil
    title = nil
    encoding = nil

    fd.get do |_response, chunk, uri|
      # Abort immediately if a redirect landed us on a blocked domain.
      if (uri.present? && Onebox::DomainChecker.is_blocked?(uri.hostname))
        throw :done
      end

      # Redirect responses carry no useful body; only process final responses.
      unless Net::HTTPRedirection === _response
        if current
          current << chunk
        else
          current = chunk
        end

        # Derive the charset from the Content-Type header once, discarding
        # names Ruby's Encoding registry does not recognise.
        if !encoding && content_type = _response['content-type']&.strip&.downcase
          if content_type =~ /charset="?([a-z0-9_-]+)"?/
            encoding = Regexp.last_match(1)
            if !Encoding.list.map(&:name).map(&:downcase).include?(encoding)
              encoding = nil
            end
          end
        end

        max_size = max_chunk_size(uri) * 1024
        title = extract_title(current, encoding)
        # Stop once a title was found or the download budget is exceeded.
        throw :done if title || max_size < current.length
      end
    end

    title
  end
end
|