# frozen_string_literal: true

RSpec.describe RetrieveTitle do
  describe ".extract_title" do
    it "will extract the value from the title tag" do
      title = RetrieveTitle.extract_title("<html><title>My Cool Title</title></html>")

      expect(title).to eq("My Cool Title")
    end

    it "will strip whitespace" do
      title = RetrieveTitle.extract_title("<html><title> Another Title\n\n </title></html>")

      expect(title).to eq("Another Title")
    end

    it "will pick og:title if title is missing" do
      title = RetrieveTitle.extract_title(<<~HTML)
        <html>
          <meta property="og:title" content="Good Title"
        </html>
      HTML

      expect(title).to eq("Good Title")
    end

    it "will prefer the title over the opengraph tag" do
      title = RetrieveTitle.extract_title(<<~HTML)
        <html>
          <title>Good Title</title>
          <meta property="og:title" content="Bad Title"
        </html>
      HTML

      expect(title).to eq("Good Title")
    end

    it "will parse a YouTube title from javascript" do
      title = RetrieveTitle.extract_title(<<~HTML)
        <html>
          <title>YouTube</title>
          <script>document.title = "Video Title";</script>
        </html>
      HTML
      expect(title).to eq("Video Title")
    end

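    # A thousand generated attributes make for pathologically malformed markup;
    # extraction should give up quietly (no title) rather than raise.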
    it "will not raise an exception for invalid html" do
      attributes = (1..1000).map { |x| " attr#{x}='1' " }.join
      title = RetrieveTitle.extract_title <<~HTML
        <html>
          <title>test</title>
          <body #{attributes}>
        </html>
      HTML

      expect(title).to eq(nil)
    end
  end

  describe ".crawl" do
    it "can properly extract a title from a url" do
      stub_request(:get, "https://brelksdjflaskfj.com/amazing").to_return(
        status: 200,
        body: "<html><title>very amazing</title>",
      )

      # we still resolve the IP address for every host
      IPSocket.stubs(:getaddress).returns("100.2.3.4")

      expect(RetrieveTitle.crawl("https://brelksdjflaskfj.com/amazing")).to eq("very amazing")
    end

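    # The stubbed bodies are returned as raw ASCII-8BIT bytes, so the title can only be
    # decoded correctly by honoring the charset given in the Content-Type header.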
    it "detects and uses encoding from Content-Type header" do
      stub_request(:get, "https://brelksdjflaskfj.com/amazing").to_return(
        status: 200,
        body: "<html><title>fancy apostrophes ’’’</title>".dup.force_encoding("ASCII-8BIT"),
        headers: {
          "Content-Type" => 'text/html; charset="utf-8"',
        },
      )

      IPSocket.stubs(:getaddress).returns("100.2.3.4")
      expect(RetrieveTitle.crawl("https://brelksdjflaskfj.com/amazing")).to eq(
        "fancy apostrophes ’’’",
      )

      stub_request(:get, "https://brelksdjflaskfj.com/amazing").to_return(
        status: 200,
        body:
          "<html><title>japanese こんにちは website</title>".encode("EUC-JP").force_encoding(
            "ASCII-8BIT",
          ),
        headers: {
          "Content-Type" => "text/html;charset=euc-jp",
        },
      )

      IPSocket.stubs(:getaddress).returns("100.2.3.4")
      expect(RetrieveTitle.crawl("https://brelksdjflaskfj.com/amazing")).to eq(
        "japanese こんにちは website",
      )
    end

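    # A 301 from the first host should be followed and the title read from the final location.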
    it "can follow redirect" do
      stub_request(:get, "http://foobar.com/amazing").to_return(
        status: 301,
        body: "",
        headers: {
          "location" => "https://wikipedia.com/amazing",
        },
      )

      stub_request(:get, "https://wikipedia.com/amazing").to_return(
        status: 200,
        body: "<html><title>very amazing</title>",
        headers: {
        },
      )

      IPSocket.stubs(:getaddress).returns("100.2.3.4")
      expect(RetrieveTitle.crawl("http://foobar.com/amazing")).to eq("very amazing")
    end

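    # Only the redirect target (wikipedia.com) is blocked, not the original host,
    # so the block must be enforced after following the redirect.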
    it "returns no title if the redirect uri is in the blocked domains list" do
      SiteSetting.blocked_onebox_domains = "wikipedia.com"

      stub_request(:get, "http://foobar.com/amazing").to_return(
        status: 301,
        body: "",
        headers: {
          "location" => "https://wikipedia.com/amazing",
        },
      )

      stub_request(:get, "https://wikipedia.com/amazing").to_return(
        status: 200,
        body: "<html><title>very amazing</title>",
        headers: {
        },
      )

      expect(RetrieveTitle.crawl("http://foobar.com/amazing")).to eq(nil)
    end

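    # The domain block also applies to intermediate redirect hops, not just the final destination.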
    it "doesn't return title if a blocked domain is encountered anywhere in the redirect chain" do
      SiteSetting.blocked_onebox_domains = "wikipedia.com"

      stub_request(:get, "http://foobar.com/amazing").to_return(
        status: 301,
        body: "",
        headers: {
          "location" => "https://wikipedia.com/amazing",
        },
      )

      stub_request(:get, "https://wikipedia.com/amazing").to_return(
        status: 301,
        body: "",
        headers: {
          "location" => "https://cat.com/meow",
        },
      )

      stub_request(:get, "https://cat.com/meow").to_return(
        status: 200,
        body: "<html><title>very amazing</title>",
        headers: {
        },
      )

      expect(RetrieveTitle.crawl("http://foobar.com/amazing")).to be_blank
    end

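    # A response sending "Discourse-No-Onebox: 1" asks not to be oneboxed, so no title is returned.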
    it "doesn't return title if the Discourse-No-Onebox header == 1" do
      stub_request(:get, "https://cat.com/meow/no-onebox").to_return(
        status: 200,
        body: "<html><title>discourse stay away</title>",
        headers: {
          "Discourse-No-Onebox" => "1",
        },
      )

      expect(RetrieveTitle.crawl("https://cat.com/meow/no-onebox")).to be_blank
    end

    it "doesn't return a title if response is unsuccessful" do
      stub_request(:get, "https://example.com").to_return(status: 404, body: "")

      expect(RetrieveTitle.crawl("https://example.com")).to eq(nil)
    end

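    # Unexpected exceptions should propagate to the caller rather than being silently swallowed.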
    it "raises errors other than Net::ReadTimeout, e.g. NoMethodError" do
      stub_request(:get, "https://example.com").to_raise(NoMethodError)

      expect { RetrieveTitle.crawl("https://example.com") }.to raise_error(NoMethodError)
    end

    it "ignores Net::ReadTimeout errors" do
      stub_request(:get, "https://example.com").to_raise(Net::ReadTimeout)

      expect(RetrieveTitle.crawl("https://example.com")).to eq(nil)
    end

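    # fetch_title is stubbed to raise the SSRF detector's lookup error; crawl should
    # swallow it and return nothing.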
    it "ignores SSRF lookup errors" do
      described_class.stubs(:fetch_title).raises(FinalDestination::SSRFDetector::LookupFailedError)

      expect(RetrieveTitle.crawl("https://example.com")).to eq(nil)
    end

    it "ignores URL encoding errors" do
      described_class.stubs(:fetch_title).raises(FinalDestination::UrlEncodingError)

      expect(RetrieveTitle.crawl("https://example.com")).to eq(nil)
    end
  end

  describe ".fetch_title" do
    it "does not parse broken title tag" do
      # webmock does not do chunks
      stub_request(:get, "https://en.wikipedia.org/wiki/Internet").to_return(
        status: 200,
        body: "<html><head><title>Internet - Wikipedia</ti",
        headers: {
        },
      )

      title = RetrieveTitle.fetch_title("https://en.wikipedia.org/wiki/Internet")
      expect(title).to eq(nil)
    end

    it "can parse correct title tag" do
      # webmock does not do chunks
      stub_request(:get, "https://en.wikipedia.org/wiki/Internet").to_return(
        status: 200,
        body: "<html><head><title>Internet - Wikipedia</title>",
        headers: {
        },
      )

      title = RetrieveTitle.fetch_title("https://en.wikipedia.org/wiki/Internet")
      expect(title).to eq("Internet - Wikipedia")
    end
  end
end