From 7b562d2f46c60df5323ac06731cf341d95d85027 Mon Sep 17 00:00:00 2001
From: Sam <sam.saffron@gmail.com>
Date: Tue, 16 Jan 2018 15:41:13 +1100
Subject: [PATCH] FEATURE: much improved and simplified crawler detection

- phase one: does it match 'trident|webkit|gecko|chrome|safari|msie|opera'?
    if yes, it is possibly a real browser

- phase two: does it match 'rss|bot|spider|crawler|facebook|archive|wayback|ping|monitor'?
    if yes, it is probably a crawler

Based off: https://gist.github.com/SamSaffron/6cfad7ea3e6df321ffb7a84f93720a53
---
 config/site_settings.yml                  |  6 +++++-
 lib/crawler_detection.rb                  | 18 ++++++++++++------
 spec/components/crawler_detection_spec.rb | 12 +++++++++---
 3 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/config/site_settings.yml b/config/site_settings.yml
index 00d579e71c0..b23589aa427 100644
--- a/config/site_settings.yml
+++ b/config/site_settings.yml
@@ -939,9 +939,13 @@ security:
   enable_escaped_fragments: true
   allow_index_in_robots_txt: true
   allow_moderators_to_create_categories: false
+  non_crawler_user_agents:
+    hidden: true
+    default: 'trident|webkit|gecko|chrome|safari|msie|opera'
+    type: list
   crawler_user_agents:
     hidden: true
-    default: 'Googlebot|Mediapartners|AdsBot|curl|HTTrack|Twitterbot|facebookexternalhit|bingbot|Baiduspider|ia_archiver|archive.org_bot|Wayback Save Page|360Spider|Swiftbot|YandexBot'
+    default: 'rss|bot|spider|crawler|facebook|archive|wayback|ping|monitor'
     type: list
   cors_origins:
     default: ''
diff --git a/lib/crawler_detection.rb b/lib/crawler_detection.rb
index 5d222ecf7bb..15ff75d384b 100644
--- a/lib/crawler_detection.rb
+++ b/lib/crawler_detection.rb
@@ -1,17 +1,23 @@
 module CrawlerDetection
 
-  # added 'ia_archiver' based on https://meta.discourse.org/t/unable-to-archive-discourse-pages-with-the-internet-archive/21232
-  # added 'Wayback Save Page' based on https://meta.discourse.org/t/unable-to-archive-discourse-with-the-internet-archive-save-page-now-button/22875
-  # added 'Swiftbot' based on https://meta.discourse.org/t/how-to-add-html-markup-or-meta-tags-for-external-search-engine/28220
   def self.to_matcher(string)
     escaped = string.split('|').map { |agent| Regexp.escape(agent) }.join('|')
-    Regexp.new(escaped)
+    Regexp.new(escaped, Regexp::IGNORECASE)
   end
 
   def self.crawler?(user_agent)
     # this is done to avoid regenerating regexes
+    @non_crawler_matchers ||= {}
     @matchers ||= {}
-    matcher = (@matchers[SiteSetting.crawler_user_agents] ||= to_matcher(SiteSetting.crawler_user_agents))
-    matcher.match?(user_agent)
+
+    possibly_real = (@non_crawler_matchers[SiteSetting.non_crawler_user_agents] ||= to_matcher(SiteSetting.non_crawler_user_agents))
+
+    if user_agent.match?(possibly_real)
+      known_bots = (@matchers[SiteSetting.crawler_user_agents] ||= to_matcher(SiteSetting.crawler_user_agents))
+      user_agent.match?(known_bots)
+    else
+      true
+    end
+
   end
 end
diff --git a/spec/components/crawler_detection_spec.rb b/spec/components/crawler_detection_spec.rb
index 6443d84a529..86b53450427 100644
--- a/spec/components/crawler_detection_spec.rb
+++ b/spec/components/crawler_detection_spec.rb
@@ -6,9 +6,9 @@ describe CrawlerDetection do
 
     it "can be amended via site settings" do
       SiteSetting.crawler_user_agents = 'Mooble|Kaboodle+*'
-      expect(CrawlerDetection.crawler?("Mozilla/5.0 (compatible; Kaboodle+*/2.1; +http://www.google.com/bot.html)")).to eq(true)
-      expect(CrawlerDetection.crawler?("Mozilla/5.0 (compatible; Mooble+*/2.1; +http://www.google.com/bot.html)")).to eq(true)
-      expect(CrawlerDetection.crawler?("Mozilla/5.0 (compatible; Gooble+*/2.1; +http://www.google.com/bot.html)")).to eq(false)
+      expect(CrawlerDetection.crawler?("Mozilla/5.0 Safari (compatible; Kaboodle+*/2.1; +http://www.google.com/bot.html)")).to eq(true)
+      expect(CrawlerDetection.crawler?("Mozilla/5.0 Safari (compatible; Mooble+*/2.1; +http://www.google.com/bot.html)")).to eq(true)
+      expect(CrawlerDetection.crawler?("Mozilla/5.0 Safari (compatible; Gooble+*/2.1; +http://www.google.com/bot.html)")).to eq(false)
     end
 
     it "returns true for crawler user agents" do
@@ -37,6 +37,12 @@ describe CrawlerDetection do
       expect(described_class.crawler?("Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25")).to eq(false)
       expect(described_class.crawler?("Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0")).to eq(false)
       expect(described_class.crawler?("Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")).to eq(false)
+
+      expect(described_class.crawler?("DiscourseAPI Ruby Gem 0.19.0")).to eq(true)
+      expect(described_class.crawler?("Pingdom.com_bot_version_1.4_(http://www.pingdom.com/)")).to eq(true)
+      expect(described_class.crawler?("LogicMonitor SiteMonitor/1.0")).to eq(true)
+      expect(described_class.crawler?("Java/1.8.0_151")).to eq(true)
+      expect(described_class.crawler?("Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)")).to eq(true)
     end
 
   end