Mirror of https://github.com/discourse/discourse.git
Commit d84256a876: This strips pages out of indexes that should not exist. See: https://meta.discourse.org/t/pages-listed-in-the-robots-txt-are-crawled-and-indexed-by-google/100309/11?u=sam
20 lines
487 B
Plaintext
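For context, the template below reads a single @robots_info hash with a :header string and a list of :agents, each carrying a :name, an optional :delay, and its :disallow paths. A minimal sketch of that structure, with hypothetical values (the real hash is assembled elsewhere in Discourse):

# Hypothetical shape of the @robots_info hash the template reads from; the real
# hash is built elsewhere in Discourse, so keys shown match the template but the
# values here are purely illustrative.
@robots_info = {
  header: "# robots.txt for this Discourse site",   # free-form header comment
  agents: [
    { name: "*", delay: nil, disallow: ["/admin/", "/auth/"] },
    { name: "SlowCrawler", delay: 10, disallow: ["/"] }   # "SlowCrawler" is a made-up agent name
  ]
}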
<%= @robots_info[:header] %>
<% if Discourse.base_uri.present? %>
# This robots.txt file is not used. Please append the content below in the robots.txt file located at the root
<% end %>
#
<% @robots_info[:agents].each do |agent| %>
User-agent: <%= agent[:name] %>
<%- if agent[:delay] -%>
Crawl-delay: <%= agent[:delay] %>
<%- end -%>
<% agent[:disallow].each do |path| %>
Disallow: <%= path %>
Noindex: <%= path %>
<% end %>

<% end %>

<%= server_plugin_outlet "robots_txt_index" %>
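Rendered against the hypothetical @robots_info sketched above, and ignoring incidental blank lines from untrimmed ERB tags, the output would look roughly like this:

# robots.txt for this Discourse site
#
User-agent: *
Disallow: /admin/
Noindex: /admin/
Disallow: /auth/
Noindex: /auth/

User-agent: SlowCrawler
Crawl-delay: 10
Disallow: /
Noindex: /

Each Disallow is paired with a Noindex line, which is what this commit adds so that the disallowed pages are also kept out of search indexes by crawlers that honor that directive.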