2018-11-22 15:46:39 +08:00
|
|
|
#!/usr/bin/env ruby
|
2018-11-23 11:43:39 +08:00
|
|
|
# frozen_string_literal: true
|
2018-11-22 15:46:39 +08:00
|
|
|
|
2022-07-05 09:37:50 +08:00
|
|
|
require 'optparse'
|
|
|
|
|
2022-05-26 11:39:24 +08:00
|
|
|
# cache_critical_dns is intended to be used for performing DNS lookups against
|
|
|
|
# the services critical for Discourse to run - PostgreSQL and Redis. The
|
|
|
|
# cache mechanism is storing the resolved host addresses in /etc/hosts. This can
|
|
|
|
# protect against DNS lookup failures _after_ the resolved addresses have been
|
|
|
|
# written to /etc/hosts at least once. Example lookup failures may be NXDOMAIN
|
|
|
|
# or SERVFAIL responses from DNS requests.
|
|
|
|
#
|
|
|
|
# Before a resolved address is cached, a protocol-aware healthcheck is
|
|
|
|
# performed against the host with the authentication details found for that
|
|
|
|
# service in the process environment. Any hosts that fail the healthcheck will
|
|
|
|
# never be cached.
|
|
|
|
#
|
2023-01-19 12:12:25 +08:00
|
|
|
# The list of environment variables that cache_critical_dns will read for
|
|
|
|
# critical service hostnames can be extended at process execution time by
|
|
|
|
# specifying environment variable names within the
|
|
|
|
# DISCOURSE_DNS_CACHE_ADDITIONAL_SERVICE_NAMES environment variable. This is a
|
|
|
|
# comma-delimited string of extra environment variables to be added to the list
|
|
|
|
# defined in the static CRITICAL_HOST_ENV_VARS hash.
|
|
|
|
#
|
|
|
|
# DISCOURSE_DNS_CACHE_ADDITIONAL_SERVICE_NAMES serves as a kind of lookup table
|
|
|
|
# for extra services for caching. Any environment variable names within this
|
|
|
|
# list are treated with the same rules as the DISCOURSE_DB_HOST (and co.)
|
|
|
|
# variables, as described below.
|
|
|
|
#
|
2022-05-26 11:39:24 +08:00
|
|
|
# This is as far as you need to read if you are using CNAME or A records for
|
|
|
|
# your services.
|
|
|
|
#
|
|
|
|
# The extended behaviour of cache_critical_dns is to add SRV RR lookup support
|
|
|
|
# for DNS Service Discovery (see http://www.dns-sd.org/). For any of the critical
|
|
|
|
# service environment variables (see CRITICAL_HOST_ENV_VARS), if a corresponding
|
|
|
|
# SRV environment variable is found (suffixed with _SRV), cache_critical_dns
|
|
|
|
# will assume that SRV RRs should exist and will begin to lookup SRV targets
|
|
|
|
# for resolving the host addresses for caching, and ignore the original service
|
|
|
|
# name variable. Healthy target addresses are cached against the original service
|
|
|
|
# environment variable, as the Discourse application expects. For example a
|
|
|
|
# healthy target found from the SRV lookup for DISCOURSE_DB_HOST_SRV will be
|
|
|
|
# cached against the name specified by the DISCOURSE_DB_HOST.
|
|
|
|
#
|
|
|
|
# Example environment variables for SRV lookups are:
|
|
|
|
# DISCOURSE_DB_HOST_SRV
|
|
|
|
# DISCOURSE_DB_REPLICA_HOST_SRV
|
|
|
|
# DISCOURSE_REDIS_HOST_SRV
|
|
|
|
# DISCOURSE_REDIS_REPLICA_HOST_SRV
|
|
|
|
#
|
|
|
|
# cache_critical_dns will keep an internal record of all names resolved within
|
|
|
|
# the last 30 minutes. This internal cache is to give a priority order to new
|
|
|
|
# SRV targets that have appeared during the program runtime (SRV records
|
|
|
|
# contain zero or more targets, which may appear or disappear at any time).
|
|
|
|
# If a target has not been seen for more than 30 minutes it will be evicted from
|
|
|
|
# the internal cache. The internal cache of healthy targets is a fallback for
|
|
|
|
# when errors occur during DNS lookups.
|
|
|
|
#
|
|
|
|
# Targets that are resolved and found healthy usually find themselves in the host
|
|
|
|
# cache, depending on if they are the newest or not. Targets that are resolved
|
|
|
|
# but never found healthy will never be cached or even stored in the internal
|
|
|
|
# cache. Targets that _begin_ healthy and are cached, and _become_ unhealthy
|
|
|
|
# will only be removed from the host cache if another newer target is resolved
|
|
|
|
# and found to be healthy. This is because we never write a resolved target to
|
|
|
|
# the hosts cache unless it is both the newest and healthy. We assume that
|
|
|
|
# cached hosts are healthy until they are superseded by a newer healthy target.
|
|
|
|
#
|
|
|
|
# An SRV RR specifies a priority value for each of the SRV targets that
|
|
|
|
# are present, ranging from 0 - 65535. When caching SRV records we may want to
|
|
|
|
# filter out any targets above or below a particular threshold. The LE (less
|
|
|
|
# than or equal to) and GE (greater than or equal to) environment variables
|
|
|
|
# (suffixed with _PRIORITY_LE or PRIORITY_GE) for a corresponding SRV variable
|
|
|
|
# will ignore targets above or below the threshold, respectively.
|
|
|
|
#
|
|
|
|
# This mechanism may be used for SRV RRs that share a single name and utilise
|
|
|
|
# the priority value for signalling to cache_critical_dns which targets are
|
|
|
|
# relevant to a given name. Any target found outside of the threshold is ignored.
|
|
|
|
# The host and internal caching behavior are otherwise the same.
|
|
|
|
#
|
|
|
|
# Example environment variables for SRV priority thresholds are:
|
|
|
|
# DISCOURSE_DB_HOST_SRV_PRIORITY_LE
|
|
|
|
# DISCOURSE_DB_REPLICA_HOST_SRV_PRIORITY_GE
|
|
|
|
|
2022-04-06 07:44:34 +08:00
|
|
|
# Specifying this env var ensures ruby can load gems installed via the Discourse
|
|
|
|
# project Gemfile (e.g. pg, redis).
|
|
|
|
ENV['BUNDLE_GEMFILE'] ||= '/var/www/discourse/Gemfile'
|
|
|
|
require 'bundler/setup'
|
|
|
|
|
2019-12-28 01:39:08 +08:00
|
|
|
require 'ipaddr'
|
2022-04-06 07:44:34 +08:00
|
|
|
require 'pg'
|
|
|
|
require 'redis'
|
2018-11-22 15:46:39 +08:00
|
|
|
require 'resolv'
|
|
|
|
require 'socket'
|
2022-04-06 07:44:34 +08:00
|
|
|
require 'time'
|
2018-11-22 15:46:39 +08:00
|
|
|
|
|
|
|
# Environment variables naming the critical service hosts (PostgreSQL and
# Redis, primaries and replicas), plus any extra service names supplied at
# runtime via the comma-delimited
# DISCOURSE_DNS_CACHE_ADDITIONAL_SERVICE_NAMES variable. `union` removes
# duplicates between the static list and the runtime additions.
CRITICAL_HOST_ENV_VARS = %w{
  DISCOURSE_DB_HOST
  DISCOURSE_DB_REPLICA_HOST
  DISCOURSE_REDIS_HOST
  DISCOURSE_REDIS_SLAVE_HOST
  DISCOURSE_REDIS_REPLICA_HOST
  DISCOURSE_MESSAGE_BUS_REDIS_HOST
  DISCOURSE_MESSAGE_BUS_REDIS_REPLICA_HOST
}.union(
  ENV.fetch('DISCOURSE_DNS_CACHE_ADDITIONAL_SERVICE_NAMES', '').split(',').map(&:strip)
)
|
2022-05-18 04:09:32 +08:00
|
|
|
|
|
|
|
# Fallback database name/port and Redis port, used when the corresponding
# DISCOURSE_* environment variables are unset.
DEFAULT_DB_NAME = "discourse"
DEFAULT_DB_PORT = 5432
DEFAULT_REDIS_PORT = 6379

# Process-lifetime caches, keyed by env var name; populated lazily in run().
HOST_RESOLVER_CACHE = {}  # env var name => ResolverCache
HOST_HEALTHY_CACHE = {}   # env var name => HealthyCache
# Hosts file that gets rewritten with healthy addresses; overridable (e.g.
# for testing) via DISCOURSE_DNS_CACHE_HOSTS_FILE.
HOSTS_PATH = ENV['DISCOURSE_DNS_CACHE_HOSTS_FILE'] || "/etc/hosts"
|
2022-05-09 09:34:04 +08:00
|
|
|
|
|
|
|
# Inclusive SRV priority window. min and max are integers bounding the
# accepted priority of an SRV RR target.
PrioFilter = Struct.new(:min, :max) do
  # True when the target priority +p+ lies within the inclusive
  # [min, max] window; targets outside it are ignored by the caller.
  def within_threshold?(p)
    (min..max).cover?(p)
  end
end
|
|
|
|
# Legal bounds of an SRV priority value (16-bit unsigned, 0-65535).
SRV_PRIORITY_THRESHOLD_MIN = 0
SRV_PRIORITY_THRESHOLD_MAX = 65535
# Priority filter per SRV env var name. The Hash default (returned for any
# unregistered key) accepts the full range, i.e. no filtering.
# NOTE: Hash.new(obj) shares a single default instance across all missing
# keys; safe here because the default PrioFilter is only read, never mutated.
SRV_PRIORITY_FILTERS = Hash.new(
  PrioFilter.new(SRV_PRIORITY_THRESHOLD_MIN, SRV_PRIORITY_THRESHOLD_MAX))

# Seconds to sleep between refresh passes of the main loop.
REFRESH_SECONDS = 30
|
2022-04-06 07:44:34 +08:00
|
|
|
|
|
|
|
# Mixin providing a block-scoped Resolv::DNS client with short query
# timeouts, so a slow or unresponsive resolver cannot stall the caller.
module DNSClient
  # Yields a Resolv::DNS client whose per-query timeout is capped at 2
  # seconds. The client is closed by Resolv::DNS.open when the block returns.
  def dns_client_with_timeout
    Resolv::DNS.open do |dns_client|
      # Bound each lookup attempt to 2 seconds.
      dns_client.timeouts = 2
      yield dns_client
    end
  end
end
|
|
|
|
|
|
|
|
# Resolves a plain hostname (CNAME/A/AAAA) to its addresses.
class Name
  include DNSClient

  def initialize(hostname)
    @name = hostname
  end

  # Returns every IPv4 and IPv6 address for the hostname, as strings.
  # Lookups go through DNSClient's short-timeout resolver.
  def resolve
    dns_client_with_timeout do |dns_client|
      record_types = [Resolv::DNS::Resource::IN::A, Resolv::DNS::Resource::IN::AAAA]
      record_types
        .flat_map { |rr_type| dns_client.getresources(@name, rr_type).map(&:address) }
        .map(&:to_s)
    end
  end
end
|
|
|
|
|
|
|
|
# Resolves an SRV record name to the addresses of its targets, keeping only
# targets whose priority passes the supplied PrioFilter.
class SRVName
  include DNSClient

  def initialize(srv_hostname, prio_filter)
    @name = srv_hostname
    @prio_filter = prio_filter
  end

  # Returns the addresses (strings) of every in-threshold SRV target.
  # Each surviving target name is resolved via Name#resolve.
  def resolve
    dns_client_with_timeout do |dns_client|
      srv_targets = dns_client.getresources(@name, Resolv::DNS::Resource::IN::SRV)
      eligible = srv_targets.select { |target| @prio_filter.within_threshold?(target.priority) }
      eligible.flat_map { |target| Name.new(target.target.to_s).resolve }
    end
  end
end
|
|
|
|
|
|
|
|
# Bookkeeping for one resolved address: when DNS first returned it and when
# it was most recently seen (drives the 30-minute eviction in ResolverCache).
CacheMeta = Struct.new(:first_seen, :last_seen)

# Raised when resolution plus cache eviction leaves no usable addresses.
class EmptyCacheError < StandardError; end
|
|
|
|
|
2022-04-06 07:44:34 +08:00
|
|
|
# Caches the addresses resolved for a single Name/SRVName over the process
# lifetime, so transient DNS failures can be bridged with recently-seen
# addresses.
class ResolverCache
  # Addresses not seen for this long (seconds) are evicted from the cache.
  MAX_AGE_SECONDS = 30 * 60

  def initialize(name)
    # instance of Name|SRVName
    @name = name

    # {IPv4/IPv6 address: CacheMeta}
    @cached = {}
  end

  # Returns a list of resolved addresses ordered by first seen time. Most
  # recently seen address is first.
  # If an exception occurs during DNS resolution we return whatever addresses
  # are cached.
  # Addresses last seen more than 30 minutes ago are evicted from the cache.
  # Raises EmptyCacheError if the cache is empty after DNS resolution and
  # cache eviction is performed.
  def resolve
    # Single timestamp per pass so stamping and eviction use the same clock
    # (the original called Time.now.utc separately for each).
    now = Time.now.utc

    begin
      @name.resolve.each do |address|
        if @cached[address]
          @cached[address].last_seen = now
        else
          @cached[address] = CacheMeta.new(now, now)
        end
      end
    rescue Resolv::ResolvError, Resolv::ResolvTimeout
      # Lookup failed; fall through and serve whatever is still cached.
    end

    # delete_if mutates in place; the original's `@cached = @cached.delete_if`
    # reassignment was redundant.
    @cached.delete_if { |_, meta| now - MAX_AGE_SECONDS > meta.last_seen }

    raise EmptyCacheError, "DNS resolver found no usable addresses" if @cached.empty?

    @cached.sort_by { |_, meta| meta.first_seen }.reverse.map(&:first)
  end
end
|
|
|
|
|
|
|
|
# Wraps a ResolverCache and a health-check callable, remembering the most
# recent address that passed the check.
class HealthyCache
  def initialize(resolver_cache, check)
    @resolver_cache = resolver_cache # instance of ResolverCache
    @check = check                   # callable invoked with a candidate address
    @cached = nil                    # single most-recently-healthy IP address
  end

  # Returns the first healthy server found in the list of resolved addresses.
  # Returns the last known healthy server if all servers disappear from the
  # DNS, or if none of the current candidates pass the check.
  # Raises EmptyCacheError if no healthy servers have ever been cached.
  def first_healthy
    begin
      addresses = @resolver_cache.resolve
    rescue EmptyCacheError
      return @cached unless @cached.nil?
      raise
    end

    # find short-circuits on the first address the check accepts.
    healthy = addresses.find { |addr| @check.call(addr) }
    @cached = healthy unless healthy.nil?

    raise EmptyCacheError, "no healthy servers found amongst #{addresses}" if @cached.nil?

    @cached
  end
end
|
|
|
|
|
2022-10-06 14:11:24 +08:00
|
|
|
# Returns true when the Redis server at host:port answers PING with PONG
# using the given password; false on any error or timeout. TLS is enabled
# (without certificate verification) when DISCOURSE_REDIS_USE_SSL is set to
# a non-empty value.
def redis_healthcheck(host:, password:, port: DEFAULT_REDIS_PORT)
  client_opts = {
    host: host,
    password: password,
    port: port,
    timeout: 1,
  }

  unless nilempty(ENV['DISCOURSE_REDIS_USE_SSL']).nil?
    client_opts[:ssl] = true
    client_opts[:ssl_params] = {
      verify_mode: OpenSSL::SSL::VERIFY_NONE,
    }
  end

  client = Redis.new(**client_opts)
  client.ping == "PONG"
rescue
  false
ensure
  client.close if client
end
|
|
|
|
|
2023-03-09 12:03:51 +08:00
|
|
|
# Returns true when a trivial query succeeds against the given PostgreSQL
# host with the provided credentials; false on any error or timeout.
def postgres_healthcheck(host:, user:, password:, dbname:, port: DEFAULT_DB_PORT)
  client = PG::Connection.new(
    host: host,
    user: user,
    password: password,
    dbname: dbname,
    port: port,
    connect_timeout: 2, # minimum
  )
  # ';' is an empty statement: the cheapest round-trip that still exercises
  # authentication and the query path. An empty result set means success.
  client.exec(';').none?
rescue
  false
ensure
  client.close if client
end
|
|
|
|
|
2023-01-19 12:12:25 +08:00
|
|
|
# Health-check lambda per critical service env var (symbol keys). Each
# lambda takes a candidate address and returns true when that address is
# serving the expected protocol with the credentials in the environment.
HEALTH_CHECKS = Hash.new(
  # unknown keys (like services defined at runtime) are assumed to be healthy
  lambda { |addr| true }
).merge!({
  "DISCOURSE_DB_HOST": lambda { |addr|
    postgres_healthcheck(
      host: addr,
      user: ENV["DISCOURSE_DB_USERNAME"] || DEFAULT_DB_NAME,
      dbname: ENV["DISCOURSE_DB_NAME"] || DEFAULT_DB_NAME,
      port: ENV["DISCOURSE_DB_PORT"] || DEFAULT_DB_PORT,
      password: ENV["DISCOURSE_DB_PASSWORD"])},
  "DISCOURSE_DB_REPLICA_HOST": lambda { |addr|
    postgres_healthcheck(
      host: addr,
      user: ENV["DISCOURSE_DB_USERNAME"] || DEFAULT_DB_NAME,
      dbname: ENV["DISCOURSE_DB_NAME"] || DEFAULT_DB_NAME,
      port: ENV["DISCOURSE_DB_REPLICA_PORT"] || DEFAULT_DB_PORT,
      password: ENV["DISCOURSE_DB_PASSWORD"])},
  "DISCOURSE_REDIS_HOST": lambda { |addr|
    redis_healthcheck(
      host: addr,
      password: ENV["DISCOURSE_REDIS_PASSWORD"])},
  "DISCOURSE_REDIS_REPLICA_HOST": lambda { |addr|
    redis_healthcheck(
      host: addr,
      password: ENV["DISCOURSE_REDIS_PASSWORD"])},
  "DISCOURSE_MESSAGE_BUS_REDIS_HOST": lambda { |addr|
    redis_healthcheck(
      host: addr,
      port: env_as_int("DISCOURSE_MESSAGE_BUS_REDIS_PORT", DEFAULT_REDIS_PORT),
      password: ENV["DISCOURSE_MESSAGE_BUS_REDIS_PASSWORD"])},
  "DISCOURSE_MESSAGE_BUS_REDIS_REPLICA_HOST": lambda { |addr|
    redis_healthcheck(
      host: addr,
      port: env_as_int("DISCOURSE_MESSAGE_BUS_REDIS_REPLICA_PORT", DEFAULT_REDIS_PORT),
      password: ENV["DISCOURSE_MESSAGE_BUS_REDIS_PASSWORD"])},
})
|
2018-11-22 15:46:39 +08:00
|
|
|
|
|
|
|
# Writes a UTC-timestamped message to STDERR.
def log(msg)
  STDERR.puts "#{Time.now.utc.iso8601}: #{msg}"
end
|
|
|
|
|
|
|
|
# Logs an error message. Thin wrapper over log so call sites read
# semantically; it does not raise or exit.
def error(msg)
  log(msg)
end
|
|
|
|
|
|
|
|
# Returns a new hosts-file body (String) in which every non-comment entry
# for +name+ has been dropped and replaced by one line per address in +ips+,
# each tagged with an AUTO GENERATED timestamp comment.
#
# hosts - current hosts file content (String)
# name  - hostname whose entries should be replaced
# ips   - list of IP address strings to write for name
def swap_address(hosts, name, ips)
  kept = []

  hosts.split("\n").each do |raw|
    entry = raw.strip
    # Comment lines pass through untouched; data lines for `name` are dropped.
    unless entry.start_with?('#')
      _, hostname = entry.split(/\s+/)
      next if hostname == name
    end
    kept << entry << "\n"
  end

  ips.each do |ip|
    kept << "#{ip} #{name} # AUTO GENERATED: #{Time.now.utc.iso8601}\n"
  end

  kept.join
end
|
|
|
|
|
|
|
|
# Reports a Counter metric to the local discourse-prometheus collector over
# a hand-rolled HTTP/1.1 POST. Failures are logged and swallowed: metrics
# are best-effort and must never break the DNS caching loop.
#
# name        - metric name (String)
# description - metric help text (String)
# labels      - optional Hash of label names to values, or nil
# value       - amount to add to the counter
def send_counter(name, description, labels, value)
  host = "localhost"
  port = env_as_int("DISCOURSE_PROMETHEUS_COLLECTOR_PORT", 9405)

  if labels
    labels = labels.map do |k, v|
      "\"#{k}\": \"#{v}\""
    end.join(",")
  else
    labels = ""
  end

  json = <<~JSON
    {
      "_type": "Custom",
      "type": "Counter",
      "name": "#{name}",
      "description": "#{description}",
      "labels": { #{labels} },
      "value": #{value}
    }
  JSON

  payload = +"POST /send-metrics HTTP/1.1\r\n"
  payload << "Host: #{host}\r\n"
  payload << "Connection: Close\r\n"
  payload << "Content-Type: application/json\r\n"
  payload << "Content-Length: #{json.bytesize}\r\n"
  payload << "\r\n"
  payload << json

  socket = TCPSocket.new host, port
  socket.write payload
  socket.flush
  result = socket.read
  first_line = result.split("\n")[0]
  if first_line.strip != "HTTP/1.1 200 OK"
    error("Failed to report metric #{result}")
  end
rescue => e
  error("Failed to send metric to Prometheus #{e}")
ensure
  # Always release the socket. The original only closed it on the success
  # path, leaking the descriptor whenever write/read raised (the method-level
  # rescue swallowed the error with the socket still open).
  socket.close if socket && !socket.closed?
end
|
|
|
|
|
|
|
|
# Emits a single success tick to the metrics collector after an error-free run.
def report_success
  send_counter('critical_dns_successes_total', 'critical DNS resolution success', nil, 1)
end
|
|
|
|
|
|
|
|
# Emits one failure counter per failing host, labelled with the host name
# when known. A nil host key (used for run-level unhandled exceptions) is
# sent without labels.
#
# errors - Hash of host name (or nil) => failure count
def report_failure(errors)
  errors.each do |host, count|
    labels = host ? { host: host } : nil
    send_counter('critical_dns_failures_total', 'critical DNS resolution failures', labels, count)
  end
end
|
|
|
|
|
2022-04-06 07:44:34 +08:00
|
|
|
# Normalises "absent" values: returns nil for nil or for anything that
# reports itself empty (per #empty?); otherwise returns the value unchanged.
def nilempty(v)
  return nil if v.nil?
  return nil if v.respond_to?(:empty?) && v.empty?

  v
end
|
2018-11-22 15:46:39 +08:00
|
|
|
|
2022-05-09 09:34:04 +08:00
|
|
|
# Name of the SRV-variant environment variable for a service env var,
# e.g. "DISCOURSE_DB_HOST" => "DISCOURSE_DB_HOST_SRV".
def env_srv_var(env_name)
  format('%s_SRV', env_name)
end
|
|
|
|
|
2022-04-06 07:44:34 +08:00
|
|
|
# Value of the _SRV environment variable corresponding to env_name, or nil
# when it is unset or empty.
def env_srv_name(env_name)
  nilempty(ENV.fetch(env_srv_var(env_name), nil))
end
|
2018-11-22 15:46:39 +08:00
|
|
|
|
2022-10-06 14:11:24 +08:00
|
|
|
# Reads env_name from the environment and coerces it to an Integer.
# Returns +default+ when the variable is unset OR set to an empty string.
#
# Fix: the original returned the raw value when `nilempty(val).nil?` was
# true, so a variable set to "" came back as "" (a String) instead of the
# default, silently breaking integer call sites such as port numbers.
def env_as_int(env_name, default = nil)
  raw = nilempty(ENV[env_name])
  return default if raw.nil?

  raw.to_i
end
|
|
|
|
|
2022-07-05 09:37:50 +08:00
|
|
|
# Executes one caching pass over hostname_vars, then reports the outcome to
# the metrics collector: a success tick when the pass was error-free,
# otherwise one failure counter per failed host.
def run_and_report(hostname_vars)
  errors = run(hostname_vars)
  errors.empty? ? report_success : report_failure(errors)
end
|
|
|
|
|
2022-04-06 07:44:34 +08:00
|
|
|
# Performs one full caching pass over the given env var names:
#   1. lazily builds a ResolverCache (SRV-aware) and HealthyCache per var,
#   2. resolves the first healthy address for each service,
#   3. rewrites HOSTS_PATH when any service's addresses changed.
# Returns a Hash of host name (or nil for an unhandled exception) => error
# count; empty on a fully successful pass.
def run(hostname_vars)
  resolved = {}
  errors = Hash.new(0)

  hostname_vars.each do |var|
    name = ENV[var]
    # Resolver is chosen once per var and memoised: SRV lookup when a
    # corresponding _SRV variable exists, plain A/AAAA lookup otherwise.
    HOST_RESOLVER_CACHE[var] ||= ResolverCache.new(
      if (srv_name = env_srv_name(var))
        SRVName.new(srv_name, SRV_PRIORITY_FILTERS[env_srv_var(var)])
      else
        Name.new(name)
      end
    )

    # HEALTH_CHECKS defaults to an always-healthy lambda for unknown vars.
    HOST_HEALTHY_CACHE[var] ||= HealthyCache.new(HOST_RESOLVER_CACHE[var], HEALTH_CHECKS[var.to_sym])

    begin
      address = HOST_HEALTHY_CACHE[var].first_healthy
      resolved[name] = [address]
    rescue EmptyCacheError => e
      error("#{var}: #{name}: #{e}")
      errors[name] += 1
    end
  end

  hosts_content = File.read(HOSTS_PATH)
  hosts = Resolv::Hosts.new(HOSTS_PATH)

  # Only rewrite the hosts file when some service's address set changed.
  changed = false
  resolved.each do |hostname, ips|
    if hosts.getaddresses(hostname).map(&:to_s).sort != ips.sort
      log("IP addresses for #{hostname} changed to #{ips}")
      hosts_content = swap_address(hosts_content, hostname, ips)
      changed = true
    end
  end

  if changed
    File.write(HOSTS_PATH, hosts_content)
  end
rescue => e
  error("unhandled exception during run: #{e}")
  errors[nil] = 1
ensure
  # NOTE: `return` inside ensure makes the error Hash the method's result on
  # every path — and deliberately discards any in-flight exception, so a
  # failed pass surfaces as errors[nil] rather than crashing the loop.
  return errors
end
|
|
|
|
|
2022-04-06 07:44:34 +08:00
|
|
|
# If any of the host variables are an explicit IP we will not attempt to cache
# them.
# Result: the subset of CRITICAL_HOST_ENV_VARS that are set, non-empty, and
# contain a hostname rather than a literal IPv4/IPv6 address.
all_hostname_vars = CRITICAL_HOST_ENV_VARS.select do |name|
  host = ENV[name]
  if nilempty(host).nil?
    # don't attempt to cache host vars that aren't present in the environment
    false
  else
    begin
      # IPAddr.new succeeding means the value is already a literal address.
      IPAddr.new(host)
      # host is an IPv4 / IPv6 address
      false
    rescue IPAddr::InvalidAddressError, IPAddr::AddressFamilyError
      true
    end
  end
end
|
|
|
|
|
2022-05-09 09:34:04 +08:00
|
|
|
# Populate the SRV_PRIORITY_FILTERS for any name that has a priority present in
# the environment. If no priority thresholds are found for the name, the default
# is that no filtering based on priority will be performed.
CRITICAL_HOST_ENV_VARS.each do |v|
  # Only vars with a corresponding _SRV variable participate in filtering.
  if (name = env_srv_name(v))
    # _PRIORITY_LE caps the accepted priority; _PRIORITY_GE sets the floor.
    max = env_as_int("#{env_srv_var(v)}_PRIORITY_LE", SRV_PRIORITY_THRESHOLD_MAX)
    min = env_as_int("#{env_srv_var(v)}_PRIORITY_GE", SRV_PRIORITY_THRESHOLD_MIN)
    # Reject configs outside the 0-65535 SRV priority range or inverted windows.
    if max > SRV_PRIORITY_THRESHOLD_MAX ||
        min < SRV_PRIORITY_THRESHOLD_MIN ||
        min > max
      raise "invalid priority threshold set for #{v}"
    end

    SRV_PRIORITY_FILTERS[env_srv_var(v)] = PrioFilter.new(min, max)
  end
end
|
|
|
|
|
2022-07-05 09:37:50 +08:00
|
|
|
# CLI entry point: `--once` performs a single pass and exits non-zero when
# any errors occurred; otherwise the script refreshes the cache forever,
# sleeping REFRESH_SECONDS between passes.
options = { once: false }

OptionParser.new do |opts|
  opts.on("--once", "run script once instead of indefinitely") do
    options[:once] = true
  end
end.parse!

if options[:once]
  errors = run(all_hostname_vars)
  exit errors.empty? ? 0 : 1
end

loop do
  run_and_report(all_hostname_vars)
  sleep REFRESH_SECONDS
end