# frozen_string_literal: true

# See http://unicorn.bogomips.org/Unicorn/Configurator.html
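# Unicorn is usually started against this file with something along the lines of
# the example below (illustrative only; the exact invocation depends on the
# deployment):
#
#   RAILS_ENV=production bundle exec unicorn -c config/unicorn.conf.rb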

discourse_path = File.expand_path(File.expand_path(File.dirname(__FILE__)) + "/../")
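
# When ENABLE_LOGSTASH_LOGGER=1, route Unicorn's logging through
# DiscourseLogstashLogger (writing to log/unicorn.stderr.log); otherwise log to STDOUT.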
enable_logstash_logger = ENV["ENABLE_LOGSTASH_LOGGER"] == "1"
unicorn_stderr_path = "#{discourse_path}/log/unicorn.stderr.log"
unicorn_stdout_path = "#{discourse_path}/log/unicorn.stdout.log"

if enable_logstash_logger
  require_relative "../lib/discourse_logstash_logger"
  require_relative "../lib/unicorn_logstash_patch"
  FileUtils.touch(unicorn_stderr_path) if !File.exist?(unicorn_stderr_path)
  logger DiscourseLogstashLogger.logger(
    logdev: unicorn_stderr_path,
    type: :unicorn,
    customize_event: lambda { |event| event["@timestamp"] = ::Time.now.utc },
  )
else
  logger Logger.new(STDOUT)
end

# tune down if there is not enough RAM
worker_processes (ENV["UNICORN_WORKERS"] || 3).to_i

working_directory discourse_path
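
# Listen on UNICORN_LISTENER if given; otherwise on TCP port UNICORN_PORT
# (default 3000), bound to 127.0.0.1 unless UNICORN_BIND_ALL is set. A unix
# socket listener can be used instead (see the commented example below).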
# listen "#{discourse_path}/tmp/sockets/unicorn.sock"

# stree-ignore
listen ENV["UNICORN_LISTENER"] || "#{(ENV["UNICORN_BIND_ALL"] ? "" : "127.0.0.1:")}#{(ENV["UNICORN_PORT"] || 3000).to_i}"

FileUtils.mkdir_p("#{discourse_path}/tmp/pids") if !File.exist?("#{discourse_path}/tmp/pids")

# feel free to point this anywhere accessible on the filesystem
pid(ENV["UNICORN_PID_PATH"] || "#{discourse_path}/tmp/pids/unicorn.pid")

if ENV["RAILS_ENV"] == "production"
  # By default, the Unicorn logger will write to stderr.
  # Additionally, some applications/frameworks log to stderr or stdout,
  # so prevent them from going to /dev/null when daemonized here:
  stderr_path unicorn_stderr_path
  stdout_path unicorn_stdout_path

  # nuke workers after 30 seconds instead of 60 seconds (the default)
  timeout 30
else
  # we want a longer timeout in dev because the first request can be really slow
  timeout(ENV["UNICORN_TIMEOUT"] && ENV["UNICORN_TIMEOUT"].to_i || 60)
end

# Preload the app in the master so forked workers can share memory via
# copy-on-write (important since Ruby 2.0's CoW-friendly GC).
preload_app true

# Enable this flag to have unicorn test client connections by writing the
# beginning of the HTTP headers before calling the application. This
# prevents calling the application for connections that have disconnected
# while queued. This is only guaranteed to detect clients on the same
# host unicorn runs on, and unlikely to detect disconnects even on a
# fast LAN.
check_client_connection false

initialized = false
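
# before_fork runs in the Unicorn master each time a worker is about to be
# forked. The first run preloads Rails and boots the supervised demon
# processes; every run closes the master's Redis connection and throttles
# forking slightly.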
before_fork do |server, worker|
  unless initialized
    Discourse.preload_rails!
    Discourse.before_fork

    initialized = true
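
    # If UNICORN_SUPERVISOR_PID is set, watch that process and TERM ourselves
    # as soon as it disappears, so Unicorn does not outlive its supervisor.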
    supervisor = ENV["UNICORN_SUPERVISOR_PID"].to_i
    if supervisor > 0
      Thread.new do
        while true
          unless File.exist?("/proc/#{supervisor}")
            server.logger.error "Supervisor is gone, killing self"
            Process.kill "TERM", Process.pid
          end
          sleep 2
        end
      end
    end
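
    # Optionally fork UNICORN_SIDEKIQS supervised Sidekiq daemons from the
    # master; master_sleep (patched below) keeps them running and healthy.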
    sidekiqs = ENV["UNICORN_SIDEKIQS"].to_i
    if sidekiqs > 0
      server.logger.info "starting #{sidekiqs} supervised sidekiqs"

      require "demon/sidekiq"
      Demon::Sidekiq.after_fork { DiscourseEvent.trigger(:sidekiq_fork_started) }
      Demon::Sidekiq.start(sidekiqs, logger: server.logger)

      if Discourse.enable_sidekiq_logging?
        # Trap USR1 so we can re-issue it to the sidekiq workers, but chain the
        # default unicorn implementation as well.
        old_handler =
          Signal.trap("USR1") do
            old_handler.call

            # We have seen Sidekiq processes getting stuck in production sporadically when log rotation
            # happens. The cause is currently unknown, but we suspect it is related to the Unicorn master
            # process and the Sidekiq demon processes reopening their logs at the same time, since Unicorn
            # worker processes only reopen logs after the Unicorn master process is done. To work around the
            # problem, Sidekiq's log reopening procedure is delayed by an arbitrary one second, which should
            # be more than enough for the Unicorn master process to finish reopening logs.
            Demon::Sidekiq.kill("USR2")
          end
      end
    end

    if ENV["DISCOURSE_ENABLE_EMAIL_SYNC_DEMON"] == "true"
      server.logger.info "starting up EmailSync demon"
      Demon::EmailSync.start(1, logger: server.logger)
    end

    DiscoursePluginRegistry.demon_processes.each do |demon_class|
      server.logger.info "starting #{demon_class.prefix} demon"
      demon_class.start(1, logger: server.logger)
    end
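
    # Reopen Unicorn's HttpServer so that the master's idle loop (master_sleep)
    # also supervises the demon processes started above, and so that workers
    # about to hit the request timeout get a chance to dump a backtrace.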
    class ::Unicorn::HttpServer
      alias master_sleep_orig master_sleep

      # Original source: https://github.com/defunkt/unicorn/blob/6c9c442fb6aa12fd871237bc2bb5aec56c5b3538/lib/unicorn/http_server.rb#L477-L496
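      # The monkey patch below logs and sends USR2 to a worker shortly before it
      # would be killed for exceeding the timeout, so the worker can dump its
      # main thread's backtrace (see the USR2 trap installed in after_fork).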
      def murder_lazy_workers
        next_sleep = @timeout - 1
        now = time_now.to_i
        @workers.dup.each_pair do |wpid, worker|
          tick = worker.tick
          0 == tick and next # skip workers that haven't processed any clients
          diff = now - tick
          tmp = @timeout - diff

          # START MONKEY PATCH
          if tmp < 2 && !worker.instance_variable_get(:@timing_out_logged)
            logger.error do
              "worker=#{worker.nr} PID:#{wpid} running too long (#{diff}s), sending USR2 to dump thread backtraces"
            end

            worker.instance_variable_set(:@timing_out_logged, true)
            kill_worker(:USR2, wpid)
          end
          # END MONKEY PATCH

          if tmp >= 0
            next_sleep > tmp and next_sleep = tmp
            next
          end
          next_sleep = 0
          logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
                         "(#{diff}s > #{@timeout}s), killing"

          kill_worker(:KILL, wpid) # take no prisoners for timeout violations
        end
        next_sleep <= 0 ? 1 : next_sleep
      end
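
      # Largest resident set size, in bytes, among running processes whose
      # command line mentions "sidekiq" (ps reports RSS in kilobytes).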
      def max_sidekiq_rss
        rss =
          `ps -eo rss,args | grep sidekiq | grep -v grep | awk '{print $1}'`.split("\n")
            .map(&:to_i)
            .max

        rss ||= 0

        rss * 1024
      end
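
      # Memory ceiling for Sidekiq: UNICORN_SIDEKIQ_MAX_RSS megabytes, with a
      # floor of 500 MB.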
      def max_allowed_sidekiq_rss
        [ENV["UNICORN_SIDEKIQ_MAX_RSS"].to_i, 500].max.megabytes
      end
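
      # Hard-kill any Sidekiq process that is still above the memory ceiling
      # (a fallback used after Demon::Sidekiq.restart below).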
      def force_kill_rogue_sidekiq
        info = `ps -eo pid,rss,args | grep sidekiq | grep -v grep | awk '{print $1,$2}'`
        info
          .split("\n")
          .each do |row|
            pid, mem = row.split(" ").map(&:to_i)
            if pid > 0 && (mem * 1024) > max_allowed_sidekiq_rss
              Rails.logger.warn "Detected rogue Sidekiq pid #{pid} mem #{mem * 1024}, killing"
              begin
                Process.kill("KILL", pid)
              rescue StandardError
                nil
              end
            end
          end
      end
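
      # Roughly every 30 minutes, restart Sidekiq if it is using too much
      # memory or its heartbeat job has stopped reporting.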
      def check_sidekiq_heartbeat
        @sidekiq_heartbeat_interval ||= 30.minutes
        @sidekiq_next_heartbeat_check ||= Time.now.to_i + @sidekiq_heartbeat_interval

        if @sidekiq_next_heartbeat_check < Time.now.to_i
          last_heartbeat = Jobs::RunHeartbeat.last_heartbeat
          restart = false

          sidekiq_rss = max_sidekiq_rss
          if sidekiq_rss > max_allowed_sidekiq_rss
            Rails.logger.warn(
              "Sidekiq is consuming too much memory (using: %0.2fM) for '%s', restarting" %
                [(sidekiq_rss.to_f / 1.megabyte), ENV["DISCOURSE_HOSTNAME"]],
            )

            restart = true
          end

          if last_heartbeat < Time.now.to_i - @sidekiq_heartbeat_interval
            Rails.logger.warn "Sidekiq heartbeat test failed, restarting"

            restart = true
          end
          @sidekiq_next_heartbeat_check = Time.now.to_i + @sidekiq_heartbeat_interval

          if restart
            Demon::Sidekiq.restart
            sleep 10
            force_kill_rogue_sidekiq
          end
          Discourse.redis.close
        end
      end
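
      # The EmailSync demon gets the same treatment: track its memory usage and
      # heartbeat, and restart it when either check fails.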
      def max_email_sync_rss
        return 0 if Demon::EmailSync.demons.empty?

        email_sync_pids = Demon::EmailSync.demons.map { |uid, demon| demon.pid }
        return 0 if email_sync_pids.empty?

        rss =
          `ps -eo pid,rss,args | grep '#{email_sync_pids.join("|")}' | grep -v grep | awk '{print $2}'`.split(
            "\n",
          )
            .map(&:to_i)
            .max

        (rss || 0) * 1024
      end

      def max_allowed_email_sync_rss
        [ENV["UNICORN_EMAIL_SYNC_MAX_RSS"].to_i, 500].max.megabytes
      end

      def check_email_sync_heartbeat
        # Skip first check to let process warm up
        @email_sync_next_heartbeat_check ||= (Time.now + Demon::EmailSync::HEARTBEAT_INTERVAL).to_i

        return if @email_sync_next_heartbeat_check > Time.now.to_i
        @email_sync_next_heartbeat_check = (Time.now + Demon::EmailSync::HEARTBEAT_INTERVAL).to_i

        restart = false

        # Restart process if it does not respond anymore
        last_heartbeat_ago =
          Time.now.to_i - Discourse.redis.get(Demon::EmailSync::HEARTBEAT_KEY).to_i
        if last_heartbeat_ago > Demon::EmailSync::HEARTBEAT_INTERVAL.to_i
          Rails.logger.warn(
            "EmailSync heartbeat test failed (last heartbeat was #{last_heartbeat_ago}s ago), restarting",
          )

          restart = true
        end

        # Restart process if memory usage is too high
        email_sync_rss = max_email_sync_rss
        if email_sync_rss > max_allowed_email_sync_rss
          Rails.logger.warn(
            "EmailSync is consuming too much memory (using: %0.2fM) for '%s', restarting" %
              [(email_sync_rss.to_f / 1.megabyte), ENV["DISCOURSE_HOSTNAME"]],
          )

          restart = true
        end

        Demon::EmailSync.restart if restart
      end
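
      # master_sleep is called repeatedly by the Unicorn master between
      # housekeeping cycles; piggyback demon supervision on it before
      # delegating to the original implementation.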
      def master_sleep(sec)
        sidekiqs = ENV["UNICORN_SIDEKIQS"].to_i
        if sidekiqs > 0
          Demon::Sidekiq.ensure_running
          check_sidekiq_heartbeat
        end

        if ENV["DISCOURSE_ENABLE_EMAIL_SYNC_DEMON"] == "true"
          Demon::EmailSync.ensure_running
          check_email_sync_heartbeat
        end

        DiscoursePluginRegistry.demon_processes.each { |demon_class| demon_class.ensure_running }

        master_sleep_orig(sec)
      end
    end
  end

  Discourse.redis.close

  # Throttle the master from forking too quickly by sleeping. Due
  # to the implementation of standard Unix signal handlers, this
  # helps (but does not completely) prevent identical, repeated signals
  # from being lost when the receiving process is busy.
  sleep 1 if !Rails.env.development?
end
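
# after_fork runs in each newly forked worker: re-establish per-process state
# and install the USR2 backtrace dump handler used by murder_lazy_workers above.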
after_fork do |server, worker|
  DiscourseEvent.trigger(:web_fork_started)
  Discourse.after_fork
  SignalTrapLogger.instance.after_fork

  Signal.trap("USR2") do
    message = <<~MSG
    Unicorn worker received USR2 signal indicating it is about to time out, dumping backtrace for main thread
    #{Thread.current.backtrace&.join("\n")}
    MSG

    SignalTrapLogger.instance.log(Rails.logger, message, level: :warn)
  end
end