# frozen_string_literal: true

require 'rails_helper'

describe DiscourseRedis do
  let(:slave_host) { 'testhost' }
  let(:slave_port) { 1234 }

  let(:config) do
    DiscourseRedis.config.dup.merge(slave_host: 'testhost', slave_port: 1234, connector: DiscourseRedis::Connector)
  end
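
  # FallbackHandler.instance is a process-wide singleton, so examples that
  # flip its `master` flag restore it once they finish.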
  let(:fallback_handler) { DiscourseRedis::FallbackHandler.instance }
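
  # `ignore_readonly` should swallow the READONLY error a redis replica raises
  # on writes and return nil instead of propagating it.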
  it "ignore_readonly returns nil from a pure exception" do
    result = DiscourseRedis.ignore_readonly { raise Redis::CommandError.new("READONLY") }
    expect(result).to eq(nil)
  end

  describe 'redis commands' do
    let(:raw_redis) { Redis.new(DiscourseRedis.config) }
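
    # Use a raw, un-namespaced connection and start every example from a
    # clean keyspace.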
    before do
      raw_redis.flushall
    end

    after do
      raw_redis.flushall
    end

    describe 'when namespace is enabled' do
      let(:redis) { DiscourseRedis.new }
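
      # Keys written through the namespaced client are transparently prefixed
      # with the current namespace ('default' here) and stripped again on read.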
      it 'should append namespace to the keys' do
        raw_redis.set('default:key', 1)
        raw_redis.set('test:key2', 1)

        expect(redis.keys).to include('key')
        expect(redis.keys).to_not include('key2')
        expect(redis.scan_each.to_a).to eq(['key'])

        redis.scan_each.each do |key|
          expect(key).to eq('key')
        end

        redis.del('key')

        expect(raw_redis.get('default:key')).to eq(nil)
        expect(redis.scan_each.to_a).to eq([])

        raw_redis.set('default:key1', '1')
        raw_redis.set('default:key2', '2')

        expect(redis.mget('key1', 'key2')).to eq(['1', '2'])
        expect(redis.scan_each.to_a).to contain_exactly('key1', 'key2')
      end
    end

    describe 'when namespace is disabled' do
      let(:redis) { DiscourseRedis.new(nil, namespace: false) }

      it 'should not append any namespace to the keys' do
        raw_redis.set('default:key', 1)
        raw_redis.set('test:key2', 1)

        expect(redis.keys).to include('default:key', 'test:key2')

        redis.del('key')

        expect(raw_redis.get('key')).to eq(nil)

        raw_redis.set('key1', '1')
        raw_redis.set('key2', '2')

        expect(redis.mget('key1', 'key2')).to eq(['1', '2'])
      end
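
      # Writes against a read-only (replica) redis become no-ops while
      # flagging the site as recently read-only.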
      it 'should noop a readonly redis' do
        expect(Discourse.recently_readonly?).to eq(false)

        redis.without_namespace
          .expects(:set)
          .raises(Redis::CommandError.new("READONLY"))

        redis.set('key', 1)

        expect(Discourse.recently_readonly?).to eq(true)
      end
    end
  end

  context '.slave_host' do
    it 'should return the right config' do
      slave_config = DiscourseRedis.slave_config(config)
      expect(slave_config[:host]).to eq(slave_host)
      expect(slave_config[:port]).to eq(slave_port)
    end
  end
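
  # A READONLY response to a write means we are talking to a replica, so the
  # fallback handler is asked to verify whether the master has recovered.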
  context 'when redis connection is to a slave redis server' do
    it 'should check the status of the master server' do
      begin
        fallback_handler.master = false
        Discourse.redis.without_namespace.expects(:set).raises(Redis::CommandError.new("READONLY"))
        fallback_handler.expects(:verify_master).once
        Discourse.redis.set('test', '1')
      ensure
        fallback_handler.master = true
        Discourse.redis.del('test')
      end
    end
  end

  describe DiscourseRedis::Connector do
    let(:connector) { DiscourseRedis::Connector.new(config) }

    after do
      fallback_handler.master = true
    end

    it 'should return the master config when master is up' do
      expect(connector.resolve).to eq(config)
    end
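
    # Minimal stand-in for a raw redis connection: it exposes the `call` and
    # `disconnect` interface the connector drives, but every command raises
    # the injected error.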
    class BrokenRedis
      def initialize(error)
        @error = error
      end

      def call(*args)
        raise @error
      end

      def disconnect
      end
    end

    it 'should return the slave config when master is down' do
      error = Redis::CannotConnectError

      expect do
        connector.resolve(BrokenRedis.new(error))
      end.to raise_error(Redis::CannotConnectError)

      config = connector.resolve

      expect(config[:host]).to eq(slave_host)
      expect(config[:port]).to eq(slave_port)
    end

    it "should return the slave config when master's hostname cannot be resolved" do
      error = RuntimeError.new('Name or service not known')

      expect do
        connector.resolve(BrokenRedis.new(error))
      end.to raise_error(error)

      expect(fallback_handler.master).to eq(false)

      config = connector.resolve

      expect(config[:host]).to eq(slave_host)
      expect(config[:port]).to eq(slave_port)
      expect(fallback_handler.master).to eq(false)
    end
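
    # While a restarted master is still loading its dataset, its INFO
    # persistence output carries a loading marker; the connector keeps
    # resolving to the slave until that clears.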
    it "should return the slave config when master is still loading data" do
      Redis::Client.any_instance
        .expects(:call)
        .with([:info, :persistence])
        .returns("
          someconfig:haha\r
          #{DiscourseRedis::FallbackHandler::MASTER_LOADING_STATUS}
        ")

      config = connector.resolve

      expect(config[:host]).to eq(slave_host)
      expect(config[:port]).to eq(slave_port)
    end
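
    # Errors that don't indicate a dead or unreachable master (a generic
    # RuntimeError here) should be raised on every resolve, not swallowed
    # after the first attempt.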
    it "should raise the right error" do
      error = RuntimeError.new('test')

      2.times do
        expect { connector.resolve(BrokenRedis.new(error)) }
          .to raise_error(error)
      end
    end
  end

  describe DiscourseRedis::FallbackHandler do
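    # The handler mutates MessageBus's keepalive interval when redis goes
    # read-only, so save and restore it around every example.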
    before do
      @original_keepalive_interval = MessageBus.keepalive_interval
    end

    after do
      fallback_handler.master = true
      MessageBus.keepalive_interval = @original_keepalive_interval
    end

    describe '#initiate_fallback_to_master' do
      it 'should return the right value if the master server is still down' do
        fallback_handler.master = false
        Redis::Client.any_instance.expects(:call).with([:info]).returns("Some other stuff")

        expect(fallback_handler.initiate_fallback_to_master).to eq(false)
        expect(MessageBus.keepalive_interval).to eq(0)
      end
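
      # Once the master's INFO output reports the master role and a finished
      # data load, the handler flips back to the master.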
      it 'should fallback to the master server once it is up' do
        fallback_handler.master = false
        master_conn = mock('master')
        slave_conn = mock('slave')

        Redis::Client.expects(:new)
          .with(DiscourseRedis.config)
          .returns(master_conn)

        Redis::Client.expects(:new)
          .with(DiscourseRedis.slave_config)
          .returns(slave_conn)

        master_conn.expects(:call)
          .with([:info])
          .returns("
            #{DiscourseRedis::FallbackHandler::MASTER_ROLE_STATUS}\r\n
            #{DiscourseRedis::FallbackHandler::MASTER_LOADED_STATUS}
          ")
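
        # CLIENT KILL TYPE <type> is issued against the slave for every
        # connection type the handler tracks, forcing clients to reconnect
        # (and thereby re-resolve to the restored master).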
        DiscourseRedis::FallbackHandler::CONNECTION_TYPES.each do |connection_type|
          slave_conn.expects(:call).with(
            [:client, [:kill, 'type', connection_type]]
          )
        end

        master_conn.expects(:disconnect)
        slave_conn.expects(:disconnect)

        expect(fallback_handler.initiate_fallback_to_master).to eq(true)
        expect(fallback_handler.master).to eq(true)
        expect(Discourse.recently_readonly?).to eq(false)
        expect(MessageBus.keepalive_interval).to eq(-1)
      end
    end
  end
end