Skip to content

Commit

Permalink
Test cache without SolidCache.connects_to set
Browse files Browse the repository at this point in the history
  • Loading branch information
djmb committed Sep 22, 2023
1 parent efbfabc commit 7bc11a2
Show file tree
Hide file tree
Showing 14 changed files with 287 additions and 255 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,5 @@ jobs:
bin/rails db:setup
- name: Run tests
run: bin/rails test
- name: Run tests (no connects-to)
run: NO_CONNECTS_TO=true bin/rails test
7 changes: 3 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,12 +42,11 @@ $ gem install solid_cache
Adding the cache to your main database:

```bash
$ bin/rails solid_cache:install:migrations
$ bin/rails solid_cache:install
```

Install and run migrations:
Then run migrations:
```bash
$ bin/rails solid_cache:install:migrations
$ bin/rails db:migrate
```

Expand Down Expand Up @@ -82,7 +81,7 @@ Solid cache supports these options in addition to the universal `ActiveSupport::
- `max_age` - the maximum age of entries in the cache (default: `2.weeks.to_i`)
- `max_entries` - the maximum number of entries allowed in the cache (default: `2.weeks.to_i`)
- `cluster` - a Hash of options for the cache database cluster, e.g. { shards: [:database1, :database2, :database3] }
- `clusters` - an Array of Hashes for separate cache clusters (ignored if `:cluster` is set)
- `clusters` - an Array of Hashes for multiple cache clusters (ignored if `:cluster` is set)
- `active_record_instrumentation` - whether to instrument the cache's queries (default: `true`)

For more information on cache clusters see [Sharding the cache](#sharding-the-cache)
Expand Down
4 changes: 2 additions & 2 deletions lib/solid_cache.rb
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ module SolidCache
mattr_accessor :executor, :connects_to

def self.all_shard_keys
all_shards_config&.keys
all_shards_config&.keys || [ Record.default_shard ]
end

def self.all_shards_config
Expand All @@ -20,7 +20,7 @@ def self.all_shards_config
def self.each_shard
return to_enum(:each_shard) unless block_given?

if (shards = connects_to[:shards]&.keys)
if (shards = connects_to && connects_to[:shards]&.keys)
shards.each do |shard|
Record.connected_to(shard: shard) { yield }
end
Expand Down
6 changes: 5 additions & 1 deletion lib/solid_cache/shards.rb
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,17 @@ class Shards
def initialize(options)
case options
when Array, NilClass
@names = options || SolidCache.all_shard_keys || [SolidCache::Record.default_shard]
@names = options || SolidCache.all_shard_keys
@nodes = @names.to_h { |name| [ name, name ] }
when Hash
@names = options.keys
@nodes = options.invert
end

if (unknown_shards = names - SolidCache.all_shard_keys).any?
raise ArgumentError, "Unknown #{"shard".pluralize(unknown_shards)}: #{unknown_shards.join(", ")}"
end

@consistent_hash = MaglevHash.new(@nodes.keys) if sharded?
end

Expand Down
2 changes: 1 addition & 1 deletion lib/solid_cache/store/clusters.rb
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def writing_keys(entries, failsafe:, failsafe_returning: nil)
sharded_entries.map do |shard, entries|
failsafe(failsafe, returning: failsafe_returning) do
cluster.with_shard(shard, async: async) do
yield cluster, shard, entries
yield cluster, entries
end
end
end
Expand Down
2 changes: 1 addition & 1 deletion lib/solid_cache/store/entries.rb
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def entry_write(key, payload)
end

def entry_write_multi(entries)
writing_keys(entries, failsafe: :write_multi_entries, failsafe_returning: false) do |cluster, shard, entries|
writing_keys(entries, failsafe: :write_multi_entries, failsafe_returning: false) do |cluster, entries|
Entry.write_multi(entries)
cluster.trim(entries.count)
true
Expand Down
20 changes: 11 additions & 9 deletions test/dummy/config/application.rb
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,17 @@ class Application < Rails::Application
# config.time_zone = "Central Time (US & Canada)"
# config.eager_load_paths << Rails.root.join("extras")

config.solid_cache.connects_to = {
shards: {
default: { writing: :primary, reading: :primary_replica },
default2: { writing: :primary_shard_one, reading: :primary_shard_one_replica },
primary_shard_one: { writing: :primary_shard_one },
primary_shard_two: { writing: :primary_shard_two },
secondary_shard_one: { writing: :secondary_shard_one },
secondary_shard_two: { writing: :secondary_shard_two }
unless ENV["NO_CONNECTS_TO"]
config.solid_cache.connects_to = {
shards: {
default: { writing: :primary, reading: :primary_replica },
default2: { writing: :primary_shard_one, reading: :primary_shard_one_replica },
primary_shard_one: { writing: :primary_shard_one },
primary_shard_two: { writing: :primary_shard_two },
secondary_shard_one: { writing: :secondary_shard_one },
secondary_shard_two: { writing: :secondary_shard_two }
}
}
}
end
end
end
2 changes: 1 addition & 1 deletion test/test_helper.rb
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

def lookup_store(options = {})
store_options = { namespace: @namespace }.merge(options)
store_options.merge!(cluster: { shards: [:default, :default2] }) unless store_options.key?(:cluster) || store_options.key?(:clusters)
store_options.merge!(cluster: { shards: [:default] }) unless store_options.key?(:cluster) || store_options.key?(:clusters)
ActiveSupport::Cache.lookup_store(:solid_cache_store, store_options)
end

Expand Down
270 changes: 136 additions & 134 deletions test/unit/cluster_test.rb
Original file line number Diff line number Diff line change
@@ -1,139 +1,141 @@
require "test_helper"

class ClusterTest < ActiveSupport::TestCase
setup do
@cache = nil
@namespace = "test-#{SecureRandom.hex}"
primary_cluster = { shards: [:primary_shard_one, :primary_shard_two] }
secondary_cluster = { shards: [:secondary_shard_one, :secondary_shard_two] }

@cache = lookup_store(expires_in: 60, clusters: [ primary_cluster, secondary_cluster ])
@primary_cache = lookup_store(expires_in: 60, cluster: primary_cluster)
@secondary_cache = lookup_store(expires_in: 60, cluster: secondary_cluster)
end

test "writes to both clusters" do
@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")
assert_equal 1, @primary_cache.read("foo")
assert_equal 1, @secondary_cache.read("foo")
end

test "reads from primary cluster" do
@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")

@secondary_cache.delete("foo")
assert_equal 1, @cache.read("foo")

@primary_cache.delete("foo")
assert_nil @cache.read("foo")
end

test "fetch writes to both clusters" do
@cache.fetch("foo") { 1 }
sleep 0.1

assert_equal 1, @cache.read("foo")
assert_equal 1, @primary_cache.read("foo")
assert_equal 1, @secondary_cache.read("foo")
end

test "fetch reads from primary clusters" do
@cache.fetch("foo") { 1 }
sleep 0.1
assert_equal 1, @cache.read("foo")

@primary_cache.delete("foo")
@cache.fetch("foo") { 2 }
sleep 0.1

assert_equal 2, @cache.read("foo")
assert_equal 2, @primary_cache.read("foo")
assert_equal 2, @secondary_cache.read("foo")

@secondary_cache.delete("foo")
assert_equal 2, @cache.fetch("foo") { 3 }

assert_equal 2, @primary_cache.read("foo")
assert_nil @secondary_cache.read("foo")
end

test "deletes from both cluster" do
@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")

@cache.delete("foo")
sleep 0.1

assert_nil @cache.read("foo")
assert_nil @primary_cache.read("foo")
assert_nil @secondary_cache.read("foo")
end

test "multi_writes to both clusters" do
values = { "foo" => "bar", "egg" => "spam" }
@cache.write_multi(values)
sleep 0.1
assert_equal values, @cache.read_multi("foo", "egg")
assert_equal values, @primary_cache.read_multi("foo", "egg")
assert_equal values, @secondary_cache.read_multi("foo", "egg")
end

test "delete_matched deletes from both caches" do
values = { "foo" => "bar", "baz" => "zab", "bab" => "dab" }
@cache.write_multi(values)
sleep 0.1

@cache.delete_matched("ba%")
sleep 0.1

assert_equal({ "foo" => "bar" }, @cache.read_multi(*values.keys))
assert_equal({ "foo" => "bar" }, @primary_cache.read_multi(*values.keys))
assert_equal({ "foo" => "bar" }, @secondary_cache.read_multi(*values.keys))
end

test "increment and decrement hit both clusters" do
@cache.write("foo", 1, raw: true)
sleep 0.1

assert_equal 1, @cache.read("foo", raw: true).to_i
assert_equal 1, @primary_cache.read("foo", raw: true).to_i
assert_equal 1, @secondary_cache.read("foo", raw: true).to_i

@cache.increment("foo")
sleep 0.1

assert_equal 2, @cache.read("foo", raw: true).to_i
assert_equal 2, @primary_cache.read("foo", raw: true).to_i
assert_equal 2, @secondary_cache.read("foo", raw: true).to_i

@secondary_cache.write("foo", 4, raw: true)

@cache.decrement("foo")
sleep 0.1

assert_equal 1, @cache.read("foo", raw: true).to_i
assert_equal 1, @primary_cache.read("foo", raw: true).to_i
assert_equal 3, @secondary_cache.read("foo", raw: true).to_i
end

test "cache with node names" do
@namespace = "test-#{SecureRandom.hex}"
primary_cluster = { shards: { primary_shard_one: :node1, primary_shard_two: :node2 } }
secondary_cluster = { shards: { primary_shard_one: :node3, primary_shard_two: :node4 } }

@cache = lookup_store(expires_in: 60, clusters: [ primary_cluster, secondary_cluster ])
@primary_cache = lookup_store(expires_in: 60, cluster: primary_cluster)
@secondary_cache = lookup_store(expires_in: 60, cluster: secondary_cluster)

@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")
assert_equal 1, @primary_cache.read("foo")
assert_equal 1, @secondary_cache.read("foo")
unless ENV["NO_CONNECTS_TO"]
setup do
@cache = nil
@namespace = "test-#{SecureRandom.hex}"
primary_cluster = { shards: [:primary_shard_one, :primary_shard_two] }
secondary_cluster = { shards: [:secondary_shard_one, :secondary_shard_two] }

@cache = lookup_store(expires_in: 60, clusters: [ primary_cluster, secondary_cluster ])
@primary_cache = lookup_store(expires_in: 60, cluster: primary_cluster)
@secondary_cache = lookup_store(expires_in: 60, cluster: secondary_cluster)
end

test "writes to both clusters" do
@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")
assert_equal 1, @primary_cache.read("foo")
assert_equal 1, @secondary_cache.read("foo")
end

test "reads from primary cluster" do
@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")

@secondary_cache.delete("foo")
assert_equal 1, @cache.read("foo")

@primary_cache.delete("foo")
assert_nil @cache.read("foo")
end

test "fetch writes to both clusters" do
@cache.fetch("foo") { 1 }
sleep 0.1

assert_equal 1, @cache.read("foo")
assert_equal 1, @primary_cache.read("foo")
assert_equal 1, @secondary_cache.read("foo")
end

test "fetch reads from primary clusters" do
@cache.fetch("foo") { 1 }
sleep 0.1
assert_equal 1, @cache.read("foo")

@primary_cache.delete("foo")
@cache.fetch("foo") { 2 }
sleep 0.1

assert_equal 2, @cache.read("foo")
assert_equal 2, @primary_cache.read("foo")
assert_equal 2, @secondary_cache.read("foo")

@secondary_cache.delete("foo")
assert_equal 2, @cache.fetch("foo") { 3 }

assert_equal 2, @primary_cache.read("foo")
assert_nil @secondary_cache.read("foo")
end

test "deletes from both cluster" do
@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")

@cache.delete("foo")
sleep 0.1

assert_nil @cache.read("foo")
assert_nil @primary_cache.read("foo")
assert_nil @secondary_cache.read("foo")
end

test "multi_writes to both clusters" do
values = { "foo" => "bar", "egg" => "spam" }
@cache.write_multi(values)
sleep 0.1
assert_equal values, @cache.read_multi("foo", "egg")
assert_equal values, @primary_cache.read_multi("foo", "egg")
assert_equal values, @secondary_cache.read_multi("foo", "egg")
end

test "delete_matched deletes from both caches" do
values = { "foo" => "bar", "baz" => "zab", "bab" => "dab" }
@cache.write_multi(values)
sleep 0.1

@cache.delete_matched("ba%")
sleep 0.1

assert_equal({ "foo" => "bar" }, @cache.read_multi(*values.keys))
assert_equal({ "foo" => "bar" }, @primary_cache.read_multi(*values.keys))
assert_equal({ "foo" => "bar" }, @secondary_cache.read_multi(*values.keys))
end

test "increment and decrement hit both clusters" do
@cache.write("foo", 1, raw: true)
sleep 0.1

assert_equal 1, @cache.read("foo", raw: true).to_i
assert_equal 1, @primary_cache.read("foo", raw: true).to_i
assert_equal 1, @secondary_cache.read("foo", raw: true).to_i

@cache.increment("foo")
sleep 0.1

assert_equal 2, @cache.read("foo", raw: true).to_i
assert_equal 2, @primary_cache.read("foo", raw: true).to_i
assert_equal 2, @secondary_cache.read("foo", raw: true).to_i

@secondary_cache.write("foo", 4, raw: true)

@cache.decrement("foo")
sleep 0.1

assert_equal 1, @cache.read("foo", raw: true).to_i
assert_equal 1, @primary_cache.read("foo", raw: true).to_i
assert_equal 3, @secondary_cache.read("foo", raw: true).to_i
end

test "cache with node names" do
@namespace = "test-#{SecureRandom.hex}"
primary_cluster = { shards: { primary_shard_one: :node1, primary_shard_two: :node2 } }
secondary_cluster = { shards: { primary_shard_one: :node3, primary_shard_two: :node4 } }

@cache = lookup_store(expires_in: 60, clusters: [ primary_cluster, secondary_cluster ])
@primary_cache = lookup_store(expires_in: 60, cluster: primary_cluster)
@secondary_cache = lookup_store(expires_in: 60, cluster: secondary_cluster)

@cache.write("foo", 1)
sleep 0.1
assert_equal 1, @cache.read("foo")
assert_equal 1, @primary_cache.read("foo")
assert_equal 1, @secondary_cache.read("foo")
end
end
end
Loading

0 comments on commit 7bc11a2

Please sign in to comment.