Commit 79cf5a28, authored by George Claghorn

Revert "Merge pull request #31447 from fatkodima/redis_cache-connection_pool"

This reverts commit ac74e2c5, reversing
changes made to ffdb0613.
Parent: bf311bb4
......@@ -52,7 +52,7 @@ end
gem "dalli", ">= 2.2.1"
gem "listen", ">= 3.0.5", "< 3.2", require: false
gem "libxml-ruby", platforms: :ruby
gem "connection_pool", require: false
gem "connection_pool"
# for railties app_generator_test
gem "bootsnap", ">= 1.1.0", require: false
......
......@@ -160,23 +160,6 @@ class Store
attr_reader :silence, :options
alias :silence? :silence
class << self
private
# Extracts connection-pool sizing settings from a cache options hash.
#
# Recognizes +:pool_size+ (mapped to +:size+) and +:pool_timeout+
# (mapped to +:timeout+), the two options ConnectionPool.new accepts.
# Returns an empty hash when neither is present, which callers use as
# the signal that no pooling was requested.
def retrieve_pool_options(options)
  pool_options = {}
  pool_options[:size] = options[:pool_size] if options[:pool_size]
  pool_options[:timeout] = options[:pool_timeout] if options[:pool_timeout]
  pool_options
end
# Loads the +connection_pool+ gem on demand.
#
# If the gem is missing, prints an installation hint to stderr and
# re-raises the LoadError so the caller fails loudly rather than
# continuing without pooling support.
def ensure_connection_pool_added!
  begin
    require "connection_pool"
  rescue LoadError => error
    $stderr.puts "You don't have connection_pool installed in your application. Please add it to your Gemfile and run bundle install"
    raise error
  end
end
end
# Creates a new cache. The options will be passed to any write method calls
# except for <tt>:namespace</tt> which can be used to set the global
# namespace for the cache.
......
......@@ -63,12 +63,21 @@ def self.build_mem_cache(*addresses) # :nodoc:
addresses = addresses.flatten
options = addresses.extract_options!
addresses = ["localhost:11211"] if addresses.empty?
pool_options = retrieve_pool_options(options)
pool_options = {}
pool_options[:size] = options[:pool_size] if options[:pool_size]
pool_options[:timeout] = options[:pool_timeout] if options[:pool_timeout]
if pool_options.empty?
Dalli::Client.new(addresses, options)
else
ensure_connection_pool_added!
begin
require "connection_pool"
rescue LoadError => e
$stderr.puts "You don't have connection_pool installed in your application. Please add it to your Gemfile and run bundle install"
raise e
end
ConnectionPool.new(pool_options) { Dalli::Client.new(addresses, options.merge(threadsafe: false)) }
end
end
......
......@@ -20,31 +20,6 @@
module ActiveSupport
module Cache
# Duck-type shim that gives a bare connection object the same +with+
# API as ConnectionPool: +with+ simply yields the receiver itself.
# This lets callers write +conn.with { |c| ... }+ uniformly, whether
# +conn+ is a real pool or a single unpooled connection.
module ConnectionPoolLike
  def with
    yield self
  end
end
::Redis.include(ConnectionPoolLike)
# ::Redis::Distributed subclass whose nodes may be connection-pooled.
#
# When the node options carry +:pool_size+ or +:pool_timeout+, the node
# is added to the ring as a ConnectionPool that lazily builds ::Redis
# clients; otherwise behavior is identical to the parent class.
class RedisDistributedWithConnectionPool < ::Redis::Distributed
  def add_node(options)
    pool_options = {}
    pool_options[:size] = options[:pool_size] if options[:pool_size]
    pool_options[:timeout] = options[:pool_timeout] if options[:pool_timeout]

    # No pool requested: defer entirely to ::Redis::Distributed#add_node.
    return super if pool_options.empty?

    # Mirror the parent's option normalization (String URL → hash,
    # merged over the ring's defaults) before wrapping in a pool.
    node_options = options.is_a?(String) ? { url: options } : options
    node_options = @default_options.merge(node_options)
    @ring.add_node(ConnectionPool.new(pool_options) { ::Redis.new(node_options) })
  end
end
# Redis cache store.
#
# Deployment note: Take care to use a *dedicated Redis cache* rather
......@@ -147,7 +122,7 @@ def build_redis(redis: nil, url: nil, **redis_options) #:nodoc:
private
def build_redis_distributed_client(urls:, **redis_options)
RedisDistributedWithConnectionPool.new([], DEFAULT_REDIS_OPTIONS.merge(redis_options)).tap do |dist|
::Redis::Distributed.new([], DEFAULT_REDIS_OPTIONS.merge(redis_options)).tap do |dist|
urls.each { |u| dist.add_node url: u }
end
end
......@@ -197,7 +172,7 @@ def initialize(namespace: nil, compress: true, compress_threshold: 1.kilobyte, e
end
def redis
@redis ||= wrap_in_connection_pool(self.class.build_redis(**redis_options))
@redis ||= self.class.build_redis(**redis_options)
end
def inspect
......@@ -236,7 +211,7 @@ def delete_matched(matcher, options = nil)
instrument :delete_matched, matcher do
case matcher
when String
redis.with { |c| c.eval DELETE_GLOB_LUA, [], [namespace_key(matcher, options)] }
redis.eval DELETE_GLOB_LUA, [], [namespace_key(matcher, options)]
else
raise ArgumentError, "Only Redis glob strings are supported: #{matcher.inspect}"
end
......@@ -254,7 +229,7 @@ def delete_matched(matcher, options = nil)
def increment(name, amount = 1, options = nil)
instrument :increment, name, amount: amount do
failsafe :increment do
redis.with { |c| c.incrby normalize_key(name, options), amount }
redis.incrby normalize_key(name, options), amount
end
end
end
......@@ -270,7 +245,7 @@ def increment(name, amount = 1, options = nil)
def decrement(name, amount = 1, options = nil)
instrument :decrement, name, amount: amount do
failsafe :decrement do
redis.with { |c| c.decrby normalize_key(name, options), amount }
redis.decrby normalize_key(name, options), amount
end
end
end
......@@ -292,7 +267,7 @@ def clear(options = nil)
if namespace = merged_options(options)[namespace]
delete_matched "*", namespace: namespace
else
redis.with { |c| c.flushdb }
redis.flushdb
end
end
end
......@@ -308,21 +283,6 @@ def mset_capable? #:nodoc:
end
private
# Wraps a plain ::Redis connection in a ConnectionPool when pool
# options (+:pool_size+ / +:pool_timeout+) are present in the store's
# redis_options. Anything that is not a bare ::Redis — e.g. a
# Redis::Distributed or an already-built pool — is returned unchanged,
# as are bare connections when no pool options were configured.
def wrap_in_connection_pool(redis_connection)
  return redis_connection unless redis_connection.is_a?(::Redis)

  pool_options = self.class.send(:retrieve_pool_options, redis_options)
  return redis_connection if pool_options.empty?

  # connection_pool is an optional dependency; load (or fail loudly) first.
  self.class.send(:ensure_connection_pool_added!)
  ConnectionPool.new(pool_options) { redis_connection }
end
def set_redis_capabilities
case redis
when Redis::Distributed
......@@ -338,7 +298,7 @@ def set_redis_capabilities
# Read an entry from the cache.
def read_entry(key, options = nil)
failsafe :read_entry do
deserialize_entry redis.with { |c| c.get(key) }
deserialize_entry redis.get(key)
end
end
......@@ -349,7 +309,7 @@ def read_multi_mget(*names)
keys = names.map { |name| normalize_key(name, options) }
values = failsafe(:read_multi_mget, returning: {}) do
redis.with { |c| c.mget(*keys) }
redis.mget(*keys)
end
names.zip(values).each_with_object({}) do |(name, value), results|
......@@ -381,9 +341,9 @@ def write_entry(key, entry, unless_exist: false, raw: false, expires_in: nil, ra
modifiers[:nx] = unless_exist
modifiers[:px] = (1000 * expires_in.to_f).ceil if expires_in
redis.with { |c| c.set key, value, modifiers }
redis.set key, value, modifiers
else
redis.with { |c| c.set key, value }
redis.set key, value
end
end
end
......@@ -391,7 +351,7 @@ def write_entry(key, entry, unless_exist: false, raw: false, expires_in: nil, ra
# Delete an entry from the cache.
def delete_entry(key, options)
failsafe :delete_entry, returning: false do
redis.with { |c| c.del key }
redis.del key
end
end
......@@ -400,7 +360,7 @@ def write_multi_entries(entries, expires_in: nil, **options)
if entries.any?
if mset_capable? && expires_in.nil?
failsafe :write_multi_entries do
redis.with { |c| c.mapped_mset(entries) }
redis.mapped_mset(entries)
end
else
super
......
......@@ -8,18 +8,6 @@
module ActiveSupport::Cache::RedisCacheStoreTests
DRIVER = %w[ ruby hiredis ].include?(ENV["REDIS_DRIVER"]) ? ENV["REDIS_DRIVER"] : "hiredis"
# Emulates a latency on Redis's back-end for the key latency to facilitate
# connection pool testing.
# Redis subclass that emulates a slow back-end: any GET whose key
# matches /latency/ blocks for three seconds instead of answering,
# which lets the connection-pool tests exercise checkout timeouts.
class SlowRedis < Redis
  def get(key, options = {})
    return sleep(3) if key =~ /latency/

    super
  end
end
class LookupTest < ActiveSupport::TestCase
test "may be looked up as :redis_cache_store" do
assert_kind_of ActiveSupport::Cache::RedisCacheStore,
......@@ -122,26 +110,6 @@ class RedisCacheStoreCommonBehaviorTest < StoreTest
include AutoloadingCacheBehavior
end
# Runs the shared ConnectionPoolBehavior test suite against the Redis
# cache store.
class RedisCacheStoreConnectionPoolBehaviourTest < StoreTest
  include ConnectionPoolBehavior

  private
    # Store identifier handed to the shared behavior module.
    def store
      :redis_cache_store
    end

    # Temporarily replaces the top-level +Redis+ constant with
    # SlowRedis (which stalls on keys matching /latency/) for the
    # duration of the block. +remove_const+ precedes +const_set+ so the
    # reassignment does not trip Ruby's already-initialized-constant
    # warning; the +ensure+ clause restores the original constant even
    # when the block raises.
    def emulating_latency
      old_redis = Object.send(:remove_const, :Redis)
      Object.const_set(:Redis, SlowRedis)
      yield
    ensure
      Object.send(:remove_const, :Redis)
      Object.const_set(:Redis, old_redis)
    end
end
# Separate test class so we can omit the namespace which causes expected,
# appropriate complaints about incompatible string encodings.
class KeyEncodingSafetyTest < StoreTest
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment