Diffstat (limited to 'activerecord/lib/active_record/connection_adapters/abstract')
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/connection_pool.rb     | 538
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/database_limits.rb     |   2
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/database_statements.rb |  33
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/query_cache.rb         |  40
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/quoting.rb             |  88
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/schema_creation.rb     |   8
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/schema_definitions.rb  | 101
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/schema_dumper.rb       |  75
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb   | 183
-rw-r--r--  activerecord/lib/active_record/connection_adapters/abstract/transaction.rb         |   6
10 files changed, 546 insertions(+), 528 deletions(-)
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/connection_pool.rb b/activerecord/lib/active_record/connection_adapters/abstract/connection_pool.rb
index c341773be1..2d62fd8d50 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/connection_pool.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/connection_pool.rb
@@ -1,6 +1,6 @@
-require 'thread'
-require 'concurrent/map'
-require 'monitor'
+require "thread"
+require "concurrent/map"
+require "monitor"
module ActiveRecord
# Raised when a connection could not be obtained within the connection
@@ -74,7 +74,7 @@ module ActiveRecord
#--
# Synchronization policy:
# * all public methods can be called outside +synchronize+
- # * access to these i-vars needs to be in +synchronize+:
+ # * access to these instance variables needs to be in +synchronize+:
# * @connections
# * @now_connecting
# * private methods that require being called in a +synchronize+ blocks
@@ -150,61 +150,61 @@ module ActiveRecord
private
- def internal_poll(timeout)
- no_wait_poll || (timeout && wait_poll(timeout))
- end
+ def internal_poll(timeout)
+ no_wait_poll || (timeout && wait_poll(timeout))
+ end
- def synchronize(&block)
- @lock.synchronize(&block)
- end
+ def synchronize(&block)
+ @lock.synchronize(&block)
+ end
- # Test if the queue currently contains any elements.
- def any?
- !@queue.empty?
- end
+ # Test if the queue currently contains any elements.
+ def any?
+ !@queue.empty?
+ end
- # A thread can remove an element from the queue without
- # waiting if and only if the number of currently available
- # connections is strictly greater than the number of waiting
- # threads.
- def can_remove_no_wait?
- @queue.size > @num_waiting
- end
+ # A thread can remove an element from the queue without
+ # waiting if and only if the number of currently available
+ # connections is strictly greater than the number of waiting
+ # threads.
+ def can_remove_no_wait?
+ @queue.size > @num_waiting
+ end
- # Removes and returns the head of the queue if possible, or nil.
- def remove
- @queue.shift
- end
+ # Removes and returns the head of the queue if possible, or nil.
+ def remove
+ @queue.shift
+ end
- # Remove and return the head the queue if the number of
- # available elements is strictly greater than the number of
- # threads currently waiting. Otherwise, return nil.
- def no_wait_poll
- remove if can_remove_no_wait?
- end
+ # Remove and return the head the queue if the number of
+ # available elements is strictly greater than the number of
+ # threads currently waiting. Otherwise, return nil.
+ def no_wait_poll
+ remove if can_remove_no_wait?
+ end
- # Waits on the queue up to +timeout+ seconds, then removes and
- # returns the head of the queue.
- def wait_poll(timeout)
- @num_waiting += 1
+ # Waits on the queue up to +timeout+ seconds, then removes and
+ # returns the head of the queue.
+ def wait_poll(timeout)
+ @num_waiting += 1
- t0 = Time.now
- elapsed = 0
- loop do
- @cond.wait(timeout - elapsed)
+ t0 = Time.now
+ elapsed = 0
+ loop do
+ @cond.wait(timeout - elapsed)
- return remove if any?
+ return remove if any?
- elapsed = Time.now - t0
- if elapsed >= timeout
- msg = 'could not obtain a connection from the pool within %0.3f seconds (waited %0.3f seconds); all pooled connections were in use' %
- [timeout, elapsed]
- raise ConnectionTimeoutError, msg
+ elapsed = Time.now - t0
+ if elapsed >= timeout
+ msg = "could not obtain a connection from the pool within %0.3f seconds (waited %0.3f seconds); all pooled connections were in use" %
+ [timeout, elapsed]
+ raise ConnectionTimeoutError, msg
+ end
end
+ ensure
+ @num_waiting -= 1
end
- ensure
- @num_waiting -= 1
- end
end
# Adds the ability to turn a basic fair FIFO queue into one
@@ -274,11 +274,11 @@ module ActiveRecord
include BiasableQueue
private
- def internal_poll(timeout)
- conn = super
- conn.lease if conn
- conn
- end
+ def internal_poll(timeout)
+ conn = super
+ conn.lease if conn
+ conn
+ end
end
# Every +frequency+ seconds, the reaper will call +reap+ on +pool+.
@@ -329,17 +329,17 @@ module ActiveRecord
# default max pool size to 5
@size = (spec.config[:pool] && spec.config[:pool].to_i) || 5
- # The cache of threads mapped to reserved connections, the sole purpose
- # of the cache is to speed-up +connection+ method, it is not the authoritative
- # registry of which thread owns which connection, that is tracked by
- # +connection.owner+ attr on each +connection+ instance.
+ # This variable tracks the cache of threads mapped to reserved connections, with the
+ # sole purpose of speeding up the +connection+ method. It is not the authoritative
+ # registry of which thread owns which connection. Connection ownership is tracked by
+ # the +connection.owner+ attr on each +connection+ instance.
# The invariant works like this: if there is mapping of <tt>thread => conn</tt>,
- # then that +thread+ does indeed own that +conn+, however an absence of a such
- # mapping does not mean that the +thread+ doesn't own the said connection, in
+ # then that +thread+ does indeed own that +conn+. However, an absence of a such
+ # mapping does not mean that the +thread+ doesn't own the said connection. In
# that case +conn.owner+ attr should be consulted.
# Access and modification of +@thread_cached_conns+ does not require
# synchronization.
- @thread_cached_conns = Concurrent::Map.new(:initial_capacity => @size)
+ @thread_cached_conns = Concurrent::Map.new(initial_capacity: @size)
@connections = []
@automatic_reconnect = true
@@ -364,10 +364,10 @@ module ActiveRecord
@thread_cached_conns[connection_cache_key(Thread.current)] ||= checkout
end
- # Is there an open connection that is being used for the current thread?
+ # Returns true if there is an open connection being used for the current thread.
#
# This method only works for connections that have been obtained through
- # #connection or #with_connection methods, connections obtained through
+ # #connection or #with_connection methods. Connections obtained through
# #checkout will not be detected by #active_connection?
def active_connection?
@thread_cached_conns[connection_cache_key(Thread.current)]
@@ -415,7 +415,10 @@ module ActiveRecord
with_exclusively_acquired_all_connections(raise_on_acquisition_timeout) do
synchronize do
@connections.each do |conn|
- checkin conn
+ if conn.in_use?
+ conn.steal!
+ checkin conn
+ end
conn.disconnect!
end
@connections = []
@@ -426,9 +429,9 @@ module ActiveRecord
# Disconnects all connections in the pool, and clears the pool.
#
- # The pool first tries to gain ownership of all connections, if unable to
+ # The pool first tries to gain ownership of all connections. If unable to
# do so within a timeout interval (default duration is
- # <tt>spec.config[:checkout_timeout] * 2</tt> seconds), the pool is forcefully
+ # <tt>spec.config[:checkout_timeout] * 2</tt> seconds), then the pool is forcefully
# disconnected without any regard for other connection owning threads.
def disconnect!
disconnect(false)
@@ -447,7 +450,10 @@ module ActiveRecord
with_exclusively_acquired_all_connections(raise_on_acquisition_timeout) do
synchronize do
@connections.each do |conn|
- checkin conn
+ if conn.in_use?
+ conn.steal!
+ checkin conn
+ end
conn.disconnect! if conn.requires_reloading?
end
@connections.delete_if(&:requires_reloading?)
@@ -474,9 +480,9 @@ module ActiveRecord
# Clears the cache which maps classes and re-connects connections that
# require reloading.
#
- # The pool first tries to gain ownership of all connections, if unable to
+ # The pool first tries to gain ownership of all connections. If unable to
# do so within a timeout interval (default duration is
- # <tt>spec.config[:checkout_timeout] * 2</tt> seconds), the pool forcefully
+ # <tt>spec.config[:checkout_timeout] * 2</tt> seconds), then the pool forcefully
# clears the cache and reloads connections without any regard for other
# connection owning threads.
def clear_reloadable_connections!
@@ -530,20 +536,20 @@ module ActiveRecord
@available.delete conn
# @available.any_waiting? => true means that prior to removing this
- # conn, the pool was at its max size (@connections.size == @size)
- # this would mean that any threads stuck waiting in the queue wouldn't
+ # conn, the pool was at its max size (@connections.size == @size).
+ # This would mean that any threads stuck waiting in the queue wouldn't
# know they could checkout_new_connection, so let's do it for them.
# Because condition-wait loop is encapsulated in the Queue class
# (that in turn is oblivious to ConnectionPool implementation), threads
- # that are "stuck" there are helpless, they have no way of creating
+ # that are "stuck" there are helpless. They have no way of creating
# new connections and are completely reliant on us feeding available
# connections into the Queue.
needs_new_connection = @available.any_waiting?
end
# This is intentionally done outside of the synchronized section as we
- # would like not to hold the main mutex while checking out new connections,
- # thus there is some chance that needs_new_connection information is now
+ # would like not to hold the main mutex while checking out new connections.
+ # Thus there is some chance that needs_new_connection information is now
# stale, we can live with that (bulk_make_new_connections will make
# sure not to exceed the pool's @size limit).
bulk_make_new_connections(1) if needs_new_connection
@@ -556,17 +562,17 @@ module ActiveRecord
stale_connections = synchronize do
@connections.select do |conn|
conn.in_use? && !conn.owner.alive?
+ end.each do |conn|
+ conn.steal!
end
end
stale_connections.each do |conn|
- synchronize do
- if conn.active?
- conn.reset!
- checkin conn
- else
- remove conn
- end
+ if conn.active?
+ conn.reset!
+ checkin conn
+ else
+ remove conn
end
end
end
@@ -576,205 +582,205 @@ module ActiveRecord
end
private
- #--
- # this is unfortunately not concurrent
- def bulk_make_new_connections(num_new_conns_needed)
- num_new_conns_needed.times do
- # try_to_checkout_new_connection will not exceed pool's @size limit
- if new_conn = try_to_checkout_new_connection
- # make the new_conn available to the starving threads stuck @available Queue
- checkin(new_conn)
+ #--
+ # this is unfortunately not concurrent
+ def bulk_make_new_connections(num_new_conns_needed)
+ num_new_conns_needed.times do
+ # try_to_checkout_new_connection will not exceed pool's @size limit
+ if new_conn = try_to_checkout_new_connection
+ # make the new_conn available to the starving threads stuck @available Queue
+ checkin(new_conn)
+ end
end
end
- end
- #--
- # From the discussion on GitHub:
- # https://github.com/rails/rails/pull/14938#commitcomment-6601951
- # This hook-in method allows for easier monkey-patching fixes needed by
- # JRuby users that use Fibers.
- def connection_cache_key(thread)
- thread
- end
-
- # Take control of all existing connections so a "group" action such as
- # reload/disconnect can be performed safely. It is no longer enough to
- # wrap it in +synchronize+ because some pool's actions are allowed
- # to be performed outside of the main +synchronize+ block.
- def with_exclusively_acquired_all_connections(raise_on_acquisition_timeout = true)
- with_new_connections_blocked do
- attempt_to_checkout_all_existing_connections(raise_on_acquisition_timeout)
- yield
+ #--
+ # From the discussion on GitHub:
+ # https://github.com/rails/rails/pull/14938#commitcomment-6601951
+ # This hook-in method allows for easier monkey-patching fixes needed by
+ # JRuby users that use Fibers.
+ def connection_cache_key(thread)
+ thread
end
- end
- def attempt_to_checkout_all_existing_connections(raise_on_acquisition_timeout = true)
- collected_conns = synchronize do
- # account for our own connections
- @connections.select {|conn| conn.owner == Thread.current}
+ # Take control of all existing connections so a "group" action such as
+ # reload/disconnect can be performed safely. It is no longer enough to
+ # wrap it in +synchronize+ because some pool's actions are allowed
+ # to be performed outside of the main +synchronize+ block.
+ def with_exclusively_acquired_all_connections(raise_on_acquisition_timeout = true)
+ with_new_connections_blocked do
+ attempt_to_checkout_all_existing_connections(raise_on_acquisition_timeout)
+ yield
+ end
end
- newly_checked_out = []
- timeout_time = Time.now + (@checkout_timeout * 2)
+ def attempt_to_checkout_all_existing_connections(raise_on_acquisition_timeout = true)
+ collected_conns = synchronize do
+ # account for our own connections
+ @connections.select { |conn| conn.owner == Thread.current }
+ end
+
+ newly_checked_out = []
+ timeout_time = Time.now + (@checkout_timeout * 2)
- @available.with_a_bias_for(Thread.current) do
- loop do
- synchronize do
- return if collected_conns.size == @connections.size && @now_connecting == 0
- remaining_timeout = timeout_time - Time.now
- remaining_timeout = 0 if remaining_timeout < 0
- conn = checkout_for_exclusive_access(remaining_timeout)
- collected_conns << conn
- newly_checked_out << conn
+ @available.with_a_bias_for(Thread.current) do
+ loop do
+ synchronize do
+ return if collected_conns.size == @connections.size && @now_connecting == 0
+ remaining_timeout = timeout_time - Time.now
+ remaining_timeout = 0 if remaining_timeout < 0
+ conn = checkout_for_exclusive_access(remaining_timeout)
+ collected_conns << conn
+ newly_checked_out << conn
+ end
end
end
- end
- rescue ExclusiveConnectionTimeoutError
- # <tt>raise_on_acquisition_timeout == false</tt> means we are directed to ignore any
- # timeouts and are expected to just give up: we've obtained as many connections
- # as possible, note that in a case like that we don't return any of the
- # +newly_checked_out+ connections.
-
- if raise_on_acquisition_timeout
+ rescue ExclusiveConnectionTimeoutError
+ # <tt>raise_on_acquisition_timeout == false</tt> means we are directed to ignore any
+ # timeouts and are expected to just give up: we've obtained as many connections
+ # as possible, note that in a case like that we don't return any of the
+ # +newly_checked_out+ connections.
+
+ if raise_on_acquisition_timeout
+ release_newly_checked_out = true
+ raise
+ end
+ rescue Exception # if something else went wrong
+ # this can't be a "naked" rescue, because we have should return conns
+ # even for non-StandardErrors
release_newly_checked_out = true
raise
+ ensure
+ if release_newly_checked_out && newly_checked_out
+ # releasing only those conns that were checked out in this method, conns
+ # checked outside this method (before it was called) are not for us to release
+ newly_checked_out.each { |conn| checkin(conn) }
+ end
end
- rescue Exception # if something else went wrong
- # this can't be a "naked" rescue, because we have should return conns
- # even for non-StandardErrors
- release_newly_checked_out = true
- raise
- ensure
- if release_newly_checked_out && newly_checked_out
- # releasing only those conns that were checked out in this method, conns
- # checked outside this method (before it was called) are not for us to release
- newly_checked_out.each {|conn| checkin(conn)}
- end
- end
- #--
- # Must be called in a synchronize block.
- def checkout_for_exclusive_access(checkout_timeout)
- checkout(checkout_timeout)
- rescue ConnectionTimeoutError
- # this block can't be easily moved into attempt_to_checkout_all_existing_connections's
- # rescue block, because doing so would put it outside of synchronize section, without
- # being in a critical section thread_report might become inaccurate
- msg = "could not obtain ownership of all database connections in #{checkout_timeout} seconds"
-
- thread_report = []
- @connections.each do |conn|
- unless conn.owner == Thread.current
- thread_report << "#{conn} is owned by #{conn.owner}"
+ #--
+ # Must be called in a synchronize block.
+ def checkout_for_exclusive_access(checkout_timeout)
+ checkout(checkout_timeout)
+ rescue ConnectionTimeoutError
+ # this block can't be easily moved into attempt_to_checkout_all_existing_connections's
+ # rescue block, because doing so would put it outside of synchronize section, without
+ # being in a critical section thread_report might become inaccurate
+ msg = "could not obtain ownership of all database connections in #{checkout_timeout} seconds"
+
+ thread_report = []
+ @connections.each do |conn|
+ unless conn.owner == Thread.current
+ thread_report << "#{conn} is owned by #{conn.owner}"
+ end
end
- end
- msg << " (#{thread_report.join(', ')})" if thread_report.any?
+ msg << " (#{thread_report.join(', ')})" if thread_report.any?
- raise ExclusiveConnectionTimeoutError, msg
- end
+ raise ExclusiveConnectionTimeoutError, msg
+ end
- def with_new_connections_blocked
- previous_value = nil
- synchronize do
- previous_value, @new_cons_enabled = @new_cons_enabled, false
+ def with_new_connections_blocked
+ previous_value = nil
+ synchronize do
+ previous_value, @new_cons_enabled = @new_cons_enabled, false
+ end
+ yield
+ ensure
+ synchronize { @new_cons_enabled = previous_value }
end
- yield
- ensure
- synchronize { @new_cons_enabled = previous_value }
- end
- # Acquire a connection by one of 1) immediately removing one
- # from the queue of available connections, 2) creating a new
- # connection if the pool is not at capacity, 3) waiting on the
- # queue for a connection to become available.
- #
- # Raises:
- # - ActiveRecord::ConnectionTimeoutError if a connection could not be acquired
- #
- #--
- # Implementation detail: the connection returned by +acquire_connection+
- # will already be "+connection.lease+ -ed" to the current thread.
- def acquire_connection(checkout_timeout)
- # NOTE: we rely on +@available.poll+ and +try_to_checkout_new_connection+ to
- # +conn.lease+ the returned connection (and to do this in a +synchronized+
- # section), this is not the cleanest implementation, as ideally we would
- # <tt>synchronize { conn.lease }</tt> in this method, but by leaving it to +@available.poll+
- # and +try_to_checkout_new_connection+ we can piggyback on +synchronize+ sections
- # of the said methods and avoid an additional +synchronize+ overhead.
- if conn = @available.poll || try_to_checkout_new_connection
- conn
- else
- reap
- @available.poll(checkout_timeout)
+ # Acquire a connection by one of 1) immediately removing one
+ # from the queue of available connections, 2) creating a new
+ # connection if the pool is not at capacity, 3) waiting on the
+ # queue for a connection to become available.
+ #
+ # Raises:
+ # - ActiveRecord::ConnectionTimeoutError if a connection could not be acquired
+ #
+ #--
+ # Implementation detail: the connection returned by +acquire_connection+
+ # will already be "+connection.lease+ -ed" to the current thread.
+ def acquire_connection(checkout_timeout)
+ # NOTE: we rely on +@available.poll+ and +try_to_checkout_new_connection+ to
+ # +conn.lease+ the returned connection (and to do this in a +synchronized+
+ # section). This is not the cleanest implementation, as ideally we would
+ # <tt>synchronize { conn.lease }</tt> in this method, but by leaving it to +@available.poll+
+ # and +try_to_checkout_new_connection+ we can piggyback on +synchronize+ sections
+ # of the said methods and avoid an additional +synchronize+ overhead.
+ if conn = @available.poll || try_to_checkout_new_connection
+ conn
+ else
+ reap
+ @available.poll(checkout_timeout)
+ end
end
- end
- #--
- # if owner_thread param is omitted, this must be called in synchronize block
- def remove_connection_from_thread_cache(conn, owner_thread = conn.owner)
- @thread_cached_conns.delete_pair(connection_cache_key(owner_thread), conn)
- end
- alias_method :release, :remove_connection_from_thread_cache
+ #--
+ # if owner_thread param is omitted, this must be called in synchronize block
+ def remove_connection_from_thread_cache(conn, owner_thread = conn.owner)
+ @thread_cached_conns.delete_pair(connection_cache_key(owner_thread), conn)
+ end
+ alias_method :release, :remove_connection_from_thread_cache
- def new_connection
- Base.send(spec.adapter_method, spec.config).tap do |conn|
- conn.schema_cache = schema_cache.dup if schema_cache
+ def new_connection
+ Base.send(spec.adapter_method, spec.config).tap do |conn|
+ conn.schema_cache = schema_cache.dup if schema_cache
+ end
end
- end
- # If the pool is not at a +@size+ limit, establish new connection. Connecting
- # to the DB is done outside main synchronized section.
- #--
- # Implementation constraint: a newly established connection returned by this
- # method must be in the +.leased+ state.
- def try_to_checkout_new_connection
- # first in synchronized section check if establishing new conns is allowed
- # and increment @now_connecting, to prevent overstepping this pool's @size
- # constraint
- do_checkout = synchronize do
- if @new_cons_enabled && (@connections.size + @now_connecting) < @size
- @now_connecting += 1
- end
- end
- if do_checkout
- begin
- # if successfully incremented @now_connecting establish new connection
- # outside of synchronized section
- conn = checkout_new_connection
- ensure
- synchronize do
- if conn
- adopt_connection(conn)
- # returned conn needs to be already leased
- conn.lease
+ # If the pool is not at a +@size+ limit, establish new connection. Connecting
+ # to the DB is done outside main synchronized section.
+ #--
+ # Implementation constraint: a newly established connection returned by this
+ # method must be in the +.leased+ state.
+ def try_to_checkout_new_connection
+ # first in synchronized section check if establishing new conns is allowed
+ # and increment @now_connecting, to prevent overstepping this pool's @size
+ # constraint
+ do_checkout = synchronize do
+ if @new_cons_enabled && (@connections.size + @now_connecting) < @size
+ @now_connecting += 1
+ end
+ end
+ if do_checkout
+ begin
+ # if successfully incremented @now_connecting establish new connection
+ # outside of synchronized section
+ conn = checkout_new_connection
+ ensure
+ synchronize do
+ if conn
+ adopt_connection(conn)
+ # returned conn needs to be already leased
+ conn.lease
+ end
+ @now_connecting -= 1
end
- @now_connecting -= 1
end
end
end
- end
- def adopt_connection(conn)
- conn.pool = self
- @connections << conn
- end
+ def adopt_connection(conn)
+ conn.pool = self
+ @connections << conn
+ end
- def checkout_new_connection
- raise ConnectionNotEstablished unless @automatic_reconnect
- new_connection
- end
+ def checkout_new_connection
+ raise ConnectionNotEstablished unless @automatic_reconnect
+ new_connection
+ end
- def checkout_and_verify(c)
- c._run_checkout_callbacks do
- c.verify!
+ def checkout_and_verify(c)
+ c._run_checkout_callbacks do
+ c.verify!
+ end
+ c
+ rescue
+ remove c
+ c.disconnect!
+ raise
end
- c
- rescue
- remove c
- c.disconnect!
- raise
- end
end
# ConnectionHandler is a collection of ConnectionPool objects. It is used
@@ -822,13 +828,13 @@ module ActiveRecord
# should use.
#
    # The ConnectionHandler class is not coupled with the Active models, as it has no knowledge
- # about the model. The model, needs to pass a specification name to the handler,
+ # about the model. The model needs to pass a specification name to the handler,
# in order to lookup the correct connection pool.
class ConnectionHandler
def initialize
# These caches are keyed by spec.name (ConnectionSpecification#name).
- @owner_to_pool = Concurrent::Map.new(:initial_capacity => 2) do |h,k|
- h[k] = Concurrent::Map.new(:initial_capacity => 2)
+ @owner_to_pool = Concurrent::Map.new(initial_capacity: 2) do |h,k|
+ h[k] = Concurrent::Map.new(initial_capacity: 2)
end
end
@@ -842,7 +848,21 @@ module ActiveRecord
spec = resolver.spec(config)
remove_connection(spec.name)
- owner_to_pool[spec.name] = ConnectionAdapters::ConnectionPool.new(spec)
+
+ message_bus = ActiveSupport::Notifications.instrumenter
+ payload = {
+ connection_id: object_id
+ }
+ if spec
+ payload[:spec_name] = spec.name
+ payload[:config] = spec.config
+ end
+
+ message_bus.instrument("!connection.active_record", payload) do
+ owner_to_pool[spec.name] = ConnectionAdapters::ConnectionPool.new(spec)
+ end
+
+ owner_to_pool[spec.name]
end
# Returns true if there are any active connections among the connection
@@ -875,7 +895,7 @@ module ActiveRecord
# for (not necessarily the current class).
def retrieve_connection(spec_name) #:nodoc:
pool = retrieve_connection_pool(spec_name)
- raise ConnectionNotEstablished, "No connection pool with id '#{spec_name}' found." unless pool
+ raise ConnectionNotEstablished, "No connection pool with '#{spec_name}' found." unless pool
conn = pool.connection
raise ConnectionNotEstablished, "No connection for '#{spec_name}' in connection pool" unless conn
conn
@@ -890,7 +910,7 @@ module ActiveRecord
# Remove the connection for this class. This will close the active
# connection and the defined connection (if they exist). The result
- # can be used as an argument for establish_connection, for easily
+ # can be used as an argument for #establish_connection, for easily
# re-establishing the connection.
def remove_connection(spec_name)
if pool = owner_to_pool.delete(spec_name)
@@ -922,14 +942,14 @@ module ActiveRecord
private
- def owner_to_pool
- @owner_to_pool[Process.pid]
- end
+ def owner_to_pool
+ @owner_to_pool[Process.pid]
+ end
- def pool_from_any_process_for(spec_name)
- owner_to_pool = @owner_to_pool.values.find { |v| v[spec_name] }
- owner_to_pool && owner_to_pool[spec_name]
- end
+ def pool_from_any_process_for(spec_name)
+ owner_to_pool = @owner_to_pool.values.find { |v| v[spec_name] }
+ owner_to_pool && owner_to_pool[spec_name]
+ end
end
end
end
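
Beyond the quote-style and indentation cleanup, the connection_pool.rb hunks above add two behavioural changes: disconnect and clear_reloadable_connections! now steal! connections that are still in_use? before checking them in, and ConnectionHandler#establish_connection wraps pool creation in an ActiveSupport::Notifications event named "!connection.active_record" whose payload carries :connection_id, :spec_name and :config. A minimal sketch of listening for that event (the subscriber and its output are illustrative, not part of the patch):

require "active_support/notifications"

# Hypothetical subscriber: report every connection pool that gets established.
ActiveSupport::Notifications.subscribe("!connection.active_record") do |name, started, finished, id, payload|
  puts "pool established for #{payload[:spec_name]} (#{(finished - started).round(3)}s)"
end
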
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/database_limits.rb b/activerecord/lib/active_record/connection_adapters/abstract/database_limits.rb
index 6711049588..95c72f1e20 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/database_limits.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/database_limits.rb
@@ -1,7 +1,6 @@
module ActiveRecord
module ConnectionAdapters # :nodoc:
module DatabaseLimits
-
# Returns the maximum length of a table alias.
def table_alias_length
255
@@ -61,7 +60,6 @@ module ActiveRecord
def joins_per_query
256
end
-
end
end
end
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/database_statements.rb b/activerecord/lib/active_record/connection_adapters/abstract/database_statements.rb
index 507a925d32..aa2dfdd573 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/database_statements.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/database_statements.rb
@@ -10,7 +10,7 @@ module ActiveRecord
def to_sql(arel, binds = [])
if arel.respond_to?(:ast)
collected = visitor.accept(arel.ast, collector)
- collected.compile(binds.dup, self)
+ collected.compile(binds, self)
else
arel
end
@@ -18,11 +18,12 @@ module ActiveRecord
# This is used in the StatementCache object. It returns an object that
# can be used to query the database repeatedly.
- def cacheable_query(arel) # :nodoc:
+ def cacheable_query(klass, arel) # :nodoc:
+ collected = visitor.accept(arel.ast, collector)
if prepared_statements
- ActiveRecord::StatementCache.query visitor, arel.ast
+ klass.query(collected.value)
else
- ActiveRecord::StatementCache.partial_query visitor, arel.ast, collector
+ klass.partial_query(collected.value)
end
end
@@ -81,21 +82,22 @@ module ActiveRecord
# Executes +sql+ statement in the context of this connection using
# +binds+ as the bind substitutes. +name+ is logged along with
# the executed +sql+ statement.
- def exec_query(sql, name = 'SQL', binds = [], prepare: false)
+ def exec_query(sql, name = "SQL", binds = [], prepare: false)
raise NotImplementedError
end
# Executes insert +sql+ statement in the context of this connection using
# +binds+ as the bind substitutes. +name+ is logged along with
# the executed +sql+ statement.
- def exec_insert(sql, name, binds, pk = nil, sequence_name = nil)
+ def exec_insert(sql, name = nil, binds = [], pk = nil, sequence_name = nil)
+ sql, binds = sql_for_insert(sql, pk, nil, sequence_name, binds)
exec_query(sql, name, binds)
end
# Executes delete +sql+ statement in the context of this connection using
# +binds+ as the bind substitutes. +name+ is logged along with
# the executed +sql+ statement.
- def exec_delete(sql, name, binds)
+ def exec_delete(sql, name = nil, binds = [])
exec_query(sql, name, binds)
end
@@ -107,7 +109,7 @@ module ActiveRecord
# Executes update +sql+ statement in the context of this connection using
# +binds+ as the bind substitutes. +name+ is logged along with
# the executed +sql+ statement.
- def exec_update(sql, name, binds)
+ def exec_update(sql, name = nil, binds = [])
exec_query(sql, name, binds)
end
@@ -120,8 +122,7 @@ module ActiveRecord
# If the next id was calculated in advance (as in Oracle), it should be
# passed in as +id_value+.
def insert(arel, name = nil, pk = nil, id_value = nil, sequence_name = nil, binds = [])
- sql, binds, pk, sequence_name = sql_for_insert(to_sql(arel, binds), pk, id_value, sequence_name, binds)
- value = exec_insert(sql, name, binds, pk, sequence_name)
+ value = exec_insert(to_sql(arel, binds), name, binds, pk, sequence_name)
id_value || last_inserted_id(value)
end
alias create insert
@@ -244,7 +245,7 @@ module ActiveRecord
end
def reset_transaction #:nodoc:
- @transaction_manager = TransactionManager.new(self)
+ @transaction_manager = ConnectionAdapters::TransactionManager.new(self)
end
# Register a record with the current transaction so that its after_commit and after_rollback callbacks
@@ -315,7 +316,7 @@ module ActiveRecord
end
end
key_list = fixture.keys.map { |name| quote_column_name(name) }
- value_list = prepare_binds_for_database(binds).map do |value|
+ value_list = binds.map(&:value_for_database).map do |value|
begin
quote(value)
rescue TypeError
@@ -323,7 +324,7 @@ module ActiveRecord
end
end
- execute "INSERT INTO #{quote_table_name(table_name)} (#{key_list.join(', ')}) VALUES (#{value_list.join(', ')})", 'Fixture Insert'
+ execute "INSERT INTO #{quote_table_name(table_name)} (#{key_list.join(', ')}) VALUES (#{value_list.join(', ')})", "Fixture Insert"
end
def empty_insert_statement_value
@@ -342,8 +343,8 @@ module ActiveRecord
def sanitize_limit(limit)
if limit.is_a?(Integer) || limit.is_a?(Arel::Nodes::SqlLiteral)
limit
- elsif limit.to_s.include?(',')
- Arel.sql limit.to_s.split(',').map{ |i| Integer(i) }.join(',')
+ elsif limit.to_s.include?(",")
+ Arel.sql limit.to_s.split(",").map { |i| Integer(i) }.join(",")
else
Integer(limit)
end
@@ -378,7 +379,7 @@ module ActiveRecord
end
def sql_for_insert(sql, pk, id_value, sequence_name, binds)
- [sql, binds, pk, sequence_name]
+ [sql, binds]
end
def last_inserted_id(result)
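
In the database_statements.rb changes above, exec_insert, exec_delete and exec_update now default their name and binds arguments, and exec_insert resolves sql_for_insert itself rather than leaving that to insert. A rough usage sketch against the new signatures (the table and statements are invented for illustration; assumes an established connection in a Rails app):

conn = ActiveRecord::Base.connection

# name and binds may now be omitted entirely.
conn.exec_insert("INSERT INTO widgets (name) VALUES ('sprocket')")
conn.exec_update("UPDATE widgets SET name = 'cog' WHERE name = 'sprocket'")
conn.exec_delete("DELETE FROM widgets WHERE name = 'cog'")
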
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/query_cache.rb b/activerecord/lib/active_record/connection_adapters/abstract/query_cache.rb
index 0bdfd4f900..2f8a89e88e 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/query_cache.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/query_cache.rb
@@ -65,7 +65,7 @@ module ActiveRecord
if @query_cache_enabled && !locked?(arel)
arel, binds = binds_from_relation arel, binds
sql = to_sql(arel, binds)
- cache_sql(sql, binds) { super(sql, name, binds, preparable: preparable) }
+ cache_sql(sql, name, binds) { super(sql, name, binds, preparable: preparable) }
else
super
end
@@ -73,23 +73,29 @@ module ActiveRecord
private
- def cache_sql(sql, binds)
- result =
- if @query_cache[sql].key?(binds)
- ActiveSupport::Notifications.instrument("sql.active_record",
- :sql => sql, :binds => binds, :name => "CACHE", :connection_id => object_id)
- @query_cache[sql][binds]
- else
- @query_cache[sql][binds] = yield
- end
- result.dup
- end
+ def cache_sql(sql, name, binds)
+ result =
+ if @query_cache[sql].key?(binds)
+ ActiveSupport::Notifications.instrument(
+ "sql.active_record",
+ sql: sql,
+ binds: binds,
+ name: name,
+ connection_id: object_id,
+ cached: true,
+ )
+ @query_cache[sql][binds]
+ else
+ @query_cache[sql][binds] = yield
+ end
+ result.dup
+ end
- # If arel is locked this is a SELECT ... FOR UPDATE or somesuch. Such
- # queries should not be cached.
- def locked?(arel)
- arel.respond_to?(:locked) && arel.locked
- end
+ # If arel is locked this is a SELECT ... FOR UPDATE or somesuch. Such
+ # queries should not be cached.
+ def locked?(arel)
+ arel.respond_to?(:locked) && arel.locked
+ end
end
end
end
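
With the query_cache.rb change, a query-cache hit now publishes "sql.active_record" with the statement's original name plus a cached: true payload key, instead of overwriting the name with "CACHE". A sketch of a subscriber that filters out cache hits (the subscriber itself is illustrative, not part of the patch):

ActiveSupport::Notifications.subscribe("sql.active_record") do |name, started, finished, id, payload|
  next if payload[:cached] # cache hits are flagged rather than renamed to "CACHE"
  puts "#{payload[:name]}: #{payload[:sql]}"
end
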
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/quoting.rb b/activerecord/lib/active_record/connection_adapters/abstract/quoting.rb
index 860ef17dca..bbd52b8a91 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/quoting.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/quoting.rb
@@ -1,4 +1,4 @@
-require 'active_support/core_ext/big_decimal/conversions'
+require "active_support/core_ext/big_decimal/conversions"
module ActiveRecord
module ConnectionAdapters # :nodoc:
@@ -112,19 +112,19 @@ module ActiveRecord
end
def quoted_true
- "'t'"
+ "'t'".freeze
end
def unquoted_true
- 't'
+ "t".freeze
end
def quoted_false
- "'f'"
+ "'f'".freeze
end
def unquoted_false
- 'f'
+ "f".freeze
end
# Quote date/time values for use in SQL input. Includes microseconds
@@ -147,52 +147,52 @@ module ActiveRecord
end
def quoted_time(value) # :nodoc:
- quoted_date(value).sub(/\A2000-01-01 /, '')
- end
-
- def prepare_binds_for_database(binds) # :nodoc:
- binds.map(&:value_for_database)
+ quoted_date(value).sub(/\A2000-01-01 /, "")
end
private
- def types_which_need_no_typecasting
- [nil, Numeric, String]
- end
-
- def _quote(value)
- case value
- when String, ActiveSupport::Multibyte::Chars, Type::Binary::Data
- "'#{quote_string(value.to_s)}'"
- when true then quoted_true
- when false then quoted_false
- when nil then "NULL"
- # BigDecimals need to be put in a non-normalized form and quoted.
- when BigDecimal then value.to_s('F')
- when Numeric, ActiveSupport::Duration then value.to_s
- when Type::Time::Value then "'#{quoted_time(value)}'"
- when Date, Time then "'#{quoted_date(value)}'"
- when Symbol then "'#{quote_string(value.to_s)}'"
- when Class then "'#{value}'"
- else raise TypeError, "can't quote #{value.class.name}"
+ def type_casted_binds(binds)
+ binds.map { |attr| type_cast(attr.value_for_database) }
end
- end
- def _type_cast(value)
- case value
- when Symbol, ActiveSupport::Multibyte::Chars, Type::Binary::Data
- value.to_s
- when true then unquoted_true
- when false then unquoted_false
- # BigDecimals need to be put in a non-normalized form and quoted.
- when BigDecimal then value.to_s('F')
- when Type::Time::Value then quoted_time(value)
- when Date, Time then quoted_date(value)
- when *types_which_need_no_typecasting
- value
- else raise TypeError
+ def types_which_need_no_typecasting
+ [nil, Numeric, String]
+ end
+
+ def _quote(value)
+ case value
+ when String, ActiveSupport::Multibyte::Chars, Type::Binary::Data
+ "'#{quote_string(value.to_s)}'"
+ when true then quoted_true
+ when false then quoted_false
+ when nil then "NULL"
+ # BigDecimals need to be put in a non-normalized form and quoted.
+ when BigDecimal then value.to_s("F")
+ when Numeric, ActiveSupport::Duration then value.to_s
+ when Type::Time::Value then "'#{quoted_time(value)}'"
+ when Date, Time then "'#{quoted_date(value)}'"
+ when Symbol then "'#{quote_string(value.to_s)}'"
+ when Class then "'#{value}'"
+ else raise TypeError, "can't quote #{value.class.name}"
+ end
+ end
+
+ def _type_cast(value)
+ case value
+ when Symbol, ActiveSupport::Multibyte::Chars, Type::Binary::Data
+ value.to_s
+ when true then unquoted_true
+ when false then unquoted_false
+ # BigDecimals need to be put in a non-normalized form and quoted.
+ when BigDecimal then value.to_s("F")
+ when Type::Time::Value then quoted_time(value)
+ when Date, Time then quoted_date(value)
+ when *types_which_need_no_typecasting
+ value
+ else raise TypeError
+ end
end
- end
end
end
end
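
The quoting.rb hunk is mostly re-indentation plus freezing the boolean literals; quoting behaviour itself is unchanged. For orientation, a few calls against the default implementation (results shown are for the abstract defaults and may differ per adapter):

conn = ActiveRecord::Base.connection
conn.quote(nil)                  # => "NULL"
conn.quote(true)                 # => "'t'" with the default quoted_true
conn.quote(BigDecimal("12.50"))  # => "12.5", the non-normalized to_s("F") form
conn.quote("O'Reilly")           # => "'O''Reilly'" after quote_string escaping
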
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/schema_creation.rb b/activerecord/lib/active_record/connection_adapters/abstract/schema_creation.rb
index 6add697eeb..322684672f 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/schema_creation.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/schema_creation.rb
@@ -1,4 +1,4 @@
-require 'active_support/core_ext/string/strip'
+require "active_support/core_ext/string/strip"
module ActiveRecord
module ConnectionAdapters
@@ -23,9 +23,9 @@ module ActiveRecord
def visit_AlterTable(o)
sql = "ALTER TABLE #{quote_table_name(o.name)} "
- sql << o.adds.map { |col| accept col }.join(' ')
- sql << o.foreign_key_adds.map { |fk| visit_AddForeignKey fk }.join(' ')
- sql << o.foreign_key_drops.map { |fk| visit_DropForeignKey fk }.join(' ')
+ sql << o.adds.map { |col| accept col }.join(" ")
+ sql << o.foreign_key_adds.map { |fk| visit_AddForeignKey fk }.join(" ")
+ sql << o.foreign_key_drops.map { |fk| visit_DropForeignKey fk }.join(" ")
end
def visit_ColumnDefinition(o)
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/schema_definitions.rb b/activerecord/lib/active_record/connection_adapters/abstract/schema_definitions.rb
index 8dbafc5a4b..83d1d7cd01 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/schema_definitions.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/schema_definitions.rb
@@ -11,7 +11,6 @@ module ActiveRecord
# +columns+ attribute of said TableDefinition object, in order to be used
# for generating a number of table creation or table changing SQL statements.
class ColumnDefinition < Struct.new(:name, :type, :limit, :precision, :scale, :default, :null, :first, :after, :auto_increment, :primary_key, :collation, :sql_type, :comment) #:nodoc:
-
def primary_key?
primary_key || type.to_sym == :primary_key
end
@@ -61,9 +60,9 @@ module ActiveRecord
end
private
- def default_primary_key
- "id"
- end
+ def default_primary_key
+ "id"
+ end
end
class ReferenceDefinition # :nodoc:
@@ -103,51 +102,51 @@ module ActiveRecord
protected
- attr_reader :name, :polymorphic, :index, :foreign_key, :type, :options
+ attr_reader :name, :polymorphic, :index, :foreign_key, :type, :options
private
- def as_options(value, default = {})
- if value.is_a?(Hash)
- value
- else
- default
+ def as_options(value, default = {})
+ if value.is_a?(Hash)
+ value
+ else
+ default
+ end
end
- end
- def polymorphic_options
- as_options(polymorphic, options)
- end
+ def polymorphic_options
+ as_options(polymorphic, options)
+ end
- def index_options
- as_options(index)
- end
+ def index_options
+ as_options(index)
+ end
- def foreign_key_options
- as_options(foreign_key).merge(column: column_name)
- end
+ def foreign_key_options
+ as_options(foreign_key).merge(column: column_name)
+ end
- def columns
- result = [[column_name, type, options]]
- if polymorphic
- result.unshift(["#{name}_type", :string, polymorphic_options])
+ def columns
+ result = [[column_name, type, options]]
+ if polymorphic
+ result.unshift(["#{name}_type", :string, polymorphic_options])
+ end
+ result
end
- result
- end
- def column_name
- "#{name}_id"
- end
+ def column_name
+ "#{name}_id"
+ end
- def column_names
- columns.map(&:first)
- end
+ def column_names
+ columns.map(&:first)
+ end
- def foreign_table_name
- foreign_key_options.fetch(:to_table) do
- Base.pluralize_table_names ? name.to_s.pluralize : name
+ def foreign_table_name
+ foreign_key_options.fetch(:to_table) do
+ Base.pluralize_table_names ? name.to_s.pluralize : name
+ end
end
- end
end
module ColumnMethods
@@ -212,7 +211,7 @@ module ActiveRecord
def initialize(name, temporary = false, options = nil, as = nil, comment: nil)
@columns_hash = {}
- @indexes = {}
+ @indexes = []
@foreign_keys = []
@primary_keys = nil
@temporary = temporary
@@ -304,7 +303,7 @@ module ActiveRecord
# end
def column(name, type, options = {})
name = name.to_s
- type = type.to_sym
+ type = type.to_sym if type
options = options.dup
if @columns_hash[name] && @columns_hash[name].primary_key?
@@ -328,7 +327,7 @@ module ActiveRecord
#
# index(:account_id, name: 'index_projects_on_account_id')
def index(column_name, options = {})
- indexes[column_name] = options
+ indexes << [column_name, options]
end
def foreign_key(table_name, options = {}) # :nodoc:
@@ -342,9 +341,7 @@ module ActiveRecord
# <tt>:updated_at</tt> to the table. See {connection.add_timestamps}[rdoc-ref:SchemaStatements#add_timestamps]
#
# t.timestamps null: false
- def timestamps(*args)
- options = args.extract_options!
-
+ def timestamps(**options)
options[:null] = false if options[:null].nil?
column(:created_at, :datetime, options)
@@ -383,13 +380,13 @@ module ActiveRecord
end
private
- def create_column_definition(name, type)
- ColumnDefinition.new name, type
- end
+ def create_column_definition(name, type)
+ ColumnDefinition.new name, type
+ end
- def aliased_types(name, fallback)
- 'timestamp' == name ? :datetime : fallback
- end
+ def aliased_types(name, fallback)
+ "timestamp" == name ? :datetime : fallback
+ end
end
class AlterTable # :nodoc:
@@ -478,7 +475,7 @@ module ActiveRecord
# Checks to see if a column exists.
#
- # t.string(:name) unless t.column_exists?(:name, :string)
+ # t.string(:name) unless t.column_exists?(:name, :string)
#
# See {connection.column_exists?}[rdoc-ref:SchemaStatements#column_exists?]
def column_exists?(column_name, type = nil, options = {})
@@ -499,9 +496,9 @@ module ActiveRecord
# Checks to see if an index exists.
#
- # unless t.index_exists?(:branch_id)
- # t.index(:branch_id)
- # end
+ # unless t.index_exists?(:branch_id)
+ # t.index(:branch_id)
+ # end
#
# See {connection.index_exists?}[rdoc-ref:SchemaStatements#index_exists?]
def index_exists?(column_name, options = {})
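
Two behavioural notes on the schema_definitions.rb hunks: TableDefinition#indexes is now an array of [column_name, options] pairs rather than a hash keyed by column name, and timestamps accepts keyword options directly. A sketch of a migration exercising both (table and column names are made up):

class CreateAccounts < ActiveRecord::Migration[5.0]
  def change
    create_table :accounts do |t|
      t.string :slug
      t.timestamps                  # still defaults to null: false
      t.index :slug
      # With an array, a second index entry on the same column no longer
      # overwrites the first; give it an explicit name to avoid a clash.
      t.index :slug, unique: true, name: "index_accounts_on_slug_unique"
    end
  end
end
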
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/schema_dumper.rb b/activerecord/lib/active_record/connection_adapters/abstract/schema_dumper.rb
index 677a4c6bd0..06c89ca072 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/schema_dumper.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/schema_dumper.rb
@@ -7,10 +7,7 @@ module ActiveRecord
# Adapter level by over-writing this code inside the database specific adapters
module ColumnDumper
def column_spec(column)
- spec = Hash[prepare_column_options(column).map { |k, v| [k, "#{k}: #{v}"] }]
- spec[:name] = column.name.inspect
- spec[:type] = schema_type(column).to_s
- spec
+ [schema_type(column), prepare_column_options(column)]
end
def column_spec_for_primary_key(column)
@@ -40,7 +37,7 @@ module ActiveRecord
default = schema_default(column) if column.has_default?
spec[:default] = default unless default.nil?
- spec[:null] = 'false' unless column.null
+ spec[:null] = "false" unless column.null
if collation = schema_collation(column)
spec[:collation] = collation
@@ -53,53 +50,53 @@ module ActiveRecord
# Lists the valid migration options
def migration_keys
- [:name, :limit, :precision, :scale, :default, :null, :collation, :comment]
+ [:limit, :precision, :scale, :default, :null, :collation, :comment]
end
private
- def default_primary_key?(column)
- schema_type(column) == :integer
- end
+ def default_primary_key?(column)
+ schema_type(column) == :integer
+ end
- def schema_type(column)
- if column.bigint?
- :bigint
- else
- column.type
+ def schema_type(column)
+ if column.bigint?
+ :bigint
+ else
+ column.type
+ end
end
- end
- def schema_limit(column)
- limit = column.limit unless column.bigint?
- limit.inspect if limit && limit != native_database_types[column.type][:limit]
- end
+ def schema_limit(column)
+ limit = column.limit unless column.bigint?
+ limit.inspect if limit && limit != native_database_types[column.type][:limit]
+ end
- def schema_precision(column)
- column.precision.inspect if column.precision
- end
+ def schema_precision(column)
+ column.precision.inspect if column.precision
+ end
- def schema_scale(column)
- column.scale.inspect if column.scale
- end
+ def schema_scale(column)
+ column.scale.inspect if column.scale
+ end
- def schema_default(column)
- type = lookup_cast_type_from_column(column)
- default = type.deserialize(column.default)
- if default.nil?
- schema_expression(column)
- else
- type.type_cast_for_schema(default)
+ def schema_default(column)
+ type = lookup_cast_type_from_column(column)
+ default = type.deserialize(column.default)
+ if default.nil?
+ schema_expression(column)
+ else
+ type.type_cast_for_schema(default)
+ end
end
- end
- def schema_expression(column)
- "-> { #{column.default_function.inspect} }" if column.default_function
- end
+ def schema_expression(column)
+ "-> { #{column.default_function.inspect} }" if column.default_function
+ end
- def schema_collation(column)
- column.collation.inspect if column.collation
- end
+ def schema_collation(column)
+ column.collation.inspect if column.collation
+ end
end
end
end
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb b/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb
index eec0bc8518..1df20a0c56 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb
@@ -1,6 +1,6 @@
-require 'active_record/migration/join_table'
-require 'active_support/core_ext/string/access'
-require 'digest'
+require "active_record/migration/join_table"
+require "active_support/core_ext/string/access"
+require "digest"
module ActiveRecord
module ConnectionAdapters # :nodoc:
@@ -25,7 +25,7 @@ module ActiveRecord
# Truncates a table alias according to the limits of the current adapter.
def table_alias_for(table_name)
- table_name[0...table_alias_length].tr('.', '_')
+ table_name[0...table_alias_length].tr(".", "_")
end
# Returns the relation names useable to back Active Record models.
@@ -120,7 +120,7 @@ module ActiveRecord
checks = []
checks << lambda { |c| c.name == column_name }
checks << lambda { |c| c.type == type } if type
- (migration_keys - [:name]).each do |attr|
+ migration_keys.each do |attr|
checks << lambda { |c| c.send(attr) == options[attr] } if options.key?(attr)
end
@@ -129,14 +129,9 @@ module ActiveRecord
# Returns just a table's primary key
def primary_key(table_name)
- pks = primary_keys(table_name)
- warn <<-WARNING.strip_heredoc if pks.count > 1
- WARNING: Rails does not support composite primary key.
-
- #{table_name} has composite primary key. Composite primary key is ignored.
- WARNING
-
- pks.first if pks.one?
+ pk = primary_keys(table_name)
+ pk = pk.first unless pk.size > 1
+ pk
end
# Creates a new table with the name +table_name+. +table_name+ may either
@@ -179,7 +174,7 @@ module ActiveRecord
# A Symbol can be used to specify the type of the generated primary key column.
# [<tt>:primary_key</tt>]
# The name of the primary key, if one is to be added automatically.
- # Defaults to +id+. If <tt>:id</tt> is false this option is ignored.
+ # Defaults to +id+. If <tt>:id</tt> is false, then this option is ignored.
#
# Note that Active Record models will automatically detect their
# primary key. This can be avoided by using
@@ -283,16 +278,16 @@ module ActiveRecord
result = execute schema_creation.accept td
unless supports_indexes_in_create?
- td.indexes.each_pair do |column_name, index_options|
+ td.indexes.each do |column_name, index_options|
add_index(table_name, column_name, index_options)
end
end
if supports_comments? && !supports_comments_in_create?
- change_table_comment(table_name, comment) if comment
+ change_table_comment(table_name, comment) if comment.present?
td.columns.each do |column|
- change_column_comment(table_name, column.name, column.comment) if column.comment
+ change_column_comment(table_name, column.name, column.comment) if column.comment.present?
end
end
@@ -305,9 +300,9 @@ module ActiveRecord
# # Creates a table called 'assemblies_parts' with no id.
# create_join_table(:assemblies, :parts)
#
- # You can pass a +options+ hash can include the following keys:
+ # You can pass an +options+ hash which can include the following keys:
# [<tt>:table_name</tt>]
- # Sets the table name overriding the default
+ # Sets the table name, overriding the default.
# [<tt>:column_options</tt>]
# Any extra options you want appended to the columns definition.
# [<tt>:options</tt>]
@@ -344,7 +339,7 @@ module ActiveRecord
column_options.reverse_merge!(null: false)
type = column_options.delete(:type) || :integer
- t1_column, t2_column = [table_1, table_2].map{ |t| t.to_s.singularize.foreign_key }
+ t1_column, t2_column = [table_1, table_2].map { |t| t.to_s.singularize.foreign_key }
create_table(join_table_name, options.merge!(id: false)) do |td|
td.send type, t1_column, column_options
@@ -433,7 +428,7 @@ module ActiveRecord
# t.remove_index :company_id
# end
#
- # See also Table for details on all of the various column transformation.
+ # See also Table for details on all of the various column transformations.
def change_table(table_name, options = {})
if supports_bulk_alter? && options[:bulk]
recorder = ActiveRecord::Migration::CommandRecorder.new(self)
@@ -483,10 +478,10 @@ module ActiveRecord
#
# Available options are (none of these exists by default):
# * <tt>:limit</tt> -
- # Requests a maximum column length. This is number of characters for a <tt>:string</tt> column
+ # Requests a maximum column length. This is the number of characters for a <tt>:string</tt> column
# and number of bytes for <tt>:text</tt>, <tt>:binary</tt> and <tt>:integer</tt> columns.
# * <tt>:default</tt> -
- # The column's default value. Use nil for NULL.
+ # The column's default value. Use +nil+ for +NULL+.
# * <tt>:null</tt> -
# Allows or disallows +NULL+ values in the column. This option could
# have been named <tt>:null_allowed</tt>.
@@ -495,7 +490,7 @@ module ActiveRecord
# * <tt>:scale</tt> -
# Specifies the scale for the <tt>:decimal</tt> and <tt>:numeric</tt> columns.
#
- # Note: The precision is the total number of significant digits
+ # Note: The precision is the total number of significant digits,
# and the scale is the number of digits that can be stored following
# the decimal point. For example, the number 123.45 has a precision of 5
# and a scale of 2. A decimal with a precision of 5 and a scale of 2 can
@@ -516,7 +511,7 @@ module ActiveRecord
# Default is (38,0).
# * DB2: <tt>:precision</tt> [1..63], <tt>:scale</tt> [0..62].
# Default unknown.
- # * SqlServer?: <tt>:precision</tt> [1..38], <tt>:scale</tt> [0..38].
+ # * SqlServer: <tt>:precision</tt> [1..38], <tt>:scale</tt> [0..38].
# Default (38,0).
#
# == Examples
@@ -538,6 +533,10 @@ module ActiveRecord
# add_column(:measurements, :huge_integer, :decimal, precision: 30)
# # ALTER TABLE "measurements" ADD "huge_integer" decimal(30)
#
+ # # Defines a column that stores an array of a type.
+ # add_column(:users, :skills, :text, array: true)
+ # # ALTER TABLE "users" ADD "skills" text[]
+ #
# # Defines a column with a database-specific type.
# add_column(:shapes, :triangle, 'polygon')
# # ALTER TABLE "shapes" ADD "triangle" polygon
@@ -564,7 +563,7 @@ module ActiveRecord
#
# The +type+ and +options+ parameters will be ignored if present. It can be helpful
# to provide these in a migration's +change+ method so it can be reverted.
- # In that case, +type+ and +options+ will be used by add_column.
+ # In that case, +type+ and +options+ will be used by #add_column.
def remove_column(table_name, column_name, type = nil, options = {})
execute "ALTER TABLE #{quote_table_name(table_name)} DROP #{quote_column_name(column_name)}"
end
@@ -767,7 +766,7 @@ module ActiveRecord
raise ArgumentError, "You must specify the index name"
end
else
- index_name(table_name, :column => options)
+ index_name(table_name, column: options)
end
end
@@ -790,7 +789,7 @@ module ActiveRecord
# [<tt>:type</tt>]
# The reference column type. Defaults to +:integer+.
# [<tt>:index</tt>]
- # Add an appropriate index. Defaults to false.
+ # Add an appropriate index. Defaults to true.
# See #add_index for usage of this option.
# [<tt>:foreign_key</tt>]
# Add an appropriate foreign key constraint. Defaults to false.
@@ -952,13 +951,13 @@ module ActiveRecord
# Checks to see if a foreign key exists on a table for a given foreign key definition.
#
- # # Check a foreign key exists
+ # # Checks to see if a foreign key exists.
# foreign_key_exists?(:accounts, :branches)
#
- # # Check a foreign key on a specified column exists
+ # # Checks to see if a foreign key on a specified column exists.
# foreign_key_exists?(:accounts, column: :owner_id)
#
- # # Check a foreign key with a custom name exists
+ # # Checks to see if a foreign key with a custom name exists.
# foreign_key_exists?(:accounts, name: "special_fk_name")
#
def foreign_key_exists?(from_table, options_or_to_table = {})
@@ -967,12 +966,12 @@ module ActiveRecord
def foreign_key_for(from_table, options_or_to_table = {}) # :nodoc:
return unless supports_foreign_keys?
- foreign_keys(from_table).detect {|fk| fk.defined_for? options_or_to_table }
+ foreign_keys(from_table).detect { |fk| fk.defined_for? options_or_to_table }
end
def foreign_key_for!(from_table, options_or_to_table = {}) # :nodoc:
- foreign_key_for(from_table, options_or_to_table) or \
- raise ArgumentError, "Table '#{from_table}' has no foreign key for #{options_or_to_table}"
+ foreign_key_for(from_table, options_or_to_table) || \
+ raise(ArgumentError, "Table '#{from_table}' has no foreign key for #{options_or_to_table}")
end
def foreign_key_column_for(table_name) # :nodoc:
@@ -990,7 +989,7 @@ module ActiveRecord
end
def dump_schema_information #:nodoc:
- versions = ActiveRecord::SchemaMigration.order('version').pluck(:version)
+ versions = ActiveRecord::SchemaMigration.order("version").pluck(:version)
insert_versions_sql(versions)
end
@@ -998,8 +997,8 @@ module ActiveRecord
sm_table = ActiveRecord::Migrator.schema_migrations_table_name
if supports_multi_insert?
- sql = "INSERT INTO #{sm_table} (version) VALUES "
- sql << versions.map {|v| "('#{v}')" }.join(', ')
+ sql = "INSERT INTO #{sm_table} (version) VALUES\n"
+ sql << versions.map { |v| "('#{v}')" }.join(",\n")
sql << ";\n\n"
sql
else
@@ -1029,18 +1028,18 @@ module ActiveRecord
sm_table = quote_table_name(ActiveRecord::Migrator.schema_migrations_table_name)
migrated = select_values("SELECT version FROM #{sm_table}").map(&:to_i)
- paths = migrations_paths.map {|p| "#{p}/[0-9]*_*.rb" }
+ paths = migrations_paths.map { |p| "#{p}/[0-9]*_*.rb" }
versions = Dir[*paths].map do |filename|
- filename.split('/').last.split('_').first.to_i
+ filename.split("/").last.split("_").first.to_i
end
unless migrated.include?(version)
execute "INSERT INTO #{sm_table} (version) VALUES ('#{version}')"
end
- inserting = (versions - migrated).select {|v| v < version}
+ inserting = (versions - migrated).select { |v| v < version }
if inserting.any?
- if (duplicate = inserting.detect {|v| inserting.count(v) > 1})
+ if (duplicate = inserting.detect { |v| inserting.count(v) > 1 })
raise "Duplicate migration #{duplicate}. Please renumber your migrations to resolve the conflict."
end
execute insert_versions_sql(inserting)
@@ -1048,7 +1047,8 @@ module ActiveRecord
end
def type_to_sql(type, limit = nil, precision = nil, scale = nil) #:nodoc:
- if native = native_database_types[type.to_sym]
+ type = type.to_sym if type
+ if native = native_database_types[type]
column_type_sql = (native.is_a?(Hash) ? native[:name] : native).dup
if type == :decimal # ignore limit, use precision and scale
@@ -1081,7 +1081,7 @@ module ActiveRecord
end
# Given a set of columns and an ORDER BY clause, returns the columns for a SELECT DISTINCT.
- # PostgreSQL, MySQL, and Oracle overrides this for custom DISTINCT syntax - they
+ # PostgreSQL, MySQL, and Oracle override this for custom DISTINCT syntax - they
# require the order columns appear in the SELECT.
#
# columns_for_distinct("posts.id", ["posts.created_at desc"])
@@ -1166,31 +1166,34 @@ module ActiveRecord
end
protected
- def add_index_sort_order(option_strings, column_names, options = {})
- if options.is_a?(Hash) && order = options[:order]
+
+ def add_index_sort_order(quoted_columns, **options)
+ if order = options[:order]
case order
when Hash
- column_names.each {|name| option_strings[name] += " #{order[name].upcase}" if order.has_key?(name)}
+ quoted_columns.each { |name, column| column << " #{order[name].upcase}" if order[name].present? }
when String
- column_names.each {|name| option_strings[name] += " #{order.upcase}"}
+ quoted_columns.each { |name, column| column << " #{order.upcase}" if order.present? }
end
end
- return option_strings
+ quoted_columns
end
# Overridden by the MySQL adapter for supporting index lengths
- def quoted_columns_for_index(column_names, options = {})
- return [column_names] if column_names.is_a?(String)
-
- option_strings = Hash[column_names.map {|name| [name, '']}]
-
- # add index sort order if supported
+ def add_options_for_index_columns(quoted_columns, **options)
if supports_index_sort_order?
- option_strings = add_index_sort_order(option_strings, column_names, options)
+ quoted_columns = add_index_sort_order(quoted_columns, options)
end
- column_names.map {|name| quote_column_name(name) + option_strings[name]}
+ quoted_columns
+ end
+
+ def quoted_columns_for_index(column_names, **options)
+ return [column_names] if column_names.is_a?(String)
+
+ quoted_columns = Hash[column_names.map { |name| [name.to_sym, quote_column_name(name).dup] }]
+ add_options_for_index_columns(quoted_columns, options).values
end
def index_name_for_remove(table_name, options = {})
@@ -1210,10 +1213,10 @@ module ActiveRecord
end
if column_names.any?
- checks << lambda { |i| i.columns.join('_and_') == column_names.join('_and_') }
+ checks << lambda { |i| i.columns.join("_and_") == column_names.join("_and_") }
end
- raise ArgumentError "No name or columns specified" if checks.none?
+ raise ArgumentError, "No name or columns specified" if checks.none?
matching_indexes = indexes(table_name).select { |i| checks.all? { |check| check[i] } }
@@ -1250,49 +1253,49 @@ module ActiveRecord
end
private
- def create_table_definition(*args)
- TableDefinition.new(*args)
- end
-
- def create_alter_table(name)
- AlterTable.new create_table_definition(name)
- end
+ def create_table_definition(*args)
+ TableDefinition.new(*args)
+ end
- def index_name_options(column_names) # :nodoc:
- if column_names.is_a?(String)
- column_names = column_names.scan(/\w+/).join('_')
+ def create_alter_table(name)
+ AlterTable.new create_table_definition(name)
end
- { column: column_names }
- end
+ def index_name_options(column_names) # :nodoc:
+ if column_names.is_a?(String)
+ column_names = column_names.scan(/\w+/).join("_")
+ end
- def foreign_key_name(table_name, options) # :nodoc:
- identifier = "#{table_name}_#{options.fetch(:column)}_fk"
- hashed_identifier = Digest::SHA256.hexdigest(identifier).first(10)
- options.fetch(:name) do
- "fk_rails_#{hashed_identifier}"
+ { column: column_names }
end
- end
- def validate_index_length!(table_name, new_name, internal = false) # :nodoc:
- max_index_length = internal ? index_name_length : allowed_index_name_length
+ def foreign_key_name(table_name, options) # :nodoc:
+ identifier = "#{table_name}_#{options.fetch(:column)}_fk"
+ hashed_identifier = Digest::SHA256.hexdigest(identifier).first(10)
+ options.fetch(:name) do
+ "fk_rails_#{hashed_identifier}"
+ end
+ end
+
+ def validate_index_length!(table_name, new_name, internal = false) # :nodoc:
+ max_index_length = internal ? index_name_length : allowed_index_name_length
- if new_name.length > max_index_length
- raise ArgumentError, "Index name '#{new_name}' on table '#{table_name}' is too long; the limit is #{allowed_index_name_length} characters"
+ if new_name.length > max_index_length
+ raise ArgumentError, "Index name '#{new_name}' on table '#{table_name}' is too long; the limit is #{allowed_index_name_length} characters"
+ end
end
- end
- def extract_new_default_value(default_or_changes)
- if default_or_changes.is_a?(Hash) && default_or_changes.has_key?(:from) && default_or_changes.has_key?(:to)
- default_or_changes[:to]
- else
- default_or_changes
+ def extract_new_default_value(default_or_changes)
+ if default_or_changes.is_a?(Hash) && default_or_changes.has_key?(:from) && default_or_changes.has_key?(:to)
+ default_or_changes[:to]
+ else
+ default_or_changes
+ end
end
- end
- def can_remove_index_by_name?(options)
- options.is_a?(Hash) && options.key?(:name) && options.except(:name, :algorithm).empty?
- end
+ def can_remove_index_by_name?(options)
+ options.is_a?(Hash) && options.key?(:name) && options.except(:name, :algorithm).empty?
+ end
end
end
end
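
The schema_statements.rb rewrite routes index column quoting through add_options_for_index_columns, keeping per-column sort directions intact, and primary_key now returns the full array for a composite key instead of warning and returning nil. A sketch of the sort-order option from the caller's side, in an assumed migration (adapters where supports_index_sort_order? is false simply ignore :order):

class AddBranchPartyIndex < ActiveRecord::Migration[5.0]
  def change
    add_index :accounts, [:branch_id, :party_id],
              unique: true,
              order: { branch_id: :desc }
  end
end
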
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/transaction.rb b/activerecord/lib/active_record/connection_adapters/abstract/transaction.rb
index ca795cb1ad..6bb072dd73 100644
--- a/activerecord/lib/active_record/connection_adapters/abstract/transaction.rb
+++ b/activerecord/lib/active_record/connection_adapters/abstract/transaction.rb
@@ -41,7 +41,6 @@ module ActiveRecord
end
class Transaction #:nodoc:
-
attr_reader :connection, :state, :records, :savepoint_name
attr_writer :joinable
@@ -101,7 +100,6 @@ module ActiveRecord
end
class SavepointTransaction < Transaction
-
def initialize(connection, savepoint_name, options, *args)
super(connection, options, *args)
if options[:isolation]
@@ -124,7 +122,6 @@ module ActiveRecord
end
class RealTransaction < Transaction
-
def initialize(connection, options, *args)
super
if options[:isolation]
@@ -195,7 +192,7 @@ module ActiveRecord
raise
ensure
unless error
- if Thread.current.status == 'aborting'
+ if Thread.current.status == "aborting"
rollback_transaction if transaction
else
begin
@@ -226,7 +223,6 @@ module ActiveRecord
return unless error.is_a?(ActiveRecord::PreparedStatementCacheExpired)
@connection.clear_cache!
end
-
end
end
end