Diffstat (limited to 'activesupport/lib/active_support/testing')
3 files changed, 93 insertions, 46 deletions
diff --git a/activesupport/lib/active_support/testing/assertions.rb b/activesupport/lib/active_support/testing/assertions.rb
index a4e0361a32..05da50e150 100644
--- a/activesupport/lib/active_support/testing/assertions.rb
+++ b/activesupport/lib/active_support/testing/assertions.rb
@@ -29,22 +29,33 @@ module ActiveSupport
       #     post :create, :article => {...}
       #   end
       #
+      # A lambda or a list of lambdas can be passed in and evaluated:
+      #
+      #   assert_difference lambda { Article.count }, 2 do
+      #     post :create, :article => {...}
+      #   end
+      #
+      #   assert_difference [->{ Article.count }, ->{ Post.count }], 2 do
+      #     post :create, :article => {...}
+      #   end
+      #
       # A error message can be specified.
       #
       #   assert_difference 'Article.count', -1, "An Article should be destroyed" do
       #     post :delete, :id => ...
       #   end
       def assert_difference(expression, difference = 1, message = nil, &block)
-        b = block.send(:binding)
-        exps = Array.wrap(expression)
-        before = exps.map { |e| eval(e, b) }
+        exps = Array.wrap(expression).map { |e|
+          e.respond_to?(:call) ? e : lambda { eval(e, block.binding) }
+        }
+        before = exps.map { |e| e.call }
         yield

         exps.each_with_index do |e, i|
-          error = "#{e.inspect} didn't change by #{difference}"
-          error = "#{message}.\n#{error}" if message
-          assert_equal(before[i] + difference, eval(e, b), error)
+          error = "#{e.inspect} didn't change by #{difference}"
+          error = "#{message}.\n#{error}" if message
+          assert_equal(before[i] + difference, e.call, error)
         end
       end
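
With this change assert_difference treats anything that responds to call as a callable and wraps plain string expressions in a lambda evaluated against the block's binding, so the two forms can be mixed freely. A minimal sketch of a test after this patch (the Article and Post models and the :create action mirror the examples in the doc comment above and are illustrative, not part of the diff):

    # Strings are still eval'd in the caller's binding; lambdas are called directly.
    def test_create_adds_one_article_and_one_post
      assert_difference ['Article.count', lambda { Post.count }], 1 do
        post :create, :article => { :title => 'Hello' }
      end
    end
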
diff --git a/activesupport/lib/active_support/testing/performance.rb b/activesupport/lib/active_support/testing/performance.rb
index 598eb330eb..4db59680b0 100644
--- a/activesupport/lib/active_support/testing/performance.rb
+++ b/activesupport/lib/active_support/testing/performance.rb
@@ -1,5 +1,6 @@
 require 'fileutils'
 require 'rails/version'
+require 'active_support/concern'
 require 'active_support/core_ext/class/delegating_attributes'
 require 'active_support/core_ext/string/inflections'
 require 'action_view/helpers/number_helper'
@@ -7,6 +8,19 @@ require 'action_view/helpers/number_helper'
 module ActiveSupport
   module Testing
     module Performance
+      extend ActiveSupport::Concern
+
+      included do
+        superclass_delegating_accessor :profile_options
+        self.profile_options = {}
+
+        if defined?(MiniTest::Assertions) && TestCase < MiniTest::Assertions
+          include ForMiniTest
+        else
+          include ForClassicTestUnit
+        end
+      end
+
       # each implementation should define metrics and freeze the defaults
       DEFAULTS =
         if ARGV.include?('--benchmark') # HAX for rake test
@@ -16,11 +30,6 @@ module ActiveSupport
           { :runs => 1,
             :output => 'tmp/performance' }
         end
-
-      def self.included(base)
-        base.superclass_delegating_accessor :profile_options
-        base.profile_options = {}
-      end

       def full_profile_options
         DEFAULTS.merge(profile_options)
@@ -30,43 +39,77 @@ module ActiveSupport
         "#{self.class.name}##{method_name}"
       end

-      def run(result)
-        return if method_name =~ /^default_test$/
-
-        yield(self.class::STARTED, name)
-        @_result = result
-
-        run_warmup
-        if full_profile_options && metrics = full_profile_options[:metrics]
-          metrics.each do |metric_name|
-            if klass = Metrics[metric_name.to_sym]
-              run_profile(klass.new)
-              result.add_run
-            else
-              puts '%20s: unsupported' % metric_name
+      module ForMiniTest
+        def run(runner)
+          @runner = runner
+
+          run_warmup
+          if full_profile_options && metrics = full_profile_options[:metrics]
+            metrics.each do |metric_name|
+              if klass = Metrics[metric_name.to_sym]
+                run_profile(klass.new)
+              end
             end
           end
         end

-        yield(self.class::FINISHED, name)
+        def run_test(metric, mode)
+          result = '.'
+          begin
+            run_callbacks :setup
+            setup
+            metric.send(mode) { __send__ method_name }
+          rescue Exception => e
+            result = @runner.puke(self.class, method_name, e)
+          ensure
+            begin
+              teardown
+              run_callbacks :teardown, :enumerator => :reverse_each
+            rescue Exception => e
+              result = @runner.puke(self.class, method_name, e)
+            end
+          end
+          result
+        end
       end

-      def run_test(metric, mode)
-        run_callbacks :setup
-        setup
-        metric.send(mode) { __send__ @method_name }
-      rescue ::Test::Unit::AssertionFailedError => e
-        add_failure(e.message, e.backtrace)
-      rescue StandardError, ScriptError => e
-        add_error(e)
-      ensure
-        begin
-          teardown
-          run_callbacks :teardown, :enumerator => :reverse_each
+      module ForClassicTestUnit
+        def run(result)
+          return if method_name =~ /^default_test$/
+
+          yield(self.class::STARTED, name)
+          @_result = result
+
+          run_warmup
+          if full_profile_options && metrics = full_profile_options[:metrics]
+            metrics.each do |metric_name|
+              if klass = Metrics[metric_name.to_sym]
+                run_profile(klass.new)
+                result.add_run
+              end
+            end
+          end
+
+          yield(self.class::FINISHED, name)
+        end
+
+        def run_test(metric, mode)
+          run_callbacks :setup
+          setup
+          metric.send(mode) { __send__ @method_name }
         rescue ::Test::Unit::AssertionFailedError => e
-          add_failure(e.message, e.backtrace)
+          add_failure(e.message, e.backtrace)
         rescue StandardError, ScriptError => e
           add_error(e)
+        ensure
+          begin
+            teardown
+            run_callbacks :teardown, :enumerator => :reverse_each
+          rescue ::Test::Unit::AssertionFailedError => e
+            add_failure(e.message, e.backtrace)
+          rescue StandardError, ScriptError => e
+            add_error(e)
+          end
         end
       end

diff --git a/activesupport/lib/active_support/testing/performance/ruby/yarv.rb b/activesupport/lib/active_support/testing/performance/ruby/yarv.rb
index ad0e882214..498bc6b4db 100644
--- a/activesupport/lib/active_support/testing/performance/ruby/yarv.rb
+++ b/activesupport/lib/active_support/testing/performance/ruby/yarv.rb
@@ -1,10 +1,3 @@
-begin
-  gem 'test-unit'
-rescue LoadError
-  $stderr.puts 'Specify test-unit as application\'s dependency in Gemfile to run benchmarks.'
-  exit
-end
-
 module ActiveSupport
   module Testing
     module Performance
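
Once included, the ActiveSupport::Concern hook above gives the host test case a class-level profile_options accessor (merged over DEFAULTS by full_profile_options) and mixes in ForMiniTest or ForClassicTestUnit depending on whether TestCase is backed by MiniTest. A rough usage sketch, assuming the Rails 3 ActionDispatch::PerformanceTest base class that includes this module; the class name, options and route below are illustrative only:

    # Hypothetical performance test; only the keys being overridden need to be set,
    # since full_profile_options merges profile_options over DEFAULTS.
    class HomepageTest < ActionDispatch::PerformanceTest
      self.profile_options = { :runs => 5, :metrics => [:wall_time] }

      def test_homepage
        get '/'
      end
    end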