author     Michael Koziarski <michael@koziarski.com>   2006-10-03 23:45:32 +0000
committer  Michael Koziarski <michael@koziarski.com>   2006-10-03 23:45:32 +0000
commit     f238d495b70a264abdb864fe8107e02766b285b4 (patch)
tree       cfe1f5df118b46d1426cfc87326c26c8fbe63a85
parent     8cb0079feabe011b7edd1c65114efdb7047a02ec (diff)
download   rails-f238d495b70a264abdb864fe8107e02766b285b4.tar.gz
           rails-f238d495b70a264abdb864fe8107e02766b285b4.tar.bz2
           rails-f238d495b70a264abdb864fe8107e02766b285b4.zip
Add ActiveSupport::Multibyte. Provides String#chars which lets you deal with strings as a sequence of chars, not of bytes. Closes #6242 [Julian Tarkhanov, Manfred Stienstra & Jan Behrens]
git-svn-id: http://svn-commit.rubyonrails.org/rails/trunk@5223 5ecf4fe2-1ee6-0310-87b1-e25e094e27de
-rw-r--r--  activesupport/CHANGELOG                                                      2
-rw-r--r--  activesupport/lib/active_support.rb                                          2
-rw-r--r--  activesupport/lib/active_support/core_ext/string.rb                          2
-rw-r--r--  activesupport/lib/active_support/core_ext/string/unicode.rb                 42
-rw-r--r--  activesupport/lib/active_support/multibyte.rb                                7
-rw-r--r--  activesupport/lib/active_support/multibyte/chars.rb                        129
-rw-r--r--  activesupport/lib/active_support/multibyte/generators/generate_tables.rb   141
-rw-r--r--  activesupport/lib/active_support/multibyte/handlers/passthru_handler.rb      9
-rw-r--r--  activesupport/lib/active_support/multibyte/handlers/utf8_handler.rb        447
-rw-r--r--  activesupport/lib/active_support/multibyte/handlers/utf8_handler_proc.rb    44
-rw-r--r--  activesupport/lib/active_support/values/unicode_tables-1.8.4.dat           bin 0 -> 654866 bytes
-rw-r--r--  activesupport/lib/active_support/values/unicode_tables-1.8.5.dat           bin 0 -> 654866 bytes
-rw-r--r--  activesupport/test/multibyte_chars_test.rb                                 163
-rw-r--r--  activesupport/test/multibyte_conformance.rb                                141
-rw-r--r--  activesupport/test/multibyte_handler_test.rb                               261
15 files changed, 1390 insertions, 0 deletions
diff --git a/activesupport/CHANGELOG b/activesupport/CHANGELOG
index 6818f32f55..cf68da71c4 100644
--- a/activesupport/CHANGELOG
+++ b/activesupport/CHANGELOG
@@ -1,5 +1,7 @@
*SVN*
+* Add ActiveSupport::Multibyte. Provides String#chars which lets you deal with strings as a sequence of chars, not of bytes. Closes #6242 [Julian Tarkhanov, Manfred Stienstra & Jan Behrens]
+
* Fix issue with #class_inheritable_accessor saving updates to the parent class when initialized with an Array or Hash [mojombo]
* Hash#to_xml supports Bignum and BigDecimal. #6313 [edibiase]
diff --git a/activesupport/lib/active_support.rb b/activesupport/lib/active_support.rb
index 4289e57865..5504fa8f01 100644
--- a/activesupport/lib/active_support.rb
+++ b/activesupport/lib/active_support.rb
@@ -40,3 +40,5 @@ require 'active_support/option_merger'
require 'active_support/values/time_zone'
require 'active_support/json'
+
+require 'active_support/multibyte'
diff --git a/activesupport/lib/active_support/core_ext/string.rb b/activesupport/lib/active_support/core_ext/string.rb
index 240e1ff1da..b9f075522f 100644
--- a/activesupport/lib/active_support/core_ext/string.rb
+++ b/activesupport/lib/active_support/core_ext/string.rb
@@ -3,6 +3,7 @@ require File.dirname(__FILE__) + '/string/conversions'
require File.dirname(__FILE__) + '/string/access'
require File.dirname(__FILE__) + '/string/starts_ends_with'
require File.dirname(__FILE__) + '/string/iterators'
+require File.dirname(__FILE__) + '/string/unicode'
class String #:nodoc:
include ActiveSupport::CoreExtensions::String::Access
@@ -10,4 +11,5 @@ class String #:nodoc:
include ActiveSupport::CoreExtensions::String::Inflections
include ActiveSupport::CoreExtensions::String::StartsEndsWith
include ActiveSupport::CoreExtensions::String::Iterators
+ include ActiveSupport::CoreExtensions::String::Unicode
end
diff --git a/activesupport/lib/active_support/core_ext/string/unicode.rb b/activesupport/lib/active_support/core_ext/string/unicode.rb
new file mode 100644
index 0000000000..0e4c76fb86
--- /dev/null
+++ b/activesupport/lib/active_support/core_ext/string/unicode.rb
@@ -0,0 +1,42 @@
+module ActiveSupport #:nodoc:
+ module CoreExtensions #:nodoc:
+ module String #:nodoc:
+      # Define methods for handling Unicode data.
+ module Unicode
+ # +chars+ is a Unicode safe proxy for string methods. It creates and returns an instance of the
+      # ActiveSupport::Multibyte::Chars class which encapsulates the original string. Unicode safe versions of all
+ # the String methods are defined on this proxy class. Undefined methods are forwarded to String, so all of the
+ # string overrides can also be called through the +chars+ proxy.
+ #
+ # name = 'Claus Müller'
+ # name.reverse #=> "rell??M sualC"
+ # name.length #=> 13
+ #
+ # name.chars.reverse.to_s #=> "rellüM sualC"
+ # name.chars.length #=> 12
+ #
+ #
+ # All the methods on the chars proxy which normally return a string will return a Chars object. This allows
+ # method chaining on the result of any of these methods.
+ #
+ # name.chars.reverse.length #=> 12
+ #
+      # The Chars object tries to be as interchangeable with String objects as possible: sorting and comparing between
+      # String and Chars work as expected. The bang! methods change the internal string representation in the Chars
+ # object. Interoperability problems can be resolved easily with a +to_s+ call.
+ #
+ # For more information about the methods defined on the Chars proxy see ActiveSupport::Multibyte::Chars and
+ # ActiveSupport::Multibyte::Handlers::UTF8Handler
+ def chars
+ ActiveSupport::Multibyte::Chars.new(self)
+ end
+
+      # Returns true if the string has UTF-8 semantics (a String used purely for byte resources is unlikely to have
+      # them); returns false otherwise.
+ def is_utf8?
+ ActiveSupport::Multibyte::Handlers::UTF8Handler.consumes?(self)
+ end
+ end
+ end
+ end
+end
\ No newline at end of file
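
A quick sketch of the chars proxy defined above, assuming ActiveSupport is loaded and $KCODE is set to 'UTF8' (Ruby 1.8 semantics, where String#length counts bytes):

    require 'active_support'
    $KCODE = 'UTF8'

    name = "José"
    name.is_utf8?             # => true
    name.length               # => 5 bytes ("é" takes two bytes in UTF-8)
    name.chars.length         # => 4 codepoints
    name.chars.reverse.to_s   # => "ésoJ"

    "\270\236\010".is_utf8?   # => false, not a valid UTF-8 byte sequence
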
diff --git a/activesupport/lib/active_support/multibyte.rb b/activesupport/lib/active_support/multibyte.rb
new file mode 100644
index 0000000000..5e518b65f0
--- /dev/null
+++ b/activesupport/lib/active_support/multibyte.rb
@@ -0,0 +1,7 @@
+module ActiveSupport::Multibyte
+ DEFAULT_NORMALIZATION_FORM = :kc
+ NORMALIZATIONS_FORMS = [:c, :kc, :d, :kd]
+ UNICODE_VERSION = '5.0.0'
+end
+
+require 'active_support/multibyte/chars'
\ No newline at end of file
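
The four entries in NORMALIZATIONS_FORMS correspond to the standard Unicode normalization forms NFC, NFKC, NFD and NFKD. A sketch of what they mean in practice; the codepoint behaviour below is standard Unicode, not specific to this patch:

    $KCODE = 'UTF8'

    e_acute_composed   = [0x00E9].pack('U*')          # "é" as one codepoint
    e_acute_decomposed = [0x0065, 0x0301].pack('U*')  # "e" + COMBINING ACUTE ACCENT
    fi_ligature        = [0xFB01].pack('U*')          # LATIN SMALL LIGATURE FI

    e_acute_decomposed.chars.normalize(:c).to_s == e_acute_composed    # => true
    e_acute_composed.chars.normalize(:d).to_s   == e_acute_decomposed  # => true
    fi_ligature.chars.normalize(:kc).to_s   # => "fi", compatibility forms fold the ligature
    fi_ligature.chars.normalize(:c).to_s    # => unchanged, canonical forms leave it alone
    e_acute_composed.chars.normalize.to_s   # => "é", :kc is the DEFAULT_NORMALIZATION_FORM
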
diff --git a/activesupport/lib/active_support/multibyte/chars.rb b/activesupport/lib/active_support/multibyte/chars.rb
new file mode 100644
index 0000000000..a9795de523
--- /dev/null
+++ b/activesupport/lib/active_support/multibyte/chars.rb
@@ -0,0 +1,129 @@
+require 'active_support/multibyte/handlers/utf8_handler'
+require 'active_support/multibyte/handlers/passthru_handler'
+
+# Encapsulates all the functionality related to the Chars proxy.
+module ActiveSupport::Multibyte
+ # Chars enables you to work transparently with multibyte encodings in the Ruby String class without having extensive
+ # knowledge about the encoding. A Chars object accepts a string upon initialization and proxies String methods in an
+ # encoding safe manner. All the normal String methods are also implemented on the proxy.
+ #
+ # String methods are proxied through the Chars object, and can be accessed through the +chars+ method. Methods
+ # which would normally return a String object now return a Chars object so methods can be chained.
+ #
+ # "The Perfect String ".chars.downcase.strip.normalize #=> "the perfect string"
+ #
+ # Chars objects are perfectly interchangeable with String objects as long as no explicit class checks are made.
+ # If certain methods do explicitly check the class, call +to_s+ before you pass chars objects to them.
+ #
+ # bad.explicit_checking_method "T".chars.downcase.to_s
+ #
+ # The actual operations on the string are delegated to handlers. Theoretically handlers can be implemented for
+  # any encoding, but the default handler handles UTF-8. This handler is set during initialization; if you want to
+  # use your own handler, you can set it on the Chars class. Look at the UTF8Handler source for an example of how to
+  # implement your own handler. If you want your own handler to work on anything but UTF-8 you probably also
+  # want to override Chars#handler.
+ #
+ # ActiveSupport::Multibyte::Chars.handler = MyHandler
+ #
+ # Note that a few methods are defined on Chars instead of the handler because they are defined on Object or Kernel
+ # and method_missing can't catch them.
+ class Chars
+
+ attr_reader :string # The contained string
+ alias_method :to_s, :string
+
+ include Comparable
+
+ # The magic method to make String and Chars comparable
+ def to_str
+ # Using any other ways of overriding the String itself will lead you all the way from infinite loops to
+ # core dumps. Don't go there.
+ @string
+ end
+
+ # Create a new Chars instance.
+ def initialize(str)
+ @string = (str.string rescue str)
+ end
+
+    # Returns -1, 0 or +1 depending on whether the Chars object sorts before, the same as, or after the
+    # object on the right side of the operation. It accepts any object that implements +to_s+. See String#<=>
+ # for more details.
+ def <=>(other); @string <=> other.to_s; end
+
+ # Works just like String#split, with the exception that the items in the resulting list are Chars
+ # instances instead of String. This makes chaining methods easier.
+ def split(*args)
+ @string.split(*args).map { |i| i.chars }
+ end
+
+ # Gsub works exactly the same as gsub on a normal string.
+ def gsub(*a, &b); @string.gsub(*a, &b).chars; end
+
+ # Like String.=~ only it returns the character offset (in codepoints) instead of the byte offset.
+ def =~(other)
+ handler.translate_offset(@string, @string =~ other)
+ end
+
+    # Tries to forward all undefined methods to the handler; when a method is not defined on the handler, it is sent
+    # to the contained string. method_missing is also responsible for making the bang! methods destructive.
+ def method_missing(m, *a, &b)
+ begin
+ # Simulate methods with a ! at the end because we can't touch the enclosed string from the handlers.
+ if m.to_s =~ /^(.*)\!$/
+ result = handler.send($1, @string, *a, &b)
+ if result == @string
+ result = nil
+ else
+ @string.replace result
+ end
+ else
+ result = handler.send(m, @string, *a, &b)
+ end
+ rescue NoMethodError
+ result = @string.send(m, *a, &b)
+ rescue Handlers::EncodingError
+ @string.replace handler.tidy_bytes(@string)
+ retry
+ end
+
+ if result.kind_of?(String)
+ result.chars
+ else
+ result
+ end
+ end
+
+    # Sets the handler class for Chars objects.
+ def self.handler=(klass)
+ @@handler = klass
+ end
+
+ # Returns the proper handler for the contained string depending on $KCODE and the encoding of the string. This
+ # method is used internally to always redirect messages to the proper classes depending on the context.
+ def handler
+ if utf8_pragma?
+ @@handler
+ else
+ ActiveSupport::Multibyte::Handlers::PassthruHandler
+ end
+ end
+
+ private
+
+ # +utf8_pragma+ checks if it can send this string to the handlers. It makes sure @string isn't nil and $KCODE is
+ # set to 'UTF8'.
+ def utf8_pragma?
+ !@string.nil? && ($KCODE == 'UTF8')
+ end
+ end
+end
+
+# When we can load the utf8proc library, override normalization with the faster methods
+begin
+ require 'utf8proc_native'
+ require 'active_support/multibyte/handlers/utf8_handler_proc'
+ ActiveSupport::Multibyte::Chars.handler = ActiveSupport::Multibyte::Handlers::UTF8HandlerProc
+rescue LoadError
+ ActiveSupport::Multibyte::Chars.handler = ActiveSupport::Multibyte::Handlers::UTF8Handler
+end
\ No newline at end of file
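
The handler contract used by Chars above is small: a handler is a class whose class methods take the raw string as their first argument, plus translate_offset for regexp support. A minimal hypothetical handler, purely for illustration (MyUpcaseHandler is not part of this patch):

    # Hypothetical handler: only safe for single-byte encodings, since it works on bytes.
    class MyUpcaseHandler
      def self.translate_offset(string, byte_offset)
        byte_offset # bytes == characters in a single-byte encoding
      end

      def self.upcase(string)
        string.tr('a-z', 'A-Z')
      end
    end

    ActiveSupport::Multibyte::Chars.handler = MyUpcaseHandler
    # From now on "foo".chars.upcase is routed through MyUpcaseHandler.upcase, but only while
    # $KCODE == 'UTF8'; for anything but UTF-8 you would also override Chars#handler, as noted above.
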
diff --git a/activesupport/lib/active_support/multibyte/generators/generate_tables.rb b/activesupport/lib/active_support/multibyte/generators/generate_tables.rb
new file mode 100644
index 0000000000..4045b94282
--- /dev/null
+++ b/activesupport/lib/active_support/multibyte/generators/generate_tables.rb
@@ -0,0 +1,141 @@
+#!/usr/bin/env ruby
+begin
+ require File.dirname(__FILE__) + '/../../../active_support'
+rescue IOError
+end
+require 'open-uri'
+require 'tmpdir'
+
+module ActiveSupport::Multibyte::Handlers #:nodoc:
+ class UnicodeDatabase #:nodoc:
+ def self.load
+ [Hash.new(Codepoint.new),[],{},{}]
+ end
+ end
+
+ class UnicodeTableGenerator #:nodoc:
+ BASE_URI = "http://www.unicode.org/Public/#{ActiveSupport::Multibyte::UNICODE_VERSION}/ucd/"
+ SOURCES = {
+ :codepoints => BASE_URI + 'UnicodeData.txt',
+ :composition_exclusion => BASE_URI + 'CompositionExclusions.txt',
+ :grapheme_break_property => BASE_URI + 'auxiliary/GraphemeBreakProperty.txt'
+ }
+
+ def initialize
+ @ucd = UnicodeDatabase.new
+
+ default = Codepoint.new
+ default.combining_class = 0
+ default.uppercase_mapping = 0
+ default.lowercase_mapping = 0
+ @ucd.codepoints = Hash.new(default)
+
+ @ucd.composition_exclusion = []
+ @ucd.composition_map = {}
+ @ucd.boundary = {}
+ end
+
+ def parse_codepoints(line)
+ codepoint = Codepoint.new
+ raise "Could not parse input." unless line =~ /^
+ ([0-9A-F]+); # code
+ ([^;]+); # name
+ ([A-Z]+); # general category
+ ([0-9]+); # canonical combining class
+ ([A-Z]+); # bidi class
+ (<([A-Z]*)>)? # decomposition type
+        ((\ ?[0-9A-F]+)*); # decomposition mapping
+ ([0-9]*); # decimal digit
+ ([0-9]*); # digit
+ ([^;]*); # numeric
+ ([YN]*); # bidi mirrored
+ ([^;]*); # unicode 1.0 name
+ ([^;]*); # iso comment
+ ([0-9A-F]*); # simple uppercase mapping
+ ([0-9A-F]*); # simple lowercase mapping
+ ([0-9A-F]*)$/ix # simple titlecase mapping
+ codepoint.code = $1.hex
+ #codepoint.name = $2
+ #codepoint.category = $3
+ codepoint.combining_class = Integer($4)
+ #codepoint.bidi_class = $5
+ codepoint.decomp_type = $7
+ codepoint.decomp_mapping = ($8=='') ? nil : $8.split.collect { |element| element.hex }
+ #codepoint.bidi_mirrored = ($13=='Y') ? true : false
+ codepoint.uppercase_mapping = ($16=='') ? 0 : $16.hex
+ codepoint.lowercase_mapping = ($17=='') ? 0 : $17.hex
+ #codepoint.titlecase_mapping = ($18=='') ? nil : $18.hex
+ @ucd.codepoints[codepoint.code] = codepoint
+ end
+
+ def parse_grapheme_break_property(line)
+ if line =~ /^([0-9A-F\.]+)\s*;\s*([\w]+)\s*#/
+ type = $2.downcase.intern
+ @ucd.boundary[type] ||= []
+ if $1.include? '..'
+ parts = $1.split '..'
+ @ucd.boundary[type] << (parts[0].hex..parts[1].hex)
+ else
+ @ucd.boundary[type] << $1.hex
+ end
+ end
+ end
+
+ def parse_composition_exclusion(line)
+ if line =~ /^([0-9A-F]+)/i
+ @ucd.composition_exclusion << $1.hex
+ end
+ end
+
+ def create_composition_map
+ @ucd.codepoints.each do |_, cp|
+ if !cp.nil? and cp.combining_class == 0 and cp.decomp_type.nil? and !cp.decomp_mapping.nil? and cp.decomp_mapping.length == 2 and @ucd[cp.decomp_mapping[0]].combining_class == 0 and !@ucd.composition_exclusion.include?(cp.code)
+ @ucd.composition_map[cp.decomp_mapping[0]] ||= {}
+ @ucd.composition_map[cp.decomp_mapping[0]][cp.decomp_mapping[1]] = cp.code
+ end
+ end
+ end
+
+ def normalize_boundary_map
+ @ucd.boundary.each do |k,v|
+ if [:lf, :cr].include? k
+ @ucd.boundary[k] = v[0]
+ end
+ end
+ end
+
+ def parse
+ SOURCES.each do |type, url|
+ filename = File.join(Dir.tmpdir, "#{url.split('/').last}")
+ unless File.exist?(filename)
+ $stderr.puts "Downloading #{url.split('/').last}"
+ File.open(filename, 'wb') do |target|
+ open(url) do |source|
+ source.each_line { |line| target.write line }
+ end
+ end
+ end
+ File.open(filename) do |file|
+ file.each_line { |line| send "parse_#{type}".intern, line }
+ end
+ end
+ create_composition_map
+ normalize_boundary_map
+ end
+
+ def dump_to(filename)
+ File.open(filename, 'wb') do |f|
+ f.write Marshal.dump([@ucd.codepoints, @ucd.composition_exclusion, @ucd.composition_map, @ucd.boundary])
+ end
+ end
+ end
+end
+
+if __FILE__ == $0
+ filename = ActiveSupport::Multibyte::Handlers::UnicodeDatabase.filename
+ generator = ActiveSupport::Multibyte::Handlers::UnicodeTableGenerator.new
+ generator.parse
+ print "Writing to: #{filename}"
+ generator.dump_to filename
+ puts " (#{File.size(filename)} bytes)"
+end
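
For reference, parse_codepoints expects the semicolon-separated format of UnicodeData.txt. Feeding it the standard line for U+00C5 (taken from the Unicode Character Database, shown here only as a worked example, and assuming the generator file above has been loaded) would populate a Codepoint roughly like this:

    gen = ActiveSupport::Multibyte::Handlers::UnicodeTableGenerator.new
    gen.parse_codepoints(
      "00C5;LATIN CAPITAL LETTER A WITH RING ABOVE;Lu;0;L;0041 030A;;;;N;LATIN CAPITAL LETTER A RING;;;00E5;"
    )
    # Resulting Codepoint: code 0xC5, combining_class 0, decomp_type nil (canonical),
    # decomp_mapping [0x41, 0x30A], uppercase_mapping 0 (empty field), lowercase_mapping 0xE5
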
diff --git a/activesupport/lib/active_support/multibyte/handlers/passthru_handler.rb b/activesupport/lib/active_support/multibyte/handlers/passthru_handler.rb
new file mode 100644
index 0000000000..b28e345585
--- /dev/null
+++ b/activesupport/lib/active_support/multibyte/handlers/passthru_handler.rb
@@ -0,0 +1,9 @@
+# Chars uses this handler when $KCODE is not set to 'UTF8'. Because this handler doesn't define any methods, all calls
+# will be forwarded to String.
+class ActiveSupport::Multibyte::Handlers::PassthruHandler
+
+  # Returns the original byte offset
+ def self.translate_offset(string, byte_offset) #:nodoc:
+ byte_offset
+ end
+end
\ No newline at end of file
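
The practical effect of the passthrough, in a small sketch ("Küßchen" is 7 characters in 9 UTF-8 bytes; Ruby 1.8 String methods count bytes):

    s = "Küßchen"

    $KCODE = 'NONE'
    s.chars.size  # => 9, PassthruHandler defines no #size, so the call falls through to String (bytes)

    $KCODE = 'UTF8'
    s.chars.size  # => 7, UTF8Handler counts codepoints
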
diff --git a/activesupport/lib/active_support/multibyte/handlers/utf8_handler.rb b/activesupport/lib/active_support/multibyte/handlers/utf8_handler.rb
new file mode 100644
index 0000000000..0928209d85
--- /dev/null
+++ b/activesupport/lib/active_support/multibyte/handlers/utf8_handler.rb
@@ -0,0 +1,447 @@
+# Contains all the handlers and helper classes
+module ActiveSupport::Multibyte::Handlers
+ class EncodingError < ArgumentError; end
+
+ class Codepoint #:nodoc:
+ attr_accessor :code, :combining_class, :decomp_type, :decomp_mapping, :uppercase_mapping, :lowercase_mapping
+ end
+
+ class UnicodeDatabase #:nodoc:
+ attr_accessor :codepoints, :composition_exclusion, :composition_map, :boundary
+
+ # Creates a new UnicodeDatabase instance and loads the database.
+ def initialize
+ begin
+ @codepoints, @composition_exclusion, @composition_map, @boundary = self.class.load
+ rescue Exception => e
+ raise IOError.new("Couldn't load the unicode tables for UTF8Handler (#{e.message}), handler is unusable")
+ end
+ @codepoints ||= Hash.new(Codepoint.new)
+ @composition_exclusion ||= []
+ @composition_map ||= {}
+ @boundary ||= {}
+
+ # Redefine the === method so we can write shorter rules for grapheme cluster breaks
+ @boundary.each do |k,_|
+ @boundary[k].instance_eval do
+ def ===(other)
+ detect { |i| i === other } ? true : false
+ end
+ end if @boundary[k].kind_of?(Array)
+ end
+ end
+
+ # Shortcut to ucd.codepoints[]
+ def [](index); @codepoints[index]; end
+
+ # Returns the directory in which the data files are stored
+ def self.dirname
+ File.dirname(__FILE__) + '/../../values/'
+ end
+
+ # Returns the filename for the data file for this version
+ def self.filename
+ File.expand_path File.join(dirname, "unicode_tables-#{VERSION}.dat")
+ end
+
+ # Loads the unicode database and returns all the internal objects of UnicodeDatabase
+ def self.load
+ begin
+ return load_file(filename)
+ rescue Exception
+ # If we can't load our own version, try the rest
+ Dir["#{dirname}/*.dat"].sort.each do |dat|
+ begin
+ return load_file(dat)
+ rescue Exception
+ end
+ end
+ end
+ raise IOError.new("Can't load a marshal file for your version of Ruby")
+ end
+
+    # Loads the data from the given file (not necessarily self.filename, so the fallback in self.load works)
+    def self.load_file(filename)
+      File.open(filename, 'rb') { |f| Marshal.load f.read }
+ end
+ end
+
+ # UTF8Handler implements Unicode aware operations for strings, these operations will be used by the Chars
+ # proxy when $KCODE is set to 'UTF8'.
+ class UTF8Handler
+ # Hangul character boundaries and properties
+ HANGUL_SBASE = 0xAC00
+ HANGUL_LBASE = 0x1100
+ HANGUL_VBASE = 0x1161
+ HANGUL_TBASE = 0x11A7
+ HANGUL_LCOUNT = 19
+ HANGUL_VCOUNT = 21
+ HANGUL_TCOUNT = 28
+ HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT
+ HANGUL_SCOUNT = 11172
+ HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT
+ HANGUL_JAMO_FIRST = 0x1100
+ HANGUL_JAMO_LAST = 0x11FF
+
+ # All the unicode whitespace
+ UNICODE_WHITESPACE = [
+ (0x0009..0x000D).to_a, # White_Space # Cc [5] <control-0009>..<control-000D>
+ 0x0020, # White_Space # Zs SPACE
+ 0x0085, # White_Space # Cc <control-0085>
+ 0x00A0, # White_Space # Zs NO-BREAK SPACE
+ 0x1680, # White_Space # Zs OGHAM SPACE MARK
+ 0x180E, # White_Space # Zs MONGOLIAN VOWEL SEPARATOR
+ (0x2000..0x200A).to_a, # White_Space # Zs [11] EN QUAD..HAIR SPACE
+ 0x2028, # White_Space # Zl LINE SEPARATOR
+ 0x2029, # White_Space # Zp PARAGRAPH SEPARATOR
+ 0x202F, # White_Space # Zs NARROW NO-BREAK SPACE
+ 0x205F, # White_Space # Zs MEDIUM MATHEMATICAL SPACE
+ 0x3000, # White_Space # Zs IDEOGRAPHIC SPACE
+ ].flatten.freeze
+
+    # The BOM (byte order mark) can also be seen as whitespace: it's a non-rendering character used to distinguish
+    # between little and big endian byte order. Byte order is not an issue in UTF-8, so the BOM must be ignored.
+ UNICODE_LEADERS_AND_TRAILERS = UNICODE_WHITESPACE + [65279] # ZERO-WIDTH NO-BREAK SPACE aka BOM
+
+ # Borrowed from the Kconv library by Shinji KONO - (also as seen on the W3C site)
+ UTF8_PAT = /\A(?:
+ [\x00-\x7f] |
+ [\xc2-\xdf] [\x80-\xbf] |
+ \xe0 [\xa0-\xbf] [\x80-\xbf] |
+ [\xe1-\xef] [\x80-\xbf] [\x80-\xbf] |
+ \xf0 [\x90-\xbf] [\x80-\xbf] [\x80-\xbf] |
+ [\xf1-\xf3] [\x80-\xbf] [\x80-\xbf] [\x80-\xbf] |
+ \xf4 [\x80-\x8f] [\x80-\xbf] [\x80-\xbf]
+ )*\z/xn
+
+ # Returns a regular expression pattern that matches the passed Unicode codepoints
+ def self.codepoints_to_pattern(array_of_codepoints) #:nodoc:
+ array_of_codepoints.collect{ |e| [e].pack 'U*' }.join('|')
+ end
+ UNICODE_TRAILERS_PAT = /(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+\Z/
+ UNICODE_LEADERS_PAT = /\A(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+/
+
+ class << self
+
+ # ///
+ # /// BEGIN String method overrides
+ # ///
+
+      # Inserts the passed string at the specified codepoint offset
+ def insert(str, offset, fragment)
+ str.replace(
+ u_unpack(str).insert(
+ offset,
+ u_unpack(fragment)
+ ).flatten.pack('U*')
+ )
+ end
+
+ # Returns the position of the passed argument in the string, counting in codepoints
+ def index(str, *args)
+ bidx = str.index(*args)
+ bidx ? (u_unpack(str.slice(0...bidx)).size) : nil
+ end
+
+ # Does Unicode-aware rstrip
+ def rstrip(str)
+ str.gsub(UNICODE_TRAILERS_PAT, '')
+ end
+
+ # Does Unicode-aware lstrip
+ def lstrip(str)
+ str.gsub(UNICODE_LEADERS_PAT, '')
+ end
+
+      # Removes leading and trailing whitespace
+ def strip(str)
+ str.gsub(UNICODE_LEADERS_PAT, '').gsub(UNICODE_TRAILERS_PAT, '')
+ end
+
+ # Returns the number of codepoints in the string
+ def size(str)
+ u_unpack(str).size
+ end
+ alias_method :length, :size
+
+ # Reverses codepoints in the string.
+ def reverse(str)
+ u_unpack(str).reverse.pack('U*')
+ end
+
+      # Implements Unicode-aware slice with codepoints. Slicing on a single point returns the codepoint for that
+      # character.
+ def slice(str, *args)
+ if (args.size == 2 && args.first.is_a?(Range))
+ raise TypeError, 'cannot convert Range into Integer' # Do as if we were native
+ elsif args[0].kind_of? Range
+ cps = u_unpack(str).slice(*args)
+ cps.nil? ? nil : cps.pack('U*')
+ elsif args[0].kind_of? Numeric
+ u_unpack(str)[args[0]]
+ else
+ str.slice(*args)
+ end
+ end
+ alias_method :[], :slice
+
+ # Convert characters in the string to uppercase
+ def upcase(str); to_case :uppercase_mapping, str; end
+
+ # Convert characters in the string to lowercase
+ def downcase(str); to_case :lowercase_mapping, str; end
+
+ # Returns a copy of +str+ with the first character converted to uppercase and the remainder to lowercase
+ def capitalize(str)
+ upcase(slice(str, 0..0)) + downcase(slice(str, 1..-1) || '')
+ end
+
+ # ///
+ # /// Extra String methods for unicode operations
+ # ///
+
+ # Returns the KC normalization of the string by default. NFKC is considered the best normalization form for
+ # passing strings to databases and validations.
+ #
+ # * <tt>str</tt>: The string to perform normalization on.
+ # * <tt>form</tt>: The form you want to normalize in. Should be one of the following: :c, :kc, :d or :kd.
+ def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM)
+ # See http://www.unicode.org/reports/tr15, Table 1
+ codepoints = u_unpack(str)
+ case form
+ when :d
+ reorder_characters(decompose_codepoints(:canonical, codepoints))
+ when :c
+ compose_codepoints reorder_characters(decompose_codepoints(:canonical, codepoints))
+ when :kd
+ reorder_characters(decompose_codepoints(:compatability, codepoints))
+ when :kc
+ compose_codepoints reorder_characters(decompose_codepoints(:compatability, codepoints))
+ else
+ raise ArgumentError, "#{form} is not a valid normalization variant", caller
+ end.pack('U*')
+ end
+
+ # Perform decomposition on the characters in the string
+ def decompose(str)
+ decompose_codepoints(:canonical, u_unpack(str)).pack('U*')
+ end
+
+ # Perform composition on the characters in the string
+ def compose(str)
+ compose_codepoints u_unpack(str).pack('U*')
+ end
+
+ # ///
+ # /// BEGIN Helper methods for unicode operation
+ # ///
+
+ # Used to translate an offset from bytes to characters, for instance one received from a regular expression match
+ def translate_offset(str, byte_offset)
+ return 0 if str == ''
+ return nil if byte_offset.nil?
+ chunk = str[0..byte_offset]
+ begin
+ begin
+ chunk.unpack('U*').length - 1
+ rescue ArgumentError => e
+ chunk = str[0..(byte_offset+=1)]
+ # Stop retrying at the end of the string
+ raise e unless byte_offset < chunk.length
+ # We damaged a character, retry
+ retry
+ end
+ # Catch the ArgumentError so we can throw our own
+ rescue ArgumentError
+ raise EncodingError.new('malformed UTF-8 character')
+ end
+ end
+
+ # Checks if the string is valid UTF8.
+ def consumes?(str)
+ # Unpack is a little bit faster than regular expressions
+ begin
+ str.unpack('U*')
+ true
+ rescue ArgumentError
+ false
+ end
+ end
+
+ # Returns the number of grapheme clusters in the string. This method is very likely to be moved or renamed
+ # in future versions.
+ def g_length(str)
+ g_unpack(str).length
+ end
+
+ # Strips all the non-utf-8 bytes from the string resulting in a valid utf-8 string
+ def tidy_bytes(str)
+ str.split(//u).reject { |c| !UTF8_PAT.match(c) }.join
+ end
+
+ protected
+
+ # Detect whether the codepoint is in a certain character class. Primarily used by the
+ # grapheme cluster support.
+ def in_char_class?(codepoint, classes)
+ classes.detect { |c| UCD.boundary[c] === codepoint } ? true : false
+ end
+
+        # Unpack the string at codepoint boundaries
+ def u_unpack(str)
+ begin
+ str.unpack 'U*'
+ rescue ArgumentError
+ raise EncodingError.new('malformed UTF-8 character')
+ end
+ end
+
+ # Unpack the string at grapheme boundaries instead of codepoint boundaries
+ def g_unpack(str)
+ codepoints = u_unpack(str)
+ unpacked = []
+ pos = 0
+ marker = 0
+ eoc = codepoints.length
+ while(pos < eoc)
+ pos += 1
+ previous = codepoints[pos-1]
+ current = codepoints[pos]
+ if (
+ # CR X LF
+ one = ( previous == UCD.boundary[:cr] and current == UCD.boundary[:lf] ) or
+ # L X (L|V|LV|LVT)
+ two = ( UCD.boundary[:l] === previous and in_char_class?(current, [:l,:v,:lv,:lvt]) ) or
+ # (LV|V) X (V|T)
+ three = ( in_char_class?(previous, [:lv,:v]) and in_char_class?(current, [:v,:t]) ) or
+ # (LVT|T) X (T)
+ four = ( in_char_class?(previous, [:lvt,:t]) and UCD.boundary[:t] === current ) or
+ # X Extend
+ five = (UCD.boundary[:extend] === current)
+ )
+ else
+ unpacked << codepoints[marker..pos-1]
+ marker = pos
+ end
+ end
+ unpacked
+ end
+
+ # Reverse operation of g_unpack
+ def g_pack(unpacked)
+ unpacked.flatten
+ end
+
+ # Convert characters to a different case
+ def to_case(way, str)
+ u_unpack(str).map do |codepoint|
+ cp = UCD[codepoint]
+ unless cp.nil?
+ ncp = cp.send(way)
+ ncp > 0 ? ncp : codepoint
+ else
+ codepoint
+ end
+ end.pack('U*')
+ end
+
+ # Re-order codepoints so the string becomes canonical
+ def reorder_characters(codepoints)
+        length = codepoints.length - 1
+ pos = 0
+ while pos < length do
+ cp1, cp2 = UCD[codepoints[pos]], UCD[codepoints[pos+1]]
+ if (cp1.combining_class > cp2.combining_class) && (cp2.combining_class > 0)
+ codepoints[pos..pos+1] = cp2.code, cp1.code
+ pos += (pos > 0 ? -1 : 1)
+ else
+ pos += 1
+ end
+ end
+ codepoints
+ end
+
+ # Decompose composed characters to the decomposed form
+ def decompose_codepoints(type, codepoints)
+ codepoints.inject([]) do |decomposed, cp|
+ # if it's a hangul syllable starter character
+ if HANGUL_SBASE <= cp and cp < HANGUL_SLAST
+ sindex = cp - HANGUL_SBASE
+ ncp = [] # new codepoints
+ ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT
+ ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT
+ tindex = sindex % HANGUL_TCOUNT
+ ncp << (HANGUL_TBASE + tindex) unless tindex == 0
+ decomposed.concat ncp
+            # if the codepoint is decomposable with the current decomposition type
+ elsif (ncp = UCD[cp].decomp_mapping) and (!UCD[cp].decomp_type || type == :compatability)
+ decomposed.concat decompose_codepoints(type, ncp.dup)
+ else
+ decomposed << cp
+ end
+ end
+ end
+
+ # Compose decomposed characters to the composed form
+ def compose_codepoints(codepoints)
+ pos = 0
+ eoa = codepoints.length - 1
+ starter_pos = 0
+ starter_char = codepoints[0]
+ previous_combining_class = -1
+ while pos < eoa
+ pos += 1
+ lindex = starter_char - HANGUL_LBASE
+ # -- Hangul
+ if 0 <= lindex and lindex < HANGUL_LCOUNT
+ vindex = codepoints[starter_pos+1] - HANGUL_VBASE rescue vindex = -1
+ if 0 <= vindex and vindex < HANGUL_VCOUNT
+ tindex = codepoints[starter_pos+2] - HANGUL_TBASE rescue tindex = -1
+ if 0 <= tindex and tindex < HANGUL_TCOUNT
+ j = starter_pos + 2
+ eoa -= 2
+ else
+ tindex = 0
+ j = starter_pos + 1
+ eoa -= 1
+ end
+ codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE
+ end
+ starter_pos += 1
+ starter_char = codepoints[starter_pos]
+ # -- Other characters
+ else
+ current_char = codepoints[pos]
+ current = UCD[current_char]
+ if current.combining_class > previous_combining_class
+ if ref = UCD.composition_map[starter_char]
+ composition = ref[current_char]
+ else
+ composition = nil
+ end
+ unless composition.nil?
+ codepoints[starter_pos] = composition
+ starter_char = composition
+ codepoints.delete_at pos
+ eoa -= 1
+ pos -= 1
+ previous_combining_class = -1
+ else
+ previous_combining_class = current.combining_class
+ end
+ else
+ previous_combining_class = current.combining_class
+ end
+ if current.combining_class == 0
+ starter_pos = pos
+ starter_char = codepoints[pos]
+ end
+ end
+ end
+ codepoints
+ end
+
+      # Unicode Database
+ UCD = UnicodeDatabase.new
+ end
+ end
+end
\ No newline at end of file
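
A short sketch of the handler in use, assuming $KCODE is 'UTF8' and the marshalled tables load. For "über cool", "ü" takes two bytes, so "cool" starts at byte offset 6 but character offset 5:

    handler = ActiveSupport::Multibyte::Handlers::UTF8Handler

    str = "über cool"
    str =~ /cool/                     # => 6, byte offset from plain String#=~
    handler.translate_offset(str, 6)  # => 5, the corresponding character offset
    str.chars =~ /cool/               # => 5, Chars#=~ translates the offset for you

    handler.size(str)                 # => 9 codepoints (10 bytes)
    handler.upcase(str)               # => "ÜBER COOL"
    handler.slice(str, 0..3)          # => "über"
    handler.normalize("ﬁt", :kc)      # => "fit", the ligature is folded by the compatibility forms
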
diff --git a/activesupport/lib/active_support/multibyte/handlers/utf8_handler_proc.rb b/activesupport/lib/active_support/multibyte/handlers/utf8_handler_proc.rb
new file mode 100644
index 0000000000..f551af6cc7
--- /dev/null
+++ b/activesupport/lib/active_support/multibyte/handlers/utf8_handler_proc.rb
@@ -0,0 +1,44 @@
+# Methods in this handler call functions in the utf8proc ruby extension. These are significantly faster than the
+# pure ruby versions. Chars automatically uses this handler when it can load the utf8proc extension. For
+# documentation on handler methods see UTF8Handler.
+class ActiveSupport::Multibyte::Handlers::UTF8HandlerProc < ActiveSupport::Multibyte::Handlers::UTF8Handler
+
+ class << self
+ def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM) #:nodoc:
+ codepoints = str.unpack('U*')
+ case form
+ when :d
+ utf8map(str, :stable)
+ when :c
+ utf8map(str, :stable, :compose)
+ when :kd
+ utf8map(str, :stable, :compat)
+ when :kc
+ utf8map(str, :stable, :compose, :compat)
+ else
+ raise ArgumentError, "#{form} is not a valid normalization variant", caller
+ end
+ end
+
+ def decompose(str) #:nodoc:
+ utf8map(str, :stable)
+ end
+
+    def downcase(str) #:nodoc:
+ utf8map(str, :casefold)
+ end
+
+ protected
+
+ def utf8map(str, *option_array) #:nodoc:
+ options = 0
+ option_array.each do |option|
+ flag = Utf8Proc::Options[option]
+        raise ArgumentError, "Unknown argument given to utf8map." unless flag
+ options |= flag
+ end
+ return Utf8Proc::utf8map(str, options)
+ end
+ end
+end
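
When the utf8proc_native extension can be loaded, the begin/rescue at the bottom of chars.rb silently selects this handler, so the same calls run through C; only normalize, decompose and downcase are overridden here, everything else is inherited from UTF8Handler. A sketch, assuming the utf8proc Ruby extension is installed:

    require 'utf8proc_native'

    handler = ActiveSupport::Multibyte::Handlers::UTF8HandlerProc
    handler.normalize([0x0065, 0x0301].pack('U*'), :c).unpack('U*')  # => [233] (0xE9, "é"), same result as the pure Ruby handler
    handler.downcase("ÜBER")                                         # => "über", via the :casefold mapping
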
diff --git a/activesupport/lib/active_support/values/unicode_tables-1.8.4.dat b/activesupport/lib/active_support/values/unicode_tables-1.8.4.dat
new file mode 100644
index 0000000000..13a081388a
--- /dev/null
+++ b/activesupport/lib/active_support/values/unicode_tables-1.8.4.dat
Binary files differ
diff --git a/activesupport/lib/active_support/values/unicode_tables-1.8.5.dat b/activesupport/lib/active_support/values/unicode_tables-1.8.5.dat
new file mode 100644
index 0000000000..7b96885f32
--- /dev/null
+++ b/activesupport/lib/active_support/values/unicode_tables-1.8.5.dat
Binary files differ
diff --git a/activesupport/test/multibyte_chars_test.rb b/activesupport/test/multibyte_chars_test.rb
new file mode 100644
index 0000000000..e5ad9d26ee
--- /dev/null
+++ b/activesupport/test/multibyte_chars_test.rb
@@ -0,0 +1,163 @@
+require File.dirname(__FILE__) + '/abstract_unit'
+
+$KCODE = 'UTF8'
+
+class CharsTest < Test::Unit::TestCase
+
+ def setup
+ @s = {
+ :utf8 => "Abcd Блå ffi блa 埋",
+ :ascii => "asci ias c iia s",
+ :bytes => "\270\236\010\210\245"
+ }
+ end
+
+ def test_sanity
+ @s.each do |t, s|
+      assert s.respond_to?(:chars), "All strings should have the chars method (#{t})"
+      assert s.respond_to?(:to_s), "All strings should have the to_s method (#{t})"
+ assert_kind_of ActiveSupport::Multibyte::Chars, s.chars, "#chars should return an instance of Chars (#{t})"
+ end
+ end
+
+ def test_comparability
+ @s.each do |t, s|
+ assert_equal s, s.chars.to_s, "Chars#to_s should return enclosed string unchanged"
+ end
+ assert_nothing_raised do
+ assert_equal "a", "a", "Normal string comparisons should be unaffected"
+ assert_not_equal "a", "b", "Normal string comparisons should be unaffected"
+ assert_not_equal "a".chars, "b".chars, "Chars objects should be comparable"
+ assert_equal "a".chars, "A".downcase.chars, "Chars objects should be comparable to each other"
+ assert_equal "a".chars, "A".downcase, "Chars objects should be comparable to strings coming from elsewhere"
+ end
+
+ assert !@s[:utf8].eql?(@s[:utf8].chars), "Strict comparison is not supported"
+ assert_equal @s[:utf8], @s[:utf8].chars, "Chars should be compared by their enclosed string"
+
+ other_string = @s[:utf8].dup
+ assert_equal other_string, @s[:utf8].chars, "Chars should be compared by their enclosed string"
+ assert_equal other_string.chars, @s[:utf8].chars, "Chars should be compared by their enclosed string"
+
+ strings = ['builder'.chars, 'armor'.chars, 'zebra'.chars]
+ strings.sort!
+ assert_equal ['armor', 'builder', 'zebra'], strings, "Chars should be sortable based on their enclosed string"
+
+    # This leads to a stack level too deep error (SystemStackError) if the comparison is not wired properly
+ assert_raise(NameError) do
+ Chars
+ end
+ end
+
+ def test_utf8?
+ assert @s[:utf8].is_utf8?, "UTF-8 strings are UTF-8"
+ assert @s[:ascii].is_utf8?, "All ASCII strings are also valid UTF-8"
+ assert !@s[:bytes].is_utf8?, "This bytestring isn't UTF-8"
+ end
+
+ # The test for the following methods are defined here because they can only be defined on the Chars class for
+ # various reasons
+
+ def test_gsub
+ assert_equal 'éxa', 'éda'.chars.gsub(/d/, 'x')
+ with_kcode('none') do
+ assert_equal 'éxa', 'éda'.chars.gsub(/d/, 'x')
+ end
+ end
+
+ def test_split
+ word = "efficient"
+ chars = ["e", "ffi", "c", "i", "e", "n", "t"]
+ assert_equal chars, word.split(//)
+ assert_equal chars, word.chars.split(//)
+ assert_kind_of ActiveSupport::Multibyte::Chars, word.chars.split(//).first, "Split should return Chars instances"
+ end
+
+ def test_regexp
+ with_kcode('none') do
+ assert_equal 12, (@s[:utf8].chars =~ /ffi/),
+ "Regex matching should be bypassed to String"
+ end
+ with_kcode('UTF8') do
+ assert_equal 9, (@s[:utf8].chars =~ /ffi/),
+ "Regex matching should be unicode aware"
+ end
+ end
+
+ def test_pragma
+ with_kcode('UTF8') do
+ assert " ".chars.send(:utf8_pragma?), "UTF8 pragma should be on because KCODE is UTF8"
+ end
+ with_kcode('none') do
+ assert !" ".chars.send(:utf8_pragma?), "UTF8 pragma should be off"
+ end
+ end
+
+ def test_handler_setting
+ handler = ''.chars.handler
+
+ ActiveSupport::Multibyte::Chars.handler = :first
+ assert_equal :first, ''.chars.handler
+ ActiveSupport::Multibyte::Chars.handler = :second
+ assert_equal :second, ''.chars.handler
+ assert_raise(NoMethodError) do
+ ''.chars.handler.split
+ end
+
+ ActiveSupport::Multibyte::Chars.handler = handler
+ end
+
+ def test_method_chaining
+ assert_kind_of ActiveSupport::Multibyte::Chars, ''.chars.downcase
+ assert_kind_of ActiveSupport::Multibyte::Chars, ''.chars.strip, "Strip should return a Chars object"
+ assert_kind_of ActiveSupport::Multibyte::Chars, ''.chars.downcase.strip, "The Chars object should be " +
+ "forwarded down the call path for chaining"
+ assert_equal 'foo', " FOO ".chars.normalize.downcase.strip, "The Chars that results from the " +
+ " operations should be comparable to the string value of the result"
+ end
+
+ def test_passthrough_on_kcode
+ # The easiest way to check if the passthrough is in place is through #size
+    with_kcode('none') do
+ assert_equal 26, @s[:utf8].chars.size
+ end
+ with_kcode('UTF8') do
+ assert_equal 17, @s[:utf8].chars.size
+ end
+ end
+
+ def test_destructiveness
+ # Note that we're testing the destructiveness here and not the correct behaviour of the methods
+ str = 'ac'
+ str.chars.insert(1, 'b')
+ assert_equal 'abc', str, 'Insert should be destructive for a string'
+
+ str = 'ac'
+ str.chars.reverse!
+ assert_equal 'ca', str, 'reverse! should be destructive for a string'
+ end
+
+ def test_resilience
+ assert_nothing_raised do
+ assert_equal 1, @s[:bytes].chars.size, "There's only one valid utf-8 byte in the string"
+ end
+ assert_nothing_raised do
+ assert_equal "\010", @s[:bytes].chars.reverse, "There's only one valid utf-8 byte in the string"
+ end
+ assert_nothing_raised do
+ @s[:bytes].chars.reverse!
+ assert_equal "\010", @s[:bytes], "There's only one valid utf-8 byte in the string"
+ end
+ end
+
+ protected
+
+ def with_kcode(kcode)
+ old_kcode, $KCODE = $KCODE, kcode
+ begin
+ yield
+ ensure
+ $KCODE = old_kcode
+ end
+ end
+end
diff --git a/activesupport/test/multibyte_conformance.rb b/activesupport/test/multibyte_conformance.rb
new file mode 100644
index 0000000000..dfd77bd6e1
--- /dev/null
+++ b/activesupport/test/multibyte_conformance.rb
@@ -0,0 +1,141 @@
+require File.dirname(__FILE__) + '/abstract_unit'
+require 'open-uri'
+
+$KCODE = 'UTF8'
+UNIDATA_URL = "http://www.unicode.org/Public/#{ActiveSupport::Multibyte::UNICODE_VERSION}/ucd"
+UNIDATA_FILE = '/NormalizationTest.txt'
+CACHE_DIR = File.dirname(__FILE__) + '/cache'
+
+class Downloader
+ def self.download(from, to)
+ unless File.exist?(to)
+ $stderr.puts "Downloading #{from} to #{to}"
+ unless File.exists?(File.dirname(to))
+ system "mkdir -p #{File.dirname(to)}"
+ end
+ open(from) do |source|
+ File.open(to, 'w') do |target|
+ source.each_line do |l|
+ target.write l
+ end
+ end
+ end
+ end
+ end
+end
+
+class String
+ # Unicode Inspect returns the codepoints of the string in hex
+ def ui
+ "#{self} " + ("[%s]" % unpack("U*").map{|cp| cp.to_s(16) }.join(' '))
+ end unless ''.respond_to?(:ui)
+end
+
+Dir.mkdir(CACHE_DIR) unless File.exists?(CACHE_DIR)
+Downloader.download(UNIDATA_URL + UNIDATA_FILE, CACHE_DIR + UNIDATA_FILE)
+
+module ConformanceTest
+ def test_normalizations_C
+ each_line_of_norm_tests do |*cols|
+ col1, col2, col3, col4, col5, comment = *cols
+
+ # CONFORMANCE:
+ # 1. The following invariants must be true for all conformant implementations
+ #
+ # NFC
+ # c2 == NFC(c1) == NFC(c2) == NFC(c3)
+ assert_equal col2.ui, @handler.normalize(col1, :c).ui, "Form C - Col 2 has to be NFC(1) - #{comment}"
+ assert_equal col2.ui, @handler.normalize(col2, :c).ui, "Form C - Col 2 has to be NFC(2) - #{comment}"
+ assert_equal col2.ui, @handler.normalize(col3, :c).ui, "Form C - Col 2 has to be NFC(3) - #{comment}"
+ #
+ # c4 == NFC(c4) == NFC(c5)
+ assert_equal col4.ui, @handler.normalize(col4, :c).ui, "Form C - Col 4 has to be C(4) - #{comment}"
+ assert_equal col4.ui, @handler.normalize(col5, :c).ui, "Form C - Col 4 has to be C(5) - #{comment}"
+ end
+ end
+
+ def test_normalizations_D
+ each_line_of_norm_tests do |*cols|
+ col1, col2, col3, col4, col5, comment = *cols
+ #
+ # NFD
+ # c3 == NFD(c1) == NFD(c2) == NFD(c3)
+ assert_equal col3.ui, @handler.normalize(col1, :d).ui, "Form D - Col 3 has to be NFD(1) - #{comment}"
+ assert_equal col3.ui, @handler.normalize(col2, :d).ui, "Form D - Col 3 has to be NFD(2) - #{comment}"
+ assert_equal col3.ui, @handler.normalize(col3, :d).ui, "Form D - Col 3 has to be NFD(3) - #{comment}"
+ # c5 == NFD(c4) == NFD(c5)
+ assert_equal col5.ui, @handler.normalize(col4, :d).ui, "Form D - Col 5 has to be NFD(4) - #{comment}"
+ assert_equal col5.ui, @handler.normalize(col5, :d).ui, "Form D - Col 5 has to be NFD(5) - #{comment}"
+ end
+ end
+
+ def test_normalizations_KC
+ each_line_of_norm_tests do | *cols |
+ col1, col2, col3, col4, col5, comment = *cols
+ #
+ # NFKC
+ # c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)
+      assert_equal col4.ui, @handler.normalize(col1, :kc).ui, "Form KC - Col 4 has to be NFKC(1) - #{comment}"
+      assert_equal col4.ui, @handler.normalize(col2, :kc).ui, "Form KC - Col 4 has to be NFKC(2) - #{comment}"
+      assert_equal col4.ui, @handler.normalize(col3, :kc).ui, "Form KC - Col 4 has to be NFKC(3) - #{comment}"
+      assert_equal col4.ui, @handler.normalize(col4, :kc).ui, "Form KC - Col 4 has to be NFKC(4) - #{comment}"
+      assert_equal col4.ui, @handler.normalize(col5, :kc).ui, "Form KC - Col 4 has to be NFKC(5) - #{comment}"
+ end
+ end
+
+ def test_normalizations_KD
+ each_line_of_norm_tests do | *cols |
+ col1, col2, col3, col4, col5, comment = *cols
+ #
+ # NFKD
+ # c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)
+ assert_equal col5.ui, @handler.normalize(col1, :kd).ui, "Form KD - Col 5 has to be NFKD(1) - #{comment}"
+ assert_equal col5.ui, @handler.normalize(col2, :kd).ui, "Form KD - Col 5 has to be NFKD(2) - #{comment}"
+ assert_equal col5.ui, @handler.normalize(col3, :kd).ui, "Form KD - Col 5 has to be NFKD(3) - #{comment}"
+ assert_equal col5.ui, @handler.normalize(col4, :kd).ui, "Form KD - Col 5 has to be NFKD(4) - #{comment}"
+ assert_equal col5.ui, @handler.normalize(col5, :kd).ui, "Form KD - Col 5 has to be NFKD(5) - #{comment}"
+ end
+ end
+
+ protected
+ def each_line_of_norm_tests(&block)
+ lines = 0
+ max_test_lines = 0 # Don't limit below 38, because that's the header of the testfile
+ File.open(File.dirname(__FILE__) + '/cache' + UNIDATA_FILE, 'r') do | f |
+ until f.eof? || (max_test_lines > 38 and lines > max_test_lines)
+ lines += 1
+ line = f.gets.chomp!
+ next if (line.empty? || line =~ /^\#/)
+
+ cols, comment = line.split("#")
+ cols = cols.split(";").map{|e| e.strip}.reject{|e| e.empty? }
+ next unless cols.length == 5
+
+ # codepoints are in hex in the test suite, pack wants them as integers
+ cols.map!{|c| c.split.map{|codepoint| codepoint.to_i(16)}.pack("U*") }
+ cols << comment
+
+ yield(*cols)
+ end
+ end
+ end
+end
+
+begin
+ require_library_or_gem('utf8proc_native')
+  require 'active_support/multibyte/handlers/utf8_handler_proc'
+ class ConformanceTestProc < Test::Unit::TestCase
+ include ConformanceTest
+ def setup
+ @handler = ::ActiveSupport::Multibyte::Handlers::UTF8HandlerProc
+ end
+ end
+rescue LoadError
+end
+
+class ConformanceTestPure < Test::Unit::TestCase
+ include ConformanceTest
+ def setup
+ @handler = ::ActiveSupport::Multibyte::Handlers::UTF8Handler
+ end
+end
\ No newline at end of file
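
To make the c1..c5 invariants above concrete: the NormalizationTest.txt row for ANGSTROM SIGN is 212B;00C5;0041 030A;00C5;0041 030A, so the pure Ruby handler should behave as follows (standard Unicode data, shown only as a worked example):

    handler = ActiveSupport::Multibyte::Handlers::UTF8Handler
    c1 = [0x212B].pack('U')  # ANGSTROM SIGN

    handler.normalize(c1, :c).unpack('U*')  # => [197]     (0xC5, the composed Å: columns c2 and c4)
    handler.normalize(c1, :d).unpack('U*')  # => [65, 778] (0x41 0x30A, A + COMBINING RING ABOVE: columns c3 and c5)
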
diff --git a/activesupport/test/multibyte_handler_test.rb b/activesupport/test/multibyte_handler_test.rb
new file mode 100644
index 0000000000..08291c1212
--- /dev/null
+++ b/activesupport/test/multibyte_handler_test.rb
@@ -0,0 +1,261 @@
+require File.dirname(__FILE__) + '/abstract_unit'
+
+$KCODE = 'UTF8'
+
+class String
+ # Unicode Inspect returns the codepoints of the string in hex
+ def ui
+ "#{self} " + ("[%s]" % unpack("U*").map{|cp| cp.to_s(16) }.join(' '))
+ end unless ''.respond_to?(:ui)
+end
+
+module UTF8HandlingTest
+
+ def common_setup
+    # This is an ASCII string with some Russian words and a ligature. It's nicely calibrated, because
+ # slicing it at some specific bytes will kill your characters if you use standard Ruby routines.
+ # It has both capital and standard letters, so that we can test case conversions easily.
+    # It has 26 characters, and 28 when the ligature gets split during normalization.
+ @string = "Abcd Блå ffi бла бла бла бла"
+ @string_kd = "Abcd Блå ffi бла бла бла бла"
+ @string_kc = "Abcd Блå ffi бла бла бла бла"
+ @string_c = "Abcd Блå ffi бла бла бла бла"
+ @string_d = "Abcd Блå ffi бла бла бла бла"
+ @bytestring = "\270\236\010\210\245" # Not UTF-8
+
+ # Characters from the character classes as described in UAX #29
+ @character_from_class = {
+ :l => 0x1100, :v => 0x1160, :t => 0x11A8, :lv => 0xAC00, :lvt => 0xAC01, :cr => 0x000D, :lf => 0x000A,
+ :extend => 0x094D, :n => 0x64
+ }
+ end
+
+ def test_utf8_recognition
+ assert ActiveSupport::Multibyte::Handlers::UTF8Handler.consumes?(@string),
+ "Should recognize as a valid UTF-8 string"
+ assert !ActiveSupport::Multibyte::Handlers::UTF8Handler.consumes?(@bytestring), "This is bytestring, not UTF-8"
+ end
+
+ def test_simple_normalization
+ # Normalization of DEVANAGARI LETTER QA breaks when composition exclusion isn't used correctly
+ assert_equal [0x915, 0x93c].pack('U*').ui, [0x915, 0x93c].pack('U*').chars.normalize(:c).to_s.ui
+
+ null_byte_str = "Test\0test"
+
+ assert_equal '', @handler.normalize(''), "Empty string should not break things"
+ assert_equal null_byte_str.ui, @handler.normalize(null_byte_str, :kc).ui, "Null byte should remain"
+ assert_equal null_byte_str.ui, @handler.normalize(null_byte_str, :c).ui, "Null byte should remain"
+ assert_equal null_byte_str.ui, @handler.normalize(null_byte_str, :d).ui, "Null byte should remain"
+ assert_equal null_byte_str.ui, @handler.normalize(null_byte_str, :kd).ui, "Null byte should remain"
+ assert_equal null_byte_str.ui, @handler.decompose(null_byte_str).ui, "Null byte should remain"
+ assert_equal null_byte_str.ui, @handler.compose(null_byte_str).ui, "Null byte should remain"
+
+ comp_str = [
+      44,  # COMMA (U+002C)
+      307, # LATIN SMALL LIGATURE IJ (U+0133)
+      328, # LATIN SMALL LETTER N WITH CARON (U+0148)
+      323  # LATIN CAPITAL LETTER N WITH ACUTE (U+0143)
+ ].pack("U*")
+ norm_str_KC = [44,105,106,328,323].pack("U*")
+ norm_str_C = [44,307,328,323].pack("U*")
+ norm_str_D = [44,307,110,780,78,769].pack("U*")
+ norm_str_KD = [44,105,106,110,780,78,769].pack("U*")
+
+ assert_equal norm_str_KC.ui, @handler.normalize(comp_str, :kc).ui, "Should normalize KC"
+ assert_equal norm_str_C.ui, @handler.normalize(comp_str, :c).ui, "Should normalize C"
+ assert_equal norm_str_D.ui, @handler.normalize(comp_str, :d).ui, "Should normalize D"
+ assert_equal norm_str_KD.ui, @handler.normalize(comp_str, :kd).ui, "Should normalize KD"
+
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.normalize(@bytestring) }
+ end
+
+  # Test for the Public Review Issue #29, bad explanation of composition might lead to a
+ # bad implementation: http://www.unicode.org/review/pr-29.html
+ def test_normalization_C_pri_29
+ [
+ [0x0B47, 0x0300, 0x0B3E],
+ [0x1100, 0x0300, 0x1161]
+ ].map { |c| c.pack('U*') }.each do |c|
+ assert_equal c.ui, @handler.normalize(c, :c).ui, "Composition is implemented incorrectly"
+ end
+ end
+
+ def test_casefolding
+ simple_str = "abCdef"
+ simple_str_upcase = "ABCDEF"
+ simple_str_downcase = "abcdef"
+
+ assert_equal '', @handler.downcase(@handler.upcase('')), "Empty string should not break things"
+ assert_equal simple_str_upcase, @handler.upcase(simple_str), "should upcase properly"
+ assert_equal simple_str_downcase, @handler.downcase(simple_str), "should downcase properly"
+ assert_equal simple_str_downcase, @handler.downcase(@handler.upcase(simple_str_downcase)), "upcase and downcase should be mirrors"
+
+ rus_str = "аБвгд\0f"
+ rus_str_upcase = "АБВГД\0F"
+ rus_str_downcase = "абвгд\0f"
+ assert_equal rus_str_upcase, @handler.upcase(rus_str), "should upcase properly honoring null-byte"
+ assert_equal rus_str_downcase, @handler.downcase(rus_str), "should downcase properly honoring null-byte"
+
+ jap_str = "の埋め込み化対応はほぼ完成"
+    assert_equal jap_str, @handler.upcase(jap_str), "Japanese has no upcase, should remain unchanged"
+    assert_equal jap_str, @handler.downcase(jap_str), "Japanese has no downcase, should remain unchanged"
+
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.upcase(@bytestring) }
+ end
+
+ def test_capitalize
+ { 'аБвг аБвг' => 'Абвг абвг',
+ 'аБвг АБВГ' => 'Абвг абвг',
+ 'АБВГ АБВГ' => 'Абвг абвг',
+ '' => '' }.each do |f,t|
+ assert_equal t, @handler.capitalize(f), "Capitalize should work as expected"
+ end
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.capitalize(@bytestring) }
+ end
+
+ def test_translate_offset
+ str = "Блaå" # [2, 2, 1, 2] bytes
+ assert_equal 0, @handler.translate_offset('', 0), "Offset for an empty string makes no sense, return 0"
+ assert_equal 0, @handler.translate_offset(str, 0), "First character, first byte"
+ assert_equal 0, @handler.translate_offset(str, 1), "First character, second byte"
+ assert_equal 1, @handler.translate_offset(str, 2), "Second character, third byte"
+ assert_equal 1, @handler.translate_offset(str, 3), "Second character, fourth byte"
+ assert_equal 2, @handler.translate_offset(str, 4), "Third character, fifth byte"
+ assert_equal 3, @handler.translate_offset(str, 5), "Fourth character, sixth byte"
+ assert_equal 3, @handler.translate_offset(str, 6), "Fourth character, seventh byte"
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.translate_offset(@bytestring, 3) }
+ end
+
+ def test_insert
+ assert_equal '', @handler.insert('', 0, ''), "Empty string should not break things"
+ assert_equal "Abcd Блå ffiБУМ бла бла бла бла", @handler.insert(@string, 10, "БУМ"),
+ "Text should be inserted at right codepoints"
+ assert_equal "Abcd Блå ffiБУМ бла бла бла бла", @string, "Insert should be destructive"
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) do
+ @handler.insert(@bytestring, 2, "\210")
+ end
+ end
+
+ def test_reverse
+ str = "wБлåa \n"
+ rev = "\n aåлБw"
+ assert_equal '', @handler.reverse(''), "Empty string shouldn't change"
+ assert_equal rev.ui, @handler.reverse(str).ui, "Should reverse properly"
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.reverse(@bytestring) }
+ end
+
+ def test_size
+ assert_equal 0, @handler.size(''), "Empty string has size 0"
+ assert_equal 26, @handler.size(@string), "String length should be 26"
+ assert_equal 26, @handler.length(@string), "String length method should be properly aliased"
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.size(@bytestring) }
+ end
+
+ def test_slice
+ assert_equal 0x41, @handler.slice(@string, 0), "Singular characters should return codepoints"
+ assert_equal 0xE5, @handler.slice(@string, 7), "Singular characters should return codepoints"
+ assert_equal nil, @handler.slice('', -1..1), "Broken range should return nil"
+ assert_equal '', @handler.slice('', 0..10), "Empty string should not break things"
+ assert_equal "d Блå ffi", @handler.slice(@string, 3..9), "Unicode characters have to be returned"
+ assert_equal " Блå ffi ", @handler.slice(@string, 4..10), "Unicode characters have to be returned"
+ assert_equal "", @handler.slice(@string, 7..6), "Range is empty, should return an empty string"
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.slice(@bytestring, 2..3) }
+ end
+
+ def test_grapheme_cluster_length
+ assert_equal 0, @handler.g_length(''), "String should count 0 grapheme clusters"
+ assert_equal 2, @handler.g_length([0x0924, 0x094D, 0x0930].pack('U*')), "String should count 2 grapheme clusters"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(cr lf))), "Don't cut between CR and LF"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(l l))), "Don't cut between L"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(l v))), "Don't cut between L and V"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(l lv))), "Don't cut between L and LV"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(l lvt))), "Don't cut between L and LVT"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(lv v))), "Don't cut between LV and V"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(lv t))), "Don't cut between LV and T"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(v v))), "Don't cut between V and V"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(v t))), "Don't cut between V and T"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(lvt t))), "Don't cut between LVT and T"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(t t))), "Don't cut between T and T"
+ assert_equal 1, @handler.g_length(string_from_classes(%w(n extend))), "Don't cut before Extend"
+ assert_equal 2, @handler.g_length(string_from_classes(%w(n n))), "Cut between normal characters"
+ assert_equal 3, @handler.g_length(string_from_classes(%w(n cr lf n))), "Don't cut between CR and LF"
+ assert_equal 2, @handler.g_length(string_from_classes(%w(n l v t))), "Don't cut between L, V and T"
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.g_length(@bytestring) }
+ end
+
+ def test_index
+ s = "Καλημέρα κόσμε!"
+ assert_equal 0, @handler.index('', ''), "The empty string is always found at the beginning of the string"
+ assert_equal 0, @handler.index('haystack', ''), "The empty string is always found at the beginning of the string"
+ assert_equal 0, @handler.index(s, 'Κ'), "Greek K is at 0"
+ assert_equal 1, @handler.index(s, 'α'), "Greek Alpha is at 1"
+
+ assert_equal nil, @handler.index(@bytestring, 'a')
+ assert_raise(ActiveSupport::Multibyte::Handlers::EncodingError) { @handler.index(@bytestring, "\010") }
+ end
+
+ def test_strip
+    # A unicode aware version of strip should strip all 26 types of whitespace. This includes the ZERO WIDTH NO-BREAK
+    # SPACE aka BOM (byte order mark). The BOM has no place in UTF-8 because it's only needed to detect LE and BE byte order.
+ b = "\n" + [
+ 32, # SPACE
+ 8195, # EM SPACE
+ 8199, # FIGURE SPACE,
+ 8201, # THIN SPACE
+ 8202, # HAIR SPACE
+      65279, # ZERO WIDTH NO-BREAK SPACE (BOM)
+ ].pack('U*')
+ m = "word блин\n\n\n word"
+ e = [
+      65279, # ZERO WIDTH NO-BREAK SPACE (BOM)
+ 8201, # THIN SPACE
+ 8199, # FIGURE SPACE,
+ 32, # SPACE
+ ].pack('U*')
+ string = b+m+e
+
+ assert_equal '', @handler.strip(''), "Empty string should stay empty"
+ assert_equal m+e, @handler.lstrip(string), "Whitespace should be gone on the left"
+ assert_equal b+m, @handler.rstrip(string), "Whitespace should be gone on the right"
+ assert_equal m, @handler.strip(string), "Whitespace should be stripped on both sides"
+
+ bs = "\n #{@bytestring} \n\n"
+ assert_equal @bytestring, @handler.strip(bs), "Invalid unicode strings should still strip"
+ end
+
+ def test_tidy_bytes
+ assert_equal "\010", @handler.tidy_bytes(@bytestring)
+ assert_equal "a\010a", @handler.tidy_bytes('a' + @bytestring + 'a')
+ assert_nothing_raised { @handler.tidy_bytes(@bytestring).unpack('U*') }
+ end
+
+ protected
+
+ def string_from_classes(classes)
+ classes.collect do |k|
+ @character_from_class[k.intern]
+ end.pack('U*')
+ end
+end
+
+
+begin
+ require_library_or_gem('utf8proc_native')
+  require 'active_support/multibyte/handlers/utf8_handler_proc'
+ class UTF8HandlingTestProc < Test::Unit::TestCase
+ include UTF8HandlingTest
+ def setup
+ common_setup
+ @handler = ::ActiveSupport::Multibyte::Handlers::UTF8HandlerProc
+ end
+ end
+rescue LoadError
+end
+
+class UTF8HandlingTestPure < Test::Unit::TestCase
+ include UTF8HandlingTest
+ def setup
+ common_setup
+ @handler = ::ActiveSupport::Multibyte::Handlers::UTF8Handler
+ end
+end
\ No newline at end of file
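
The grapheme cluster assertions above boil down to the fact that codepoints and user-perceived characters are not the same thing; one last sketch contrasting the two counts for the Devanagari sequence used in test_grapheme_cluster_length:

    handler = ActiveSupport::Multibyte::Handlers::UTF8Handler

    devanagari = [0x0924, 0x094D, 0x0930].pack('U*')  # TA + VIRAMA + RA
    handler.size(devanagari)      # => 3 codepoints
    handler.g_length(devanagari)  # => 2 grapheme clusters (the VIRAMA is in the Extend class, so there is no break before it)
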