summaryrefslogtreecommitdiffstats
path: root/src/utf8proc/data
diff options
context:
space:
mode:
Diffstat (limited to 'src/utf8proc/data')
-rw-r--r--src/utf8proc/data/Makefile69
-rw-r--r--src/utf8proc/data/charwidths.jl184
-rw-r--r--src/utf8proc/data/data_generator.rb426
3 files changed, 679 insertions, 0 deletions
diff --git a/src/utf8proc/data/Makefile b/src/utf8proc/data/Makefile
new file mode 100644
index 000000000..1b2472807
--- /dev/null
+++ b/src/utf8proc/data/Makefile
@@ -0,0 +1,69 @@
+# Unicode data generation rules. Except for the test data files, most
+# users will not use these Makefile rules, which are primarily to re-generate
+# unicode_data.c when we get a new Unicode version or charwidth data; they
+# require ruby, fontforge, and julia to be installed.
+
+# programs
+CURL=curl
+RUBY=ruby
+PERL=perl
+MAKE=make
+JULIA=julia
+FONTFORGE=fontforge
+CURLFLAGS = --retry 5 --location
+
+.PHONY: clean
+
+.DELETE_ON_ERROR:
+
+utf8proc_data.c.new: data_generator.rb UnicodeData.txt GraphemeBreakProperty.txt DerivedCoreProperties.txt CompositionExclusions.txt CaseFolding.txt CharWidths.txt emoji-data.txt
+ $(RUBY) data_generator.rb < UnicodeData.txt > $@
+
+# GNU Unifont version for font metric calculations:
+UNIFONT_VERSION=11.0.01
+
+unifont.ttf:
+ $(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://mirrors.kernel.org/gnu/unifont/unifont-$(UNIFONT_VERSION)/unifont-$(UNIFONT_VERSION).ttf
+
+unifont_upper.ttf:
+ $(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://mirrors.kernel.org/gnu/unifont/unifont-$(UNIFONT_VERSION)/unifont_upper-$(UNIFONT_VERSION).ttf
+
+%.sfd: %.ttf
+ $(FONTFORGE) -lang=ff -c "Open(\"$<\");Save(\"$@\");Quit(0);"
+
+CharWidths.txt: charwidths.jl unifont.sfd unifont_upper.sfd EastAsianWidth.txt
+ $(JULIA) charwidths.jl > $@
+
+# Unicode data version
+UNICODE_VERSION=11.0.0
+
+UnicodeData.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/UnicodeData.txt
+
+EastAsianWidth.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/EastAsianWidth.txt
+
+GraphemeBreakProperty.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/auxiliary/GraphemeBreakProperty.txt
+
+DerivedCoreProperties.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/DerivedCoreProperties.txt
+
+CompositionExclusions.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/CompositionExclusions.txt
+
+CaseFolding.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/CaseFolding.txt
+
+NormalizationTest.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/NormalizationTest.txt
+
+GraphemeBreakTest.txt:
+	$(CURL) $(CURLFLAGS) $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/auxiliary/GraphemeBreakTest.txt | $(PERL) -pe 's,÷,/,g;s,×,+,g' > $@
+
+emoji-data.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)http://unicode.org/Public/emoji/`echo $(UNICODE_VERSION) | cut -d. -f1-2`/emoji-data.txt
+
+clean:
+ rm -f UnicodeData.txt EastAsianWidth.txt GraphemeBreakProperty.txt DerivedCoreProperties.txt CompositionExclusions.txt CaseFolding.txt NormalizationTest.txt GraphemeBreakTest.txt CharWidths.txt unifont*.ttf unifont*.sfd emoji-data.txt
+ rm -f utf8proc_data.c.new
diff --git a/src/utf8proc/data/charwidths.jl b/src/utf8proc/data/charwidths.jl
new file mode 100644
index 000000000..7b3d15874
--- /dev/null
+++ b/src/utf8proc/data/charwidths.jl
@@ -0,0 +1,184 @@
+# Following work by @jiahao, we compute character widths using a combination of
+# * advance widths from GNU Unifont (advance width 512 = 1 en)
+# * UAX 11: East Asian Width
+# * a few exceptions as needed
+# Adapted from http://nbviewer.ipython.org/gist/jiahao/07e8b08bf6d8671e9734
+#
+# Requires Julia (obviously) and FontForge.
+
+#############################################################################
+CharWidths = Dict{Int,Int}()
+
+#############################################################################
+# Use ../libutf8proc for category codes, rather than the one in Julia,
+# to minimize bootstrapping complexity when a new version of Unicode comes out.
+catcode(c) = ccall((:utf8proc_category,"../libutf8proc"), Cint, (Int32,), c)
+
+# use Base.UTF8proc module to get category codes constants, since
+# we won't change these in utf8proc.
+import Base.UTF8proc
+
+#############################################################################
+# Use a default width of 1 for all character categories that are
+# letter/symbol/number-like, as well as for unassigned/private-use chars.
+# This can be overriden by Unifont or UAX 11
+# below, but provides a useful nonzero fallback for new codepoints when
+# a new Unicode version has been released but Unifont hasn't been updated yet.
+
+zerowidth = Set{Int}() # categories that may contain zero-width chars
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_MN)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_MC)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ME)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_SK)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ZS)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ZL)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ZP)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CC)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CF)
+push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CS)
+for c in 0x0000:0x110000
+ if catcode(c) ∉ zerowidth
+ CharWidths[c] = 1
+ end
+end
+
+#############################################################################
+# Widths from GNU Unifont
+
+#Read sfdfile for character widths
+function parsesfd(filename::AbstractString, CharWidths::Dict{Int,Int}=Dict{Int,Int}())
+ state=:seekchar
+ lineno = 0
+ codepoint = width = nothing
+ for line in readlines(open(filename))
+ lineno += 1
+ if state==:seekchar #StartChar: nonmarkingreturn
+ if contains(line, "StartChar: ")
+ codepoint = nothing
+ width = nothing
+ state = :readdata
+ end
+ elseif state==:readdata #Encoding: 65538 -1 2, Width: 1024
+ contains(line, "Encoding:") && (codepoint = parse(Int, split(line)[3]))
+ contains(line, "Width:") && (width = parse(Int, split(line)[2]))
+ if codepoint!=nothing && width!=nothing && codepoint >= 0
+ w=div(width, 512) # 512 units to the en
+ if w > 0
+ # only add nonzero widths, since (1) the default is zero
+ # and (2) this circumvents some apparent bugs in Unifont
+ # (https://savannah.gnu.org/bugs/index.php?45395)
+ CharWidths[codepoint] = w
+ end
+ state = :seekchar
+ end
+ end
+ end
+ CharWidths
+end
+CharWidths=parsesfd("unifont.sfd", CharWidths)
+CharWidths=parsesfd("unifont_upper.sfd", CharWidths)
+
+#############################################################################
+# Widths from UAX #11: East Asian Width
+# .. these take precedence over the Unifont width for all codepoints
+# listed explicitly as wide/full/narrow/half-width
+
+for line in readlines(open("EastAsianWidth.txt"))
+ #Strip comments
+ (isempty(line) || line[1] == '#') && continue
+ precomment = split(line, '#')[1]
+ #Parse code point range and width code
+ tokens = split(precomment, ';')
+ length(tokens) >= 2 || continue
+ charrange = tokens[1]
+ width = strip(tokens[2])
+ #Parse code point range into Julia UnitRange
+ rangetokens = split(charrange, "..")
+ charstart = parse(UInt32, "0x"*rangetokens[1])
+ charend = parse(UInt32, "0x"*rangetokens[length(rangetokens)>1 ? 2 : 1])
+
+ #Assign widths
+ for c in charstart:charend
+ if width=="W" || width=="F" # wide or full
+ CharWidths[c]=2
+ elseif width=="Na"|| width=="H"
+ CharWidths[c]=1
+ end
+ end
+end
+
+#############################################################################
+# A few exceptions to the above cases, found by manual comparison
+# to other wcwidth functions and similar checks.
+
+for c in keys(CharWidths)
+ cat = catcode(c)
+
+ # make sure format control character (category Cf) have width 0
+ # (some of these, like U+0601, can have a width in some cases
+ # but normally act like prepended combining marks. U+fff9 etc
+ # are also odd, but have zero width in typical terminal contexts)
+ if cat==UTF8proc.UTF8PROC_CATEGORY_CF
+ CharWidths[c]=0
+ end
+
+ # Unifont has nonzero width for a number of non-spacing combining
+ # characters, e.g. (in 7.0.06): f84,17b4,17b5,180b,180d,2d7f, and
+ # the variation selectors
+ if cat==UTF8proc.UTF8PROC_CATEGORY_MN
+ CharWidths[c]=0
+ end
+
+ # We also assign width of one to unassigned and private-use
+ # codepoints (Unifont includes ConScript Unicode Registry PUA fonts,
+ # but since these are nonstandard it seems questionable to use Unifont metrics;
+ # if they are printed as the replacement character U+FFFD they will have width 1).
+ if cat==UTF8proc.UTF8PROC_CATEGORY_CO || cat==UTF8proc.UTF8PROC_CATEGORY_CN
+ CharWidths[c]=1
+ end
+
+ # for some reason, Unifont has width-2 glyphs for ASCII control chars
+ if cat==UTF8proc.UTF8PROC_CATEGORY_CC
+ CharWidths[c]=0
+ end
+end
+
+#Soft hyphen is typically printed as a hyphen (-) in terminals.
+CharWidths[0x00ad]=1
+
+#By definition, should have zero width (on the same line)
+#0x002028 '\u2028' category: Zl name: LINE SEPARATOR/
+#0x002029 '\u2029' category: Zp name: PARAGRAPH SEPARATOR/
+CharWidths[0x2028]=0
+CharWidths[0x2029]=0
+
+#By definition, should be narrow = width of 1 en space
+#0x00202f ' ' category: Zs name: NARROW NO-BREAK SPACE/
+CharWidths[0x202f]=1
+
+#By definition, should be wide = width of 1 em space
+#0x002001 ' ' category: Zs name: EM QUAD/
+#0x002003 ' ' category: Zs name: EM SPACE/
+CharWidths[0x2001]=2
+CharWidths[0x2003]=2
+
+#############################################################################
+# Output (to a file or pipe) for processing by data_generator.rb,
+# encoded as a sequence of intervals.
+
+firstc = 0x000000
+lastv = 0
+uhex(c) = uppercase(hex(c,4))
+for c in 0x0000:0x110000
+ v = get(CharWidths, c, 0)
+ if v != lastv || c == 0x110000
+ v < 4 || error("invalid charwidth $v for $c")
+ if firstc+1 < c
+ println(uhex(firstc), "..", uhex(c-1), "; ", lastv)
+ else
+ println(uhex(firstc), "; ", lastv)
+ end
+ firstc = c
+ lastv = v
+ end
+end
diff --git a/src/utf8proc/data/data_generator.rb b/src/utf8proc/data/data_generator.rb
new file mode 100644
index 000000000..8bd87e8da
--- /dev/null
+++ b/src/utf8proc/data/data_generator.rb
@@ -0,0 +1,426 @@
+#!/usr/bin/env ruby
+
+# This file was used to generate the 'unicode_data.c' file by parsing the
+# Unicode data file 'UnicodeData.txt' of the Unicode Character Database.
+# It is included for informational purposes only and not intended for
+# production use.
+
+
+# Copyright (c) 2018 Steven G. Johnson, Tony Kelman, Keno Fischer,
+# Benito van der Zander, Michaël Meyer, and other contributors.
+# Copyright (c) 2009 Public Software Group e. V., Berlin, Germany
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+
+# This file contains derived data from a modified version of the
+# Unicode data files. The following license applies to that data:
+#
+# COPYRIGHT AND PERMISSION NOTICE
+#
+# Copyright (c) 1991-2007 Unicode, Inc. All rights reserved. Distributed
+# under the Terms of Use in http://www.unicode.org/copyright.html.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of the Unicode data files and any associated documentation (the "Data
+# Files") or Unicode software and any associated documentation (the
+# "Software") to deal in the Data Files or Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, and/or sell copies of the Data Files or Software, and
+# to permit persons to whom the Data Files or Software are furnished to do
+# so, provided that (a) the above copyright notice(s) and this permission
+# notice appear with all copies of the Data Files or Software, (b) both the
+# above copyright notice(s) and this permission notice appear in associated
+# documentation, and (c) there is clear notice in each modified Data File or
+# in the Software as well as in the documentation associated with the Data
+# File(s) or Software that the data or software has been modified.
+#
+# THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+# THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+# INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR
+# CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+#
+# Except as contained in this notice, the name of a copyright holder shall
+# not be used in advertising or otherwise to promote the sale, use or other
+# dealings in these Data Files or Software without prior written
+# authorization of the copyright holder.
+
+
+$ignorable_list = File.read("DerivedCoreProperties.txt")[/# Derived Property: Default_Ignorable_Code_Point.*?# Total code points:/m]
+$ignorable = []
+$ignorable_list.each_line do |entry|
+ if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
+ $1.hex.upto($2.hex) { |e2| $ignorable << e2 }
+ elsif entry =~ /^[0-9A-F]+/
+ $ignorable << $&.hex
+ end
+end
+
+$grapheme_boundclass_list = File.read("GraphemeBreakProperty.txt")
+$grapheme_boundclass = Hash.new("UTF8PROC_BOUNDCLASS_OTHER")
+$grapheme_boundclass_list.each_line do |entry|
+ if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*([A-Za-z_]+)/
+ $1.hex.upto($2.hex) { |e2| $grapheme_boundclass[e2] = "UTF8PROC_BOUNDCLASS_" + $3.upcase }
+ elsif entry =~ /^([0-9A-F]+)\s*;\s*([A-Za-z_]+)/
+ $grapheme_boundclass[$1.hex] = "UTF8PROC_BOUNDCLASS_" + $2.upcase
+ end
+end
+
+$emoji_data_list = File.read("emoji-data.txt")
+$emoji_data_list.each_line do |entry|
+ if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*Extended_Pictographic\W/
+ $1.hex.upto($2.hex) { |e2| $grapheme_boundclass[e2] = "UTF8PROC_BOUNDCLASS_EXTENDED_PICTOGRAPHIC" }
+ elsif entry =~ /^([0-9A-F]+)\s*;\s*Extended_Pictographic\W/
+ $grapheme_boundclass[$1.hex] = "UTF8PROC_BOUNDCLASS_EXTENDED_PICTOGRAPHIC"
+ elsif entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*Emoji_Modifier\W/
+ $1.hex.upto($2.hex) { |e2| $grapheme_boundclass[e2] = "UTF8PROC_BOUNDCLASS_EXTEND" }
+ elsif entry =~ /^([0-9A-F]+)\s*;\s*Emoji_Modifier\W/
+ $grapheme_boundclass[$1.hex] = "UTF8PROC_BOUNDCLASS_EXTEND"
+ end
+end
+
+$charwidth_list = File.read("CharWidths.txt")
+$charwidth = Hash.new(0)
+$charwidth_list.each_line do |entry|
+ if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*([0-9]+)/
+ $1.hex.upto($2.hex) { |e2| $charwidth[e2] = $3.to_i }
+ elsif entry =~ /^([0-9A-F]+)\s*;\s*([0-9]+)/
+ $charwidth[$1.hex] = $2.to_i
+ end
+end
+
+$exclusions = File.read("CompositionExclusions.txt")[/# \(1\) Script Specifics.*?# Total code points:/m]
+$exclusions = $exclusions.chomp.split("\n").collect { |e| e.hex }
+
+$excl_version = File.read("CompositionExclusions.txt")[/# \(2\) Post Composition Version precomposed characters.*?# Total code points:/m]
+$excl_version = $excl_version.chomp.split("\n").collect { |e| e.hex }
+
+$case_folding_string = File.open("CaseFolding.txt", :encoding => 'utf-8').read
+$case_folding = {}
+$case_folding_string.chomp.split("\n").each do |line|
+ next unless line =~ /([0-9A-F]+); [CF]; ([0-9A-F ]+);/i
+ $case_folding[$1.hex] = $2.split(" ").collect { |e| e.hex }
+end
+
+$int_array = []
+$int_array_indicies = {}
+
+def str2c(string, prefix)
+ return "0" if string.nil?
+ return "UTF8PROC_#{prefix}_#{string.upcase}"
+end
+def pushary(array)
+ idx = $int_array_indicies[array]
+ unless idx
+ $int_array_indicies[array] = $int_array.length
+ idx = $int_array.length
+ array.each { |entry| $int_array << entry }
+ end
+ return idx
+end
+def cpary2utf16encoded(array)
+ return array.flat_map { |cp|
+ if (cp <= 0xFFFF)
+ raise "utf-16 code: #{cp}" if cp & 0b1111100000000000 == 0b1101100000000000
+ cp
+ else
+ temp = cp - 0x10000
+ [(temp >> 10) | 0b1101100000000000, (temp & 0b0000001111111111) | 0b1101110000000000]
+ end
+ }
+end
+def cpary2c(array)
+ return "UINT16_MAX" if array.nil? || array.length == 0
+ lencode = array.length - 1 #no sequence has len 0, so we encode len 1 as 0, len 2 as 1, ...
+ array = cpary2utf16encoded(array)
+ if lencode >= 7 #we have only 3 bits for the length (which is already cutting it close. might need to change it to 2 bits in future Unicode versions)
+ array = [lencode] + array
+ lencode = 7
+ end
+ idx = pushary(array)
+ raise "Array index out of bound" if idx > 0x1FFF
+ return "#{idx | (lencode << 13)}"
+end
+def singlecpmap(cp)
+ return "UINT16_MAX" if cp == nil
+ idx = pushary(cpary2utf16encoded([cp]))
+ raise "Array index out of bound" if idx > 0xFFFF
+ return "#{idx}"
+end
+
+class UnicodeChar
+ attr_accessor :code, :name, :category, :combining_class, :bidi_class,
+ :decomp_type, :decomp_mapping,
+ :bidi_mirrored,
+ :uppercase_mapping, :lowercase_mapping, :titlecase_mapping,
+ #caches:
+ :c_entry_index, :c_decomp_mapping, :c_case_folding
+ def initialize(line)
+ raise "Could not parse input." unless line =~ /^
+ ([0-9A-F]+); # code
+ ([^;]+); # name
+ ([A-Z]+); # general category
+ ([0-9]+); # canonical combining class
+ ([A-Z]+); # bidi class
+ (<([A-Z]*)>)? # decomposition type
+ ((\ ?[0-9A-F]+)*); # decompomposition mapping
+ ([0-9]*); # decimal digit
+ ([0-9]*); # digit
+ ([^;]*); # numeric
+ ([YN]*); # bidi mirrored
+ ([^;]*); # unicode 1.0 name
+ ([^;]*); # iso comment
+ ([0-9A-F]*); # simple uppercase mapping
+ ([0-9A-F]*); # simple lowercase mapping
+ ([0-9A-F]*)$/ix # simple titlecase mapping
+ @code = $1.hex
+ @name = $2
+ @category = $3
+ @combining_class = Integer($4)
+ @bidi_class = $5
+ @decomp_type = $7
+ @decomp_mapping = ($8=='') ? nil :
+ $8.split.collect { |element| element.hex }
+ @bidi_mirrored = ($13=='Y') ? true : false
+ # issue #130: use nonstandard uppercase ß -> ẞ
+ @uppercase_mapping = ($16=='') ? (code==0x00df ? 0x1e9e : nil) : $16.hex
+ @lowercase_mapping = ($17=='') ? nil : $17.hex
+ @titlecase_mapping = ($18=='') ? (code==0x00df ? 0x1e9e : nil) : $18.hex
+ end
+ def case_folding
+ $case_folding[code]
+ end
+ def c_entry(comb_indicies)
+ " " <<
+ "{#{str2c category, 'CATEGORY'}, #{combining_class}, " <<
+ "#{str2c bidi_class, 'BIDI_CLASS'}, " <<
+ "#{str2c decomp_type, 'DECOMP_TYPE'}, " <<
+ "#{c_decomp_mapping}, " <<
+ "#{c_case_folding}, " <<
+ "#{singlecpmap uppercase_mapping }, " <<
+ "#{singlecpmap lowercase_mapping }, " <<
+ "#{singlecpmap titlecase_mapping }, " <<
+ "#{comb_indicies[code] ? comb_indicies[code]: 'UINT16_MAX'}, " <<
+ "#{bidi_mirrored}, " <<
+ "#{$exclusions.include?(code) or $excl_version.include?(code)}, " <<
+ "#{$ignorable.include?(code)}, " <<
+ "#{%W[Zl Zp Cc Cf].include?(category) and not [0x200C, 0x200D].include?(category)}, " <<
+ "#{$charwidth[code]}, 0, " <<
+ "#{$grapheme_boundclass[code]}},\n"
+ end
+end
+
+chars = []
+char_hash = {}
+
+while gets
+ if $_ =~ /^([0-9A-F]+);<[^;>,]+, First>;/i
+ first = $1.hex
+ gets
+ char = UnicodeChar.new($_)
+ raise "No last character of sequence found." unless
+ $_ =~ /^([0-9A-F]+);<([^;>,]+), Last>;/i
+ last = $1.hex
+ name = "<#{$2}>"
+ for i in first..last
+ char_clone = char.clone
+ char_clone.code = i
+ char_clone.name = name
+ char_hash[char_clone.code] = char_clone
+ chars << char_clone
+ end
+ else
+ char = UnicodeChar.new($_)
+ char_hash[char.code] = char
+ chars << char
+ end
+end
+
+comb1st_indicies = {}
+comb2nd_indicies = {}
+comb2nd_indicies_sorted_keys = []
+comb2nd_indicies_nonbasic = {}
+comb_array = []
+
+chars.each do |char|
+ if !char.nil? and char.decomp_type.nil? and char.decomp_mapping and
+ char.decomp_mapping.length == 2 and !char_hash[char.decomp_mapping[0]].nil? and
+ char_hash[char.decomp_mapping[0]].combining_class == 0 and
+ not $exclusions.include?(char.code)
+
+ dm0 = char.decomp_mapping[0]
+ dm1 = char.decomp_mapping[1]
+ unless comb1st_indicies[dm0]
+ comb1st_indicies[dm0] = comb1st_indicies.keys.length
+ end
+ unless comb2nd_indicies[dm1]
+ comb2nd_indicies_sorted_keys << dm1
+ comb2nd_indicies[dm1] = comb2nd_indicies.keys.length
+ end
+ comb_array[comb1st_indicies[dm0]] ||= []
+ raise "Duplicate canonical mapping: #{char.code} #{dm0} #{dm1}" if comb_array[comb1st_indicies[dm0]][comb2nd_indicies[dm1]]
+ comb_array[comb1st_indicies[dm0]][comb2nd_indicies[dm1]] = char.code
+
+ comb2nd_indicies_nonbasic[dm1] = true if char.code > 0xFFFF
+ end
+ char.c_decomp_mapping = cpary2c(char.decomp_mapping)
+ char.c_case_folding = cpary2c(char.case_folding)
+end
+
+comb_indicies = {}
+cumoffset = 0
+comb1st_indicies_lastoffsets = []
+comb1st_indicies_firstoffsets = []
+comb1st_indicies.each do |dm0, index|
+ first = nil
+ last = nil
+ offset = 0
+ comb2nd_indicies_sorted_keys.each_with_index do |dm1, b|
+ if comb_array[index][b]
+ first = offset unless first
+ last = offset
+ last += 1 if comb2nd_indicies_nonbasic[dm1]
+ end
+ offset += 1
+ offset += 1 if comb2nd_indicies_nonbasic[dm1]
+ end
+ comb1st_indicies_firstoffsets[index] = first
+ comb1st_indicies_lastoffsets[index] = last
+ raise "double index" if comb_indicies[dm0]
+ comb_indicies[dm0] = cumoffset
+ cumoffset += last - first + 1 + 2
+end
+
+offset = 0
+comb2nd_indicies_sorted_keys.each do |dm1|
+ raise "double index" if comb_indicies[dm1]
+ comb_indicies[dm1] = 0x8000 | (comb2nd_indicies[dm1] + offset)
+ raise "too large comb index" if comb2nd_indicies[dm1] + offset > 0x4000
+ if comb2nd_indicies_nonbasic[dm1]
+ comb_indicies[dm1] = comb_indicies[dm1] | 0x4000
+ offset += 1
+ end
+end
+
+properties_indicies = {}
+properties = []
+chars.each do |char|
+ c_entry = char.c_entry(comb_indicies)
+ char.c_entry_index = properties_indicies[c_entry]
+ unless char.c_entry_index
+ properties_indicies[c_entry] = properties.length
+ char.c_entry_index = properties.length
+ properties << c_entry
+ end
+end
+
+stage1 = []
+stage2 = []
+for code in 0...0x110000
+ next unless code % 0x100 == 0
+ stage2_entry = []
+ for code2 in code...(code+0x100)
+ if char_hash[code2]
+ stage2_entry << (char_hash[code2].c_entry_index + 1)
+ else
+ stage2_entry << 0
+ end
+ end
+ old_index = stage2.index(stage2_entry)
+ if old_index
+ stage1 << (old_index * 0x100)
+ else
+ stage1 << (stage2.length * 0x100)
+ stage2 << stage2_entry
+ end
+end
+
+$stdout << "static const utf8proc_uint16_t utf8proc_sequences[] = {\n "
+i = 0
+$int_array.each do |entry|
+ i += 1
+ if i == 8
+ i = 0
+ $stdout << "\n "
+ end
+ $stdout << entry << ", "
+end
+$stdout << "};\n\n"
+
+$stdout << "static const utf8proc_uint16_t utf8proc_stage1table[] = {\n "
+i = 0
+stage1.each do |entry|
+ i += 1
+ if i == 8
+ i = 0
+ $stdout << "\n "
+ end
+ $stdout << entry << ", "
+end
+$stdout << "};\n\n"
+
+$stdout << "static const utf8proc_uint16_t utf8proc_stage2table[] = {\n "
+i = 0
+stage2.flatten.each do |entry|
+ i += 1
+ if i == 8
+ i = 0
+ $stdout << "\n "
+ end
+ $stdout << entry << ", "
+end
+$stdout << "};\n\n"
+
+$stdout << "static const utf8proc_property_t utf8proc_properties[] = {\n"
+$stdout << " {0, 0, 0, 0, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, false,false,false,false, 1, 0, UTF8PROC_BOUNDCLASS_OTHER},\n"
+properties.each { |line|
+ $stdout << line
+}
+$stdout << "};\n\n"
+
+
+
+$stdout << "static const utf8proc_uint16_t utf8proc_combinations[] = {\n "
+i = 0
+comb1st_indicies.keys.each_index do |a|
+ offset = 0
+ $stdout << comb1st_indicies_firstoffsets[a] << ", " << comb1st_indicies_lastoffsets[a] << ", "
+ comb2nd_indicies_sorted_keys.each_with_index do |dm1, b|
+ break if offset > comb1st_indicies_lastoffsets[a]
+ if offset >= comb1st_indicies_firstoffsets[a]
+ i += 1
+ if i == 8
+ i = 0
+ $stdout << "\n "
+ end
+ v = comb_array[a][b] ? comb_array[a][b] : 0
+ $stdout << (( v & 0xFFFF0000 ) >> 16) << ", " if comb2nd_indicies_nonbasic[dm1]
+ $stdout << (v & 0xFFFF) << ", "
+ end
+ offset += 1
+ offset += 1 if comb2nd_indicies_nonbasic[dm1]
+ end
+ $stdout << "\n"
+end
+$stdout << "};\n\n"