#! /usr/bin/python

# Multistage table builder
# (c) Peter Kankowski, 2008

##############################################################################
# This script was submitted to the PCRE project by Peter Kankowski as part of
# the upgrading of Unicode property support. The new code speeds up property
# matching many times. The script is for the use of PCRE maintainers, to
# generate the pcre2_ucd.c file that contains a digested form of the Unicode
# data tables. A number of extensions have been added to the original script.
#
# The script has now been upgraded to Python 3 for PCRE2, and should be run in
# the maint subdirectory, using the command
#
# [python3] ./MultiStage2.py >../src/pcre2_ucd.c
#
# It requires six Unicode data tables: DerivedGeneralCategory.txt,
# GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt,
# CaseFolding.txt, and emoji-data.txt. These must be in the
# maint/Unicode.tables subdirectory.
#
# DerivedGeneralCategory.txt is found in the "extracted" subdirectory of the
# Unicode database (UCD) on the Unicode web site; GraphemeBreakProperty.txt is
# in the "auxiliary" subdirectory. Scripts.txt, ScriptExtensions.txt, and
# CaseFolding.txt are directly in the UCD directory. The emoji-data.txt file is
# in files associated with Unicode Technical Standard #51 ("Unicode Emoji"),
# for example:
#
# http://unicode.org/Public/emoji/11.0/emoji-data.txt
#
# -----------------------------------------------------------------------------
# Minor modifications made to this script:
#  Added #! line at start
#  Removed tabs
#  Made it work with Python 2.4 by rewriting two statements that needed 2.5
#  Consequent code tidy
#  Adjusted data file names to take from the Unicode.tables directory
#  Adjusted global table names by prefixing _pcre_.
#  Commented out stuff relating to the casefolding table, which isn't used;
#    removed completely in 2012.
#  Corrected size calculation
#  Add #ifndef SUPPORT_UCP to use dummy tables when no UCP support is needed.
#  Update for PCRE2: name changes, and SUPPORT_UCP is abolished.
#
# Major modifications made to this script:
#  Added code to add a grapheme break property field to records.
#
#  Added code to search for sets of more than two characters that must match
#  each other caselessly. A new table is output containing these sets, and
#  offsets into the table are added to the main output records. This new
#  code scans CaseFolding.txt instead of UnicodeData.txt, which is no longer
#  used.
#
#  Update for Python3:
#    . Processed with 2to3, but that didn't fix everything
#    . Changed string.strip to str.strip
#    . Added encoding='utf-8' to the open() call
#    . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is
#        required and the result of the division is a float
#
#  Added code to scan the emoji-data.txt file to find the Extended
#  Pictographic property, which is used by PCRE2 as a grapheme breaking
#  property. This was done when updating to Unicode 11.0.0 (July 2018).
#
#  Added code to add a Script Extensions field to records. This has increased
#  their size from 8 to 12 bytes, only 10 of which are currently used.
#
# 01-March-2010:     Updated list of scripts for Unicode 5.2.0
# 30-April-2011:     Updated list of scripts for Unicode 6.0.0
#     July-2012:     Updated list of scripts for Unicode 6.1.0
# 20-August-2012:    Added scan of GraphemeBreakProperty.txt and added a new
#                      field in the record to hold the value. Luckily, the
#                      structure had a hole in it, so the resulting table is
#                      not much bigger than before.
# 18-September-2012: Added code for multiple caseless sets. This uses the
#                      final hole in the structure.
# 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0
# 13-May-2014:       Updated for PCRE2
# 03-June-2014:      Updated for Python 3
# 20-June-2014:      Updated for Unicode 7.0.0
# 12-August-2014:    Updated to put Unicode version into the file
# 19-June-2015:      Updated for Unicode 8.0.0
# 02-July-2017:      Updated for Unicode 10.0.0
# 03-July-2018:      Updated for Unicode 11.0.0
# 07-July-2018:      Added code to scan emoji-data.txt for the Extended
#                      Pictographic property.
# 01-October-2018:   Added the 'Unknown' script name
# 03-October-2018:   Added new field for Script Extensions
# 27-July-2019:      Updated for Unicode 12.1.0
# ----------------------------------------------------------------------------
#
# The main tables generated by this script are used by macros defined in
# pcre2_internal.h. They look up Unicode character properties using short
# sequences of code that contains no branches, which makes for greater speed.
#
# Conceptually, there is a table of records (of type ucd_record), containing a
# script number, script extension value, character type, grapheme break type,
# offset to caseless matching set, and offset to the character's other case,
# for every Unicode character. However, a real table covering all Unicode
# characters would be far too big. It can be efficiently compressed by
# observing that many characters have the same record, and many blocks of
# characters (taking 128 characters in a block) have the same set of records as
# other blocks. This leads to a 2-stage lookup process.
#
# This script constructs six tables. The ucd_caseless_sets table contains
# lists of characters that all match each other caselessly. Each list is
# in order, and is terminated by NOTACHAR (0xffffffff), which is larger than
# any valid character. The first list is empty; this is used for characters
# that are not part of any list.
#
# The ucd_digit_sets table contains the code points of the '9' characters in
# each set of 10 decimal digits in Unicode. This is used to ensure that digits
# in script runs all come from the same set. The first element in the vector
# contains the number of subsequent elements, which are in ascending order.
#
# The ucd_script_sets vector contains lists of script numbers that are the
# Script Extensions properties of certain characters. Each list is terminated
# by zero (ucp_Unknown). A character with more than one script listed for its
# Script Extension property has a negative value in its record. This is the
# negated offset to the start of the relevant list in the ucd_script_sets
# vector.
#
# The ucd_records table contains one instance of every unique record that is
# required. The ucd_stage1 table is indexed by a character's block number,
# which is the character's code point divided by 128, since 128 is the size
# of each block. The result of a lookup in ucd_stage1 is a "virtual" block
# number.
#
# The ucd_stage2 table is a table of "virtual" blocks; each block is indexed by
# the offset of a character within its own block, and the result is the index
# number of the required record in the ucd_records vector.
#
# The following examples are correct for the Unicode 11.0.0 database. Future
# updates may change the actual lookup values.
#
# Example: lowercase "a" (U+0061) is in block 0
#          lookup 0 in stage1 table yields 0
#          lookup 97 (0x61) in the first table in stage2 yields 17
#          record 17 is { 34, 5, 12, 0, -32, 34, 0 }
#            34 = ucp_Latin   => Latin script
#             5 = ucp_Ll      => Lower case letter
#            12 = ucp_gbOther => Grapheme break property "Other"
#             0               => Not part of a caseless set
#           -32 (-0x20)       => Other case is U+0041
#            34 = ucp_Latin   => No special Script Extension property
#             0               => Dummy value, unused at present
#
# Almost all lowercase Latin characters resolve to the same record. One or two
# are different because they are part of a multi-character caseless set (for
# example, k, K and the Kelvin symbol are such a set).
#
# Example: hiragana letter A (U+3042) is in block 96 (0x60)
#          lookup 96 in stage1 table yields 90
#          lookup 66 (0x42) in table 90 in stage2 yields 564
#          record 564 is { 27, 7, 12, 0, 0, 27, 0 }
#            27 = ucp_Hiragana => Hiragana script
#             7 = ucp_Lo       => Other letter
#            12 = ucp_gbOther  => Grapheme break property "Other"
#             0                => Not part of a caseless set
#             0                => No other case
#            27 = ucp_Hiragana => No special Script Extension property
#             0                => Dummy value, unused at present
#
# Example: vedic tone karshana (U+1CD0) is in block 57 (0x39)
#          lookup 57 in stage1 table yields 55
#          lookup 80 (0x50) in table 55 in stage2 yields 458
#          record 458 is { 28, 12, 3, 0, 0, -101, 0 }
#            28 = ucp_Inherited => Script inherited from predecessor
#            12 = ucp_Mn        => Non-spacing mark
#             3 = ucp_gbExtend  => Grapheme break property "Extend"
#             0                 => Not part of a caseless set
#             0                 => No other case
#          -101                 => Script Extension list offset = 101
#             0                 => Dummy value, unused at present
#
# At offset 101 in the ucd_script_sets vector we find the list 3, 15, 107, 29,
# and terminator 0. This means that this character is expected to be used with
# any of those scripts, which are Bengali, Devanagari, Grantha, and Kannada.
#
# Philip Hazel, 03 July 2008
# Last Updated: 27 July 2019
##############################################################################
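
# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; never called below, and the
# parameter names are stand-ins): the branch-free two-stage lookup described
# above, as performed by the C macros in pcre2_internal.h, corresponds to
# this Python equivalent.

def lookup_record_sketch(stage1, stage2, records, c, block_size=128):
  virtual_block = stage1[c // block_size]       # stage 1: real block -> virtual block
  index = stage2[virtual_block * block_size + c % block_size]  # stage 2: record index
  return records[index]                         # the unique property record for c
# ---------------------------------------------------------------------------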

import re
import string
import sys

MAX_UNICODE = 0x110000
NOTACHAR = 0xffffffff

# Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt
def make_get_names(enum):
  return lambda chardata: enum.index(chardata[1])
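
# For example, make_get_names(['Foo', 'Bar']) returns a parser that maps a
# data line whose second field is 'Bar' to enum value 1 (the names here are
# invented for illustration):
assert make_get_names(['Foo', 'Bar'])(['10FF', 'Bar']) == 1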

# Parse a line of CaseFolding.txt
def get_other_case(chardata):
  if chardata[1] == 'C' or chardata[1] == 'S':
    return int(chardata[2], 16) - int(chardata[0], 16)
  return 0
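
# For example, the CaseFolding.txt line "0041; C; 0061; # LATIN CAPITAL
# LETTER A" arrives here as ['0041', 'C', '0061', ...], giving the offset
# 0x61 - 0x41 = 32; statuses other than 'C' and 'S' are ignored. Two
# illustrative checks:
assert get_other_case(['0041', 'C', '0061']) == 32
assert get_other_case(['0130', 'T', '0069']) == 0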

# Parse a line of ScriptExtensions.txt
def get_script_extension(chardata):
  this_script_list = list(chardata[1].split(' '))
  if len(this_script_list) == 1:
    return script_abbrevs.index(this_script_list[0])

  script_numbers = []
  for d in this_script_list:
    script_numbers.append(script_abbrevs.index(d))
  script_numbers.append(0)
  script_numbers_length = len(script_numbers)

  for i in range(1, len(script_lists) - script_numbers_length + 1):
    for j in range(0, script_numbers_length):
      found = True
      if script_lists[i+j] != script_numbers[j]:
        found = False
        break
    if found:
      return -i

  # Not found in existing lists
  return_value = len(script_lists)
  script_lists.extend(script_numbers)
  return -return_value
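
# Illustrative decoder for the encoding above (not used by this script): a
# non-negative value names a single script, while a negative value -n means
# the extension list starts at script_lists[n] and runs up to a 0 terminator.
def decode_script_extension_sketch(value):
  if value >= 0:
    return [value]            # a single script number
  scripts = []
  i = -value                  # negated offset into script_lists
  while script_lists[i] != 0:
    scripts.append(script_lists[i])
    i += 1
  return scripts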

# Read the whole table in memory, setting/checking the Unicode version
def read_table(file_name, get_value, default_value):
  global unicode_version

  f = re.match(r'^[^/]+/([^.]+)\.txt$', file_name)
  file_base = f.group(1)
  version_pat = r"^# " + re.escape(file_base) + r"-(\d+\.\d+\.\d+)\.txt$"
  file = open(file_name, 'r', encoding='utf-8')
  f = re.match(version_pat, file.readline())
  version = f.group(1)
  if unicode_version == "":
    unicode_version = version
  elif unicode_version != version:
    print("WARNING: Unicode version differs in %s" % file_name, file=sys.stderr)

  table = [default_value] * MAX_UNICODE
  for line in file:
    line = re.sub(r'#.*', '', line)
    chardata = list(map(str.strip, line.split(';')))
    if len(chardata) <= 1:
      continue
    value = get_value(chardata)
    m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
    char = int(m.group(1), 16)
    if m.group(3) is None:
      last = char
    else:
      last = int(m.group(3), 16)
    for i in range(char, last + 1):
      # It is important not to overwrite a previously set value because in
      # the CaseFolding file there are lines to be ignored (returning the
      # default value of 0) which often come after a line which has already
      # set data.
      if table[i] == default_value:
        table[i] = value
  file.close()
  return table
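
# The code-point field handled above is either a single value or a range, as
# this illustrative check (not part of the table build) shows:
assert re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$',
                '1F600..1F64F').group(3) == '1F64F'
assert re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$',
                '0061').group(3) is None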

# Get the smallest possible C language type for the values
def get_type_size(table):
  type_size = [("uint8_t", 1), ("uint16_t", 2), ("uint32_t", 4),
               ("signed char", 1), ("pcre_int16", 2), ("pcre_int32", 4)]
  limits = [(0, 255), (0, 65535), (0, 4294967295),
            (-128, 127), (-32768, 32767), (-2147483648, 2147483647)]
  minval = min(table)
  maxval = max(table)
  for num, (minlimit, maxlimit) in enumerate(limits):
    if minlimit <= minval and maxval <= maxlimit:
      return type_size[num]
  else:
    raise OverflowError("Too large to fit into C types")
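
# For example (illustrative checks only): values 0..255 fit an unsigned byte,
# but a single negative value forces the first signed type that fits.
assert get_type_size([0, 255]) == ("uint8_t", 1)
assert get_type_size([-1, 100]) == ("signed char", 1)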

def get_tables_size(*tables):
  total_size = 0
  for table in tables:
    type, size = get_type_size(table)
    total_size += size * len(table)
  return total_size

# Compress the table into the two stages
def compress_table(table, block_size):
  blocks = {} # Dictionary for finding identical blocks
  stage1 = [] # Stage 1 table contains block numbers (indices into stage 2 table)
  stage2 = [] # Stage 2 table contains the blocks with property values
  table = tuple(table)
  for i in range(0, len(table), block_size):
    block = table[i:i+block_size]
    start = blocks.get(block)
    if start is None:
      # Allocate a new block
      start = len(stage2) // block_size
      stage2 += block
      blocks[block] = start
    stage1.append(start)

  return stage1, stage2
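
# A minimal illustration (invented data, not part of the generated output):
# two identical blocks share one stage-2 block, so only stage 1 grows.
def test_compress_table():
  stage1, stage2 = compress_table([7, 7, 7, 7, 7, 7, 7, 7], 4)
  assert stage1 == [0, 0]              # both real blocks map to virtual block 0
  assert list(stage2) == [7, 7, 7, 7]  # the block data is stored only once

test_compress_table()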

# Print a table
def print_table(table, table_name, block_size = None):
  type, size = get_type_size(table)
  ELEMS_PER_LINE = 16

  s = "const %s %s[] = { /* %d bytes" % (type, table_name, size * len(table))
  if block_size:
    s += ", block = %d" % block_size
  print(s + " */")
  table = tuple(table)
  if block_size is None:
    fmt = "%3d," * ELEMS_PER_LINE + " /* U+%04X */"
    mult = MAX_UNICODE / len(table)
    for i in range(0, len(table), ELEMS_PER_LINE):
      print(fmt % (table[i:i+ELEMS_PER_LINE] +
        (int(i * mult),)))
  else:
    if block_size > ELEMS_PER_LINE:
      el = ELEMS_PER_LINE
    else:
      el = block_size
    fmt = "%3d," * el + "\n"
    if block_size > ELEMS_PER_LINE:
      fmt = fmt * int(block_size / ELEMS_PER_LINE)
    for i in range(0, len(table), block_size):
      print(("/* block %d */\n" + fmt) % ((i // block_size,) + table[i:i+block_size]))
  print("};\n")

# Extract the unique combinations of properties into records
def combine_tables(*tables):
  records = {}
  index = []
  for t in zip(*tables):
    i = records.get(t)
    if i is None:
      i = records[t] = len(records)
    index.append(i)
  return index, records
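
# For example (illustrative): two characters sharing the property tuple (1, 5)
# share record 0, so only one copy of each unique record is kept.
assert combine_tables([1, 1, 2], [5, 5, 6]) == ([0, 0, 1], {(1, 5): 0, (2, 6): 1})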

def get_record_size_struct(records):
  size = 0
  structure = '/* When recompiling tables with a new Unicode version, please check the\n' + \
    'types in this structure definition from pcre2_internal.h (the actual\n' + \
    'field names will be different):\n\ntypedef struct {\n'
  for i in range(len(records[0])):
    record_slice = [record[i] for record in records]
    slice_type, slice_size = get_type_size(record_slice)
    # add padding: round up to the nearest power of slice_size
    size = (size + slice_size - 1) & -slice_size
    size += slice_size
    structure += '%s property_%d;\n' % (slice_type, i)

  # round up to the first item of the next structure in array
  record_slice = [record[0] for record in records]
  slice_type, slice_size = get_type_size(record_slice)
  size = (size + slice_size - 1) & -slice_size

  structure += '} ucd_record;\n*/\n'
  return size, structure
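
# The expression (size + slice_size - 1) & -slice_size rounds size up to the
# next multiple of slice_size (always a power of two here); for example, a
# 5-byte prefix followed by a 4-byte field makes that field start at offset 8:
assert (5 + 4 - 1) & -4 == 8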

def test_record_size():
  tests = [ \
    ( [(3,), (6,), (6,), (1,)], 1 ), \
    ( [(300,), (600,), (600,), (100,)], 2 ), \
    ( [(25, 3), (6, 6), (34, 6), (68, 1)], 2 ), \
    ( [(300, 3), (6, 6), (340, 6), (690, 1)], 4 ), \
    ( [(3, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
    ( [(300, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
    ( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ), \
    ( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ), \
  ]
  for test in tests:
    size, struct = get_record_size_struct(test[0])
    assert(size == test[1])
    #print struct

def print_records(records, record_size):
  print('const ucd_record PRIV(ucd_records)[] = { ' + \
    '/* %d bytes, record size %d */' % (len(records) * record_size, record_size))

  records = list(zip(list(records.keys()), list(records.values())))
  records.sort(key = lambda x: x[1])
  for i, record in enumerate(records):
    print(('  {' + '%6d, ' * len(record[0]) + '}, /* %3d */') % (record[0] + (i,)))
  print('};\n')

script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal',
 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian',
 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana',
 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam',
 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic',
 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana',
 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi',
 # New for Unicode 5.0
 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician',
 # New for Unicode 5.1
 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai',
 # New for Unicode 5.2
 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic',
 'Inscriptional_Pahlavi', 'Inscriptional_Parthian',
 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek',
 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet',
 # New for Unicode 6.0.0
 'Batak', 'Brahmi', 'Mandaic',
 # New for Unicode 6.1.0
 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri',
 # New for Unicode 7.0.0
 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi',
 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean',
 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi',
 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi',
 # New for Unicode 8.0.0
 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian',
 'SignWriting',
 # New for Unicode 10.0.0
 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi',
 'Nushu', 'Soyombo', 'Zanabazar_Square',
 # New for Unicode 11.0.0
 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin',
 'Old_Sogdian', 'Sogdian',
 # New for Unicode 12.0.0
 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho'
 ]

script_abbrevs = [
  'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans',
  'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor',
  'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr',
  'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb',
  'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya',
  'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale',
  'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii',
  #New for Unicode 5.0
  'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx',
  #New for Unicode 5.1
  'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur',
  'Sund', 'Vaii',
  #New for Unicode 5.2
  'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu',
  'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt',
  #New for Unicode 6.0.0
  'Batk', 'Brah', 'Mand',
  #New for Unicode 6.1.0
  'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr',
  #New for Unicode 7.0.0
  'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj',
  'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm',
  'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara',
  #New for Unicode 8.0.0
  'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw',
  #New for Unicode 10.0.0
  'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo',
  'Zanb',
  #New for Unicode 11.0.0
  'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd',
  #New for Unicode 12.0.0
  'Elym', 'Nand', 'Hmnp', 'Wcho'
  ]

category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
  'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
  'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]

# The Extended_Pictographic property is not found in the file where all the
# others are (GraphemeBreakProperty.txt). It comes from the emoji-data.txt
# file, but we list it here so that the name has the correct index value.

break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend',
  'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other',
  'ZWJ', 'Extended_Pictographic' ]

test_record_size()
unicode_version = ""

script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown'))
category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))
break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other'))
other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0)

# The grapheme breaking rules were changed for Unicode 11.0.0 (June 2018). Now
# we need to find the Extended_Pictographic property for emoji characters. This
# can be set as an additional grapheme break property, because the default for
# all the emojis is "other". We scan the emoji-data.txt file and modify the
# break-props table.

file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8')
for line in file:
  line = re.sub(r'#.*', '', line)
  chardata = list(map(str.strip, line.split(';')))
  if len(chardata) <= 1:
    continue

  if chardata[1] != "Extended_Pictographic":
    continue

  m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
  char = int(m.group(1), 16)
  if m.group(3) is None:
    last = char
  else:
    last = int(m.group(3), 16)
  for i in range(char, last + 1):
    if break_props[i] != break_property_names.index('Other'):
      print("WARNING: Emoji 0x%x has break property %s, not 'Other'" %
        (i, break_property_names[break_props[i]]), file=sys.stderr)
    break_props[i] = break_property_names.index('Extended_Pictographic')
file.close()

# The Script Extensions property default value is the Script value. Parse the
# file, setting 'Unknown' as the default (this will never be a Script Extension
# value), then scan it and fill in the default from Scripts. Code added by PH
# in October 2018. Positive values are used for just a single script for a
# code point. Negative values are negated offsets in a list of lists of
# multiple scripts. Initialize this list with a single entry, as the zeroth
# element is never used.

script_lists = [0]
script_abbrevs_default = script_abbrevs.index('Zzzz')
scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default)

for i in range(0, MAX_UNICODE):
  if scriptx[i] == script_abbrevs_default:
    scriptx[i] = script[i]

# With the addition of the new Script Extensions field, we need some padding
# to get the Unicode records up to 12 bytes (multiple of 4). Set a value
# greater than 255 to make the field 16 bits.

padding_dummy = [0] * MAX_UNICODE
padding_dummy[0] = 256
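
# Since 256 exceeds the uint8_t limit, get_type_size() now selects a 16-bit
# type for this column (an illustrative check):
assert get_type_size(padding_dummy) == ("uint16_t", 2)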

# This block of code was added by PH in September 2012. I am not a Python
# programmer, so the style is probably dreadful, but it does the job. It scans
# the other_case table to find sets of more than two characters that must all
# match each other caselessly. Later in this script a table of these sets is
# written out. However, we have to do this work here in order to compute the
# offsets in the table that are inserted into the main table.

# The CaseFolding.txt file lists pairs, but the common logic for reading data
# sets only one value, so first we go through the table and set "return"
# offsets for those that are not already set.

for c in range(MAX_UNICODE):
  if other_case[c] != 0 and other_case[c + other_case[c]] == 0:
    other_case[c + other_case[c]] = -other_case[c]

# Now scan again and create equivalence sets.

sets = []

for c in range(MAX_UNICODE):
  o = c + other_case[c]

  # Trigger when this character's other case does not point back here. We
  # now have three characters that are case-equivalent.

  if other_case[o] != -other_case[c]:
    t = o + other_case[o]

    # Scan the existing sets to see if any of the three characters are already
    # part of a set. If so, unite the existing set with the new set.

    appended = 0
    for s in sets:
      found = 0
      for x in s:
        if x == c or x == o or x == t:
          found = 1

      # Add new characters to an existing set

      if found:
        for y in [c, o, t]:
          found = 0
          for x in s:
            if x == y:
              found = 1
          if not found:
            s.append(y)
          appended = 1

    # If we have not added to an existing set, create a new one.

    if not appended:
      sets.append([c, o, t])

# End of loop looking for caseless sets.

# Now scan the sets and set appropriate offsets for the characters.

caseless_offsets = [0] * MAX_UNICODE

offset = 1
for s in sets:
  for x in s:
    caseless_offsets[x] = offset
  offset += len(s) + 1

# End of block of code for creating offsets for caseless matching sets.
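
# For illustration: if the first set found is {K, k, KELVIN SIGN}, all three
# characters get offset 1, and the next set starts at offset 1 + 3 + 1 = 5,
# leaving room for the set's three entries plus its NOTACHAR terminator.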

# Combine the tables

table, records = combine_tables(script, category, break_props,
  caseless_offsets, other_case, scriptx, padding_dummy)

record_size, record_struct = get_record_size_struct(list(records.keys()))

# Find the optimum block size for the two-stage table
min_size = sys.maxsize
for block_size in [2 ** i for i in range(5,10)]:
  size = len(records) * record_size
  stage1, stage2 = compress_table(table, block_size)
  size += get_tables_size(stage1, stage2)
  #print "/* block size %5d => %5d bytes */" % (block_size, size)
  if size < min_size:
    min_size = size
    min_stage1, min_stage2 = stage1, stage2
    min_block_size = block_size

print("/* This module is generated by the maint/MultiStage2.py script.")
print("Do not modify it by hand. Instead modify the script and run it")
print("to regenerate this code.")
print()
print("As well as being part of the PCRE2 library, this module is #included")
print("by the pcre2test program, which redefines the PRIV macro to change")
print("table names from _pcre2_xxx to xxxx, thereby avoiding name clashes")
print("with the library. At present, just one of these tables is actually")
print("needed. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print("#ifdef HAVE_CONFIG_H")
print("#include \"config.h\"")
print("#endif")
print()
print("#include \"pcre2_internal.h\"")
print()
print("#endif /* PCRE2_PCRE2TEST */")
print()
print("/* Unicode character database. */")
print("/* This file was autogenerated by the MultiStage2.py script. */")
print("/* Total size: %d bytes, block size: %d. */" % (min_size, min_block_size))
print()
print("/* The tables herein are needed only when UCP support is built,")
print("and in PCRE2 that happens automatically with UTF support.")
print("This module should not be referenced otherwise, so")
print("it should not matter whether it is compiled or not. However")
print("a comment was received about space saving - maybe the guy linked")
print("all the modules rather than using a library - so we include a")
print("condition to cut out the tables when not needed. But don't leave")
print("a totally empty module because some compilers barf at that.")
print("Instead, just supply some small dummy tables. */")
print()
print("#ifndef SUPPORT_UNICODE")
print("const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};")
print("const uint16_t PRIV(ucd_stage1)[] = {0};")
print("const uint16_t PRIV(ucd_stage2)[] = {0};")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {0};")
print("#else")
print()
print("const char *PRIV(unicode_version) = \"{}\";".format(unicode_version))
print()
print("/* If the 32-bit library is run in non-32-bit mode, character values")
print("greater than 0x10ffff may be encountered. For these we set up a")
print("special record. */")
print()
print("#if PCRE2_CODE_UNIT_WIDTH == 32")
print("const ucd_record PRIV(dummy_ucd_record)[] = {{")
print("  ucp_Unknown,    /* script */")
print("  ucp_Cn,         /* type unassigned */")
print("  ucp_gbOther,    /* grapheme break property */")
print("  0,              /* case set */")
print("  0,              /* other case */")
print("  ucp_Unknown,    /* script extension */")
print("  0,              /* dummy filler */")
print("  }};")
print("#endif")
print()
print(record_struct)

# --- Added by PH: output the table of caseless character sets ---

print("/* This table contains lists of characters that are caseless sets of")
print("more than one character. Each list is terminated by NOTACHAR. */\n")

print("const uint32_t PRIV(ucd_caseless_sets)[] = {")
print("  NOTACHAR,")
for s in sets:
  s = sorted(s)
  for x in s:
    print('  0x%04x,' % x, end=' ')
  print(' NOTACHAR,')
print('};')
print()

# ------

print("/* When #included in pcre2test, we don't need the table of digit")
print("sets, nor the large main UCD tables. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()

# --- Added by PH: read Scripts.txt again for the sets of 10 digits. ---

digitsets = []
file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8')

for line in file:
  m = re.match(r'([0-9a-fA-F]+)\.\.([0-9a-fA-F]+)\s+;\s+\S+\s+#\s+Nd\s+', line)
  if m is None:
    continue
  first = int(m.group(1),16)
  last = int(m.group(2),16)
  if ((last - first + 1) % 10) != 0:
    print("ERROR: %04x..%04x does not contain a multiple of 10 characters" % (first, last),
      file=sys.stderr)
  while first < last:
    digitsets.append(first + 9)
    first += 10
file.close()
digitsets.sort()
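
# For example, the ASCII digits 0030..0039 contribute the single value 0x0039,
# the code point of '9' in that set; after sorting it is the first entry.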

print("/* This table lists the code points for the '9' characters in each")
print("set of decimal digits. It is used to ensure that all the digits in")
print("a script run come from the same set. */\n")
print("const uint32_t PRIV(ucd_digit_sets)[] = {")

print("  %d,  /* Number of subsequent values */" % len(digitsets), end='')
count = 8
for d in digitsets:
  if count == 8:
    print("\n ", end='')
    count = 0
  print(" 0x%05x," % d, end='')
  count += 1
print("\n};\n")

print("/* This vector is a list of lists of scripts for the Script Extension")
print("property. Each sublist is zero-terminated. */\n")
print("const uint8_t PRIV(ucd_script_sets)[] = {")

count = 0
print("  /*   0 */", end='')
for d in script_lists:
  print(" %3d," % d, end='')
  count += 1
  if d == 0:
    print("\n  /* %3d */" % count, end='')
print("\n};\n")

# Output the main UCD tables.

print("/* These are the main two-stage UCD tables. The fields in each record are:")
print("script (8 bits), character type (8 bits), grapheme break property (8 bits),")
print("offset to multichar other cases or zero (8 bits), offset to other case")
print("or zero (32 bits, signed), script extension (16 bits, signed), and a dummy")
print("16-bit field to make the whole thing a multiple of 4 bytes. */\n")

print_records(records, record_size)
print_table(min_stage1, 'PRIV(ucd_stage1)')
print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size)
print("#if UCD_BLOCK_SIZE != %d" % min_block_size)
print("#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h")
print("#endif")
print("#endif /* SUPPORT_UNICODE */")
print()
print("#endif /* PCRE2_PCRE2TEST */")

# This code was part of the original contribution, but is commented out as it
# was never used. A two-stage table has sufficed.

"""

# Three-stage tables:

# Find the optimum block size for 3-stage table
min_size = sys.maxint
for stage3_block in [2 ** i for i in range(2,6)]:
  stage_i, stage3 = compress_table(table, stage3_block)
  for stage2_block in [2 ** i for i in range(5,10)]:
    size = len(records) * 4
    stage1, stage2 = compress_table(stage_i, stage2_block)
    size += get_tables_size(stage1, stage2, stage3)
    # print "/* %5d / %3d => %5d bytes */" % (stage2_block, stage3_block, size)
    if size < min_size:
      min_size = size
      min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3
      min_stage2_block, min_stage3_block = stage2_block, stage3_block

print "/* Total size: %d bytes" % min_size */
print_records(records)
print_table(min_stage1, 'ucd_stage1')
print_table(min_stage2, 'ucd_stage2', min_stage2_block)
print_table(min_stage3, 'ucd_stage3', min_stage3_block)

"""