[USE] Build Universal Shaping Engine data table from Unicode 8 files

Behdad Esfahbod 2015-07-20 11:46:17 +01:00
parent e2c95116e1
commit c48ff28852
2 changed files with 1032 additions and 19 deletions
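Note: the renamed generator reads its categories from two Unicode Character Database property files, IndicSyllabicCategory.txt and IndicPositionalCategory.txt, whose data lines follow the standard UCD record layout "XXXX[..YYYY] ; Property_Value # comment". The sketch below only illustrates that input format; it is not code from this commit, and the function and variable names are hypothetical.

# Illustrative only: the general shape of a UCD property-file parser.
# Data lines look like "XXXX[..YYYY] ; Property_Value # comment".
# Names here are hypothetical, not taken from gen-use-table.py.
def parse_ucd_property_file (path):
	mapping = {}
	for line in open (path):
		line = line.split ('#', 1)[0].strip ()	# strip trailing comments
		if not line:
			continue	# skip blank and comment-only lines
		codepoints, value = [x.strip () for x in line.split (';', 1)]
		if '..' in codepoints:
			start, end = [int (x, 16) for x in codepoints.split ('..')]
		else:
			start = end = int (codepoints, 16)
		for u in range (start, end + 1):
			mapping[u] = value
	return mapping

The diff below then maps such values through short-name tables before emitting the C array.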


@@ -3,7 +3,7 @@
import sys
if len (sys.argv) != 4:
-print >>sys.stderr, "usage: ./gen-indic-table.py IndicSyllabicCategory.txt IndicMatraCategory.txt Blocks.txt"
+print >>sys.stderr, "usage: ./gen-use-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt Blocks.txt"
sys.exit (1)
BLACKLISTED_BLOCKS = ["Thai", "Lao", "Tibetan"]
@@ -55,13 +55,9 @@ data = combined
del combined
num = len (data)
-for u in [0x17CD, 0x17CE, 0x17CF, 0x17D0, 0x17D3]:
-if data[u][0] == 'Other':
-data[u][0] = "Vowel_Dependent"
-# Move the outliers NO-BREAK SPACE and DOTTED CIRCLE out
+# Remove the outliers
singles = {}
-for u in [0x00A0, 0x25CC]:
+for u in []: # TODO [0x00A0, 0x200C, 0x200D, 0x25CC, 0x1107F]:
singles[u] = data[u]
del data[u]
@@ -69,7 +65,7 @@ print "/* == Start of generated table == */"
print "/*"
print " * The following table is generated by running:"
print " *"
print " * ./gen-indic-table.py IndicSyllabicCategory.txt IndicMatraCategory.txt Blocks.txt"
print " * ./gen-use-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt Blocks.txt"
print " *"
print " * on files with these headers:"
print " *"
@@ -78,7 +74,7 @@ for h in headers:
print " * %s" % (l.strip())
print " */"
print
-print '#include "hb-ot-shape-complex-indic-private.hh"'
+print '#include "hb-ot-shape-complex-use-private.hh"'
print
# Shorten values
@@ -92,6 +88,7 @@ short = [{
"Vowel": 'Vo',
"Vowel_Dependent": 'M',
"Other": 'x',
"Consonant_Placeholder":'GB',
},{
"Not_Applicable": 'x',
}]
@@ -104,8 +101,8 @@ for i in range (2):
for v,s in short[i].items ():
all_shorts[i][s] = v
what = ["INDIC_SYLLABIC_CATEGORY", "INDIC_MATRA_CATEGORY"]
what_short = ["ISC", "IMC"]
what = ["INDIC_SYLLABIC_CATEGORY", "INDIC_POSITIONAL_CATEGORY"]
what_short = ["SC", "PC"]
for i in range (2):
print
vv = values[i].keys ()
@@ -122,10 +119,10 @@ for i in range (2):
short[i][v] = s
print "#define %s_%s %s_%s %s/* %3d chars; %s */" % \
(what_short[i], s, what[i], v.upper (), \
-' '* ((48-1 - len (what[i]) - 1 - len (v)) / 8), \
+' '* ((56-1 - len (what[i]) - 1 - len (v)) / 8), \
values[i][v], v)
print
print "#define _(S,M) INDIC_COMBINE_CATEGORIES (ISC_##S, IMC_##M)"
print "#define _(S,M) USE_COMBINE_CATEGORIES (SC_##S, PC_##M)"
print
print
@@ -163,7 +160,7 @@ num = 0
offset = 0
starts = []
ends = []
print "static const INDIC_TABLE_ELEMENT_TYPE indic_table[] = {"
print "static const USE_TABLE_ELEMENT_TYPE use_table[] = {"
for u in uu:
if u <= last:
continue
@@ -185,7 +182,7 @@ for u in uu:
offset += ends[-1] - starts[-1]
print
print
print "#define indic_offset_0x%04xu %d" % (start, offset)
print "#define use_offset_0x%04xu %d" % (start, offset)
starts.append (start)
print_block (block, start, end, data)
@@ -198,8 +195,8 @@ occupancy = used * 100. / total
page_bits = 12
print "}; /* Table items: %d; occupancy: %d%% */" % (offset, occupancy)
print
print "INDIC_TABLE_ELEMENT_TYPE"
print "hb_indic_get_categories (hb_codepoint_t u)"
print "USE_TABLE_ELEMENT_TYPE"
print "hb_use_get_categories (hb_codepoint_t u)"
print "{"
print " switch (u >> %d)" % page_bits
print " {"
@@ -208,8 +205,8 @@ for p in sorted(pages):
print " case 0x%0Xu:" % p
for (start,end) in zip (starts, ends):
if p not in [start>>page_bits, end>>page_bits]: continue
offset = "indic_offset_0x%04xu" % start
print " if (hb_in_range (u, 0x%04Xu, 0x%04Xu)) return indic_table[u - 0x%04Xu + %s];" % (start, end-1, start, offset)
offset = "use_offset_0x%04xu" % start
print " if (hb_in_range (u, 0x%04Xu, 0x%04Xu)) return use_table[u - 0x%04Xu + %s];" % (start, end-1, start, offset)
for u,d in singles.items ():
if p != u>>page_bits: continue
print " if (unlikely (u == 0x%04Xu)) return _(%s,%s);" % (u, short[0][d[0]], short[1][d[1]])

File diff suppressed because it is too large
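Note: the print statements above emit a C lookup of the following shape: per-block slices of one flat use_table[] array, each reached through a #define'd offset, selected by a switch on the code point's high bits followed by range checks. The Python sketch below is an assumption-laden paraphrase of that two-level scheme, not the generated code; the real table and offsets live in the suppressed generated file.

# Rough paraphrase of the lookup that gen-use-table.py prints as C code.
# The block list and table contents here are empty placeholders, not real data.
PAGE_BITS = 12

use_table = []	# flat array of combined category values, one block after another
blocks = []	# (start, end_exclusive, offset into use_table) per emitted block

def use_get_categories (u):
	page = u >> PAGE_BITS
	for start, end, offset in blocks:
		# mirror the generated switch: only blocks touching this page are tried
		if page not in (start >> PAGE_BITS, end >> PAGE_BITS):
			continue
		if start <= u < end:
			return use_table[u - start + offset]
	return None	# the generated C instead falls back to a default category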