#!/usr/bin/env python3

"""usage: ./gen-indic-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt Blocks.txt
|
2020-02-10 14:49:23 +01:00
|
|
|
|
2020-03-15 20:59:31 +01:00
|
|
|
Input files:
|
2020-02-10 14:49:23 +01:00
|
|
|
* https://unicode.org/Public/UCD/latest/ucd/IndicSyllabicCategory.txt
|
|
|
|
* https://unicode.org/Public/UCD/latest/ucd/IndicPositionalCategory.txt
|
2020-05-28 12:31:15 +02:00
|
|
|
* https://unicode.org/Public/UCD/latest/ucd/Blocks.txt
|
|
|
|
"""
|
|
|
|
|
2020-05-28 21:41:19 +02:00
|
|
|
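# The script writes C++ source to stdout; in the HarfBuzz tree the output is
# conventionally redirected into the generated table file (e.g.
# hb-ot-shape-complex-indic-table.cc).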
import sys

if len (sys.argv) != 4:
    sys.exit (__doc__)

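# Only characters from these blocks (plus the two stand-alone characters in
# ALLOWED_SINGLES: U+00A0 NO-BREAK SPACE and U+25CC DOTTED CIRCLE) end up in
# the generated table; everything else is dropped after the merge below.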
ALLOWED_SINGLES = [0x00A0, 0x25CC]
ALLOWED_BLOCKS = [
    'Basic Latin',
    'Latin-1 Supplement',
    'Devanagari',
    'Bengali',
    'Gurmukhi',
    'Gujarati',
    'Oriya',
    'Tamil',
    'Telugu',
    'Kannada',
    'Malayalam',
    'Sinhala',
    'Myanmar',
    'Khmer',
    'Vedic Extensions',
    'General Punctuation',
    'Superscripts and Subscripts',
    'Devanagari Extended',
    'Myanmar Extended-B',
    'Myanmar Extended-A',
]

files = [open (x, encoding='utf-8') for x in sys.argv[1:]]

headers = [[f.readline () for i in range (2)] for f in files]

data = [{} for f in files]
values = [{} for f in files]
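# Parse the body of each input file.  Each record is
# 'codepoint[..codepoint] ; Value' (e.g. "0915..0939 ; Consonant"), and '#'
# starts a comment.  data[i] maps each codepoint to its value, and values[i]
# counts how many characters carry each value.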
for i, f in enumerate (files):
    for line in f:

        j = line.find ('#')
        if j >= 0:
            line = line[:j]

        fields = [x.strip () for x in line.split (';')]
        if len (fields) == 1:
            continue

        uu = fields[0].split ('..')
        start = int (uu[0], 16)
        if len (uu) == 1:
            end = start
        else:
            end = int (uu[1], 16)

        t = fields[1]

        for u in range (start, end + 1):
            data[i][u] = t
        values[i][t] = values[i].get (t, 0) + end - start + 1

# Merge data into one dict:
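# combined[u] is a triple [syllabic_category, positional_category, block].
# Blocks.txt (i == 2) only annotates codepoints already seen in the first two
# files; codepoints that appear in neither Indic file are skipped.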
defaults = ('Other', 'Not_Applicable', 'No_Block')
for i,v in enumerate (defaults):
    values[i][v] = values[i].get (v, 0) + 1
combined = {}
for i,d in enumerate (data):
    for u,v in d.items ():
        if i == 2 and not u in combined:
            continue
        if not u in combined:
            combined[u] = list (defaults)
        combined[u][i] = v
combined = {k:v for k,v in combined.items() if k in ALLOWED_SINGLES or v[2] in ALLOWED_BLOCKS}
data = combined
del combined
num = len (data)

# Move the outliers NO-BREAK SPACE and DOTTED CIRCLE out
singles = {}
for u in ALLOWED_SINGLES:
    singles[u] = data[u]
    del data[u]

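# Emit the header of the generated C++ source: a banner recording the command
# line and the input-file headers, followed by the required #includes.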
print ("/* == Start of generated table == */")
|
|
|
|
print ("/*")
|
|
|
|
print (" * The following table is generated by running:")
|
|
|
|
print (" *")
|
|
|
|
print (" * ./gen-indic-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt Blocks.txt")
|
|
|
|
print (" *")
|
|
|
|
print (" * on files with these headers:")
|
|
|
|
print (" *")
|
2011-06-02 23:43:12 +02:00
|
|
|
for h in headers:
|
|
|
|
for l in h:
|
2018-03-29 10:18:47 +02:00
|
|
|
print (" * %s" % (l.strip()))
|
|
|
|
print (" */")
|
|
|
|
print ()
|
2019-06-26 22:21:03 +02:00
|
|
|
print ('#include "hb.hh"')
|
|
|
|
print ()
|
|
|
|
print ('#ifndef HB_NO_OT_SHAPE')
|
|
|
|
print ()
|
2018-08-26 07:36:36 +02:00
|
|
|
print ('#include "hb-ot-shape-complex-indic.hh"')
|
2018-03-29 10:18:47 +02:00
|
|
|
print ()
|
2011-06-02 23:43:12 +02:00
|
|
|
|
|
|
|
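# Map each long Unicode value name to a short alias used in the generated
# _() entries.  Values not listed here get an alias built from the capital
# letters of their name (e.g. 'Consonant_Dead' -> 'CD'); clashes raise below.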
# Shorten values
short = [{
    "Bindu": 'Bi',
    "Cantillation_Mark": 'Ca',
    "Joiner": 'ZWJ',
    "Non_Joiner": 'ZWNJ',
    "Number": 'Nd',
    "Visarga": 'Vs',
    "Vowel": 'Vo',
    "Vowel_Dependent": 'M',
    "Consonant_Prefixed": 'CPrf',
    "Other": 'x',
},{
    "Not_Applicable": 'x',
}]
all_shorts = [{},{}]

# Add some of the values, to make them more readable, and to avoid duplicates
for i in range (2):
    for v,s in short[i].items ():
        all_shorts[i][s] = v

what = ["INDIC_SYLLABIC_CATEGORY", "INDIC_MATRA_CATEGORY"]
|
|
|
|
what_short = ["ISC", "IMC"]
|
2019-01-17 21:04:44 +01:00
|
|
|
print ('#pragma GCC diagnostic push')
|
|
|
|
print ('#pragma GCC diagnostic ignored "-Wunused-macros"')
|
2019-08-28 13:31:27 +02:00
|
|
|
cat_defs = []
|
2011-06-02 23:43:12 +02:00
|
|
|
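# Build one (short macro name, long macro name, character count, value name)
# tuple per category value, generating fallback short aliases as needed.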
for i in range (2):
    vv = sorted (values[i].keys ())
    for v in vv:
        v_no_and = v.replace ('_And_', '_')
        if v in short[i]:
            s = short[i][v]
        else:
            s = ''.join ([c for c in v_no_and if ord ('A') <= ord (c) <= ord ('Z')])
            if s in all_shorts[i]:
                raise Exception ("Duplicate short value alias", v, all_shorts[i][s])
            all_shorts[i][s] = v
            short[i][v] = s
        cat_defs.append ((what_short[i] + '_' + s, what[i] + '_' + v.upper (), str (values[i][v]), v))

maxlen_s = max ([len (c[0]) for c in cat_defs])
maxlen_l = max ([len (c[1]) for c in cat_defs])
maxlen_n = max ([len (c[2]) for c in cat_defs])
for s in what_short:
    print ()
    for c in [c for c in cat_defs if s in c[0]]:
        print ("#define %s %s /* %s chars; %s */" %
            (c[0].ljust (maxlen_s), c[1].ljust (maxlen_l), c[2].rjust (maxlen_n), c[3]))
print ()
print ('#pragma GCC diagnostic pop')
print ()
print ("#define _(S,M) INDIC_COMBINE_CATEGORIES (ISC_##S, IMC_##M)")
print ()
print ()

total = 0
used = 0
last_block = None
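# print_block() emits one chunk of the table: eight _(ISC,IMC) entries per
# output line, each line prefixed with its starting codepoint, and keeps
# running totals so the final occupancy can be reported.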
def print_block (block, start, end, data):
    global total, used, last_block
    if block and block != last_block:
        print ()
        print ()
        print (" /* %s */" % block)
    num = 0
    assert start % 8 == 0
    assert (end+1) % 8 == 0
    for u in range (start, end+1):
        if u % 8 == 0:
            print ()
            print (" /* %04X */" % u, end="")
        if u in data:
            num += 1
        d = data.get (u, defaults)
        print ("%9s" % ("_(%s,%s)," % (short[0][d[0]], short[1][d[1]])), end="")

    total += end - start + 1
    used += num
    if block:
        last_block = block

uu = sorted (data.keys ())

last = -100000
num = 0
offset = 0
starts = []
ends = []
print ("static const INDIC_TABLE_ELEMENT_TYPE indic_table[] = {")
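# Walk the codepoints in order, grouping them into ranges aligned to
# multiples of 8.  Gaps of up to 1+16*3 codepoints are filled inline with
# default entries; larger gaps end the current sub-range and start a new one
# with its own indic_offset_0x....u macro into indic_table[].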
for u in uu:
    if u <= last:
        continue
    block = data[u][2]

    start = u//8*8
    end = start+1
    while end in uu and block == data[end][2]:
        end += 1
    end = (end-1)//8*8 + 7

    if start != last + 1:
        if start - last <= 1+16*3:
            print_block (None, last+1, start-1, data)
            last = start-1
        else:
            if last >= 0:
                ends.append (last + 1)
                offset += ends[-1] - starts[-1]
            print ()
            print ()
            print ("#define indic_offset_0x%04xu %d" % (start, offset))
            starts.append (start)

    print_block (block, start, end, data)
    last = end
ends.append (last + 1)
offset += ends[-1] - starts[-1]
print ()
print ()
occupancy = used * 100. / total
page_bits = 12
print ("}; /* Table items: %d; occupancy: %d%% */" % (offset, occupancy))
print ()
print ("INDIC_TABLE_ELEMENT_TYPE")
print ("hb_indic_get_categories (hb_codepoint_t u)")
print ("{")
print (" switch (u >> %d)" % page_bits)
print (" {")
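# The lookup function switches on the top bits of the codepoint (4096-wide
# "pages", page_bits == 12).  Each page that contains table data gets a case
# label: the singles are matched with direct comparisons, everything else
# through a range check into indic_table[] using the per-range offset macros.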
pages = set ([u>>page_bits for u in starts+ends+list (singles.keys ())])
for p in sorted(pages):
    print (" case 0x%0Xu:" % p)
    for u,d in singles.items ():
        if p != u>>page_bits: continue
        print (" if (unlikely (u == 0x%04Xu)) return _(%s,%s);" % (u, short[0][d[0]], short[1][d[1]]))
    for (start,end) in zip (starts, ends):
        if p not in [start>>page_bits, end>>page_bits]: continue
        offset = "indic_offset_0x%04xu" % start
        print (" if (hb_in_range<hb_codepoint_t> (u, 0x%04Xu, 0x%04Xu)) return indic_table[u - 0x%04Xu + %s];" % (start, end-1, start, offset))
    print (" break;")
    print ("")
print (" default:")
|
|
|
|
print (" break;")
|
|
|
|
print (" }")
|
|
|
|
print (" return _(x,x);")
|
|
|
|
print ("}")
|
|
|
|
print ()
|
|
|
|
print ("#undef _")
|
2011-06-02 23:43:12 +02:00
|
|
|
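# Undefine the per-category helper macros so they do not leak out of the
# generated file.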
for i in range (2):
    print ()
    vv = sorted (values[i].keys ())
    for v in vv:
        print ("#undef %s_%s" %
            (what_short[i], short[i][v]))
    print ()
print ('#endif')
print ()
print ("/* == End of generated table == */")

# Maintain at least 30% occupancy in the table
if occupancy < 30:
    raise Exception ("Table too sparse, please investigate: ", occupancy)