/[pcre]/code/trunk/maint/MultiStage2.py
ViewVC logotype

Contents of /code/trunk/maint/MultiStage2.py

Parent Directory Parent Directory | Revision Log Revision Log


Revision 352 - (show annotations) (download) (as text)
Mon Jul 7 15:12:56 2008 UTC (6 years, 2 months ago) by ph10
File MIME type: text/x-python
File size: 13935 byte(s)
Final (?) tidies for new Unicode property code.

1 #! /usr/bin/python
2
3 # Multistage table builder
4 # (c) Peter Kankowski, 2008
5
6 ##############################################################################
7 # This script was submitted to the PCRE project by Peter Kankowski as part of
8 # the upgrading of Unicode property support. The new code speeds up property
9 # matching many times. The script is for the use of PCRE maintainers, to
10 # generate the pcre_ucd.c file that contains a digested form of the Unicode
11 # data tables.
12 #
13 # The script should be run in the maint subdirectory, using the command
14 #
15 # ./MultiStage2.py >../pcre_ucd.c
16 #
17 # It requires three Unicode data tables, DerivedGeneralCategory.txt,
18 # Scripts.txt, and UnicodeData.txt, to be in the Unicode.tables subdirectory.
19 #
20 # Minor modifications made to this script:
21 # Added #! line at start
22 # Removed tabs
23 # Made it work with Python 2.4 by rewriting two statements that needed 2.5
24 # Consequent code tidy
25 # Adjusted data file names to take from the Unicode.tables directory
26 # Adjusted global table names by prefixing _pcre_.
27 # Commented out stuff relating to the casefolding table, which isn't used.
28 # Corrected size calculation
29 #
30 # The tables generated by this script are used by macros defined in
31 # pcre_internal.h. They look up Unicode character properties using short
32 # sequences of code that contains no branches, which makes for greater speed.
33 #
34 # Conceptually, there is a table of records (of type ucd_record), containing a
35 # script number, character type, and offset to the character's other case for
36 # every character. However, a real table covering all Unicode characters would
37 # be far too big. It can be efficiently compressed by observing that many
38 # characters have the same record, and many blocks of characters (taking 128
39 # characters in a block) have the same set of records as other blocks. This
40 # leads to a 2-stage lookup process.
41 #
42 # This script constructs three tables. The _pcre_ucd_records table contains
43 # one instance of every unique record that is required. The _pcre_ucd_stage1
44 # table is indexed by a character's block number, and yields what is in effect
45 # a "virtual" block number. The _pcre_ucd_stage2 table is a table of "virtual"
46 # blocks; each block is indexed by the offset of a character within its own
47 # block, and the result is the offset of the required record.
48 #
49 # Example: lowercase "a" (U+0061) is in block 0
50 # lookup 0 in stage1 table yields 0
51 # lookup 97 in the first table in stage2 yields 12
52 # record 12 is { 33, 5, -32 } (Latin, lowercase, upper is U+0041)
53 #
54 # All lowercase latin characters resolve to the same record.
55 #
56 # Example: hiragana letter A (U+3042) is in block 96 (0x60)
57 # lookup 96 in stage1 table yields 83
58 # lookup 66 in the 83rd table in stage2 yields 348
59 # record 348 is { 26, 7, 0 } (Hiragana, other letter, no other case)
60 #
61 # In these examples, no other blocks resolve to the same "virtual" block, as it
62 # happens, but plenty of other blocks do share "virtual" blocks.
63 #
64 # There is a fourth table, maintained by hand, which translates from the
65 # individual character types such as ucp_Cc to the general types like ucp_C.
66 #
67 # Philip Hazel, 03 July 2008
68 ##############################################################################
69
70
71 import re
72 import string
73 import sys
74
MAX_UNICODE = 0x110000   # one more than the highest Unicode code point (U+10FFFF)
NOTACHAR = 0xffffffff    # sentinel meaning "not a character" (not referenced in the visible code)
77
# Build a parser for lines of Scripts.txt or DerivedGeneralCategory.txt.
# The returned function maps a split line (list of fields) to the index of
# its field-1 value within the supplied enumeration list.
def make_get_names(enum):
    def get_value(chardata):
        return enum.index(chardata[1])
    return get_value
81
# Disabled: parser for CaseFolding.txt lines, retained for reference only
# (the case-folding table is commented out -- see the header notes above).
#def get_case_folding_value(chardata):
#  if chardata[1] != 'C' and chardata[1] != 'S':
#    return 0
#  return int(chardata[2], 16) - int(chardata[0], 16)
86
# Parse a line of UnicodeData.txt: return the signed distance from this
# character to its "other case".  Field 12 (uppercase mapping) is preferred,
# then field 13 (lowercase mapping); 0 means the character has no other case.
def get_other_case(chardata):
    for field in (12, 13):
        if chardata[field] != '':
            return int(chardata[field], 16) - int(chardata[0], 16)
    return 0
93
# Read one Unicode data file into a full per-code-point table held in memory.
# Each non-comment line holds semicolon-separated fields; field 0 names a
# single code point ("0041") or a range ("0041..005A").  get_value() turns
# the split fields into the value stored for those code points; code points
# the file does not mention keep default_value.
def read_table(file_name, get_value, default_value):
    f = open(file_name, 'r')
    table = [default_value] * MAX_UNICODE
    for line in f:
        line = re.sub(r'#.*', '', line)          # strip trailing comments
        chardata = [field.strip() for field in line.split(';')]
        if len(chardata) <= 1:
            continue                             # blank or comment-only line
        value = get_value(chardata)

        m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
        first = int(m.group(1), 16)
        if m.group(3) is None:
            last = first
        else:
            last = int(m.group(3), 16)
        for code in range(first, last + 1):
            table[code] = value
    f.close()
    return table
115
# Get the smallest possible C language type for the values in 'table'.
# Returns a (type name, size in bytes) pair.  Unsigned types are tried
# first, then signed ones, in increasing size -- preserving the original
# search order.  Raises OverflowError when no listed type can hold the
# values.
# Fix: the old `raise OverflowError, "msg"` statement form is Python-2-only
# syntax; raising a constructed instance is equivalent (same exception type
# and message) and valid on Python 2.4+ as well as Python 3.
def get_type_size(table):
    type_size = [("uschar", 1), ("pcre_uint16", 2), ("pcre_uint32", 4),
                 ("signed char", 1), ("pcre_int16", 2), ("pcre_int32", 4)]
    limits = [(0, 255), (0, 65535), (0, 4294967295),
              (-128, 127), (-32768, 32767), (-2147483648, 2147483647)]
    minval = min(table)
    maxval = max(table)
    for num, (minlimit, maxlimit) in enumerate(limits):
        if minlimit <= minval and maxval <= maxlimit:
            return type_size[num]
    else:
        raise OverflowError("Too large to fit into C types")
129
# Sum the sizes in bytes of all the given tables, each assumed to be stored
# with the smallest C type that can hold its values.
def get_tables_size(*tables):
    total_size = 0
    for t in tables:
        _, element_size = get_type_size(t)
        total_size += element_size * len(t)
    return total_size
136
# Compress 'table' into a two-stage lookup.  The table is cut into blocks of
# block_size entries and identical blocks are shared.  Returns (stage1,
# stage2): stage1[block_number] is a "virtual" block index, and stage2 holds
# the unique blocks laid end to end.
def compress_table(table, block_size):
    seen = {}        # maps a block (as a tuple) to its virtual block index
    stage1 = []      # one virtual block number per input block
    stage2 = []      # concatenated unique blocks of property values
    table = tuple(table)
    for offset in range(0, len(table), block_size):
        block = table[offset:offset + block_size]
        if block in seen:
            index = seen[block]
        else:
            # First time this block content is seen: allocate a new
            # virtual block at the end of stage2.
            index = len(stage2) // block_size
            stage2 += block
            seen[block] = index
        stage1.append(index)

    return stage1, stage2
154
# Print a table as a C array declaration on stdout.
# table: sequence of integer values; table_name: the C identifier to declare.
# block_size: when given, values are printed in per-block groups under a
# "/* block n */" banner; when None, each output line is annotated with the
# first code point it covers.
# NOTE(review): Python 2 code ('print' statements, integer '/' division).
def print_table(table, table_name, block_size = None):
    type, size = get_type_size(table)   # smallest C type that fits the values
    ELEMS_PER_LINE = 16

    # Header comment records the total size in bytes (and block size, if any).
    s = "const %s %s[] = { /* %d bytes" % (type, table_name, size * len(table))
    if block_size:
        s += ", block = %d" % block_size
    print s + " */"
    table = tuple(table)
    if block_size is None:
        # Flat table: 'mult' scales an element index to a code point
        # (assumes MAX_UNICODE is an exact multiple of len(table) --
        # TODO confirm for all callers).
        fmt = "%3d," * ELEMS_PER_LINE + " /* U+%04X */"
        mult = MAX_UNICODE / len(table)
        for i in range(0, len(table), ELEMS_PER_LINE):
            print fmt % (table[i:i+ELEMS_PER_LINE] + (i * mult,))
    else:
        # Blocked table: wrap long blocks at ELEMS_PER_LINE values per line
        # by repeating the per-line format once per chunk of the block.
        if block_size > ELEMS_PER_LINE:
            el = ELEMS_PER_LINE
        else:
            el = block_size
        fmt = "%3d," * el + "\n"
        if block_size > ELEMS_PER_LINE:
            fmt = fmt * (block_size / ELEMS_PER_LINE)
        for i in range(0, len(table), block_size):
            print ("/* block %d */\n" + fmt) % ((i / block_size,) + table[i:i+block_size])
    print "};\n"
181
# Collapse parallel per-character tables into records.  Returns (index,
# records): 'records' maps each distinct tuple of property values to a
# unique record number, and 'index' gives that record number for every
# character position.
def combine_tables(*tables):
    records = {}
    index = []
    for combo in zip(*tables):
        if combo in records:
            rec_num = records[combo]
        else:
            rec_num = records[combo] = len(records)
        index.append(rec_num)
    return index, records
192
# Work out the C layout of one ucd_record from the list of record tuples.
# Returns (size, structure): 'size' is the record size in bytes including
# alignment padding, and 'structure' is a C-comment rendering of the
# matching typedef for checking against pcre_internal.h.
def get_record_size_struct(records):
    size = 0
    structure = '/* When recompiling tables with a new Unicode version,\n' + \
      'please check types in the structure definition from pcre_internal.h:\ntypedef struct {\n'
    for field in range(len(records[0])):
        column = [record[field] for record in records]
        field_type, field_size = get_type_size(column)
        # add padding: round up to the nearest power of field_size
        size = (size + field_size - 1) & -field_size
        size += field_size
        structure += '%s property_%d;\n' % (field_type, field)

    # round up to the first item of the next structure in array
    column = [record[0] for record in records]
    field_type, field_size = get_type_size(column)
    size = (size + field_size - 1) & -field_size

    structure += '} ucd_record; */\n\n'
    return size, structure
212
# Self-test: verify that get_record_size_struct() computes the expected
# aligned record size for a set of hand-constructed record lists (each pair
# is a record list and the size in bytes it should yield).
def test_record_size():
    tests = [ \
      ( [(3,), (6,), (6,), (1,)], 1 ), \
      ( [(300,), (600,), (600,), (100,)], 2 ), \
      ( [(25, 3), (6, 6), (34, 6), (68, 1)], 2 ), \
      ( [(300, 3), (6, 6), (340, 6), (690, 1)], 4 ), \
      ( [(3, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
      ( [(300, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
      ( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ), \
      ( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ), \
      ]
    for test in tests:
        size, struct = get_record_size_struct(test[0])
        assert(size == test[1])
        #print struct
228
# Print the ucd_record table as a C array on stdout.
# 'records' maps property tuples to record numbers; entries are emitted in
# record-number order so that each array index equals its record number.
# NOTE(review): Python 2 code -- 'print' statements and the two-argument
# list.sort(cmp, key) call form.
def print_records(records, record_size):
    print 'const ucd_record _pcre_ucd_records[] = { ' + \
      '/* %d bytes, record size %d */' % (len(records) * record_size, record_size)
    records = zip(records.keys(), records.values())
    records.sort(None, lambda x: x[1])   # order by record number
    for i, record in enumerate(records):
        print (' {' + '%6d, ' * len(record[0]) + '}, /* %3d */') % (record[0] + (i,))
    print '};\n'
237
# Script names; a character's script is stored as an index into this list,
# so the order matters -- presumably it matches PCRE's script enumeration
# (verify against the ucp_xx definitions in the PCRE headers).
script_names = ['Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', \
 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', \
 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', \
 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', \
 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', \
 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', \
 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', \
# New for Unicode 5.0
 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', \
# New for Unicode 5.1
 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai']

# General category names; a character's category is stored as an index into
# this list (same ordering caveat as script_names above).
category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
  'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
  'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]
253
# Run the self-test before generating anything.
test_record_size()

# Build the three per-code-point property tables.  Code points not listed
# in a file default to script Common / category Cn / case offset 0.
script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Common'))
category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))
other_case = read_table('Unicode.tables/UnicodeData.txt', get_other_case, 0)
# case_fold = read_table('CaseFolding.txt', get_case_folding_value, 0)

# Deduplicate the (script, category, case-offset) triples into records and
# work out the C structure layout of one record.
table, records = combine_tables(script, category, other_case)
record_size, record_struct = get_record_size_struct(records.keys())

# Find the optimum block size for the two-stage table: try power-of-two
# block sizes 32..512 and keep the one giving the smallest total size.
min_size = sys.maxint
for block_size in [2 ** i for i in range(5,10)]:
    size = len(records) * record_size
    stage1, stage2 = compress_table(table, block_size)
    size += get_tables_size(stage1, stage2)
    #print "/* block size %5d => %5d bytes */" % (block_size, size)
    if size < min_size:
        min_size = size
        min_stage1, min_stage2 = stage1, stage2
        min_block_size = block_size

# Emit the pcre_ucd.c source on stdout.
print "#ifdef HAVE_CONFIG_H"
print "#include \"config.h\""
print "#endif"
print "#include \"pcre_internal.h\""
print
print "/* Unicode character database. */"
print "/* This file was autogenerated by the MultiStage2.py script. */"
print "/* Total size: %d bytes, block size: %d. */" % (min_size, min_block_size)
print record_struct
print_records(records, record_size)
print_table(min_stage1, '_pcre_ucd_stage1')
print_table(min_stage2, '_pcre_ucd_stage2', min_block_size)
# Guard: the lookup macros in pcre_internal.h hard-code the block size.
print "#if UCD_BLOCK_SIZE != %d" % min_block_size
print "#error Please correct UCD_BLOCK_SIZE in pcre_internal.h"
print "#endif"
291
# The triple-quoted string below is disabled experimental code for building
# three-stage tables.  It is never executed (the string literal is evaluated
# and discarded) and is kept for reference only.
"""

# Three-stage tables:

# Find the optimum block size for 3-stage table
min_size = sys.maxint
for stage3_block in [2 ** i for i in range(2,6)]:
  stage_i, stage3 = compress_table(table, stage3_block)
  for stage2_block in [2 ** i for i in range(5,10)]:
    size = len(records) * 4
    stage1, stage2 = compress_table(stage_i, stage2_block)
    size += get_tables_size(stage1, stage2, stage3)
    # print "/* %5d / %3d => %5d bytes */" % (stage2_block, stage3_block, size)
    if size < min_size:
      min_size = size
      min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3
      min_stage2_block, min_stage3_block = stage2_block, stage3_block

print "/* Total size: %d bytes" % min_size */
print_records(records)
print_table(min_stage1, 'ucd_stage1')
print_table(min_stage2, 'ucd_stage2', min_stage2_block)
print_table(min_stage3, 'ucd_stage3', min_stage3_block)

"""

Properties

Name Value
svn:executable *

webmaster@exim.org
ViewVC Help
Powered by ViewVC 1.1.12