/[pcre]/code/trunk/sljit/sljitNativeX86_32.c


Revision 956, Wed Apr 4 05:35:52 2012 UTC, by zherczeg
JIT compiler update: fix x86-64 alignment issue
/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* x86 32-bit arch dependent functions. */

static int emit_do_imm(struct sljit_compiler *compiler, sljit_ub opcode, sljit_w imm)
{
    sljit_ub *buf;

    buf = (sljit_ub*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_w));
    FAIL_IF(!buf);
    INC_SIZE(1 + sizeof(sljit_w));
    *buf++ = opcode;
    *(sljit_w*)buf = imm;
    return SLJIT_SUCCESS;
}

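/* Note on the far jump forms emitted below (standard x86 encodings): 0xe9 is
   JMP rel32, 0xe8 is CALL rel32, and conditional jumps use the two byte
   0x0f 0x8x form produced by get_jump_code. The 32 bit displacement is
   relative to the end of the instruction, which is why a known target is
   stored as "target - (jump->addr + 4)". */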
static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, int type)
{
    if (type == SLJIT_JUMP) {
        *code_ptr++ = 0xe9;
        jump->addr++;
    }
    else if (type >= SLJIT_FAST_CALL) {
        *code_ptr++ = 0xe8;
        jump->addr++;
    }
    else {
        *code_ptr++ = 0x0f;
        *code_ptr++ = get_jump_code(type);
        jump->addr += 2;
    }

    if (jump->flags & JUMP_LABEL)
        jump->flags |= PATCH_MW;
    else
        *(sljit_w*)code_ptr = jump->u.target - (jump->addr + 4);
    code_ptr += 4;

    return code_ptr;
}

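/* Prologue, for reference: sljit_emit_enter pushes TMP_REGISTER and up to
   three saved registers, copies the incoming arguments into the saved
   registers (0x8b is MOV r32, r/m32; under fastcall the first two arguments
   arrive in registers and the third is read from the stack, otherwise all
   arguments are read relative to the stack pointer value captured in
   TMP_REGISTER), and finally subtracts local_size from SLJIT_LOCALS_REG;
   0x2b/0x29/0x2d passed to emit_non_cum_binary are the SUB opcode forms. */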
SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_enter(struct sljit_compiler *compiler, int args, int temporaries, int saveds, int local_size)
{
    int size;
    int locals_offset;
    sljit_ub *buf;

    CHECK_ERROR();
    check_sljit_emit_enter(compiler, args, temporaries, saveds, local_size);

    compiler->temporaries = temporaries;
    compiler->saveds = saveds;
    compiler->args = args;
    compiler->flags_saved = 0;
#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
    compiler->logical_local_size = local_size;
#endif

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
    size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0);
#else
    size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (2 + args * 3) : 0);
#endif
    buf = (sljit_ub*)ensure_buf(compiler, 1 + size);
    FAIL_IF(!buf);

    INC_SIZE(size);
    PUSH_REG(reg_map[TMP_REGISTER]);
#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
    if (args > 0) {
        *buf++ = 0x8b;
        *buf++ = 0xc4 | (reg_map[TMP_REGISTER] << 3);
    }
#endif
    if (saveds > 2)
        PUSH_REG(reg_map[SLJIT_SAVED_REG3]);
    if (saveds > 1)
        PUSH_REG(reg_map[SLJIT_SAVED_REG2]);
    if (saveds > 0)
        PUSH_REG(reg_map[SLJIT_SAVED_REG1]);

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
    if (args > 0) {
        *buf++ = 0x8b;
        *buf++ = 0xc0 | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[SLJIT_TEMPORARY_REG3];
    }
    if (args > 1) {
        *buf++ = 0x8b;
        *buf++ = 0xc0 | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[SLJIT_TEMPORARY_REG2];
    }
    if (args > 2) {
        *buf++ = 0x8b;
        *buf++ = 0x44 | (reg_map[SLJIT_SAVED_REG3] << 3);
        *buf++ = 0x24;
        *buf++ = sizeof(sljit_w) * (3 + 2); /* saveds >= 3 as well. */
    }
#else
    if (args > 0) {
        *buf++ = 0x8b;
        *buf++ = 0x40 | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[TMP_REGISTER];
        *buf++ = sizeof(sljit_w) * 2;
    }
    if (args > 1) {
        *buf++ = 0x8b;
        *buf++ = 0x40 | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[TMP_REGISTER];
        *buf++ = sizeof(sljit_w) * 3;
    }
    if (args > 2) {
        *buf++ = 0x8b;
        *buf++ = 0x40 | (reg_map[SLJIT_SAVED_REG3] << 3) | reg_map[TMP_REGISTER];
        *buf++ = sizeof(sljit_w) * 4;
    }
#endif

    locals_offset = 2 * sizeof(sljit_uw);
    compiler->temporaries_start = locals_offset;
    if (temporaries > 3)
        locals_offset += (temporaries - 3) * sizeof(sljit_uw);
    compiler->saveds_start = locals_offset;
    if (saveds > 3)
        locals_offset += (saveds - 3) * sizeof(sljit_uw);
    compiler->locals_offset = locals_offset;
    local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1));

#ifdef _WIN32
    if (local_size > 1024) {
        FAIL_IF(emit_do_imm(compiler, 0xb8 + reg_map[SLJIT_TEMPORARY_REG1], local_size));
        FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
    }
#endif

    compiler->local_size = local_size;
    SLJIT_ASSERT(local_size > 0);
    return emit_non_cum_binary(compiler, 0x2b, 0x29, 0x5 << 3, 0x2d,
        SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, local_size);
}

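/* sljit_set_context performs the same bookkeeping as sljit_emit_enter
   (argument/register counts and the locals_offset / local_size layout) but
   emits no code; presumably it is used when code generation continues in a
   frame that was established by an earlier sljit_emit_enter. */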
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, int args, int temporaries, int saveds, int local_size)
{
    int locals_offset;

    CHECK_ERROR_VOID();
    check_sljit_set_context(compiler, args, temporaries, saveds, local_size);

    compiler->temporaries = temporaries;
    compiler->saveds = saveds;
    compiler->args = args;
#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
    compiler->logical_local_size = local_size;
#endif

    locals_offset = 2 * sizeof(sljit_uw);
    compiler->temporaries_start = locals_offset;
    if (temporaries > 3)
        locals_offset += (temporaries - 3) * sizeof(sljit_uw);
    compiler->saveds_start = locals_offset;
    if (saveds > 3)
        locals_offset += (saveds - 3) * sizeof(sljit_uw);
    compiler->locals_offset = locals_offset;
    compiler->local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1));
}

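/* Epilogue, for reference: sljit_emit_return moves the return value into
   place, adds local_size back to SLJIT_LOCALS_REG (0x03/0x01/0x05 are the
   ADD opcode forms), pops the saved registers and TMP_REGISTER in reverse
   order, and returns with RET, or with RET imm16 when the callee must also
   pop stack arguments. */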
SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_return(struct sljit_compiler *compiler, int op, int src, sljit_w srcw)
{
    int size;
    sljit_ub *buf;

    CHECK_ERROR();
    check_sljit_emit_return(compiler, op, src, srcw);
    SLJIT_ASSERT(compiler->args >= 0);
    ADJUST_LOCAL_OFFSET(src, srcw);

    compiler->flags_saved = 0;
    FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

    SLJIT_ASSERT(compiler->local_size > 0);
    FAIL_IF(emit_cum_binary(compiler, 0x03, 0x01, 0x0 << 3, 0x05,
        SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, compiler->local_size));

    size = 2 + (compiler->saveds <= 3 ? compiler->saveds : 3);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
    if (compiler->args > 2)
        size += 2;
#else
    if (compiler->args > 0)
        size += 2;
#endif
    buf = (sljit_ub*)ensure_buf(compiler, 1 + size);
    FAIL_IF(!buf);

    INC_SIZE(size);

    if (compiler->saveds > 0)
        POP_REG(reg_map[SLJIT_SAVED_REG1]);
    if (compiler->saveds > 1)
        POP_REG(reg_map[SLJIT_SAVED_REG2]);
    if (compiler->saveds > 2)
        POP_REG(reg_map[SLJIT_SAVED_REG3]);
    POP_REG(reg_map[TMP_REGISTER]);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
    if (compiler->args > 2)
        RETN(sizeof(sljit_w));
    else
        RET();
#else
    if (compiler->args > 0)
        RETN(compiler->args * sizeof(sljit_w));
    else
        RET();
#endif

    return SLJIT_SUCCESS;
}

/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */

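/* For reference, the mod r/m byte built below has the layout
   mod (2 bits) | reg (3 bits) | r/m (3 bits): 0xc0 selects a register
   operand, 0x40/0x80 select an 8/32 bit displacement, r/m = 0x04 requests a
   SIB byte, and mod = 00 with r/m = 0x05 means a bare 32 bit address. The
   0x83/0x81 pair is the ALU-group opcode with an 8/32 bit immediate, and
   0xd1/0xc1/0xd3 are the shift-group opcodes (by 1, by imm8, by CL). */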
/* Size contains the flags as well. */
static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, int size,
    /* The register or immediate operand. */
    int a, sljit_w imma,
    /* The general operand (not immediate). */
    int b, sljit_w immb)
{
    sljit_ub *buf;
    sljit_ub *buf_ptr;
    int flags = size & ~0xf;
    int inst_size;

    /* Both cannot be switched on. */
    SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
    /* Size flags not allowed for typed instructions. */
    SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
    /* Both size flags cannot be switched on. */
    SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
    /* SSE2 and immediate is not possible. */
    SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
#endif

    size &= 0xf;
    inst_size = size;

#if (defined SLJIT_SSE2 && SLJIT_SSE2)
    if (flags & EX86_PREF_F2)
        inst_size++;
#endif
    if (flags & EX86_PREF_66)
        inst_size++;

    /* Calculate size of b. */
    inst_size += 1; /* mod r/m byte. */
    if (b & SLJIT_MEM) {
        if ((b & 0x0f) == SLJIT_UNUSED)
            inst_size += sizeof(sljit_w);
        else if (immb != 0 && !(b & 0xf0)) {
            /* Immediate operand. */
            if (immb <= 127 && immb >= -128)
                inst_size += sizeof(sljit_b);
            else
                inst_size += sizeof(sljit_w);
        }

        if ((b & 0xf) == SLJIT_LOCALS_REG && !(b & 0xf0))
            b |= SLJIT_LOCALS_REG << 4;

        if ((b & 0xf0) != SLJIT_UNUSED)
            inst_size += 1; /* SIB byte. */
    }

    /* Calculate size of a. */
    if (a & SLJIT_IMM) {
        if (flags & EX86_BIN_INS) {
            if (imma <= 127 && imma >= -128) {
                inst_size += 1;
                flags |= EX86_BYTE_ARG;
            } else
                inst_size += 4;
        }
        else if (flags & EX86_SHIFT_INS) {
            imma &= 0x1f;
            if (imma != 1) {
                inst_size++;
                flags |= EX86_BYTE_ARG;
            }
        } else if (flags & EX86_BYTE_ARG)
            inst_size++;
        else if (flags & EX86_HALF_ARG)
            inst_size += sizeof(short);
        else
            inst_size += sizeof(sljit_w);
    }
    else
        SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);

    buf = (sljit_ub*)ensure_buf(compiler, 1 + inst_size);
    PTR_FAIL_IF(!buf);

    /* Encoding the byte. */
    INC_SIZE(inst_size);
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
    if (flags & EX86_PREF_F2)
        *buf++ = 0xf2;
#endif
    if (flags & EX86_PREF_66)
        *buf++ = 0x66;

    buf_ptr = buf + size;

    /* Encode mod/rm byte. */
    if (!(flags & EX86_SHIFT_INS)) {
        if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
            *buf = (flags & EX86_BYTE_ARG) ? 0x83 : 0x81;

        if ((a & SLJIT_IMM) || (a == 0))
            *buf_ptr = 0;
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
        else if (!(flags & EX86_SSE2))
            *buf_ptr = reg_map[a] << 3;
        else
            *buf_ptr = a << 3;
#else
        else
            *buf_ptr = reg_map[a] << 3;
#endif
    }
    else {
        if (a & SLJIT_IMM) {
            if (imma == 1)
                *buf = 0xd1;
            else
                *buf = 0xc1;
        } else
            *buf = 0xd3;
        *buf_ptr = 0;
    }

    if (!(b & SLJIT_MEM))
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
        *buf_ptr++ |= 0xc0 + ((!(flags & EX86_SSE2)) ? reg_map[b] : b);
#else
        *buf_ptr++ |= 0xc0 + reg_map[b];
#endif
    else if ((b & 0x0f) != SLJIT_UNUSED) {
        if ((b & 0xf0) == SLJIT_UNUSED || (b & 0xf0) == (SLJIT_LOCALS_REG << 4)) {
            if (immb != 0) {
                if (immb <= 127 && immb >= -128)
                    *buf_ptr |= 0x40;
                else
                    *buf_ptr |= 0x80;
            }

            if ((b & 0xf0) == SLJIT_UNUSED)
                *buf_ptr++ |= reg_map[b & 0x0f];
            else {
                *buf_ptr++ |= 0x04;
                *buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3);
            }

            if (immb != 0) {
                if (immb <= 127 && immb >= -128)
                    *buf_ptr++ = immb; /* 8 bit displacement. */
                else {
                    *(sljit_w*)buf_ptr = immb; /* 32 bit displacement. */
                    buf_ptr += sizeof(sljit_w);
                }
            }
        }
        else {
            *buf_ptr++ |= 0x04;
            *buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3) | (immb << 6);
        }
    }
    else {
        *buf_ptr++ |= 0x05;
        *(sljit_w*)buf_ptr = immb; /* 32 bit displacement. */
        buf_ptr += sizeof(sljit_w);
    }

    if (a & SLJIT_IMM) {
        if (flags & EX86_BYTE_ARG)
            *buf_ptr = imma;
        else if (flags & EX86_HALF_ARG)
            *(short*)buf_ptr = imma;
        else if (!(flags & EX86_SHIFT_INS))
            *(sljit_w*)buf_ptr = imma;
    }

    return !(flags & EX86_SHIFT_INS) ? buf : (buf + 1);
}

/* --------------------------------------------------------------------- */
/* Call / return instructions */
/* --------------------------------------------------------------------- */

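/* call_with_args arranges the arguments of a C call right before the call
   instruction: with fastcall enabled the third argument (if any) is pushed
   and the first argument is copied into the register the fastcall callee
   expects (another MOV r32, r/m32, opcode 0x8b); otherwise all arguments
   are pushed in right-to-left cdecl order. */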
static SLJIT_INLINE int call_with_args(struct sljit_compiler *compiler, int type)
{
    sljit_ub *buf;

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
    buf = (sljit_ub*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2);
    FAIL_IF(!buf);
    INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2);

    if (type >= SLJIT_CALL3)
        PUSH_REG(reg_map[SLJIT_TEMPORARY_REG3]);
    *buf++ = 0x8b;
    *buf++ = 0xc0 | (reg_map[SLJIT_TEMPORARY_REG3] << 3) | reg_map[SLJIT_TEMPORARY_REG1];
#else
    buf = (sljit_ub*)ensure_buf(compiler, type - SLJIT_CALL0 + 1);
    FAIL_IF(!buf);
    INC_SIZE(type - SLJIT_CALL0);
    if (type >= SLJIT_CALL3)
        PUSH_REG(reg_map[SLJIT_TEMPORARY_REG3]);
    if (type >= SLJIT_CALL2)
        PUSH_REG(reg_map[SLJIT_TEMPORARY_REG2]);
    PUSH_REG(reg_map[SLJIT_TEMPORARY_REG1]);
#endif
    return SLJIT_SUCCESS;
}

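/* Fast calls do not build a frame: sljit_emit_fast_enter pops the return
   address pushed by the corresponding fast call into dst (0x8f /0 is
   POP r/m32), and sljit_emit_fast_return pushes src back onto the stack
   (0xff /6 is PUSH r/m32, 0x68 is PUSH imm32) and executes RET, i.e. it
   jumps to that address. */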
SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_fast_enter(struct sljit_compiler *compiler, int dst, sljit_w dstw)
{
    sljit_ub *buf;

    CHECK_ERROR();
    check_sljit_emit_fast_enter(compiler, dst, dstw);
    ADJUST_LOCAL_OFFSET(dst, dstw);

    CHECK_EXTRA_REGS(dst, dstw, (void)0);

    if (dst >= SLJIT_TEMPORARY_REG1 && dst <= SLJIT_NO_REGISTERS) {
        buf = (sljit_ub*)ensure_buf(compiler, 1 + 1);
        FAIL_IF(!buf);

        INC_SIZE(1);
        POP_REG(reg_map[dst]);
        return SLJIT_SUCCESS;
    }
    else if (dst & SLJIT_MEM) {
        buf = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
        FAIL_IF(!buf);
        *buf++ = 0x8f;
        return SLJIT_SUCCESS;
    }

    /* For UNUSED dst. Uncommon, but possible. */
    buf = (sljit_ub*)ensure_buf(compiler, 1 + 1);
    FAIL_IF(!buf);

    INC_SIZE(1);
    POP_REG(reg_map[TMP_REGISTER]);
    return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_fast_return(struct sljit_compiler *compiler, int src, sljit_w srcw)
{
    sljit_ub *buf;

    CHECK_ERROR();
    check_sljit_emit_fast_return(compiler, src, srcw);
    ADJUST_LOCAL_OFFSET(src, srcw);

    CHECK_EXTRA_REGS(src, srcw, (void)0);

    if (src >= SLJIT_TEMPORARY_REG1 && src <= SLJIT_NO_REGISTERS) {
        buf = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1);
        FAIL_IF(!buf);

        INC_SIZE(1 + 1);
        PUSH_REG(reg_map[src]);
    }
    else if (src & SLJIT_MEM) {
        buf = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
        FAIL_IF(!buf);
        *buf++ = 0xff;
        *buf |= 6 << 3;

        buf = (sljit_ub*)ensure_buf(compiler, 1 + 1);
        FAIL_IF(!buf);
        INC_SIZE(1);
    }
    else {
        /* SLJIT_IMM. */
        buf = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1);
        FAIL_IF(!buf);

        INC_SIZE(5 + 1);
        *buf++ = 0x68;
        *(sljit_w*)buf = srcw;
        buf += sizeof(sljit_w);
    }

    RET();
    return SLJIT_SUCCESS;
}
