| author | Luc Van Oostenryck <luc.vanoostenryck@gmail.com> | 2017-08-15 11:24:01 +0200 |
|---|---|---|
| committer | Luc Van Oostenryck <luc.vanoostenryck@gmail.com> | 2018-06-23 07:46:40 +0200 |
| commit | 01951278a60490735677920c86f139d627b22dbf (patch) | |
| tree | 44e9168eacaf2053a4b96bc52a4fb38b288e07a2 | |
| parent | bf21036518acf0c3069c9c9e12b13fd09c8cdceb (diff) | |
| download | sparse-dev-01951278a60490735677920c86f139d627b22dbf.tar.gz | |
cast: specialize integer casts
Casts to integer used to be done with only 2 instructions:
OP_CAST & OP_SCAST.
Those are not very convenient as they don't reflect the real
operations that need to be done.
This patch specializes these instructions into:
- OP_TRUNC, for casts to a smaller type
- OP_ZEXT, for casts that need a zero extension
- OP_SEXT, for casts that need a sign extension
Integer-to-integer casts between types of the same size are
considered as NOPs and are, in fact, never emitted (the resulting
mapping is illustrated in the sketch below).
Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
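
A minimal sketch of the resulting mapping, not part of the patch itself: the
conversions below correspond to the updated expected output in
validation/linear/cast-kinds.c further down; the function names are invented
for illustration only.

```c
/* Minimal sketch, not part of the patch: how plain C integer conversions
 * lower to the new opcodes, per validation/linear/cast-kinds.c below.
 * Function names are invented for this illustration. */
static int narrow(long x)                           { return x; } /* trunc.32 %r <- (64) %arg1 */
static long widen_signed(int x)                     { return x; } /* sext.64  %r <- (32) %arg1 */
static unsigned long widen_unsigned(unsigned int x) { return x; } /* zext.64  %r <- (32) %arg1 */
static long same_size(unsigned long x)              { return x; } /* same size: no cast emitted */
```

The last case is exactly the same-size integer-to-integer conversion that the
commit message describes as a NOP that is never emitted.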
| -rw-r--r-- | Documentation/IR.rst | 11 |
| -rw-r--r-- | cse.c | 8 |
| -rw-r--r-- | example.c | 9 |
| -rw-r--r-- | linearize.c | 27 |
| -rw-r--r-- | linearize.h | 4 |
| -rw-r--r-- | simplify.c | 24 |
| -rw-r--r-- | sparse-llvm.c | 11 |
| -rw-r--r-- | sparse.c | 5 |
| -rw-r--r-- | validation/bitfield-size.c | 5 |
| -rw-r--r-- | validation/builtin-bswap-variable.c | 4 |
| -rw-r--r-- | validation/cast-kinds-check.c | 5 |
| -rw-r--r-- | validation/cast-weirds.c | 8 |
| -rw-r--r-- | validation/compound-assign-type.c | 3 |
| -rw-r--r-- | validation/expand/builtin-expect.c | 8 |
| -rw-r--r-- | validation/linear/bitfield-init-zero.c | 8 |
| -rw-r--r-- | validation/linear/bool-cast.c | 2 |
| -rw-r--r-- | validation/linear/call-basic.c | 2 |
| -rw-r--r-- | validation/linear/cast-kinds.c | 179 |
| -rw-r--r-- | validation/linear/cast-volatile.c | 5 |
| -rw-r--r-- | validation/optim/bool-simplify.c | 4 |
| -rw-r--r-- | validation/optim/canonical-cmp.c | 72 |
| -rw-r--r-- | validation/optim/kill-casts.c | 2 |
22 files changed, 208 insertions, 198 deletions
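
The core of the change is the opcode selection in get_cast_opcode() in
linearize.c, shown in the diff below. Condensed to just its
integer-to-integer arm, the new logic amounts to the following simplified
sketch; the helper name and the reduced enum are invented here and are not
the verbatim sparse code.

```c
/* Simplified sketch of the integer-to-integer arm of get_cast_opcode()
 * after this patch; not the verbatim sparse code. */
enum opcode { OP_NOP, OP_TRUNC, OP_ZEXT, OP_SEXT };   /* subset, for the sketch only */

static enum opcode int_cast_opcode(int dst_bits, int src_bits, int src_is_signed)
{
	if (dst_bits == src_bits)
		return OP_NOP;                        /* same size: never emitted */
	if (dst_bits < src_bits)
		return OP_TRUNC;                      /* narrowing */
	return src_is_signed ? OP_SEXT : OP_ZEXT;     /* widening by source signedness */
}
```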
diff --git a/Documentation/IR.rst b/Documentation/IR.rst index 1cce2de1..ff0ccdf2 100644 --- a/Documentation/IR.rst +++ b/Documentation/IR.rst @@ -267,11 +267,14 @@ They all have the following signature: Currently, a cast to a void pointer is treated like a cast to an unsigned integer of the same size. -.. op:: OP_CAST - Cast to unsigned integer. +.. op:: OP_TRUNC + Cast from integer to an integer of a smaller size. -.. op:: OP_SCAST - Cast to signed integer. +.. op:: OP_SEXT + Cast from integer to an integer of a bigger size with sign extension. + +.. op:: OP_ZEXT + Cast from integer to an integer of a bigger size with zero extension. .. op:: OP_UTPTR Cast from pointer-sized unsigned integer to pointer type. @@ -91,8 +91,8 @@ void cse_collect(struct instruction *insn) hash += hashval(insn->symbol); break; - case OP_CAST: - case OP_SCAST: + case OP_SEXT: case OP_ZEXT: + case OP_TRUNC: case OP_PTRCAST: case OP_UTPTR: case OP_PTRTU: /* @@ -239,8 +239,8 @@ static int insn_compare(const void *_i1, const void *_i2) case OP_PHI: return phi_list_compare(i1->phi_list, i2->phi_list); - case OP_CAST: - case OP_SCAST: + case OP_SEXT: case OP_ZEXT: + case OP_TRUNC: case OP_PTRCAST: case OP_UTPTR: case OP_PTRTU: /* @@ -74,8 +74,9 @@ static const char *opcodes[] = { [OP_PHI] = "phi", [OP_PHISOURCE] = "phisrc", [OP_COPY] = "copy", - [OP_CAST] = "cast", - [OP_SCAST] = "scast", + [OP_SEXT] = "sext", + [OP_ZEXT] = "zext", + [OP_TRUNC] = "trunc", [OP_FCVTU] = "fcvtu", [OP_FCVTS] = "fcvts", [OP_UCVTF] = "ucvtf", @@ -1415,7 +1416,9 @@ static void generate_one_insn(struct instruction *insn, struct bb_state *state) generate_compare(state, insn); break; - case OP_CAST: case OP_SCAST: case OP_PTRCAST: + case OP_SEXT: case OP_ZEXT: + case OP_TRUNC: + case OP_PTRCAST: case OP_UTPTR: case OP_PTRTU: case OP_FCVTU: case OP_FCVTS: diff --git a/linearize.c b/linearize.c index 80a67ed0..58838a0b 100644 --- a/linearize.c +++ b/linearize.c @@ -261,8 +261,9 @@ static const char *opcodes[] = { /* Other */ [OP_PHI] = "phi", [OP_PHISOURCE] = "phisrc", - [OP_CAST] = "cast", - [OP_SCAST] = "scast", + [OP_SEXT] = "sext", + [OP_ZEXT] = "zext", + [OP_TRUNC] = "trunc", [OP_FCVTU] = "fcvtu", [OP_FCVTS] = "fcvts", [OP_UCVTF] = "ucvtf", @@ -449,8 +450,8 @@ const char *show_instruction(struct instruction *insn) } END_FOR_EACH_PTR(arg); break; } - case OP_CAST: - case OP_SCAST: + case OP_SEXT: case OP_ZEXT: + case OP_TRUNC: case OP_FCVTU: case OP_FCVTS: case OP_UCVTF: case OP_SCVTF: case OP_FCVTF: @@ -1240,13 +1241,14 @@ static int get_cast_opcode(struct symbol *dst, struct symbol *src) case MTYPE_PTR: case MTYPE_VPTR: case MTYPE_UINT: - return OP_CAST; + stype = MTYPE_UINT; + /* fall through */ case MTYPE_SINT: - return OP_SCAST; + break; default: return OP_BADOP; - break; } + /* fall through */ case MTYPE_UINT: case MTYPE_SINT: switch (stype) { @@ -1256,15 +1258,18 @@ static int get_cast_opcode(struct symbol *dst, struct symbol *src) return OP_PTRTU; case MTYPE_VPTR: case MTYPE_UINT: - return OP_CAST; case MTYPE_SINT: - return OP_SCAST; + if (dst->bit_size ==src->bit_size) + return OP_NOP; + if (dst->bit_size < src->bit_size) + return OP_TRUNC; + return stype == MTYPE_SINT ? 
OP_SEXT : OP_ZEXT; default: - break; + return OP_BADOP; } /* fall through */ default: - return OP_CAST; + return OP_BADOP; } } diff --git a/linearize.h b/linearize.h index d660d332..242cefb8 100644 --- a/linearize.h +++ b/linearize.h @@ -221,8 +221,8 @@ enum opcode { OP_FNEG, /* Casts */ - OP_CAST, - OP_SCAST, + OP_TRUNC, + OP_ZEXT, OP_SEXT, OP_FCVTU, OP_FCVTS, OP_UCVTF, OP_SCVTF, OP_FCVTF, @@ -396,7 +396,7 @@ static unsigned int operand_size(struct instruction *insn, pseudo_t pseudo) if (pseudo->type == PSEUDO_REG) { struct instruction *src = pseudo->def; - if (src && src->opcode == OP_CAST && src->orig_type) { + if (src && src->opcode == OP_ZEXT && src->orig_type) { unsigned int orig_size = src->orig_type->bit_size; if (orig_size < size) size = orig_size; @@ -988,11 +988,14 @@ static int simplify_cast(struct instruction *insn) } if (size == orig_size) { - int op = (orig_type->ctype.modifiers & MOD_SIGNED) ? OP_SCAST : OP_CAST; - if (insn->opcode == op) - goto simplify; - if (insn->opcode == OP_FCVTF) + switch (insn->opcode) { + //case OP_NOPCAST: // FIXME: what to do? + //case OP_PTRCAST: // FIXME: what to do? + case OP_FCVTF: goto simplify; + default: + break; + } } return 0; @@ -1142,11 +1145,8 @@ static int simplify_branch(struct instruction *insn) return replace_pseudo(insn, &insn->cond, def->src1); } } - if (def->opcode == OP_CAST || def->opcode == OP_SCAST) { - int orig_size = def->orig_type ? def->orig_type->bit_size : 0; - if (def->size > orig_size) - return replace_pseudo(insn, &insn->cond, def->src); - } + if (def->opcode == OP_SEXT || def->opcode == OP_ZEXT) + return replace_pseudo(insn, &insn->cond, def->src); } return 0; } @@ -1218,8 +1218,8 @@ int simplify_instruction(struct instruction *insn) if (dead_insn(insn, &insn->symbol, NULL, NULL)) return REPEAT_CSE | REPEAT_SYMBOL_CLEANUP; return replace_with_pseudo(insn, insn->symbol); - case OP_CAST: - case OP_SCAST: + case OP_SEXT: case OP_ZEXT: + case OP_TRUNC: case OP_FCVTU: case OP_FCVTS: case OP_UCVTF: case OP_SCVTF: case OP_FCVTF: diff --git a/sparse-llvm.c b/sparse-llvm.c index 69b77a37..937f4490 100644 --- a/sparse-llvm.c +++ b/sparse-llvm.c @@ -887,7 +887,7 @@ static void output_op_ptrcast(struct function *fn, struct instruction *insn) dtype = symbol_type(insn->type); switch (insn->opcode) { case OP_UTPTR: - case OP_SCAST: // FIXME + case OP_SEXT: // FIXME assert(is_int_type(otype)); assert(is_ptr_type(insn->type)); op = LLVMIntToPtr; @@ -898,7 +898,7 @@ static void output_op_ptrcast(struct function *fn, struct instruction *insn) op = LLVMPtrToInt; break; case OP_PTRCAST: - case OP_CAST: // FIXME + case OP_ZEXT: // FIXME assert(is_ptr_type(otype)); assert(is_ptr_type(insn->type)); op = LLVMBitCast; @@ -1041,12 +1041,15 @@ static void output_insn(struct function *fn, struct instruction *insn) case OP_CALL: output_op_call(fn, insn); break; - case OP_CAST: + case OP_ZEXT: output_op_cast(fn, insn, LLVMZExt); break; - case OP_SCAST: + case OP_SEXT: output_op_cast(fn, insn, LLVMSExt); break; + case OP_TRUNC: + output_op_cast(fn, insn, LLVMTrunc); + break; case OP_FCVTU: output_op_cast(fn, insn, LLVMFPToUI); break; @@ -122,7 +122,7 @@ static void check_cast_instruction(struct instruction *insn) int old = orig_type->bit_size; int new = insn->size; int oldsigned = (orig_type->ctype.modifiers & MOD_SIGNED) != 0; - int newsigned = insn->opcode == OP_SCAST; + int newsigned = insn->opcode == OP_SEXT; if (new > old) { if (oldsigned == newsigned) @@ -216,7 +216,8 @@ static void check_call_instruction(struct instruction *insn) static 
void check_one_instruction(struct instruction *insn) { switch (insn->opcode) { - case OP_CAST: case OP_SCAST: + case OP_SEXT: case OP_ZEXT: + case OP_TRUNC: if (verbose) check_cast_instruction(insn); break; diff --git a/validation/bitfield-size.c b/validation/bitfield-size.c index b027cbd5..a39c3966 100644 --- a/validation/bitfield-size.c +++ b/validation/bitfield-size.c @@ -35,7 +35,8 @@ unsigned int get_pbfi_b(struct bfi *bf) { return bf->b; } * check-command: test-linearize -Wno-decl $file * check-output-ignore * - * check-output-pattern(24): cast\\. - * check-output-pattern(12): cast\\.4 + * check-output-pattern(8): zext\\. + * check-output-pattern(4): sext\\. + * check-output-pattern(12): trunc\\.4 * check-output-pattern(6): lsr\\..*\\$6 */ diff --git a/validation/builtin-bswap-variable.c b/validation/builtin-bswap-variable.c index 738ba2a4..40ad6413 100644 --- a/validation/builtin-bswap-variable.c +++ b/validation/builtin-bswap-variable.c @@ -25,8 +25,8 @@ static u64 swap64v(u32 a) * * check-output-ignore * check-output-contains:call.16 .* __builtin_bswap16 - * check-output-contains:cast.32 .* (64) %arg1 + * check-output-contains:trunc.32 .* (64) %arg1 * check-output-contains:call.32 .* __builtin_bswap32 - * check-output-contains:cast.64 .* (32) %arg1 + * check-output-contains:zext.64 .* (32) %arg1 * check-output-contains:call.64 .* __builtin_bswap64 */ diff --git a/validation/cast-kinds-check.c b/validation/cast-kinds-check.c index 48b1306d..0fe705f5 100644 --- a/validation/cast-kinds-check.c +++ b/validation/cast-kinds-check.c @@ -13,11 +13,6 @@ linear/cast-kinds.c:12:48: warning: cast drops bits linear/cast-kinds.c:13:50: warning: cast drops bits linear/cast-kinds.c:14:49: warning: cast drops bits linear/cast-kinds.c:15:48: warning: cast drops bits -linear/cast-kinds.c:21:49: warning: cast wasn't removed -linear/cast-kinds.c:28:52: warning: cast wasn't removed -linear/cast-kinds.c:34:52: warning: cast wasn't removed -linear/cast-kinds.c:35:54: warning: cast wasn't removed -linear/cast-kinds.c:36:52: warning: cast wasn't removed linear/cast-kinds.c:37:42: warning: non size-preserving integer to pointer cast linear/cast-kinds.c:38:44: warning: non size-preserving integer to pointer cast * check-error-end diff --git a/validation/cast-weirds.c b/validation/cast-weirds.c index 71e52ff5..a99c65d2 100644 --- a/validation/cast-weirds.c +++ b/validation/cast-weirds.c @@ -20,7 +20,7 @@ cast-weirds.c:5:44: warning: non size-preserving integer to pointer cast int_2_iptr: .L0: <entry-point> - scast.64 %r2 <- (32) %arg1 + sext.64 %r2 <- (32) %arg1 utptr.64 %r3 <- (64) %r2 ret.64 %r3 @@ -28,7 +28,7 @@ int_2_iptr: uint_2_iptr: .L2: <entry-point> - cast.64 %r6 <- (32) %arg1 + zext.64 %r6 <- (32) %arg1 utptr.64 %r7 <- (64) %r6 ret.64 %r7 @@ -36,14 +36,14 @@ uint_2_iptr: int_2_vptr: .L4: <entry-point> - scast.64 %r10 <- (32) %arg1 + sext.64 %r10 <- (32) %arg1 ret.64 %r10 uint_2_vptr: .L6: <entry-point> - cast.64 %r13 <- (32) %arg1 + zext.64 %r13 <- (32) %arg1 ret.64 %r13 diff --git a/validation/compound-assign-type.c b/validation/compound-assign-type.c index 450fa26d..e13dcfcd 100644 --- a/validation/compound-assign-type.c +++ b/validation/compound-assign-type.c @@ -11,5 +11,6 @@ static unsigned int foo(unsigned int x, long a) * * check-output-excludes: divu\\.32 * check-output-contains: divs\\.64 - * check-output-contains: scast\\.32 + * check-output-contains: zext.64 .* (32) %arg1 + * check-output-contains: trunc.32 .* (64) */ diff --git a/validation/expand/builtin-expect.c 
b/validation/expand/builtin-expect.c index a80176a5..77f714ef 100644 --- a/validation/expand/builtin-expect.c +++ b/validation/expand/builtin-expect.c @@ -41,7 +41,7 @@ void *fptr(void *a) flia: .L0: <entry-point> - scast.32 %r2 <- (64) %arg1 + trunc.32 %r2 <- (64) %arg1 ret.32 %r2 @@ -54,7 +54,7 @@ flic: fila: .L4: <entry-point> - scast.64 %r6 <- (32) %arg1 + sext.64 %r6 <- (32) %arg1 ret.64 %r6 @@ -79,9 +79,7 @@ fils: fptr: .L12: <entry-point> - cast.64 %r12 <- (64) %arg1 - scast.64 %r13 <- (64) %r12 - ret.64 %r13 + ret.64 %arg1 * check-output-end diff --git a/validation/linear/bitfield-init-zero.c b/validation/linear/bitfield-init-zero.c index 39a64345..7a410a7c 100644 --- a/validation/linear/bitfield-init-zero.c +++ b/validation/linear/bitfield-init-zero.c @@ -57,7 +57,7 @@ int bfs_get0(void) bfuu_init: .L0: <entry-point> - cast.9 %r2 <- (32) %arg1 + trunc.9 %r2 <- (32) %arg1 shl.32 %r4 <- %r2, $11 ret.32 %r4 @@ -65,7 +65,7 @@ bfuu_init: bfus_init: .L2: <entry-point> - scast.9 %r10 <- (32) %arg1 + trunc.9 %r10 <- (32) %arg1 shl.32 %r12 <- %r10, $11 ret.32 %r12 @@ -79,7 +79,7 @@ bfu_get0: bfsu_init: .L6: <entry-point> - cast.9 %r23 <- (32) %arg1 + trunc.9 %r23 <- (32) %arg1 shl.32 %r25 <- %r23, $11 ret.32 %r25 @@ -87,7 +87,7 @@ bfsu_init: bfss_init: .L8: <entry-point> - scast.9 %r31 <- (32) %arg1 + trunc.9 %r31 <- (32) %arg1 shl.32 %r33 <- %r31, $11 ret.32 %r33 diff --git a/validation/linear/bool-cast.c b/validation/linear/bool-cast.c index 43276a46..0438a94f 100644 --- a/validation/linear/bool-cast.c +++ b/validation/linear/bool-cast.c @@ -29,6 +29,8 @@ static _Bool fdbl_e(dbl a) { return (_Bool)a; } * check-output-excludes: cast\\. * check-output-excludes: fcvt[us]\\. * check-output-excludes: ptrtu\\. + * check-output-excludes: [sz]ext\\. + * check-output-excludes: trunc\\. * check-output-pattern(12): setne\\. * check-output-pattern(2): fcmpune\\. 
*/ diff --git a/validation/linear/call-basic.c b/validation/linear/call-basic.c index 46eab108..3822a267 100644 --- a/validation/linear/call-basic.c +++ b/validation/linear/call-basic.c @@ -49,7 +49,7 @@ pointer1: builtin: .L6: <entry-point> - call.32 %r11 <- __builtin_popcount, %arg1 + call.32 %r10 <- __builtin_popcount, %arg1 ret diff --git a/validation/linear/cast-kinds.c b/validation/linear/cast-kinds.c index 8e3e21a7..5df307bc 100644 --- a/validation/linear/cast-kinds.c +++ b/validation/linear/cast-kinds.c @@ -67,44 +67,44 @@ uint_2_int: long_2_int: .L2: <entry-point> - scast.32 %r5 <- (64) %arg1 - ret.32 %r5 + trunc.32 %r4 <- (64) %arg1 + ret.32 %r4 ulong_2_int: .L4: <entry-point> - cast.32 %r8 <- (64) %arg1 - ret.32 %r8 + trunc.32 %r7 <- (64) %arg1 + ret.32 %r7 vptr_2_int: .L6: <entry-point> - cast.32 %r11 <- (64) %arg1 - ret.32 %r11 + trunc.32 %r10 <- (64) %arg1 + ret.32 %r10 iptr_2_int: .L8: <entry-point> - ptrtu.64 %r14 <- (64) %arg1 - cast.32 %r15 <- (64) %r14 - ret.32 %r15 + ptrtu.64 %r13 <- (64) %arg1 + trunc.32 %r14 <- (64) %r13 + ret.32 %r14 float_2_int: .L10: <entry-point> - fcvts.32 %r18 <- (32) %arg1 - ret.32 %r18 + fcvts.32 %r17 <- (32) %arg1 + ret.32 %r17 double_2_int: .L12: <entry-point> - fcvts.32 %r21 <- (64) %arg1 - ret.32 %r21 + fcvts.32 %r20 <- (64) %arg1 + ret.32 %r20 int_2_uint: @@ -116,58 +116,58 @@ int_2_uint: long_2_uint: .L16: <entry-point> - scast.32 %r27 <- (64) %arg1 - ret.32 %r27 + trunc.32 %r25 <- (64) %arg1 + ret.32 %r25 ulong_2_uint: .L18: <entry-point> - cast.32 %r30 <- (64) %arg1 - ret.32 %r30 + trunc.32 %r28 <- (64) %arg1 + ret.32 %r28 vptr_2_uint: .L20: <entry-point> - cast.32 %r33 <- (64) %arg1 - ret.32 %r33 + trunc.32 %r31 <- (64) %arg1 + ret.32 %r31 iptr_2_uint: .L22: <entry-point> - ptrtu.64 %r36 <- (64) %arg1 - cast.32 %r37 <- (64) %r36 - ret.32 %r37 + ptrtu.64 %r34 <- (64) %arg1 + trunc.32 %r35 <- (64) %r34 + ret.32 %r35 float_2_uint: .L24: <entry-point> - fcvtu.32 %r40 <- (32) %arg1 - ret.32 %r40 + fcvtu.32 %r38 <- (32) %arg1 + ret.32 %r38 double_2_uint: .L26: <entry-point> - fcvtu.32 %r43 <- (64) %arg1 - ret.32 %r43 + fcvtu.32 %r41 <- (64) %arg1 + ret.32 %r41 int_2_long: .L28: <entry-point> - scast.64 %r46 <- (32) %arg1 - ret.64 %r46 + sext.64 %r44 <- (32) %arg1 + ret.64 %r44 uint_2_long: .L30: <entry-point> - cast.64 %r49 <- (32) %arg1 - ret.64 %r49 + zext.64 %r47 <- (32) %arg1 + ret.64 %r47 ulong_2_long: @@ -179,43 +179,42 @@ ulong_2_long: vptr_2_long: .L34: <entry-point> - cast.64 %r55 <- (64) %arg1 - ret.64 %r55 + ret.64 %arg1 iptr_2_long: .L36: <entry-point> - ptrtu.64 %r58 <- (64) %arg1 - ret.64 %r58 + ptrtu.64 %r54 <- (64) %arg1 + ret.64 %r54 float_2_long: .L38: <entry-point> - fcvts.64 %r61 <- (32) %arg1 - ret.64 %r61 + fcvts.64 %r57 <- (32) %arg1 + ret.64 %r57 double_2_long: .L40: <entry-point> - fcvts.64 %r64 <- (64) %arg1 - ret.64 %r64 + fcvts.64 %r60 <- (64) %arg1 + ret.64 %r60 int_2_ulong: .L42: <entry-point> - scast.64 %r67 <- (32) %arg1 - ret.64 %r67 + sext.64 %r63 <- (32) %arg1 + ret.64 %r63 uint_2_ulong: .L44: <entry-point> - cast.64 %r70 <- (32) %arg1 - ret.64 %r70 + zext.64 %r66 <- (32) %arg1 + ret.64 %r66 long_2_ulong: @@ -227,171 +226,167 @@ long_2_ulong: vptr_2_ulong: .L48: <entry-point> - cast.64 %r76 <- (64) %arg1 - ret.64 %r76 + ret.64 %arg1 iptr_2_ulong: .L50: <entry-point> - ptrtu.64 %r79 <- (64) %arg1 - ret.64 %r79 + ptrtu.64 %r73 <- (64) %arg1 + ret.64 %r73 float_2_ulong: .L52: <entry-point> - fcvtu.64 %r82 <- (32) %arg1 - ret.64 %r82 + fcvtu.64 %r76 <- (32) %arg1 + ret.64 %r76 double_2_ulong: .L54: 
<entry-point> - fcvtu.64 %r85 <- (64) %arg1 - ret.64 %r85 + fcvtu.64 %r79 <- (64) %arg1 + ret.64 %r79 int_2_vptr: .L56: <entry-point> - scast.64 %r88 <- (32) %arg1 - ret.64 %r88 + sext.64 %r82 <- (32) %arg1 + ret.64 %r82 uint_2_vptr: .L58: <entry-point> - cast.64 %r91 <- (32) %arg1 - ret.64 %r91 + zext.64 %r85 <- (32) %arg1 + ret.64 %r85 long_2_vptr: .L60: <entry-point> - scast.64 %r94 <- (64) %arg1 - ret.64 %r94 + ret.64 %arg1 ulong_2_vptr: .L62: <entry-point> - cast.64 %r97 <- (64) %arg1 - ret.64 %r97 + ret.64 %arg1 iptr_2_vptr: .L64: <entry-point> - cast.64 %r100 <- (64) %arg1 - ret.64 %r100 + ret.64 %arg1 int_2_iptr: .L66: <entry-point> - scast.64 %r103 <- (32) %arg1 - utptr.64 %r104 <- (64) %r103 - ret.64 %r104 + sext.64 %r94 <- (32) %arg1 + utptr.64 %r95 <- (64) %r94 + ret.64 %r95 uint_2_iptr: .L68: <entry-point> - cast.64 %r107 <- (32) %arg1 - utptr.64 %r108 <- (64) %r107 - ret.64 %r108 + zext.64 %r98 <- (32) %arg1 + utptr.64 %r99 <- (64) %r98 + ret.64 %r99 long_2_iptr: .L70: <entry-point> - utptr.64 %r111 <- (64) %arg1 - ret.64 %r111 + utptr.64 %r102 <- (64) %arg1 + ret.64 %r102 ulong_2_iptr: .L72: <entry-point> - utptr.64 %r114 <- (64) %arg1 - ret.64 %r114 + utptr.64 %r105 <- (64) %arg1 + ret.64 %r105 vptr_2_iptr: .L74: <entry-point> - ptrcast.64 %r117 <- (64) %arg1 - ret.64 %r117 + ptrcast.64 %r108 <- (64) %arg1 + ret.64 %r108 int_2_float: .L76: <entry-point> - scvtf.32 %r120 <- (32) %arg1 - ret.32 %r120 + scvtf.32 %r111 <- (32) %arg1 + ret.32 %r111 uint_2_float: .L78: <entry-point> - ucvtf.32 %r123 <- (32) %arg1 - ret.32 %r123 + ucvtf.32 %r114 <- (32) %arg1 + ret.32 %r114 long_2_float: .L80: <entry-point> - scvtf.32 %r126 <- (64) %arg1 - ret.32 %r126 + scvtf.32 %r117 <- (64) %arg1 + ret.32 %r117 ulong_2_float: .L82: <entry-point> - ucvtf.32 %r129 <- (64) %arg1 - ret.32 %r129 + ucvtf.32 %r120 <- (64) %arg1 + ret.32 %r120 double_2_float: .L84: <entry-point> - fcvtf.32 %r132 <- (64) %arg1 - ret.32 %r132 + fcvtf.32 %r123 <- (64) %arg1 + ret.32 %r123 int_2_double: .L86: <entry-point> - scvtf.64 %r135 <- (32) %arg1 - ret.64 %r135 + scvtf.64 %r126 <- (32) %arg1 + ret.64 %r126 uint_2_double: .L88: <entry-point> - ucvtf.64 %r138 <- (32) %arg1 - ret.64 %r138 + ucvtf.64 %r129 <- (32) %arg1 + ret.64 %r129 long_2_double: .L90: <entry-point> - scvtf.64 %r141 <- (64) %arg1 - ret.64 %r141 + scvtf.64 %r132 <- (64) %arg1 + ret.64 %r132 ulong_2_double: .L92: <entry-point> - ucvtf.64 %r144 <- (64) %arg1 - ret.64 %r144 + ucvtf.64 %r135 <- (64) %arg1 + ret.64 %r135 float_2_double: .L94: <entry-point> - fcvtf.64 %r147 <- (32) %arg1 - ret.64 %r147 + fcvtf.64 %r138 <- (32) %arg1 + ret.64 %r138 float_2_float: diff --git a/validation/linear/cast-volatile.c b/validation/linear/cast-volatile.c index f8a64937..6d12c015 100644 --- a/validation/linear/cast-volatile.c +++ b/validation/linear/cast-volatile.c @@ -8,7 +8,8 @@ static int foo(volatile int *a, int v) * check-name: cast-volatile * check-command: test-linearize -fdump-ir=linearize $file * - * check-known-to-fail * check-output-ignore - * check-output-excludes: scast\\. + * check-output-excludes: sext\\. + * check-output-excludes: zext\\. + * check-output-excludes: trunc\\. 
*/ diff --git a/validation/optim/bool-simplify.c b/validation/optim/bool-simplify.c index 05be1149..5b3cf449 100644 --- a/validation/optim/bool-simplify.c +++ b/validation/optim/bool-simplify.c @@ -33,7 +33,7 @@ and_1: .L2: <entry-point> setne.1 %r8 <- %arg1, $0 - cast.32 %r11 <- (1) %r8 + zext.32 %r11 <- (1) %r8 ret.32 %r11 @@ -41,7 +41,7 @@ or_0: .L4: <entry-point> setne.1 %r14 <- %arg1, $0 - cast.32 %r17 <- (1) %r14 + zext.32 %r17 <- (1) %r14 ret.32 %r17 diff --git a/validation/optim/canonical-cmp.c b/validation/optim/canonical-cmp.c index 0fc531e9..e0ca7db3 100644 --- a/validation/optim/canonical-cmp.c +++ b/validation/optim/canonical-cmp.c @@ -27,97 +27,97 @@ uint uat(uint p, uint a) { return (123 < p) ? a : 0; } seq: .L0: <entry-point> - seteq.32 %r4 <- %arg1, $123 - select.32 %r5 <- %r4, %arg2, $0 - ret.32 %r5 + seteq.32 %r3 <- %arg1, $123 + select.32 %r4 <- %r3, %arg2, $0 + ret.32 %r4 sne: .L2: <entry-point> - setne.32 %r11 <- %arg1, $123 - select.32 %r12 <- %r11, %arg2, $0 - ret.32 %r12 + setne.32 %r8 <- %arg1, $123 + select.32 %r9 <- %r8, %arg2, $0 + ret.32 %r9 slt: .L4: <entry-point> - setlt.32 %r18 <- %arg1, $123 - select.32 %r19 <- %r18, %arg2, $0 - ret.32 %r19 + setlt.32 %r13 <- %arg1, $123 + select.32 %r14 <- %r13, %arg2, $0 + ret.32 %r14 sle: .L6: <entry-point> - setle.32 %r25 <- %arg1, $123 - select.32 %r26 <- %r25, %arg2, $0 - ret.32 %r26 + setle.32 %r18 <- %arg1, $123 + select.32 %r19 <- %r18, %arg2, $0 + ret.32 %r19 sge: .L8: <entry-point> - setge.32 %r32 <- %arg1, $123 - select.32 %r33 <- %r32, %arg2, $0 - ret.32 %r33 + setge.32 %r23 <- %arg1, $123 + select.32 %r24 <- %r23, %arg2, $0 + ret.32 %r24 sgt: .L10: <entry-point> - setgt.32 %r39 <- %arg1, $123 - select.32 %r40 <- %r39, %arg2, $0 - ret.32 %r40 + setgt.32 %r28 <- %arg1, $123 + select.32 %r29 <- %r28, %arg2, $0 + ret.32 %r29 ueq: .L12: <entry-point> - seteq.32 %r45 <- %arg1, $123 - select.32 %r46 <- %r45, %arg2, $0 - ret.32 %r46 + seteq.32 %r33 <- %arg1, $123 + select.32 %r34 <- %r33, %arg2, $0 + ret.32 %r34 une: .L14: <entry-point> - setne.32 %r50 <- %arg1, $123 - select.32 %r51 <- %r50, %arg2, $0 - ret.32 %r51 + setne.32 %r38 <- %arg1, $123 + select.32 %r39 <- %r38, %arg2, $0 + ret.32 %r39 ubt: .L16: <entry-point> - setb.32 %r55 <- %arg1, $123 - select.32 %r56 <- %r55, %arg2, $0 - ret.32 %r56 + setb.32 %r43 <- %arg1, $123 + select.32 %r44 <- %r43, %arg2, $0 + ret.32 %r44 ube: .L18: <entry-point> - setbe.32 %r60 <- %arg1, $123 - select.32 %r61 <- %r60, %arg2, $0 - ret.32 %r61 + setbe.32 %r48 <- %arg1, $123 + select.32 %r49 <- %r48, %arg2, $0 + ret.32 %r49 uae: .L20: <entry-point> - setae.32 %r65 <- %arg1, $123 - select.32 %r66 <- %r65, %arg2, $0 - ret.32 %r66 + setae.32 %r53 <- %arg1, $123 + select.32 %r54 <- %r53, %arg2, $0 + ret.32 %r54 uat: .L22: <entry-point> - seta.32 %r70 <- %arg1, $123 - select.32 %r71 <- %r70, %arg2, $0 - ret.32 %r71 + seta.32 %r58 <- %arg1, $123 + select.32 %r59 <- %r58, %arg2, $0 + ret.32 %r59 * check-output-end diff --git a/validation/optim/kill-casts.c b/validation/optim/kill-casts.c index 140b8d20..2aa53fda 100644 --- a/validation/optim/kill-casts.c +++ b/validation/optim/kill-casts.c @@ -22,4 +22,6 @@ void foo(struct s *x) * check-output-excludes: fcvt[us]\\. * check-output-excludes: utptr\\. * check-output-excludes: ptrtu\\. + * check-output-excludes: [sz]ext\\. + * check-output-excludes: trunc\\. */ |
