about summary refs log tree commit diff stats homepage
diff options
authorLuc Van Oostenryck <luc.vanoostenryck@gmail.com>2018-06-18 10:15:58 +0200
committerLuc Van Oostenryck <luc.vanoostenryck@gmail.com>2018-06-23 16:54:50 +0200
commit4d851248702bebe6c8ecdd1cef54e7782c72b8a2 (patch)
tree0e0fe54d70765a08e945df62b8861314d2ed7af3
parentc64d1972a5b775b9d7169dc8db96ee0556af7b26 (diff)
downloadsparse-dev-4d851248702bebe6c8ecdd1cef54e7782c72b8a2.tar.gz
cast: keep instruction sizes consistent
The last instruction of linearize_load_gen() ensures that loading a bitfield of size N results in an object of size N. Also, we require that the usual binops & unops use the same type on their operands and result. This means that before anything can be done on the loaded bitfield it must first be sign- or zero-extended in order to match the other operand's size. The same situation exists when storing a bitfield but there the extension isn't done. We can thus have some weird code like: trunc.9 %r2 <- (32) %r1 shl.32 %r3 <- %r2, ... where a bitfield of size 9 is mixed with a 32 bit shift. Avoid such mixing of sizes and always zero-extend the bitfield before storing it (since this was the implicitly desired semantic). The combination TRUNC + ZEXT can then be optimised later into a simple masking operation. Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
-rw-r--r--linearize.c12
-rw-r--r--validation/linear/bitfield-init-zero.c26
-rw-r--r--validation/linear/bitfield-size.c174
3 files changed, 200 insertions, 12 deletions
diff --git a/linearize.c b/linearize.c
index d1a079f4..83dfec11 100644
--- a/linearize.c
+++ b/linearize.c
@@ -26,6 +26,7 @@
static pseudo_t linearize_statement(struct entrypoint *ep, struct statement *stmt);
static pseudo_t linearize_expression(struct entrypoint *ep, struct expression *expr);
+static pseudo_t add_cast(struct entrypoint *ep, struct symbol *to, struct symbol *from, int op, pseudo_t src);
static pseudo_t add_binary_op(struct entrypoint *ep, struct symbol *ctype, int op, pseudo_t left, pseudo_t right);
static pseudo_t add_setval(struct entrypoint *ep, struct symbol *ctype, struct expression *val);
static pseudo_t linearize_one_symbol(struct entrypoint *ep, struct symbol *sym);
@@ -991,8 +992,9 @@ static pseudo_t linearize_store_gen(struct entrypoint *ep,
pseudo_t orig = add_load(ep, ad);
unsigned long long mask = (1ULL << size) - 1;
+ store = add_cast(ep, btype, ctype, OP_ZEXT, store);
if (shift) {
- store = add_binary_op(ep, btype, OP_SHL, value, value_pseudo(shift));
+ store = add_binary_op(ep, btype, OP_SHL, store, value_pseudo(shift));
mask <<= shift;
}
orig = add_binary_op(ep, btype, OP_AND, orig, value_pseudo(~mask));
@@ -1108,6 +1110,14 @@ static pseudo_t add_unop(struct entrypoint *ep, struct symbol *ctype, int op, ps
return new;
}
+static pseudo_t add_cast(struct entrypoint *ep, struct symbol *to,
+ struct symbol *from, int op, pseudo_t src)
+{
+ pseudo_t new = add_unop(ep, to, op, src);
+ new->def->orig_type = from;
+ return new;
+}
+
static pseudo_t linearize_slice(struct entrypoint *ep, struct expression *expr)
{
pseudo_t pre = linearize_expression(ep, expr->base);
diff --git a/validation/linear/bitfield-init-zero.c b/validation/linear/bitfield-init-zero.c
index 7a410a7c..16422c19 100644
--- a/validation/linear/bitfield-init-zero.c
+++ b/validation/linear/bitfield-init-zero.c
@@ -58,16 +58,18 @@ bfuu_init:
.L0:
<entry-point>
trunc.9 %r2 <- (32) %arg1
- shl.32 %r4 <- %r2, $11
- ret.32 %r4
+ zext.32 %r4 <- (9) %r2
+ shl.32 %r5 <- %r4, $11
+ ret.32 %r5
bfus_init:
.L2:
<entry-point>
- trunc.9 %r10 <- (32) %arg1
- shl.32 %r12 <- %r10, $11
- ret.32 %r12
+ trunc.9 %r11 <- (32) %arg1
+ zext.32 %r13 <- (9) %r11
+ shl.32 %r14 <- %r13, $11
+ ret.32 %r14
bfu_get0:
@@ -79,17 +81,19 @@ bfu_get0:
bfsu_init:
.L6:
<entry-point>
- trunc.9 %r23 <- (32) %arg1
- shl.32 %r25 <- %r23, $11
- ret.32 %r25
+ trunc.9 %r25 <- (32) %arg1
+ zext.32 %r27 <- (9) %r25
+ shl.32 %r28 <- %r27, $11
+ ret.32 %r28
bfss_init:
.L8:
<entry-point>
- trunc.9 %r31 <- (32) %arg1
- shl.32 %r33 <- %r31, $11
- ret.32 %r33
+ trunc.9 %r34 <- (32) %arg1
+ zext.32 %r36 <- (9) %r34
+ shl.32 %r37 <- %r36, $11
+ ret.32 %r37
bfs_get0:
diff --git a/validation/linear/bitfield-size.c b/validation/linear/bitfield-size.c
new file mode 100644
index 00000000..963f6e28
--- /dev/null
+++ b/validation/linear/bitfield-size.c
@@ -0,0 +1,174 @@
+struct u {
+ unsigned int f:3;
+};
+
+unsigned int upostinc(struct u *x)
+{
+ return x->f++;
+}
+
+unsigned int upreinc(struct u *x)
+{
+ return ++x->f;
+}
+
+void ucpy(struct u *d, const struct u *s)
+{
+ d->f = s->f;
+}
+
+
+struct s {
+ int f:3;
+};
+
+int spostinc(struct s *x)
+{
+ return x->f++;
+}
+
+int spreinc(struct s *x)
+{
+ return ++x->f;
+}
+
+void scpy(struct s *d, const struct s *s)
+{
+ d->f = s->f;
+}
+
+/*
+ * check-name: bitfield-size
+ * check-command: test-linearize -m64 -Wno-decl -fdump-ir $file
+ *
+ * check-output-start
+upostinc:
+.L0:
+ <entry-point>
+ store.64 %arg1 -> 0[x]
+ load.64 %r1 <- 0[x]
+ load.32 %r2 <- 0[%r1]
+ trunc.3 %r3 <- (32) %r2
+ add.3 %r4 <- %r3, $1
+ load.32 %r5 <- 0[%r1]
+ zext.32 %r6 <- (3) %r4
+ and.32 %r7 <- %r5, $-8
+ or.32 %r8 <- %r7, %r6
+ store.32 %r8 -> 0[%r1]
+ zext.32 %r9 <- (3) %r3
+ phisrc.32 %phi1(return) <- %r9
+ br .L1
+
+.L1:
+ phi.32 %r10 <- %phi1(return)
+ ret.32 %r10
+
+
+upreinc:
+.L2:
+ <entry-point>
+ store.64 %arg1 -> 0[x]
+ load.64 %r11 <- 0[x]
+ load.32 %r12 <- 0[%r11]
+ trunc.3 %r13 <- (32) %r12
+ add.3 %r14 <- %r13, $1
+ load.32 %r15 <- 0[%r11]
+ zext.32 %r16 <- (3) %r14
+ and.32 %r17 <- %r15, $-8
+ or.32 %r18 <- %r17, %r16
+ store.32 %r18 -> 0[%r11]
+ zext.32 %r19 <- (3) %r14
+ phisrc.32 %phi2(return) <- %r19
+ br .L3
+
+.L3:
+ phi.32 %r20 <- %phi2(return)
+ ret.32 %r20
+
+
+ucpy:
+.L4:
+ <entry-point>
+ store.64 %arg1 -> 0[d]
+ store.64 %arg2 -> 0[s]
+ load.64 %r21 <- 0[s]
+ load.32 %r22 <- 0[%r21]
+ trunc.3 %r23 <- (32) %r22
+ load.64 %r24 <- 0[d]
+ load.32 %r25 <- 0[%r24]
+ zext.32 %r26 <- (3) %r23
+ and.32 %r27 <- %r25, $-8
+ or.32 %r28 <- %r27, %r26
+ store.32 %r28 -> 0[%r24]
+ br .L5
+
+.L5:
+ ret
+
+
+spostinc:
+.L6:
+ <entry-point>
+ store.64 %arg1 -> 0[x]
+ load.64 %r29 <- 0[x]
+ load.32 %r30 <- 0[%r29]
+ trunc.3 %r31 <- (32) %r30
+ add.3 %r32 <- %r31, $1
+ load.32 %r33 <- 0[%r29]
+ zext.32 %r34 <- (3) %r32
+ and.32 %r35 <- %r33, $-8
+ or.32 %r36 <- %r35, %r34
+ store.32 %r36 -> 0[%r29]
+ zext.32 %r37 <- (3) %r31
+ phisrc.32 %phi3(return) <- %r37
+ br .L7
+
+.L7:
+ phi.32 %r38 <- %phi3(return)
+ ret.32 %r38
+
+
+spreinc:
+.L8:
+ <entry-point>
+ store.64 %arg1 -> 0[x]
+ load.64 %r39 <- 0[x]
+ load.32 %r40 <- 0[%r39]
+ trunc.3 %r41 <- (32) %r40
+ add.3 %r42 <- %r41, $1
+ load.32 %r43 <- 0[%r39]
+ zext.32 %r44 <- (3) %r42
+ and.32 %r45 <- %r43, $-8
+ or.32 %r46 <- %r45, %r44
+ store.32 %r46 -> 0[%r39]
+ zext.32 %r47 <- (3) %r42
+ phisrc.32 %phi4(return) <- %r47
+ br .L9
+
+.L9:
+ phi.32 %r48 <- %phi4(return)
+ ret.32 %r48
+
+
+scpy:
+.L10:
+ <entry-point>
+ store.64 %arg1 -> 0[d]
+ store.64 %arg2 -> 0[s]
+ load.64 %r49 <- 0[s]
+ load.32 %r50 <- 0[%r49]
+ trunc.3 %r51 <- (32) %r50
+ load.64 %r52 <- 0[d]
+ load.32 %r53 <- 0[%r52]
+ zext.32 %r54 <- (3) %r51
+ and.32 %r55 <- %r53, $-8
+ or.32 %r56 <- %r55, %r54
+ store.32 %r56 -> 0[%r52]
+ br .L11
+
+.L11:
+ ret
+
+
+ * check-output-end
+ */