aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/linearize.c
diff options
authorLuc Van Oostenryck <luc.vanoostenryck@gmail.com>2018-06-18 10:15:58 +0200
committerLuc Van Oostenryck <luc.vanoostenryck@gmail.com>2018-06-23 16:54:50 +0200
commit4d851248702bebe6c8ecdd1cef54e7782c72b8a2 (patch)
tree0e0fe54d70765a08e945df62b8861314d2ed7af3 /linearize.c
parentc64d1972a5b775b9d7169dc8db96ee0556af7b26 (diff)
downloadsparse-dev-4d851248702bebe6c8ecdd1cef54e7782c72b8a2.tar.gz
cast: keep instruction sizes consistent
The last instruction of linearize_load_gen() ensures that loading a bitfield of size N results in an object of size N. Also, we require that the usual binops & unops use the same type for their operands and result. This means that before anything can be done with the loaded bitfield, it must first be sign- or zero-extended in order to match the other operand's size. The same situation exists when storing a bitfield, but there the extension isn't done. We can thus have some weird code like: trunc.9 %r2 <- (32) %r1 shl.32 %r3 <- %r2, ... where a bitfield of size 9 is mixed with a 32-bit shift. Avoid such mixing of sizes and always zero-extend the bitfield before storing it (since this was the implicitly desired semantic). The combination TRUNC + ZEXT can then be optimised later into a simple masking operation. Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Diffstat (limited to 'linearize.c')
-rw-r--r--linearize.c12
1 file changed, 11 insertions, 1 deletion
diff --git a/linearize.c b/linearize.c
index d1a079f4..83dfec11 100644
--- a/linearize.c
+++ b/linearize.c
@@ -26,6 +26,7 @@
static pseudo_t linearize_statement(struct entrypoint *ep, struct statement *stmt);
static pseudo_t linearize_expression(struct entrypoint *ep, struct expression *expr);
+static pseudo_t add_cast(struct entrypoint *ep, struct symbol *to, struct symbol *from, int op, pseudo_t src);
static pseudo_t add_binary_op(struct entrypoint *ep, struct symbol *ctype, int op, pseudo_t left, pseudo_t right);
static pseudo_t add_setval(struct entrypoint *ep, struct symbol *ctype, struct expression *val);
static pseudo_t linearize_one_symbol(struct entrypoint *ep, struct symbol *sym);
@@ -991,8 +992,9 @@ static pseudo_t linearize_store_gen(struct entrypoint *ep,
pseudo_t orig = add_load(ep, ad);
unsigned long long mask = (1ULL << size) - 1;
+ store = add_cast(ep, btype, ctype, OP_ZEXT, store);
if (shift) {
- store = add_binary_op(ep, btype, OP_SHL, value, value_pseudo(shift));
+ store = add_binary_op(ep, btype, OP_SHL, store, value_pseudo(shift));
mask <<= shift;
}
orig = add_binary_op(ep, btype, OP_AND, orig, value_pseudo(~mask));
@@ -1108,6 +1110,14 @@ static pseudo_t add_unop(struct entrypoint *ep, struct symbol *ctype, int op, ps
return new;
}
+static pseudo_t add_cast(struct entrypoint *ep, struct symbol *to,
+ struct symbol *from, int op, pseudo_t src)
+{
+ pseudo_t new = add_unop(ep, to, op, src);
+ new->def->orig_type = from;
+ return new;
+}
+
static pseudo_t linearize_slice(struct entrypoint *ep, struct expression *expr)
{
pseudo_t pre = linearize_expression(ep, expr->base);