about summary refs log tree commit diff stats homepage
diff options
authorLuc Van Oostenryck <luc.vanoostenryck@gmail.com>2018-07-28 10:39:49 +0200
committerLuc Van Oostenryck <luc.vanoostenryck@gmail.com>2018-08-06 17:43:37 +0200
commit6da6517086e523f25cef070f9a89105f43147896 (patch)
tree741717dc0c4844bd2ac7f6e2efdc3303cf79d0f0
parente3a5d3631fc2ba949fa5ba3c6d7b02387da2cf32 (diff)
downloadsparse-dev-6da6517086e523f25cef070f9a89105f43147896.tar.gz
limit the mask used for bitfield insertion
The mask used for bitfield insertion is as big as the integers used internally by sparse. Elsewhere in the code, constants are always truncated to the size of the instructions using them. It also causes the affected instructions to be displayed oddly. For example: and.32 %r2 <- %r1, 0xfffffffffffffff0 Fix this by limiting the mask to the size of the instruction. Fixes: a8e1df573 ("bitfield: extract linearize_bitfield_insert()") Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
-rw-r--r--linearize.c3
-rw-r--r--validation/linear/bitfield-size.c12
2 files changed, 8 insertions, 7 deletions
diff --git a/linearize.c b/linearize.c
index 17497186..c2010c4b 100644
--- a/linearize.c
+++ b/linearize.c
@@ -972,13 +972,14 @@ static pseudo_t linearize_bitfield_insert(struct entrypoint *ep,
unsigned int shift = ctype->bit_offset;
unsigned int size = ctype->bit_size;
unsigned long long mask = ((1ULL << size) - 1);
+ unsigned long long smask= bits_mask(btype->bit_size);
val = add_cast(ep, btype, ctype, OP_ZEXT, val);
if (shift) {
val = add_binary_op(ep, btype, OP_SHL, val, value_pseudo(shift));
mask <<= shift;
}
- ori = add_binary_op(ep, btype, OP_AND, ori, value_pseudo(~mask));
+ ori = add_binary_op(ep, btype, OP_AND, ori, value_pseudo(~mask & smask));
val = add_binary_op(ep, btype, OP_OR, ori, val);
return val;
diff --git a/validation/linear/bitfield-size.c b/validation/linear/bitfield-size.c
index 963f6e28..7f9725f9 100644
--- a/validation/linear/bitfield-size.c
+++ b/validation/linear/bitfield-size.c
@@ -52,7 +52,7 @@ upostinc:
add.3 %r4 <- %r3, $1
load.32 %r5 <- 0[%r1]
zext.32 %r6 <- (3) %r4
- and.32 %r7 <- %r5, $-8
+ and.32 %r7 <- %r5, $0xfffffff8
or.32 %r8 <- %r7, %r6
store.32 %r8 -> 0[%r1]
zext.32 %r9 <- (3) %r3
@@ -74,7 +74,7 @@ upreinc:
add.3 %r14 <- %r13, $1
load.32 %r15 <- 0[%r11]
zext.32 %r16 <- (3) %r14
- and.32 %r17 <- %r15, $-8
+ and.32 %r17 <- %r15, $0xfffffff8
or.32 %r18 <- %r17, %r16
store.32 %r18 -> 0[%r11]
zext.32 %r19 <- (3) %r14
@@ -97,7 +97,7 @@ ucpy:
load.64 %r24 <- 0[d]
load.32 %r25 <- 0[%r24]
zext.32 %r26 <- (3) %r23
- and.32 %r27 <- %r25, $-8
+ and.32 %r27 <- %r25, $0xfffffff8
or.32 %r28 <- %r27, %r26
store.32 %r28 -> 0[%r24]
br .L5
@@ -116,7 +116,7 @@ spostinc:
add.3 %r32 <- %r31, $1
load.32 %r33 <- 0[%r29]
zext.32 %r34 <- (3) %r32
- and.32 %r35 <- %r33, $-8
+ and.32 %r35 <- %r33, $0xfffffff8
or.32 %r36 <- %r35, %r34
store.32 %r36 -> 0[%r29]
zext.32 %r37 <- (3) %r31
@@ -138,7 +138,7 @@ spreinc:
add.3 %r42 <- %r41, $1
load.32 %r43 <- 0[%r39]
zext.32 %r44 <- (3) %r42
- and.32 %r45 <- %r43, $-8
+ and.32 %r45 <- %r43, $0xfffffff8
or.32 %r46 <- %r45, %r44
store.32 %r46 -> 0[%r39]
zext.32 %r47 <- (3) %r42
@@ -161,7 +161,7 @@ scpy:
load.64 %r52 <- 0[d]
load.32 %r53 <- 0[%r52]
zext.32 %r54 <- (3) %r51
- and.32 %r55 <- %r53, $-8
+ and.32 %r55 <- %r53, $0xfffffff8
or.32 %r56 <- %r55, %r54
store.32 %r56 -> 0[%r52]
br .L11