* [PATCH v2 1/2] Revert "lzo: properly check for overruns"
From: Stefan Müller-Klieser @ 2015-03-24 10:03 UTC
To: barebox
This reverts barebox commit ecb1dc0b1e859bca9
This corresponds to kernel commit af958a38a60c7ca3d8
As analysed by Willem Pinckaers, this fix is still incomplete on
certain rare corner cases, and it is easier to restart from the
original code.
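For context, the hunks below replace the two-argument bounds macros, which folded a wrap-around test on t + x into every check, with the original single-argument form. A minimal standalone sketch of the two shapes follows; the function names are invented for this illustration, and the real code uses the HAVE_IP()/HAVE_OP() macros over ip/ip_end and op/op_end exactly as shown in the diff:

#include <stddef.h>

/* Being reverted: a bound check combined with a wrap-around test on t + x. */
static int have_space_checked(const unsigned char *cur, const unsigned char *end,
                              size_t t, size_t x)
{
        return ((size_t)(end - cur) >= (size_t)(t + x)) &&
               ((t + x) >= t) && ((t + x) >= x);
}

/*
 * Restored original: a single simple bound.  Overflow of the length being
 * accumulated is then handled separately, see patch 2/2.
 */
static int have_space(const unsigned char *cur, const unsigned char *end,
                      size_t x)
{
        return (size_t)(end - cur) >= x;
}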
Reported-by: Willem Pinckaers <willem@lekkertech.net>
Cc: "Don A. Bailey" <donb@securitymouse.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Stefan Müller-Klieser <s.mueller-klieser@phytec.de>
---
lib/lzo/lzo1x_decompress_safe.c | 64 ++++++++++++++---------------------------
1 file changed, 22 insertions(+), 42 deletions(-)
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 48bedba..de795a3 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -16,34 +16,14 @@
#include <lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(t, x) \
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
-
-#define HAVE_OP(t, x) \
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
-
-#define NEED_IP(t, x) \
- do { \
- if (!HAVE_IP(t, x)) \
- goto input_overrun; \
- } while (0)
-
-#define NEED_OP(t, x) \
- do { \
- if (!HAVE_OP(t, x)) \
- goto output_overrun; \
- } while (0)
-
-#define TEST_LB(m_pos) \
- do { \
- if ((m_pos) < out) \
- goto lookbehind_overrun; \
- } while (0)
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len)
+ unsigned char *out, size_t *out_len)
{
unsigned char *op;
const unsigned char *ip;
@@ -75,14 +55,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
t += 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -98,8 +78,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t, 0);
- NEED_IP(t, 3);
+ NEED_OP(t);
+ NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -112,7 +92,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2, 0);
+ NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -136,10 +116,10 @@ copy_literal_run:
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
t += 31 + *ip++;
- NEED_IP(2, 0);
+ NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -154,10 +134,10 @@ copy_literal_run:
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
t += 7 + *ip++;
- NEED_IP(2, 0);
+ NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -171,7 +151,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t, 15))) {
+ if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -181,7 +161,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6, 0)) {
+ if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
@@ -189,7 +169,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t, 0);
+ NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -198,7 +178,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t, 0);
+ NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -211,15 +191,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t, 3);
- NEED_OP(t, 0);
+ NEED_IP(t + 3);
+ NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
--
1.9.1
* [PATCH v2 2/2] lzo: check for length overrun in variable length encoding.
From: Stefan Müller-Klieser @ 2015-03-24 10:03 UTC
To: barebox
This corresponds to kernel commit 72cf90124e87d975d0b
This fix ensures that we never hit an integer overflow from repeatedly adding
255 while parsing a variable length encoding. It works differently from
commit 206a81c ("lzo: properly check for overruns") because, instead of
ensuring that we don't overrun the input, which is tricky to guarantee
due to many assumptions in the code, it simply bounds the cumulative
number of 255s read so that it cannot overflow.
MAX_255_COUNT is the maximum number of times we can add 255 to a base
count without overflowing an integer. The multiply will overflow when
multiplying 255 by more than MAXINT/255. The sum will overflow earlier,
depending on the base count. Since the base count is taken from a u8
and a few bits, it is safe to assume that it will always be lower than
or equal to 2*255, so we can always prevent any overflow by accepting
two fewer 255 steps.
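The hunks below open-code this bounded parse around the decompressor's ip/ip_end pointers and the NEED_IP() check. As a minimal standalone sketch of the same idea (the helper name, its pointer-updating interface and the 0/-1 return convention are invented here and are not part of the patch):

#include <stddef.h>

#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

/*
 * Count leading zero bytes, bound the count, and fold it into the length:
 * each zero byte stands for "+255", so a run of N zeros contributes N * 255,
 * computed as (N << 8) - N to avoid a multiply.
 *
 * Worked bound for a 32-bit size_t: SIZE_MAX / 255 = 16843009, so
 * MAX_255_COUNT = 16843007 and the counted run contributes at most
 * 16843007 * 255 = 4294966785; adding a base count of at most 2 * 255 = 510
 * lands exactly on SIZE_MAX without wrapping.
 */
static int parse_run_length(const unsigned char **pin, const unsigned char *end,
                            size_t base, size_t *len)
{
        const unsigned char *ip = *pin;
        const unsigned char *ip_last = ip;
        size_t offset;

        while (ip < end && *ip == 0)
                ip++;
        if (ip == end)
                return -1;                      /* ran out of input mid-encoding */

        offset = ip - ip_last;
        if (offset > MAX_255_COUNT)
                return -1;                      /* run would overflow the length */

        offset = (offset << 8) - offset;        /* offset *= 255 without a multiply */
        *len = offset + base + *ip++;           /* base is 15, 31 or 7 in the patch */
        *pin = ip;
        return 0;
}

For example, the byte sequence 00 00 01 with a base of 15 parses to 2*255 + 15 + 1 = 526, which is what the first hunk below adds into t.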
This patch also reduces the CPU overhead and actually increases performance
by 1.1% compared to the initial code, while the previous fix costs 3.1%
(measured on x86_64).
The fix needs to be backported to all currently supported stable kernels.
Reported-by: Willem Pinckaers <willem@lekkertech.net>
Cc: "Don A. Bailey" <donb@securitymouse.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Stefan Müller-Klieser <s.mueller-klieser@phytec.de>
---
lib/lzo/lzo1x_decompress_safe.c | 43 +++++++++++++++++++++++++++++++++++------
1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index de795a3..3c0f512 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -22,6 +22,16 @@
#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
+
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
{
@@ -52,12 +62,19 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
NEED_IP(1);
}
- t += 15 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
}
t += 3;
copy_literal_run:
@@ -113,12 +130,19 @@ copy_literal_run:
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
NEED_IP(1);
}
- t += 31 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
NEED_IP(2);
}
m_pos = op - 1;
@@ -131,12 +155,19 @@ copy_literal_run:
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
NEED_IP(1);
}
- t += 7 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
NEED_IP(2);
}
next = get_unaligned_le16(ip);
--
1.9.1