Diffstat (limited to 'lib/lz4')
-rw-r--r--   lib/lz4/lz4_compress.c   |  4
-rw-r--r--   lib/lz4/lz4_decompress.c | 41
-rw-r--r--   lib/lz4/lz4defs.h        | 13
-rw-r--r--   lib/lz4/lz4hc_compress.c |  2
4 files changed, 42 insertions, 18 deletions
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index cc7b6d4cc7c7..90bb67994688 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -446,7 +446,7 @@ _last_literals:
 			*op++ = (BYTE)(lastRun << ML_BITS);
 		}
 
-		memcpy(op, anchor, lastRun);
+		LZ4_memcpy(op, anchor, lastRun);
 
 		op += lastRun;
 	}
@@ -708,7 +708,7 @@ _last_literals:
 		} else {
 			*op++ = (BYTE)(lastRunSize<<ML_BITS);
 		}
-		memcpy(op, anchor, lastRunSize);
+		LZ4_memcpy(op, anchor, lastRunSize);
 		op += lastRunSize;
 	}
 
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0c9d3ad17e0f..59fe69a63800 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -141,6 +141,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
 		 * space in the output for those 18 bytes earlier, upon
 		 * entering the shortcut (in other words, there is a
 		 * combined check for both stages).
+		 *
+		 * The & in the likely() below is intentionally not && so that
+		 * some compilers can produce better parallelized runtime code
 		 */
 		if ((endOnInput ? length != RUN_MASK : length <= 8)
 			/*
@@ -150,7 +153,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
 		    && likely((endOnInput ? ip < shortiend : 1) &
 			      (op <= shortoend))) {
 			/* Copy the literals */
-			memcpy(op, ip, endOnInput ? 16 : 8);
+			LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
 			op += length; ip += length;
 
 			/*
@@ -169,9 +172,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
 			    (offset >= 8) &&
 			    (dict == withPrefix64k || match >= lowPrefix)) {
 				/* Copy the match. */
-				memcpy(op + 0, match + 0, 8);
-				memcpy(op + 8, match + 8, 8);
-				memcpy(op + 16, match + 16, 2);
+				LZ4_memcpy(op + 0, match + 0, 8);
+				LZ4_memcpy(op + 8, match + 8, 8);
+				LZ4_memcpy(op + 16, match + 16, 2);
 				op += length + MINMATCH;
 				/* Both stages worked, load the next token. */
 				continue;
@@ -260,12 +263,20 @@ static FORCE_INLINE int LZ4_decompress_generic(
 				}
 			}
 
-			memcpy(op, ip, length);
+			/*
+			 * supports overlapping memory regions; only matters
+			 * for in-place decompression scenarios
+			 */
+			LZ4_memmove(op, ip, length);
 			ip += length;
 			op += length;
 
-			/* Necessarily EOF, due to parsing restrictions */
-			if (!partialDecoding || (cpy == oend))
+			/* Necessarily EOF when !partialDecoding.
+			 * When partialDecoding, it is EOF if we've either
+			 * filled the output buffer or
+			 * can't proceed with reading an offset for following match.
+			 */
+			if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
 				break;
 		} else {
 			/* may overwrite up to WILDCOPYLENGTH beyond cpy */
@@ -347,7 +358,7 @@ _copy_match:
 			size_t const copySize = (size_t)(lowPrefix - match);
 			size_t const restSize = length - copySize;
 
-			memcpy(op, dictEnd - copySize, copySize);
+			LZ4_memcpy(op, dictEnd - copySize, copySize);
 			op += copySize;
 			if (restSize > (size_t)(op - lowPrefix)) {
 				/* overlap copy */
@@ -357,7 +368,7 @@ _copy_match:
 				while (op < endOfMatch)
 					*op++ = *copyFrom++;
 			} else {
-				memcpy(op, lowPrefix, restSize);
+				LZ4_memcpy(op, lowPrefix, restSize);
 				op += restSize;
 			}
 		}
@@ -383,7 +394,7 @@ _copy_match:
 			while (op < copyEnd)
 				*op++ = *match++;
 		} else {
-			memcpy(op, match, mlen);
+			LZ4_memcpy(op, match, mlen);
 		}
 		op = copyEnd;
 		if (op == oend)
@@ -397,7 +408,7 @@ _copy_match:
 		op[2] = match[2];
 		op[3] = match[3];
 		match += inc32table[offset];
-		memcpy(op + 4, match, 4);
+		LZ4_memcpy(op + 4, match, 4);
 		match -= dec64table[offset];
 	} else {
 		LZ4_copy8(op, match);
@@ -474,7 +485,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
 
 /* ===== Instantiate a few more decoding cases, used more than once. ===== */
 
-int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
 				      int compressedSize, int maxOutputSize)
 {
 	return LZ4_decompress_generic(source, dest,
@@ -496,9 +507,9 @@ static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
 		(BYTE *)dest - prefixSize, NULL, 0);
 }
 
-int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
-	int compressedSize, int maxOutputSize,
-	const void *dictStart, size_t dictSize)
+static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+	int compressedSize, int maxOutputSize,
+	const void *dictStart, size_t dictSize)
 {
 	return LZ4_decompress_generic(source, dest,
 		compressedSize, maxOutputSize,
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 1a7fa9d9170f..330aa539b46e 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -36,6 +36,8 @@
  */
 
 #include <asm/unaligned.h>
+
+#include <linux/bitops.h>
 #include <linux/string.h>	 /* memset, memcpy */
 
 #define FORCE_INLINE __always_inline
@@ -137,6 +139,17 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
 	return put_unaligned_le16(value, memPtr);
 }
 
+/*
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so apply its specialized memcpy() inlining logic. When
+ * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy()
+ * as-if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
+
 static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
 {
 #if LZ4_ARCH64
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index 1b61d874e337..e7ac8694b797 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -570,7 +570,7 @@ _Search3:
 			*op++ = (BYTE) lastRun;
 		} else
 			*op++ = (BYTE)(lastRun<<ML_BITS);
-		memcpy(op, anchor, iend - anchor);
+		LZ4_memcpy(op, anchor, iend - anchor);
 		op += iend - anchor;
 	}
 
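Editor's note: the core of the patch is the pair of LZ4_memcpy()/LZ4_memmove() macros
added to lz4defs.h. The following is a minimal standalone sketch of why they matter,
not part of the patch above; the two helper functions are hypothetical names used
only for illustration.

	/*
	 * Sketch: in a freestanding build (such as the kernel's boot-time
	 * decompressors), the compiler may not assume that memcpy() has its
	 * standard semantics, so even a constant-size copy can remain an
	 * out-of-line call. __builtin_memcpy() restores that assumption and
	 * lets the compiler inline small constant-size copies.
	 */
	#include <stddef.h>
	#include <stdint.h>

	#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
	#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)

	/* Hypothetical hot-path helper: the size is a compile-time constant,
	 * so this typically becomes a couple of (possibly unaligned)
	 * load/store pairs instead of a call into memcpy(). */
	static inline void copy_literals16(uint8_t *op, const uint8_t *ip)
	{
		LZ4_memcpy(op, ip, 16);
	}

	/* Hypothetical last-literals helper: in-place decompression can make
	 * the source and destination of this copy overlap, which memcpy()
	 * does not permit; this is why the decompressor's literal copy in
	 * the patch became LZ4_memmove(). */
	static inline void copy_last_literals(uint8_t *op, const uint8_t *ip,
					      size_t length)
	{
		LZ4_memmove(op, ip, length);
	}

The other functional change, the added (ip >= (iend - 2)) test, recognizes end of
input during partial decoding: with fewer than two input bytes left, no match
offset can follow, so decoding stops instead of reading out of bounds.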