// SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause
/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <asm/unaligned.h>
#include <u-boot/lz4.h>

#define FORCE_INLINE inline __attribute__((always_inline))

static FORCE_INLINE u16 LZ4_readLE16(const void *src)
{
	return get_unaligned_le16(src);
}

static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
	put_unaligned(get_unaligned((const u64 *)src), (u64 *)dst);
}

typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;

static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
{
	put_unaligned(value, (U32 *)memPtr);
}

/**************************************
 *	Reading and writing into memory
 **************************************/

/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
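/*
 * The overrun is safe because the LZ4 block format guarantees trailing
 * slack in every block (see MFLIMIT and LASTLITERALS below) and the
 * decoder caps wild copies before the true end of the output buffer.
 */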
static void LZ4_wildCopy(void *dstPtr, const void *srcPtr, void *dstEnd)
{
	BYTE *d = (BYTE *)dstPtr;
	const BYTE *s = (const BYTE *)srcPtr;
	BYTE *e = (BYTE *)dstEnd;

	do { LZ4_copy8(d, s); d += 8; s += 8; } while (d < e);
}

/**************************************
 *	Common Constants
 **************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)

/*
 * ensure it's possible to write 2 x wildcopyLength
 * without overflowing output buffer
 */
#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)

#define KB (1 << 10)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)

#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))

/**************************************
 *	Local Structures and types
 **************************************/
typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;

#define DEBUGLOG(l, ...) {}	/* disabled */

#ifndef assert
#define assert(condition) ((void)0)
#endif

/*
 * LZ4_decompress_generic() :
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is important for performance that this function really gets
 * inlined, in order to remove useless branches during compilation
 * optimization.
 */
static FORCE_INLINE int LZ4_decompress_generic(
	const char * const src,
	char * const dst,
	int srcSize,
	/*
	 * If endOnInput == endOnInputSize,
	 * this value is `dstCapacity`
	 */
	int outputSize,
	/* endOnOutputSize, endOnInputSize */
	endCondition_directive endOnInput,
	/* full, partial */
	earlyEnd_directive partialDecoding,
	/* noDict, withPrefix64k, usingExtDict */
	dict_directive dict,
	/* always <= dst, == dst when no prefix */
	const BYTE * const lowPrefix,
	/* only if dict == usingExtDict */
	const BYTE * const dictStart,
	/* note : = 0 if noDict */
	const size_t dictSize
	)
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE * const iend = ip + srcSize;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + outputSize;
	BYTE *cpy;

	const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
	static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
	static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
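	/*
	 * inc32table/dec64table drive the offset < 8 match-copy path
	 * below: they re-position the match pointer so the fixed 4- and
	 * 8-byte copies reproduce the repeating pattern of an
	 * overlapping match without reading bytes not yet written.
	 */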

	const int safeDecode = (endOnInput == endOnInputSize);
	const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));

	/* Set up the "end" pointers for the shortcut. */
	const BYTE *const shortiend = iend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
	const BYTE *const shortoend = oend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;

	DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
		 srcSize, outputSize);

	/* Special cases */
	assert(lowPrefix <= op);
	assert(src != NULL);

	/* Empty output buffer */
	if ((endOnInput) && (unlikely(outputSize == 0)))
		return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;

	if ((!endOnInput) && (unlikely(outputSize == 0)))
		return (*ip == 0 ? 1 : -1);

	if ((endOnInput) && unlikely(srcSize == 0))
		return -1;

	/* Main Loop : decode sequences */
	while (1) {
		size_t length;
		const BYTE *match;
		size_t offset;

		/* get literal length */
		unsigned int const token = *ip++;
		length = token >> ML_BITS;
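
		/*
		 * Token layout: the high 4 bits of the token encode the
		 * literal-run length, the low 4 bits the match length
		 * (biased by MINMATCH). A field value of RUN_MASK/ML_MASK
		 * (15) means extension bytes follow until a byte below
		 * 255 terminates the length.
		 */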

		/* ip < iend before the increment */
		assert(!endOnInput || ip <= iend);

		/*
		 * A two-stage shortcut for the most common case:
		 * 1) If the literal length is 0..14, and there is enough
		 * space, enter the shortcut and copy 16 bytes on behalf
		 * of the literals (in the fast mode, only 8 bytes can be
		 * safely copied this way).
		 * 2) Further if the match length is 4..18, copy 18 bytes
		 * in a similar manner; but we ensure that there's enough
		 * space in the output for those 18 bytes earlier, upon
		 * entering the shortcut (in other words, there is a
		 * combined check for both stages).
		 *
		 * The & in the likely() below is intentionally not && so that
		 * some compilers can produce better parallelized runtime code
		 */
		if ((endOnInput ? length != RUN_MASK : length <= 8)
		   /*
		    * strictly "less than" on input, to re-enter
		    * the loop with at least one byte
		    */
		   && likely((endOnInput ? ip < shortiend : 1) &
			     (op <= shortoend))) {
			/* Copy the literals */
			memcpy(op, ip, endOnInput ? 16 : 8);
			op += length; ip += length;

			/*
			 * The second stage:
			 * prepare for match copying, decode full info.
			 * If it doesn't work out, the info won't be wasted.
			 */
			length = token & ML_MASK; /* match length */
			offset = LZ4_readLE16(ip);
			ip += 2;
			match = op - offset;
			assert(match <= op); /* check overflow */

			/* Do not deal with overlapping matches. */
			if ((length != ML_MASK) &&
			    (offset >= 8) &&
			    (dict == withPrefix64k || match >= lowPrefix)) {
				/* Copy the match. */
				memcpy(op + 0, match + 0, 8);
				memcpy(op + 8, match + 8, 8);
				memcpy(op + 16, match + 16, 2);
				op += length + MINMATCH;
				/* Both stages worked, load the next token. */
				continue;
			}

			/*
			 * The second stage didn't work out, but the info
			 * is ready. Propel it right to the point of match
			 * copying.
			 */
			goto _copy_match;
		}

		/* decode literal length */
		if (length == RUN_MASK) {
			unsigned int s;

			if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
				/* overflow detection */
				goto _output_error;
			}
			do {
				s = *ip++;
				length += s;
			} while (likely(endOnInput
				? ip < iend - RUN_MASK
				: 1) & (s == 255));

			if ((safeDecode)
			    && unlikely((uptrval)(op) +
					length < (uptrval)(op))) {
				/* overflow detection */
				goto _output_error;
			}
			if ((safeDecode)
			    && unlikely((uptrval)(ip) +
					length < (uptrval)(ip))) {
				/* overflow detection */
				goto _output_error;
			}
		}

		/* copy literals */
		cpy = op + length;
		LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);

		if (((endOnInput) && ((cpy > oend - MFLIMIT)
			|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
			|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
			if (partialDecoding) {
				if (cpy > oend) {
					/*
					 * Partial decoding :
					 * stop in the middle of literal segment
					 */
					cpy = oend;
					length = oend - op;
				}
				if ((endOnInput) && (ip + length > iend)) {
					/*
					 * Error :
					 * read attempt beyond
					 * end of input buffer
					 */
					goto _output_error;
				}
			} else {
				if ((!endOnInput) && (cpy != oend)) {
					/*
					 * Error :
					 * block decoding must
					 * stop exactly there
					 */
					goto _output_error;
				}
				if ((endOnInput)
				    && ((ip + length != iend)
					|| (cpy > oend))) {
					/*
					 * Error :
					 * input must be consumed
					 */
					goto _output_error;
				}
			}

			/*
			 * supports overlapping memory regions; only matters
			 * for in-place decompression scenarios
			 */
			memmove(op, ip, length);
			ip += length;
			op += length;

			/* Necessarily EOF, due to parsing restrictions */
			if (!partialDecoding || (cpy == oend))
				break;
		} else {
			/* may overwrite up to WILDCOPYLENGTH beyond cpy */
			LZ4_wildCopy(op, ip, cpy);
			ip += length;
			op = cpy;
		}

		/* get offset */
		offset = LZ4_readLE16(ip);
		ip += 2;
		match = op - offset;

		/* get matchlength */
		length = token & ML_MASK;

_copy_match:
		if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
			/* Error : offset outside buffers */
			goto _output_error;
		}

		/* costs ~1%; silence an msan warning when offset == 0 */
		/*
		 * note : when partialDecoding, there is no guarantee that
		 * at least 4 bytes remain available in output buffer
		 */
		if (!partialDecoding) {
			assert(oend > op);
			assert(oend - op >= 4);

			LZ4_write32(op, (U32)offset);
		}

		if (length == ML_MASK) {
			unsigned int s;

			do {
				s = *ip++;

				if ((endOnInput) && (ip > iend - LASTLITERALS))
					goto _output_error;

				length += s;
			} while (s == 255);

			if ((safeDecode)
			    && unlikely((uptrval)(op) + length < (uptrval)op)) {
				/* overflow detection */
				goto _output_error;
			}
		}

		length += MINMATCH;

		/* match starting within external dictionary */
		if ((dict == usingExtDict) && (match < lowPrefix)) {
			if (unlikely(op + length > oend - LASTLITERALS)) {
				/* doesn't respect parsing restriction */
				if (!partialDecoding)
					goto _output_error;
				length = min(length, (size_t)(oend - op));
			}

			if (length <= (size_t)(lowPrefix - match)) {
				/*
				 * match fits entirely within external
				 * dictionary : just copy
				 */
				memmove(op, dictEnd - (lowPrefix - match),
					length);
				op += length;
			} else {
				/*
				 * match stretches into both external
				 * dictionary and current block
				 */
				size_t const copySize = (size_t)(lowPrefix - match);
				size_t const restSize = length - copySize;

				memcpy(op, dictEnd - copySize, copySize);
				op += copySize;
				if (restSize > (size_t)(op - lowPrefix)) {
					/* overlap copy */
					BYTE * const endOfMatch = op + restSize;
					const BYTE *copyFrom = lowPrefix;

					while (op < endOfMatch)
						*op++ = *copyFrom++;
				} else {
					memcpy(op, lowPrefix, restSize);
					op += restSize;
				}
			}
			continue;
		}

		/* copy match within block */
		cpy = op + length;

		/*
		 * partialDecoding :
		 * may not respect endBlock parsing restrictions
		 */
		assert(op <= oend);
		if (partialDecoding &&
		    (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			size_t const mlen = min(length, (size_t)(oend - op));
			const BYTE * const matchEnd = match + mlen;
			BYTE * const copyEnd = op + mlen;

			if (matchEnd > op) {
				/* overlap copy */
				while (op < copyEnd)
					*op++ = *match++;
			} else {
				memcpy(op, match, mlen);
			}
			op = copyEnd;
			if (op == oend)
				break;
			continue;
		}

		if (unlikely(offset < 8)) {
			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += inc32table[offset];
			memcpy(op + 4, match, 4);
			match -= dec64table[offset];
		} else {
			LZ4_copy8(op, match);
			match += 8;
		}

		op += 8;

		if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);

			if (cpy > oend - LASTLITERALS) {
				/*
				 * Error : last LASTLITERALS bytes
				 * must be literals (uncompressed)
				 */
				goto _output_error;
			}

			if (op < oCopyLimit) {
				LZ4_wildCopy(op, match, oCopyLimit);
				match += oCopyLimit - op;
				op = oCopyLimit;
			}
			while (op < cpy)
				*op++ = *match++;
		} else {
			LZ4_copy8(op, match);
			if (length > 16)
				LZ4_wildCopy(op + 8, match + 8, cpy);
		}
		op = cpy; /* wildcopy correction */
	}

	/* end of decoding */
	if (endOnInput) {
		/* Nb of output bytes decoded */
		return (int) (((char *)op) - dst);
	} else {
		/* Nb of input bytes read */
		return (int) (((const char *)ip) - src);
	}

	/* Overflow error detected */
_output_error:
	return (int) (-(((const char *)ip) - src)) - 1;
}

int LZ4_decompress_safe(const char *source, char *dest,
			int compressedSize, int maxDecompressedSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxDecompressedSize,
				      endOnInputSize, decode_full_block,
				      noDict, (BYTE *)dest, NULL, 0);
}

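/*
 * Decompresses at most dstCapacity bytes (further capped to
 * targetOutputSize) from the block at src into dst, returning the number
 * of bytes produced, or a negative value on malformed input. The result
 * may be smaller than targetOutputSize if the block ends earlier.
 */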
int LZ4_decompress_safe_partial(const char *src, char *dst,
	int compressedSize, int targetOutputSize, int dstCapacity)
{
	dstCapacity = min(targetOutputSize, dstCapacity);
	return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
				      endOnInputSize, partial_decode,
				      noDict, (BYTE *)dst, NULL, 0);
}
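
/*
 * Usage sketch (illustrative only; the caller-side names below are
 * hypothetical, not part of this file):
 *
 *	char out[4096];
 *	int n = LZ4_decompress_safe(comp, out, comp_len, sizeof(out));
 *
 * A negative n signals corrupt or truncated input; otherwise n bytes of
 * decompressed data have been written to out.
 */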