*/\r
\r
//**************************************\r
-// Compilation Directives\r
+// Compiler Options\r
//**************************************\r
\r
+// Under Linux at least, expose the 64-bit file functions (stat64, ftello64, ...)
+#define _LARGEFILE64_SOURCE\r
+\r
+// MSVC does not support S_ISREG\r
+#ifndef S_ISREG\r
+#define S_ISREG(x) (((x) & S_IFMT) == S_IFREG)\r
+#endif\r
+\r
\r
//**************************************\r
// Includes\r
#include <stdio.h> // fprintf, fopen, ftello64\r
#include <stdlib.h> // malloc\r
#include <sys/timeb.h> // timeb\r
+#include <sys/types.h> // stat64\r
+#include <sys/stat.h> // stat64\r
#include "lz4.h"\r
\r
\r
#define CHUNKSIZE (8<<20)\r
#define MAX_NB_CHUNKS ((MAX_MEM / CHUNKSIZE) + 1)\r
\r
- \r
+\r
//**************************************\r
// Local structures\r
//**************************************\r
// Private functions\r
//*********************************************************\r
\r
-\r
static int BMK_GetMilliStart()\r
{\r
// Supposed to be portable\r
}\r
\r
\r
-static U64 BMK_GetFileSize(FILE* f)\r
+static U64 BMK_GetFileSize(char* infilename)\r
{\r
- U64 r;\r
-#ifdef _MSC_VER\r
- r = _fseeki64(f, 0L, SEEK_END);\r
- r = (U64) _ftelli64(f);\r
- _fseeki64(f, 0L, SEEK_SET);\r
+ int r;\r
+#if defined(_MSC_VER)\r
+ struct _stat64 statbuf;\r
+ r = _stat64(infilename, &statbuf);\r
#else\r
- r = (U64) fseeko64(f, 0LL, SEEK_END);\r
- r = (U64) ftello64(f);\r
- fseeko64(f, 0LL, SEEK_SET);\r
+ struct stat statbuf;\r
+ r = stat(infilename, &statbuf);\r
#endif\r
- return r;\r
+ if (r || !S_ISREG(statbuf.st_mode)) return 0; // No good...\r
+ return (U64)statbuf.st_size;\r
}\r
\r
\r
// Public function\r
//*********************************************************\r
\r
-int BMK_benchFile(char** fileNamesTable, int nbFiles) \r
+int BMK_benchFile(char** fileNamesTable, int nbFiles)\r
{\r
int fileIdx=0;\r
FILE* fileIn;\r
U64 totalz = 0;\r
double totalc = 0.;\r
double totald = 0.;\r
- \r
+\r
\r
// Init\r
compP.compressionFunction = LZ4_compress;\r
}\r
\r
// Memory allocation & restrictions\r
- largefilesize = BMK_GetFileSize(fileIn);\r
+ largefilesize = BMK_GetFileSize(infilename);\r
benchedsize = (size_t) BMK_findMaxMem(largefilesize) / 2;\r
if ((U64)benchedsize > largefilesize) benchedsize = (size_t)largefilesize;\r
if (benchedsize < largefilesize)\r
// Alloc\r
in_buff = malloc((size_t )benchedsize);\r
nbChunks = (benchedsize / CHUNKSIZE) + 1;\r
- maxCChunkSize = CHUNKSIZE + CHUNKSIZE/255 + 64;\r
+ maxCChunkSize = LZ4_compressBound(CHUNKSIZE);\r
out_buff_size = nbChunks * maxCChunkSize;\r
out_buff = malloc((size_t )out_buff_size);\r
\r
\r
for (loopNb = 1; loopNb <= NBLOOPS; loopNb++)\r
{\r
- // Compression \r
+ // Compression\r
DISPLAY("%1i-%-14.14s : %9i ->\r", loopNb, infilename, (int)benchedsize);\r
{ size_t i; for (i=0; i<benchedsize; i++) out_buff[i]=(char)i; } // warmimg up memory\r
- \r
+\r
nb_loops = 0;\r
milliTime = BMK_GetMilliStart();\r
while(BMK_GetMilliStart() == milliTime);\r
while(BMK_GetMilliSpan(milliTime) < TIMELOOP) \r
{\r
for (chunkNb=0; chunkNb<nbChunks; chunkNb++) \r
- chunkP[chunkNb].outputSize = compP.compressionFunction(chunkP[chunkNb].inputBuffer, chunkP[chunkNb].outputBuffer, chunkP[chunkNb].inputSize); \r
+ chunkP[chunkNb].outputSize = compP.compressionFunction(chunkP[chunkNb].inputBuffer, chunkP[chunkNb].outputBuffer, chunkP[chunkNb].inputSize);\r
nb_loops++;\r
}\r
milliTime = BMK_GetMilliSpan(milliTime);\r
\r
if ((double)milliTime < fastestC*nb_loops) fastestC = (double)milliTime/nb_loops;\r
- cSize=0; for (chunkNb=0; chunkNb<nbChunks; chunkNb++) cSize += chunkP[chunkNb].outputSize; \r
+ cSize=0; for (chunkNb=0; chunkNb<nbChunks; chunkNb++) cSize += chunkP[chunkNb].outputSize;\r
\r
DISPLAY("%1i-%-14.14s : %9i -> %9i (%5.2f%%), %6.1f MB/s\r", loopNb, infilename, (int)benchedsize, (int)cSize, (double)cSize/(double)benchedsize*100., (double)benchedsize / fastestC / 1000.);\r
\r
milliTime = BMK_GetMilliStart();\r
while(BMK_GetMilliStart() == milliTime);\r
milliTime = BMK_GetMilliStart();\r
- while(BMK_GetMilliSpan(milliTime) < TIMELOOP) \r
+ while(BMK_GetMilliSpan(milliTime) < TIMELOOP)\r
{\r
- for (chunkNb=0; chunkNb<nbChunks; chunkNb++) \r
- chunkP[chunkNb].outputSize = compP.decompressionFunction(chunkP[chunkNb].outputBuffer, chunkP[chunkNb].inputBuffer, chunkP[chunkNb].inputSize); \r
+ for (chunkNb=0; chunkNb<nbChunks; chunkNb++)\r
+ chunkP[chunkNb].outputSize = compP.decompressionFunction(chunkP[chunkNb].outputBuffer, chunkP[chunkNb].inputBuffer, chunkP[chunkNb].inputSize);\r
nb_loops++;\r
}\r
milliTime = BMK_GetMilliSpan(milliTime);\r
\r
if ((double)milliTime < fastestD*nb_loops) fastestD = (double)milliTime/nb_loops;\r
DISPLAY("%1i-%-14.14s : %9i -> %9i (%5.2f%%), %6.1f MB/s , %6.1f MB/s\r", loopNb, infilename, (int)benchedsize, (int)cSize, (double)cSize/(double)benchedsize*100., (double)benchedsize / fastestC / 1000., (double)benchedsize / fastestD / 1000.);\r
- \r
+\r
// CRC Checking\r
crcd = BMK_checksum(in_buff, benchedsize);\r
if (crcc!=crcd) { DISPLAY("\n!!! WARNING !!! %14s : Invalid Checksum : %x != %x\n", infilename, (unsigned)crcc, (unsigned)crcd); break; }\r
Redistribution and use in source and binary forms, with or without\r
modification, are permitted provided that the following conditions are\r
met:\r
- \r
+\r
* Redistributions of source code must retain the above copyright\r
notice, this list of conditions and the following disclaimer.\r
* Redistributions in binary form must reproduce the above\r
copyright notice, this list of conditions and the following disclaimer\r
in the documentation and/or other materials provided with the\r
distribution.\r
- \r
+\r
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB)\r
#define COMPRESSIONLEVEL 12\r
\r
-// NONCOMPRESSIBLE_CONFIRMATION :\r
+// NOTCOMPRESSIBLE_CONFIRMATION :\r
// Decreasing this value will make the algorithm skip faster data segments considered "incompressible"\r
// This may decrease compression ratio dramatically, but will be faster on incompressible data\r
// Increasing this value will make the algorithm search more before declaring a segment "incompressible"\r
// This could improve compression a bit, but will be slower on incompressible data\r
// The default value (6) is recommended\r
-#define NONCOMPRESSIBLE_CONFIRMATION 6\r
+#define NOTCOMPRESSIBLE_CONFIRMATION 6\r
\r
// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :\r
// This will provide a boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.\r
// CPU Feature Detection\r
//**************************************\r
// 32 or 64 bits ?\r
-#if (__x86_64__ || __x86_64 || __amd64__ || __amd64 || __ppc64__ || _WIN64 || __LP64__ || _LP64) // Detects 64 bits mode\r
-#define ARCH64 1\r
+#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode\r
+#define LZ4_ARCH64 1\r
#else\r
-#define ARCH64 0\r
+#define LZ4_ARCH64 0\r
#endif\r
\r
// Little Endian or Big Endian ? \r
-#if (__BIG_ENDIAN__ || _BIG_ENDIAN || _ARCH_PPC || __PPC__ || __PPC || PPC || __powerpc__ || __powerpc || powerpc || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )\r
-#define CPU_BIG_ENDIAN 1\r
+#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )\r
+#define LZ4_BIG_ENDIAN 1\r
#else\r
// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.\r
#endif\r
// Unaligned memory access ?\r
// This feature is automatically enabled for "common" CPU, such as x86.\r
// For others CPU, you may want to force this option manually to improve performance if your target CPU supports unaligned memory access\r
-#if (__ARM_FEATURE_UNALIGNED)\r
-#define CPU_UNALIGNED_ACCESS 1\r
+#if defined(__ARM_FEATURE_UNALIGNED)\r
+#define LZ4_FORCE_UNALIGNED_ACCESS 1\r
#endif\r
\r
// Uncomment this parameter if your target system does not support hardware bit count\r
#define inline __forceinline // Visual is not C99, but supports inline\r
#endif\r
\r
-#if (defined(__GNUC__) && (!(CPU_UNALIGNED_ACCESS)))\r
+#if (defined(__GNUC__) && (!defined(LZ4_FORCE_UNALIGNED_ACCESS)))\r
#define _PACKED __attribute__ ((packed))\r
#else\r
#define _PACKED\r
// Constants\r
//**************************************\r
#define MINMATCH 4\r
-#define SKIPSTRENGTH (NONCOMPRESSIBLE_CONFIRMATION>2?NONCOMPRESSIBLE_CONFIRMATION:2)\r
+#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2)\r
#define STACKLIMIT 13\r
#define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).\r
#define COPYLENGTH 8\r
\r
\r
//**************************************\r
-// Local structures\r
-//**************************************\r
-struct refTables\r
-{\r
- const BYTE* hashTable[HASHTABLESIZE];\r
-};\r
-\r
-typedef struct _U64_S\r
-{\r
- U64 v;\r
-} _PACKED U64_S;\r
-\r
-typedef struct _U32_S\r
-{\r
- U32 v;\r
-} _PACKED U32_S;\r
-\r
-typedef struct _U16_S\r
-{\r
- U16 v;\r
-} _PACKED U16_S;\r
-\r
-#define A64(x) (((U64_S *)(x))->v)\r
-#define A32(x) (((U32_S *)(x))->v)\r
-#define A16(x) (((U16_S *)(x))->v)\r
-\r
-\r
-//**************************************\r
// Architecture-specific macros\r
//**************************************\r
-#if ARCH64 // 64-bit\r
+#if LZ4_ARCH64 // 64-bit\r
#define STEPSIZE 8\r
#define UARCH U64\r
#define AARCH A64\r
#define INITBASE(base) const int base = 0\r
#endif\r
\r
-#if ((CPU_BIG_ENDIAN) && !(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))\r
+#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))\r
#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = bswap16(v); d = (s) - v; }\r
#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = bswap16(v); A16(p) = v; p+=2; }\r
#else // Little Endian\r
\r
\r
//**************************************\r
+// Local structures\r
+//**************************************\r
+struct refTables\r
+{\r
+ HTYPE hashTable[HASHTABLESIZE];\r
+};\r
+\r
+typedef struct _U64_S\r
+{\r
+ U64 v;\r
+} _PACKED U64_S;\r
+\r
+typedef struct _U32_S\r
+{\r
+ U32 v;\r
+} _PACKED U32_S;\r
+\r
+typedef struct _U16_S\r
+{\r
+ U16 v;\r
+} _PACKED U16_S;\r
+\r
+#define A64(x) (((U64_S *)(x))->v)\r
+#define A32(x) (((U32_S *)(x))->v)\r
+#define A16(x) (((U16_S *)(x))->v)\r
+\r
+\r
+//**************************************\r
// Macros\r
//**************************************\r
#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))\r
//****************************\r
// Private functions\r
//****************************\r
-#if ARCH64\r
+#if LZ4_ARCH64\r
\r
inline static int LZ4_NbCommonBytes (register U64 val)\r
{\r
-#if CPU_BIG_ENDIAN\r
+#if defined(LZ4_BIG_ENDIAN)\r
#if defined(_MSC_VER) && !defined(_FORCE_SW_BITCOUNT)\r
unsigned long r = 0;\r
_BitScanReverse64( &r, val );\r
\r
inline static int LZ4_NbCommonBytes (register U32 val)\r
{\r
-#if CPU_BIG_ENDIAN\r
+#if defined(LZ4_BIG_ENDIAN)\r
#if defined(_MSC_VER) && !defined(_FORCE_SW_BITCOUNT)\r
unsigned long r = 0;\r
_BitScanReverse( &r, val );\r
#endif\r
\r
\r
+//****************************\r
+// Public functions\r
+//****************************\r
+\r
+int LZ4_compressBound(int isize)\r
+{\r
+ return (isize + (isize/255) + 16);\r
+}\r
+\r
+\r
+\r
//******************************\r
-// Public Compression functions\r
+// Compression functions\r
//******************************\r
\r
int LZ4_compressCtx(void** ctx,\r
- const char* source, \r
+ const char* source,\r
char* dest,\r
int isize)\r
{ \r
} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));\r
\r
// Catch up\r
- while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } \r
+ while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }\r
\r
// Encode Literal length\r
length = ip - anchor;\r
ip += LZ4_NbCommonBytes(diff);\r
goto _endCount;\r
}\r
- if (ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }\r
+ if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }\r
if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }\r
if ((ip<matchlimit) && (*ref == *ip)) ip++;\r
_endCount:\r
- \r
+\r
// Encode MatchLength\r
len = (ip - anchor);\r
- if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } \r
+ if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }\r
else *token += len; \r
\r
// Test end of chunk\r
// Encode Last Literals\r
{\r
int lastRun = iend - anchor;\r
- if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } \r
+ if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }\r
else *op++ = (lastRun<<ML_BITS);\r
memcpy(op, anchor, iend - anchor);\r
op += iend-anchor;\r
- } \r
+ }\r
\r
// End\r
return (int) (((char*)op)-dest);\r
#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))\r
#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))\r
int LZ4_compress64kCtx(void** ctx,\r
- const char* source, \r
+ const char* source,\r
char* dest,\r
int isize)\r
-{ \r
+{\r
#if HEAPMODE\r
struct refTables *srt = (struct refTables *) (*ctx);\r
U16* HashTable;\r
U16 HashTable[HASH64KTABLESIZE] = {0};\r
#endif\r
\r
- const BYTE* ip = (BYTE*) source; \r
+ const BYTE* ip = (BYTE*) source;\r
const BYTE* anchor = ip;\r
const BYTE* const base = ip;\r
const BYTE* const iend = ip + isize;\r
#define matchlimit (iend - LASTLITERALS)\r
\r
BYTE* op = (BYTE*) dest;\r
- \r
+\r
int len, length;\r
const int skipStrength = SKIPSTRENGTH;\r
U32 forwardH;\r
\r
// First Byte\r
ip++; forwardH = LZ4_HASH64K_VALUE(ip);\r
- \r
+\r
// Main Loop\r
- for ( ; ; ) \r
+ for ( ; ; )\r
{\r
int findMatchAttempts = (1U << skipStrength) + 3;\r
const BYTE* forwardIp = ip;\r
} while (A32(ref) != A32(ip));\r
\r
// Catch up\r
- while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } \r
+ while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }\r
\r
// Encode Literal length\r
length = ip - anchor;\r
token = op++;\r
- if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } \r
+ if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }\r
else *token = (length<<ML_BITS);\r
\r
// Copy Literals\r
ip += LZ4_NbCommonBytes(diff);\r
goto _endCount;\r
}\r
- if (ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }\r
+ if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }\r
if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }\r
if ((ip<matchlimit) && (*ref == *ip)) ip++;\r
_endCount:\r
- \r
+\r
// Encode MatchLength\r
len = (ip - anchor);\r
- if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } \r
+ if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }\r
else *token += len; \r
\r
// Test end of chunk\r
if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }\r
\r
// Prepare next loop\r
- anchor = ip++; \r
+ anchor = ip++;\r
forwardH = LZ4_HASH64K_VALUE(ip);\r
}\r
\r
// Encode Last Literals\r
{\r
int lastRun = iend - anchor;\r
- if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } \r
+ if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }\r
else *op++ = (lastRun<<ML_BITS);\r
memcpy(op, anchor, iend - anchor);\r
op += iend-anchor;\r
\r
\r
\r
-int LZ4_compress(const char* source, \r
+int LZ4_compress(const char* source,\r
char* dest,\r
int isize)\r
{\r
// Decompression functions\r
//****************************\r
\r
-// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize() \r
+// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize()\r
// are safe against "buffer overflow" attack type.\r
// They will never write nor read outside of the provided input and output buffers.\r
// A corrupted input will produce an error result, a negative int, indicating the position of the error within input stream.\r
\r
-int LZ4_uncompress(const char* source, \r
+int LZ4_uncompress(const char* source,\r
char* dest,\r
int osize)\r
{ \r
BYTE* cpy;\r
\r
BYTE token;\r
- \r
+\r
int len, length;\r
size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};\r
\r
{\r
// get runlength\r
token = *ip++;\r
- if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } \r
+ if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }\r
\r
// copy literals\r
cpy = op+length;\r
- if (cpy>oend-COPYLENGTH) \r
- { \r
+ if (cpy>oend-COPYLENGTH)\r
+ {\r
if (cpy > oend) goto _output_error;\r
memcpy(op, ip, length);\r
ip += length;\r
if (ref < (BYTE* const)dest) goto _output_error; \r
\r
// get matchlength\r
- if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; } \r
+ if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }\r
\r
// copy repeated sequence\r
if (op-ref<STEPSIZE)\r
{\r
-#if ARCH64\r
+#if LZ4_ARCH64\r
size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};\r
size_t dec2 = dec2table[op-ref];\r
#else\r
\r
\r
int LZ4_uncompress_unknownOutputSize(\r
- const char* source, \r
+ const char* source,\r
char* dest,\r
int isize,\r
int maxOutputSize)\r
{\r
// get runlength\r
token = *ip++;\r
- if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } \r
+ if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }\r
\r
// copy literals\r
cpy = op+length;\r
- if (cpy>oend-COPYLENGTH) \r
- { \r
+ if (cpy>oend-COPYLENGTH)\r
+ {\r
if (cpy > oend) goto _output_error;\r
memcpy(op, ip, length);\r
op += length;\r
// copy repeated sequence\r
if (op-ref<STEPSIZE)\r
{\r
-#if ARCH64\r
+#if LZ4_ARCH64\r
size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};\r
size_t dec2 = dec2table[op-ref];\r
#else\r