//**************************************\r
// 32 or 64 bits ?\r
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode\r
-#define LZ4_ARCH64 1\r
+# define LZ4_ARCH64 1\r
#else\r
-#define LZ4_ARCH64 0\r
+# define LZ4_ARCH64 0\r
#endif\r
\r
// Little Endian or Big Endian ?\r
+// Note : overwrite the below #define if you know your architecture endianness\r
#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )\r
-#define LZ4_BIG_ENDIAN 1\r
+# define LZ4_BIG_ENDIAN 1\r
#else\r
// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.\r
#endif\r
// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected\r
// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance\r
#if defined(__ARM_FEATURE_UNALIGNED)\r
-#define LZ4_FORCE_UNALIGNED_ACCESS 1\r
+# define LZ4_FORCE_UNALIGNED_ACCESS 1\r
#endif\r
\r
// Uncomment this parameter if your target system or compiler does not support hardware bit count\r
#if __STDC_VERSION__ >= 199901L // C99\r
/* "restrict" is a known keyword */\r
#else\r
-#define restrict // Disable restrict\r
+# define restrict // Disable restrict\r
#endif\r
\r
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\r
\r
#ifdef _MSC_VER // Visual Studio\r
-#define inline __forceinline // Visual is not C99, but supports some kind of inline\r
-#include <intrin.h> // _BitScanForward\r
+# define inline __forceinline // Visual is not C99, but supports some kind of inline\r
+# include <intrin.h> // _BitScanForward\r
+# if LZ4_ARCH64 // 64-bit\r
+# pragma intrinsic(_BitScanForward64) // For Visual 2005\r
+# pragma intrinsic(_BitScanReverse64) // For Visual 2005\r
+# else\r
+# pragma intrinsic(_BitScanForward) // For Visual 2005\r
+# pragma intrinsic(_BitScanReverse) // For Visual 2005\r
+# endif\r
#endif\r
\r
#ifdef _MSC_VER\r
-#define lz4_bswap16(x) _byteswap_ushort(x)\r
+# define lz4_bswap16(x) _byteswap_ushort(x)\r
#else\r
-#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))\r
+# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))\r
#endif\r
\r
#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)\r
-# define expect(expr,value) (__builtin_expect ((expr),(value)) )\r
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )\r
#else\r
-# define expect(expr,value) (expr)\r
+# define expect(expr,value) (expr)\r
#endif\r
\r
#define likely(expr) expect((expr) != 0, 1)\r
// Basic Types\r
//**************************************\r
#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively\r
-#define BYTE unsigned __int8\r
-#define U16 unsigned __int16\r
-#define U32 unsigned __int32\r
-#define S32 __int32\r
-#define U64 unsigned __int64\r
+# define BYTE unsigned __int8\r
+# define U16 unsigned __int16\r
+# define U32 unsigned __int32\r
+# define S32 __int32\r
+# define U64 unsigned __int64\r
#else\r
-#include <stdint.h>\r
-#define BYTE uint8_t\r
-#define U16 uint16_t\r
-#define U32 uint32_t\r
-#define S32 int32_t\r
-#define U64 uint64_t\r
+# include <stdint.h>\r
+# define BYTE uint8_t\r
+# define U16 uint16_t\r
+# define U32 uint32_t\r
+# define S32 int32_t\r
+# define U64 uint64_t\r
#endif\r
\r
#ifndef LZ4_FORCE_UNALIGNED_ACCESS\r
-#pragma pack(push, 1)\r
+# pragma pack(push, 1)\r
#endif\r
\r
typedef struct _U16_S { U16 v; } U16_S;\r
typedef struct _U64_S { U64 v; } U64_S;\r
\r
#ifndef LZ4_FORCE_UNALIGNED_ACCESS\r
-#pragma pack(pop)\r
+# pragma pack(pop)\r
#endif\r
\r
#define A64(x) (((U64_S *)(x))->v)\r
// Architecture-specific macros\r
//**************************************\r
#if LZ4_ARCH64 // 64-bit\r
-#define STEPSIZE 8\r
-#define UARCH U64\r
-#define AARCH A64\r
-#define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;\r
-#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)\r
-#define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)\r
-#define HTYPE U32\r
-#define INITBASE(base) const BYTE* const base = ip\r
+# define STEPSIZE 8\r
+# define UARCH U64\r
+# define AARCH A64\r
+# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;\r
+# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)\r
+# define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)\r
+# define HTYPE U32\r
+# define INITBASE(base) const BYTE* const base = ip\r
#else // 32-bit\r
-#define STEPSIZE 4\r
-#define UARCH U32\r
-#define AARCH A32\r
-#define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;\r
-#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);\r
-#define LZ4_SECURECOPY LZ4_WILDCOPY\r
-#define HTYPE const BYTE*\r
-#define INITBASE(base) const int base = 0\r
+# define STEPSIZE 4\r
+# define UARCH U32\r
+# define AARCH A32\r
+# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;\r
+# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);\r
+# define LZ4_SECURECOPY LZ4_WILDCOPY\r
+# define HTYPE const BYTE*\r
+# define INITBASE(base) const int base = 0\r
#endif\r
\r
#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))\r
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }\r
-#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }\r
+# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }\r
+# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }\r
#else // Little Endian\r
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }\r
-#define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }\r
+# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }\r
+# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }\r
#endif\r
\r
\r
cpy = op+length;\r
if unlikely(cpy>oend-COPYLENGTH)\r
{\r
- if (cpy > oend) goto _output_error;\r
+ if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer\r
memcpy(op, ip, length);\r
ip += length;\r
break; // Necessarily EOF\r
\r
// get offset\r
LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;\r
- if (ref < (BYTE* const)dest) goto _output_error;\r
+ if (ref < (BYTE* const)dest) goto _output_error; // Error : offset creates reference outside destination buffer\r
\r
// get matchlength\r
if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }\r
cpy = op + length - (STEPSIZE-4);\r
if (cpy>oend-COPYLENGTH)\r
{\r
- if (cpy > oend) goto _output_error;\r
+ if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer\r
LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));\r
while(op<cpy) *op++=*ref++;\r
op=cpy;\r
cpy = op+length;\r
if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))\r
{\r
- if (cpy > oend) goto _output_error;\r
- if (ip+length > iend) goto _output_error;\r
+ if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer\r
+ if (ip+length > iend) goto _output_error; // Error : request to read beyond source buffer\r
memcpy(op, ip, length);\r
op += length;\r
ip += length;\r
- if (ip<iend) goto _output_error;\r
+ if (ip<iend) goto _output_error; // Error : LZ4 format violation\r
break; // Necessarily EOF, due to parsing restrictions\r
}\r
LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;\r
\r
// get offset\r
LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;\r
- if (ref < (BYTE* const)dest) goto _output_error;\r
+ if (ref < (BYTE* const)dest) goto _output_error; // Error : offset creates reference outside of destination buffer\r
\r
// get matchlength\r
if ((length=(token&ML_MASK)) == ML_MASK) { while (ip<iend) { int s = *ip++; length +=s; if (s==255) continue; break; } }\r
cpy = op + length - (STEPSIZE-4);\r
if (cpy>oend-COPYLENGTH)\r
{\r
- if (cpy > oend) goto _output_error;\r
+ if (cpy > oend) goto _output_error; // Error : request to write outside of destination buffer\r
LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));\r
while(op<cpy) *op++=*ref++;\r
op=cpy;\r