/* --- Fragmentary extract of LZ4 (lz4.c, Yann Collet) --- */
/* NOTE(review): the leading integer on each code line is the original file's
 * line number left behind by extraction — it is not C. Intervening lines
 * (#include, #else, #endif, declarations) are missing from this view, so the
 * preprocessor blocks below are incomplete fragments. */

/* Memory-usage tuning knob: the hash table holds 1<<(MEMORY_USAGE-2) entries
 * (see HASH_LOG further down); larger values trade memory for compression ratio. */
42 #define MEMORY_USAGE 14
/* 64-bit architecture detection (the matching #define LZ4_ARCH64 lines are
 * outside this view). */
56 #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode
/* Big-endian detection: trust glibc's <endian.h> first (included in a missing
 * line), then generic compiler endianness macros, then a list of CPU/OS
 * macros for known big-endian targets. */
64 #if defined (__GLIBC__)
66 # if (__BYTE_ORDER == __BIG_ENDIAN)
67 # define LZ4_BIG_ENDIAN 1
69 #elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
70 # define LZ4_BIG_ENDIAN 1
/* NOTE(review): __powerpc__ is tested twice in the list below — harmless
 * redundancy worth deduplicating upstream. */
71 #elif defined(__sparc) || defined(__sparc__) \
72 || defined(__ppc__) || defined(_POWER) || defined(__powerpc__) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) \
73 || defined(__hpux) || defined(__hppa) \
74 || defined(_MIPSEB) || defined(__s390__)
75 # define LZ4_BIG_ENDIAN 1
/* ARM targets advertising unaligned-access support may skip the packed-struct
 * workaround used by the A16/A32/A64 accessors below. */
83 #if defined(__ARM_FEATURE_UNALIGNED)
84 # define LZ4_FORCE_UNALIGNED_ACCESS 1
88 #if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
89 # define LZ4_FORCE_SW_BITCOUNT
/* 'restrict' is C99; on pre-C99 compilers define it away (this is the #else
 * branch — the C99 branch body is in missing lines). */
96 #if __STDC_VERSION__ >= 199901L // C99
99 # define restrict // Disable restrict
/* e.g. GCC 4.7 -> 407; gates __builtin_expect and bit-scan builtins below. */
102 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
/* MSVC: enable hardware bit-scan intrinsics (64-bit variants only when
 * LZ4_ARCH64; the #else with the 32-bit pair follows at lines 110-111). */
104 #ifdef _MSC_VER // Visual Studio
106 # if LZ4_ARCH64 // 64-bit
107 # pragma intrinsic(_BitScanForward64) // For Visual 2005
108 # pragma intrinsic(_BitScanReverse64) // For Visual 2005
110 # pragma intrinsic(_BitScanForward) // For Visual 2005
111 # pragma intrinsic(_BitScanReverse) // For Visual 2005
/* 16-bit byte swap: MSVC intrinsic vs. portable shift/mask fallback (the
 * #else between these two definitions is in a missing line). */
116 # define lz4_bswap16(x) _byteswap_ushort(x)
118 # define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
/* Branch-prediction hints: __builtin_expect on GCC>=3.2 / ICC>=8 / clang,
 * no-op elsewhere. */
121 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
122 # define expect(expr,value) (__builtin_expect ((expr),(value)) )
124 # define expect(expr,value) (expr)
127 #define likely(expr) expect((expr) != 0, 1)
128 #define unlikely(expr) expect((expr) != 0, 0)
/* Fixed-width integer aliases: old MSVC lacks <stdint.h>, so map to __intN
 * there; otherwise use the C99 stdint typedefs. NOTE(review): the S32/S64
 * signed aliases and the #else/#endif lines are missing from this view. */
142 #if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively
143 # define BYTE unsigned __int8
144 # define U16 unsigned __int16
145 # define U32 unsigned __int32
147 # define U64 unsigned __int64
150 # define BYTE uint8_t
151 # define U16 uint16_t
152 # define U32 uint32_t
154 # define U64 uint64_t
/* Unless the target handles unaligned access natively, wrap the Uxx types in
 * pack(1) structs (struct bodies U16_S/U32_S/U64_S are in missing lines
 * 159-164) so the accessors below can dereference arbitrary addresses. */
157 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
158 # pragma pack(push, 1)
165 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
/* Unaligned load/store of 64/32/16 bits at address x via the packed member 'v'. */
169 #define A64(x) (((U64_S *)(x))->v)
170 #define A32(x) (((U32_S *)(x))->v)
171 #define A16(x) (((U16_S *)(x))->v)
/* Hash table sizing, derived from the MEMORY_USAGE knob
 * (14 -> HASH_LOG 12 -> 4096 entries). */
179 #define HASH_LOG (MEMORY_USAGE-2)
180 #define HASHTABLESIZE (1 << HASH_LOG)
181 #define HASH_MASK (HASHTABLESIZE - 1)
/* Incompressible-data heuristic: each failed match attempt increases the
 * forward skip (step = attempts >> SKIPSTRENGTH in the compressors below). */
189 #define NOTCOMPRESSIBLE_DETECTIONLEVEL 6
190 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_DETECTIONLEVEL>2?NOTCOMPRESSIBLE_DETECTIONLEVEL:2)
191 #define STACKLIMIT 13
192 #define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
/* Format limits: every block ends with at least LASTLITERALS literal bytes,
 * and no match may begin within MFLIMIT bytes of the input end
 * (COPYLENGTH/MINMATCH are defined in lines missing from this view). */
194 #define LASTLITERALS 5
195 #define MFLIMIT (COPYLENGTH+MINMATCH)
196 #define MINLENGTH (MFLIMIT+1)
/* Match offsets are 16-bit little-endian (MAXD_LOG defined in a missing
 * line; presumably 16 — TODO confirm). */
199 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
/* Token byte layout: high RUN_BITS bits hold the literal-run length, low
 * ML_BITS bits the match length (ML_BITS defined in a missing line). */
202 #define ML_MASK ((1U<<ML_BITS)-1)
203 #define RUN_BITS (8-ML_BITS)
204 #define RUN_MASK ((1U<<RUN_BITS)-1)
/* Architecture-dependent copy primitives: 8 bytes per step on 64-bit, two
 * 4-byte steps per "packet" on 32-bit. LZ4_WILDCOPY may overrun its end
 * pointer by up to one packet, hence the guarded LZ4_SECURECOPY on 64-bit.
 * NOTE(review): the #else/#endif lines and the 64-bit HTYPE definition are
 * in lines missing from this view. */
210 #if LZ4_ARCH64 // 64-bit
214 # define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
215 # define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
216 # define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
218 # define INITBASE(base) const BYTE* const base = ip
223 # define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
224 # define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
225 # define LZ4_SECURECOPY LZ4_WILDCOPY
226 # define HTYPE const BYTE*
227 # define INITBASE(base) const int base = 0
/* Match offsets are stored little-endian in the stream; byte-swap on
 * big-endian targets. READ computes d = s - offset; WRITE stores and
 * advances p by 2. */
230 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
231 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
232 # define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
233 #else // Little Endian
234 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
235 # define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
/* Multiplicative (Fibonacci) hash of the next MINMATCH input bytes, keeping
 * the top HASH_LOG bits; 2654435761 ~= 2^32 / golden ratio. */
251 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
252 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
/* WILDCOPY: packet-sized copies until d >= e — may write past e, so callers
 * must leave slack. BLINDCOPY: copy exactly l bytes, then set d past them. */
253 #define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
254 #define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+l; LZ4_WILDCOPY(s,d,e); d=e; }
/* Body fragments of the common-bytes counters (function signatures are in
 * missing lines; upstream names are LZ4_NbCommonBytes for 64-bit and a
 * 32-bit sibling). Given a nonzero XOR-difference word, they return how many
 * leading bytes match: leading zero bytes on big-endian targets, trailing
 * zero bytes on little-endian. Each variant: MSVC intrinsic, then GCC>=3.4
 * builtin, then a portable software fallback; >>3 converts a bit count to
 * a byte count. */
/* -- 64-bit word, big-endian target -- */
264 #if defined(LZ4_BIG_ENDIAN)
265 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
267 _BitScanReverse64( &r, val );
269 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
270 return (__builtin_clzll(val) >> 3);
/* software fallback: binary search over the high bytes (tail in missing lines) */
273 if (!(val>>32)) { r=4; }
else { r=0; val>>=32; }
274 if (!(val>>16)) { r+=2; val>>=8; }
else { val>>=24; }
/* -- 64-bit word, little-endian target -- */
279 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
281 _BitScanForward64( &r, val );
283 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
284 return (__builtin_ctzll(val) >> 3);
/* software fallback: isolate lowest set bit (val & -val), then De Bruijn
 * multiply-and-lookup maps it to its byte position */
286 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
287 return DeBruijnBytePos[((
U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
/* -- 32-bit word, big-endian target -- */
296 #if defined(LZ4_BIG_ENDIAN)
297 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
299 _BitScanReverse( &r, val );
301 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
302 return (__builtin_clz(val) >> 3);
305 if (!(val>>16)) { r=2; val>>=8; }
else { r=0; val>>=24; }
/* -- 32-bit word, little-endian target -- */
310 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
312 _BitScanForward( &r, val );
314 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
315 return (__builtin_ctz(val) >> 3);
317 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
318 return DeBruijnBytePos[((
U32)((val & -(
S32)val) * 0x077CB531U)) >> 27];
/* Interior fragment of LZ4_compressCtx(): the greedy single-pass compressor
 * with an output-size bound. The signature, local declarations (ip, op,
 * HashTable, mflimit, token, len, h, srt, skipStrength, ...) and much of the
 * control flow sit in lines missing from this view; several statements below
 * are also wrapped across two physical lines by the extraction. */
/* anchor marks the start of the pending (not-yet-emitted) literal run. */
352 const BYTE* anchor = ip;
/* iend/oend: one-past-the-end of input and of the bounded output buffer. */
353 const BYTE*
const iend = ip + isize;
355 #define matchlimit (iend - LASTLITERALS)
358 BYTE*
const oend = op + maxOutputSize;
/* Inputs too short to contain a match are emitted as pure literals. */
366 if (isize<
MINLENGTH)
goto _last_literals;
/* Clear the hash table. NOTE(review): HashTable presumably aliases
 * srt->hashTable — the declaration is in a missing line; confirm. */
374 memset((
void*)HashTable, 0,
sizeof(srt->
hashTable));
/* Match search with acceleration: after repeated failures, 'step' grows
 * (findMatchAttempts++ >> skipStrength), skipping ahead through data that
 * looks incompressible. */
387 int findMatchAttempts = (1U << skipStrength) + 3;
388 const BYTE* forwardIp = ip;
395 int step = findMatchAttempts++ >> skipStrength;
397 forwardIp = ip + step;
399 if unlikely(forwardIp > mflimit) {
goto _last_literals; }
/* Probe the hash table for a previous occurrence, then record this one. */
402 ref = base + HashTable[h];
403 HashTable[h] = ip - base;
/* "Catch-up": extend the found match backwards over equal preceding bytes. */
408 while ((ip>anchor) && (ref>(
BYTE*)source) &&
unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
/* Emit literal run: length goes into the token's high bits; lengths >=
 * RUN_MASK continue in 255-valued extension bytes. */
411 length = (int)(ip - anchor);
421 do { *op++ = 255; len -= 255; }
while (len>254);
423 memcpy(op, anchor, length);
430 else *token = (length<<
ML_BITS);
437 for(; len > 254 ; len-=255) *op++ = 255;
440 else *token = (length<<
ML_BITS);
/* Emit match length into the token's low bits; long matches emit extension
 * bytes two at a time (510 per pair of 255s). */
466 length = (int)(ip - anchor);
472 for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; }
473 if (length > 254) { length-=255; *op++ = 255; }
474 *op++ = (
BYTE)length;
476 else *token += length;
/* Stop matching near the input end (last MFLIMIT bytes are literals-only). */
479 if (ip > mflimit) { anchor = ip;
break; }
/* Fast path: if the very next position already matches within range, emit a
 * zero token and continue the match loop immediately. */
487 if ((ref > ip - (
MAX_DISTANCE + 1)) && (
A32(ref) ==
A32(ip))) { token = op++; *token=0;
goto _next_match; }
/* Final literals: verify they fit in the bounded output (including the run
 * header and its extension bytes), else report failure with 0. */
497 int lastRun = (int)(iend - anchor);
498 if (((
char*)op - dest) + lastRun + 1 + ((lastRun+255-
RUN_MASK)/255) > (
U32)maxOutputSize)
return 0;
500 else *op++ = (lastRun<<
ML_BITS);
501 memcpy(op, anchor, iend - anchor);
/* Return the number of bytes written into dest. */
506 return (
int) (((
char*)op)-dest);
/* Specialization for inputs below ~64 KB: positions then fit in 16 bits
 * relative to the input start, so the same memory holds a table twice as
 * large (HASH_LOG+1) for better match finding. */
512 #define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
513 #define HASHLOG64K (HASH_LOG+1)
514 #define HASH64KTABLESIZE (1U<<HASHLOG64K)
515 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
516 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
/* Interior fragment of LZ4_compress64kCtx(): same greedy algorithm as
 * LZ4_compressCtx, specialized for inputs < LZ4_64KLIMIT. Key differences
 * visible here: base is the input start and hash entries are 16-bit offsets,
 * and the next-match fast path needs no MAX_DISTANCE check (any 16-bit
 * offset is in range). Signature and many statements are in missing lines. */
531 const BYTE* anchor = ip;
/* base = input start: all table entries are (U16)(ip - base). */
532 const BYTE*
const base = ip;
533 const BYTE*
const iend = ip + isize;
/* NOTE(review): matchlimit is re-#defined here; presumably #undef'd between
 * the two functions in a missing line — confirm. */
535 #define matchlimit (iend - LASTLITERALS)
538 BYTE*
const oend = op + maxOutputSize;
/* Too-short inputs are emitted as literals only. */
546 if (isize<
MINLENGTH)
goto _last_literals;
/* Clear the (srt-held) hash table — see NOTE in LZ4_compressCtx. */
554 memset((
void*)HashTable, 0,
sizeof(srt->
hashTable));
/* Accelerated match search (same skipStrength scheme as above). */
566 int findMatchAttempts = (1U << skipStrength) + 3;
567 const BYTE* forwardIp = ip;
574 int step = findMatchAttempts++ >> skipStrength;
576 forwardIp = ip + step;
578 if (forwardIp > mflimit) {
goto _last_literals; }
581 ref = base + HashTable[h];
/* 16-bit offset storage — valid because isize < 64 KB. */
582 HashTable[h] = (
U16)(ip - base);
/* Loop until a 4-byte match is found at ref. */
584 }
while (
A32(ref) !=
A32(ip));
/* Catch-up: extend the match backwards. */
587 while ((ip>anchor) && (ref>(
BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
/* Literal-run encoding + copy (255-byte extension scheme as above). */
590 length = (int)(ip - anchor);
600 do { *op++ = 255; len -= 255; }
while (len>254);
602 memcpy(op, anchor, length);
609 else *token = (length<<
ML_BITS);
611 if (length>=(
int)
RUN_MASK) { *token=(RUN_MASK<<
ML_BITS); len = length-
RUN_MASK;
for(; len > 254 ; len-=255) *op++ = 255; *op++ = (
BYTE)len; }
612 else *token = (length<<
ML_BITS);
/* Match-length encoding into the token's low bits plus extension bytes. */
638 len = (int)(ip - anchor);
640 if (len>=(
int)
ML_MASK) { *token+=
ML_MASK; len-=
ML_MASK;
for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; }
if (len > 254) { len-=255; *op++ = 255; } *op++ = (
BYTE)len; }
/* Stop matching near the input end. */
644 if (ip > mflimit) { anchor = ip;
break; }
/* Immediate next-match fast path (no distance check needed here). */
652 if (
A32(ref) ==
A32(ip)) { token = op++; *token=0;
goto _next_match; }
/* Final literals: bound check against oend, then run header + copy. */
662 int lastRun = (int)(iend - anchor);
663 if (op + lastRun + 1 + (lastRun-
RUN_MASK+255)/255 > oend)
return 0;
665 else *op++ = (lastRun<<
ML_BITS);
666 memcpy(op, anchor, iend - anchor);
/* Return the number of bytes written into dest. */
671 return (
int) (((
char*)op)-dest);
/* Fragment of the heap-mode LZ4_compress wrapper: allocates the refTables
 * context and dispatches to the general compressor (the 64K branch, the
 * free(ctx), and the condition are in missing lines).
 * NOTE(review): no NULL check on malloc is visible here — confirm the
 * missing lines guard it before use. The context is passed by address
 * (&ctx); the callee's signature is not in this view. */
681 void* ctx = malloc(
sizeof(
struct refTables));
685 else result =
LZ4_compressCtx(&ctx, source, dest, isize, maxOutputSize);
/* Interior fragment of LZ4_uncompress(): decodes a stream whose exact
 * decompressed size (osize) is known; returns the count of input bytes
 * consumed, or a negative value (negated input position) on malformed data.
 * Most of the decode loop is in missing lines. */
724 BYTE*
const oend = op + osize;
/* Correction tables for overlapping matches (offset < 8): dec32table adjusts
 * the source pointer after the first 4-byte copy; dec64table after the
 * 8-byte step. NOTE: the -1 stored into a size_t slot deliberately wraps. */
729 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
731 size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
/* Literal-run length: token high bits, extended by 255-valued bytes. */
742 if ((length=(token>>
ML_BITS)) ==
RUN_MASK) {
size_t len;
for (;(len=*ip++)==255;length+=255){} length += len; }
/* Final literal run must land exactly on oend, else the stream is corrupt. */
748 if (cpy != oend)
goto _output_error;
749 memcpy(op, ip, length);
/* Match offset must not reach before the start of the output buffer. */
757 if unlikely(ref < (
BYTE*
const)dest)
goto _output_error;
/* Match length: token low bits plus 255-extensions. */
760 if ((length=(token&
ML_MASK)) == ML_MASK) {
for (;*ip==255;length+=255) {ip++;} length += *ip++; }
/* Overlap-safe copy start: 4 bytes, then pointer corrections via the tables. */
766 size_t dec64 = dec64table[op-ref];
774 op += 4, ref += 4; ref -= dec32table[op-ref];
/* Near the output end, finish byte by byte (no wild overrun allowed). */
784 while(op<cpy) *op++=*ref++;
/* Success: input bytes read. */
794 return (
int) (((
char*)ip)-source);
/* Error: negative of the input offset where decoding failed. */
798 return (
int) (-(((
char*)ip)-source));
/* Interior fragment of LZ4_uncompress_unknownOutputSize(): decodes isize
 * compressed bytes into at most maxOutputSize output bytes; returns the
 * decompressed length, or a negative value (negated input position) on
 * malformed/overlong input. Unlike LZ4_uncompress, it must bound-check both
 * the input (iend) and output (oend) as it goes. Much of the loop is in
 * missing lines. */
810 const BYTE*
const iend = ip + isize;
814 BYTE*
const oend = op + maxOutputSize;
/* Overlap-correction tables — see LZ4_uncompress for their meaning. */
817 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
819 size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
/* Empty input is malformed. */
824 if unlikely(ip==iend)
goto _output_error;
/* Literal-run length extension bytes, guarded against running off the input. */
837 while (
likely(ip<iend) && (s==255)) { s=*ip++; length += s; }
/* Final literal run: must fit the output and consume exactly the rest of
 * the input. */
844 if (cpy > oend)
goto _output_error;
845 if (ip+length != iend)
goto _output_error;
846 memcpy(op, ip, length);
/* Match offset must stay inside the output buffer. */
854 if unlikely(ref < (
BYTE*
const)dest)
goto _output_error;
/* Match-length extension loop (surrounding lines missing from this view). */
857 if ((length=(token&
ML_MASK)) == ML_MASK)
863 if (s==255)
continue;
/* Overlap-safe match copy start, as in LZ4_uncompress. */
872 size_t dec64 = dec64table[op-ref];
880 op += 4, ref += 4; ref -= dec32table[op-ref];
/* Byte-by-byte tail near the output end. */
890 while(op<cpy) *op++=*ref++;
/* Success: number of output bytes produced. */
900 return (
int) (((
char*)op)-dest);
/* Error: negative of the input offset where decoding failed. */
904 return (
int) (-(((
char*)ip)-source));