lz4.c
/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2015, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : http://code.google.com/p/lz4
   - LZ4 source mirror : https://github.com/Cyan4973/lz4
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/


/**************************************
   Tuning parameters
**************************************/
/*
 * HEAPMODE :
 * Selects how the default compression functions allocate memory for their hash table :
 * in stack memory (0:default, fastest), or in heap memory (1:requires malloc()).
 */
#define HEAPMODE 0

/*
 * CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS :
 * By default, the source code expects the compiler to correctly optimize
 * 4-byte and 8-byte reads on architectures able to handle them efficiently.
 * This is not always the case. In some circumstances (ARM notably),
 * the compiler will issue cautious code even when the target is able to correctly handle unaligned memory accesses.
 *
 * You can force the compiler to use unaligned memory access by uncommenting the line below.
 * One of the following scenarios will happen :
 * 1 - Your target CPU correctly handles unaligned access, and the compiler was not optimizing for it (good case).
 *     You will witness large performance improvements (+50% and up).
 *     Keep the line uncommented and report your configuration to upstream (https://groups.google.com/forum/#!forum/lz4c) :
 *     the goal is to automatically detect such situations by adding your target CPU to an exception list.
 * 2 - Your target CPU correctly handles unaligned access, and the compiler was already optimizing for it.
 *     No change will be experienced.
 * 3 - Your target CPU handles unaligned access inefficiently.
 *     You will experience a performance loss. Comment the line back out.
 * 4 - Your target CPU does not handle unaligned access at all.
 *     The program will crash.
 * An automatic detection macro will be added to match your case within future versions of the library.
 */
/* #define CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS 1 */


/**************************************
   CPU Feature Detection
**************************************/
/*
 * Automated efficient unaligned memory access detection,
 * based on known hardware architectures.
 * This list will be updated thanks to feedback.
 */
#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
    || defined(__ARM_FEATURE_UNALIGNED) \
    || defined(__i386__) || defined(__x86_64__) \
    || defined(_M_IX86) || defined(_M_X64) \
    || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
    || (defined(_M_ARM) && (_M_ARM >= 7))
#  define LZ4_UNALIGNED_ACCESS 1
#else
#  define LZ4_UNALIGNED_ACCESS 0
#endif

/*
 * LZ4_FORCE_SW_BITCOUNT :
 * Define this parameter if your target system or compiler does not support hardware bit count.
 */
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
#  define LZ4_FORCE_SW_BITCOUNT
#endif


/**************************************
   Compiler Options
**************************************/
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
/* "restrict" is a known keyword */
#else
#  define restrict   /* Disable restrict */
#endif

#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#  include <intrin.h>
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4293)   /* disable: C4293: too large shift (32-bits) */
#else
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif   /* __STDC_VERSION__ */
#endif   /* _MSC_VER */

#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
#  define expect(expr,value)    (expr)
#endif

#define likely(expr)     expect((expr) != 0, 1)
#define unlikely(expr)   expect((expr) != 0, 0)


/**************************************
   Memory routines
**************************************/
#include <stdlib.h>   /* malloc, calloc, free */
#define ALLOCATOR(n,s) calloc(n,s)
#define FREEMEM        free
#include <string.h>   /* memset, memcpy */
#define MEM_INIT       memset


/**************************************
   Includes
**************************************/
#include "lz4.h"


/**************************************
   Basic Types
**************************************/
#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef   signed int        S32;
  typedef unsigned long long  U64;
#endif


/**************************************
   Reading and writing into memory
**************************************/
#define STEPSIZE sizeof(size_t)

static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }

static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}


static U16 LZ4_readLE16(const void* memPtr)
{
    if ((LZ4_UNALIGNED_ACCESS) && (LZ4_isLittleEndian()))
        return *(U16*)memPtr;
    else
    {
        const BYTE* p = memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}

static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if ((LZ4_UNALIGNED_ACCESS) && (LZ4_isLittleEndian()))
    {
        *(U16*)memPtr = value;
        return;
    }
    else
    {
        BYTE* p = memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}


static U16 LZ4_read16(const void* memPtr)
{
    if (LZ4_UNALIGNED_ACCESS)
        return *(U16*)memPtr;
    else
    {
        U16 val16;
        memcpy(&val16, memPtr, 2);
        return val16;
    }
}

static U32 LZ4_read32(const void* memPtr)
{
    if (LZ4_UNALIGNED_ACCESS)
        return *(U32*)memPtr;
    else
    {
        U32 val32;
        memcpy(&val32, memPtr, 4);
        return val32;
    }
}

static U64 LZ4_read64(const void* memPtr)
{
    if (LZ4_UNALIGNED_ACCESS)
        return *(U64*)memPtr;
    else
    {
        U64 val64;
        memcpy(&val64, memPtr, 8);
        return val64;
    }
}

static size_t LZ4_read_ARCH(const void* p)
{
    if (LZ4_64bits())
        return (size_t)LZ4_read64(p);
    else
        return (size_t)LZ4_read32(p);
}

static void LZ4_copy4(void* dstPtr, const void* srcPtr)
{
    if (LZ4_UNALIGNED_ACCESS)
    {
        *(U32*)dstPtr = *(U32*)srcPtr;
        return;
    }
    memcpy(dstPtr, srcPtr, 4);
}

static void LZ4_copy8(void* dstPtr, const void* srcPtr)
{
#if GCC_VERSION!=409   /* disabled on GCC 4.9, as it generates invalid opcode (crash) */
    if (LZ4_UNALIGNED_ACCESS)
    {
        if (LZ4_64bits())
            *(U64*)dstPtr = *(U64*)srcPtr;
        else
            ((U32*)dstPtr)[0] = ((U32*)srcPtr)[0],
            ((U32*)dstPtr)[1] = ((U32*)srcPtr)[1];
        return;
    }
#endif
    memcpy(dstPtr, srcPtr, 8);
}

/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = dstPtr;
    const BYTE* s = srcPtr;
    BYTE* e = dstEnd;
    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
}
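
/*
 * Editor's note (illustrative, not part of the original source) :
 * LZ4_wildCopy trades safety for speed : it always copies in 8-byte rounds,
 * so it may write up to 7 bytes past dstEnd. Callers must therefore reserve
 * slack at the end of the output buffer, which is what the MFLIMIT and
 * LASTLITERALS checks below guarantee. A minimal sketch of the contract :
 *
 *     BYTE buffer[length + 7];                       // 7 bytes of slack for the overrun
 *     LZ4_wildCopy(buffer, src, buffer + length);
 *     // bytes buffer[length] .. buffer[length+6] may now hold garbage
 */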


/**************************************
   Common Constants
**************************************/
#define MINMATCH 4

#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH+MINMATCH)
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
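
/*
 * Editor's note (illustrative, not part of the original source) :
 * each LZ4 sequence starts with a 1-byte token : the high 4 bits (RUN_MASK)
 * hold the literal length, the low 4 bits (ML_MASK) hold the match length
 * minus MINMATCH. The value 15 in either field means "more length bytes
 * follow". For example, a sequence with 3 literals and a 6-byte match is
 * encoded as :
 *
 *     token = (3 << ML_BITS) | (6 - MINMATCH)   // == 0x32
 *     followed by the 3 literal bytes, then a 2-byte little-endian offset
 */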


/**************************************
   Common Utils
**************************************/
#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/********************************
   Common functions
********************************/
static unsigned LZ4_NbCommonBytes (register size_t val)
{
    if (LZ4_isLittleEndian())
    {
        if (LZ4_64bits())
        {
#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (int)(r>>3);
#       elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        }
        else /* 32 bits */
        {
#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward( &r, (U32)val );
            return (int)(r>>3);
#       elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    }
    else   /* Big Endian CPU */
    {
        if (LZ4_64bits())
        {
#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        }
        else /* 32 bits */
        {
#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clz(val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
        }
    }
}
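
/*
 * Editor's note (illustrative, not part of the original source) :
 * on a little-endian CPU, LZ4_NbCommonBytes(diff) returns the number of
 * low-order zero *bytes* of diff, i.e. how many leading bytes two words
 * have in common. For example, with diff = 0x00FF0000 the two words agree
 * on their first 2 bytes : the trailing zero bit count is 16, and
 * 16 >> 3 == 2 common bytes.
 */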

static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    while (likely(pIn<pInLimit-(STEPSIZE-1)))
    {
        size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if (LZ4_64bits()) if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
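
/*
 * Editor's note (illustrative, not part of the original source) :
 * LZ4_count measures how far two byte sequences stay identical, comparing a
 * word at a time and using LZ4_NbCommonBytes on the first differing word.
 * A minimal sketch of what it computes :
 *
 *     const BYTE* a = (const BYTE*)"abcdefXY";
 *     const BYTE* b = (const BYTE*)"abcdefZZ";
 *     unsigned n = LZ4_count(a, b, a + 8);   // n == 6
 */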


#ifndef LZ4_COMMONDEFS_ONLY
/**************************************
   Local Constants
**************************************/
#define LZ4_HASHLOG   (LZ4_MEMORY_USAGE-2)
#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)   /* required as macro for static allocation */

static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;   /* Increase this value ==> compression runs slower on incompressible data */


/**************************************
   Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }


/**************************************
   Local Structures and types
**************************************/
typedef struct {
    U32 hashTable[HASH_SIZE_U32];
    U32 currentOffset;
    U32 initCheck;
    const BYTE* dictionary;
    const BYTE* bufferStart;
    U32 dictSize;
} LZ4_stream_t_internal;

typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;


/********************************
   Compression functions
********************************/

static U32 LZ4_hashSequence(U32 sequence, tableType_t tableType)
{
    if (tableType == byU16)
        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

static U32 LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(LZ4_read32(p), tableType); }

static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    switch (tableType)
    {
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
    }
}

static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
    if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
    { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
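
/*
 * Editor's note (illustrative, not part of the original source) :
 * LZ4_hashSequence is multiplicative (Fibonacci-style) hashing : 2654435761U
 * is a prime close to 2^32 / golden-ratio, so the multiply scrambles the
 * 4-byte sequence and the top LZ4_HASHLOG bits index the hash table.
 * Sketch of one lookup cycle as used by the main loop below :
 *
 *     U32 h = LZ4_hashPosition(ip, byU32);             // hash 4 bytes at ip
 *     const BYTE* match = LZ4_getPositionOnHash(h, ctx, byU32, base);
 *     LZ4_putPositionOnHash(ip, h, ctx, byU32, base);  // remember position
 *     // 'match' is only a candidate : the caller re-checks the 4 bytes
 */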

static int LZ4_compress_generic(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize,
                 limitedOutput_directive outputLimited,
                 tableType_t tableType,
                 dict_directive dict,
                 dictIssue_directive dictIssue)
{
    LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;

    const BYTE* ip = (const BYTE*) source;
    const BYTE* base;
    const BYTE* lowLimit;
    const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
    const BYTE* const dictionary = dictPtr->dictionary;
    const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
    const size_t dictDelta = dictEnd - (const BYTE*)source;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 forwardH;
    size_t refDelta=0;

    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported input size, too large (or negative) */
    switch(dict)
    {
    case noDict:
    default:
        base = (const BYTE*)source;
        lowLimit = (const BYTE*)source;
        break;
    case withPrefix64k:
        base = (const BYTE*)source - dictPtr->currentOffset;
        lowLimit = (const BYTE*)source - dictPtr->dictSize;
        break;
    case usingExtDict:
        base = (const BYTE*)source - dictPtr->currentOffset;
        lowLimit = (const BYTE*)source;
        break;
    }
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, ctx, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; )
    {
        const BYTE* match;
        BYTE* token;
        {
            const BYTE* forwardIp = ip;
            unsigned step=1;
            unsigned searchMatchNb = (1U << LZ4_skipTrigger);

            /* Find a match */
            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = searchMatchNb++ >> LZ4_skipTrigger;

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx, tableType, base);
                if (dict==usingExtDict)
                {
                    if (match<(const BYTE*)source)
                    {
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    }
                    else
                    {
                        refDelta = 0;
                        lowLimit = (const BYTE*)source;
                    }
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
        }

        /* Catch up */
        while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

        {
            /* Encode Literal length */
            unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;   /* Check output limit */
            if (litLength>=RUN_MASK)
            {
                int len = (int)litLength-RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op+=litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {
            unsigned matchLength;

            if ((dict==usingExtDict) && (lowLimit==dictionary))
            {
                const BYTE* limit;
                match += refDelta;
                limit = ip + (dictEnd-match);
                if (limit > matchlimit) limit = matchlimit;
                matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchLength;
                if (ip==limit)
                {
                    unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
                    matchLength += more;
                    ip += more;
                }
            }
            else
            {
                matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchLength;
            }

            if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
                return 0;   /* Check output limit */
            if (matchLength>=ML_MASK)
            {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
                if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx, tableType, base);

        /* Test next position */
        match = LZ4_getPosition(ip, ctx, tableType, base);
        if (dict==usingExtDict)
        {
            if (match<(const BYTE*)source)
            {
                refDelta = dictDelta;
                lowLimit = dictionary;
            }
            else
            {
                refDelta = 0;
                lowLimit = (const BYTE*)source;
            }
        }
        LZ4_putPosition(ip, ctx, tableType, base);
        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
            && (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {
        int lastRun = (int)(iend - anchor);
        if ((outputLimited) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
            return 0;   /* Check output limit */
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (BYTE)(lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
        op += iend-anchor;
    }

    /* End */
    return (int) (((char*)op)-dest);
}
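
/*
 * Editor's note (illustrative, not part of the original source) :
 * the search loop above accelerates over incompressible data : 'step' stays
 * at 1 for the first 1<<LZ4_skipTrigger (64) attempts, then grows by one
 * every 64 further failed attempts (step = searchMatchNb++ >> LZ4_skipTrigger),
 * so runs of random bytes are skipped in ever larger strides at a small
 * cost in compression ratio.
 */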


int LZ4_compress(const char* source, char* dest, int inputSize)
{
#if (HEAPMODE)
    void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U64, 8);   /* Aligned on 8-bytes boundaries */
#else
    U64 ctx[LZ4_STREAMSIZE_U64] = {0};   /* Ensure data is aligned on 8-bytes boundaries */
#endif
    int result;

    if (inputSize < LZ4_64Klimit)
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
    else
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);

#if (HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}

int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
#if (HEAPMODE)
    void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U64, 8);   /* Aligned on 8-bytes boundaries */
#else
    U64 ctx[LZ4_STREAMSIZE_U64] = {0};   /* Ensure data is aligned on 8-bytes boundaries */
#endif
    int result;

    if (inputSize < LZ4_64Klimit)
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
    else
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);

#if (HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}
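
/*
 * Editor's note : a minimal compress/decompress round-trip (illustrative, not
 * part of the original source). It uses only the public API from lz4.h ;
 * LZ4_compressBound() sizes the destination so compression cannot fail.
 */
#if 0
#include <assert.h>
static void example_roundTrip(const char* src, int srcSize)
{
    int bound = LZ4_compressBound(srcSize);
    char* compressed   = (char*)malloc((size_t)bound);
    char* decompressed = (char*)malloc((size_t)srcSize);

    int csize = LZ4_compress_limitedOutput(src, compressed, srcSize, bound);
    assert(csize > 0);                          /* 0 would mean output buffer too small */

    int dsize = LZ4_decompress_safe(compressed, decompressed, csize, srcSize);
    assert(dsize == srcSize);                   /* negative on malformed input */

    free(compressed); free(decompressed);
}
#endif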


/*****************************************
   Experimental : Streaming functions
*****************************************/

/*
 * LZ4_resetStream
 * Use this function once, to init a newly allocated LZ4_stream_t structure.
 * Return : nothing
 */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}

LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));   /* A compilation error here means LZ4_STREAMSIZE is not large enough */
    LZ4_resetStream(lz4s);
    return lz4s;
}

int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return (0);
}


int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    if (dict->initCheck) LZ4_resetStream(LZ4_dict);   /* Uninitialized structure detected */

    if (dictSize < MINMATCH)
    {
        dict->dictionary = NULL;
        dict->dictSize = 0;
        return 0;
    }

    if (p <= dictEnd - 64 KB) p = dictEnd - 64 KB;
    base = p - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += dict->dictSize;

    while (p <= dictEnd-MINMATCH)
    {
        LZ4_putPosition(p, dict, byU32, base);
        p+=3;
    }

    return dict->dictSize;
}
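
/*
 * Editor's note : a sketch of dictionary compression (illustrative, not part
 * of the original source ; 'dictBuf'/'dictLen' are assumed caller-provided).
 * Only the last 64 KB of the dictionary are actually indexed.
 */
#if 0
static int example_compressWithDict(const char* dictBuf, int dictLen,
                                    const char* src, char* dst, int srcSize, int dstSize)
{
    LZ4_stream_t ls;
    LZ4_resetStream(&ls);
    LZ4_loadDict(&ls, dictBuf, dictLen);
    return LZ4_compress_limitedOutput_continue(&ls, src, dst, srcSize, dstSize);
}
#endif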


static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
    if ((LZ4_dict->currentOffset > 0x80000000) ||
        ((size_t)LZ4_dict->currentOffset > (size_t)src))   /* address space overflow */
    {
        /* rescale hash table */
        U32 delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        for (i=0; i<HASH_SIZE_U32; i++)
        {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}


FORCE_INLINE int LZ4_compress_continue_generic (void* LZ4_stream, const char* source, char* dest, int inputSize,
                                                int maxOutputSize, limitedOutput_directive limit)
{
    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = (const BYTE*) source;
    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
    LZ4_renormDictT(streamPtr, smallest);

    /* Check overlapping input/dictionary space */
    {
        const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
        {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source)
    {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, dictSmall);
        else
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue);
        streamPtr->dictSize += (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }

    /* external dictionary mode */
    {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall);
        else
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue);
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }
}

int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
    return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited);
}

int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput);
}


/* Hidden debug function, to force separate dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
    int result;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = dictEnd;
    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
    LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);

    result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue);

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;

    return result;
}


int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
    const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
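
/*
 * Editor's note : a sketch of block streaming with LZ4_saveDict (illustrative,
 * not part of the original source ; the 'in'/'out' FILE handles are assumed).
 * Each block is compressed against the previous ones ; before the working
 * buffer is reused, the last 64 KB of history are saved into 'dictBuf' so
 * they stay addressable. A real container would also store each block size.
 */
#if 0
#include <stdio.h>
static void example_streamBlocks(FILE* in, FILE* out)
{
    static char inpBuf[64 KB];
    static char dictBuf[64 KB];
    static char cmpBuf[LZ4_COMPRESSBOUND(64 KB)];
    LZ4_stream_t ls;
    LZ4_resetStream(&ls);

    for (;;)
    {
        int n = (int)fread(inpBuf, 1, sizeof(inpBuf), in);
        if (n <= 0) break;
        int csize = LZ4_compress_limitedOutput_continue(&ls, inpBuf, cmpBuf, n, (int)sizeof(cmpBuf));
        fwrite(cmpBuf, 1, (size_t)csize, out);
        LZ4_saveDict(&ls, dictBuf, (int)sizeof(dictBuf));   /* inpBuf may now be overwritten */
    }
}
#endif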



/****************************
   Decompression functions
****************************/
/*
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is essential this generic function is really inlined,
 * in order to remove useless branches during compilation optimization.
 */
FORCE_INLINE int LZ4_decompress_generic(
                 const char* const source,
                 char* const dest,
                 int inputSize,
                 int outputSize,          /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

                 int endOnInput,          /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,     /* full, partial */
                 int targetOutputSize,    /* only used if partialDecoding==partial */
                 int dict,                /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,   /* == dest if dict == noDict */
                 const BYTE* const dictStart,   /* only if dict==usingExtDict */
                 const size_t dictSize          /* note : = 0 if noDict */
                 )
{
    /* Local Variables */
    const BYTE* restrict ip = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;
    const BYTE* const lowLimit = lowPrefix - dictSize;

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    const size_t dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4};
    const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};

    const int safeDecode = (endOnInput==endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));


    /* Special cases */
    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;   /* targetOutputSize too high => decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);


    /* Main Loop */
    while (1)
    {
        unsigned token;
        size_t length;
        const BYTE* match;

        /* get literal length */
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK)
        {
            unsigned s;
            do
            {
                s = *ip++;
                length += s;
            }
            while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
            if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op+length;
        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
        {
            if (partialDecoding)
            {
                if (cpy > oend) goto _output_error;   /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
            }
            else
            {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;   /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;   /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length; op = cpy;

        /* get offset */
        match = cpy - LZ4_readLE16(ip); ip+=2;
        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside destination buffer */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK)
        {
            unsigned s;
            do
            {
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                s = *ip++;
                length += s;
            } while (s==255);
            if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary */
        if ((dict==usingExtDict) && (match < lowPrefix))
        {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix-match))
            {
                /* match can be copied as a single segment from external dictionary */
                match = dictEnd - (lowPrefix-match);
                memcpy(op, match, length);
                op += length;
            }
            else
            {
                /* match encompasses external dictionary and current segment */
                size_t copySize = (size_t)(lowPrefix-match);
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                copySize = length - copySize;
                if (copySize > (size_t)(op-lowPrefix))   /* overlap within current segment */
                {
                    BYTE* const endOfMatch = op + copySize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                }
                else
                {
                    memcpy(op, lowPrefix, copySize);
                    op += copySize;
                }
            }
            continue;
        }

        /* copy repeated sequence */
        cpy = op + length;
        if (unlikely((op-match)<8))
        {
            const size_t dec64 = dec64table[op-match];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[op-match];
            LZ4_copy4(op+4, match);
            op += 8; match -= dec64;
        } else { LZ4_copy8(op, match); op+=8; match+=8; }

        if (unlikely(cpy>oend-12))
        {
            if (cpy > oend-LASTLITERALS) goto _output_error;   /* Error : last LASTLITERALS bytes must be literals */
            if (op < oend-8)
            {
                LZ4_wildCopy(op, match, oend-8);
                match += (oend-8) - op;
                op = oend-8;
            }
            while (op<cpy) *op++ = *match++;
        }
        else
            LZ4_wildCopy(op, match, cpy);
        op=cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
        return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
    else
        return (int) (((char*)ip)-source);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int) (-(((char*)ip)-source))-1;
}
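
/*
 * Editor's note (illustrative, not part of the original source) :
 * when offset < 8, the match overlaps the bytes being written, so a plain
 * 8-byte copy would read bytes not yet produced. The dec32table/dec64table
 * pair repositions 'match' so that word copies still replicate the pattern.
 * E.g. with offset == 1 (a run of one repeated byte) : the four byte stores
 * each copy the byte just written, dec32table[1] == 1 moves match forward so
 * LZ4_copy4(op+4, match) replicates those four bytes, and dec64table[1] == 0
 * leaves match 8 bytes behind op, ready for full 8-byte rounds.
 */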


int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
}

int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
}

int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
}
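
/*
 * Editor's note (illustrative, not part of the original source) :
 * prefer LZ4_decompress_safe for untrusted input : it is bounded by both
 * compressedSize and the output capacity. LZ4_decompress_fast trusts the
 * stream and only knows the original size, so a corrupted block can make it
 * read out of bounds. A minimal call :
 *
 *     int n = LZ4_decompress_safe(cmp, out, csize, (int)sizeof(out));
 *     if (n < 0) { ... invalid or truncated input ... }
 */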


/* streaming decompression functions */

typedef struct
{
    const BYTE* externalDict;
    size_t extDictSize;
    const BYTE* prefixEnd;
    size_t prefixSize;
} LZ4_streamDecode_t_internal;

/*
 * If you prefer dynamic allocation methods,
 * LZ4_createStreamDecode()
 * provides a pointer towards an initialized LZ4_streamDecode_t structure.
 */
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
    LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(sizeof(U64), LZ4_STREAMDECODESIZE_U64);
    return lz4s;
}

int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return 0;
}

/*
 * LZ4_setStreamDecode
 * Use this function to instruct where to find the dictionary.
 * This function is not necessary if previous data is still available where it was decoded.
 * Loading a size of 0 is allowed (same effect as no dictionary).
 * Return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
    lz4sd->prefixSize = (size_t) dictSize;
    lz4sd->prefixEnd = (BYTE*) dictionary + dictSize;
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize = 0;
    return 1;
}

/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If that is not possible, save the relevant part of the decoded data into a safe buffer,
    and indicate where it stands using LZ4_setStreamDecode()
*/
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest)
    {
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    }
    else
    {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}

int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest)
    {
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    }
    else
    {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = (BYTE*)dest - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}
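
/*
 * Editor's note : a sketch of streaming decompression (illustrative, not part
 * of the original source), mirroring the compression loop shown earlier.
 * Blocks are decoded back to back into one large buffer, so history stays
 * naturally addressable ; 'readBlock' is an assumed input callback.
 */
#if 0
static void example_streamDecode(char* decBuf, int decSize)
{
    char cmpBuf[LZ4_COMPRESSBOUND(64 KB)];
    char* dst = decBuf;
    LZ4_streamDecode_t lsd;
    LZ4_setStreamDecode(&lsd, NULL, 0);   /* no initial dictionary */

    int csize;
    while ((csize = readBlock(cmpBuf)) > 0)   /* hypothetical helper */
    {
        int n = LZ4_decompress_safe_continue(&lsd, cmpBuf, dst, csize, (int)(decBuf + decSize - dst));
        if (n < 0) break;   /* corrupted stream */
        dst += n;
    }
}
#endif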


/*
Advanced decoding functions :
*_usingDict() :
    These decoding functions work the same as the "_continue" ones,
    but the dictionary must be explicitly provided within the parameters
*/
FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
    if (dictStart+dictSize == dest)
    {
        if (dictSize >= (int)(64 KB - 1))
            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
    }
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (BYTE*)dictStart, dictSize);
}

int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
}

int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
}

/* debug function */
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (BYTE*)dictStart, dictSize);
}


/***************************************************
   Obsolete Functions
***************************************************/
/*
These function names are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }


/* Obsolete Streaming functions */

int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }

static void LZ4_init(LZ4_stream_t_internal* lz4ds, const BYTE* base)
{
    MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE);
    lz4ds->bufferStart = base;
}

int LZ4_resetStreamState(void* state, const char* inputBuffer)
{
    if ((((size_t)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on 4-bytes boundary */
    LZ4_init((LZ4_stream_t_internal*)state, (const BYTE*)inputBuffer);
    return 0;
}

void* LZ4_create (const char* inputBuffer)
{
    void* lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64);
    LZ4_init ((LZ4_stream_t_internal*)lz4ds, (const BYTE*)inputBuffer);
    return lz4ds;
}

char* LZ4_slideInputBuffer (void* LZ4_Data)
{
    LZ4_stream_t_internal* ctx = (LZ4_stream_t_internal*)LZ4_Data;
    int dictSize = LZ4_saveDict((LZ4_stream_t*)ctx, (char*)ctx->bufferStart, 64 KB);
    return (char*)(ctx->bufferStart + dictSize);
}

/* Obsolete compression functions using user-allocated state */

int LZ4_sizeofState() { return LZ4_STREAMSIZE; }

int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
{
    if (((size_t)(state)&3) != 0) return 0;   /* Error : state is not aligned on 4-bytes boundary */
    MEM_INIT(state, 0, LZ4_STREAMSIZE);

    if (inputSize < LZ4_64Klimit)
        return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
    else
        return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
}

int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
{
    if (((size_t)(state)&3) != 0) return 0;   /* Error : state is not aligned on 4-bytes boundary */
    MEM_INIT(state, 0, LZ4_STREAMSIZE);

    if (inputSize < LZ4_64Klimit)
        return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
    else
        return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

#endif   /* LZ4_COMMONDEFS_ONLY */