GitHub Repository: CTCaer/hekate
Path: blob/master/bdk/libs/compr/lz4.c
/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2017, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/


/*-************************************
*  Tuning parameters
**************************************/
/*
 * ACCELERATION_DEFAULT :
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
 */
#define ACCELERATION_DEFAULT 1


/*-************************************
*  Dependency
**************************************/
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"
/* see also "memory routines" below */


/*-************************************
*  Compiler Options
**************************************/
#ifndef LZ4_FORCE_INLINE
#  ifdef _MSC_VER    /* Visual Studio */
#    define LZ4_FORCE_INLINE static __forceinline
#  else
#    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#      ifdef __GNUC__
#        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
#      else
#        define LZ4_FORCE_INLINE static inline
#      endif
#    else
#      define LZ4_FORCE_INLINE static
#    endif /* __STDC_VERSION__ */
#  endif  /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */

/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
 * Gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy does not affect the compression speed.
 */
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__)
#  define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
#  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
#else
#  define LZ4_FORCE_O2_GCC_PPC64LE
#  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
#endif

/* branch-prediction hints, as defined in upstream lz4.c; guarded with #ifndef
 * in case the build environment already provides them through another header,
 * since the code below relies on likely()/unlikely() */
#ifndef expect
#  if defined(__GNUC__)
#    define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#  else
#    define expect(expr,value)    (expr)
#  endif
#endif

#ifndef likely
#define likely(expr)     expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr)   expect((expr) != 0, 0)
#endif


/*-************************************
*  Memory routines
**************************************/
#include <mem/heap.h>   /* malloc, zalloc, free */
#define ALLOC(s)          malloc(s)
#define ALLOC_AND_ZERO(s) zalloc(s)
#define FREEMEM           free
#include <string.h>     /* memset, memcpy */
#define MEM_INIT          memset


/*-************************************
*  Basic Types
**************************************/
typedef uint8_t   BYTE;   /* missing from the extracted copy, but required by the code below */
typedef uint16_t  U16;
typedef uint32_t  U32;
typedef  int32_t  S32;
typedef uint64_t  U64;
typedef uintptr_t uptrval;
typedef size_t    reg_t;  /* 32-bits in x32 mode */

/*-************************************
*  Reading and writing into memory
**************************************/
static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

static U16 LZ4_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static U16 LZ4_readLE16(const void* memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read16(memPtr);
    } else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}

static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian()) {
        LZ4_write16(memPtr, value);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}
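/* Note on the accessors above : reads and writes go through memcpy rather than
 * pointer casts, which avoids undefined behaviour from unaligned access and
 * strict-aliasing violations; compilers lower these fixed-size memcpy calls to
 * single load/store instructions where the target allows it. */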

/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE
void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}


/*-************************************
*  Common Constants
**************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)


/*-************************************
*  Error detection
**************************************/

#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
#  include <stdio.h>
static int g_debuglog_enable = 1;
#  define DEBUGLOG(l, ...) {                                  \
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {          \
            fprintf(stderr, __FILE__ ": ");                   \
            fprintf(stderr, __VA_ARGS__);                     \
            fprintf(stderr, " \n");                           \
    }   }
#else
#  define DEBUGLOG(l, ...)      {}    /* disabled */
#endif


/*-************************************
*  Common functions
**************************************/
static unsigned LZ4_NbCommonBytes (reg_t val)
{
    if (LZ4_isLittleEndian()) {
        if (sizeof(val)==8) {
            return (__builtin_ctzll((U64)val) >> 3);
        } else /* 32 bits */ {
            return (__builtin_ctz((U32)val) >> 3);
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val)==8) {   /* 64-bits */
            return (__builtin_clzll((U64)val) >> 3);
        } else /* 32 bits */ {
            return (__builtin_clz((U32)val) >> 3);
        }
    }
}

#define STEPSIZE sizeof(reg_t)
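/* LZ4_count() :
 * counts the number of identical leading bytes between pIn and pMatch,
 * comparing one register (STEPSIZE bytes) at a time and using
 * LZ4_NbCommonBytes() to locate the first difference; pInLimit caps the scan. */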
LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) {
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
        } else {
            return LZ4_NbCommonBytes(diff);
    }   }

    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}


#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
*  Local Constants
**************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */


/*-************************************
*  Local Structures and types
**************************************/
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;

/**
 * This enum distinguishes several different modes of accessing previous
 * content in the stream.
 *
 * - noDict        : There is no preceding content.
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
 *                   being compressed are valid and refer to the preceding
 *                   content (of length ctx->dictSize), which is available
 *                   contiguously preceding in memory the content currently
 *                   being compressed.
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
 *                   else in memory, starting at ctx->dictionary with length
 *                   ctx->dictSize.
 * - usingDictCtx  : Like usingExtDict, but everything concerning the preceding
 *                   content is in a separate context, pointed to by
 *                   ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
 *                   entries in the current context that refer to positions
 *                   preceding the beginning of the current compression are
 *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
 *                   ->dictSize describe the location and size of the preceding
 *                   content, and matches are found by looking in the ctx
 *                   ->dictCtx->hashTable.
 */
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;


/*-************************************
*  Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }

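/* For reference : in upstream LZ4, LZ4_COMPRESSBOUND(isize) expands to
 * (isize) + ((isize)/255) + 16, i.e. the worst case for incompressible input,
 * where each run of 255 literals costs one extra length byte. */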

/*-******************************
*  Compression functions
********************************/
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
    static const U64 prime5bytes = 889523592379ULL;
    static const U64 prime8bytes = 11400714785074694791ULL;
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
    if (LZ4_isLittleEndian())
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
    else
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
}

LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
    return LZ4_hash4(LZ4_read32(p), tableType);
}

static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: /* fallthrough */
    case byPtr: { /* illegal! */ return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)idx; return; }
    }
}

static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
{
    switch (tableType)
    {
    case clearedTable: { /* illegal! */ return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h]; }
    if (tableType == byU16) { const U16* const hashTable = (const U16*) tableBase; return hashTable[h]; }
    return 0;  /* forbidden case */
}

static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}

LZ4_FORCE_INLINE void LZ4_prepareTable(
        LZ4_stream_t_internal* const cctx,
        const int inputSize,
        const tableType_t tableType) {
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if (cctx->tableType != clearedTable) {
        if (cctx->tableType != tableType
          || (tableType == byU16 && cctx->currentOffset + inputSize >= 0xFFFFU)
          || (tableType == byU32 && cctx->currentOffset > 1 GB)
          || tableType == byPtr
          || inputSize >= 4 KB)
        {
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
            cctx->currentOffset = 0;
            cctx->tableType = clearedTable;
        } else {
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
        }
    }

    /* Adding a gap, so all previous entries are > MAX_DISTANCE back, is faster
     * than compressing without a gap. However, compressing with
     * currentOffset == 0 is faster still, so we preserve that case.
     */
    if (cctx->currentOffset != 0 && tableType == byU32) {
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
        cctx->currentOffset += 64 KB;
    }

    /* Finally, clear history */
    cctx->dictCtx = NULL;
    cctx->dictionary = NULL;
    cctx->dictSize = 0;
}

/** LZ4_compress_generic() :
    inlined, to ensure branches are decided at compilation time */
LZ4_FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 const int maxOutputSize,
                 const limitedOutput_directive outputLimited,
                 const tableType_t tableType,
                 const dict_directive dictDirective,
                 const dictIssue_directive dictIssue,
                 const U32 acceleration)
{
    const BYTE* ip = (const BYTE*) source;

    U32 const startIndex = cctx->currentOffset;
    const BYTE* base = (const BYTE*) source - startIndex;
    const BYTE* lowLimit;

    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
    const BYTE* const dictionary =
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
    const U32 dictSize =
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */

    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
    const BYTE* const dictEnd = dictionary + dictSize;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    /* the dictCtx currentOffset is indexed on the start of the dictionary,
     * while a dictionary in the current context precedes the currentOffset */
    const BYTE* dictBase = dictDirective == usingDictCtx ?
                           dictionary + dictSize - dictCtx->currentOffset :   /* is it possible that dictCtx->currentOffset != dictCtx->dictSize ? Yes if the dictionary context is not reset */
                           dictionary + dictSize - startIndex;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 offset = 0;
    U32 forwardH;

    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */

    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);

    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */

    /* Update context state */
    if (dictDirective == usingDictCtx) {
        /* Subsequent linked blocks can't use the dictionary. */
        /* Instead, they use the block we just compressed. */
        cctx->dictCtx = NULL;
        cctx->dictSize = (U32)inputSize;
    } else {
        cctx->dictSize += (U32)inputSize;
    }
    cctx->currentOffset += (U32)inputSize;
    cctx->tableType = tableType;

    if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        if (tableType == byPtr) {
            const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( (match+MAX_DISTANCE < ip)
                   || (LZ4_read32(match) != LZ4_read32(ip)) );

        } else {   /* byU32, byU16 */

            const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                U32 const current = (U32)(forwardIp - base);
                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;

                if (dictDirective == usingDictCtx) {
                    if (matchIndex < startIndex) {
                        /* there was no match, try the dictionary */
                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                        match = dictBase + matchIndex;
                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else if (dictDirective==usingExtDict) {
                    if (matchIndex < startIndex) {
                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
                        match = dictBase + matchIndex;
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else {   /* single continuous memory segment */
                    match = base + matchIndex;
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);

                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue;   /* match outside of valid area */
                if ((tableType != byU16) && (current - matchIndex > MAX_DISTANCE)) continue;   /* too far - note: works even if matchIndex overflows */

                if (LZ4_read32(match) == LZ4_read32(ip)) {
                    if (maybe_extMem) offset = current - matchIndex;
                    break;   /* match found */
                }

            } while(1);
        }

        /* Catch up */
        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) &&  /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;
            if (litLength >= RUN_MASK) {
                int len = (int)litLength-RUN_MASK;
                *token = (RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op+=litLength;
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
        }
_next_match:
        /* at this stage, the following variables must be correctly set :
         * - ip : at start of LZ operation
         * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
         * - offset : if maybe_extMem==1 (constant)
         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length, assumed already written
         */

        /* Encode Offset */
        if (maybe_extMem) {   /* static test */
            LZ4_writeLE16(op, (U16)offset); op+=2;
        } else {
            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
        }

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
              && (lowLimit==dictionary) /* match within extDict */ ) {
                const BYTE* limit = ip + (dictEnd-match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchCode;
                if (ip==limit) {
                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
                DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchCode;
                DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
            }

            if ( outputLimited &&   /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
                return 0;
            if (matchCode >= ML_MASK) {
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) {
                    op+=4;
                    LZ4_write32(op, 0xFFFFFFFF);
                    matchCode -= 4*255;
                }
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip >= mflimitPlusOne) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position */
        if (tableType == byPtr) {

            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
            if ( (match+MAX_DISTANCE >= ip)
              && (LZ4_read32(match) == LZ4_read32(ip)) )
            { token=op++; *token=0; goto _next_match; }

        } else {   /* byU32, byU16 */

            U32 const h = LZ4_hashPosition(ip, tableType);
            U32 const current = (U32)(ip-base);
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
            if (dictDirective == usingDictCtx) {
                if (matchIndex < startIndex) {
                    /* there was no match, try the dictionary */
                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                    matchIndex += dictDelta;
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else if (dictDirective==usingExtDict) {
                if (matchIndex < startIndex) {
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else {   /* single memory segment */
                match = base + matchIndex;
            }
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
              && ((tableType==byU16) ? 1 : (current - matchIndex <= MAX_DISTANCE))
              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
                token=op++;
                *token=0;
                if (maybe_extMem) offset = current - matchIndex;
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
                goto _next_match;
            }
        }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);

    }

_last_literals:
    /* Encode Last Literals */
    {   size_t const lastRun = (size_t)(iend - anchor);
        if ( (outputLimited) &&   /* Check output buffer overflow */
            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
            return 0;
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        memcpy(op, anchor, lastRun);
        op += lastRun;
    }

    return (int)(((char*)op) - dest);
}


int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
    LZ4_resetStream((LZ4_stream_t*)state);
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}

/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step. It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    if (dstCapacity >= LZ4_compressBound(srcSize)) {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}

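/* Illustrative sketch (not part of the library) : amortizing state setup over
 * many independent blocks. All names except the LZ4_* calls are hypothetical.
 *
 *     LZ4_stream_t* const state = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
 *     LZ4_resetStream(state);                 // expensive full init, paid once
 *     for (size_t i = 0; i < nbBlocks; i++) {
 *         int const cSize = LZ4_compress_fast_extState_fastReset(
 *             state, srcBlock[i], dstBlock[i], srcSize[i], dstCapacity, 1);
 *         if (cSize <= 0) break;
 *     }
 *     FREEMEM(state);
 */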
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_stream_t* const ctxPtr = ctx;
    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);

    FREEMEM(ctx);

    return result;
}


int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
}

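/* Illustrative round-trip sketch (not part of the library). Buffer names and
 * SRC_SIZE are hypothetical; dst is sized with LZ4_COMPRESSBOUND so that
 * compression cannot fail for lack of output space :
 *
 *     char dst[LZ4_COMPRESSBOUND(SRC_SIZE)];
 *     char out[SRC_SIZE];
 *     int const cSize = LZ4_compress_default(src, dst, SRC_SIZE, sizeof(dst));
 *     int const dSize = LZ4_decompress_safe(dst, out, cSize, sizeof(out));
 *     // on success, cSize > 0, dSize == SRC_SIZE, and out matches src
 */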
/* hidden debug function */
/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_resetStream(ctx);

    if (inputSize < LZ4_64Klimit)
        result = LZ4_compress_generic(&ctx->internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
    else
        result = LZ4_compress_generic(&ctx->internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);

    FREEMEM(ctx);

    return result;
}


/*-******************************
*  *_destSize() variant
********************************/

static int LZ4_compress_destSize_generic(
                       LZ4_stream_t_internal* const ctx,
                 const char* const src,
                       char* const dst,
                       int*  const srcSizePtr,
                 const int targetDstSize,
                 const tableType_t tableType)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* base = (const BYTE*) src;
    const BYTE* lowLimit = (const BYTE*) src;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + targetDstSize;
    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

    U32 forwardH;


    /* Init conditions */
    if (targetDstSize < 1) return 0;                                     /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;            /* Unsupported input size, too large (or negative) */
    if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (*srcSizePtr<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    *srcSizePtr = 0;
    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = 1 << LZ4_skipTrigger;

            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);

            } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                   || (LZ4_read32(match) != LZ4_read32(ip)) );
        }

        /* Catch up */
        while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literal length */
        {   unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if (op + ((litLength+240)/255) + litLength > oMaxLit) {
                /* Not enough space for a last match */
                op--;
                goto _last_literals;
            }
            if (litLength>=RUN_MASK) {
                unsigned len = litLength - RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op += litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);

            if (op + ((matchLength+240)/255) > oMaxMatch) {
                /* Match description too long : reduce it */
                matchLength = (15-1) + (oMaxMatch-op) * 255;
            }
            ip += MINMATCH + matchLength;

            if (matchLength>=ML_MASK) {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of block */
        if (ip > mflimit) break;
        if (op > oMaxSeq) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);

        /* Test next position */
        match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
        LZ4_putPosition(ip, ctx->hashTable, tableType, base);
        if ( (match+MAX_DISTANCE>=ip)
          && (LZ4_read32(match)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);
        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize  = (oend-op) - 1;
            lastRunSize -= (lastRunSize+240)/255;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize<<ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip)-src);
    return (int) (((char*)op)-dst);
}


static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_resetStream(state);

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {   /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
    } else {
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
        } else {
            tableType_t const tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
    }   }
}

int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_stream_t* ctxBody = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_stream_t* ctx = ctxBody;

    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

    FREEMEM(ctxBody);

    return result;
}

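/* Illustrative sketch (not part of the library) : LZ4_compress_destSize()
 * reverses the usual contract. The output budget is fixed, and the function
 * reports through *srcSizePtr how much input actually fit. Names are
 * hypothetical :
 *
 *     char dst[512];                      // hard output budget
 *     int srcSize = (int)srcLen;          // in  : bytes available at src
 *     int const cSize = LZ4_compress_destSize(src, dst, &srcSize, sizeof(dst));
 *     // out : cSize <= 512 compressed bytes; srcSize = input bytes consumed
 */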


/*-******************************
*  Streaming functions
********************************/

LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));   /* A compilation error here means LZ4_STREAMSIZE is not large enough */
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
    if (lz4s == NULL) return NULL;
    LZ4_resetStream(lz4s);
    return lz4s;
}

void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}

void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}

int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
    FREEMEM(LZ4_stream);
    return (0);
}


#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    LZ4_prepareTable(dict, 0, tableType);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k. */

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    base = dictEnd - 64 KB - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += 64 KB;
    dict->tableType = tableType;

    if (dictSize < (int)HASH_UNIT) {
        return 0;
    }

    while (p <= dictEnd-HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, tableType, base);
        p+=3;
    }

    return dict->dictSize;
}

void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream) {
    if (dictionary_stream != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicates a miss. In that case, we need
         * to bump the offset to something non-zero.
         */
        if (working_stream->internal_donotuse.currentOffset == 0) {
            working_stream->internal_donotuse.currentOffset = 64 KB;
        }
        working_stream->internal_donotuse.dictCtx = &(dictionary_stream->internal_donotuse);
    } else {
        working_stream->internal_donotuse.dictCtx = NULL;
    }
}

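/* Illustrative sketch (not part of the library) : compressing many independent
 * blocks against one shared dictionary. The dictionary stream is loaded once,
 * then re-attached to a working stream per block. Names other than the LZ4_*
 * calls are hypothetical :
 *
 *     LZ4_stream_t* const dictStream = LZ4_createStream();
 *     LZ4_stream_t* const workStream = LZ4_createStream();
 *     LZ4_loadDict(dictStream, dictBuf, dictSize);        // load once
 *     for (size_t i = 0; i < nbBlocks; i++) {
 *         LZ4_resetStream_fast(workStream);
 *         LZ4_attach_dictionary(workStream, dictStream);  // re-attach per block
 *         int const cSize = LZ4_compress_fast_continue(
 *             workStream, srcBlock[i], dstBlock[i], srcSize[i], dstCapacity, 1);
 *         if (cSize <= 0) break;
 *     }
 */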

static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
    if (LZ4_dict->currentOffset + nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        DEBUGLOG(4, "LZ4_renormDictT");
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}


int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    const tableType_t tableType = byU32;
    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source) {
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
        else
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
    }

    /* external dictionary mode */
    {   int result;
        if (streamPtr->dictCtx) {
            /* We depend here on the fact that dictCtx'es (produced by
             * LZ4_loadDict) guarantee that their tables contain no references
             * to offsets between dictCtx->currentOffset - 64 KB and
             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
             * to use noDictIssue even when the dict isn't a full 64 KB.
             */
            if (inputSize > 4 KB) {
                /* For compressing large blobs, it is faster to pay the setup
                 * cost to copy the dictionary's tables into the active context,
                 * so that the compression loop is only looking into one table.
                 */
                memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
            }
        } else {
            if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            }
        }
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        return result;
    }
}


/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
    int result;

    LZ4_renormDictT(streamPtr, srcSize);

    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
    } else {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
    }

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)srcSize;

    return result;
}

/*! LZ4_saveDict() :
 *  If the previously compressed data block is not guaranteed to remain available at its memory location,
 *  save it into a safer place (char* safeBuffer).
 *  Note : you don't need to call LZ4_loadDict() afterwards; the dictionary is
 *         immediately usable, so you can then call LZ4_compress_fast_continue().
 *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
 */
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}

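/* Illustrative streaming-compression sketch (not part of the library) : blocks
 * chained with LZ4_compress_fast_continue(), saving the dictionary between
 * blocks so the source buffer may be reused. Names other than the LZ4_* calls
 * are hypothetical :
 *
 *     LZ4_stream_t* const stream = LZ4_createStream();
 *     char dictBuf[64 * 1024];
 *     for (;;) {
 *         int const srcSize = read_next_block(srcBuf);
 *         if (srcSize <= 0) break;
 *         int const cSize = LZ4_compress_fast_continue(
 *             stream, srcBuf, dstBuf, srcSize, dstCapacity, 1);
 *         if (cSize <= 0) break;
 *         write_block(dstBuf, cSize);
 *         LZ4_saveDict(stream, dictBuf, sizeof(dictBuf));   // srcBuf reusable now
 *     }
 *     LZ4_freeStream(stream);
 */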


/*-*****************************
*  Decompression functions
*******************************/
/*! LZ4_decompress_generic() :
 *  This generic decompression function covers all use cases.
 *  It shall be instantiated several times, using different sets of directives.
 *  Note that it is important for performance that this function really gets inlined,
 *  in order to remove useless branches during compilation optimization.
 */
LZ4_FORCE_O2_GCC_PPC64LE
LZ4_FORCE_INLINE int LZ4_decompress_generic(
                 const char* const src,
                 char* const dst,
                 int srcSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */

                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,    /* full, partial */
                 int targetOutputSize,   /* only used if partialDecoding==partial */
                 int dict,               /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,   /* always <= dst, == dst when no prefix */
                 const BYTE* const dictStart,   /* only if dict==usingExtDict */
                 const size_t dictSize          /* note : = 0 if noDict */
                 )
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
    const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};

    const int safeDecode = (endOnInput==endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));


    /* Special cases */
    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                      /* targetOutputSize too high => just decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);

    /* Main Loop : decode sequences */
    while (1) {
        size_t length;
        const BYTE* match;
        size_t offset;

        unsigned const token = *ip++;

        /* shortcut for common case :
         * in most circumstances, we expect to decode small matches (<= 18 bytes) separated by a few literals (<= 14 bytes).
         * this shortcut was tested on x86 and x64, where it improves decoding speed.
         * it has not yet been benchmarked on ARM, Power, mips, etc. */
        if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend)
          & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend))
          & ((token < (15<<ML_BITS)) & ((token & ML_MASK) != 15)) ) {
            size_t const ll = token >> ML_BITS;
            size_t const off = LZ4_readLE16(ip+ll);
            const BYTE* const matchPtr = op + ll - off;   /* pointer underflow risk ? */
            if ((off >= 8) /* do not deal with overlapping matches */ & (matchPtr >= lowPrefix)) {
                size_t const ml = (token & ML_MASK) + MINMATCH;
                memcpy(op, ip, 16); op += ll; ip += ll + 2 /*offset*/;
                memcpy(op + 0, matchPtr + 0, 8);
                memcpy(op + 8, matchPtr + 8, 8);
                memcpy(op +16, matchPtr +16, 2);
                op += ml;
                continue;
            }
        }

        /* decode literal length */
        if ((length=(token>>ML_BITS)) == RUN_MASK) {
            unsigned s;
            do {
                s = *ip++;
                length += s;
            } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op+length;
        if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
          || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
        {
            if (partialDecoding) {
                if (cpy > oend) goto _output_error;                            /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;    /* Error : read attempt beyond end of input buffer */
            } else {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;        /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;   /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length; op = cpy;

        /* get offset */
        offset = LZ4_readLE16(ip); ip+=2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error;   /* Error : offset outside buffers */
        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK) {
            unsigned s;
            do {
                s = *ip++;
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                length += s;
            } while (s==255);
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary */
        if ((dict==usingExtDict) && (match < lowPrefix)) {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix-match)) {
                /* match can be copied as a single segment from external dictionary */
                memmove(op, dictEnd - (lowPrefix-match), length);
                op += length;
            } else {
                /* match encompasses both the external dictionary and the current block */
                size_t const copySize = (size_t)(lowPrefix-match);
                size_t const restSize = length - copySize;
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                if (restSize > (size_t)(op-lowPrefix)) {   /* overlap copy */
                    BYTE* const endOfMatch = op + restSize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                } else {
                    memcpy(op, lowPrefix, restSize);
                    op += restSize;
            }   }
            continue;
        }

        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset<8)) {
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += inc32table[offset];
            memcpy(op+4, match, 4);
            match -= dec64table[offset];
        } else { memcpy(op, match, 8); match+=8; }
        op += 8;

        if (unlikely(cpy>oend-12)) {
            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
            if (cpy > oend-LASTLITERALS) goto _output_error;   /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit) {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op<cpy) *op++ = *match++;
        } else {
            memcpy(op, match, 8);
            if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
        }
        op = cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
        return (int) (((char*)op)-dst);         /* Nb of output bytes decoded */
    else
        return (int) (((const char*)ip)-src);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int) (-(((const char*)ip)-src))-1;
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
}

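/* Illustrative sketch (not part of the library) : LZ4_decompress_safe() is the
 * variant to use on untrusted input; a negative return value signals malformed
 * data. Names are hypothetical :
 *
 *     int const dSize = LZ4_decompress_safe(cBuf, outBuf, cSize, outCapacity);
 *     if (dSize < 0) { reject_input(); }   // corrupted or truncated block
 *
 * LZ4_decompress_fast() trusts the caller to supply the exact originalSize and
 * performs no such validation, so it must never be fed untrusted data. */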
1482
/*===== streaming decompression functions =====*/

LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
    LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
    return lz4s;
}

int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    FREEMEM(LZ4_stream);
    return 0;
}

/*!
 * LZ4_setStreamDecode() :
 * Use this function to tell the decoder where to find the dictionary.
 * This function is not necessary if previous data is still available where it was decoded.
 * Loading a size of 0 is allowed (same effect as no dictionary).
 * Return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    lz4sd->prefixSize = (size_t) dictSize;
    lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize = 0;
    return 1;
}
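
/* Illustrative usage sketch (not part of the library; excluded from the build):
 * using LZ4_setStreamDecode() to point the decoder at history that was saved
 * to a separate buffer. savedDict, savedDictSize, srcBlock, dstBuf and
 * dstCapacity are hypothetical application buffers/sizes. */
#if 0
static int example_decode_after_dict_move(const char* savedDict, int savedDictSize,
                                          const char* srcBlock, int srcSize,
                                          char* dstBuf, int dstCapacity)
{
    LZ4_streamDecode_t ctx = { 0 };   /* fresh decoding state */
    /* Declare the saved bytes as the history the next block may reference. */
    if (!LZ4_setStreamDecode(&ctx, savedDict, savedDictSize)) return -1;
    return LZ4_decompress_safe_continue(&ctx, srcBlock, dstBuf, srcSize, dstCapacity);
}
#endif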

/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If that is not possible, save the relevant part of the decoded data into a safe buffer,
    and indicate its position using LZ4_setStreamDecode().
*/
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    } else {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
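
/* Illustrative usage sketch (not part of the library; excluded from the build):
 * decoding a sequence of dependent blocks with LZ4_decompress_safe_continue().
 * A double buffer keeps the previous decoded block addressable in memory, so
 * matches in the next block can reference it as history. BLOCK_MAX,
 * read_block() and consume() are hypothetical application helpers. */
#if 0
static void example_streaming_decode(void)
{
    static char decoded[2][BLOCK_MAX];     /* double buffer preserves the previous block */
    char src[LZ4_COMPRESSBOUND(BLOCK_MAX)];
    LZ4_streamDecode_t ctx = { 0 };        /* zeroed state == no history yet */
    int idx = 0;
    int srcSize;

    while ((srcSize = read_block(src, (int)sizeof src)) > 0) {
        int const dSize = LZ4_decompress_safe_continue(&ctx, src, decoded[idx], srcSize, BLOCK_MAX);
        if (dSize < 0) break;              /* malformed block */
        consume(decoded[idx], dSize);
        idx ^= 1;                          /* keep the last block intact as history */
    }
}
#endif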

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}


/*
Advanced decoding functions :
*_usingDict() :
    These decoding functions work the same way as the "_continue" ones,
    except that the dictionary must be provided explicitly as a parameter.
*/

LZ4_FORCE_O2_GCC_PPC64LE
LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
    if (dictStart+dictSize == dest) {
        if (dictSize >= (int)(64 KB - 1))
            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
    }
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
}
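
/* Illustrative usage sketch (not part of the library; excluded from the build):
 * one-shot decompression of a block that was compressed against an external
 * dictionary, via LZ4_decompress_safe_usingDict(). The exact same dictionary
 * bytes used at compression time must be supplied here; dictBuf/dictSize are
 * hypothetical. */
#if 0
static int example_decode_using_dict(const char* src, int srcSize,
                                     char* dst, int dstCapacity,
                                     const char* dictBuf, int dictSize)
{
    return LZ4_decompress_safe_usingDict(src, dst, srcSize, dstCapacity, dictBuf, dictSize);
}
#endif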

/* debug function */
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}

/*=*************************************************
*  Obsolete Functions
***************************************************/
/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }

/*
These function names are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }

/* Obsolete Streaming functions */

int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }

int LZ4_resetStreamState(void* state, char* inputBuffer)
{
    (void)inputBuffer;
    LZ4_resetStream((LZ4_stream_t*)state);
    return 0;
}

void* LZ4_create (char* inputBuffer)
{
    (void)inputBuffer;
    return LZ4_createStream();
}

char* LZ4_slideInputBuffer (void* state)
{
    /* avoid const char * -> char * conversion warning */
    return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

#endif   /* LZ4_COMMONDEFS_ONLY */