ToolDAQFramework
Classes | Macros | Typedefs | Enumerations | Functions | Variables
zstd.c File Reference
#include <limits.h>
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>

Classes

struct  BIT_CStream_t
 
struct  BIT_DStream_t
 
struct  FSE_CState_t
 
struct  FSE_DState_t
 
struct  FSE_symbolCompressionTransform
 
struct  FSE_DTableHeader
 
struct  FSE_decode_t
 
struct  HUF_CTableHeader
 
struct  FSE_DecompressWksp
 
struct  ZSTD_bounds
 
struct  ZSTD_inBuffer_s
 
struct  ZSTD_outBuffer_s
 
struct  ZSTD_Sequence
 
struct  ZSTD_compressionParameters
 
struct  ZSTD_frameParameters
 
struct  ZSTD_parameters
 
struct  ZSTD_FrameHeader
 
struct  ZSTD_customMem
 
struct  ZSTD_frameProgression
 
struct  POOL_job_s
 
struct  POOL_ctx_s
 
struct  ZSTD_cpuid_t
 
struct  XXH32_canonical_t
 Canonical (big endian) representation of XXH32_hash_t.
 
struct  XXH64_canonical_t
 Canonical (big endian) representation of XXH64_hash_t.
 
struct  XXH32_state_s
 
struct  XXH64_state_s
 
struct  ZSTD_frameSizeInfo
 
struct  blockProperties_t
 
struct  nodeElt_s
 
struct  HUF_CompressWeightsWksp
 
struct  HUF_WriteCTableWksp
 
struct  rankPos
 
struct  HUF_buildCTable_wksp_tables
 
struct  HUF_CStream_t
 
struct  HUF_compress_tables_t
 
struct  ZSTD_cwksp
 
struct  ZSTD_prefixDict_s
 
struct  ZSTD_localDict
 
struct  ZSTD_hufCTables_t
 
struct  ZSTD_fseCTables_t
 
struct  ZSTD_entropyCTables_t
 
struct  SeqDef_s
 
struct  SeqStore_t
 
struct  ZSTD_SequenceLength
 
struct  ZSTD_hufCTablesMetadata_t
 
struct  ZSTD_fseCTablesMetadata_t
 
struct  ZSTD_entropyCTablesMetadata_t
 
struct  ZSTD_match_t
 
struct  rawSeq
 
struct  RawSeqStore_t
 
struct  ZSTD_optimal_t
 
struct  optState_t
 
struct  ZSTD_compressedBlockState_t
 
struct  ZSTD_window_t
 
struct  ZSTD_MatchState_t
 
struct  ZSTD_blockState_t
 
struct  ldmEntry_t
 
struct  ldmMatchCandidate_t
 
struct  ldmState_t
 
struct  ldmParams_t
 
struct  SeqCollector
 
struct  ZSTD_CCtx_params_s
 
struct  ZSTD_blockSplitCtx
 
struct  ZSTD_CCtx_s
 
struct  repcodes_s
 
struct  ZSTD_SequencePosition
 
struct  BlockSummary
 
struct  ZSTD_BuildCTableWksp
 
struct  EstimatedBlockSize
 
struct  Fingerprint
 
struct  FPStats
 
struct  ZSTD_CDict_s
 
struct  ZSTD_symbolEncodingTypeStats_t
 
struct  seqStoreSplits
 
struct  ldmRollingHashState_t
 
struct  ZSTD_optLdm_t
 
struct  buffer_s
 
struct  ZSTDMT_bufferPool_s
 
struct  ZSTDMT_CCtxPool
 
struct  Range
 
struct  SerialState
 
struct  ZSTDMT_jobDescription
 
struct  InBuff_t
 
struct  RoundBuff_t
 
struct  RSyncState_t
 
struct  ZSTDMT_CCtx_s
 
struct  SyncPoint
 
struct  DTableDesc
 
struct  HUF_DecompressFastArgs
 
struct  HUF_DEltX1
 
struct  HUF_ReadDTableX1_Workspace
 
struct  HUF_DEltX2
 
struct  sortedSymbol_t
 
struct  HUF_ReadDTableX2_Workspace
 
struct  algo_time_t
 
struct  ZSTD_seqSymbol_header
 
struct  ZSTD_seqSymbol
 
struct  ZSTD_entropyDTables_t
 
struct  ZSTD_DDictHashSet
 
struct  ZSTD_DCtx_s
 
struct  ZSTD_DDict_s
 
struct  seq_t
 
struct  ZSTD_fseState
 
struct  seqState_t
 
struct  ZSTD_OffsetInfo
 
struct  ZDICT_params_t
 
struct  ZDICT_cover_params_t
 
struct  ZDICT_fastCover_params_t
 
struct  ZDICT_legacy_params_t
 
struct  COVER_best_s
 
struct  COVER_segment_t
 
struct  COVER_epoch_info_t
 
struct  COVER_dictSelection
 
struct  COVER_map_pair_t_s
 
struct  COVER_map_s
 
struct  COVER_ctx_t
 
struct  COVER_tryParameters_data_s
 
struct  _trbudget_t
 
struct  FASTCOVER_accel_t
 
struct  FASTCOVER_ctx_t
 
struct  FASTCOVER_tryParameters_data_s
 
struct  dictItem
 
struct  EStats_ress_t
 
struct  offsetCount_t
 

Macros

#define DEBUGLEVEL   0
 
#define MEM_MODULE
 
#define XXH_NAMESPACE   ZSTD_
 
#define XXH_PRIVATE_API
 
#define XXH_INLINE_ALL
 
#define ZSTD_LEGACY_SUPPORT   0
 
#define ZSTD_MULTITHREAD
 
#define ZSTD_TRACE   0
 
#define ZSTD_DISABLE_ASM   1
 
#define ZSTD_DEPS_NEED_MALLOC
 
#define ZSTD_DEPS_NEED_MATH64
 
#define ZSTD_DEPS_COMMON
 
#define ZSTD_memcpy(d, s, l)   memcpy((d),(s),(l))
 
#define ZSTD_memmove(d, s, l)   memmove((d),(s),(l))
 
#define ZSTD_memset(p, v, l)   memset((p),(v),(l))
 
#define ZSTD_DEPS_MALLOC
 
#define ZSTD_malloc(s)   malloc(s)
 
#define ZSTD_calloc(n, s)   calloc((n), (s))
 
#define ZSTD_free(p)   free((p))
 
#define ZSTD_DEPS_MATH64
 
#define ZSTD_div64(dividend, divisor)   ((dividend) / (divisor))
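The ZSTD_DEPS_* block routes every libc dependency through a macro so that freestanding builds can substitute their own primitives; in this build they resolve directly to the standard library. A minimal illustrative sketch of what the indirection expands to (the variable p is ours, not part of the source):

    void* const p = ZSTD_malloc(64);     /* -> malloc(64) */
    ZSTD_memset(p, 0, 64);               /* -> memset(p, 0, 64) */
    ZSTD_free(p);                        /* -> free(p) */
    /* ZSTD_div64(1000, 8) -> (1000) / (8) == 125 */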
 
#define DEBUG_H_12987983217
 
#define DEBUG_STATIC_ASSERT(c)   (void)sizeof(char[(c) ? 1 : -1])
 
#define assert(condition)   ((void)0) /* disable assert (default) */
 
#define RAWLOG(l, ...)   do { } while (0) /* disabled */
 
#define DEBUGLOG(l, ...)   do { } while (0) /* disabled */
 
#define MEM_H_MODULE
 
#define ZSTD_COMPILER_H
 
#define ZSTD_PORTABILITY_MACROS_H
 
#define __has_attribute(x)   0
 
#define __has_builtin(x)   0
 
#define __has_feature(x)   0
 
#define ZSTD_MEMORY_SANITIZER   0
 
#define ZSTD_ADDRESS_SANITIZER   0
 
#define ZSTD_DATAFLOW_SANITIZER   0
 
#define ZSTD_HIDE_ASM_FUNCTION(func)
 
#define STATIC_BMI2   0
 
#define DYNAMIC_BMI2   0
 
#define ZSTD_ASM_SUPPORTED   0
 
#define ZSTD_ENABLE_ASM_X86_64_BMI2   0
 
#define ZSTD_CET_ENDBRANCH
 
#define ZSTD_IS_DETERMINISTIC_BUILD   1
 
#define INLINE_KEYWORD
 
#define FORCE_INLINE_ATTR
 
#define WIN_CDECL
 
#define UNUSED_ATTR
 
#define FORCE_INLINE_TEMPLATE   static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR
 
#define HINT_INLINE   FORCE_INLINE_TEMPLATE
 
#define MEM_STATIC   static /* this version may generate warnings for unused static functions; disable the relevant warning */
 
#define FORCE_NOINLINE   static
 
#define TARGET_ATTRIBUTE(target)
 
#define BMI2_TARGET_ATTRIBUTE   TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
 
#define PREFETCH_L1(ptr)   do { (void)(ptr); } while (0) /* disabled */
 
#define PREFETCH_L2(ptr)   do { (void)(ptr); } while (0) /* disabled */
 
#define CACHELINE_SIZE   64
 
#define PREFETCH_AREA(p, s)
 
#define DONT_VECTORIZE
 
#define LIKELY(x)   (x)
 
#define UNLIKELY(x)   (x)
 
#define ZSTD_UNREACHABLE   do { assert(0); } while (0)
 
#define ZSTD_HAS_C_ATTRIBUTE(x)   0
 
#define ZSTD_HAS_CPP_ATTRIBUTE(x)   0
 
#define ZSTD_FALLTHROUGH
 
#define ZSTD_ALIGNOF(T)   (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
 
#define ZSTD_ALIGNED(...)
 
#define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
 
#define ERROR_H_MODULE
 
#define ZSTD_ERRORS_H_398273423
 
#define ZSTDERRORLIB_VISIBLE
 
#define ZSTDERRORLIB_HIDDEN
 
#define ZSTDERRORLIB_API   ZSTDERRORLIB_VISIBLE
 
#define ERR_STATIC   static /* this version may generate warnings for unused static functions; disable the relevant warning */
 
#define PREFIX(name)   ZSTD_error_##name
 
#define ERROR(name)   ZSTD_ERROR(name)
 
#define ZSTD_ERROR(name)   ((size_t)-PREFIX(name))
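PREFIX/ERROR encode an error as a negated ZSTD_ErrorCode cast to size_t, which is how a single size_t return value can carry either a valid size or an error; ERR_isError (aliased below as ZSTD_isError / FSE_isError / HUF_isError) detects the encoded form. A hedged sketch using the public API, assuming dst, src and srcSize are caller-provided:

    /* internally, failure paths do e.g.: return ERROR(dstSize_tooSmall);
     * which expands to (size_t)-ZSTD_error_dstSize_tooSmall          */
    size_t const r = ZSTD_compress(dst, 0, src, srcSize, 1);   /* dstCapacity 0 forces an error */
    if (ZSTD_isError(r)) {
        printf("%s\n", ZSTD_getErrorName(r));                  /* human-readable error name */
    }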
 
#define CHECK_V_F(e, f)
 
#define CHECK_F(f)   do { CHECK_V_F(_var_err__, f); } while (0)
 
#define _FORCE_HAS_FORMAT_STRING(...)
 
#define ERR_QUOTE(str)   #str
 
#define RETURN_ERROR_IF(cond, err, ...)
 
#define RETURN_ERROR(err, ...)
 
#define FORWARD_IF_ERROR(err, ...)
 
#define FSE_STATIC_LINKING_ONLY   /* FSE_MIN_TABLELOG */
 
#define FSE_H
 
#define FSE_PUBLIC_API
 
#define FSE_VERSION_MAJOR   0
 
#define FSE_VERSION_MINOR   9
 
#define FSE_VERSION_RELEASE   0
 
#define FSE_LIB_VERSION   FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
 
#define FSE_QUOTE(str)   #str
 
#define FSE_EXPAND_AND_QUOTE(str)   FSE_QUOTE(str)
 
#define FSE_VERSION_STRING   FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
 
#define FSE_VERSION_NUMBER   (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
 
#define FSE_H_FSE_STATIC_LINKING_ONLY
 
#define BITSTREAM_H_MODULE
 
#define ZSTD_BITS_H
 
#define STREAM_ACCUMULATOR_MIN_32   25
 
#define STREAM_ACCUMULATOR_MIN_64   57
 
#define STREAM_ACCUMULATOR_MIN   ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
 
#define BIT_MASK_SIZE   (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
 
#define FSE_NCOUNTBOUND   512
 
#define FSE_BLOCKBOUND(size)   ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
 
#define FSE_COMPRESSBOUND(size)   (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
 
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
 
#define FSE_DTABLE_SIZE_U32(maxTableLog)   (1 + (1<<(maxTableLog)))
 
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
 
#define FSE_DTABLE_SIZE(maxTableLog)   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
 
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)   (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
 
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog)   (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
 
#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue)   (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
 
#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
 
#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
 
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue)   (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
 
#define FSE_MAX_MEMORY_USAGE   14
 
#define FSE_DEFAULT_MEMORY_USAGE   13
 
#define FSE_MAX_SYMBOL_VALUE   255
 
#define FSE_FUNCTION_TYPE   BYTE
 
#define FSE_FUNCTION_EXTENSION
 
#define FSE_DECODE_TYPE   FSE_decode_t
 
#define FSE_MAX_TABLELOG   (FSE_MAX_MEMORY_USAGE-2)
 
#define FSE_MAX_TABLESIZE   (1U<<FSE_MAX_TABLELOG)
 
#define FSE_MAXTABLESIZE_MASK   (FSE_MAX_TABLESIZE-1)
 
#define FSE_DEFAULT_TABLELOG   (FSE_DEFAULT_MEMORY_USAGE-2)
 
#define FSE_MIN_TABLELOG   5
 
#define FSE_TABLELOG_ABSOLUTE_MAX   15
 
#define FSE_TABLESTEP(tableSize)   (((tableSize)>>1) + ((tableSize)>>3) + 3)
 
#define HUF_H_298734234
 
#define FSE_STATIC_LINKING_ONLY
 
#define HUF_BLOCKSIZE_MAX   (128 * 1024)
 
#define HUF_WORKSPACE_SIZE   ((8 << 10) + 512 /* sorting scratch space */)
 
#define HUF_WORKSPACE_SIZE_U64   (HUF_WORKSPACE_SIZE / sizeof(U64))
 
#define HUF_TABLELOG_MAX   12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
 
#define HUF_TABLELOG_DEFAULT   11 /* default tableLog value when none specified */
 
#define HUF_SYMBOLVALUE_MAX   255
 
#define HUF_TABLELOG_ABSOLUTEMAX   12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
 
#define HUF_CTABLEBOUND   129
 
#define HUF_BLOCKBOUND(size)   (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */
 
#define HUF_COMPRESSBOUND(size)   (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
 
#define HUF_CTABLE_SIZE_ST(maxSymbolValue)   ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */
 
#define HUF_CTABLE_SIZE(maxSymbolValue)   (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
 
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue)    HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */
 
#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
 
#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog)    HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
 
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog)    HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
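HUF_CREATE_STATIC_CTABLE and HUF_CREATE_STATIC_DTABLEX1/X2 size Huffman tables at compile time; the DTable initializer pre-fills the header word with the table log. A minimal sketch using the internal HUF_CElt / HUF_DTable types declared in this file (the table names are ours):

    HUF_CREATE_STATIC_CTABLE(hufCTable, HUF_SYMBOLVALUE_MAX);  /* HUF_CElt hufCTable[255 + 2] */
    HUF_CREATE_STATIC_DTABLEX2(hufDTable, HUF_TABLELOG_MAX);   /* HUF_DTable hufDTable[1 + (1<<12)],
                                                                   first word encodes tableLog = 12 */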
 
#define HUF_OPTIMAL_DEPTH_THRESHOLD   ZSTD_btultra
 
#define HUF_CTABLE_WORKSPACE_SIZE_U32   ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
 
#define HUF_CTABLE_WORKSPACE_SIZE   (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
 
#define HUF_READ_STATS_WORKSPACE_SIZE_U32   FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
 
#define HUF_READ_STATS_WORKSPACE_SIZE   (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
 
#define HUF_DECOMPRESS_WORKSPACE_SIZE   ((2 << 10) + (1 << 9))
 
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32   (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
 
#define FSE_STATIC_LINKING_ONLY
 
#define FSE_isError   ERR_isError
 
#define FSE_STATIC_ASSERT(c)   DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
 
#define FSE_CAT(X, Y)   X##Y
 
#define FSE_FUNCTION_NAME(X, Y)   FSE_CAT(X,Y)
 
#define FSE_TYPE_NAME(X, Y)   FSE_CAT(X,Y)
 
#define FSE_GETSYMBOL(statePtr)   fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
 
#define THREADING_H_938743
 
#define ZSTD_pthread_mutex_t   pthread_mutex_t
 
#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
 
#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
 
#define ZSTD_pthread_mutex_lock(a)   pthread_mutex_lock((a))
 
#define ZSTD_pthread_mutex_unlock(a)   pthread_mutex_unlock((a))
 
#define ZSTD_pthread_cond_t   pthread_cond_t
 
#define ZSTD_pthread_cond_init(a, b)   pthread_cond_init((a), (b))
 
#define ZSTD_pthread_cond_destroy(a)   pthread_cond_destroy((a))
 
#define ZSTD_pthread_cond_wait(a, b)   pthread_cond_wait((a), (b))
 
#define ZSTD_pthread_cond_signal(a)   pthread_cond_signal((a))
 
#define ZSTD_pthread_cond_broadcast(a)   pthread_cond_broadcast((a))
 
#define ZSTD_pthread_t   pthread_t
 
#define ZSTD_pthread_create(a, b, c, d)   pthread_create((a), (b), (c), (d))
 
#define ZSTD_pthread_join(a)   pthread_join((a),NULL)
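Because ZSTD_MULTITHREAD is defined and <pthread.h> is included, the ZSTD_pthread_* macros are thin aliases for POSIX threads (Windows builds map the same names onto Win32 primitives), so the rest of the file stays backend-agnostic. Illustrative use (the variable lock is ours):

    ZSTD_pthread_mutex_t lock;
    ZSTD_pthread_mutex_init(&lock, NULL);   /* -> pthread_mutex_init(&lock, NULL) */
    ZSTD_pthread_mutex_lock(&lock);
    /* ... critical section ... */
    ZSTD_pthread_mutex_unlock(&lock);
    ZSTD_pthread_mutex_destroy(&lock);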
 
#define ZSTD_DEPS_NEED_MALLOC
 
#define ZSTD_STATIC_LINKING_ONLY
 
#define ZSTD_H_235446
 
#define ZSTDLIB_VISIBLE
 
#define ZSTDLIB_HIDDEN
 
#define ZSTDLIB_API   ZSTDLIB_VISIBLE
 
#define ZSTD_DEPRECATED(message)
 
#define ZSTD_VERSION_MAJOR   1
 
#define ZSTD_VERSION_MINOR   5
 
#define ZSTD_VERSION_RELEASE   8
 
#define ZSTD_VERSION_NUMBER   (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 
#define ZSTD_LIB_VERSION   ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
 
#define ZSTD_QUOTE(str)   #str
 
#define ZSTD_EXPAND_AND_QUOTE(str)   ZSTD_QUOTE(str)
 
#define ZSTD_VERSION_STRING   ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
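ZSTD_VERSION_NUMBER packs major/minor/release in base 100, and ZSTD_VERSION_STRING stringifies the dotted form; for the 1.5.8 sources bundled here:

    /* ZSTD_VERSION_NUMBER = 1*100*100 + 5*100 + 8 = 10508 */
    #if ZSTD_VERSION_NUMBER >= 10508
        /* safe to rely on features introduced in v1.5.8 */
    #endif
    /* ZSTD_VERSION_STRING expands to "1.5.8"; ZSTD_versionNumber() reports the same value at runtime */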
 
#define ZSTD_CLEVEL_DEFAULT   3
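ZSTD_CLEVEL_DEFAULT is the level used when none is requested (passing level 0 through the advanced API means "default" as well). A minimal sketch of the advanced-parameter API; real code should check each return value with ZSTD_isError():

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, ZSTD_CLEVEL_DEFAULT);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* honoured because ZSTD_MULTITHREAD is set above */
    /* parameters persist across frames until ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters) */
    ZSTD_freeCCtx(cctx);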
 
#define ZSTD_MAGICNUMBER   0xFD2FB528 /* valid since v0.8.0 */
 
#define ZSTD_MAGIC_DICTIONARY   0xEC30A437 /* valid since v0.7.0 */
 
#define ZSTD_MAGIC_SKIPPABLE_START   0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
 
#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
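A frame whose 4-byte magic falls anywhere in 0x184D2A50..0x184D2A5F is a skippable frame, so detection is a single masked comparison. A sketch assuming the leading 4 bytes of src have been read little-endian (the internal MEM_readLE32 helper does this):

    U32 const magic = MEM_readLE32(src);
    if (magic == ZSTD_MAGICNUMBER) {
        /* regular zstd frame (0xFD2FB528) */
    } else if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
        /* skippable frame: the following 4 bytes give the user-data size to skip */
    }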
 
#define ZSTD_BLOCKSIZELOG_MAX   17
 
#define ZSTD_BLOCKSIZE_MAX   (1<<ZSTD_BLOCKSIZELOG_MAX)
 
#define ZSTD_CONTENTSIZE_UNKNOWN   (0ULL - 1)
 
#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
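ZSTD_CONTENTSIZE_UNKNOWN and ZSTD_CONTENTSIZE_ERROR are the two reserved return values of ZSTD_getFrameContentSize(); they sit at the very top of the unsigned-long-long range so they cannot collide with a real size. Typical handling, assuming src/srcSize hold a complete frame header:

    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) {
        /* not a valid zstd frame */
    } else if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
        /* header omits the size: fall back to streaming decompression */
    } else {
        /* frame decompresses to exactly contentSize bytes */
    }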
 
#define ZSTD_MAX_INPUT_SIZE   ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
 
#define ZSTD_COMPRESSBOUND(srcSize)   (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
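The bound is srcSize plus a 1/256 overhead plus a small-input margin that shrinks to zero at 128 KB; per the comment above, this shape guarantees bound(A) + bound(B) <= bound(A+B) once A and B reach 128 KB. Worked example for srcSize = 1000 (the array name is ours):

    /* 1000 + (1000>>8) + (((128<<10) - 1000) >> 11)
     * = 1000 + 3 + (130072 >> 11)
     * = 1000 + 3 + 63
     * = 1066 bytes                                   */
    char workspace[ZSTD_COMPRESSBOUND(1000)];  /* macro form, usable for static allocation;
                                                  ZSTD_compressBound(1000) returns the same value */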
 
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
 
#define ZSTDLIB_STATIC_API   ZSTDLIB_VISIBLE
 
#define ZSTD_FRAMEHEADERSIZE_PREFIX(format)   ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
 
#define ZSTD_FRAMEHEADERSIZE_MIN(format)   ((format) == ZSTD_f_zstd1 ? 6 : 2)
 
#define ZSTD_FRAMEHEADERSIZE_MAX   18 /* can be useful for static allocation */
 
#define ZSTD_SKIPPABLEHEADERSIZE   8
 
#define ZSTD_WINDOWLOG_MAX_32   30
 
#define ZSTD_WINDOWLOG_MAX_64   31
 
#define ZSTD_WINDOWLOG_MAX   ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
 
#define ZSTD_WINDOWLOG_MIN   10
 
#define ZSTD_HASHLOG_MAX   ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
 
#define ZSTD_HASHLOG_MIN   6
 
#define ZSTD_CHAINLOG_MAX_32   29
 
#define ZSTD_CHAINLOG_MAX_64   30
 
#define ZSTD_CHAINLOG_MAX   ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
 
#define ZSTD_CHAINLOG_MIN   ZSTD_HASHLOG_MIN
 
#define ZSTD_SEARCHLOG_MAX   (ZSTD_WINDOWLOG_MAX-1)
 
#define ZSTD_SEARCHLOG_MIN   1
 
#define ZSTD_MINMATCH_MAX   7 /* only for ZSTD_fast, other strategies are limited to 6 */
 
#define ZSTD_MINMATCH_MIN   3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */
 
#define ZSTD_TARGETLENGTH_MAX   ZSTD_BLOCKSIZE_MAX
 
#define ZSTD_TARGETLENGTH_MIN   0 /* note : comparing this constant to an unsigned results in a tautological test */
 
#define ZSTD_STRATEGY_MIN   ZSTD_fast
 
#define ZSTD_STRATEGY_MAX   ZSTD_btultra2
 
#define ZSTD_BLOCKSIZE_MAX_MIN   (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */
 
#define ZSTD_OVERLAPLOG_MIN   0
 
#define ZSTD_OVERLAPLOG_MAX   9
 
#define ZSTD_WINDOWLOG_LIMIT_DEFAULT
 
#define ZSTD_LDM_HASHLOG_MIN   ZSTD_HASHLOG_MIN
 
#define ZSTD_LDM_HASHLOG_MAX   ZSTD_HASHLOG_MAX
 
#define ZSTD_LDM_MINMATCH_MIN   4
 
#define ZSTD_LDM_MINMATCH_MAX   4096
 
#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
 
#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
 
#define ZSTD_LDM_HASHRATELOG_MIN   0
 
#define ZSTD_LDM_HASHRATELOG_MAX   (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
 
#define ZSTD_TARGETCBLOCKSIZE_MIN   1340 /* suitable to fit into an ethernet / wifi / 4G transport frame */
 
#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
 
#define ZSTD_SRCSIZEHINT_MIN   0
 
#define ZSTD_SRCSIZEHINT_MAX   INT_MAX
 
#define ZSTD_paramSwitch_e   ZSTD_ParamSwitch_e /* old name */
 
#define ZSTD_frameType_e   ZSTD_FrameType_e /* old name */
 
#define ZSTD_frameHeader   ZSTD_FrameHeader /* old name */
 
#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize)
 
#define ZSTD_sequenceFormat_e   ZSTD_SequenceFormat_e /* old name */
 
#define ZSTD_c_rsyncable   ZSTD_c_experimentalParam1
 
#define ZSTD_c_format   ZSTD_c_experimentalParam2
 
#define ZSTD_c_forceMaxWindow   ZSTD_c_experimentalParam3
 
#define ZSTD_c_forceAttachDict   ZSTD_c_experimentalParam4
 
#define ZSTD_c_literalCompressionMode   ZSTD_c_experimentalParam5
 
#define ZSTD_c_srcSizeHint   ZSTD_c_experimentalParam7
 
#define ZSTD_c_enableDedicatedDictSearch   ZSTD_c_experimentalParam8
 
#define ZSTD_c_stableInBuffer   ZSTD_c_experimentalParam9
 
#define ZSTD_c_stableOutBuffer   ZSTD_c_experimentalParam10
 
#define ZSTD_c_blockDelimiters   ZSTD_c_experimentalParam11
 
#define ZSTD_c_validateSequences   ZSTD_c_experimentalParam12
 
#define ZSTD_BLOCKSPLITTER_LEVEL_MAX   6
 
#define ZSTD_c_blockSplitterLevel   ZSTD_c_experimentalParam20
 
#define ZSTD_c_splitAfterSequences   ZSTD_c_experimentalParam13
 
#define ZSTD_c_useRowMatchFinder   ZSTD_c_experimentalParam14
 
#define ZSTD_c_deterministicRefPrefix   ZSTD_c_experimentalParam15
 
#define ZSTD_c_prefetchCDictTables   ZSTD_c_experimentalParam16
 
#define ZSTD_c_enableSeqProducerFallback   ZSTD_c_experimentalParam17
 
#define ZSTD_c_maxBlockSize   ZSTD_c_experimentalParam18
 
#define ZSTD_c_repcodeResolution   ZSTD_c_experimentalParam19
 
#define ZSTD_c_searchForExternalRepcodes   ZSTD_c_experimentalParam19 /* older name */
 
#define ZSTD_d_format   ZSTD_d_experimentalParam1
 
#define ZSTD_d_stableOutBuffer   ZSTD_d_experimentalParam2
 
#define ZSTD_d_forceIgnoreChecksum   ZSTD_d_experimentalParam3
 
#define ZSTD_d_refMultipleDDicts   ZSTD_d_experimentalParam4
 
#define ZSTD_d_disableHuffmanAssembly   ZSTD_d_experimentalParam5
 
#define ZSTD_d_maxBlockSize   ZSTD_d_experimentalParam6
 
#define ZSTD_SEQUENCE_PRODUCER_ERROR   ((size_t)(-1))
 
#define ZSTD_ALLOCATIONS_H
 
#define POOL_H
 
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
 
#define ZSTD_DEPS_NEED_MALLOC
 
#define ZSTD_CCOMMON_H_MODULE
 
#define ZSTD_COMMON_CPU_H
 
#define X(name, r, bit)
 
#define C(name, bit)   X(name, f1c, bit)
 
#define D(name, bit)   X(name, f1d, bit)
 
#define B(name, bit)   X(name, f7b, bit)
 
#define C(name, bit)   X(name, f7c, bit)
 
#define ZSTD_STATIC_LINKING_ONLY
 
#define FSE_STATIC_LINKING_ONLY
 
#define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
 
#define XXH_NO_XXH3
 
#define XXH_INLINE_ALL_31684351384
 
#define XXH_STATIC_LINKING_ONLY
 
#define XXH_PUBLIC_API   static
 
#define XXH_NAMESPACE   XXH_INLINE_
 
#define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
 
#define XXH_OK   XXH_IPREF(XXH_OK)
 
#define XXH_ERROR   XXH_IPREF(XXH_ERROR)
 
#define XXH_errorcode   XXH_IPREF(XXH_errorcode)
 
#define XXH32_canonical_t   XXH_IPREF(XXH32_canonical_t)
 
#define XXH64_canonical_t   XXH_IPREF(XXH64_canonical_t)
 
#define XXH128_canonical_t   XXH_IPREF(XXH128_canonical_t)
 
#define XXH32_state_s   XXH_IPREF(XXH32_state_s)
 
#define XXH32_state_t   XXH_IPREF(XXH32_state_t)
 The opaque state struct for the XXH32 streaming API.
 
#define XXH64_state_s   XXH_IPREF(XXH64_state_s)
 
#define XXH64_state_t   XXH_IPREF(XXH64_state_t)
 
#define XXH3_state_s   XXH_IPREF(XXH3_state_s)
 
#define XXH3_state_t   XXH_IPREF(XXH3_state_t)
 
#define XXH128_hash_t   XXH_IPREF(XXH128_hash_t)
 
#define XXHASH_H_5627135585666179   1
 
#define XXH_CAT(A, B)   A##B
 
#define XXH_NAME2(A, B)   XXH_CAT(A,B)
 
#define XXH_versionNumber   XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
 
#define XXH32   XXH_NAME2(XXH_NAMESPACE, XXH32)
 
#define XXH32_createState(void)   XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
 Allocates an XXH32_state_t.
 
#define XXH32_freeState   XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
 
#define XXH32_reset   XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
 
#define XXH32_update   XXH_NAME2(XXH_NAMESPACE, XXH32_update)
 
#define XXH32_digest   XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
 
#define XXH32_copyState   XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
 
#define XXH32_canonicalFromHash   XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
 
#define XXH32_hashFromCanonical   XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
 
#define XXH64   XXH_NAME2(XXH_NAMESPACE, XXH64)
 
#define XXH64_createState(void)   XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
 Allocates an XXH64_state_t.
 
#define XXH64_freeState   XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
 
#define XXH64_reset   XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
 
#define XXH64_update   XXH_NAME2(XXH_NAMESPACE, XXH64_update)
 
#define XXH64_digest   XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
 
#define XXH64_copyState   XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
 
#define XXH64_canonicalFromHash   XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
 
#define XXH64_hashFromCanonical   XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
 
#define XXH3_64bits   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
 
#define XXH3_64bits_withSecret   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
 
#define XXH3_64bits_withSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
 
#define XXH3_64bits_withSecretandSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
 
#define XXH3_createState   XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
 
#define XXH3_freeState   XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
 
#define XXH3_copyState   XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
 
#define XXH3_64bits_reset   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
 
#define XXH3_64bits_reset_withSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
 
#define XXH3_64bits_reset_withSecret   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
 
#define XXH3_64bits_reset_withSecretandSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
 
#define XXH3_64bits_update   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
 
#define XXH3_64bits_digest   XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
 
#define XXH3_generateSecret   XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
 
#define XXH3_generateSecret_fromSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
 
#define XXH128   XXH_NAME2(XXH_NAMESPACE, XXH128)
 
#define XXH3_128bits   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
 
#define XXH3_128bits_withSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
 
#define XXH3_128bits_withSecret   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
 
#define XXH3_128bits_withSecretandSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
 
#define XXH3_128bits_reset   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
 
#define XXH3_128bits_reset_withSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
 
#define XXH3_128bits_reset_withSecret   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
 
#define XXH3_128bits_reset_withSecretandSeed   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
 
#define XXH3_128bits_update   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
 
#define XXH3_128bits_digest   XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
 
#define XXH128_isEqual   XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
 
#define XXH128_cmp   XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
 
#define XXH128_canonicalFromHash   XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
 
#define XXH128_hashFromCanonical   XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
 
#define XXH_CONSTF   /* disable */
 
#define XXH_PUREF
 
#define XXH_MALLOCF
 
#define XXH_VERSION_MAJOR   0
 
#define XXH_VERSION_MINOR   8
 
#define XXH_VERSION_RELEASE   2
 
#define XXH_VERSION_NUMBER   (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
 
#define XXHASH_H_STATIC_13879238742
 
#define XXH_IMPLEMENTATION
 
#define XXH_IMPLEM_13a8737387
 
#define XXH_SIZE_OPT   0
 
#define XXH_FORCE_ALIGN_CHECK   1
 
#define XXH_NO_INLINE_HINTS   0
 
#define XXH3_INLINE_SECRET   1
 
#define XXH32_ENDJMP   0
 
#define XXH_FORCE_INLINE   static
 
#define XXH_NO_INLINE   static
 
#define XXH3_WITH_SECRET_INLINE   XXH_FORCE_INLINE
 
#define XXH_DEBUGLEVEL   DEBUGLEVEL
 Sets the debugging level.
 
#define XXH_ASSERT(c)   XXH_ASSUME(c)
 
#define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m)   do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
 
#define XXH_STATIC_ASSERT(c)   XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
 
#define XXH_COMPILER_GUARD(var)   ((void)0)
 
#define XXH_COMPILER_GUARD_CLANG_NEON(var)   ((void)0)
 
#define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
 Whether the target is little endian.
 
#define XXH_GCC_VERSION   (__GNUC__ * 100 + __GNUC_MINOR__)
 
#define XXH_HAS_BUILTIN(x)   __has_builtin(x)
 
#define XXH_UNREACHABLE()
 
#define XXH_ASSUME(c)   if (!(c)) { XXH_UNREACHABLE(); }
 
#define XXH_rotl32(x, r)   (((x) << (r)) | ((x) >> (32 - (r))))
 
#define XXH_rotl64(x, r)   (((x) << (r)) | ((x) >> (64 - (r))))
 
#define XXH_PRIME32_1   0x9E3779B1U
 
#define XXH_PRIME32_2   0x85EBCA77U
 
#define XXH_PRIME32_3   0xC2B2AE3DU
 
#define XXH_PRIME32_4   0x27D4EB2FU
 
#define XXH_PRIME32_5   0x165667B1U
 
#define XXH_get32bits(p)   XXH_readLE32_align(p, align)
 
#define XXH_PROCESS1
 
#define XXH_PROCESS4
 
#define XXH_PRIME64_1   0x9E3779B185EBCA87ULL
 
#define XXH_PRIME64_2   0xC2B2AE3D27D4EB4FULL
 
#define XXH_PRIME64_3   0x165667B19E3779F9ULL
 
#define XXH_PRIME64_4   0x85EBCA77C2B2AE63ULL
 
#define XXH_PRIME64_5   0x27D4EB2F165667C5ULL
 
#define XXH_get64bits(p)   XXH_readLE64_align(p, align)
 
#define ZSTD_TRACE_H
 
#define ZSTD_HAVE_WEAK_SYMBOLS   0
 
#define ZSTD_WEAK_ATTR
 
#define ZSTD_STATIC_ASSERT(c)   DEBUG_STATIC_ASSERT(c)
 
#define ZSTD_isError   ERR_isError /* for inlining */
 
#define FSE_isError   ERR_isError
 
#define HUF_isError   ERR_isError
 
#define MIN(a, b)   ((a)<(b) ? (a) : (b))
 
#define MAX(a, b)   ((a)>(b) ? (a) : (b))
 
#define BOUNDED(min, val, max)   (MAX(min,MIN(val,max)))
 
#define ZSTD_OPT_NUM   (1<<12)
 
#define ZSTD_REP_NUM   3 /* number of repcodes */
 
#define KB   *(1 <<10)
 
#define MB   *(1 <<20)
 
#define GB   *(1U<<30)
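KB, MB and GB are deliberately unusual: each expands to a trailing multiplication, so the literal is written before the unit and the pair reads like a quantity. For example:

    /* (512 KB) -> (512 *(1 <<10)) == 524288
     * (16 MB)  -> (16  *(1 <<20)) == 16777216
     * This is how ZSTDMT_JOBSIZE_MIN (512 KB) and ZSTD_INDEXOVERFLOW_MARGIN (16 MB) below are formed. */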
 
#define BIT7   128
 
#define BIT6   64
 
#define BIT5   32
 
#define BIT4   16
 
#define BIT1   2
 
#define BIT0   1
 
#define ZSTD_WINDOWLOG_ABSOLUTEMIN   10
 
#define ZSTD_FRAMEIDSIZE   4 /* magic number size */
 
#define ZSTD_BLOCKHEADERSIZE   3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
 
#define ZSTD_FRAMECHECKSUMSIZE   4
 
#define MIN_SEQUENCES_SIZE   1 /* nbSeq==0 */
 
#define MIN_CBLOCK_SIZE   (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
 
#define MIN_LITERALS_FOR_4_STREAMS   6
 
#define LONGNBSEQ   0x7F00
 
#define MINMATCH   3
 
#define Litbits   8
 
#define LitHufLog   11
 
#define MaxLit   ((1<<Litbits) - 1)
 
#define MaxML   52
 
#define MaxLL   35
 
#define DefaultMaxOff   28
 
#define MaxOff   31
 
#define MaxSeq   MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
 
#define MLFSELog   9
 
#define LLFSELog   9
 
#define OffFSELog   8
 
#define MaxFSELog   MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
 
#define MaxMLBits   16
 
#define MaxLLBits   16
 
#define ZSTD_MAX_HUF_HEADER_SIZE   128 /* header + <= 127 byte tree description */
 
#define ZSTD_MAX_FSE_HEADERS_SIZE   (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
 
#define LL_DEFAULTNORMLOG   6 /* for static allocation */
 
#define ML_DEFAULTNORMLOG   6 /* for static allocation */
 
#define OF_DEFAULTNORMLOG   5 /* for static allocation */
 
#define COPY8(d, s)   do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)
 
#define COPY16(d, s)   do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0)
 
#define WILDCOPY_OVERLENGTH   32
 
#define WILDCOPY_VECLEN   16
 
#define ZSTD_WORKSPACETOOLARGE_FACTOR   3
 
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION   128
 
#define HIST_WKSP_SIZE_U32   1024
 
#define HIST_WKSP_SIZE   (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
 
#define FSE_STATIC_LINKING_ONLY
 
#define ZSTD_DEPS_NEED_MALLOC
 
#define ZSTD_DEPS_NEED_MATH64
 
#define FSE_isError   ERR_isError
 
#define FSE_CAT(X, Y)   X##Y
 
#define FSE_FUNCTION_NAME(X, Y)   FSE_CAT(X,Y)
 
#define FSE_TYPE_NAME(X, Y)   FSE_CAT(X,Y)
 
#define FSE_FLUSHBITS(s)   (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
 
#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
 
#define HUF_isError   ERR_isError
 
#define HUF_STATIC_ASSERT(c)   DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
 
#define HUF_WORKSPACE_MAX_ALIGNMENT   8
 
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER   6
 
#define RANK_POSITION_TABLE_SIZE   192
 
#define RANK_POSITION_MAX_COUNT_LOG   32
 
#define RANK_POSITION_LOG_BUCKETS_BEGIN   ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
 
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF   (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)
 
#define STARTNODE   (HUF_SYMBOLVALUE_MAX+1)
 
#define HUF_BITS_IN_CONTAINER   (sizeof(size_t) * 8)
 
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE   4096
 
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO   10 /* Must be >= 2 */
 
#define ZSTD_COMPRESS_LITERALS_H
 
#define ZSTD_COMPRESS_H
 
#define ZSTD_CWKSP_H
 
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE   128
 
#define ZSTD_CWKSP_ALIGNMENT_BYTES   64
 
#define ZSTDMT_COMPRESS_H
 
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
 
#define ZSTDMT_NBWORKERS_MAX   ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256)
 
#define ZSTDMT_JOBSIZE_MIN   (512 KB)
 
#define ZSTDMT_JOBLOG_MAX   (MEM_32bits() ? 29 : 30)
 
#define ZSTDMT_JOBSIZE_MAX   (MEM_32bits() ? (512 MB) : (1024 MB))
 
#define ZSTD_PRESPLIT_H
 
#define ZSTD_SLIPBLOCK_WORKSPACESIZE   8208
 
#define kSearchStrength   8
 
#define HASH_READ_SIZE   8
 
#define ZSTD_DUBT_UNSORTED_MARK
 
#define ZSTD_OPT_SIZE   (ZSTD_OPT_NUM+3)
 
#define ZSTD_WINDOW_START_INDEX   2
 
#define ZSTD_ROW_HASH_CACHE_SIZE   8 /* Size of prefetching hash cache for row-based matchfinder */
 
#define LDM_BATCH_SIZE   64
 
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE   (sizeof(unsigned) * (MaxSeq + 2))
 
#define ENTROPY_WORKSPACE_SIZE   (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
 
#define TMP_WORKSPACE_SIZE   (MAX(ENTROPY_WORKSPACE_SIZE, ZSTD_SLIPBLOCK_WORKSPACESIZE))
 
#define ZSTD_MAX_NB_BLOCK_SPLITS   196
 
#define REPCODE1_TO_OFFBASE   REPCODE_TO_OFFBASE(1)
 
#define REPCODE2_TO_OFFBASE   REPCODE_TO_OFFBASE(2)
 
#define REPCODE3_TO_OFFBASE   REPCODE_TO_OFFBASE(3)
 
#define REPCODE_TO_OFFBASE(r)   (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
 
#define OFFSET_TO_OFFBASE(o)   (assert((o)>0), o + ZSTD_REP_NUM)
 
#define OFFBASE_IS_OFFSET(o)   ((o) > ZSTD_REP_NUM)
 
#define OFFBASE_IS_REPCODE(o)   ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
 
#define OFFBASE_TO_OFFSET(o)   (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
 
#define OFFBASE_TO_REPCODE(o)   (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */
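Stored sequences carry a single offBase field: values 1..ZSTD_REP_NUM (= 3) are repcode IDs, and any larger value is a literal offset shifted past the repcode range. Worked example:

    /* REPCODE1_TO_OFFBASE      == 1    (most recent repeat offset)
     * OFFSET_TO_OFFBASE(100)   == 103  (literal offset 100 + ZSTD_REP_NUM)
     * OFFBASE_IS_REPCODE(1)    -> true,  OFFBASE_TO_REPCODE(1)  -> 1
     * OFFBASE_IS_OFFSET(103)   -> true,  OFFBASE_TO_OFFSET(103) -> 100 */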
 
#define ZSTD_ROLL_HASH_CHAR_OFFSET   10
 
#define ZSTD_CURRENT_MAX   (MEM_64bits() ? 3500U MB : 2000U MB)
 
#define ZSTD_CHUNKSIZE_MAX
 
#define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY   0
 
#define ZSTD_SHORT_CACHE_TAG_BITS   8
 
#define ZSTD_SHORT_CACHE_TAG_MASK   ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
 
#define ZSTD_COMPRESS_SEQUENCES_H
 
#define ZSTD_COMPRESS_ADVANCED_H
 
#define BYTESCALE   256
 
#define BLOCKSIZE_MIN   3500
 
#define THRESHOLD_PENALTY_RATE   16
 
#define THRESHOLD_BASE   (THRESHOLD_PENALTY_RATE - 2)
 
#define THRESHOLD_PENALTY   3
 
#define HASHLENGTH   2
 
#define HASHLOG_MAX   10
 
#define HASHTABLESIZE   (1 << HASHLOG_MAX)
 
#define HASHMASK   (HASHTABLESIZE - 1)
 
#define KNUTH   0x9e3779b9
 
#define FP_RECORD(_rate)   ZSTD_recordFingerprint_##_rate
 
#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize)
 
#define CHUNKSIZE   (8 << 10)
 
#define SEGMENT_SIZE   512
 
#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
 
#define ZSTD_FAST_H
 
#define ZSTD_DOUBLE_FAST_H
 
#define ZSTD_COMPRESSBLOCK_DOUBLEFAST   ZSTD_compressBlock_doubleFast
 
#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE   ZSTD_compressBlock_doubleFast_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT   ZSTD_compressBlock_doubleFast_extDict
 
#define ZSTD_LAZY_H
 
#define ZSTD_LAZY_DDSS_BUCKET_LOG   2
 
#define ZSTD_ROW_HASH_TAG_BITS   8 /* nb bits to use for the tag */
 
#define ZSTD_COMPRESSBLOCK_GREEDY   ZSTD_compressBlock_greedy
 
#define ZSTD_COMPRESSBLOCK_GREEDY_ROW   ZSTD_compressBlock_greedy_row
 
#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE   ZSTD_compressBlock_greedy_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW   ZSTD_compressBlock_greedy_dictMatchState_row
 
#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH   ZSTD_compressBlock_greedy_dedicatedDictSearch
 
#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW   ZSTD_compressBlock_greedy_dedicatedDictSearch_row
 
#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT   ZSTD_compressBlock_greedy_extDict
 
#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW   ZSTD_compressBlock_greedy_extDict_row
 
#define ZSTD_COMPRESSBLOCK_LAZY   ZSTD_compressBlock_lazy
 
#define ZSTD_COMPRESSBLOCK_LAZY_ROW   ZSTD_compressBlock_lazy_row
 
#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE   ZSTD_compressBlock_lazy_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW   ZSTD_compressBlock_lazy_dictMatchState_row
 
#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH   ZSTD_compressBlock_lazy_dedicatedDictSearch
 
#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW   ZSTD_compressBlock_lazy_dedicatedDictSearch_row
 
#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT   ZSTD_compressBlock_lazy_extDict
 
#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW   ZSTD_compressBlock_lazy_extDict_row
 
#define ZSTD_COMPRESSBLOCK_LAZY2   ZSTD_compressBlock_lazy2
 
#define ZSTD_COMPRESSBLOCK_LAZY2_ROW   ZSTD_compressBlock_lazy2_row
 
#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE   ZSTD_compressBlock_lazy2_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW   ZSTD_compressBlock_lazy2_dictMatchState_row
 
#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH   ZSTD_compressBlock_lazy2_dedicatedDictSearch
 
#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW   ZSTD_compressBlock_lazy2_dedicatedDictSearch_row
 
#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT   ZSTD_compressBlock_lazy2_extDict
 
#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW   ZSTD_compressBlock_lazy2_extDict_row
 
#define ZSTD_COMPRESSBLOCK_BTLAZY2   ZSTD_compressBlock_btlazy2
 
#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE   ZSTD_compressBlock_btlazy2_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT   ZSTD_compressBlock_btlazy2_extDict
 
#define ZSTD_OPT_H
 
#define ZSTD_COMPRESSBLOCK_BTOPT   ZSTD_compressBlock_btopt
 
#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE   ZSTD_compressBlock_btopt_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT   ZSTD_compressBlock_btopt_extDict
 
#define ZSTD_COMPRESSBLOCK_BTULTRA   ZSTD_compressBlock_btultra
 
#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE   ZSTD_compressBlock_btultra_dictMatchState
 
#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT   ZSTD_compressBlock_btultra_extDict
 
#define ZSTD_COMPRESSBLOCK_BTULTRA2   ZSTD_compressBlock_btultra2
 
#define ZSTD_LDM_H
 
#define ZSTD_LDM_DEFAULT_WINDOW_LOG   ZSTD_WINDOWLOG_LIMIT_DEFAULT
 
#define ZSTD_COMPRESS_HEAPMODE   0
 
#define ZSTD_HASHLOG3_MAX   17
 
#define ZSTD_NO_CLEVEL   0
 
#define BOUNDCHECK(cParam, val)
 
#define CLAMP_TYPE(cParam, val, type)
 
#define CLAMP(cParam, val)   CLAMP_TYPE(cParam, val, unsigned)
 
#define ZSTD_INDEXOVERFLOW_MARGIN   (16 MB)
 
#define ZSTD_ROWSIZE   16
 
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO   20
 
#define COMPRESS_LITERALS_SIZE_MIN   63 /* heuristic */
 
#define MIN_SEQUENCES_BLOCK_SPLITTING   300
 
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF   (128 KB)
 
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER   (6ULL)
 
#define ZSTD_CLEVELS_H
 
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressionParameters */
 
#define ZSTD_MAX_CLEVEL   22
 
#define ZSTD_GEN_DFAST_FN(dictMode, mls)
 
#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov)
 
#define kLazySkippingStep   8
 
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
 
#define ZSTD_ROW_HASH_TAG_MASK   ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
 
#define ZSTD_ROW_HASH_MAX_ENTRIES   64 /* absolute maximum number of entries per row, for all configurations */
 
#define ZSTD_ROW_HASH_CACHE_MASK   (ZSTD_ROW_HASH_CACHE_SIZE - 1)
 
#define ZSTD_BT_SEARCH_FN(dictMode, mls)   ZSTD_BtFindBestMatch_##dictMode##_##mls
 
#define ZSTD_HC_SEARCH_FN(dictMode, mls)   ZSTD_HcFindBestMatch_##dictMode##_##mls
 
#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)   ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
 
#define ZSTD_SEARCH_FN_ATTRS   FORCE_NOINLINE
 
#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls)
 
#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls)
 
#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)
 
#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls)
 
#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode)
 
#define ZSTD_FOR_EACH_MLS(X, dictMode)
 
#define ZSTD_FOR_EACH_DICT_MODE(X, ...)
 
#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls)
 
#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls)
 
#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog)
 
#define ZSTD_SWITCH_MLS(X, dictMode)
 
#define ZSTD_SWITCH_ROWLOG(dictMode, mls)
 
#define ZSTD_SWITCH_SEARCH_METHOD(dictMode)
 
#define ZSTD_LDM_GEARTAB_H
 
#define LDM_BUCKET_SIZE_LOG   4
 
#define LDM_MIN_MATCH_LENGTH   64
 
#define LDM_HASH_RLOG   7
 
#define GEAR_ITER_ONCE()
 
#define GEAR_ITER_ONCE()
 
#define ZSTD_LITFREQ_ADD   2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
 
#define ZSTD_MAX_PRICE   (1<<30)
 
#define ZSTD_PREDEF_THRESHOLD   8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
 
#define BITCOST_ACCURACY   8
 
#define BITCOST_MULTIPLIER   (1 << BITCOST_ACCURACY)
 
#define WEIGHT(stat, opt)   ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
 
#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)   ZSTD_btGetAllMatches_##dictMode##_##mls
 
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls)
 
#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode)
 
#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode)
 
#define LIT_PRICE(_p)   (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel)
 
#define LL_PRICE(_l)   (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel)
 
#define LL_INCPRICE(_l)   (LL_PRICE(_l) - LL_PRICE(_l-1))
 
#define ZSTD_RESIZE_SEQPOOL   0
 
#define ZSTD_PTHREAD_MUTEX_LOCK(m)   ZSTD_pthread_mutex_lock(m)
 
#define DEBUG_PRINTHEX(l, p, n)   do { } while (0)
 
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers)   (2*(nbWorkers) + 3)
 
#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers)   (nbWorkers)
 
#define JOB_ERROR(e)
 
#define RSYNC_LENGTH   32
 
#define RSYNC_MIN_BLOCK_LOG   ZSTD_BLOCKSIZELOG_MAX
 
#define RSYNC_MIN_BLOCK_SIZE   (1<<RSYNC_MIN_BLOCK_LOG)
 
#define HUF_DECODER_FAST_TABLELOG   11
 
#define HUF_ENABLE_FAST_DECODE   1
 
#define HUF_FAST_BMI2_ATTRS
 
#define HUF_EXTERN_C
 
#define HUF_ASM_DECL   HUF_EXTERN_C
 
#define HUF_NEED_BMI2_FUNCTION   0
 
#define HUF_isError   ERR_isError
 
#define HUF_ALIGN(x, a)   HUF_ALIGN_MASK((x), (a) - 1)
 
#define HUF_ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))
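HUF_ALIGN rounds a size up to the next multiple of a power-of-two alignment. Worked example:

    /* HUF_ALIGN(13, 8) -> HUF_ALIGN_MASK(13, 7) -> (13 + 7) & ~7 -> 16
     * HUF_ALIGN(16, 8) ->                          (16 + 7) & ~7 -> 16  (already aligned) */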
 
#define HUF_DGEN(fn)
 
#define HUF_4X_FOR_EACH_STREAM(X)
 
#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var)
 
#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)    do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0)
 
#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)
 
#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr)
 
#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol)
 
#define HUF_4X1_RELOAD_STREAM(_stream)
 
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)    do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0)
 
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)
 
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr)
 
#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3)
 
#define HUF_4X2_RELOAD_STREAM(_stream)
 
#define FSE_STATIC_LINKING_ONLY
 
#define ZSTD_DECOMPRESS_INTERNAL_H
 
#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
 
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE   (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
 
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32   ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
 
#define ZSTD_HUFFDTABLE_CAPACITY_LOG   12
 
#define ZSTD_DECODER_INTERNAL_BUFFER   (1 << 16)
 
#define ZSTD_LBMIN   64
 
#define ZSTD_LBMAX   (128 << 10)
 
#define ZSTD_LITBUFFEREXTRASIZE   BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
 
#define ZSTD_DDICT_H
 
#define ZSTD_HEAPMODE   1
 
#define ZSTD_MAXWINDOWSIZE_DEFAULT   (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
 
#define ZSTD_NO_FORWARD_PROGRESS_MAX   16
 
#define FSE_STATIC_LINKING_ONLY
 
#define ZSTD_DEC_BLOCK_H
 
#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT   4
 
#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT
 
#define DDICT_HASHSET_TABLE_BASE_SIZE   64
 
#define DDICT_HASHSET_RESIZE_FACTOR   2
 
#define CHECK_DBOUNDS(p, v)
 
#define FSE_STATIC_LINKING_ONLY
 
#define LONG_OFFSETS_MAX_EXTRA_BITS_32
 
#define STORED_SEQS   8
 
#define STORED_SEQS_MASK   (STORED_SEQS-1)
 
#define ADVANCED_SEQS   STORED_SEQS
 
#define __STDC_WANT_LIB_EXT1__   1 /* request C11 Annex K, which includes qsort_s() */
 
#define ZDICT_STATIC_LINKING_ONLY
 
#define ZSTD_ZDICT_H
 
#define ZDICTLIB_VISIBLE
 
#define ZDICTLIB_HIDDEN
 
#define ZDICTLIB_API   ZDICTLIB_VISIBLE
 
#define ZSTD_ZDICT_H_STATIC
 
#define ZDICTLIB_STATIC_API   ZDICTLIB_VISIBLE
 
#define ZDICT_DICTSIZE_MIN   256
 
#define ZDICT_CONTENTSIZE_MIN   128
 
#define ZDICT_GCC_VERSION   (__GNUC__ * 100 + __GNUC_MINOR__)
 
#define ZDICT_DEPRECATED(message)
 
#define COVER_MAX_SAMPLES_SIZE   (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
 
#define COVER_DEFAULT_SPLITPOINT   1.0
 
#define ZDICT_QSORT_MIN   0
 
#define ZDICT_QSORT_C90   ZDICT_QSORT_MIN
 
#define ZDICT_QSORT_GNU   1
 
#define ZDICT_QSORT_APPLE   2
 
#define ZDICT_QSORT_MSVC   3
 
#define ZDICT_QSORT_C11   ZDICT_QSORT_MAX
 
#define ZDICT_QSORT_MAX   4
 
#define ZDICT_QSORT   ZDICT_QSORT_C90 /* uses standard qsort() which is not re-entrant (requires global variable) */
 
#define DISPLAY(...)
 
#define DISPLAYLEVEL(l, ...)
 
#define DISPLAYUPDATE(lastUpdateTime, l, ...)
 
#define MAP_EMPTY_VALUE   ((U32)-1)
 
#define _DIVSUFSORT_H   1
 
#define INLINE   __inline
 
#define ALPHABET_SIZE   (256)
 
#define BUCKET_A_SIZE   (ALPHABET_SIZE)
 
#define BUCKET_B_SIZE   (ALPHABET_SIZE * ALPHABET_SIZE)
 
#define SS_INSERTIONSORT_THRESHOLD   (8)
 
#define SS_BLOCKSIZE   (1024)
 
#define SS_MISORT_STACKSIZE   (16)
 
#define SS_SMERGE_STACKSIZE   (32)
 
#define TR_INSERTIONSORT_THRESHOLD   (8)
 
#define TR_STACKSIZE   (64)
 
#define SWAP(_a, _b)   do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
 
#define STACK_PUSH(_a, _b, _c, _d)
 
#define STACK_PUSH5(_a, _b, _c, _d, _e)
 
#define STACK_POP(_a, _b, _c, _d)
 
#define STACK_POP5(_a, _b, _c, _d, _e)
 
#define BUCKET_A(_c0)   bucket_A[(_c0)]
 
#define BUCKET_B(_c0, _c1)   (bucket_B[((_c1) << 8) | (_c0)])
 
#define BUCKET_BSTAR(_c0, _c1)   (bucket_B[((_c0) << 8) | (_c1)])
 
#define STACK_SIZE   SS_MISORT_STACKSIZE
 
#define STACK_SIZE   SS_SMERGE_STACKSIZE
 
#define GETIDX(a)   ((0 <= (a)) ? (a) : (~(a)))
 
#define MERGE_CHECK(a, b, c)
 
#define STACK_SIZE   TR_STACKSIZE
 
#define FASTCOVER_MAX_SAMPLES_SIZE   (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
 
#define FASTCOVER_MAX_F   31
 
#define FASTCOVER_MAX_ACCEL   10
 
#define FASTCOVER_DEFAULT_SPLITPOINT   0.75
 
#define DEFAULT_F   20
 
#define DEFAULT_ACCEL   1
 
#define DISPLAY(...)
 
#define DISPLAYLEVEL(l, ...)
 
#define DISPLAYUPDATE(lastUpdateTime, l, ...)
 
#define MINRATIO   4 /* minimum number of occurrences required for selection into the dictionary */
 
#define ZDICT_MAX_SAMPLES_SIZE   (2000U << 20)
 
#define ZDICT_MIN_SAMPLES_SIZE   (ZDICT_CONTENTSIZE_MIN * MINRATIO)
 
#define _FILE_OFFSET_BITS   64
 
#define _LARGEFILE64_SOURCE
 
#define KB   *(1 <<10)
 
#define MB   *(1 <<20)
 
#define GB   *(1U<<30)
 
#define DICTLISTSIZE_DEFAULT   10000
 
#define NOISELENGTH   32
 
#define DISPLAY(...)   do { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } while (0)
 
#define DISPLAYLEVEL(l, ...)   do { if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } } while (0) /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
 
#define LLIMIT   64 /* heuristic determined experimentally */
 
#define MINMATCHLENGTH   7 /* heuristic determined experimentally */
 
#define DISPLAYUPDATE(l, ...)
 
#define MAXREPOFFSET   1024
 
#define OFFCODE_MAX   30 /* only applicable to first block */
 
#define HBUFFSIZE   256 /* should prove large enough for all entropy headers */
 

Typedefs

typedef unsigned char BYTE
 
typedef unsigned char U8
 
typedef signed char S8
 
typedef unsigned short U16
 
typedef signed short S16
 
typedef unsigned int U32
 
typedef signed int S32
 
typedef unsigned long long U64
 
typedef signed long long S64
 
typedef ZSTD_ErrorCode ERR_enum
 
typedef unsigned FSE_CTable
 
typedef unsigned FSE_DTable
 
typedef size_t BitContainerType
 
typedef size_t HUF_CElt
 
typedef U32 HUF_DTable
 
typedef struct ZSTD_CCtx_s ZSTD_CCtx
 
typedef struct ZSTD_DCtx_s ZSTD_DCtx
 
typedef struct ZSTD_inBuffer_s ZSTD_inBuffer
 
typedef struct ZSTD_outBuffer_s ZSTD_outBuffer
 
typedef ZSTD_CCtx ZSTD_CStream
 
typedef ZSTD_DCtx ZSTD_DStream
 
typedef struct ZSTD_CDict_s ZSTD_CDict
 
typedef struct ZSTD_DDict_s ZSTD_DDict
 
typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params
 
typedef void *(* ZSTD_allocFunction) (void *opaque, size_t size)
 
typedef void(* ZSTD_freeFunction) (void *opaque, void *address)
 
typedef struct POOL_ctx_s ZSTD_threadPool
 
typedef size_t(* ZSTD_sequenceProducer_F) (void *sequenceProducerState, ZSTD_Sequence *outSeqs, size_t outSeqsCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, int compressionLevel, size_t windowSize)
 
typedef struct POOL_ctx_s POOL_ctx
 
typedef void(* POOL_function) (void *)
 
typedef struct POOL_job_s POOL_job
 
typedef struct XXH32_state_s XXH32_state_t
 
typedef struct XXH64_state_s XXH64_state_t
 The opaque state struct for the XXH64 streaming API.
 
typedef unsigned char xxh_u8
 
typedef XXH32_hash_t xxh_u32
 
typedef struct nodeElt_s nodeElt
 
typedef nodeElt huffNodeTable[2 *(HUF_SYMBOLVALUE_MAX+1)]
 
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx
 
typedef struct ZSTD_prefixDict_s ZSTD_prefixDict
 
typedef struct SeqDef_s SeqDef
 
typedef struct ZSTD_MatchState_t ZSTD_MatchState_t
 
typedef size_t(* ZSTD_BlockCompressor_f) (ZSTD_MatchState_t *bs, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
typedef struct repcodes_s Repcodes_t
 
typedef void(* RecordEvents_f) (Fingerprint *fp, const void *src, size_t srcSize)
 
typedef size_t(* ZSTD_SequenceCopier_f) (ZSTD_CCtx *cctx, ZSTD_SequencePosition *seqPos, const ZSTD_Sequence *const inSeqs, size_t inSeqsSize, const void *src, size_t blockSize, ZSTD_ParamSwitch_e externalRepSearch)
 
typedef int(* ZSTD_match4Found) (const BYTE *currentPtr, const BYTE *matchAddress, U32 matchIdx, U32 idxLowLimit)
 
typedef U64 ZSTD_VecMask
 
typedef U32(* ZSTD_getAllMatchesFn) (ZSTD_match_t *, ZSTD_MatchState_t *, U32 *, const BYTE *, const BYTE *, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat)
 
typedef struct buffer_s Buffer
 
typedef struct ZSTDMT_bufferPool_s ZSTDMT_bufferPool
 
typedef ZSTDMT_bufferPool ZSTDMT_seqPool
 
typedef size_t(* HUF_DecompressUsingDTableFn) (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
 
typedef void(* HUF_DecompressFastLoopFn) (HUF_DecompressFastArgs *)
 
typedef U32 rankValCol_t[HUF_TABLELOG_MAX+1]
 
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]
 
typedef struct COVER_best_s COVER_best_t
 
typedef struct COVER_dictSelection COVER_dictSelection_t
 
typedef struct COVER_map_pair_t_s COVER_map_pair_t
 
typedef struct COVER_map_s COVER_map_t
 
typedef struct COVER_tryParameters_data_s COVER_tryParameters_data_t
 
typedef struct _trbudget_t trbudget_t
 
typedef struct FASTCOVER_tryParameters_data_s FASTCOVER_tryParameters_data_t
 
typedef unsigned long long XXH64_hash_t
 

Enumerations

enum  ZSTD_ErrorCode {
  ZSTD_error_no_error = 0 , ZSTD_error_GENERIC = 1 , ZSTD_error_prefix_unknown = 10 , ZSTD_error_version_unsupported = 12 ,
  ZSTD_error_frameParameter_unsupported = 14 , ZSTD_error_frameParameter_windowTooLarge = 16 , ZSTD_error_corruption_detected = 20 , ZSTD_error_checksum_wrong = 22 ,
  ZSTD_error_literals_headerWrong = 24 , ZSTD_error_dictionary_corrupted = 30 , ZSTD_error_dictionary_wrong = 32 , ZSTD_error_dictionaryCreation_failed = 34 ,
  ZSTD_error_parameter_unsupported = 40 , ZSTD_error_parameter_combination_unsupported = 41 , ZSTD_error_parameter_outOfBound = 42 , ZSTD_error_tableLog_tooLarge = 44 ,
  ZSTD_error_maxSymbolValue_tooLarge = 46 , ZSTD_error_maxSymbolValue_tooSmall = 48 , ZSTD_error_cannotProduce_uncompressedBlock = 49 , ZSTD_error_stabilityCondition_notRespected = 50 ,
  ZSTD_error_stage_wrong = 60 , ZSTD_error_init_missing = 62 , ZSTD_error_memory_allocation = 64 , ZSTD_error_workSpace_tooSmall = 66 ,
  ZSTD_error_dstSize_tooSmall = 70 , ZSTD_error_srcSize_wrong = 72 , ZSTD_error_dstBuffer_null = 74 , ZSTD_error_noForwardProgress_destFull = 80 ,
  ZSTD_error_noForwardProgress_inputEmpty = 82 , ZSTD_error_frameIndex_tooLarge = 100 , ZSTD_error_seekableIO = 102 , ZSTD_error_dstBuffer_wrong = 104 ,
  ZSTD_error_srcBuffer_wrong = 105 , ZSTD_error_sequenceProducer_failed = 106 , ZSTD_error_externalSequences_invalid = 107 , ZSTD_error_maxCode = 120
}
 
enum  BIT_DStream_status { BIT_DStream_unfinished = 0 , BIT_DStream_endOfBuffer = 1 , BIT_DStream_completed = 2 , BIT_DStream_overflow = 3 }
 
enum  FSE_repeat { FSE_repeat_none , FSE_repeat_check , FSE_repeat_valid }
 
enum  HUF_flags_e {
  HUF_flags_bmi2 = (1 << 0) , HUF_flags_optimalDepth = (1 << 1) , HUF_flags_preferRepeat = (1 << 2) , HUF_flags_suspectUncompressible = (1 << 3) ,
  HUF_flags_disableAsm = (1 << 4) , HUF_flags_disableFast = (1 << 5)
}
 
enum  HUF_repeat { HUF_repeat_none , HUF_repeat_check , HUF_repeat_valid }
 
enum  ZSTD_strategy {
  ZSTD_fast =1 , ZSTD_dfast =2 , ZSTD_greedy =3 , ZSTD_lazy =4 ,
  ZSTD_lazy2 =5 , ZSTD_btlazy2 =6 , ZSTD_btopt =7 , ZSTD_btultra =8 ,
  ZSTD_btultra2 =9
}
 
enum  ZSTD_cParameter {
  ZSTD_c_compressionLevel =100 , ZSTD_c_windowLog =101 , ZSTD_c_hashLog =102 , ZSTD_c_chainLog =103 ,
  ZSTD_c_searchLog =104 , ZSTD_c_minMatch =105 , ZSTD_c_targetLength =106 , ZSTD_c_strategy =107 ,
  ZSTD_c_targetCBlockSize =130 , ZSTD_c_enableLongDistanceMatching =160 , ZSTD_c_ldmHashLog =161 , ZSTD_c_ldmMinMatch =162 ,
  ZSTD_c_ldmBucketSizeLog =163 , ZSTD_c_ldmHashRateLog =164 , ZSTD_c_contentSizeFlag =200 , ZSTD_c_checksumFlag =201 ,
  ZSTD_c_dictIDFlag =202 , ZSTD_c_nbWorkers =400 , ZSTD_c_jobSize =401 , ZSTD_c_overlapLog =402 ,
  ZSTD_c_experimentalParam1 =500 , ZSTD_c_experimentalParam2 =10 , ZSTD_c_experimentalParam3 =1000 , ZSTD_c_experimentalParam4 =1001 ,
  ZSTD_c_experimentalParam5 =1002 , ZSTD_c_experimentalParam7 =1004 , ZSTD_c_experimentalParam8 =1005 , ZSTD_c_experimentalParam9 =1006 ,
  ZSTD_c_experimentalParam10 =1007 , ZSTD_c_experimentalParam11 =1008 , ZSTD_c_experimentalParam12 =1009 , ZSTD_c_experimentalParam13 =1010 ,
  ZSTD_c_experimentalParam14 =1011 , ZSTD_c_experimentalParam15 =1012 , ZSTD_c_experimentalParam16 =1013 , ZSTD_c_experimentalParam17 =1014 ,
  ZSTD_c_experimentalParam18 =1015 , ZSTD_c_experimentalParam19 =1016 , ZSTD_c_experimentalParam20 =1017
}
 
enum  ZSTD_ResetDirective { ZSTD_reset_session_only = 1 , ZSTD_reset_parameters = 2 , ZSTD_reset_session_and_parameters = 3 }
 
enum  ZSTD_dParameter {
  ZSTD_d_windowLogMax =100 , ZSTD_d_experimentalParam1 =1000 , ZSTD_d_experimentalParam2 =1001 , ZSTD_d_experimentalParam3 =1002 ,
  ZSTD_d_experimentalParam4 =1003 , ZSTD_d_experimentalParam5 =1004 , ZSTD_d_experimentalParam6 =1005
}
 
enum  ZSTD_EndDirective { ZSTD_e_continue =0 , ZSTD_e_flush =1 , ZSTD_e_end =2 }
 
enum  ZSTD_dictContentType_e { ZSTD_dct_auto = 0 , ZSTD_dct_rawContent = 1 , ZSTD_dct_fullDict = 2 }
 
enum  ZSTD_dictLoadMethod_e { ZSTD_dlm_byCopy = 0 , ZSTD_dlm_byRef = 1 }
 
enum  ZSTD_format_e { ZSTD_f_zstd1 = 0 , ZSTD_f_zstd1_magicless = 1 }
 
enum  ZSTD_forceIgnoreChecksum_e { ZSTD_d_validateChecksum = 0 , ZSTD_d_ignoreChecksum = 1 }
 
enum  ZSTD_refMultipleDDicts_e { ZSTD_rmd_refSingleDDict = 0 , ZSTD_rmd_refMultipleDDicts = 1 }
 
enum  ZSTD_dictAttachPref_e { ZSTD_dictDefaultAttach = 0 , ZSTD_dictForceAttach = 1 , ZSTD_dictForceCopy = 2 , ZSTD_dictForceLoad = 3 }
 
enum  ZSTD_literalCompressionMode_e { ZSTD_lcm_auto = 0 , ZSTD_lcm_huffman = 1 , ZSTD_lcm_uncompressed = 2 }
 
enum  ZSTD_ParamSwitch_e { ZSTD_ps_auto = 0 , ZSTD_ps_enable = 1 , ZSTD_ps_disable = 2 }
 
enum  ZSTD_FrameType_e { ZSTD_frame , ZSTD_skippableFrame }
 
enum  ZSTD_SequenceFormat_e { ZSTD_sf_noBlockDelimiters = 0 , ZSTD_sf_explicitBlockDelimiters = 1 }
 
enum  ZSTD_nextInputType_e {
  ZSTDnit_frameHeader , ZSTDnit_blockHeader , ZSTDnit_block , ZSTDnit_lastBlock ,
  ZSTDnit_checksum , ZSTDnit_skippableFrame
}
 
enum  XXH_alignment { XXH_aligned , XXH_unaligned }
 
enum  blockType_e { bt_raw , bt_rle , bt_compressed , bt_reserved }
 
enum  SymbolEncodingType_e { set_basic , set_rle , set_compressed , set_repeat }
 
enum  ZSTD_overlap_e { ZSTD_no_overlap , ZSTD_overlap_src_before_dst }
 
enum  ZSTD_bufferMode_e { ZSTD_bm_buffered = 0 , ZSTD_bm_stable = 1 }
 
enum  HIST_checkInput_e { trustInput , checkMaxSymbolValue }
 
enum  HUF_nbStreams_e { HUF_singleStream , HUF_fourStreams }
 
enum  ZSTD_cwksp_alloc_phase_e { ZSTD_cwksp_alloc_objects , ZSTD_cwksp_alloc_aligned_init_once , ZSTD_cwksp_alloc_aligned , ZSTD_cwksp_alloc_buffers }
 
enum  ZSTD_cwksp_static_alloc_e { ZSTD_cwksp_dynamic_alloc , ZSTD_cwksp_static_alloc }
 
enum  ZSTD_compressionStage_e { ZSTDcs_created =0 , ZSTDcs_init , ZSTDcs_ongoing , ZSTDcs_ending }
 
enum  ZSTD_cStreamStage { zcss_init =0 , zcss_load , zcss_flush }
 
enum  ZSTD_longLengthType_e { ZSTD_llt_none = 0 , ZSTD_llt_literalLength = 1 , ZSTD_llt_matchLength = 2 }
 
enum  ZSTD_OptPrice_e { zop_dynamic =0 , zop_predef }
 
enum  ZSTD_buffered_policy_e { ZSTDb_not_buffered , ZSTDb_buffered }
 
enum  ZSTD_dictTableLoadMethod_e { ZSTD_dtlm_fast , ZSTD_dtlm_full }
 
enum  ZSTD_tableFillPurpose_e { ZSTD_tfp_forCCtx , ZSTD_tfp_forCDict }
 
enum  ZSTD_dictMode_e { ZSTD_noDict = 0 , ZSTD_extDict = 1 , ZSTD_dictMatchState = 2 , ZSTD_dedicatedDictSearch = 3 }
 
enum  ZSTD_CParamMode_e { ZSTD_cpm_noAttachDict = 0 , ZSTD_cpm_attachDict = 1 , ZSTD_cpm_createCDict = 2 , ZSTD_cpm_unknown = 3 }
 
enum  ZSTD_DefaultPolicy_e { ZSTD_defaultDisallowed = 0 , ZSTD_defaultAllowed = 1 }
 
enum  ZSTD_compResetPolicy_e { ZSTDcrp_makeClean , ZSTDcrp_leaveDirty }
 
enum  ZSTD_indexResetPolicy_e { ZSTDirp_continue , ZSTDirp_reset }
 
enum  ZSTD_resetTarget_e { ZSTD_resetTarget_CDict , ZSTD_resetTarget_CCtx }
 
enum  ZSTD_BuildSeqStore_e { ZSTDbss_compress , ZSTDbss_noCompress }
 
enum  searchMethod_e { search_hashChain =0 , search_binaryTree =1 , search_rowHash =2 }
 
enum  base_directive_e { base_0possible =0 , base_1guaranteed =1 }
 
enum  ZSTD_dStage {
  ZSTDds_getFrameHeaderSize , ZSTDds_decodeFrameHeader , ZSTDds_decodeBlockHeader , ZSTDds_decompressBlock ,
  ZSTDds_decompressLastBlock , ZSTDds_checkChecksum , ZSTDds_decodeSkippableHeader , ZSTDds_skipFrame
}
 
enum  ZSTD_dStreamStage {
  zdss_init =0 , zdss_loadHeader , zdss_read , zdss_load ,
  zdss_flush
}
 
enum  ZSTD_dictUses_e { ZSTD_use_indefinitely = -1 , ZSTD_dont_use = 0 , ZSTD_use_once = 1 }
 
enum  ZSTD_litLocation_e { ZSTD_not_in_dst = 0 , ZSTD_in_dst = 1 , ZSTD_split = 2 }
 
enum  streaming_operation { not_streaming = 0 , is_streaming = 1 }
 
enum  ZSTD_longOffset_e { ZSTD_lo_isRegularOffset , ZSTD_lo_isLongOffset =1 }
 

Functions

MEM_STATIC int ZSTD_isPower2 (size_t u)
 
MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR ptrdiff_t ZSTD_wrappedPtrDiff (unsigned char const *lhs, unsigned char const *rhs)
 
MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR const void * ZSTD_wrappedPtrAdd (const void *ptr, ptrdiff_t add)
 
MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR const void * ZSTD_wrappedPtrSub (const void *ptr, ptrdiff_t sub)
 
MEM_STATIC void * ZSTD_maybeNullPtrAdd (void *ptr, ptrdiff_t add)
 
MEM_STATIC unsigned MEM_32bits (void)
 
MEM_STATIC unsigned MEM_64bits (void)
 
MEM_STATIC unsigned MEM_isLittleEndian (void)
 
MEM_STATIC U16 MEM_read16 (const void *memPtr)
 
MEM_STATIC U32 MEM_read32 (const void *memPtr)
 
MEM_STATIC U64 MEM_read64 (const void *memPtr)
 
MEM_STATIC size_t MEM_readST (const void *memPtr)
 
MEM_STATIC void MEM_write16 (void *memPtr, U16 value)
 
MEM_STATIC void MEM_write32 (void *memPtr, U32 value)
 
MEM_STATIC void MEM_write64 (void *memPtr, U64 value)
 
MEM_STATIC U16 MEM_readLE16 (const void *memPtr)
 
MEM_STATIC U32 MEM_readLE24 (const void *memPtr)
 
MEM_STATIC U32 MEM_readLE32 (const void *memPtr)
 
MEM_STATIC U64 MEM_readLE64 (const void *memPtr)
 
MEM_STATIC size_t MEM_readLEST (const void *memPtr)
 
MEM_STATIC void MEM_writeLE16 (void *memPtr, U16 val)
 
MEM_STATIC void MEM_writeLE24 (void *memPtr, U32 val)
 
MEM_STATIC void MEM_writeLE32 (void *memPtr, U32 val32)
 
MEM_STATIC void MEM_writeLE64 (void *memPtr, U64 val64)
 
MEM_STATIC void MEM_writeLEST (void *memPtr, size_t val)
 
MEM_STATIC U32 MEM_readBE32 (const void *memPtr)
 
MEM_STATIC U64 MEM_readBE64 (const void *memPtr)
 
MEM_STATIC size_t MEM_readBEST (const void *memPtr)
 
MEM_STATIC void MEM_writeBE32 (void *memPtr, U32 val32)
 
MEM_STATIC void MEM_writeBE64 (void *memPtr, U64 val64)
 
MEM_STATIC void MEM_writeBEST (void *memPtr, size_t val)
 
MEM_STATIC U32 MEM_swap32 (U32 in)
 
MEM_STATIC U64 MEM_swap64 (U64 in)
 
MEM_STATIC size_t MEM_swapST (size_t in)
 
MEM_STATIC U32 MEM_swap32_fallback (U32 in)
 
MEM_STATIC U64 MEM_swap64_fallback (U64 in)
 
MEM_STATIC void MEM_check (void)
 
ZSTDERRORLIB_API const char * ZSTD_getErrorString (ZSTD_ErrorCode code)
 
ERR_STATIC unsigned ERR_isError (size_t code)
 
ERR_STATIC ERR_enum ERR_getErrorCode (size_t code)
 
const char * ERR_getErrorString (ERR_enum code)
 
ERR_STATIC const char * ERR_getErrorName (size_t code)
 
FSE_PUBLIC_API unsigned FSE_versionNumber (void)
 
FSE_PUBLIC_API size_t FSE_compressBound (size_t size)
 
FSE_PUBLIC_API unsigned FSE_isError (size_t code)
 
FSE_PUBLIC_API const char * FSE_getErrorName (size_t code)
 
FSE_PUBLIC_API unsigned FSE_optimalTableLog (unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
 
FSE_PUBLIC_API size_t FSE_normalizeCount (short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount)
 
FSE_PUBLIC_API size_t FSE_NCountWriteBound (unsigned maxSymbolValue, unsigned tableLog)
 
FSE_PUBLIC_API size_t FSE_writeNCount (void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
 
FSE_PUBLIC_API size_t FSE_buildCTable (FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
 
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct)
 
FSE_PUBLIC_API size_t FSE_readNCount (short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize)
 
FSE_PUBLIC_API size_t FSE_readNCount_bmi2 (short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize, int bmi2)
 
MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback (U32 val)
 
MEM_STATIC unsigned ZSTD_countTrailingZeros32 (U32 val)
 
MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback (U32 val)
 
MEM_STATIC unsigned ZSTD_countLeadingZeros32 (U32 val)
 
MEM_STATIC unsigned ZSTD_countTrailingZeros64 (U64 val)
 
MEM_STATIC unsigned ZSTD_countLeadingZeros64 (U64 val)
 
MEM_STATIC unsigned ZSTD_NbCommonBytes (size_t val)
 
MEM_STATIC unsigned ZSTD_highbit32 (U32 val)
 
MEM_STATIC U64 ZSTD_rotateRight_U64 (U64 const value, U32 count)
 
MEM_STATIC U32 ZSTD_rotateRight_U32 (U32 const value, U32 count)
 
MEM_STATIC U16 ZSTD_rotateRight_U16 (U16 const value, U32 count)
 
MEM_STATIC size_t BIT_initCStream (BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity)
 
MEM_STATIC void BIT_addBits (BIT_CStream_t *bitC, BitContainerType value, unsigned nbBits)
 
MEM_STATIC void BIT_flushBits (BIT_CStream_t *bitC)
 
MEM_STATIC size_t BIT_closeCStream (BIT_CStream_t *bitC)
 
MEM_STATIC size_t BIT_initDStream (BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
 
MEM_STATIC BitContainerType BIT_readBits (BIT_DStream_t *bitD, unsigned nbBits)
 
MEM_STATIC BIT_DStream_status BIT_reloadDStream (BIT_DStream_t *bitD)
 
MEM_STATIC unsigned BIT_endOfDStream (const BIT_DStream_t *bitD)
 
MEM_STATIC void BIT_addBitsFast (BIT_CStream_t *bitC, BitContainerType value, unsigned nbBits)
 
MEM_STATIC void BIT_flushBitsFast (BIT_CStream_t *bitC)
 
MEM_STATIC size_t BIT_readBitsFast (BIT_DStream_t *bitD, unsigned nbBits)
 
FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits (BitContainerType bitContainer, U32 const nbBits)
 
FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits (BitContainerType bitContainer, U32 const start)
 
FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits (BitContainerType bitContainer, U32 const start, U32 const nbBits)
 
FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits (const BIT_DStream_t *bitD, U32 nbBits)
 
MEM_STATIC BitContainerType BIT_lookBitsFast (const BIT_DStream_t *bitD, U32 nbBits)
 
FORCE_INLINE_TEMPLATE void BIT_skipBits (BIT_DStream_t *bitD, U32 nbBits)
 
MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal (BIT_DStream_t *bitD)
 
MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast (BIT_DStream_t *bitD)
 
unsigned FSE_optimalTableLog_internal (unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
 
size_t FSE_buildCTable_rle (FSE_CTable *ct, unsigned char symbolValue)
 
size_t FSE_buildCTable_wksp (FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize)
 
FSE_PUBLIC_API size_t FSE_buildDTable_wksp (FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize)
 
size_t FSE_decompress_wksp_bmi2 (void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workSpace, size_t wkspSize, int bmi2)
 
MEM_STATIC void FSE_initCState2 (FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
 
MEM_STATIC U32 FSE_getMaxNbBits (const void *symbolTTPtr, U32 symbolValue)
 
MEM_STATIC U32 FSE_bitCost (const void *symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
 
MEM_STATIC BYTE FSE_peekSymbol (const FSE_DState_t *DStatePtr)
 
MEM_STATIC void FSE_updateState (FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
 
size_t HUF_compressBound (size_t size)
 
unsigned HUF_isError (size_t code)
 
const char * HUF_getErrorName (size_t code)
 
unsigned HUF_minTableLog (unsigned symbolCardinality)
 
unsigned HUF_cardinality (const unsigned *count, unsigned maxSymbolValue)
 
unsigned HUF_optimalTableLog (unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void *workSpace, size_t wkspSize, HUF_CElt *table, const unsigned *count, int flags)
 
size_t HUF_writeCTable_wksp (void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize)
 
size_t HUF_compress4X_usingCTable (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable, int flags)
 
size_t HUF_estimateCompressedSize (const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
 
int HUF_validateCTable (const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
 
size_t HUF_compress4X_repeat (void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int flags)
 
size_t HUF_buildCTable_wksp (HUF_CElt *tree, const unsigned *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
 
size_t HUF_readStats (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize)
 
size_t HUF_readStats_wksp (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t wkspSize, int flags)
 
size_t HUF_readCTable (HUF_CElt *CTable, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *hasZeroWeights)
 
U32 HUF_getNbBitsFromCTable (const HUF_CElt *symbolTable, U32 symbolValue)
 
HUF_CTableHeader HUF_readCTableHeader (HUF_CElt const *ctable)
 
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
 
size_t HUF_compress1X_usingCTable (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable, int flags)
 
size_t HUF_compress1X_repeat (void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int flags)
 
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags)
 
size_t HUF_decompress1X2_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags)
 
size_t HUF_decompress1X_usingDTable (void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable, int flags)
 
size_t HUF_decompress1X1_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags)
 
size_t HUF_decompress4X_usingDTable (void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable, int flags)
 
size_t HUF_decompress4X_hufOnly_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags)
 
size_t HUF_readDTableX1_wksp (HUF_DTable *DTable, const void *src, size_t srcSize, void *workSpace, size_t wkspSize, int flags)
 
size_t HUF_readDTableX2_wksp (HUF_DTable *DTable, const void *src, size_t srcSize, void *workSpace, size_t wkspSize, int flags)
 
FORCE_INLINE_TEMPLATE size_t FSE_readNCount_body (short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
 
FORCE_INLINE_TEMPLATE size_t HUF_readStats_body (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workSpace, size_t wkspSize, int bmi2)
 
FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic (void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt, const unsigned fast)
 
FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body (void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workSpace, size_t wkspSize, int bmi2)
 
ZSTDLIB_API unsigned ZSTD_versionNumber (void)
 
ZSTDLIB_API const char * ZSTD_versionString (void)
 
ZSTDLIB_API size_t ZSTD_compress (void *dst, size_t dstCapacity, const void *src, size_t srcSize, int compressionLevel)
 
ZSTDLIB_API size_t ZSTD_decompress (void *dst, size_t dstCapacity, const void *src, size_t compressedSize)
 
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize (const void *src, size_t srcSize)
 
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize (const void *src, size_t srcSize)
 
ZSTDLIB_API size_t ZSTD_findFrameCompressedSize (const void *src, size_t srcSize)
 
ZSTDLIB_API size_t ZSTD_compressBound (size_t srcSize)
 
ZSTDLIB_API unsigned ZSTD_isError (size_t result)
 
ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode (size_t functionResult)
 
ZSTDLIB_API const char * ZSTD_getErrorName (size_t result)
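
The simple one-shot API above composes as follows. This is only an illustrative sketch: roundTrip() and the fixed level 3 are arbitrary choices, and the standard headers it relies on (<stdlib.h>, <string.h>) are already #included at the top of this file.

    /* Compress a buffer, then decompress it again and verify the result. */
    static int roundTrip(const void* src, size_t srcSize)
    {
        size_t const cBound = ZSTD_compressBound(srcSize);
        void* const cBuf = malloc(cBound);
        if (cBuf == NULL) return -1;

        size_t const cSize = ZSTD_compress(cBuf, cBound, src, srcSize, 3 /* level */);
        if (ZSTD_isError(cSize)) { free(cBuf); return -1; }

        unsigned long long const rSize = ZSTD_getFrameContentSize(cBuf, cSize);
        /* A frame freshly produced by ZSTD_compress() records its content size,
           so rSize cannot be ZSTD_CONTENTSIZE_UNKNOWN / ZSTD_CONTENTSIZE_ERROR here. */
        void* const rBuf = malloc((size_t)rSize + 1);   /* +1 so that srcSize==0 still allocates */
        if (rBuf == NULL) { free(cBuf); return -1; }

        size_t const dSize = ZSTD_decompress(rBuf, (size_t)rSize, cBuf, cSize);
        int const ok = !ZSTD_isError(dSize) && dSize == srcSize
                    && memcmp(src, rBuf, srcSize) == 0;
        free(cBuf);
        free(rBuf);
        return ok ? 0 : -1;
    }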
 
ZSTDLIB_API int ZSTD_minCLevel (void)
 
ZSTDLIB_API int ZSTD_maxCLevel (void)
 
ZSTDLIB_API int ZSTD_defaultCLevel (void)
 
ZSTDLIB_API ZSTD_CCtx * ZSTD_createCCtx (void)
 
ZSTDLIB_API size_t ZSTD_freeCCtx (ZSTD_CCtx *cctx)
 
ZSTDLIB_API size_t ZSTD_compressCCtx (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, int compressionLevel)
 
ZSTDLIB_API ZSTD_DCtx * ZSTD_createDCtx (void)
 
ZSTDLIB_API size_t ZSTD_freeDCtx (ZSTD_DCtx *dctx)
 
ZSTDLIB_API size_t ZSTD_decompressDCtx (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds (ZSTD_cParameter cParam)
 
ZSTDLIB_API size_t ZSTD_CCtx_setParameter (ZSTD_CCtx *cctx, ZSTD_cParameter param, int value)
 
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize (ZSTD_CCtx *cctx, unsigned long long pledgedSrcSize)
 
ZSTDLIB_API size_t ZSTD_CCtx_reset (ZSTD_CCtx *cctx, ZSTD_ResetDirective reset)
 
ZSTDLIB_API size_t ZSTD_compress2 (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
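
ZSTD_compress2() combines an explicit context with parameters set through ZSTD_CCtx_setParameter(). A hedged sketch of that flow (the specific parameter choices are only examples):

    /* One-shot compression through an explicit, reusable context. */
    static size_t compressWithContext(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (cctx == NULL) return (size_t)-1;              /* allocation failure */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;                /* may be an error code: test with ZSTD_isError() */
    }

Keeping the context alive across calls (instead of freeing it) lets later frames reuse the allocated tables; parameters persist until ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters) is issued.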
 
ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds (ZSTD_dParameter dParam)
 
ZSTDLIB_API size_t ZSTD_DCtx_setParameter (ZSTD_DCtx *dctx, ZSTD_dParameter param, int value)
 
ZSTDLIB_API size_t ZSTD_DCtx_reset (ZSTD_DCtx *dctx, ZSTD_ResetDirective reset)
 
ZSTDLIB_API ZSTD_CStream * ZSTD_createCStream (void)
 
ZSTDLIB_API size_t ZSTD_freeCStream (ZSTD_CStream *zcs)
 
ZSTDLIB_API size_t ZSTD_compressStream2 (ZSTD_CCtx *cctx, ZSTD_outBuffer *output, ZSTD_inBuffer *input, ZSTD_EndDirective endOp)
 
ZSTDLIB_API size_t ZSTD_CStreamInSize (void)
 
ZSTDLIB_API size_t ZSTD_CStreamOutSize (void)
 
ZSTDLIB_API size_t ZSTD_initCStream (ZSTD_CStream *zcs, int compressionLevel)
 
ZSTDLIB_API size_t ZSTD_compressStream (ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
 
ZSTDLIB_API size_t ZSTD_flushStream (ZSTD_CStream *zcs, ZSTD_outBuffer *output)
 
ZSTDLIB_API size_t ZSTD_endStream (ZSTD_CStream *zcs, ZSTD_outBuffer *output)
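
The streaming entry points above are normally driven by a loop around ZSTD_compressStream2(): feed chunks with ZSTD_e_continue, then repeat ZSTD_e_end until it returns 0. A sketch under that contract; compressFile() and the file-based I/O are illustrative only, and <stdio.h>/<stdlib.h> are already #included by this file.

    /* Stream a file through ZSTD_compressStream2(), flushing the frame with ZSTD_e_end. */
    static int compressFile(FILE* fin, FILE* fout, int level)
    {
        size_t const inSize  = ZSTD_CStreamInSize();
        size_t const outSize = ZSTD_CStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        int result = -1;
        if (!inBuf || !outBuf || !cctx) goto cleanup;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

        for (;;) {
            size_t const readSize = fread(inBuf, 1, inSize, fin);
            int const lastChunk = (readSize < inSize);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            int finished = 0;
            do {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                if (ZSTD_isError(remaining)) goto cleanup;
                fwrite(outBuf, 1, output.pos, fout);
                /* with ZSTD_e_end, loop until the frame epilogue is fully flushed */
                finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
            } while (!finished);
            if (lastChunk) break;
        }
        result = 0;
    cleanup:
        ZSTD_freeCCtx(cctx);
        free(inBuf);
        free(outBuf);
        return result;
    }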
 
ZSTDLIB_API ZSTD_DStream * ZSTD_createDStream (void)
 
ZSTDLIB_API size_t ZSTD_freeDStream (ZSTD_DStream *zds)
 
ZSTDLIB_API size_t ZSTD_initDStream (ZSTD_DStream *zds)
 
ZSTDLIB_API size_t ZSTD_decompressStream (ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
 
ZSTDLIB_API size_t ZSTD_DStreamInSize (void)
 
ZSTDLIB_API size_t ZSTD_DStreamOutSize (void)
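
The matching decompression loop reads ZSTD_DStreamInSize()-sized chunks and drains each one through ZSTD_decompressStream() until its input is consumed. Again an illustrative, file-based sketch:

    /* Stream a zstd frame back out; ZSTD_decompressStream() returns 0 when a frame
       has been fully decoded and flushed. */
    static int decompressFile(FILE* fin, FILE* fout)
    {
        size_t const inSize  = ZSTD_DStreamInSize();
        size_t const outSize = ZSTD_DStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_DStream* const dstream = ZSTD_createDStream();
        int result = -1;
        size_t readSize;
        if (!inBuf || !outBuf || !dstream) goto cleanup;
        ZSTD_initDStream(dstream);

        while ((readSize = fread(inBuf, 1, inSize, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
                if (ZSTD_isError(ret)) goto cleanup;
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        result = 0;
    cleanup:
        ZSTD_freeDStream(dstream);
        free(inBuf);
        free(outBuf);
        return result;
    }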
 
ZSTDLIB_API size_t ZSTD_compress_usingDict (ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, int compressionLevel)
 
ZSTDLIB_API size_t ZSTD_decompress_usingDict (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
 
ZSTDLIB_API ZSTD_CDict * ZSTD_createCDict (const void *dictBuffer, size_t dictSize, int compressionLevel)
 
ZSTDLIB_API size_t ZSTD_freeCDict (ZSTD_CDict *CDict)
 
ZSTDLIB_API size_t ZSTD_compress_usingCDict (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
 
ZSTDLIB_API ZSTD_DDict * ZSTD_createDDict (const void *dictBuffer, size_t dictSize)
 
ZSTDLIB_API size_t ZSTD_freeDDict (ZSTD_DDict *ddict)
 
ZSTDLIB_API size_t ZSTD_decompress_usingDDict (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
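
The dictionary entry points above are typically used with one shared CDict/DDict pair for many small records. A hedged sketch (the dictionary buffer would normally come from ZDICT training, which is not shown; the level 3 is arbitrary):

    /* Compress one record with a shared CDict, then decompress it with the matching DDict. */
    static size_t dictRoundTrip(const void* dictBuf, size_t dictSize,
                                const void* src, size_t srcSize,
                                void* dst, size_t dstCapacity,
                                void* out, size_t outCapacity)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level */);
        ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
        size_t dSize = (size_t)-1;                       /* generic error by default */
        if (cdict && ddict && cctx && dctx) {
            size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                          src, srcSize, cdict);
            if (!ZSTD_isError(cSize))
                dSize = ZSTD_decompress_usingDDict(dctx, out, outCapacity,
                                                   dst, cSize, ddict);
        }
        ZSTD_freeCDict(cdict);  ZSTD_freeDDict(ddict);
        ZSTD_freeCCtx(cctx);    ZSTD_freeDCtx(dctx);
        return dSize;
    }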
 
ZSTDLIB_API unsigned ZSTD_getDictID_fromDict (const void *dict, size_t dictSize)
 
ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict (const ZSTD_CDict *cdict)
 
ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict (const ZSTD_DDict *ddict)
 
ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame (const void *src, size_t srcSize)
 
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary (ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
 
ZSTDLIB_API size_t ZSTD_CCtx_refCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict)
 
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix (ZSTD_CCtx *cctx, const void *prefix, size_t prefixSize)
 
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary (ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
 
ZSTDLIB_API size_t ZSTD_DCtx_refDDict (ZSTD_DCtx *dctx, const ZSTD_DDict *ddict)
 
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix (ZSTD_DCtx *dctx, const void *prefix, size_t prefixSize)
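
With the advanced API, a digested dictionary is attached by reference instead of being passed to every call; it then applies to subsequent ZSTD_compress2() / ZSTD_compressStream2() invocations. A small sketch, relying on the documented behaviour that referencing a NULL CDict returns the context to no-dictionary mode:

    static size_t compressWithAttachedDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
                                           void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
    {
        size_t const attach = ZSTD_CCtx_refCDict(cctx, cdict);
        if (ZSTD_isError(attach)) return attach;
        size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_CCtx_refCDict(cctx, NULL);          /* detach: back to no-dictionary mode */
        return cSize;
    }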
 
ZSTDLIB_API size_t ZSTD_sizeof_CCtx (const ZSTD_CCtx *cctx)
 
ZSTDLIB_API size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx *dctx)
 
ZSTDLIB_API size_t ZSTD_sizeof_CStream (const ZSTD_CStream *zcs)
 
ZSTDLIB_API size_t ZSTD_sizeof_DStream (const ZSTD_DStream *zds)
 
ZSTDLIB_API size_t ZSTD_sizeof_CDict (const ZSTD_CDict *cdict)
 
ZSTDLIB_API size_t ZSTD_sizeof_DDict (const ZSTD_DDict *ddict)
 
ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize (const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound (const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize (const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader (ZSTD_FrameHeader *zfhPtr, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced (ZSTD_FrameHeader *zfhPtr, const void *src, size_t srcSize, ZSTD_format_e format)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin (const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound (size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_generateSequences (ZSTD_CCtx *zc, ZSTD_Sequence *outSeqs, size_t outSeqsCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters (ZSTD_Sequence *sequences, size_t seqsSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressSequences (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const ZSTD_Sequence *inSeqs, size_t inSeqsSize, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressSequencesAndLiterals (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const ZSTD_Sequence *inSeqs, size_t nbSequences, const void *literals, size_t litSize, size_t litBufCapacity, size_t decompressedSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame (void *dst, size_t dstCapacity, const void *src, size_t srcSize, unsigned magicVariant)
 
ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame (void *dst, size_t dstCapacity, unsigned *magicVariant, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame (const void *buffer, size_t size)
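
Skippable frames let applications embed metadata that any conforming decoder silently skips. These three functions belong to the static/experimental API (ZSTD_STATIC_LINKING_ONLY); a minimal sketch:

    /* Prepend application metadata as a skippable frame (magicVariant must be in [0,15]). */
    static int storeMetadata(void* dst, size_t dstCapacity,
                             const void* meta, size_t metaSize)
    {
        size_t const fSize = ZSTD_writeSkippableFrame(dst, dstCapacity, meta, metaSize, 0);
        if (ZSTD_isError(fSize)) return -1;
        if (!ZSTD_isSkippableFrame(dst, fSize)) return -1;   /* sanity check */
        return (int)fSize;                                   /* bytes consumed in dst */
    }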
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize (int maxCompressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams (ZSTD_compressionParameters cParams)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams (const ZSTD_CCtx_params *params)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize (void)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize (int maxCompressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams (ZSTD_compressionParameters cParams)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams (const ZSTD_CCtx_params *params)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize (size_t maxWindowSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame (const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize (size_t dictSize, int compressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced (size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod)
 
ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize (size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
 
ZSTDLIB_STATIC_API ZSTD_CCtx * ZSTD_initStaticCCtx (void *workspace, size_t workspaceSize)
 
ZSTDLIB_STATIC_API ZSTD_CStream * ZSTD_initStaticCStream (void *workspace, size_t workspaceSize)
 
ZSTDLIB_STATIC_API ZSTD_DCtx * ZSTD_initStaticDCtx (void *workspace, size_t workspaceSize)
 
ZSTDLIB_STATIC_API ZSTD_DStream * ZSTD_initStaticDStream (void *workspace, size_t workspaceSize)
 
ZSTDLIB_STATIC_API const ZSTD_CDict * ZSTD_initStaticCDict (void *workspace, size_t workspaceSize, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams)
 
ZSTDLIB_STATIC_API const ZSTD_DDict * ZSTD_initStaticDDict (void *workspace, size_t workspaceSize, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
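
The ZSTD_initStatic*() family carves a context out of caller-provided memory, for allocation-free operation. A sketch combining it with ZSTD_estimateCCtxSize() and the simple ZSTD_compressCCtx() entry point (workspace alignment requirements still apply):

    /* Compress using a context placed in caller-provided memory; nothing to free. */
    static size_t compressNoMalloc(void* workspace, size_t workspaceSize,
                                   void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize, int level)
    {
        if (workspaceSize < ZSTD_estimateCCtxSize(level)) return (size_t)-1;
        ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, workspaceSize);
        if (cctx == NULL) return (size_t)-1;     /* workspace too small or misaligned */
        return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
        /* no ZSTD_freeCCtx(): the memory belongs to the caller */
    }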
 
ZSTDLIB_STATIC_API ZSTD_CCtx * ZSTD_createCCtx_advanced (ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_CStream * ZSTD_createCStream_advanced (ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_DCtx * ZSTD_createDCtx_advanced (ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_DStream * ZSTD_createDStream_advanced (ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_CDict * ZSTD_createCDict_advanced (const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_threadPool * ZSTD_createThreadPool (size_t numThreads)
 
ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool *pool)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool (ZSTD_CCtx *cctx, ZSTD_threadPool *pool)
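
ZSTD_createThreadPool() and ZSTD_CCtx_refThreadPool() allow several contexts to share one set of worker threads instead of each spawning its own (workers are enabled per context via ZSTD_c_nbWorkers). An illustrative sketch from the static/experimental API:

    static void setupSharedPool(void)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
        ZSTD_CCtx* const cctxA = ZSTD_createCCtx();
        ZSTD_CCtx* const cctxB = ZSTD_createCCtx();
        ZSTD_CCtx_refThreadPool(cctxA, pool);
        ZSTD_CCtx_refThreadPool(cctxB, pool);
        ZSTD_CCtx_setParameter(cctxA, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_setParameter(cctxB, ZSTD_c_nbWorkers, 4);
        /* ... drive both contexts with ZSTD_compressStream2() ... */
        ZSTD_freeCCtx(cctxA);
        ZSTD_freeCCtx(cctxB);
        ZSTD_freeThreadPool(pool);   /* only after every referencing context is freed */
    }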
 
ZSTDLIB_STATIC_API ZSTD_CDict * ZSTD_createCDict_advanced2 (const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, const ZSTD_CCtx_params *cctxParams, ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_DDict * ZSTD_createDDict_advanced (const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem)
 
ZSTDLIB_STATIC_API ZSTD_CDict * ZSTD_createCDict_byReference (const void *dictBuffer, size_t dictSize, int compressionLevel)
 
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams (int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize)
 
ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams (int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_checkCParams (ZSTD_compressionParameters params)
 
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams (ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams (ZSTD_CCtx *cctx, ZSTD_compressionParameters cparams)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams (ZSTD_CCtx *cctx, ZSTD_frameParameters fparams)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams (ZSTD_CCtx *cctx, ZSTD_parameters params)
 
ZSTDLIB_STATIC_API size_t ZSTD_compress_advanced (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, ZSTD_parameters params)
 
ZSTDLIB_STATIC_API size_t ZSTD_compress_usingCDict_advanced (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict, ZSTD_frameParameters fParams)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference (ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced (ZSTD_CCtx *cctx, const void *prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter (const ZSTD_CCtx *cctx, ZSTD_cParameter param, int *value)
 
ZSTDLIB_STATIC_API ZSTD_CCtx_params * ZSTD_createCCtxParams (void)
 
ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams (ZSTD_CCtx_params *params)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset (ZSTD_CCtx_params *params)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init (ZSTD_CCtx_params *cctxParams, int compressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced (ZSTD_CCtx_params *cctxParams, ZSTD_parameters params)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter (ZSTD_CCtx_params *params, ZSTD_cParameter param, int value)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter (const ZSTD_CCtx_params *params, ZSTD_cParameter param, int *value)
 
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams (ZSTD_CCtx *cctx, const ZSTD_CCtx_params *params)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, size_t *dstPos, const void *src, size_t srcSize, size_t *srcPos, ZSTD_EndDirective endOp)
 
ZSTDLIB_STATIC_API unsigned ZSTD_isFrame (const void *buffer, size_t size)
 
ZSTDLIB_STATIC_API ZSTD_DDict * ZSTD_createDDict_byReference (const void *dictBuffer, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference (ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced (ZSTD_DCtx *dctx, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
 
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced (ZSTD_DCtx *dctx, const void *prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
 
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize (ZSTD_DCtx *dctx, size_t maxWindowSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter (ZSTD_DCtx *dctx, ZSTD_dParameter param, int *value)
 
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setFormat (ZSTD_DCtx *dctx, ZSTD_format_e format)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, size_t *dstPos, const void *src, size_t srcSize, size_t *srcPos)
 
ZSTDLIB_STATIC_API size_t ZSTD_initCStream_srcSize (ZSTD_CStream *zcs, int compressionLevel, unsigned long long pledgedSrcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingDict (ZSTD_CStream *zcs, const void *dict, size_t dictSize, int compressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_initCStream_advanced (ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict (ZSTD_CStream *zcs, const ZSTD_CDict *cdict)
 
ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict_advanced (ZSTD_CStream *zcs, const ZSTD_CDict *cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_resetCStream (ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
 
ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression (const ZSTD_CCtx *cctx)
 
ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow (ZSTD_CCtx *cctx)
 
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict (ZSTD_DStream *zds, const void *dict, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict (ZSTD_DStream *zds, const ZSTD_DDict *ddict)
 
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream (ZSTD_DStream *zds)
 
ZSTDLIB_STATIC_API void ZSTD_registerSequenceProducer (ZSTD_CCtx *cctx, void *sequenceProducerState, ZSTD_sequenceProducer_F sequenceProducer)
 
ZSTDLIB_STATIC_API void ZSTD_CCtxParams_registerSequenceProducer (ZSTD_CCtx_params *params, void *sequenceProducerState, ZSTD_sequenceProducer_F sequenceProducer)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin (ZSTD_CCtx *cctx, int compressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict)
 
ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx (ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx, unsigned long long pledgedSrcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_advanced (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict_advanced (ZSTD_CCtx *const cctx, const ZSTD_CDict *const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min (unsigned long long windowSize, unsigned long long frameContentSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin (ZSTD_DCtx *dctx)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict (ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict (ZSTD_DCtx *dctx, const ZSTD_DDict *ddict)
 
ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress (ZSTD_DCtx *dctx)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API void ZSTD_copyDCtx (ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx)
 
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType (ZSTD_DCtx *dctx)
 
ZSTDLIB_API int ZSTD_isDeterministicBuild (void)
 
ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx *cctx)
 
ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
 
MEM_STATIC void * ZSTD_customMalloc (size_t size, ZSTD_customMem customMem)
 
MEM_STATIC void * ZSTD_customCalloc (size_t size, ZSTD_customMem customMem)
 
MEM_STATIC void ZSTD_customFree (void *ptr, ZSTD_customMem customMem)
 
POOL_ctx * POOL_create (size_t numThreads, size_t queueSize)
 
POOL_ctx * POOL_create_advanced (size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
 
void POOL_free (POOL_ctx *ctx)
 
void POOL_joinJobs (POOL_ctx *ctx)
 
int POOL_resize (POOL_ctx *ctx, size_t numThreads)
 
size_t POOL_sizeof (const POOL_ctx *ctx)
 
void POOL_add (POOL_ctx *ctx, POOL_function function, void *opaque)
 
int POOL_tryAdd (POOL_ctx *ctx, POOL_function function, void *opaque)
 
MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid (void)
 
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void *input, size_t length, XXH32_hash_t seed)
 Calculates the 32-bit hash of input using xxHash32. More...
 
XXH_PUBLIC_API XXH_errorcode XXH32_freeState (XXH32_state_t *statePtr)
 Frees an XXH32_state_t. More...
 
XXH_PUBLIC_API void XXH32_copyState (XXH32_state_t *dst_state, const XXH32_state_t *src_state)
 Copies one XXH32_state_t to another. More...
 
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t *statePtr, XXH32_hash_t seed)
 Resets an XXH32_state_t to begin a new hash. More...
 
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t *statePtr, const void *input, size_t length)
 Consumes a block of input to an XXH32_state_t. More...
 
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t *statePtr)
 Returns the calculated hash value from an XXH32_state_t. More...
 
XXH_PUBLIC_API void XXH32_canonicalFromHash (XXH32_canonical_t *dst, XXH32_hash_t hash)
 Converts an XXH32_hash_t to a big endian XXH32_canonical_t. More...
 
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical (const XXH32_canonical_t *src)
 Converts an XXH32_canonical_t to a native XXH32_hash_t. More...
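
The xxHash helpers bundled with zstd are usable on their own, e.g. to store a checksum in a stable, endian-independent form. A sketch using only the one-shot and canonical-conversion calls listed above (<string.h> is already #included by this file):

    /* Hash a buffer and emit the 4-byte big-endian (canonical) representation. */
    static void hashToCanonical(const void* data, size_t len, unsigned char out[4])
    {
        XXH32_hash_t const h = XXH32(data, len, 0 /* seed */);
        XXH32_canonical_t canon;
        XXH32_canonicalFromHash(&canon, h);
        memcpy(out, &canon, sizeof(canon));               /* exactly 4 bytes */
        /* XXH32_hashFromCanonical(&canon) recovers the native-endian value. */
    }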
 
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64 (XXH_NOESCAPE const void *input, size_t length, XXH64_hash_t seed)
 Calculates the 64-bit hash of input using xxHash64. More...
 
XXH_PUBLIC_API XXH_errorcode XXH64_freeState (XXH64_state_t *statePtr)
 Frees an XXH64_state_t. More...
 
XXH_PUBLIC_API void XXH64_copyState (XXH_NOESCAPE XXH64_state_t *dst_state, const XXH64_state_t *src_state)
 Copies one XXH64_state_t to another. More...
 
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t *statePtr, XXH64_hash_t seed)
 Resets an XXH64_state_t to begin a new hash. More...
 
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t *statePtr, XXH_NOESCAPE const void *input, size_t length)
 Consumes a block of input to an XXH64_state_t. More...
 
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t *statePtr)
 Returns the calculated hash value from an XXH64_state_t. More...
 
XXH_PUBLIC_API void XXH64_canonicalFromHash (XXH_NOESCAPE XXH64_canonical_t *dst, XXH64_hash_t hash)
 Converts an XXH64_hash_t to a big endian XXH64_canonical_t. More...
 
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical (XXH_NOESCAPE const XXH64_canonical_t *src)
 Converts an XXH64_canonical_t to a native XXH64_hash_t. More...
 
XXH_FORCE_INLINE xxh_u32 XXH_readLE32 (const void *ptr)
 
XXH_FORCE_INLINE xxh_u32 XXH_readLE32_align (const void *ptr, XXH_alignment align)
 
XXH_FORCE_INLINE XXH_PUREF xxh_u32 XXH32_endian_align (const xxh_u8 *input, size_t len, xxh_u32 seed, XXH_alignment align)
 
XXH_FORCE_INLINE XXH_PUREF xxh_u64 XXH64_endian_align (const xxh_u8 *input, size_t len, xxh_u64 seed, XXH_alignment align)
 
MEM_STATIC FORCE_INLINE_ATTR void ZSTD_wildcopy (void *dst, const void *src, size_t length, ZSTD_overlap_e const ovtype)
 
MEM_STATIC size_t ZSTD_limitCopy (void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
void ZSTD_invalidateRepCodes (ZSTD_CCtx *cctx)
 
size_t ZSTD_getcBlockSize (const void *src, size_t srcSize, blockProperties_t *bpPtr)
 
size_t ZSTD_decodeSeqHeaders (ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
 
MEM_STATIC int ZSTD_cpuSupportsBmi2 (void)
 
size_t HIST_count (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
 
unsigned HIST_isError (size_t code)
 
size_t HIST_count_wksp (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, void *workSpace, size_t workSpaceSize)
 
size_t HIST_countFast (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
 
size_t HIST_countFast_wksp (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, void *workSpace, size_t workSpaceSize)
 
unsigned HIST_count_simple (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
 
void HIST_add (unsigned *count, const void *src, size_t srcSize)
 
MEM_STATIC int HUF_isSorted (nodeElt huffNode[], U32 const maxSymbolValue1)
 
HINT_INLINE void HUF_insertionSort (nodeElt huffNode[], int const low, int const high)
 
FORCE_INLINE_TEMPLATE void HUF_addBits (HUF_CStream_t *bitC, HUF_CElt elt, int idx, int kFast)
 
FORCE_INLINE_TEMPLATE void HUF_zeroIndex1 (HUF_CStream_t *bitC)
 
FORCE_INLINE_TEMPLATE void HUF_mergeIndex1 (HUF_CStream_t *bitC)
 
FORCE_INLINE_TEMPLATE void HUF_flushBits (HUF_CStream_t *bitC, int kFast)
 
FORCE_INLINE_TEMPLATE void HUF_encodeSymbol (HUF_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable, int idx, int fast)
 
FORCE_INLINE_TEMPLATE void HUF_compress1X_usingCTable_internal_body_loop (HUF_CStream_t *bitC, const BYTE *ip, size_t srcSize, const HUF_CElt *ct, int kUnroll, int kFastFlush, int kLastFast)
 
FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
 
MEM_STATIC size_t ZSTD_cwksp_available_space (ZSTD_cwksp *ws)
 
MEM_STATIC void * ZSTD_cwksp_initialAllocStart (ZSTD_cwksp *ws)
 
MEM_STATIC void ZSTD_cwksp_assert_internal_consistency (ZSTD_cwksp *ws)
 
MEM_STATIC size_t ZSTD_cwksp_align (size_t size, size_t align)
 
MEM_STATIC size_t ZSTD_cwksp_alloc_size (size_t size)
 
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size (size_t size, size_t alignment)
 
MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size (size_t size)
 
MEM_STATIC size_t ZSTD_cwksp_slack_space_required (void)
 
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr (void *ptr, const size_t alignBytes)
 
MEM_STATIC void * ZSTD_cwksp_reserve_internal_buffer_space (ZSTD_cwksp *ws, size_t const bytes)
 
MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase (ZSTD_cwksp *ws, ZSTD_cwksp_alloc_phase_e phase)
 
MEM_STATIC int ZSTD_cwksp_owns_buffer (const ZSTD_cwksp *ws, const void *ptr)
 
MEM_STATIC void * ZSTD_cwksp_reserve_internal (ZSTD_cwksp *ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
 
MEM_STATIC BYTE * ZSTD_cwksp_reserve_buffer (ZSTD_cwksp *ws, size_t bytes)
 
MEM_STATIC void * ZSTD_cwksp_reserve_aligned_init_once (ZSTD_cwksp *ws, size_t bytes)
 
MEM_STATIC void * ZSTD_cwksp_reserve_aligned64 (ZSTD_cwksp *ws, size_t bytes)
 
MEM_STATIC void * ZSTD_cwksp_reserve_table (ZSTD_cwksp *ws, size_t bytes)
 
MEM_STATIC void * ZSTD_cwksp_reserve_object (ZSTD_cwksp *ws, size_t bytes)
 
MEM_STATIC void * ZSTD_cwksp_reserve_object_aligned (ZSTD_cwksp *ws, size_t byteSize, size_t alignment)
 
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty (ZSTD_cwksp *ws)
 
MEM_STATIC void ZSTD_cwksp_mark_tables_clean (ZSTD_cwksp *ws)
 
MEM_STATIC void ZSTD_cwksp_clean_tables (ZSTD_cwksp *ws)
 
MEM_STATIC void ZSTD_cwksp_clear_tables (ZSTD_cwksp *ws)
 
MEM_STATIC void ZSTD_cwksp_clear (ZSTD_cwksp *ws)
 
MEM_STATIC size_t ZSTD_cwksp_sizeof (const ZSTD_cwksp *ws)
 
MEM_STATIC size_t ZSTD_cwksp_used (const ZSTD_cwksp *ws)
 
MEM_STATIC void ZSTD_cwksp_init (ZSTD_cwksp *ws, void *start, size_t size, ZSTD_cwksp_static_alloc_e isStatic)
 
MEM_STATIC size_t ZSTD_cwksp_create (ZSTD_cwksp *ws, size_t size, ZSTD_customMem customMem)
 
MEM_STATIC void ZSTD_cwksp_free (ZSTD_cwksp *ws, ZSTD_customMem customMem)
 
MEM_STATIC void ZSTD_cwksp_move (ZSTD_cwksp *dst, ZSTD_cwksp *src)
 
MEM_STATIC int ZSTD_cwksp_reserve_failed (const ZSTD_cwksp *ws)
 
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds (const ZSTD_cwksp *const ws, size_t const estimatedSpace)
 
MEM_STATIC int ZSTD_cwksp_check_available (ZSTD_cwksp *ws, size_t additionalNeededSpace)
 
MEM_STATIC int ZSTD_cwksp_check_too_large (ZSTD_cwksp *ws, size_t additionalNeededSpace)
 
MEM_STATIC int ZSTD_cwksp_check_wasteful (ZSTD_cwksp *ws, size_t additionalNeededSpace)
 
MEM_STATIC void ZSTD_cwksp_bump_oversized_duration (ZSTD_cwksp *ws, size_t additionalNeededSpace)
 
ZSTDMT_CCtx * ZSTDMT_createCCtx_advanced (unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool *pool)
 
size_t ZSTDMT_freeCCtx (ZSTDMT_CCtx *mtctx)
 
size_t ZSTDMT_sizeof_CCtx (ZSTDMT_CCtx *mtctx)
 
size_t ZSTDMT_nextInputSizeHint (const ZSTDMT_CCtx *mtctx)
 
size_t ZSTDMT_initCStream_internal (ZSTDMT_CCtx *mtctx, const void *dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict *cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
 
size_t ZSTDMT_compressStream_generic (ZSTDMT_CCtx *mtctx, ZSTD_outBuffer *output, ZSTD_inBuffer *input, ZSTD_EndDirective endOp)
 
size_t ZSTDMT_toFlushNow (ZSTDMT_CCtx *mtctx)
 
void ZSTDMT_updateCParams_whileCompressing (ZSTDMT_CCtx *mtctx, const ZSTD_CCtx_params *cctxParams)
 
ZSTD_frameProgression ZSTDMT_getFrameProgression (ZSTDMT_CCtx *mtctx)
 
size_t ZSTD_splitBlock (const void *blockStart, size_t blockSize, int level, void *workspace, size_t wkspSize)
 
MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength (SeqStore_t const *seqStore, SeqDef const *seq)
 
const SeqStore_t * ZSTD_getSeqStore (const ZSTD_CCtx *ctx)
 
int ZSTD_seqToCodes (const SeqStore_t *seqStorePtr)
 
size_t ZSTD_buildBlockEntropyStats (const SeqStore_t *seqStorePtr, const ZSTD_entropyCTables_t *prevEntropy, ZSTD_entropyCTables_t *nextEntropy, const ZSTD_CCtx_params *cctxParams, ZSTD_entropyCTablesMetadata_t *entropyMetadata, void *workspace, size_t wkspSize)
 
ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor (ZSTD_strategy strat, ZSTD_ParamSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode)
 
MEM_STATIC U32 ZSTD_LLcode (U32 litLength)
 
MEM_STATIC U32 ZSTD_MLcode (U32 mlBase)
 
MEM_STATIC int ZSTD_cParam_withinBounds (ZSTD_cParameter cParam, int value)
 
MEM_STATIC const BYTE * ZSTD_selectAddr (U32 index, U32 lowLimit, const BYTE *candidate, const BYTE *backup)
 
MEM_STATIC size_t ZSTD_noCompressBlock (void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastBlock)
 
MEM_STATIC size_t ZSTD_rleCompressBlock (void *dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
 
MEM_STATIC size_t ZSTD_minGain (size_t srcSize, ZSTD_strategy strat)
 
MEM_STATIC int ZSTD_literalsCompressionIsDisabled (const ZSTD_CCtx_params *cctxParams)
 
HINT_INLINE UNUSED_ATTR void ZSTD_storeSeqOnly (SeqStore_t *seqStorePtr, size_t litLength, U32 offBase, size_t matchLength)
 
HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq (SeqStore_t *seqStorePtr, size_t litLength, const BYTE *literals, const BYTE *litLimit, U32 offBase, size_t matchLength)
 
MEM_STATIC void ZSTD_updateRep (U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
 
MEM_STATIC Repcodes_t ZSTD_newRep (U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
 
MEM_STATIC size_t ZSTD_count (const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
 
MEM_STATIC size_t ZSTD_count_2segments (const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
 
MEM_STATIC size_t ZSTD_hash3Ptr (const void *ptr, U32 h)
 
MEM_STATIC size_t ZSTD_hash3PtrS (const void *ptr, U32 h, U32 s)
 
MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtr (const void *p, U32 hBits, U32 mls)
 
MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtrSalted (const void *p, U32 hBits, U32 mls, const U64 hashSalt)
 
MEM_STATIC U64 ZSTD_rollingHash_compute (void const *buf, size_t size)
 
MEM_STATIC U64 ZSTD_rollingHash_primePower (U32 length)
 
MEM_STATIC U64 ZSTD_rollingHash_rotate (U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
 
MEM_STATIC void ZSTD_window_clear (ZSTD_window_t *window)
 
MEM_STATIC U32 ZSTD_window_isEmpty (ZSTD_window_t const window)
 
MEM_STATIC U32 ZSTD_window_hasExtDict (ZSTD_window_t const window)
 
MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode (const ZSTD_MatchState_t *ms)
 
MEM_STATIC U32 ZSTD_window_canOverflowCorrect (ZSTD_window_t const window, U32 cycleLog, U32 maxDist, U32 loadedDictEnd, void const *src)
 
MEM_STATIC U32 ZSTD_window_needOverflowCorrection (ZSTD_window_t const window, U32 cycleLog, U32 maxDist, U32 loadedDictEnd, void const *src, void const *srcEnd)
 
MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_window_correctOverflow (ZSTD_window_t *window, U32 cycleLog, U32 maxDist, void const *src)
 
MEM_STATIC void ZSTD_window_enforceMaxDist (ZSTD_window_t *window, const void *blockEnd, U32 maxDist, U32 *loadedDictEndPtr, const ZSTD_MatchState_t **dictMatchStatePtr)
 
MEM_STATIC void ZSTD_checkDictValidity (const ZSTD_window_t *window, const void *blockEnd, U32 maxDist, U32 *loadedDictEndPtr, const ZSTD_MatchState_t **dictMatchStatePtr)
 
MEM_STATIC void ZSTD_window_init (ZSTD_window_t *window)
 
MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_window_update (ZSTD_window_t *window, const void *src, size_t srcSize, int forceNonContiguous)
 
MEM_STATIC U32 ZSTD_getLowestMatchIndex (const ZSTD_MatchState_t *ms, U32 curr, unsigned windowLog)
 
MEM_STATIC U32 ZSTD_getLowestPrefixIndex (const ZSTD_MatchState_t *ms, U32 curr, unsigned windowLog)
 
MEM_STATIC int ZSTD_index_overlap_check (const U32 prefixLowestIndex, const U32 repIndex)
 
MEM_STATIC void ZSTD_writeTaggedIndex (U32 *const hashTable, size_t hashAndTag, U32 index)
 
MEM_STATIC int ZSTD_comparePackedTags (size_t packedTag1, size_t packedTag2)
 
size_t ZSTD_loadCEntropy (ZSTD_compressedBlockState_t *bs, void *workspace, const void *const dict, size_t dictSize)
 
void ZSTD_reset_compressedBlockState (ZSTD_compressedBlockState_t *bs)
 
size_t ZSTD_convertBlockSequences (ZSTD_CCtx *cctx, const ZSTD_Sequence *const inSeqs, size_t nbSequences, int repcodeResolution)
 
BlockSummary ZSTD_get1BlockSummary (const ZSTD_Sequence *seqs, size_t nbSeqs)
 
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams (const ZSTD_CCtx_params *CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
 
size_t ZSTD_initCStream_internal (ZSTD_CStream *zcs, const void *dict, size_t dictSize, const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, unsigned long long pledgedSrcSize)
 
void ZSTD_resetSeqStore (SeqStore_t *ssPtr)
 
ZSTD_compressionParameters ZSTD_getCParamsFromCDict (const ZSTD_CDict *cdict)
 
size_t ZSTD_compressBegin_advanced_internal (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, unsigned long long pledgedSrcSize)
 
size_t ZSTD_compress_advanced_internal (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, const ZSTD_CCtx_params *params)
 
size_t ZSTD_writeLastEmptyBlock (void *dst, size_t dstCapacity)
 
void ZSTD_referenceExternalSequences (ZSTD_CCtx *cctx, rawSeq *seq, size_t nbSeq)
 
U32 ZSTD_cycleLog (U32 hashLog, ZSTD_strategy strat)
 
void ZSTD_CCtx_trace (ZSTD_CCtx *cctx, size_t extraCSize)
 
MEM_STATIC int ZSTD_hasExtSeqProd (const ZSTD_CCtx_params *params)
 
size_t ZSTD_compressBegin_usingCDict_deprecated (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict)
 
size_t ZSTD_compressContinue_public (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
size_t ZSTD_compressEnd_public (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
size_t ZSTD_compressBlock_deprecated (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
size_t ZSTD_noCompressLiterals (void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
size_t ZSTD_compressRleLiteralsBlock (void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
size_t ZSTD_compressLiterals (void *dst, size_t dstCapacity, const void *src, size_t srcSize, void *entropyWorkspace, size_t entropyWorkspaceSize, const ZSTD_hufCTables_t *prevHuf, ZSTD_hufCTables_t *nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, int suspectUncompressible, int bmi2)
 
SymbolEncodingType_e ZSTD_selectEncodingType (FSE_repeat *repeatMode, unsigned const *count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const *prevCTable, short const *defaultNorm, U32 defaultNormLog, ZSTD_DefaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy)
 
size_t ZSTD_buildCTable (void *dst, size_t dstCapacity, FSE_CTable *nextCTable, U32 FSELog, SymbolEncodingType_e type, unsigned *count, U32 max, const BYTE *codeTable, size_t nbSeq, const S16 *defaultNorm, U32 defaultNormLog, U32 defaultMax, const FSE_CTable *prevCTable, size_t prevCTableSize, void *entropyWorkspace, size_t entropyWorkspaceSize)
 
size_t ZSTD_encodeSequences (void *dst, size_t dstCapacity, FSE_CTable const *CTable_MatchLength, BYTE const *mlCodeTable, FSE_CTable const *CTable_OffsetBits, BYTE const *ofCodeTable, FSE_CTable const *CTable_LitLength, BYTE const *llCodeTable, SeqDef const *sequences, size_t nbSeq, int longOffsets, int bmi2)
 
size_t ZSTD_fseBitCost (FSE_CTable const *ctable, unsigned const *count, unsigned const max)
 
size_t ZSTD_crossEntropyCost (short const *norm, unsigned accuracyLog, unsigned const *count, unsigned const max)
 
FORCE_INLINE_TEMPLATE size_t ZSTD_encodeSequences_body (void *dst, size_t dstCapacity, FSE_CTable const *CTable_MatchLength, BYTE const *mlCodeTable, FSE_CTable const *CTable_OffsetBits, BYTE const *ofCodeTable, FSE_CTable const *CTable_LitLength, BYTE const *llCodeTable, SeqDef const *sequences, size_t nbSeq, int longOffsets)
 
size_t ZSTD_compressSuperBlock (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, void const *src, size_t srcSize, unsigned lastBlock)
 
FORCE_INLINE_TEMPLATE unsigned hash2 (const void *p, unsigned hashLog)
 
FORCE_INLINE_TEMPLATE void addEvents_generic (Fingerprint *fp, const void *src, size_t srcSize, size_t samplingRate, unsigned hashLog)
 
FORCE_INLINE_TEMPLATE void recordFingerprint_generic (Fingerprint *fp, const void *src, size_t srcSize, size_t samplingRate, unsigned hashLog)
 
void ZSTD_fillHashTable (ZSTD_MatchState_t *ms, void const *end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp)
 
size_t ZSTD_compressBlock_fast (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_fast_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_fast_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
void ZSTD_fillDoubleHashTable (ZSTD_MatchState_t *ms, void const *end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp)
 
size_t ZSTD_compressBlock_doubleFast (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_doubleFast_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_doubleFast_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
U32 ZSTD_insertAndFindFirstIndex (ZSTD_MatchState_t *ms, const BYTE *ip)
 
void ZSTD_row_update (ZSTD_MatchState_t *const ms, const BYTE *ip)
 
void ZSTD_dedicatedDictSearch_lazy_loadDictionary (ZSTD_MatchState_t *ms, const BYTE *const ip)
 
void ZSTD_preserveUnsortedMark (U32 *const table, U32 const size, U32 const reducerValue)
 
size_t ZSTD_compressBlock_greedy (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_dictMatchState_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_greedy_extDict_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_dictMatchState_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy_extDict_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2 (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_dictMatchState_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_lazy2_extDict_row (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btlazy2 (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btlazy2_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btlazy2_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
void ZSTD_updateTree (ZSTD_MatchState_t *ms, const BYTE *ip, const BYTE *iend)
 
size_t ZSTD_compressBlock_btopt (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btopt_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btopt_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btultra (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btultra_dictMatchState (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btultra_extDict (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
size_t ZSTD_compressBlock_btultra2 (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize)
 
void ZSTD_ldm_fillHashTable (ldmState_t *state, const BYTE *ip, const BYTE *iend, ldmParams_t const *params)
 
size_t ZSTD_ldm_generateSequences (ldmState_t *ldms, RawSeqStore_t *sequences, ldmParams_t const *params, void const *src, size_t srcSize)
 
size_t ZSTD_ldm_blockCompress (RawSeqStore_t *rawSeqStore, ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_ParamSwitch_e useRowMatchFinder, void const *src, size_t srcSize)
 
void ZSTD_ldm_skipSequences (RawSeqStore_t *rawSeqStore, size_t srcSize, U32 const minMatch)
 
void ZSTD_ldm_skipRawSeqStoreBytes (RawSeqStore_t *rawSeqStore, size_t nbBytes)
 
size_t ZSTD_ldm_getTableSize (ldmParams_t params)
 
size_t ZSTD_ldm_getMaxNbSeq (ldmParams_t params, size_t maxChunkSize)
 
void ZSTD_ldm_adjustParameters (ldmParams_t *params, ZSTD_compressionParameters const *cParams)
 
FORCE_INLINE_TEMPLATE void ZSTD_reduceTable_internal (U32 *const table, U32 const size, U32 const reducerValue, int const preserveMark)
 
MEM_STATIC size_t ZSTD_entropyCompressSeqStore_internal (void *dst, size_t dstCapacity, const void *literals, size_t litSize, const SeqStore_t *seqStorePtr, const ZSTD_entropyCTables_t *prevEntropy, ZSTD_entropyCTables_t *nextEntropy, const ZSTD_CCtx_params *cctxParams, void *entropyWorkspace, size_t entropyWkspSize, const int bmi2)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_doubleFast_noDict_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_fast_noDict_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls, int useCmov)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_fast_dictMatchState_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls, U32 const hasStep)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_BtFindBestMatch (ZSTD_MatchState_t *ms, const BYTE *const ip, const BYTE *const iLimit, size_t *offBasePtr, const U32 mls, const ZSTD_dictMode_e dictMode)
 
FORCE_INLINE_TEMPLATE size_t ZSTD_dedicatedDictSearch_lazy_search (size_t *offsetPtr, size_t ml, U32 nbAttempts, const ZSTD_MatchState_t *const dms, const BYTE *const ip, const BYTE *const iLimit, const BYTE *const prefixStart, const U32 curr, const U32 dictLimit, const size_t ddsIdx)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_insertAndFindFirstIndex_internal (ZSTD_MatchState_t *ms, const ZSTD_compressionParameters *const cParams, const BYTE *ip, U32 const mls, U32 const lazySkipping)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_HcFindBestMatch (ZSTD_MatchState_t *ms, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode)
 
MEM_STATIC U32 ZSTD_VecMask_next (ZSTD_VecMask val)
 
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex (BYTE *const tagRow, U32 const rowMask)
 
MEM_STATIC int ZSTD_isAligned (void const *ptr, size_t align)
 
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch (U32 const *hashTable, BYTE const *tagTable, U32 const relRow, U32 const rowLog)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_row_fillHashCache (ZSTD_MatchState_t *ms, const BYTE *base, U32 const rowLog, U32 const mls, U32 idx, const BYTE *const iLimit)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_row_nextCachedHash (U32 *cache, U32 const *hashTable, BYTE const *tagTable, BYTE const *base, U32 idx, U32 const hashLog, U32 const rowLog, U32 const mls, U64 const hashSalt)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_row_update_internalImpl (ZSTD_MatchState_t *ms, U32 updateStartIdx, U32 const updateEndIdx, U32 const mls, U32 const rowLog, U32 const rowMask, U32 const useCache)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_row_update_internal (ZSTD_MatchState_t *ms, const BYTE *ip, U32 const mls, U32 const rowLog, U32 const rowMask, U32 const useCache)
 
FORCE_INLINE_TEMPLATE U32 ZSTD_row_matchMaskGroupWidth (const U32 rowEntries)
 
FORCE_INLINE_TEMPLATE ZSTD_VecMask ZSTD_row_getMatchMask (const BYTE *const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_RowFindBestMatch (ZSTD_MatchState_t *ms, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode, const U32 rowLog)
 
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax (ZSTD_MatchState_t *ms, const BYTE *ip, const BYTE *iend, size_t *offsetPtr, U32 const mls, U32 const rowLog, searchMethod_e const searchMethod, ZSTD_dictMode_e const dictMode)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_lazy_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth, ZSTD_dictMode_e const dictMode)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_lazy_extDict_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth)
 
MEM_STATIC U32 ZSTD_bitWeight (U32 stat)
 
MEM_STATIC U32 ZSTD_fracWeight (U32 rawStat)
 
FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice (U32 const offBase, U32 const matchLength, const optState_t *const optPtr, int const optLevel)
 
MEM_STATIC U32 ZSTD_readMINMATCH (const void *memPtr, U32 length)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_updateTree_internal (ZSTD_MatchState_t *ms, const BYTE *const ip, const BYTE *const iend, const U32 mls, const ZSTD_dictMode_e dictMode)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_insertBtAndGetAllMatches (ZSTD_match_t *matches, ZSTD_MatchState_t *ms, U32 *nextToUpdate3, const BYTE *const ip, const BYTE *const iLimit, const ZSTD_dictMode_e dictMode, const U32 rep[ZSTD_REP_NUM], const U32 ll0, const U32 lengthToBeat, const U32 mls)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_btGetAllMatches_internal (ZSTD_match_t *matches, ZSTD_MatchState_t *ms, U32 *nextToUpdate3, const BYTE *ip, const BYTE *const iHighLimit, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat, const ZSTD_dictMode_e dictMode, const U32 mls)
 
FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_opt_generic (ZSTD_MatchState_t *ms, SeqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const int optLevel, const ZSTD_dictMode_e dictMode)
 
MEM_STATIC ZSTDMT_CCtx * ZSTDMT_createCCtx_advanced_internal (unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool *pool)
 
FORCE_INLINE_TEMPLATE BYTE HUF_decodeSymbolX1 (BIT_DStream_t *Dstream, const HUF_DEltX1 *dt, const U32 dtLog)
 
HINT_INLINE size_t HUF_decodeStreamX1 (BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX1 *const dt, const U32 dtLog)
 
FORCE_INLINE_TEMPLATE size_t HUF_decompress1X1_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
 
FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
 
FORCE_INLINE_TEMPLATE U32 HUF_decodeSymbolX2 (void *op, BIT_DStream_t *DStream, const HUF_DEltX2 *dt, const U32 dtLog)
 
FORCE_INLINE_TEMPLATE U32 HUF_decodeLastSymbolX2 (void *op, BIT_DStream_t *DStream, const HUF_DEltX2 *dt, const U32 dtLog)
 
HINT_INLINE size_t HUF_decodeStreamX2 (BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
 
FORCE_INLINE_TEMPLATE size_t HUF_decompress1X2_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
 
FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
 
MEM_STATIC int ZSTD_DCtx_get_bmi2 (const struct ZSTD_DCtx_s *dctx)
 
size_t ZSTD_loadDEntropy (ZSTD_entropyDTables_t *entropy, const void *const dict, size_t const dictSize)
 
void ZSTD_checkContinuity (ZSTD_DCtx *dctx, const void *dst, size_t dstSize)
 
const void * ZSTD_DDict_dictContent (const ZSTD_DDict *ddict)
 
size_t ZSTD_DDict_dictSize (const ZSTD_DDict *ddict)
 
void ZSTD_copyDDictParameters (ZSTD_DCtx *dctx, const ZSTD_DDict *ddict)
 
size_t ZSTD_decompressBlock_internal (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const streaming_operation streaming)
 
void ZSTD_buildFSETable (ZSTD_seqSymbol *dt, const short *normalizedCounter, unsigned maxSymbolValue, const U32 *baseValue, const U8 *nbAdditionalBits, unsigned tableLog, void *wksp, size_t wkspSize, int bmi2)
 
size_t ZSTD_decompressBlock_deprecated (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
 
size_t ZSTD_decodeLiteralsBlock_wrapper (ZSTD_DCtx *dctx, const void *src, size_t srcSize, void *dst, size_t dstCapacity)
 
FORCE_INLINE_TEMPLATE void ZSTD_buildFSETable_body (ZSTD_seqSymbol *dt, const short *normalizedCounter, unsigned maxSymbolValue, const U32 *baseValue, const U8 *nbAdditionalBits, unsigned tableLog, void *wksp, size_t wkspSize)
 
HINT_INLINE void ZSTD_overlapCopy8 (BYTE **op, BYTE const **ip, size_t offset)
 
FORCE_NOINLINE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequenceEnd (BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd)
 
FORCE_NOINLINE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequenceEndSplitLitBuffer (BYTE *op, BYTE *const oend, const BYTE *const oend_w, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd)
 
HINT_INLINE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequence (BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd)
 
HINT_INLINE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequenceSplitLitBuffer (BYTE *op, BYTE *const oend, const BYTE *const oend_w, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd)
 
FORCE_INLINE_TEMPLATE void ZSTD_updateFseStateWithDInfo (ZSTD_fseState *DStatePtr, BIT_DStream_t *bitD, U16 nextState, U32 nbBits)
 
FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence (seqState_t *seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq)
 
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_bodySplitLitBuffer (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset)
 
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_body (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset)
 
FORCE_INLINE_TEMPLATE size_t ZSTD_prefetchMatch (size_t prefetchPos, seq_t const sequence, const BYTE *const prefixStart, const BYTE *const dictEnd)
 
FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset)
 
ZDICTLIB_API size_t ZDICT_trainFromBuffer (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples)
 
ZDICTLIB_API size_t ZDICT_finalizeDictionary (void *dstDictBuffer, size_t maxDictSize, const void *dictContent, size_t dictContentSize, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_params_t parameters)
 
ZDICTLIB_API unsigned ZDICT_getDictID (const void *dictBuffer, size_t dictSize)
 
ZDICTLIB_API size_t ZDICT_getDictHeaderSize (const void *dictBuffer, size_t dictSize)
 
ZDICTLIB_API unsigned ZDICT_isError (size_t errorCode)
 
ZDICTLIB_API const char * ZDICT_getErrorName (size_t errorCode)
 
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters)
 
ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t *parameters)
 
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters)
 
ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t *parameters)
 
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters)
 
ZDICTLIB_STATIC_API size_t ZDICT_addEntropyTablesFromBuffer (void *dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples)
 
COVER_epoch_info_t COVER_computeEpochs (U32 maxDictSize, U32 nbDmers, U32 k, U32 passes)
 
void COVER_warnOnSmallCorpus (size_t maxDictSize, size_t nbDmers, int displayLevel)
 
size_t COVER_checkTotalCompressedSize (const ZDICT_cover_params_t parameters, const size_t *samplesSizes, const BYTE *samples, size_t *offsets, size_t nbTrainSamples, size_t nbSamples, BYTE *const dict, size_t dictBufferCapacity)
 
size_t COVER_sum (const size_t *samplesSizes, unsigned nbSamples)
 
void COVER_best_init (COVER_best_t *best)
 
void COVER_best_wait (COVER_best_t *best)
 
void COVER_best_destroy (COVER_best_t *best)
 
void COVER_best_start (COVER_best_t *best)
 
void COVER_best_finish (COVER_best_t *best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection)
 
unsigned COVER_dictSelectionIsError (COVER_dictSelection_t selection)
 
COVER_dictSelection_t COVER_dictSelectionError (size_t error)
 
void COVER_dictSelectionFree (COVER_dictSelection_t selection)
 
COVER_dictSelection_t COVER_selectDict (BYTE *customDictContent, size_t dictBufferCapacity, size_t dictContentSize, const BYTE *samplesBuffer, const size_t *samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t *offsets, size_t totalCompressedSize)
 
int divsufsort (const unsigned char *T, int *SA, int n, int openMP)
 
int divbwt (const unsigned char *T, unsigned char *U, int *A, int n, unsigned char *num_indexes, int *indexes, int openMP)
 

Variables

int g_debuglevel = DEBUGLEVEL
 
int g_ZSTD_threading_useless_symbol
 
 XXH_errorcode
 
typedef XXH64_hash_t xxh_u64
 
XXH_FORCE_INLINE xxh_u64 XXH_readLE64 (const void *ptr)
 
XXH_FORCE_INLINE xxh_u64 XXH_readLE64_align (const void *ptr, XXH_alignment align)
 

Detailed Description

Single-file Zstandard library.

Generate using:

python combine.py -r ../../lib -x legacy/zstd_legacy.h -o zstd.c zstd-in.c

Macro Definition Documentation

◆ __has_attribute

#define __has_attribute (   x)    0

This header file contains macro definitions to support portability. This header is shared between C and ASM code, so it MUST only contain macro definitions. It MUST not contain any C code.

This header ONLY defines macros to detect platforms/feature support.

◆ _FORCE_HAS_FORMAT_STRING

#define _FORCE_HAS_FORMAT_STRING (   ...)
Value:
do { \
if (0) { \
_force_has_format_string(__VA_ARGS__); \
} \
} while (0)

Ignore: this is an internal helper.

We want to force this function invocation to be syntactically correct, but we don't want to force runtime evaluation of its arguments.

◆ BOUNDCHECK

#define BOUNDCHECK (   cParam,
  val 
)
Value:
do { \
RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
parameter_outOfBound, "Param out of bounds"); \
} while (0)

◆ CHECK_DBOUNDS

#define CHECK_DBOUNDS (   p,
 
)
Value:
{ \
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
}

◆ CHECK_V_F

#define CHECK_V_F (   e,
 
)
Value:
size_t const e = f; \
do { \
if (ERR_isError(e)) \
return e; \
} while (0)

◆ CLAMP_TYPE

#define CLAMP_TYPE (   cParam,
  val,
  type 
)
Value:
do { \
ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
} while (0)

◆ COVER_MAX_SAMPLES_SIZE

#define COVER_MAX_SAMPLES_SIZE   (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))

Samples are referenced with 32-bit indexes, so total samples size is limited to 4 GB on 64-bit builds. For 32-bit builds we choose 1 GB. Most 32-bit platforms have 2 GB of user-mode addressable space and we allocate a large contiguous buffer, so 1 GB is already a high limit.

◆ DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT

#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT
Value:
3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
* Currently, that means a 0.75 load factor.
* So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
* the load factor of the ddict hash set.
*/

◆ DISPLAY [1/2]

#define DISPLAY (   ...)
Value:
{ \
fprintf(stderr, __VA_ARGS__); \
fflush(stderr); \
}

◆ DISPLAY [2/2]

#define DISPLAY (   ...)
Value:
{ \
fprintf(stderr, __VA_ARGS__); \
fflush(stderr); \
}

◆ DISPLAYLEVEL [1/2]

#define DISPLAYLEVEL (   l,
  ... 
)
Value:
if (displayLevel >= l) { \
DISPLAY(__VA_ARGS__); \
} /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */

◆ DISPLAYLEVEL [2/2]

#define DISPLAYLEVEL (   l,
  ... 
)
Value:
if (displayLevel >= l) { \
DISPLAY(__VA_ARGS__); \
} /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */

◆ DISPLAYUPDATE [1/3]

#define DISPLAYUPDATE (   l,
  ... 
)
Value:
do { \
if (notificationLevel>=l) { \
if (ZDICT_clockSpan(displayClock) > refreshRate) { \
displayClock = clock(); \
DISPLAY(__VA_ARGS__); \
} \
if (notificationLevel>=4) fflush(stderr); \
} \
} while (0)

◆ DISPLAYUPDATE [2/3]

#define DISPLAYUPDATE (   lastUpdateTime,
  l,
  ... 
)
Value:
if (displayLevel >= l) { \
const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; \
if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) { \
lastUpdateTime = clock(); \
DISPLAY(__VA_ARGS__); \
} \
}

◆ DISPLAYUPDATE [3/3]

#define DISPLAYUPDATE (   lastUpdateTime,
  l,
  ... 
)
Value:
if (displayLevel >= l) { \
const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; \
if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) { \
lastUpdateTime = clock(); \
DISPLAY(__VA_ARGS__); \
} \
}

◆ FASTCOVER_MAX_SAMPLES_SIZE

#define FASTCOVER_MAX_SAMPLES_SIZE   (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))

Samples are referenced with 32-bit indexes, so total samples size is limited to 4 GB on 64-bit builds. For 32-bit builds we choose 1 GB. Most 32-bit platforms have 2 GB of user-mode addressable space and we allocate a large contiguous buffer, so 1 GB is already a high limit.

◆ FORCE_INLINE_TEMPLATE

#define FORCE_INLINE_TEMPLATE   static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR

FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant parameters. They must be inlined for the compiler to eliminate the constant branches.
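As an illustration only (this helper is not part of zstd), a minimal sketch of the "template" pattern: the constant parameter wide is fixed at each call site, so once the generic body is force-inlined the compiler can discard the untaken branch.

#include <string.h>

/* Illustrative sketch: a C "template" whose constant parameter selects a code path. */
FORCE_INLINE_TEMPLATE size_t fill_generic(unsigned char* dst, size_t n, int wide)
{
    size_t i = 0;
    if (wide) {                                 /* constant branch: removed entirely when wide==0 */
        for (; i + 8 <= n; i += 8) memset(dst + i, 0, 8);
    }
    for (; i < n; i++) dst[i] = 0;
    return n;
}

/* Outlined instantiations, one per constant value: */
static size_t fill_wide  (unsigned char* dst, size_t n) { return fill_generic(dst, n, 1); }
static size_t fill_narrow(unsigned char* dst, size_t n) { return fill_generic(dst, n, 0); }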

◆ FORWARD_IF_ERROR

#define FORWARD_IF_ERROR (   err,
  ... 
)
Value:
do { \
size_t const err_code = (err); \
if (ERR_isError(err_code)) { \
RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
__FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return err_code; \
} \
} while(0)

If the provided expression evaluates to an error code, returns that error code.

In debug modes, prints additional information.

◆ FSE_H_FSE_STATIC_LINKING_ONLY

#define FSE_H_FSE_STATIC_LINKING_ONLY

Tutorial :

(Note : these functions only decompress FSE-compressed blocks. If block is uncompressed, use memcpy() instead. If block is a single repeated byte, use memset() instead.)

The first step is to obtain the normalized frequencies of symbols. This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. In practice, that means it's necessary to know 'maxSymbolValue' beforehand, or size the table to handle worst case situations (typically 256). FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. If there is an error, the function will return an error code, which can be tested using FSE_isError().

The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. This is performed by the function FSE_buildDTable(). The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). If there is an error, the function will return an error code, which can be tested using FSE_isError().

FSE_DTable can then be used to decompress cSrc, with FSE_decompress_usingDTable(). cSrcSize must be strictly correct, otherwise decompression will fail. FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=dstCapacity). If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
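A condensed sketch of the three steps above, using the FSE static-linking API named in this tutorial. The input pointers (rBuffer/rBufferSize for the saved normalized counts, cSrc/cSrcSize for the compressed payload) are assumed to be supplied by the caller; FSE_freeDTable() is assumed as the matching destructor of FSE_createDTable(), and NULL checks are omitted for brevity.

/* Sketch only: read counts, build the DTable, then decompress. */
static size_t example_fse_decompress(void* dst, size_t dstCapacity,
                                     const void* rBuffer, size_t rBufferSize,
                                     const void* cSrc, size_t cSrcSize)
{
    short normalizedCounter[256];                /* worst case: maxSymbolValue = 255 */
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    size_t dSize;

    /* 1) read normalized frequencies; also provides tableLog and maxSymbolValue */
    {   size_t const hSize = FSE_readNCount(normalizedCounter, &maxSymbolValue, &tableLog, rBuffer, rBufferSize);
        if (FSE_isError(hSize)) return hSize;
    }
    /* 2) build the decoding table from the normalized counter */
    {   FSE_DTable* const dt = FSE_createDTable(tableLog);
        size_t const r = FSE_buildDTable(dt, normalizedCounter, maxSymbolValue, tableLog);
        if (FSE_isError(r)) { FSE_freeDTable(dt); return r; }
        /* 3) regenerate the data; cSrcSize must be exact */
        dSize = FSE_decompress_usingDTable(dst, dstCapacity, cSrc, cSrcSize, dt);
        FSE_freeDTable(dt);
    }
    return dSize;   /* number of regenerated bytes, or an error code testable with FSE_isError() */
}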

◆ FSE_MAX_MEMORY_USAGE

#define FSE_MAX_MEMORY_USAGE   14

MEMORY_USAGE : Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.). Increasing memory usage improves compression ratio. Reduced memory usage can improve speed, due to cache effect. Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache.

◆ FSE_MAX_SYMBOL_VALUE

#define FSE_MAX_SYMBOL_VALUE   255

FSE_MAX_SYMBOL_VALUE : Maximum symbol value authorized. Required for proper stack allocation

◆ GEAR_ITER_ONCE [1/2]

#define GEAR_ITER_ONCE ( )
Value:
do { \
hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
n += 1; \
} while (0)

◆ GEAR_ITER_ONCE [2/2]

#define GEAR_ITER_ONCE ( )
Value:
do { \
hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
n += 1; \
if (UNLIKELY((hash & mask) == 0)) { \
splits[*numSplits] = n; \
*numSplits += 1; \
if (*numSplits == LDM_BATCH_SIZE) \
goto done; \
} \
} while (0)

◆ GEN_ZSTD_BT_GET_ALL_MATCHES

#define GEN_ZSTD_BT_GET_ALL_MATCHES (   dictMode)
Value:
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)

◆ GEN_ZSTD_BT_GET_ALL_MATCHES_

#define GEN_ZSTD_BT_GET_ALL_MATCHES_ (   dictMode,
  mls 
)
Value:
static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
ZSTD_match_t* matches, \
ZSTD_MatchState_t* ms, \
U32* nextToUpdate3, \
const BYTE* ip, \
const BYTE* const iHighLimit, \
const U32 rep[ZSTD_REP_NUM], \
U32 const ll0, \
U32 const lengthToBeat) \
{ \
return ZSTD_btGetAllMatches_internal( \
matches, ms, nextToUpdate3, ip, iHighLimit, \
rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
}

◆ GEN_ZSTD_BT_SEARCH_FN

#define GEN_ZSTD_BT_SEARCH_FN (   dictMode,
  mls 
)
Value:
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offBasePtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
} \

◆ GEN_ZSTD_CALL_BT_SEARCH_FN

#define GEN_ZSTD_CALL_BT_SEARCH_FN (   dictMode,
  mls 
)
Value:
case mls: \
return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);

◆ GEN_ZSTD_CALL_HC_SEARCH_FN

#define GEN_ZSTD_CALL_HC_SEARCH_FN (   dictMode,
  mls 
)
Value:
case mls: \
return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);

◆ GEN_ZSTD_CALL_ROW_SEARCH_FN

#define GEN_ZSTD_CALL_ROW_SEARCH_FN (   dictMode,
  mls,
  rowLog 
)
Value:
case rowLog: \
return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);

◆ GEN_ZSTD_HC_SEARCH_FN

#define GEN_ZSTD_HC_SEARCH_FN (   dictMode,
  mls 
)
Value:
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
} \

◆ GEN_ZSTD_ROW_SEARCH_FN

#define GEN_ZSTD_ROW_SEARCH_FN (   dictMode,
  mls,
  rowLog 
)
Value:
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
} \

◆ HINT_INLINE

#define HINT_INLINE   FORCE_INLINE_TEMPLATE

HINT_INLINE is used to help the compiler generate better code. It is not used for "templates", so it can be tweaked based on the compiler's performance.

gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the always_inline attribute.

clang up to 5.0.0 (trunk) benefits tremendously from the always_inline attribute.

◆ HUF_4X1_DECODE_SYMBOL

#define HUF_4X1_DECODE_SYMBOL (   _stream,
  _symbol 
)
Value:
do { \
int const index = (int)(bits[(_stream)] >> 53); \
int const entry = (int)dtable[index]; \
bits[(_stream)] <<= (entry & 0x3F); \
op[(_stream)][(_symbol)] = (BYTE)((entry >> 8) & 0xFF); \
} while (0)

◆ HUF_4X1_RELOAD_STREAM

#define HUF_4X1_RELOAD_STREAM (   _stream)
Value:
do { \
int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
int const nbBits = ctz & 7; \
int const nbBytes = ctz >> 3; \
op[(_stream)] += 5; \
ip[(_stream)] -= nbBytes; \
bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
bits[(_stream)] <<= nbBits; \
} while (0)

◆ HUF_4X2_DECODE_SYMBOL

#define HUF_4X2_DECODE_SYMBOL (   _stream,
  _decode3 
)
Value:
do { \
if ((_decode3) || (_stream) != 3) { \
int const index = (int)(bits[(_stream)] >> 53); \
HUF_DEltX2 const entry = dtable[index]; \
MEM_write16(op[(_stream)], entry.sequence); \
bits[(_stream)] <<= (entry.nbBits) & 0x3F; \
op[(_stream)] += (entry.length); \
} \
} while (0)

◆ HUF_4X2_RELOAD_STREAM

#define HUF_4X2_RELOAD_STREAM (   _stream)
Value:
do { \
HUF_4X2_DECODE_SYMBOL(3, 1); \
{ \
int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
int const nbBits = ctz & 7; \
int const nbBytes = ctz >> 3; \
ip[(_stream)] -= nbBytes; \
bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
bits[(_stream)] <<= nbBits; \
} \
} while (0)

◆ HUF_4X_FOR_EACH_STREAM

#define HUF_4X_FOR_EACH_STREAM (   X)
Value:
do { \
X(0); \
X(1); \
X(2); \
X(3); \
} while (0)

◆ HUF_4X_FOR_EACH_STREAM_WITH_VAR

#define HUF_4X_FOR_EACH_STREAM_WITH_VAR (   X,
  var 
)
Value:
do { \
X(0, (var)); \
X(1, (var)); \
X(2, (var)); \
X(3, (var)); \
} while (0)

◆ HUF_BITS_IN_CONTAINER

#define HUF_BITS_IN_CONTAINER   (sizeof(size_t) * 8)

HUF_CStream_t: Huffman uses its own BIT_CStream_t implementation. There are three major differences from BIT_CStream_t:

  1. HUF_addBits() takes a HUF_CElt (size_t) which is the pair (nbBits, value) in the format (illustrated in the sketch after this list):
    • Bits [0, 4) = nbBits
    • Bits [4, 64 - nbBits) = 0
    • Bits [64 - nbBits, 64) = value
  2. The bitContainer is built from the upper bits and right shifted. E.g. to add a new value of N bits you right shift the bitContainer by N, then OR the new value into the N upper bits.
  3. The bitstream has two bit containers. You can add bits to the second container and merge them into the first container.
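For illustration only (not a zstd helper), the (nbBits, value) packing described in point 1 can be written as:

#include <assert.h>
#include <stddef.h>

/* Hypothetical packer, assuming a 64-bit size_t: low 4 bits hold nbBits,
 * the top nbBits hold the value, everything in between is zero. */
static size_t pack_huf_celt(unsigned nbBits, size_t value)
{
    assert(nbBits >= 1 && nbBits < 16);            /* must fit in bits [0,4) and avoid a shift by 64 */
    assert(value < ((size_t)1 << nbBits));         /* value must fit in nbBits */
    return (size_t)nbBits | (value << (64 - nbBits));
}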

◆ HUF_BLOCKSIZE_MAX

#define HUF_BLOCKSIZE_MAX   (128 * 1024)

maximum input size for a single block compressed with HUF_compress

◆ HUF_CTABLE_WORKSPACE_SIZE_U32

#define HUF_CTABLE_WORKSPACE_SIZE_U32   ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)

HUF_buildCTable_wksp() : Same as HUF_buildCTable(), but using externally allocated scratch buffer. workSpace must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.

◆ HUF_DECODE_SYMBOLX1_1

#define HUF_DECODE_SYMBOLX1_1 (   ptr,
  DStreamPtr 
)
Value:
do { \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
} while (0)

◆ HUF_DECODE_SYMBOLX1_2

#define HUF_DECODE_SYMBOLX1_2 (   ptr,
  DStreamPtr 
)
Value:
do { \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
} while (0)

◆ HUF_DECODE_SYMBOLX2_1

#define HUF_DECODE_SYMBOLX2_1 (   ptr,
  DStreamPtr 
)
Value:
do { \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
} while (0)

◆ HUF_DECODE_SYMBOLX2_2

#define HUF_DECODE_SYMBOLX2_2 (   ptr,
  DStreamPtr 
)
Value:
do { \
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
} while (0)

◆ HUF_DECOMPRESS_WORKSPACE_SIZE

#define HUF_DECOMPRESS_WORKSPACE_SIZE   ((2 << 10) + (1 << 9))

The minimum workspace size for the workSpace used in HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().

The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15. Buffer overflow errors may potentially occur if code modifications result in a required workspace size greater than that specified in the following macro.

◆ HUF_DGEN

#define HUF_DGEN (   fn)
Value:
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
size_t cSrcSize, HUF_DTable const* DTable, int flags) \
{ \
(void)flags; \
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
}

◆ HUF_READ_STATS_WORKSPACE_SIZE_U32

#define HUF_READ_STATS_WORKSPACE_SIZE_U32   FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)

HUF_readStats_wksp() : Same as HUF_readStats() but takes an external workspace which must be 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE. If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.

◆ JOB_ERROR

#define JOB_ERROR (   e)
Value:
do { \
ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \
job->cSize = e; \
ZSTD_pthread_mutex_unlock(&job->job_mutex); \
goto _endJob; \
} while (0)

◆ LONG_OFFSETS_MAX_EXTRA_BITS_32

#define LONG_OFFSETS_MAX_EXTRA_BITS_32
Value:
(ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
: 0)

◆ MERGE_CHECK

#define MERGE_CHECK (   a,
  b,
 
)
Value:
do {\
if(((c) & 1) ||\
(((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
*(a) = ~*(a);\
}\
if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
*(b) = ~*(b);\
}\
} while(0)

◆ PREFETCH_AREA

#define PREFETCH_AREA (   p,
 
)
Value:
do { \
const char* const _ptr = (const char*)(p); \
size_t const _size = (size_t)(s); \
size_t _pos; \
for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
PREFETCH_L2(_ptr + _pos); \
} \
} while (0)

◆ RETURN_ERROR

#define RETURN_ERROR (   err,
  ... 
)
Value:
do { \
RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
__FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
} while(0)

Unconditionally return the specified error.

In debug modes, prints additional information.

◆ RETURN_ERROR_IF

#define RETURN_ERROR_IF (   cond,
  err,
  ... 
)
Value:
do { \
if (cond) { \
RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
__FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
} \
} while (0)

Return the specified error if the condition evaluates to true.

In debug modes, prints additional information. In order to do that (particularly, printing the conditional that failed), this can't just wrap RETURN_ERROR().
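A hedged usage sketch, as the macro is typically invoked inside a size_t-returning function (the condition and error name here are illustrative):

/* Sketch: early-exit validation; expands to a logged `return ERROR(dstSize_tooSmall);` when the condition holds. */
RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall, "destination buffer too small");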

◆ STACK_POP

#define STACK_POP (   _a,
  _b,
  _c,
  _d 
)
Value:
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d;\
} while(0)

◆ STACK_POP5

#define STACK_POP5 (   _a,
  _b,
  _c,
  _d,
  _e 
)
Value:
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
} while(0)

◆ STACK_PUSH

#define STACK_PUSH (   _a,
  _b,
  _c,
  _d 
)
Value:
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize++].d = (_d);\
} while(0)

◆ STACK_PUSH5

#define STACK_PUSH5 (   _a,
  _b,
  _c,
  _d,
  _e 
)
Value:
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
} while(0)

◆ STARTNODE

#define STARTNODE   (HUF_SYMBOLVALUE_MAX+1)

HUF_buildCTable_wksp() : Same as HUF_buildCTable(), but using externally allocated scratch buffer. workSpace must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).

◆ THREADING_H_938743

#define THREADING_H_938743

Copyright (c) 2016 Tino Reichardt All rights reserved.

You can contact the author at:

This source code is licensed under both the BSD-style license (found in the LICENSE file in the root directory of this source tree) and the GPLv2 (found in the COPYING file in the root directory of this source tree). You may select, at your option, one of the above-listed licenses. This file holds wrappers for systems which do not support pthreads.

◆ WIN_CDECL

#define WIN_CDECL

On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC). This explicitly marks such functions as __cdecl so that the code will still compile if a CC other than __cdecl has been made the default.

◆ X

#define X (   name,
  r,
  bit 
)
Value:
MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
return ((cpuid.r) & (1U << bit)) != 0; \
}

◆ XXH_PROCESS1

#define XXH_PROCESS1
Value:
do { \
hash += (*ptr++) * XXH_PRIME32_5; \
hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
} while (0)

◆ XXH_PROCESS4

#define XXH_PROCESS4
Value:
do { \
hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
ptr += 4; \
hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
} while (0)

◆ ZDICT_QSORT_MIN

#define ZDICT_QSORT_MIN   0

Select the qsort() variant used by cover

◆ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR

#define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR

Zstd relies on pointer overflow in its decompressor. We add this attribute to functions that rely on pointer overflow.

◆ ZSTD_ASM_SUPPORTED

#define ZSTD_ASM_SUPPORTED   0

Only enable assembly for GNU C compatible compilers, because other platforms may not support GAS assembly syntax.

Only enable assembly for Linux / MacOS / Win32, other platforms may work, but they haven't been tested. This could likely be extended to BSD systems.

Disable assembly when MSAN is enabled, because MSAN requires 100% of code to be instrumented to work.

◆ ZSTD_BT_GET_ALL_MATCHES_ARRAY

#define ZSTD_BT_GET_ALL_MATCHES_ARRAY (   dictMode)
Value:
{ \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
}

◆ ZSTD_BT_SEARCH_FN

#define ZSTD_BT_SEARCH_FN (   dictMode,
  mls 
)    ZSTD_BtFindBestMatch_##dictMode##_##mls

Generate search functions templated on (dictMode, mls, rowLog). These functions are outlined for code size & compilation time. ZSTD_searchMax() dispatches to the correct implementation function.

TODO: The start of the search function involves loading and calculating a bunch of constants from the ZSTD_MatchState_t. These computations could be done in an initialization function, and saved somewhere in the match state. Then we could pass a pointer to the saved state instead of the match state, and avoid duplicate computations.

TODO: Move the match re-winding into searchMax. This improves compression ratio, and unlocks further simplifications with the next TODO.

TODO: Try moving the repcode search into searchMax. After the re-winding and repcode search are in searchMax, there is no more logic in the match finder loop that requires knowledge about the dictMode. So we should be able to avoid force inlining it, and we can join the extDict loop with the single segment loop. It should go in searchMax instead of its own function to avoid having multiple virtual function calls per search.

◆ ZSTD_CHUNKSIZE_MAX

#define ZSTD_CHUNKSIZE_MAX
Value:
( ((U32)-1) /* Maximum ending current index */ \
- ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */

◆ ZSTD_COMPRESS_HEAPMODE

#define ZSTD_COMPRESS_HEAPMODE   0

COMPRESS_HEAPMODE : Select how the default compression function ZSTD_compress() allocates its context, on stack (0, default), or into heap (1). Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.

◆ ZSTD_CONTENTSIZE_UNKNOWN

#define ZSTD_CONTENTSIZE_UNKNOWN   (0ULL - 1)

ZSTD_getFrameContentSize() : requires v1.3.0+ src should point to the start of a ZSTD encoded frame. srcSize must be at least as large as the frame header. hint : any size >= ZSTD_frameHeaderSize_max is large enough.

Returns
: - decompressed size of src frame content, if known
  • ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
  • ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)

note 1 : a 0 return value means the frame is valid but "empty". When invoking this method on a skippable frame, it will return 0.
note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode). When return==ZSTD_CONTENTSIZE_UNKNOWN, data to decompress could be any size. In which case, it's necessary to use streaming mode to decompress data. Optionally, application can rely on some implicit limit, as ZSTD_decompress() only needs an upper bound of decompressed size. (For example, data could be necessarily cut into blocks <= 16 KB).
note 3 : decompressed size is always present when compression is completed using single-pass functions, such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
note 4 : decompressed size can be very large (64-bits value), potentially larger than what local system can handle as a single memory segment. In which case, it's necessary to use streaming mode to decompress data.
note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. Always ensure return value fits within application's authorized limits. Each application can set its own limits.
note 6 : This function replaces ZSTD_getDecompressedSize()
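A hedged usage sketch of the checks described above; src/srcSize are assumed to hold a single complete frame, and real code should apply its own size limits before allocating (notes 4 and 5).

#include <stdlib.h>

/* Sketch: size the destination from the frame header, fall back to streaming if unknown. */
static void example_sized_decompress(const void* src, size_t srcSize)
{
    unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR) return;       /* not a valid zstd frame */
    if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) return;     /* size not recorded: use streaming decompression instead */
    {   void* const dst = malloc((size_t)rSize);
        if (dst != NULL) {
            size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, src, srcSize);
            if (ZSTD_isError(dSize)) { /* handle error */ }
            free(dst);
        }
    }
}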

◆ ZSTD_DECOMPRESSION_MARGIN

#define ZSTD_DECOMPRESSION_MARGIN (   originalSize,
  blockSize 
)
Value:
((size_t)( \
ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \
4 /* checksum */ + \
((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
(blockSize) /* One block of margin */ \
))

ZSTD_DECOMPRESSION_MARGIN() : Similar to ZSTD_decompressionMargin(), but instead of computing the margin from the compressed frame, compute it from the original size and the blockSizeLog. See ZSTD_decompressionMargin() for details.

WARNING: This macro does not support multi-frame input, the input must be a single zstd frame. If you need that support use the function, or implement it yourself.

Parameters
originalSize : The original uncompressed size of the data.
blockSize : The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). Unless you explicitly set the windowLog smaller than ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.
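A minimal sketch of buffer sizing with this macro, assuming a single frame whose uncompressed size is known and a windowLog that was not lowered below ZSTD_BLOCKSIZELOG_MAX:

/* Sketch only: capacity of the shared buffer used for in-place decompression.
 * Per ZSTD_decompressionMargin(), the compressed input is then placed at the very end of this buffer. */
size_t const margin     = ZSTD_DECOMPRESSION_MARGIN(originalSize, ZSTD_BLOCKSIZE_MAX);
size_t const bufferSize = (size_t)originalSize + margin;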

◆ ZSTD_DUBT_UNSORTED_MARK

#define ZSTD_DUBT_UNSORTED_MARK
Value:
1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
It's not a big deal though : candidate will just be sorted again.
Additionally, candidate position 1 will be lost.
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy.
This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */

◆ ZSTD_ENABLE_ASM_X86_64_BMI2

#define ZSTD_ENABLE_ASM_X86_64_BMI2   0

Determines whether we should enable assembly for x86-64 with BMI2.

Enable if all of the following conditions hold:

  • ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
  • Assembly is supported
  • We are compiling for x86-64 and either:
    • DYNAMIC_BMI2 is enabled
    • BMI2 is supported at compile time

◆ ZSTD_FOR_EACH_DICT_MODE

#define ZSTD_FOR_EACH_DICT_MODE (   X,
  ... 
)
Value:
X(__VA_ARGS__, noDict) \
X(__VA_ARGS__, extDict) \
X(__VA_ARGS__, dictMatchState) \
X(__VA_ARGS__, dedicatedDictSearch)

◆ ZSTD_FOR_EACH_MLS

#define ZSTD_FOR_EACH_MLS (   X,
  dictMode 
)
Value:
X(dictMode, 4) \
X(dictMode, 5) \
X(dictMode, 6)

◆ ZSTD_FOR_EACH_MLS_ROWLOG

#define ZSTD_FOR_EACH_MLS_ROWLOG (   X,
  dictMode 
)
Value:
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)

◆ ZSTD_FOR_EACH_ROWLOG

#define ZSTD_FOR_EACH_ROWLOG (   X,
  dictMode,
  mls 
)
Value:
X(dictMode, mls, 4) \
X(dictMode, mls, 5) \
X(dictMode, mls, 6)

◆ ZSTD_GEN_DFAST_FN

#define ZSTD_GEN_DFAST_FN (   dictMode,
  mls 
)
Value:
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
}

◆ ZSTD_GEN_FAST_FN

#define ZSTD_GEN_FAST_FN (   dictMode,
  mml,
  cmov 
)
Value:
static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \
ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \
}

◆ ZSTD_GEN_RECORD_FINGERPRINT

#define ZSTD_GEN_RECORD_FINGERPRINT (   _rate,
  _hSize 
)
Value:
static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \
{ \
recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \
}

◆ ZSTD_HASHLOG3_MAX

#define ZSTD_HASHLOG3_MAX   17

ZSTD_HASHLOG3_MAX : Maximum size of the hash table dedicated to find 3-bytes matches, in log format, aka 17 => 1 << 17 == 128Ki positions. This structure is only used in zstd_opt. Since allocation is centralized for all strategies, it has to be known here. The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3, so that zstd_opt.c doesn't need to know about this constant.

◆ ZSTD_HEAPMODE

#define ZSTD_HEAPMODE   1

HEAPMODE : Select how default decompression function ZSTD_decompress() allocates its context, on stack (0), or into heap (1, default; requires malloc()). Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.

◆ ZSTD_IS_DETERMINISTIC_BUILD

#define ZSTD_IS_DETERMINISTIC_BUILD   1

ZSTD_IS_DETERMINISTIC_BUILD must be set to 0 if any compilation macro is active that impacts the compressed output.

NOTE: ZSTD_MULTITHREAD is allowed to be set or unset.

◆ ZSTD_LAZY_DDSS_BUCKET_LOG

#define ZSTD_LAZY_DDSS_BUCKET_LOG   2

Dedicated Dictionary Search Structure bucket log. In the ZSTD_dedicatedDictSearch mode, the hashTable has 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just one.

◆ ZSTD_MAX_INPUT_SIZE

#define ZSTD_MAX_INPUT_SIZE   ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)

ZSTD_compressBound() : maximum compressed size in worst case single-pass scenario. When invoking ZSTD_compress(), or any other one-pass compression function, it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) as it eliminates one potential failure scenario, aka not enough room in dst buffer to write the compressed frame. Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE . In which case, ZSTD_compressBound() will return an error code which can be tested using ZSTD_isError().

ZSTD_COMPRESSBOUND() : same as ZSTD_compressBound(), but as a macro. It can be used to produce constants, which can be useful for static allocation, for example to size a static array on stack. Will produce constant value 0 if srcSize is too large.
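A hedged sketch of the recommended sizing for a one-pass call (level 3 is an arbitrary example; the caller owns and frees the returned buffer):

#include <stdlib.h>

/* Sketch: compress src into a freshly allocated buffer sized with ZSTD_compressBound(). */
static void* example_compress(const void* src, size_t srcSize, size_t* cSizePtr)
{
    size_t const bound = ZSTD_compressBound(srcSize);
    void* dst;
    if (ZSTD_isError(bound)) return NULL;              /* srcSize >= ZSTD_MAX_INPUT_SIZE */
    dst = malloc(bound);
    if (dst == NULL) return NULL;
    *cSizePtr = ZSTD_compress(dst, bound, src, srcSize, 3 /* compression level */);
    if (ZSTD_isError(*cSizePtr)) { free(dst); return NULL; }
    return dst;
}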

◆ ZSTD_MAX_NB_BLOCK_SPLITS

#define ZSTD_MAX_NB_BLOCK_SPLITS   196

Struct that contains all elements of block splitter that should be allocated in a wksp.

◆ ZSTD_MAXWINDOWSIZE_DEFAULT

#define ZSTD_MAXWINDOWSIZE_DEFAULT   (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)

LEGACY_SUPPORT : if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)

MAXWINDOWSIZE_DEFAULT : maximum window size accepted by DStream by default. Frames requiring more memory will be rejected. It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().

◆ ZSTD_NO_FORWARD_PROGRESS_MAX

#define ZSTD_NO_FORWARD_PROGRESS_MAX   16

NO_FORWARD_PROGRESS_MAX : maximum allowed nb of calls to ZSTD_decompressStream() without any forward progress (defined as: no byte read from input, and no byte flushed to output) before triggering an error.

◆ ZSTD_SWITCH_MLS

#define ZSTD_SWITCH_MLS (   X,
  dictMode 
)
Value:
switch (mls) { \
ZSTD_FOR_EACH_MLS(X, dictMode) \
}

◆ ZSTD_SWITCH_ROWLOG

#define ZSTD_SWITCH_ROWLOG (   dictMode,
  mls 
)
Value:
case mls: \
switch (rowLog) { \
ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
} \
ZSTD_UNREACHABLE; \
break;

◆ ZSTD_SWITCH_SEARCH_METHOD

#define ZSTD_SWITCH_SEARCH_METHOD (   dictMode)
Value:
switch (searchMethod) { \
case search_hashChain: \
ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
break; \
case search_binaryTree: \
ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
break; \
case search_rowHash: \
ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
break; \
} \
ZSTD_UNREACHABLE;

◆ ZSTD_WINDOWLOG_LIMIT_DEFAULT

#define ZSTD_WINDOWLOG_LIMIT_DEFAULT
Value:
27 /* by default, the streaming decoder will refuse any frame
* requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
* to preserve host's memory from unreasonable requirements.
* This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
* The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */

Typedef Documentation

◆ COVER_best_t

typedef struct COVER_best_s COVER_best_t

COVER_best_t is used for two purposes:

  1. Synchronizing threads.
  2. Saving the best parameters and dictionary.

All of the methods except COVER_best_init() are thread safe if zstd is compiled with multithreaded support.

◆ COVER_dictSelection_t

Struct used for the dictionary selection function.

◆ COVER_tryParameters_data_t

Parameters for COVER_tryParameters().

◆ FASTCOVER_tryParameters_data_t

Parameters for FASTCOVER_tryParameters().

◆ FSE_CTable

typedef unsigned FSE_CTable

Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue'

◆ POOL_function

typedef void(* POOL_function) (void *)

POOL_function : The function type that can be added to a thread pool.

◆ ZSTD_allocFunction

typedef void*(* ZSTD_allocFunction) (void *opaque, size_t size)

Custom memory allocation : These prototypes make it possible to pass your own allocation/free functions. ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below. All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
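A minimal sketch of wiring custom allocators into a context, assuming the standard allocator underneath (my_alloc/my_free are hypothetical wrappers; only their signatures matter):

#include <stdlib.h>

static void* my_alloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
static void  my_free (void* opaque, void* addr)  { (void)opaque; free(addr); }

/* Sketch: every internal allocation of this context goes through my_alloc/my_free. */
static ZSTD_CCtx* example_create_cctx(void)
{
    ZSTD_customMem const cmem = { my_alloc, my_free, NULL /* opaque */ };
    return ZSTD_createCCtx_advanced(cmem);
}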

◆ ZSTD_CStream

CCtx and CStream are now effectively same object (>= v1.3.0)

◆ ZSTD_DStream

DCtx and DStream are now effectively same object (>= v1.3.0)

◆ ZSTD_threadPool

typedef struct POOL_ctx_s ZSTD_threadPool

Thread pool : These prototypes make it possible to share a thread pool among multiple compression contexts. This can limit resources for applications with multiple threads where each one uses a threaded compression mode (via ZSTD_c_nbWorkers parameter). ZSTD_createThreadPool creates a new thread pool with a given number of threads. Note that the lifetime of such pool must exist while being used. ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value to use an internal thread pool). ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
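A hedged sketch of sharing one pool between two compression contexts (worker counts are illustrative; error checking is trimmed):

/* Sketch: both contexts draw their workers from the same pool, which must outlive them. */
static void example_shared_pool(void)
{
    ZSTD_threadPool* const pool  = ZSTD_createThreadPool(4);
    ZSTD_CCtx*       const cctx1 = ZSTD_createCCtx();
    ZSTD_CCtx*       const cctx2 = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(cctx1, ZSTD_c_nbWorkers, 4);
    ZSTD_CCtx_setParameter(cctx2, ZSTD_c_nbWorkers, 4);
    ZSTD_CCtx_refThreadPool(cctx1, pool);
    ZSTD_CCtx_refThreadPool(cctx2, pool);
    /* ... compress with both contexts ... */
    ZSTD_freeCCtx(cctx1);
    ZSTD_freeCCtx(cctx2);
    ZSTD_freeThreadPool(pool);
}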

Enumeration Type Documentation

◆ FSE_repeat

enum FSE_repeat
Enumerator
FSE_repeat_none 

Cannot use the previous table

FSE_repeat_check 

Can use the previous table but it must be checked

FSE_repeat_valid 

Can use the previous table and it is assumed to be valid

◆ HUF_flags_e

Huffman flags bitset. For all flags, 0 is the default value.

Enumerator
HUF_flags_bmi2 

If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. Otherwise: Ignored.

HUF_flags_optimalDepth 

If set: Test possible table depths to find the one that produces the smallest header + encoded size. If unset: Use heuristic to find the table depth.

HUF_flags_preferRepeat 

If set: If the previous table can encode the input, always reuse the previous table. If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.

HUF_flags_suspectUncompressible 

If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. If unset: Always histogram the entire input.

HUF_flags_disableAsm 

If set: Don't use assembly implementations If unset: Allow using assembly implementations

HUF_flags_disableFast 

If set: Don't use the fast decoding loop, always use the fallback decoding loop. If unset: Use the fast decoding loop when possible.

◆ HUF_repeat

enum HUF_repeat
Enumerator
HUF_repeat_none 

Cannot use the previous table

HUF_repeat_check 

Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat

HUF_repeat_valid 

Can use the previous table and it is assumed to be valid

◆ ZSTD_buffered_policy_e

Indicates whether this compression proceeds directly from user-provided source buffer to user-provided destination buffer (ZSTDb_not_buffered), or whether the context needs to buffer the input/output (ZSTDb_buffered).

◆ ZSTD_compResetPolicy_e

Controls, for this matchState reset, whether the tables need to be cleared / prepared for the coming compression (ZSTDcrp_makeClean), or whether the tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a subsequent operation will overwrite the table space anyways (e.g., copying the matchState contents in from a CDict).

◆ ZSTD_cwksp_static_alloc_e

Used to describe whether the workspace is statically allocated (and will not necessarily ever be freed), or if it's dynamically allocated and we can expect a well-formed caller to free this.

◆ ZSTD_dictLoadMethod_e

Enumerator
ZSTD_dlm_byCopy 

Copy dictionary content internally

ZSTD_dlm_byRef 

Reference dictionary content – the dictionary buffer must outlive its users.

◆ ZSTD_indexResetPolicy_e

Controls, for this matchState reset, whether indexing can continue where it left off (ZSTDirp_continue), or whether it needs to be restarted from zero (ZSTDirp_reset).

◆ ZSTD_literalCompressionMode_e

Enumerator
ZSTD_lcm_auto 

Automatically determine the compression mode based on the compression level. Negative compression levels will be uncompressed, and positive compression levels will be compressed.

ZSTD_lcm_huffman 

Always attempt Huffman compression. Uncompressed literals will still be emitted if Huffman compression is not profitable.

ZSTD_lcm_uncompressed 

Always emit uncompressed literals.

Function Documentation

◆ BIT_addBits()

MEM_STATIC void BIT_addBits ( BIT_CStream_t bitC,
BitContainerType  value,
unsigned  nbBits 
)

BIT_addBits() : can add up to 31 bits into bitC. Note : does not check for register overflow !

◆ BIT_addBitsFast()

MEM_STATIC void BIT_addBitsFast ( BIT_CStream_t bitC,
BitContainerType  value,
unsigned  nbBits 
)

BIT_addBitsFast() : works only if value is clean, meaning all high bits above nbBits are 0

◆ BIT_closeCStream()

MEM_STATIC size_t BIT_closeCStream ( BIT_CStream_t bitC)

BIT_closeCStream() :

Returns
: size of CStream, in bytes, or 0 if it could not fit into dstBuffer

◆ BIT_endOfDStream()

MEM_STATIC unsigned BIT_endOfDStream ( const BIT_DStream_t DStream)

BIT_endOfDStream() :

Returns
: 1 if DStream has exactly reached its end (all bits consumed).

◆ BIT_flushBits()

MEM_STATIC void BIT_flushBits ( BIT_CStream_t bitC)

BIT_flushBits() : assumption : bitContainer has not overflowed. Safe version; checks for buffer overflow, and prevents it. Note : does not signal buffer overflow; overflow will be revealed later on using BIT_closeCStream()

◆ BIT_flushBitsFast()

MEM_STATIC void BIT_flushBitsFast ( BIT_CStream_t bitC)

BIT_flushBitsFast() : assumption : bitContainer has not overflowed. Unsafe version; does not check for buffer overflow.

◆ BIT_initCStream()

MEM_STATIC size_t BIT_initCStream ( BIT_CStream_t bitC,
void *  startPtr,
size_t  dstCapacity 
)

BIT_initCStream() : dstCapacity must be > sizeof(size_t)

Returns
: 0 if success, otherwise an error code (can be tested using ERR_isError())
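
Taken together, BIT_initCStream(), BIT_addBits(), BIT_flushBits() and BIT_closeCStream() form the write side of this internal bitstream API. The following is a minimal sketch of that sequence; it assumes the code is built inside the library tree (so the internal bitstream.h and error_private.h headers are on the include path) and is an illustration rather than part of the public zstd API.

#include <stdio.h>
#include "bitstream.h"     /* internal header: BIT_CStream_t, BIT_addBits(), ... */
#include "error_private.h" /* ERR_isError() */

/* Write a few small values into a bit stream and close it. */
static size_t writeBits(void* dst, size_t dstCapacity)
{
    BIT_CStream_t bitC;
    size_t const initResult = BIT_initCStream(&bitC, dst, dstCapacity);
    if (ERR_isError(initResult)) return initResult;   /* dstCapacity too small */

    BIT_addBits(&bitC, 5, 3);    /* value 5, stored on 3 bits */
    BIT_addBits(&bitC, 9, 4);    /* value 9, stored on 4 bits */
    BIT_flushBits(&bitC);        /* safe flush: moves accumulated bits to memory */

    BIT_addBits(&bitC, 1, 1);
    BIT_flushBits(&bitC);

    /* number of bytes written, or 0 if dst was too small */
    return BIT_closeCStream(&bitC);
}

int main(void)
{
    unsigned char buf[64];
    size_t const written = writeBits(buf, sizeof(buf));
    printf("bitstream size: %zu bytes\n", written);
    return 0;
}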

◆ BIT_initDStream()

MEM_STATIC size_t BIT_initDStream ( BIT_DStream_t bitD,
const void *  srcBuffer,
size_t  srcSize 
)

BIT_initDStream() : Initialize a BIT_DStream_t. bitD : a pointer to an already allocated BIT_DStream_t structure. srcSize must be the exact size of the bitStream, in bytes.

Returns
: size of stream (== srcSize), or an errorCode if a problem is detected

◆ BIT_lookBits()

FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits ( const BIT_DStream_t bitD,
U32  nbBits 
)

BIT_lookBits() : Provides next n bits from local register. local register is not modified. On 32-bits, maxNbBits==24. On 64-bits, maxNbBits==56.

Returns
: value extracted

◆ BIT_lookBitsFast()

MEM_STATIC BitContainerType BIT_lookBitsFast ( const BIT_DStream_t bitD,
U32  nbBits 
)

BIT_lookBitsFast() : unsafe version; only works if nbBits >= 1

◆ BIT_readBits()

FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits ( BIT_DStream_t bitD,
unsigned  nbBits 
)

BIT_readBits() : Read (consume) next n bits from local register and update. Do not read more bits than the local register currently contains.

Returns
: extracted value.

◆ BIT_readBitsFast()

MEM_STATIC BitContainerType BIT_readBitsFast ( BIT_DStream_t bitD,
unsigned  nbBits 
)

BIT_readBitsFast() : unsafe version; only works if nbBits >= 1

◆ BIT_reloadDStream()

FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream ( BIT_DStream_t bitD)

BIT_reloadDStream() : Refill bitD from buffer previously set in BIT_initDStream(). This function is safe: it guarantees it will never read beyond the src buffer.

Returns
: status of BIT_DStream_t internal register. when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits
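
For the read side, a typical sequence is BIT_initDStream(), then BIT_readBits() interleaved with BIT_reloadDStream(), and finally BIT_endOfDStream() to confirm every bit was consumed. A minimal sketch complementing the write-side sketch above, under the same assumption that the internal headers are available; note that the decoding API reads the stream backward, so the last value written is the first value read.

#include <stdio.h>
#include "bitstream.h"     /* internal header: BIT_DStream_t, BIT_readBits(), ... */
#include "error_private.h" /* ERR_isError() */

/* Read back the values written by the write-side sketch above (reverse order). */
static int readBits(const void* src, size_t srcSize)
{
    BIT_DStream_t bitD;
    size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
    if (ERR_isError(initResult)) return -1;

    {   /* last value written is the first one read */
        unsigned const v1 = (unsigned)BIT_readBits(&bitD, 1);
        unsigned const v9 = (unsigned)BIT_readBits(&bitD, 4);
        unsigned const v5 = (unsigned)BIT_readBits(&bitD, 3);
        BIT_reloadDStream(&bitD);   /* refill the local register from the buffer */
        printf("read: %u %u %u\n", v1, v9, v5);
    }

    /* BIT_endOfDStream() is 1 only when every bit of the stream has been consumed */
    return BIT_endOfDStream(&bitD) ? 0 : -1;
}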

◆ BIT_reloadDStream_internal()

MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal ( BIT_DStream_t bitD)

BIT_reloadDStream_internal() : Simple variant of BIT_reloadDStream(), with two conditions:

  1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
  2. look window is valid after shifted down : bitD->ptr >= bitD->start

◆ BIT_reloadDStreamFast()

MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast ( BIT_DStream_t bitD)

BIT_reloadDStreamFast() : Similar to BIT_reloadDStream(), but with two differences:

  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this point you must use BIT_reloadDStream() to reload.

◆ COVER_best_destroy()

void COVER_best_destroy ( COVER_best_t best)

Call COVER_best_wait() and then destroy the COVER_best_t.

◆ COVER_best_finish()

void COVER_best_finish ( COVER_best_t best,
ZDICT_cover_params_t  parameters,
COVER_dictSelection_t  selection 
)

Called when a thread finishes executing, on both error and success. Decrements liveJobs and signals any waiting threads if liveJobs == 0. If this dictionary is the best so far, save it and its parameters.

◆ COVER_best_init()

void COVER_best_init ( COVER_best_t best)

Initialize the COVER_best_t.

◆ COVER_best_start()

void COVER_best_start ( COVER_best_t best)

Called when a thread is about to be launched. Increments liveJobs.

◆ COVER_best_wait()

void COVER_best_wait ( COVER_best_t best)

Wait until liveJobs == 0.

◆ COVER_checkTotalCompressedSize()

size_t COVER_checkTotalCompressedSize ( const ZDICT_cover_params_t  parameters,
const size_t *  samplesSizes,
const BYTE *  samples,
size_t *  offsets,
size_t  nbTrainSamples,
size_t  nbSamples,
BYTE *const  dict,
size_t  dictBufferCapacity 
)

Checks total compressed size of a dictionary

◆ COVER_computeEpochs()

COVER_epoch_info_t COVER_computeEpochs ( U32  maxDictSize,
U32  nbDmers,
U32  k,
U32  passes 
)

Computes the number of epochs and the size of each epoch. We will make sure that each epoch gets at least 10 * k bytes.

The COVER algorithms divide the data up into epochs of equal size and select one segment from each epoch.

Parameters
maxDictSize: The maximum allowed dictionary size.
nbDmers: The number of dmers we are training on.
k: The parameter k (segment size).
passes: The target number of passes over the dmer corpus. More passes means a better dictionary.

◆ COVER_dictSelectionError()

COVER_dictSelection_t COVER_dictSelectionError ( size_t  error)

Error function for COVER_selectDict function. Returns a struct where return.totalCompressedSize is a ZSTD error.

◆ COVER_dictSelectionFree()

void COVER_dictSelectionFree ( COVER_dictSelection_t  selection)

Always call after selectDict is called to free up used memory from newly created dictionary.

◆ COVER_dictSelectionIsError()

unsigned COVER_dictSelectionIsError ( COVER_dictSelection_t  selection)

Error function for COVER_selectDict function. Checks if the return value is an error.

◆ COVER_selectDict()

COVER_dictSelection_t COVER_selectDict ( BYTE *  customDictContent,
size_t  dictBufferCapacity,
size_t  dictContentSize,
const BYTE *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbFinalizeSamples,
size_t  nbCheckSamples,
size_t  nbSamples,
ZDICT_cover_params_t  params,
size_t *  offsets,
size_t  totalCompressedSize 
)

Called to finalize the dictionary and select one based on whether or not the shrink-dict flag was enabled. If enabled the dictionary used is the smallest dictionary within a specified regression of the compressed size from the largest dictionary.

◆ COVER_sum()

size_t COVER_sum ( const size_t *  samplesSizes,
unsigned  nbSamples 
)

Returns the sum of the sample sizes.

◆ COVER_warnOnSmallCorpus()

void COVER_warnOnSmallCorpus ( size_t  maxDictSize,
size_t  nbDmers,
int  displayLevel 
)

Warns the user when their corpus is too small.

◆ divbwt()

int divbwt ( const unsigned char *  T,
unsigned char *  U,
int *  A,
int  n,
unsigned char *  num_indexes,
int *  indexes,
int  openMP 
)

Constructs the Burrows-Wheeler transformed string of a given string.

Parameters
T[0..n-1]: The input string.
U[0..n-1]: The output string. (can be T)
A[0..n-1]: The temporary array. (can be NULL)
n: The length of the given string.
num_indexes: The length of the secondary indexes array. (can be NULL)
indexes: The secondary indexes array. (can be NULL)
openMP: enables OpenMP optimization.
Returns
The primary index if no error occurred, -1 or -2 otherwise.

◆ divsufsort()

int divsufsort ( const unsigned char *  T,
int *  SA,
int  n,
int  openMP 
)

Constructs the suffix array of a given string.

Parameters
T[0..n-1]: The input string.
SA[0..n-1]: The output array of suffixes.
n: The length of the given string.
openMP: enables OpenMP optimization.
Returns
0 if no error occurred, -1 or -2 otherwise.

◆ FSE_buildCTable()

FSE_PUBLIC_API size_t FSE_buildCTable ( FSE_CTable ct,
const short *  normalizedCounter,
unsigned  maxSymbolValue,
unsigned  tableLog 
)

FSE_buildCTable(): Builds ct, which must be already allocated, using FSE_createCTable().

Returns
: 0, or an errorCode, which can be tested using FSE_isError()

◆ FSE_buildCTable_rle()

size_t FSE_buildCTable_rle ( FSE_CTable ct,
unsigned char  symbolValue 
)

build a fake FSE_CTable, designed to always compress the same symbolValue

◆ FSE_buildDTable_wksp()

size_t FSE_buildDTable_wksp ( FSE_DTable *  dt,
const short *  normalizedCounter,
unsigned  maxSymbolValue,
unsigned  tableLog,
void *  workSpace,
size_t  wkspSize 
)

Same as FSE_buildDTable(), using an externally allocated workspace produced with FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)

◆ FSE_compress_usingCTable()

size_t FSE_compress_usingCTable ( void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const FSE_CTable ct 
)

FSE_compress_usingCTable(): Compress src using ct into dst which must be already allocated.

Returns
: size of compressed data (<= dstCapacity), or 0 if compressed data could not fit into dst, or an errorCode, which can be tested using FSE_isError()

◆ FSE_decompress_wksp_bmi2()

size_t FSE_decompress_wksp_bmi2 ( void *  dst,
size_t  dstCapacity,
const void *  cSrc,
size_t  cSrcSize,
unsigned  maxLog,
void *  workSpace,
size_t  wkspSize,
int  bmi2 
)

same as FSE_decompress(), using an externally allocated workSpace produced with FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue). Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't
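
A minimal sketch of how this workspace-based decoder might be invoked, assuming the internal fse.h header is available (FSE_STATIC_LINKING_ONLY) and using illustrative bounds of tableLog <= 12 and symbol values <= 255:

#define FSE_STATIC_LINKING_ONLY   /* exposes the workspace variants and sizing macros */
#include "fse.h"

/* Decode an FSE-compressed block; the 12/255 bounds are assumptions for this sketch. */
static size_t decodeBlock(void* dst, size_t dstCapacity,
                          const void* cSrc, size_t cSrcSize)
{
    /* workspace sized by the macro documented above (a few tens of KB here;
     * heap-allocate in production code) */
    unsigned workSpace[ FSE_DECOMPRESS_WKSP_SIZE_U32(12, 255) ];
    return FSE_decompress_wksp_bmi2(dst, dstCapacity,
                                    cSrc, cSrcSize,
                                    12 /* maxLog */,
                                    workSpace, sizeof(workSpace),
                                    0 /* bmi2: set to 1 on CPUs with BMI2 */);
}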

◆ FSE_initCState2()

MEM_STATIC void FSE_initCState2 ( FSE_CState_t statePtr,
const FSE_CTable ct,
U32  symbol 
)

FSE_initCState2() : Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) uses the smallest state value possible, saving the cost of this symbol

◆ FSE_NCountWriteBound()

size_t FSE_NCountWriteBound ( unsigned  maxSymbolValue,
unsigned  tableLog 
)

FSE_NCountWriteBound(): Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. Typically useful for allocation purpose.

◆ FSE_normalizeCount()

size_t FSE_normalizeCount ( short *  normalizedCounter,
unsigned  tableLog,
const unsigned *  count,
size_t  srcSize,
unsigned  maxSymbolValue,
unsigned  useLowProbCount 
)

FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). useLowProbCount is a boolean parameter which trades off compressed size for faster header decoding. When it is set to 1, the compressed data will be slightly smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0 is a good default, since header deserialization makes a big speed difference. Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.

Returns
: tableLog, or an errorCode, which can be tested using FSE_isError()

◆ FSE_optimalTableLog()

unsigned FSE_optimalTableLog ( unsigned  maxTableLog,
size_t  srcSize,
unsigned  maxSymbolValue 
)

FSE_compress() does the following:

  1. count symbol occurrence from source[] into table count[] (see hist.h)
  2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
  3. save normalized counters to memory buffer using writeNCount()
  4. build encoding table 'CTable' from normalized counters
  5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:

  1. read normalized counters with readNCount()
  2. build decoding table 'DTable' from normalized counters
  3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and provide normalized distribution using external method.

FSE_optimalTableLog(): dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.

Returns
: recommended tableLog (necessarily <= 'maxTableLog')
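
The five compression steps above map directly onto functions documented in this file. Below is a compact sketch of that pipeline under the assumption of static linking against the library internals (fse.h, hist.h); step 3 (saving the normalized counters with FSE_writeNCount()) is omitted to keep it short.

#include <stddef.h>
#define FSE_STATIC_LINKING_ONLY   /* exposes the detailed FSE API */
#include "fse.h"
#include "hist.h"                 /* internal header: HIST_count() */

/* Compress src with an explicitly managed CTable, following steps 1-5 above. */
static size_t fsePipeline(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    unsigned count[256];
    short    norm[256];
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    size_t   err;

    /* 1. count symbol occurrences */
    err = HIST_count(count, &maxSymbolValue, src, srcSize);
    if (HIST_isError(err)) return err;

    /* 2. normalize so that sum(norm[]) == 2^tableLog */
    tableLog = FSE_optimalTableLog(0 /* 0 = default maximum */, srcSize, maxSymbolValue);
    err = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue,
                             /* useLowProbCount: doc above suggests 0 below ~2 KB */ srcSize >= 2048);
    if (FSE_isError(err)) return err;
    tableLog = (unsigned)err;   /* tableLog actually used */

    /* 4. build the encoding table, then 5. encode the data stream */
    {   FSE_CTable* const ct = FSE_createCTable(maxSymbolValue, tableLog);
        size_t cSize;
        if (ct == NULL) return (size_t)-1;   /* sketch-only failure value */
        err = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
        cSize = FSE_isError(err) ? err
                                 : FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct);
        FSE_freeCTable(ct);
        return cSize;   /* compressed size, 0 if it could not fit, or an error code */
    }
}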

◆ FSE_optimalTableLog_internal()

unsigned FSE_optimalTableLog_internal ( unsigned  maxTableLog,
size_t  srcSize,
unsigned  maxSymbolValue,
unsigned  minus 
)

same as FSE_optimalTableLog(), which used minus==2

◆ FSE_readNCount()

size_t FSE_readNCount ( short *  normalizedCounter,
unsigned *  maxSymbolValuePtr,
unsigned *  tableLogPtr,
const void *  rBuffer,
size_t  rBuffSize 
)

Tutorial :

The first step is to count all symbols. FSE_count() does this job very fast. Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. 'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]. maxSymbolValuePtr[0] will be updated with its real value (necessarily <= original value). FSE_count() will return the number of occurrences of the most frequent symbol. This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies. FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. It also guarantees a minimum of 1 to any Symbol with frequency >= 1. You can use 'tableLog'==0 to mean "use default tableLog value". If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table, called 'normalizedCounter', which is a table of signed short. 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. The return value is tableLog if everything proceeded as expected. It is 0 if there is a single symbol within distribution. If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). 'buffer' must be already allocated. For guaranteed success, buffer size must be at least FSE_headerBound(). The result of the function is the number of bytes written into 'buffer'. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'. The space required by 'CTable' must be already allocated, using FSE_createCTable(). You can then use FSE_buildCTable() to fill 'CTable'. If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'. The function returns the size of compressed data (without header), necessarily <= dstCapacity. If it returns '0', compressed data could not fit into 'dst'. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

FSE_readNCount(): Read compactly saved 'normalizedCounter' from 'rBuffer'.

Returns
: size read from 'rBuffer', or an errorCode, which can be tested using FSE_isError(). maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values

◆ FSE_readNCount_bmi2()

size_t FSE_readNCount_bmi2 ( short *  normalizedCounter,
unsigned *  maxSymbolValuePtr,
unsigned *  tableLogPtr,
const void *  rBuffer,
size_t  rBuffSize,
int  bmi2 
)

FSE_readNCount_bmi2(): Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.

◆ FSE_versionNumber()

unsigned FSE_versionNumber ( void  )

library version number; to be used when checking dll version

◆ FSE_writeNCount()

size_t FSE_writeNCount ( void *  buffer,
size_t  bufferSize,
const short *  normalizedCounter,
unsigned  maxSymbolValue,
unsigned  tableLog 
)

FSE_writeNCount(): Compactly save 'normalizedCounter' into 'buffer'.

Returns
: size of the compressed table, or an errorCode, which can be tested using FSE_isError().

◆ HIST_add()

void HIST_add ( unsigned *  count,
const void *  src,
size_t  srcSize 
)

HIST_add() : Lowest level: just add nb of occurrences of characters from @src into @count. @count is not reset. @count array is presumed large enough (i.e. 1 KB). This function does not need any additional stack memory.

◆ HIST_count()

size_t HIST_count ( unsigned *  count,
unsigned *  maxSymbolValuePtr,
const void *  src,
size_t  srcSize 
)

HIST_count(): Provides the precise count of each byte within a table 'count'. 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). Updates *maxSymbolValuePtr with actual largest symbol value detected.

Returns
: count of the most frequent symbol (which isn't identified), or an error code, which can be tested using HIST_isError(). Note : if return == srcSize, there is only one symbol.
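
As a small illustration of this contract (hist.h is an internal header, so this assumes the library sources are on the include path):

#include <stdio.h>
#include <string.h>
#include "hist.h"   /* internal header: HIST_count(), HIST_isError() */

int main(void)
{
    const char* const msg = "abracadabra";
    unsigned count[256];
    unsigned maxSymbolValue = 255;   /* in/out: updated to the largest byte value seen */
    size_t const largest = HIST_count(count, &maxSymbolValue, msg, strlen(msg));
    if (HIST_isError(largest)) return 1;
    /* largest == 5 here: 'a' is the most frequent byte in "abracadabra" */
    printf("maxSymbolValue=%u, most frequent symbol appears %zu times\n",
           maxSymbolValue, largest);
    return 0;
}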

◆ HIST_count_simple()

unsigned HIST_count_simple ( unsigned *  count,
unsigned *  maxSymbolValuePtr,
const void *  src,
size_t  srcSize 
)

HIST_count_simple() : Same as HIST_countFast(), this function is unsafe, and will segfault if any value within src is > *maxSymbolValuePtr. It is also a bit slower for large inputs. However, it does not need any additional memory (not even on stack).

Returns
: count of the most frequent symbol. Note this function doesn't produce any error (i.e. it must succeed).

◆ HIST_count_wksp()

size_t HIST_count_wksp ( unsigned *  count,
unsigned *  maxSymbolValuePtr,
const void *  src,
size_t  srcSize,
void *  workSpace,
size_t  workSpaceSize 
)

HIST_count_wksp() : Same as HIST_count(), but using an externally provided scratch buffer. Benefit is this function will use very little stack space. workSpace is a writable buffer which must be 4-bytes aligned, workSpaceSize must be >= HIST_WKSP_SIZE

◆ HIST_countFast()

size_t HIST_countFast ( unsigned *  count,
unsigned *  maxSymbolValuePtr,
const void *  src,
size_t  srcSize 
)

HIST_countFast() : same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr. This function is unsafe, and will segfault if any value within src is > *maxSymbolValuePtr

◆ HIST_countFast_wksp()

size_t HIST_countFast_wksp ( unsigned *  count,
unsigned *  maxSymbolValuePtr,
const void *  src,
size_t  srcSize,
void *  workSpace,
size_t  workSpaceSize 
)

HIST_countFast_wksp() : Same as HIST_countFast(), but using an externally provided scratch buffer. workSpace is a writable buffer which must be 4-bytes aligned, workSpaceSize must be >= HIST_WKSP_SIZE

◆ HIST_isError()

unsigned HIST_isError ( size_t  code)

tells if a return value is an error code

◆ HUF_addBits()

FORCE_INLINE_TEMPLATE void HUF_addBits ( HUF_CStream_t bitC,
HUF_CElt  elt,
int  idx,
int  kFast 
)

HUF_addBits(): Adds the symbol stored in HUF_CElt elt to the bitstream.

Parameters
elt: The element we're adding. This is a (nbBits, value) pair. See the HUF_CStream_t docs for the format.
idx: Insert into the bitstream at this idx.
kFast: This is a template parameter. If the bitstream is guaranteed to have at least 4 unused bits after this call it may be 1, otherwise it must be 0. HUF_addBits() is faster when fast is set.

◆ HUF_compress1X_repeat()

size_t HUF_compress1X_repeat ( void *  dst,
size_t  dstSize,
const void *  src,
size_t  srcSize,
unsigned  maxSymbolValue,
unsigned  tableLog,
void *  workSpace,
size_t  wkspSize,
HUF_CElt *  hufTable,
HUF_repeat repeat,
int  flags 
)

HUF_compress1X_repeat() : Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. If it uses hufTable it does not modify hufTable or repeat. If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. If preferRepeat then the old table will always be used if valid. If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding

Parameters
wkspSize: `workSpace` must be aligned on 4-byte boundaries, and `wkspSize` must be >= HUF_WORKSPACE_SIZE

◆ HUF_compress4X_repeat()

size_t HUF_compress4X_repeat ( void *  dst,
size_t  dstSize,
const void *  src,
size_t  srcSize,
unsigned  maxSymbolValue,
unsigned  tableLog,
void *  workSpace,
size_t  wkspSize,
HUF_CElt *  hufTable,
HUF_repeat repeat,
int  flags 
)

HUF_compress4X_repeat() : Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. If it uses hufTable it does not modify hufTable or repeat. If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. If preferRepeat then the old table will always be used if valid. If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding

Parameters
wkspSize: `workSpace` must be aligned on 4-byte boundaries, and `wkspSize` must be >= HUF_WORKSPACE_SIZE

◆ HUF_compressBound()

size_t HUF_compressBound ( size_t  size)

maximum compressed size (worst case)

◆ HUF_decompress1X2_DCtx_wksp()

size_t HUF_decompress1X2_DCtx_wksp ( HUF_DTable *  dctx,
void *  dst,
size_t  dstSize,
const void *  cSrc,
size_t  cSrcSize,
void *  workSpace,
size_t  wkspSize,
int  flags 
)

double-symbols decoder

◆ HUF_flushBits()

FORCE_INLINE_TEMPLATE void HUF_flushBits ( HUF_CStream_t bitC,
int  kFast 
)

HUF_flushBits() : Flushes the bits in the bit container @ index 0.

Postcondition
bitPos will be < 8.
Parameters
kFast: If kFast is set then we must know a priori that the bit container will not overflow.

◆ HUF_getErrorName()

const char * HUF_getErrorName ( size_t  code)

provides error code string (useful for debugging)

◆ HUF_getNbBitsFromCTable()

U32 HUF_getNbBitsFromCTable ( const HUF_CElt *  symbolTable,
U32  symbolValue 
)

HUF_getNbBitsFromCTable() : Read nbBits from CTable symbolTable, for symbol symbolValue presumed <= HUF_SYMBOLVALUE_MAX. Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0. Note 2 : is not inlined, as HUF_CElt definition is private

◆ HUF_isError()

unsigned HUF_isError ( size_t  code)

tells if a return value is an error code

◆ HUF_mergeIndex1()

FORCE_INLINE_TEMPLATE void HUF_mergeIndex1 ( HUF_CStream_t bitC)

HUF_mergeIndex1() : Merges the bit container @ index 1 into the bit container @ index 0 and zeros the bit container @ index 1.

◆ HUF_minTableLog()

unsigned HUF_minTableLog ( unsigned  symbolCardinality)

HUF_compress() does the following:

  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
  2. (optional) refine tableLog using HUF_optimalTableLog()
  3. build Huffman table from count using HUF_buildCTable()
  4. save Huffman table to memory buffer using HUF_writeCTable()
  5. encode the data stream using HUF_compress4X_usingCTable()

The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and regenerate 'CTable' using external methods.

◆ HUF_readCTable()

size_t HUF_readCTable ( HUF_CElt *  CTable,
unsigned *  maxSymbolValuePtr,
const void *  src,
size_t  srcSize,
unsigned *  hasZeroWeights 
)

HUF_readCTable() : Loading a CTable saved with HUF_writeCTable()

◆ HUF_readCTableHeader()

HUF_CTableHeader HUF_readCTableHeader ( HUF_CElt const *  ctable)

HUF_readCTableHeader() :

Returns
The header from the CTable specifying the tableLog and the maxSymbolValue.

◆ HUF_readStats()

size_t HUF_readStats ( BYTE *  huffWeight,
size_t  hwSize,
U32 *  rankStats,
U32 *  nbSymbolsPtr,
U32 *  tableLogPtr,
const void *  src,
size_t  srcSize 
)

HUF_readStats() : Read compact Huffman tree, saved by HUF_writeCTable(). huffWeight is destination buffer. rankStats is assumed to be a table of at least HUF_TABLELOG_MAX U32.

Returns
: size read from src, or an error code. Note : Needed by HUF_readCTable() and HUF_readDTableX?().

◆ HUF_selectDecoder()

U32 HUF_selectDecoder ( size_t  dstSize,
size_t  cSrcSize 
)

HUF_selectDecoder() : Tells which decoder is likely to decode faster, based on a set of pre-computed metrics.

Returns
: 0==HUF_decompress4X1, 1==HUF_decompress4X2 . Assumption : 0 < dstSize <= 128 KB

◆ POOL_add()

void POOL_add ( POOL_ctx ctx,
POOL_function  function,
void *  opaque 
)

POOL_add() : Add the job function(opaque) to the thread pool. ctx must be valid. Possibly blocks until there is room in the queue. Note : The function may be executed asynchronously, therefore, opaque must live until function has been completed.

◆ POOL_create()

POOL_ctx * POOL_create ( size_t  numThreads,
size_t  queueSize 
)

POOL_create() : Create a thread pool with at most numThreads threads. numThreads must be at least 1. The maximum number of queued jobs before blocking is queueSize.

Returns
: POOL_ctx pointer on success, else NULL.

◆ POOL_free()

void POOL_free ( POOL_ctx ctx)

POOL_free() : Free a thread pool returned by POOL_create().

◆ POOL_joinJobs()

void POOL_joinJobs ( POOL_ctx ctx)

POOL_joinJobs() : Waits for all queued jobs to finish executing.

◆ POOL_resize()

int POOL_resize ( POOL_ctx ctx,
size_t  numThreads 
)

POOL_resize() : Expands or shrinks pool's number of threads. This is more efficient than releasing + creating a new context, since it tries to preserve and reuse existing threads. numThreads must be at least 1.

Returns
: 0 when resize was successful, !0 (typically 1) if there is an error. note : only numThreads can be resized, queueSize remains unchanged.

◆ POOL_sizeof()

size_t POOL_sizeof ( const POOL_ctx ctx)

POOL_sizeof() :

Returns
threadpool memory usage. Note : compatible with NULL (returns 0 in this case)

◆ POOL_tryAdd()

int POOL_tryAdd ( POOL_ctx ctx,
POOL_function  function,
void *  opaque 
)

POOL_tryAdd() : Add the job function(opaque) to thread pool if a queue slot is available. Returns immediately even if not (does not block).

Returns
: 1 if successful, 0 if not.
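
The POOL_* functions above form a small self-contained thread pool. A minimal usage sketch, assuming the internal pool.h header is available (when the library is built without ZSTD_MULTITHREAD, the pool falls back to running jobs synchronously):

#include <stdio.h>
#include "pool.h"   /* internal header: POOL_ctx, POOL_create(), ... */

/* job payload passed through the opaque pointer */
static void printJob(void* opaque)
{
    int const id = *(int*)opaque;
    printf("job %d running\n", id);
}

int main(void)
{
    int ids[4] = {0, 1, 2, 3};
    POOL_ctx* const pool = POOL_create(2 /* threads */, 8 /* queue size */);
    if (pool == NULL) return 1;

    {   int i;
        for (i = 0; i < 4; i++)
            POOL_add(pool, printJob, &ids[i]);   /* may block if the queue is full;
                                                  * ids[] must outlive the jobs */
    }
    POOL_joinJobs(pool);   /* wait until all queued jobs have finished */
    POOL_free(pool);
    return 0;
}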

◆ ZDICT_finalizeDictionary()

size_t ZDICT_finalizeDictionary ( void *  dstDictBuffer,
size_t  maxDictSize,
const void *  dictContent,
size_t  dictContentSize,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples,
ZDICT_params_t  parameters 
)

ZDICT_finalizeDictionary(): Given a custom content as a basis for dictionary, and a set of samples, finalize dictionary by adding headers and statistics according to the zstd dictionary format.

Samples must be stored concatenated in a flat buffer samplesBuffer, supplied with an array of sizes samplesSizes, providing the size of each sample in order. The samples are used to construct the statistics, so they should be representative of what you will compress with this dictionary.

The compression level can be set in parameters. You should pass the compression level you expect to use in production. The statistics for each compression level differ, so tuning the dictionary for the compression level can help quite a bit.

You can set an explicit dictionary ID in parameters, or allow us to pick a random dictionary ID for you, but we can't guarantee no collisions.

The dstDictBuffer and the dictContent may overlap, and the content will be appended to the end of the header. If the header + the content doesn't fit in maxDictSize the beginning of the content is truncated to make room, since it is presumed that the most profitable content is at the end of the dictionary, since that is the cheapest to reference.

maxDictSize must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).

Returns
: size of dictionary stored into dstDictBuffer (<= maxDictSize), or an error code, which can be tested by ZDICT_isError(). Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0. NOTE: This function currently may fail in several edge cases including:
  • Not enough samples
  • Samples are uncompressible
  • Samples are all exactly the same

◆ ZDICT_getDictID()

unsigned ZDICT_getDictID ( const void *  dictBuffer,
size_t  dictSize 
)

extracts dictID;

Returns
zero if error (not a valid dictionary)

◆ ZDICT_optimizeTrainFromBuffer_cover()

ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover ( void *  dictBuffer,
size_t  dictBufferCapacity,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples,
ZDICT_cover_params_t parameters 
)

ZDICT_optimizeTrainFromBuffer_cover(): The same requirements as above hold for all the parameters except parameters. This function tries many parameter combinations and picks the best parameters. *parameters is filled with the best parameters found, dictionary constructed with those parameters is stored in dictBuffer.

All of the parameters d, k, steps are optional. If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. If steps is zero it defaults to its default value. If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].

Returns
: size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). On success *parameters contains the parameters selected. See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.

◆ ZDICT_optimizeTrainFromBuffer_fastCover()

ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover ( void *  dictBuffer,
size_t  dictBufferCapacity,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples,
ZDICT_fastCover_params_t parameters 
)

ZDICT_optimizeTrainFromBuffer_fastCover(): The same requirements as above hold for all the parameters except parameters. This function tries many parameter combinations (specifically, k and d combinations) and picks the best parameters. *parameters is filled with the best parameters found, dictionary constructed with those parameters is stored in dictBuffer. All of the parameters d, k, steps, f, and accel are optional. If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. if steps is zero it defaults to its default value. If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. If f is zero, default value of 20 is used. If accel is zero, default value of 1 is used.

Returns
: size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). On success *parameters contains the parameters selected. See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.

◆ ZDICT_trainFromBuffer()

size_t ZDICT_trainFromBuffer ( void *  dictBuffer,
size_t  dictBufferCapacity,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples 
)

ZDICT_trainFromBuffer(): Train a dictionary from an array of samples. Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, f=20, and accel=1. Samples must be stored concatenated in a single flat buffer samplesBuffer, supplied with an array of sizes samplesSizes, providing the size of each sample, in order. The resulting dictionary will be saved into dictBuffer.

Returns
: size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). Note: Dictionary training will fail if there are not enough samples to construct a dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). If dictionary training fails, you should use zstd without a dictionary, as the dictionary would've been ineffective anyways. If you believe your samples would benefit from a dictionary please open an issue with details, and we can look into it. Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. Tips: In general, a reasonable dictionary has a size of ~ 100 KB. It's possible to select smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousands samples, though this can vary a lot. It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
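
A minimal training sketch using the public zdict.h API. The samples below are toy placeholders (far too few and too small for real training, so the call is expected to fail here); they only show how the flat buffer and the sizes array fit together.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zdict.h>

int main(void)
{
    /* toy samples; real usage wants thousands of representative samples */
    const char* const pieces[] = { "GET /index.html", "GET /style.css", "GET /logo.png" };
    char   samples[256];
    size_t samplesSizes[3];
    size_t pos = 0;
    unsigned i;
    for (i = 0; i < 3; i++) {
        size_t const len = strlen(pieces[i]);
        memcpy(samples + pos, pieces[i], len);   /* samples are concatenated back to back */
        samplesSizes[i] = len;
        pos += len;
    }

    {   size_t const dictCapacity = 1024;
        void* const dictBuffer = malloc(dictCapacity);
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                                      samples, samplesSizes, 3);
        if (ZDICT_isError(dictSize)) {
            /* expected with such a tiny corpus: fall back to dictionary-less compression */
            printf("training failed: %s\n", ZDICT_getErrorName(dictSize));
        } else {
            printf("dictionary: %zu bytes, id=%u\n",
                   dictSize, ZDICT_getDictID(dictBuffer, dictSize));
        }
        free(dictBuffer);
        return 0;
    }
}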

◆ ZDICT_trainFromBuffer_cover()

ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover ( void *  dictBuffer,
size_t  dictBufferCapacity,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples,
ZDICT_cover_params_t  parameters 
)

ZDICT_trainFromBuffer_cover(): Train a dictionary from an array of samples using the COVER algorithm. Samples must be stored concatenated in a single flat buffer samplesBuffer, supplied with an array of sizes samplesSizes, providing the size of each sample, in order. The resulting dictionary will be saved into dictBuffer.

Returns
: size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. Tips: In general, a reasonable dictionary has a size of ~ 100 KB. It's possible to select smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousands samples, though this can vary a lot. It's recommended that total size of all samples be about ~x100 times the target size of dictionary.

◆ ZDICT_trainFromBuffer_fastCover()

ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover ( void *  dictBuffer,
size_t  dictBufferCapacity,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples,
ZDICT_fastCover_params_t  parameters 
)

ZDICT_trainFromBuffer_fastCover(): Train a dictionary from an array of samples using a modified version of the COVER algorithm. Samples must be stored concatenated in a single flat buffer samplesBuffer, supplied with an array of sizes samplesSizes, providing the size of each sample, in order. d and k are required. All other parameters are optional and will use default values if not provided. The resulting dictionary will be saved into dictBuffer.

Returns
: size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. Tips: In general, a reasonable dictionary has a size of ~ 100 KB. It's possible to select smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousands samples, though this can vary a lot. It's recommended that total size of all samples be about ~x100 times the target size of dictionary.

◆ ZDICT_trainFromBuffer_legacy()

size_t ZDICT_trainFromBuffer_legacy ( void *  dictBuffer,
size_t  dictBufferCapacity,
const void *  samplesBuffer,
const size_t *  samplesSizes,
unsigned  nbSamples,
ZDICT_legacy_params_t  parameters 
)

ZDICT_trainFromBuffer_legacy(): Train a dictionary from an array of samples. Samples must be stored concatenated in a single flat buffer samplesBuffer, supplied with an array of sizes samplesSizes, providing the size of each sample, in order. The resulting dictionary will be saved into dictBuffer. parameters is optional and can be provided with values set to 0 to mean "default".

Returns
: size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). See ZDICT_trainFromBuffer() for details on failure modes. Tips: In general, a reasonable dictionary has a size of ~ 100 KB. It's possible to select smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousands samples, though this can vary a lot. It's recommended that total size of all samples be about ~x100 times the target size of dictionary. Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.

◆ ZSTD_adjustCParams()

ZSTD_compressionParameters ZSTD_adjustCParams ( ZSTD_compressionParameters  cPar,
unsigned long long  srcSize,
size_t  dictSize 
)

ZSTD_adjustCParams() : optimize params for a given srcSize and dictSize. srcSize can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. dictSize must be 0 when there is no dictionary. cPar can be invalid : all parameters will be clamped within the valid range in the returned struct. This function never fails (wide contract).

◆ ZSTD_BtFindBestMatch()

FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_BtFindBestMatch ( ZSTD_MatchState_t ms,
const BYTE *const  ip,
const BYTE *const  iLimit,
size_t *  offBasePtr,
const U32  mls,
const ZSTD_dictMode_e  dictMode 
)

ZSTD_BtFindBestMatch() : Tree updater, providing best match

◆ ZSTD_buildBlockEntropyStats()

size_t ZSTD_buildBlockEntropyStats ( const SeqStore_t seqStorePtr,
const ZSTD_entropyCTables_t prevEntropy,
ZSTD_entropyCTables_t nextEntropy,
const ZSTD_CCtx_params cctxParams,
ZSTD_entropyCTablesMetadata_t entropyMetadata,
void *  workspace,
size_t  wkspSize 
)

ZSTD_buildBlockEntropyStats() : Builds entropy for the block. Requires a workspace of size ENTROPY_WORKSPACE_SIZE.

Returns
: 0 on success, or an error code. Note : also employed in superblock

◆ ZSTD_CCtx_getParameter()

size_t ZSTD_CCtx_getParameter ( const ZSTD_CCtx cctx,
ZSTD_cParameter  param,
int *  value 
)

ZSTD_CCtx_getParameter() : Get the requested compression parameter value, selected by enum ZSTD_cParameter, and store it into int* value.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()).

◆ ZSTD_CCtx_loadDictionary()

size_t ZSTD_CCtx_loadDictionary ( ZSTD_CCtx cctx,
const void *  dict,
size_t  dictSize 
)

ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ Create an internal CDict from dict buffer. Decompression will have to use same dictionary.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, meaning "return to no-dictionary mode". Note 1 : Dictionary is sticky, it will be used for all future compressed frames, until parameters are reset, a new dictionary is loaded, or the dictionary is explicitly invalidated by loading a NULL dictionary. Note 2 : Loading a dictionary involves building tables. It's also a CPU consuming operation, with non-negligible impact on latency. Tables are dependent on compression parameters, and for this reason, compression parameters can no longer be changed after loading a dictionary. Note 3 :dict content will be copied internally. Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. In such a case, dictionary buffer must outlive its users. Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() to precisely select how dictionary content must be interpreted. Note 5 : This method does not benefit from LDM (long distance mode). If you want to employ LDM on some large dictionary content, prefer employing ZSTD_CCtx_refPrefix() described below.
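
A short sketch of the sticky-dictionary workflow described above, using only the stable public API from zstd.h; dictBuffer/dictSize stand for a dictionary obtained elsewhere, e.g. from ZDICT_trainFromBuffer().

#include <zstd.h>

/* Compress one frame with a raw dictionary buffer loaded into the CCtx. */
static size_t compressWithDict(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const void* dictBuffer, size_t dictSize)
{
    size_t result;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return (size_t)-1;   /* sketch-only: reported as an error by ZSTD_isError() */

    /* the dictionary is "sticky": it stays loaded for every following frame */
    result = ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictSize);
    if (!ZSTD_isError(result))
        result = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    if (!ZSTD_isError(result))
        result = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);

    ZSTD_freeCCtx(cctx);
    return result;   /* compressed size, or an error code */
}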

◆ ZSTD_CCtx_loadDictionary_advanced()

size_t ZSTD_CCtx_loadDictionary_advanced ( ZSTD_CCtx cctx,
const void *  dict,
size_t  dictSize,
ZSTD_dictLoadMethod_e  dictLoadMethod,
ZSTD_dictContentType_e  dictContentType 
)

ZSTD_CCtx_loadDictionary_advanced() : Same as ZSTD_CCtx_loadDictionary(), but gives finer control over how to load the dictionary (by copy ? by reference ?) and how to interpret it (automatic ? force raw mode ? full mode only ?)

◆ ZSTD_CCtx_loadDictionary_byReference()

size_t ZSTD_CCtx_loadDictionary_byReference ( ZSTD_CCtx cctx,
const void *  dict,
size_t  dictSize 
)

ZSTD_CCtx_loadDictionary_byReference() : Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. It saves some memory, but also requires that dict outlives its usage within cctx

◆ ZSTD_CCtx_refCDict()

size_t ZSTD_CCtx_refCDict ( ZSTD_CCtx cctx,
const ZSTD_CDict cdict 
)

ZSTD_CCtx_refCDict() : Requires v1.4.0+ Reference a prepared dictionary, to be used for all future compressed frames. Note that compression parameters are enforced from within CDict, and supersede any compression parameter previously set within CCtx. The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. The dictionary will remain valid for future compressed frames using same CCtx.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Special : Referencing a NULL CDict means "return to no-dictionary mode". Note 1 : Currently, only one dictionary can be managed. Referencing a new dictionary effectively "discards" any previous one. Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx.

◆ ZSTD_CCtx_refPrefix()

size_t ZSTD_CCtx_refPrefix ( ZSTD_CCtx cctx,
const void *  prefix,
size_t  prefixSize 
)

ZSTD_CCtx_refPrefix() : Requires v1.4.0+ Reference a prefix (single-usage dictionary) for next compressed frame. A prefix is only used once. Tables are discarded at end of frame (ZSTD_e_end). Decompression will need same prefix to properly regenerate data. Compressing with a prefix is similar in outcome as performing a diff and compressing it, but performs much faster, especially during decompression (compression speed is tunable with compression level). This method is compatible with LDM (long distance mode).

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary Note 1 : Prefix buffer is referenced. It must outlive compression. Its content must remain unmodified during compression. Note 2 : If the intention is to diff some large src data blob with some prior version of itself, ensure that the window size is large enough to contain the entire source. See ZSTD_c_windowLog. Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. It's a CPU consuming operation, with non-negligible impact on latency. If there is a need to use the same prefix multiple times, consider loadDictionary instead. Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation.

◆ ZSTD_CCtx_refPrefix_advanced()

size_t ZSTD_CCtx_refPrefix_advanced ( ZSTD_CCtx cctx,
const void *  prefix,
size_t  prefixSize,
ZSTD_dictContentType_e  dictContentType 
)

ZSTD_CCtx_refPrefix_advanced() : Same as ZSTD_CCtx_refPrefix(), but gives finer control over how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)

◆ ZSTD_CCtx_reset()

size_t ZSTD_CCtx_reset ( ZSTD_CCtx cctx,
ZSTD_ResetDirective  reset 
)

ZSTD_CCtx_reset() : There are 2 different things that can be reset, independently or jointly :

  • The session : will stop compressing current frame, and make CCtx ready to start a new one. Useful after an error, or to interrupt any ongoing compression. Any internal data not yet flushed is cancelled. Compression parameters and dictionary remain unchanged. They will be used to compress next frame. Resetting session never fails.
  • The parameters : changes all parameters back to "default". This also removes any reference to any dictionary or external sequence producer. Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
  • Both : similar to resetting the session, followed by resetting parameters.

ZSTD_CCtx_reset() : Also dumps dictionary

◆ ZSTD_CCtx_setCParams()

size_t ZSTD_CCtx_setCParams ( ZSTD_CCtx cctx,
ZSTD_compressionParameters  cparams 
)

ZSTD_CCtx_setCParams() : Set all parameters provided within cparams into the working cctx. Note : if modifying parameters during compression (MT mode only), changes to the .windowLog parameter will be ignored.

Returns
0 on success, or an error code (can be checked with ZSTD_isError()). On failure, no parameters are updated.

◆ ZSTD_CCtx_setFParams()

size_t ZSTD_CCtx_setFParams ( ZSTD_CCtx cctx,
ZSTD_frameParameters  fparams 
)

ZSTD_CCtx_setFParams() : Set all parameters provided within fparams into the working cctx.

Returns
0 on success, or an error code (can be checked with ZSTD_isError()).

◆ ZSTD_CCtx_setParameter()

size_t ZSTD_CCtx_setParameter ( ZSTD_CCtx cctx,
ZSTD_cParameter  param,
int  value 
)

ZSTD_CCtx_setParameter() : Set one compression parameter, selected by enum ZSTD_cParameter. All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). Setting a parameter is generally only possible during frame initialization (before starting compression). Exception : when using multi-threading mode (nbWorkers >= 1), the following parameters can be updated during compression (within same frame): => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. new parameters will be active for next job only (after a flush()).

Returns
: an error code (which can be tested using ZSTD_isError()).
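
To make the parameter workflow concrete, here is a small sketch that sets a few parameters and then compresses with ZSTD_compress2() (documented later in this file); everything used is stable public API from zstd.h.

#include <zstd.h>

/* Compress one frame at level 19 with checksum and 2 worker threads.
 * Setting nbWorkers > 0 requires a library built with ZSTD_MULTITHREAD. */
static size_t compressAdvanced(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    size_t result;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return (size_t)-1;   /* sketch-only failure value */

    /* each setter returns 0 or an error code testable with ZSTD_isError() */
    result = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    if (!ZSTD_isError(result))
        result = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    if (!ZSTD_isError(result))
        result = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);
    if (!ZSTD_isError(result))
        result = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);

    ZSTD_freeCCtx(cctx);
    return result;
}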

◆ ZSTD_CCtx_setParametersUsingCCtxParams()

size_t ZSTD_CCtx_setParametersUsingCCtxParams ( ZSTD_CCtx cctx,
const ZSTD_CCtx_params params 
)

ZSTD_CCtx_setParametersUsingCCtxParams() : Apply a set of ZSTD_CCtx_params to the compression context. This can be done even after compression is started, if nbWorkers==0, this will have no impact until a new compression is started. if nbWorkers>=1, new parameters will be picked up at next job, with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).

ZSTD_CCtx_setParametersUsingCCtxParams() : just applies params into cctx; no action is performed, parameters are merely stored. If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. This is possible even if a compression is ongoing. In which case, new parameters will be applied on the fly, starting with the next compression job.

◆ ZSTD_CCtx_setParams()

size_t ZSTD_CCtx_setParams ( ZSTD_CCtx cctx,
ZSTD_parameters  params 
)

ZSTD_CCtx_setParams() : Set all parameters provided within params into the working cctx.

Returns
0 on success, or an error code (can be checked with ZSTD_isError()).

◆ ZSTD_CCtx_setPledgedSrcSize()

size_t ZSTD_CCtx_setPledgedSrcSize ( ZSTD_CCtx cctx,
unsigned long long  pledgedSrcSize 
)

ZSTD_CCtx_setPledgedSrcSize() : Total input data size to be compressed as a single frame. Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. This value will also be controlled at end of frame, and trigger an error if not respected.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. Note 2 : pledgedSrcSize is only valid once, for the next frame. It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. Note 3 : Whenever all input data is provided and consumed in a single round, for example with ZSTD_compress2(), or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), this value is automatically overridden by srcSize instead.

◆ ZSTD_CCtx_trace()

void ZSTD_CCtx_trace ( ZSTD_CCtx cctx,
size_t  extraCSize 
)

ZSTD_CCtx_trace() : Trace the end of a compression call.

◆ ZSTD_CCtxParams_getParameter()

size_t ZSTD_CCtxParams_getParameter ( const ZSTD_CCtx_params params,
ZSTD_cParameter  param,
int *  value 
)

ZSTD_CCtxParams_getParameter() : Similar to ZSTD_CCtx_getParameter. Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()).

◆ ZSTD_CCtxParams_init()

size_t ZSTD_CCtxParams_init ( ZSTD_CCtx_params cctxParams,
int  compressionLevel 
)

ZSTD_CCtxParams_init() : Initializes the compression parameters of cctxParams according to compression level. All other parameters are reset to their default values.

◆ ZSTD_CCtxParams_init_advanced()

size_t ZSTD_CCtxParams_init_advanced ( ZSTD_CCtx_params cctxParams,
ZSTD_parameters  params 
)

ZSTD_CCtxParams_init_advanced() : Initializes the compression and frame parameters of cctxParams according to params. All other parameters are reset to their default values.

◆ ZSTD_CCtxParams_registerSequenceProducer()

void ZSTD_CCtxParams_registerSequenceProducer ( ZSTD_CCtx_params params,
void *  sequenceProducerState,
ZSTD_sequenceProducer_F  sequenceProducer 
)

ZSTD_CCtxParams_registerSequenceProducer() : Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params. This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(), which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().

If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx() is required, then this function is for you. Otherwise, you probably don't need it.

See tests/zstreamtest.c for example usage.

◆ ZSTD_CCtxParams_reset()

size_t ZSTD_CCtxParams_reset ( ZSTD_CCtx_params params)

ZSTD_CCtxParams_reset() : Reset params to default values.

◆ ZSTD_CCtxParams_setParameter()

size_t ZSTD_CCtxParams_setParameter ( ZSTD_CCtx_params params,
ZSTD_cParameter  param,
int  value 
)

ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ Similar to ZSTD_CCtx_setParameter. Set one compression parameter, selected by enum ZSTD_cParameter. Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().

Returns
: a code representing success or failure (which can be tested with ZSTD_isError()).

◆ ZSTD_checkContinuity()

ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_checkContinuity ( ZSTD_DCtx dctx,
const void *  dst,
size_t  dstSize 
)

ZSTD_checkContinuity() : check if next dst follows previous position, where decompression ended. If yes, do nothing (continue on current segment). If not, classify previous segment as "external dictionary", and start a new segment. This function cannot fail.

◆ ZSTD_checkCParams()

size_t ZSTD_checkCParams ( ZSTD_compressionParameters  cParams)

ZSTD_checkCParams() : Ensure CParam values remain within the authorized range.

Returns
: 0 on success, or an error code (which can be checked with ZSTD_isError()) if one value is beyond the authorized range

◆ ZSTD_compress()

size_t ZSTD_compress ( void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
int  compressionLevel 
)

ZSTD_compress() : Compresses src content as a single zstd compressed frame into already allocated dst. NOTE: Providing dstCapacity >= ZSTD_compressBound(srcSize) guarantees that zstd will have enough space to successfully compress the data.

Returns
: compressed size written into dst (<= dstCapacity), or an error code if it fails (which can be tested using ZSTD_isError()).
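
A minimal one-shot example, sizing the destination with ZSTD_compressBound() as recommended above (public API only; error handling kept short):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    const char* const src = "zstd one-shot compression example";
    size_t const srcSize = strlen(src) + 1;

    size_t const dstCapacity = ZSTD_compressBound(srcSize);   /* worst-case output size */
    void* const dst = malloc(dstCapacity);
    if (dst == NULL) return 1;

    {   size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3 /* level */);
        if (ZSTD_isError(cSize)) {
            printf("compression failed: %s\n", ZSTD_getErrorName(cSize));
        } else {
            printf("%zu -> %zu bytes\n", srcSize, cSize);
        }
    }
    free(dst);
    return 0;
}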

◆ ZSTD_compress2()

size_t ZSTD_compress2 ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize 
)

ZSTD_compress2() : Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. (note that this entry point doesn't even expose a compression level parameter). ZSTD_compress2() always starts a new frame. Should cctx hold data from a previously unfinished frame, everything about it is forgotten.

  • Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
  • The function is always blocking, returns when compression is completed. NOTE: Providing dstCapacity >= ZSTD_compressBound(srcSize) guarantees that zstd will have enough space to successfully compress the data, though it is possible it fails for other reasons.
    Returns
    : compressed size written into dst (<= dstCapacity), or an error code if it fails (which can be tested using ZSTD_isError()).

◆ ZSTD_compress_advanced()

size_t ZSTD_compress_advanced ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const void *  dict,
size_t  dictSize,
ZSTD_parameters  params 
)

ZSTD_compress_advanced() : Note : this function is now DEPRECATED. It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. This prototype will generate compilation warnings.

◆ ZSTD_compress_usingCDict()

size_t ZSTD_compress_usingCDict ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const ZSTD_CDict cdict 
)

ZSTD_compress_usingCDict() : Compression using a digested Dictionary. Faster startup than ZSTD_compress_usingDict(); recommended when the same dictionary is used multiple times. Note : compression parameters are decided at CDict creation time, and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no).
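
A sketch of the digested-dictionary path: create the CDict once, then reuse it for many frames. Public API only; dictBuffer/dictSize are assumed to hold a dictionary produced elsewhere (e.g. by ZDICT_trainFromBuffer()).

#include <zstd.h>

/* One frame compressed through a digested dictionary (CDict). */
static size_t compressWithCDict(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const void* dictBuffer, size_t dictSize)
{
    size_t result = (size_t)-1;   /* sketch-only failure value */
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3 /* level */);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    if (cdict != NULL && cctx != NULL) {
        /* in real code, create the CDict once at startup and reuse it for many frames */
        result = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
    }
    ZSTD_freeCCtx(cctx);    /* both free functions accept NULL */
    ZSTD_freeCDict(cdict);
    return result;
}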

◆ ZSTD_compress_usingCDict_advanced()

size_t ZSTD_compress_usingCDict_advanced ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const ZSTD_CDict cdict,
ZSTD_frameParameters  fParams 
)

ZSTD_compress_usingCDict_advanced() : Note : this function is now DEPRECATED. It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. This prototype will generate compilation warnings.

◆ ZSTD_compress_usingDict()

size_t ZSTD_compress_usingDict ( ZSTD_CCtx ctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const void *  dict,
size_t  dictSize,
int  compressionLevel 
)

ZSTD_compress_usingDict() : Compression at an explicit compression level using a Dictionary. A dictionary can be any arbitrary data segment (also called a prefix), or a buffer with specified information (see zdict.h). Note : This function loads the dictionary, resulting in significant startup delay. It's intended for a dictionary used only once. Note 2 : When dict == NULL || dictSize < 8 no dictionary is used.

◆ ZSTD_compressBegin()

size_t ZSTD_compressBegin ( ZSTD_CCtx cctx,
int  compressionLevel 
)

Buffer-less streaming compression (synchronous mode)

A ZSTD_CCtx object is required to track streaming operations. Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. ZSTD_CCtx object can be reused multiple times within successive compression operations.

Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.

Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function :

  • ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
  • Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
  • Caller must ensure there is enough space in dst to store compressed data under worst case scenario. Worst case evaluation is provided by ZSTD_compressBound(). ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
  • ZSTD_compressContinue() presumes prior input is still accessible and unmodified (up to maximum distance size, see WindowLog). It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
  • ZSTD_compressContinue() detects that prior input has been overwritten when src buffer overlaps. In which case, it will "discard" the relevant memory section from its history.

Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.

ZSTD_CCtx object can be reused (ZSTD_compressBegin()) to compress again.
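
A condensed sketch of that begin/continue/end sequence. These entry points live behind ZSTD_STATIC_LINKING_ONLY in zstd.h, and the caller must size each output chunk with ZSTD_compressBound() of the corresponding input chunk; chunk boundaries and buffer sizing here are illustrative.

#define ZSTD_STATIC_LINKING_ONLY   /* buffer-less streaming API */
#include <zstd.h>

/* Compress two contiguous chunks of src into dst without internal buffering.
 * Simplification for the sketch: all output goes into one dst buffer that the
 * caller has sized generously (>= ZSTD_compressBound() of each chunk, summed). */
static size_t bufferlessCompress(void* dst, size_t dstCapacity,
                                 const void* src, size_t chunk1, size_t chunk2)
{
    size_t dstPos = 0;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return (size_t)-1;   /* sketch-only failure value */

    {   size_t r = ZSTD_compressBegin(cctx, 3 /* level */);
        if (!ZSTD_isError(r)) {
            r = ZSTD_compressContinue(cctx, (char*)dst + dstPos, dstCapacity - dstPos,
                                      src, chunk1);                  /* first chunk */
            if (!ZSTD_isError(r)) dstPos += r;
        }
        if (!ZSTD_isError(r)) {
            /* last chunk + epilogue; srcSize==0 would also be legal here */
            r = ZSTD_compressEnd(cctx, (char*)dst + dstPos, dstCapacity - dstPos,
                                 (const char*)src + chunk1, chunk2);
            if (!ZSTD_isError(r)) dstPos += r;
        }
        ZSTD_freeCCtx(cctx);
        return ZSTD_isError(r) ? r : dstPos;
    }
}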

◆ ZSTD_compressBegin_advanced()

size_t ZSTD_compressBegin_advanced ( ZSTD_CCtx cctx,
const void *  dict,
size_t  dictSize,
ZSTD_parameters  params,
unsigned long long  pledgedSrcSize 
)

pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN

ZSTD_compressBegin_advanced() :

Returns
: 0, or an error code

◆ ZSTD_compressBegin_usingCDict()

size_t ZSTD_compressBegin_usingCDict ( ZSTD_CCtx cctx,
const ZSTD_CDict cdict 
)

note: fails if cdict==NULL

◆ ZSTD_compressBlock_fast_noDict_generic()

FORCE_INLINE_TEMPLATE ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_fast_noDict_generic ( ZSTD_MatchState_t ms,
SeqStore_t seqStore,
U32  rep[ZSTD_REP_NUM],
void const *  src,
size_t  srcSize,
U32 const  mls,
int  useCmov 
)

If you squint hard enough (and ignore repcodes), the search operation at any given position is broken into 4 stages:

  1. Hash (map position to hash value via input read)
  2. Lookup (map hash val to index via hashtable read)
  3. Load (map index to value at that position via input read)
  4. Compare

Each of these steps involves a memory read at an address which is computed from the previous step. This means these steps must be sequenced and their latencies are cumulative.

Rather than do 1->2->3->4 sequentially for a single position before moving onto the next, this implementation interleaves these operations across the next few positions:

R = Repcode Read & Compare
H = Hash
T = Table Lookup
M = Match Read & Compare

Pos | Time -->
----+-------------------
 N  | ... M
N+1 | ...   TM
N+2 | R H   T M
N+3 |   H     TM
N+4 | R H       T M
N+5 |   H         ...
N+6 | R           ...

This is very much analogous to the pipelining of execution in a CPU. And just like a CPU, we have to dump the pipeline when we find a match (i.e., take a branch).

When this happens, we throw away our current state, and do the following prep to re-enter the loop:

Pos | Time -->
----+-------------------
 N  | H T
N+1 |   H

This is also the work we do at the beginning to enter the loop initially.

◆ ZSTD_compressBlock_greedy()

size_t ZSTD_compressBlock_greedy ( ZSTD_MatchState_t ms,
SeqStore_t seqStore,
U32  rep[ZSTD_REP_NUM],
void const *  src,
size_t  srcSize 
)

used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK

◆ ZSTD_compressBound()

size_t ZSTD_compressBound ( size_t  srcSize)

maximum compressed size in worst case single-pass scenario

◆ ZSTD_compressCCtx()

size_t ZSTD_compressCCtx ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
int  compressionLevel 
)

ZSTD_compressCCtx() : Same as ZSTD_compress(), using an explicit ZSTD_CCtx. Important : in order to mirror ZSTD_compress() behavior, this function compresses at the requested compression level, ignoring any other advanced parameter. If any advanced parameters were set using the advanced API, they will all be reset. Only @compressionLevel remains.
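
A minimal sketch (hypothetical helper) that allocates dst with ZSTD_compressBound() and reuses an existing cctx:

#include <stdlib.h>
#include <zstd.h>

/* Sketch: one-shot compression at an explicit level, reusing an existing cctx.
 * Returns the compressed size, or 0 on failure; *dstOut must be freed by the caller. */
static size_t compress_once(ZSTD_CCtx* cctx,
                            const void* src, size_t srcSize,
                            void** dstOut, int level)
{
    size_t const bound = ZSTD_compressBound(srcSize);   /* worst-case dst size */
    void* const dst = malloc(bound);
    size_t cSize;
    if (dst == NULL) return 0;
    cSize = ZSTD_compressCCtx(cctx, dst, bound, src, srcSize, level);
    if (ZSTD_isError(cSize)) { free(dst); return 0; }
    *dstOut = dst;
    return cSize;
}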

◆ ZSTD_compressSequences()

size_t ZSTD_compressSequences ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const ZSTD_Sequence inSeqs,
size_t  inSeqsSize,
const void *  src,
size_t  srcSize 
)

ZSTD_compressSequences() : Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. @src contains the entire input (not just the literals). If @srcSize > sum(sequence.length), the remaining bytes are considered all literals If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.). The entire source is compressed into a single frame.

The compression behavior changes based on cctx params. In particular: If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on the block size derived from the cctx, and sequences may be split. This is the default setting.

If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.

When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation. By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10). ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.

If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for specifics regarding offset/matchlength requirements) and then bail out and return an error.

In addition to the two adjustable experimental params, there are other important cctx params.

  • ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
  • ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
  • ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md

Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence.rep is effectively unused. Dev Note: Once the ability to ingest repcodes becomes available, the explicit block delims mode must respect those repcodes exactly, and cannot emit an RLE block that disagrees with the repcode history.

Returns
: final compressed size, or a ZSTD error code.

◆ ZSTD_compressSequencesAndLiterals()

size_t ZSTD_compressSequencesAndLiterals ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
const ZSTD_Sequence inSeqs,
size_t  nbSequences,
const void *  literals,
size_t  litSize,
size_t  litBufCapacity,
size_t  decompressedSize 
)

ZSTD_compressSequencesAndLiterals() : This is a variant of ZSTD_compressSequences() which, instead of receiving (src,srcSize) as input parameter, receives (literals,litSize), aka all the literals, already extracted and laid out into a single continuous buffer. This can be useful if the process generating the sequences also happens to generate the buffer of literals, thus skipping an extraction + caching stage. It's a speed optimization, useful when the right conditions are met, but it also features the following limitations:

  • Only supports explicit delimiter mode
  • Currently does not support Sequences validation (so input Sequences are trusted)
  • Not compatible with frame checksum, which must be disabled
  • If any block is incompressible, will fail and return an error
  • @litSize must be == sum of all .litLength fields in @inSeqs. Any discrepancy will generate an error.
  • @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals. @litBufCapacity must be at least 8 bytes larger than @litSize.
  • @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
    Returns
    : final compressed size, or a ZSTD error code.

◆ ZSTD_compressStream()

size_t ZSTD_compressStream ( ZSTD_CStream zcs,
ZSTD_outBuffer output,
ZSTD_inBuffer input 
)

Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). NOTE: The return value is different. ZSTD_compressStream() returns a hint for the next read size (if non-zero and not an error). ZSTD_compressStream2() returns the minimum nb of bytes left to flush (if non-zero and not an error).

◆ ZSTD_compressStream2()

size_t ZSTD_compressStream2 ( ZSTD_CCtx cctx,
ZSTD_outBuffer output,
ZSTD_inBuffer input,
ZSTD_EndDirective  endOp 
)

ZSTD_compressStream2() : Requires v1.4.0+ Behaves about the same as ZSTD_compressStream, with additional control on end directive.

  • Compression parameters are pushed into the CCtx before starting compression, using ZSTD_CCtx_set*()
  • Compression parameters cannot be changed once compression is started (save for a list of exceptions in multi-threading mode)
  • output->pos must be <= dstCapacity, input->pos must be <= srcSize
  • output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
  • endOp must be a valid directive
  • When nbWorkers==0 (default), the function is blocking : it completes its job before returning to the caller.
  • When nbWorkers>=1, the function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available, and then immediately returns, just indicating that there is some data remaining to be flushed. The function nonetheless guarantees forward progress : it returns only after it has read or written at least one byte.
  • Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2(), which is always blocking.
  • Returns : the minimum amount of data remaining to be flushed from internal buffers, or an error code, which can be tested using ZSTD_isError(). If the return value is != 0, the flush is not fully completed and there is still some data left within internal buffers. This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers. For ZSTD_e_end, the return value is == 0 when internal buffers are fully flushed and the frame is completed.
  • After a ZSTD_e_end directive, if internal buffers are not fully flushed (return value != 0), only ZSTD_e_end or ZSTD_e_flush operations are allowed. Before starting a new compression job, or changing compression parameters, it is required to fully flush internal buffers.
  • Note: if an operation ends with an error, it may leave @cctx in an undefined state. Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state. In order to be re-employed after an error, a state must be reset, which can be done explicitly (ZSTD_CCtx_reset()), or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())
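
A condensed sketch of the usual streaming loop built on ZSTD_compressStream2() (hypothetical helper; buffers are sized per ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() and error handling is reduced to a flag):

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Sketch: stream-compress fin into fout with ZSTD_compressStream2().
 * Returns 0 on success, non-zero on error. */
static int stream_compress(FILE* fin, FILE* fout, int level)
{
    size_t const inSize  = ZSTD_CStreamInSize();
    size_t const outSize = ZSTD_CStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    int err = (!inBuf || !outBuf || !cctx);

    if (!err) ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

    while (!err) {
        size_t const readSz = fread(inBuf, 1, inSize, fin);
        int const lastChunk = (readSz < inSize);
        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { inBuf, readSz, 0 };
        int finished = 0;
        while (!finished && !err) {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) { err = 1; break; }
            fwrite(outBuf, 1, output.pos, fout);
            /* with ZSTD_e_end, loop until the frame is fully flushed (remaining == 0);
             * with ZSTD_e_continue, loop until the whole input chunk is consumed */
            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
        }
        if (lastChunk) break;
    }
    ZSTD_freeCCtx(cctx);
    free(inBuf);
    free(outBuf);
    return err;
}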

◆ ZSTD_compressStream2_simpleArgs()

size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx cctx,
void *  dst,
size_t  dstCapacity,
size_t *  dstPos,
const void *  src,
size_t  srcSize,
size_t *  srcPos,
ZSTD_EndDirective  endOp 
)

ZSTD_compressStream2_simpleArgs() : Same as ZSTD_compressStream2(), but using only integral types as arguments. This variant might be helpful for binders from dynamic languages which have trouble handling structures containing memory pointers.

◆ ZSTD_copyCCtx()

size_t ZSTD_copyCCtx ( ZSTD_CCtx dstCCtx,
const ZSTD_CCtx srcCCtx,
unsigned long long  pledgedSrcSize 
)

note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN

ZSTD_copyCCtx() : Duplicate an existing context srcCCtx into another one dstCCtx. Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). pledgedSrcSize==0 means "unknown".

Returns
: 0, or an error code

◆ ZSTD_count_2segments()

MEM_STATIC size_t ZSTD_count_2segments ( const BYTE *  ip,
const BYTE *  match,
const BYTE *  iEnd,
const BYTE *  mEnd,
const BYTE *  iStart 
)

ZSTD_count_2segments() : can count match length with ip & match in 2 different segments. Convention : on reaching mEnd, the match count continues starting from iStart.

◆ ZSTD_cParam_getBounds()

ZSTD_bounds ZSTD_cParam_getBounds ( ZSTD_cParameter  cParam)

ZSTD_cParam_getBounds() : All parameters must belong to an interval with lower and upper bounds, otherwise they will either trigger an error or be automatically clamped.

Returns
: a structure, ZSTD_bounds, which contains
  • an error status field, which must be tested using ZSTD_isError()
  • lower and upper bounds, both inclusive

◆ ZSTD_cpuSupportsBmi2()

MEM_STATIC int ZSTD_cpuSupportsBmi2 ( void  )
Returns
true iff the CPU supports dynamic BMI2 dispatch.

◆ ZSTD_createCCtxParams()

ZSTD_CCtx_params * ZSTD_createCCtxParams ( void  )

ZSTD_CCtx_params : Quick howto :

This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() for static allocation of CCtx for single-threaded compression.

◆ ZSTD_createCDict()

ZSTD_CDict * ZSTD_createCDict ( const void *  dictBuffer,
size_t  dictSize,
int  compressionLevel 
)

ZSTD_createCDict() : When compressing multiple messages or blocks using the same dictionary, it's recommended to digest the dictionary only once, since it's a costly operation. ZSTD_createCDict() will create a state from digesting a dictionary. The resulting state can be used for future compression operations with very limited startup cost. ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. Note 1 : Consider experimental function ZSTD_createCDict_byReference() if you prefer to not duplicate @dictBuffer content. Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, in which case the only thing that it transports is the @compressionLevel. This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.
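
A sketch of the digest-once, compress-many pattern (hypothetical helper; dst is reused for each message and should be sized for the largest one via ZSTD_compressBound()):

#include <zstd.h>

/* Digest the dictionary once, then reuse the CDict for many messages. */
static int compress_many(const void* dict, size_t dictSize,
                         const void* const msgs[], const size_t msgSizes[],
                         size_t nbMsgs,
                         void* dst, size_t dstCapacity)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    int ok = (cdict != NULL) && (cctx != NULL);
    size_t i;
    for (i = 0; ok && i < nbMsgs; i++) {
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                      msgs[i], msgSizes[i], cdict);
        ok = !ZSTD_isError(cSize);
        /* ... store or transmit dst[0..cSize) for this message ... */
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    return ok;
}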

◆ ZSTD_createCDict_byReference()

ZSTD_CDict * ZSTD_createCDict_byReference ( const void *  dictBuffer,
size_t  dictSize,
int  compressionLevel 
)

ZSTD_createCDict_byReference() : Create a digested dictionary for compression Dictionary content is just referenced, not duplicated. As a consequence, dictBuffer must outlive CDict, and its content must remain unmodified throughout the lifetime of CDict. note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef

◆ ZSTD_createDDict()

ZSTD_DDict * ZSTD_createDDict ( const void *  dict,
size_t  dictSize 
)

ZSTD_createDDict() : Create a digested dictionary, ready to start decompression operations without startup delay. The dict content is copied inside the DDict, so dict can be released after DDict creation.

◆ ZSTD_createDDict_byReference()

ZSTD_DDict * ZSTD_createDDict_byReference ( const void *  dictBuffer,
size_t  dictSize 
)

ZSTD_createDDict_byReference() : Create a digested dictionary, ready to start decompression operations without startup delay. Dictionary content is simply referenced, and therefore stays in dictBuffer : it will be accessed during decompression. It is important that dictBuffer outlives the DDict; it must remain read-accessible (and unmodified) throughout the lifetime of the DDict, i.e. the DDict must be freed before dictBuffer.

◆ ZSTD_crossEntropyCost()

size_t ZSTD_crossEntropyCost ( short const *  norm,
unsigned  accuracyLog,
unsigned const *  count,
unsigned const  max 
)

Returns the cost in bits of encoding the distribution in count using the table described by norm. The max symbol supported by norm is assumed >= max. norm must be valid for every symbol with non-zero probability in count.

◆ ZSTD_CStreamInSize()

size_t ZSTD_CStreamInSize ( void  )

recommended size for input buffer

◆ ZSTD_CStreamOutSize()

size_t ZSTD_CStreamOutSize ( void  )

recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block.

◆ ZSTD_cwksp_align()

MEM_STATIC size_t ZSTD_cwksp_align ( size_t  size,
size_t  align 
)

Align must be a power of 2.

◆ ZSTD_cwksp_aligned64_alloc_size()

MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size ( size_t  size)

Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. Used to determine the number of bytes required for a given "aligned".

◆ ZSTD_cwksp_alloc_size()

MEM_STATIC size_t ZSTD_cwksp_alloc_size ( size_t  size)

Use this to determine how much space in the workspace we will consume to allocate this object. (Normally it should be exactly the size of the object, but under special conditions, like ASAN, where we pad each object, it might be larger.)

Since tables aren't currently redzoned, you don't need to call through this to figure out how much space you need for the matchState tables. Everything else is though.

Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().

◆ ZSTD_cwksp_bytes_to_align_ptr()

MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr ( void *  ptr,
const size_t  alignBytes 
)

Return the number of additional bytes required to align a pointer to the given number of bytes. alignBytes must be a power of two.

◆ ZSTD_cwksp_clean_tables()

MEM_STATIC void ZSTD_cwksp_clean_tables ( ZSTD_cwksp ws)

Zero the part of the allocated tables not already marked clean.

◆ ZSTD_cwksp_clear()

MEM_STATIC void ZSTD_cwksp_clear ( ZSTD_cwksp ws)

Invalidates all buffer, aligned, and table allocations. Object allocations remain valid.

◆ ZSTD_cwksp_clear_tables()

MEM_STATIC void ZSTD_cwksp_clear_tables ( ZSTD_cwksp ws)

Invalidates table allocations. All other allocations remain valid.

◆ ZSTD_cwksp_init()

MEM_STATIC void ZSTD_cwksp_init ( ZSTD_cwksp ws,
void *  start,
size_t  size,
ZSTD_cwksp_static_alloc_e  isStatic 
)

The provided workspace takes ownership of the buffer [start, start+size). Any existing values in the workspace are ignored (the previously managed buffer, if present, must be separately freed).

◆ ZSTD_cwksp_initialAllocStart()

MEM_STATIC void * ZSTD_cwksp_initialAllocStart ( ZSTD_cwksp ws)

Returns the initial value for allocStart which is used to determine the position from which we can allocate from the end of the workspace.

◆ ZSTD_cwksp_internal_advance_phase()

MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase ( ZSTD_cwksp ws,
ZSTD_cwksp_alloc_phase_e  phase 
)

Moves the cwksp to the next phase, and does any necessary allocations. cwksp initialization must necessarily go through each phase in order. Returns 0 on success, or a zstd error code.

◆ ZSTD_cwksp_move()

MEM_STATIC void ZSTD_cwksp_move ( ZSTD_cwksp dst,
ZSTD_cwksp src 
)

Moves the management of a workspace from one cwksp to another. The src cwksp is left in an invalid state (src must be re-init()'ed before it's used again).

◆ ZSTD_cwksp_owns_buffer()

MEM_STATIC int ZSTD_cwksp_owns_buffer ( const ZSTD_cwksp ws,
const void *  ptr 
)

Returns whether this object/buffer/etc was allocated in this workspace.

◆ ZSTD_cwksp_reserve_aligned64()

MEM_STATIC void* ZSTD_cwksp_reserve_aligned64 ( ZSTD_cwksp ws,
size_t  bytes 
)

Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).

◆ ZSTD_cwksp_reserve_aligned_init_once()

MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once ( ZSTD_cwksp ws,
size_t  bytes 
)

Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). This memory has been initialized at least once in the past. This doesn't mean it has been initialized this time, and it might contain data from previous operations. The main usage is for algorithms that might need read access into uninitialized memory. The algorithm must maintain safety under these conditions and must make sure it doesn't leak any of the past data (directly or in side channels).

◆ ZSTD_cwksp_reserve_buffer()

MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer ( ZSTD_cwksp ws,
size_t  bytes 
)

Reserves and returns unaligned memory.

◆ ZSTD_cwksp_reserve_internal()

MEM_STATIC void* ZSTD_cwksp_reserve_internal ( ZSTD_cwksp ws,
size_t  bytes,
ZSTD_cwksp_alloc_phase_e  phase 
)

Internal function. Do not use directly.

◆ ZSTD_cwksp_reserve_internal_buffer_space()

MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space ( ZSTD_cwksp ws,
size_t const  bytes 
)

Internal function. Do not use directly. Reserves the given number of bytes within the aligned/buffer segment of the wksp, which counts from the end of the wksp (as opposed to the object/table segment).

Returns a pointer to the beginning of that space.

◆ ZSTD_cwksp_reserve_object()

MEM_STATIC void* ZSTD_cwksp_reserve_object ( ZSTD_cwksp ws,
size_t  bytes 
)

Aligned on sizeof(void*). Note : should happen only once, at workspace first initialization

◆ ZSTD_cwksp_reserve_object_aligned()

MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned ( ZSTD_cwksp ws,
size_t  byteSize,
size_t  alignment 
)

with alignment control Note : should happen only once, at workspace first initialization

◆ ZSTD_cwksp_reserve_table()

MEM_STATIC void* ZSTD_cwksp_reserve_table ( ZSTD_cwksp ws,
size_t  bytes 
)

Aligned on 64 bytes. These buffers have the special property that their values remain constrained, allowing us to reuse them without memset()-ing them.

◆ ZSTD_cwksp_slack_space_required()

MEM_STATIC size_t ZSTD_cwksp_slack_space_required ( void  )

Returns the amount of additional space the cwksp must allocate for internal purposes (currently only alignment).

◆ ZSTD_cycleLog()

U32 ZSTD_cycleLog ( U32  hashLog,
ZSTD_strategy  strat 
)

ZSTD_cycleLog() : condition for correct operation : hashLog > 1

◆ ZSTD_DCtx_getParameter()

size_t ZSTD_DCtx_getParameter ( ZSTD_DCtx dctx,
ZSTD_dParameter  param,
int *  value 
)

ZSTD_DCtx_getParameter() : Get the requested decompression parameter value, selected by enum ZSTD_dParameter, and store it into int* value.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()).

◆ ZSTD_DCtx_loadDictionary()

size_t ZSTD_DCtx_loadDictionary ( ZSTD_DCtx dctx,
const void *  dict,
size_t  dictSize 
)

ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ Create an internal DDict from dict buffer, to be used to decompress all future frames. The dictionary remains valid for all future frames, until explicitly invalidated, or a new dictionary is loaded.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, meaning "return to no-dictionary mode". Note 1 : Loading a dictionary involves building tables, which has a non-negligible impact on CPU usage and latency. It's recommended to "load once, use many times", to amortize the cost. Note 2 : dict content will be copied internally, so dict can be released after loading. Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of how dictionary content is loaded and interpreted.
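
A sketch of the "load once, use many times" pattern (hypothetical helper; the same dctx can then decompress further frames produced with this dictionary):

#include <zstd.h>

/* Load the dictionary once into the DCtx, then decompress a frame with it.
 * The dictionary stays active for all subsequent frames on this dctx. */
static size_t decompress_with_dict(ZSTD_DCtx* dctx,
                                   const void* dict, size_t dictSize,
                                   void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    size_t const r = ZSTD_DCtx_loadDictionary(dctx, dict, dictSize);
    if (ZSTD_isError(r)) return r;
    return ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
}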

◆ ZSTD_DCtx_loadDictionary_advanced()

size_t ZSTD_DCtx_loadDictionary_advanced ( ZSTD_DCtx dctx,
const void *  dict,
size_t  dictSize,
ZSTD_dictLoadMethod_e  dictLoadMethod,
ZSTD_dictContentType_e  dictContentType 
)

ZSTD_DCtx_loadDictionary_advanced() : Same as ZSTD_DCtx_loadDictionary(), but gives direct control over how to load the dictionary (by copy ? by reference ?) and how to interpret it (automatic ? force raw mode ? full mode only ?).

◆ ZSTD_DCtx_loadDictionary_byReference()

size_t ZSTD_DCtx_loadDictionary_byReference ( ZSTD_DCtx dctx,
const void *  dict,
size_t  dictSize 
)

ZSTD_DCtx_loadDictionary_byReference() : Same as ZSTD_DCtx_loadDictionary(), but references dict content instead of copying it into dctx. This saves memory if dict remains around. However, it's imperative that dict remains accessible (and unmodified) while being used, so it must outlive decompression.

◆ ZSTD_DCtx_refDDict()

size_t ZSTD_DCtx_refDDict ( ZSTD_DCtx dctx,
const ZSTD_DDict ddict 
)

ZSTD_DCtx_refDDict() : Requires v1.4.0+ Reference a prepared dictionary, to be used to decompress next frames. The dictionary remains active for decompression of future frames using same DCtx.

If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function will store the DDict references in a table, and the DDict used for decompression will be determined at decompression time, as per the dict ID in the frame. The memory for the table is allocated on the first call to refDDict, and can be freed with ZSTD_freeDCtx().

If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary will be managed, and referencing a dictionary effectively "discards" any previous one.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Special: referencing a NULL DDict means "return to no-dictionary mode". Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.

◆ ZSTD_DCtx_refPrefix()

size_t ZSTD_DCtx_refPrefix ( ZSTD_DCtx dctx,
const void *  prefix,
size_t  prefixSize 
)

ZSTD_DCtx_refPrefix() : Requires v1.4.0+ Reference a prefix (single-usage dictionary) to decompress next frame. This is the reverse operation of ZSTD_CCtx_refPrefix(), and must use the same prefix as the one used during compression. Prefix is only used once. Reference is discarded at end of frame. End of frame is reached when ZSTD_decompressStream() returns 0.

Returns
: 0, or an error code (which can be tested with ZSTD_isError()). Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary Note 2 : Prefix buffer is referenced. It must outlive decompression. Prefix buffer must remain unmodified up to the end of frame, reached when ZSTD_decompressStream() returns 0. Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. A full dictionary is more costly, as it requires building tables.

◆ ZSTD_DCtx_refPrefix_advanced()

size_t ZSTD_DCtx_refPrefix_advanced ( ZSTD_DCtx dctx,
const void *  prefix,
size_t  prefixSize,
ZSTD_dictContentType_e  dictContentType 
)

ZSTD_DCtx_refPrefix_advanced() : Same as ZSTD_DCtx_refPrefix(), but gives finer control over how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)

◆ ZSTD_DCtx_reset()

size_t ZSTD_DCtx_reset ( ZSTD_DCtx dctx,
ZSTD_ResetDirective  reset 
)

ZSTD_DCtx_reset() : Return a DCtx to clean state. Session and parameters can be reset jointly or separately. Parameters can only be reset when no active frame is being decompressed.

Returns
: 0, or an error code, which can be tested with ZSTD_isError()

◆ ZSTD_DCtx_setFormat()

size_t ZSTD_DCtx_setFormat ( ZSTD_DCtx dctx,
ZSTD_format_e  format 
)

ZSTD_DCtx_setFormat() : This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). Instruct the decoder context about what kind of data to decode next. This instruction is mandatory to decode data without a fully-formed header, such as ZSTD_f_zstd1_magicless for example.

Returns
: 0, or an error code (which can be tested using ZSTD_isError()).

◆ ZSTD_DCtx_setMaxWindowSize()

size_t ZSTD_DCtx_setMaxWindowSize ( ZSTD_DCtx dctx,
size_t  maxWindowSize 
)

ZSTD_DCtx_setMaxWindowSize() : Refuses allocating internal buffers for frames requiring a window size larger than provided limit. This protects a decoder context from reserving too much memory for itself (potential attack scenario). This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)

Returns
: 0, or an error code (which can be tested using ZSTD_isError()).

◆ ZSTD_DCtx_setParameter()

size_t ZSTD_DCtx_setParameter ( ZSTD_DCtx dctx,
ZSTD_dParameter  param,
int  value 
)

ZSTD_DCtx_setParameter() : Set one decompression parameter, selected by enum ZSTD_dParameter. All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). Providing a value beyond a bound will either clamp it, or trigger an error (depending on the parameter). Setting a parameter is only possible during frame initialization (before starting decompression).

Returns
: 0, or an error code (which can be tested using ZSTD_isError()).

◆ ZSTD_decodeSeqHeaders()

size_t ZSTD_decodeSeqHeaders ( ZSTD_DCtx dctx,
int *  nbSeqPtr,
const void *  src,
size_t  srcSize 
)

ZSTD_decodeSeqHeaders() : decode sequence header from src

◆ ZSTD_decodeSequence()

FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence ( seqState_t seqState,
const ZSTD_longOffset_e  longOffsets,
const int  isLastSeq 
)

ZSTD_decodeSequence(): longOffsets : tells the decoder to reload more bits while decoding large offsets; only used in 32-bit mode

Returns
: Sequence (litL + matchL + offset)

◆ ZSTD_decodingBufferSize_min()

size_t ZSTD_decodingBufferSize_min ( unsigned long long  windowSize,
unsigned long long  frameContentSize 
)

Buffer-less streaming decompression (synchronous mode)

A ZSTD_DCtx object is required to track streaming operations. Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. A ZSTD_DCtx object can be reused multiple times.

The first typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). The frame header is extracted from the beginning of the compressed frame, so providing only the frame's beginning is enough. The data fragment must be large enough to ensure successful decoding; ZSTD_frameHeaderSize_max bytes is guaranteed to always be large enough. result : 0 : successful decoding, the ZSTD_FrameHeader structure is correctly filled. >0 : srcSize is too small, please provide at least result bytes on the next attempt. Otherwise, an error code, which can be tested using ZSTD_isError().

It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame, such as the dictionary ID, content size, or maximum back-reference distance (windowSize). Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. As a consequence, check that values remain within valid application range. For example, do not allocate memory blindly, check that windowSize is within expectation. Each application can set its own limits, depending on local restrictions. For extended interoperability, it is recommended to support windowSize of at least 8 MB.

ZSTD_decompressContinue() needs previous data blocks during decompression, up to windowSize bytes. ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, or that previous contiguous segment is large enough to properly handle maximum back-reference distance. There are multiple ways to guarantee this condition.

The most memory efficient way is to use a round buffer of sufficient size. Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), which can return an error code if the required value is too large for the current system (in 32-bit mode). In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, whose maximum size is provided in the ZSTD_FrameHeader structure, field blockSizeMax. At that point, decoding can resume from the beginning of the buffer. Note that already-decoded data stored in the buffer should be flushed before being overwritten.

There are alternatives possible, for example using two or more buffers of size windowSize each, though they consume more memory.

Finally, if you control the compression process, you can also ignore all buffer size rules, as long as the encoder and decoder progress in "lock-step", aka use exactly the same buffer sizes, break contiguity at the same place, etc.

Once buffers are setup, start decompression, with ZSTD_decompressBegin(). If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().

Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately. ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.

result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError().

A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. Context can then be reset to start a new decompression.

Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType(). This information is not required to properly decode a frame.
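
Putting the above together, a condensed sketch of the buffer-less decoding loop for a frame held entirely in memory (hypothetical helper; the round-buffer management described above is omitted, dst is assumed large enough for the whole frame, and the buffer-less entry points require ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: decode one complete frame held in (src,srcSize) into dst with the
 * buffer-less API. Returns the total decompressed size, or an error code. */
static size_t bufferless_decompress(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t total = 0;

    {   ZSTD_FrameHeader zfh;
        size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (r != 0) return r;   /* error code, or >0 meaning srcSize too small */
        /* validate zfh.windowSize / zfh.frameContentSize against application limits here */
    }
    {   size_t const r = ZSTD_decompressBegin(dctx);
        if (ZSTD_isError(r)) return r;
    }
    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (toRead == 0) break;                    /* frame fully decoded */
        /* src is assumed to contain the complete frame, so toRead bytes are available */
        produced = ZSTD_decompressContinue(dctx, op, dstCapacity - total, ip, toRead);
        if (ZSTD_isError(produced)) return produced;
        ip += toRead;
        op += produced;                            /* may be 0 for metadata items */
        total += produced;
    }
    return total;
}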

== Special case : skippable frames ==

Skippable frames allow integration of user-defined data into a flow of concatenated frames. Skippable frames will be ignored (skipped) by the decompressor. The format of skippable frames is as follows :
a) Skippable frame ID - 4 Bytes, little-endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, little-endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame. For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content. When the frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN

◆ ZSTD_decompress()

size_t ZSTD_decompress ( void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  compressedSize 
)

ZSTD_decompress() : compressedSize : must be the exact size of some number of compressed and/or skippable frames. Multiple compressed frames can be decompressed at once with this method. The result will be the concatenation of all decompressed frames, back to back. dstCapacity is an upper bound of originalSize to regenerate. First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). If maximum upper bound isn't known, prefer using streaming mode to decompress data.

Returns
: the number of bytes decompressed into dst (<= dstCapacity), or an errorCode if it fails (which can be tested using ZSTD_isError()).
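
As an illustration, a common pattern is to size dst from the frame header before calling ZSTD_decompress(). The helper below is a sketch (hypothetical name); it assumes the frame records its content size and that the value has been checked against application limits:

#include <stdlib.h>
#include <zstd.h>

/* Decompress a single frame whose content size is recorded in its header.
 * Returns a malloc'ed buffer (caller frees) and sets *dSizeOut, or NULL on error. */
static void* decompress_frame(const void* src, size_t srcSize, size_t* dSizeOut)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    void* dst;
    size_t dSize;
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a zstd frame */
    if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* use streaming mode instead */
    dst = malloc((size_t)contentSize);
    if (dst == NULL) return NULL;
    dSize = ZSTD_decompress(dst, (size_t)contentSize, src, srcSize);
    if (ZSTD_isError(dSize)) { free(dst); return NULL; }
    *dSizeOut = dSize;
    return dst;
}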

◆ ZSTD_decompress_usingDDict()

size_t ZSTD_decompress_usingDDict ( ZSTD_DCtx dctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const ZSTD_DDict ddict 
)

ZSTD_decompress_usingDDict() : Decompression using a pre-digested Dictionary. Recommended when the same dictionary is used multiple times, since the dictionary is applied without significant startup overhead.
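
The mirror image on the decompression side, as a sketch (hypothetical helper; dst is reused for every frame and must be large enough for the largest one):

#include <zstd.h>

/* Digest the dictionary once, then reuse the DDict across many frames. */
static int decompress_many(const void* dict, size_t dictSize,
                           const void* const frames[], const size_t frameSizes[],
                           size_t nbFrames,
                           void* dst, size_t dstCapacity)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dict, dictSize);
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    int ok = (ddict != NULL) && (dctx != NULL);
    size_t i;
    for (i = 0; ok && i < nbFrames; i++) {
        size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
                                                        frames[i], frameSizes[i], ddict);
        ok = !ZSTD_isError(dSize);
        /* ... consume dst[0..dSize) for this frame ... */
    }
    ZSTD_freeDCtx(dctx);
    ZSTD_freeDDict(ddict);
    return ok;
}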

◆ ZSTD_decompress_usingDict()

size_t ZSTD_decompress_usingDict ( ZSTD_DCtx dctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
const void *  dict,
size_t  dictSize 
)

ZSTD_decompress_usingDict() : Decompression using a known Dictionary. Dictionary must be identical to the one used during compression. Note : This function loads the dictionary, resulting in significant startup delay. It's intended for a dictionary used only once. Note : When dict == NULL || dictSize < 8 no dictionary is used.

◆ ZSTD_decompressBound()

unsigned long long ZSTD_decompressBound ( const void *  src,
size_t  srcSize 
)

ZSTD_decompressBound() : src should point to the start of a series of ZSTD encoded and/or skippable frames srcSize must be the exact size of this series (i.e. there should be a frame boundary at src + srcSize)

Returns
: - upper-bound for the decompressed size of all data in all successive frames
  • if an error occurred: ZSTD_CONTENTSIZE_ERROR

note 1 : an error can occur if src contains an invalid or incorrectly formatted frame. note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of src. in this case, ZSTD_findDecompressedSize and ZSTD_decompressBound return the same value. note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: upper-bound = # blocks * min(128 KB, Window_Size)

ZSTD_decompressBound() : compatible with legacy mode src must point to the start of a ZSTD frame or a skippable frame srcSize must be at least as large as the frame contained

Returns
: the maximum decompressed size of the compressed source

◆ ZSTD_decompressContinue()

size_t ZSTD_decompressContinue ( ZSTD_DCtx dctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize 
)

ZSTD_decompressContinue() : srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())

Returns
: nb of bytes generated into dst (necessarily <= `dstCapacity) or an error code, which can be tested using ZSTD_isError()

◆ ZSTD_decompressDCtx()

size_t ZSTD_decompressDCtx ( ZSTD_DCtx dctx,
void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize 
)

ZSTD_decompressDCtx() : Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx. Compatible with sticky parameters (see below).

◆ ZSTD_decompressionMargin()

size_t ZSTD_decompressionMargin ( const void *  src,
size_t  srcSize 
)

ZSTD_decompressionMargin() : Zstd supports in-place decompression, where the input and output buffers overlap. In this case, the output buffer must be at least (Margin + Output_Size) bytes large, and the input buffer must be at the end of the output buffer.

Layout : the single allocation is the Output Buffer; it is divided into Output_Size followed by Margin, and the Input Buffer (the compressed data) occupies the tail of that allocation, ending where the Output Buffer ends.

NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). NOTE: This applies only to single-pass decompression through ZSTD_decompress() or ZSTD_decompressDCtx(). NOTE: This function supports multi-frame input.

Parameters
src - The compressed frame(s)
srcSize - The size of the compressed frame(s)
Returns
The decompression margin, or an error that can be checked with ZSTD_isError().
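
The sketch below (hypothetical helper) illustrates the layout described above: the compressed frame is moved to the tail of a single buffer of at least originalSize + margin bytes, then decompressed into the front of the same buffer. ZSTD_decompressionMargin() is part of the static-linking-only section, hence the define:

#define ZSTD_STATIC_LINKING_ONLY
#include <assert.h>
#include <string.h>
#include <zstd.h>

/* Sketch: in-place decompression. The caller provides a single buffer of at
 * least (originalSize + margin) bytes; the compressed frame is moved to the
 * end of that buffer, and decompression writes into the front of it. */
static size_t decompress_in_place(void* buffer, size_t bufferSize,
                                  const void* frame, size_t frameSize,
                                  size_t originalSize)
{
    size_t const margin = ZSTD_decompressionMargin(frame, frameSize);
    char* input;
    if (ZSTD_isError(margin)) return margin;
    assert(bufferSize >= originalSize + margin);    /* required buffer layout */
    assert(bufferSize >= frameSize);
    input = (char*)buffer + bufferSize - frameSize; /* input sits at the very end */
    memmove(input, frame, frameSize);
    return ZSTD_decompress(buffer, originalSize, input, frameSize);
}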

◆ ZSTD_decompressStream()

size_t ZSTD_decompressStream ( ZSTD_DStream zds,
ZSTD_outBuffer output,
ZSTD_inBuffer input 
)

ZSTD_decompressStream() : Streaming decompression function. Call it repetitively to consume the full input, updating it as necessary. The function will update both input and output pos fields, exposing the current state via these fields:

  • input.pos < input.size : some input remains; the caller should provide the remaining input on the next call.
  • output.pos < output.size : the decoder flushed its internal output buffer.
  • output.pos == output.size : unflushed data is potentially present in the internal buffers; check the ZSTD_decompressStream() return value and, if it is > 0, invoke the function again to flush the remaining data to output. Note : with no additional input, the amount of data flushed is <= ZSTD_BLOCKSIZE_MAX.

Returns
: 0 when a frame is completely decoded and fully flushed, or an error code, which can be tested using ZSTD_isError(), or any other value > 0, which means there is some decoding or flushing to do to complete the current frame.

Note: when an operation returns with an error code, the @zds state may be left in an undefined state. It's UB to invoke ZSTD_decompressStream() on such a state. In order to re-use such a state, it must first be reset, which can be done explicitly (ZSTD_DCtx_reset()), or is implied by operations starting a new decompression job (ZSTD_initDStream(), ZSTD_decompressDCtx(), ZSTD_decompress_usingDict())
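
A condensed sketch of the corresponding streaming decompression loop (hypothetical helper; buffers sized per ZSTD_DStreamInSize()/ZSTD_DStreamOutSize(), error handling reduced to a flag):

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Sketch: stream-decompress fin into fout. Returns 0 on success. */
static int stream_decompress(FILE* fin, FILE* fout)
{
    size_t const inSize  = ZSTD_DStreamInSize();
    size_t const outSize = ZSTD_DStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    int err = (!inBuf || !outBuf || !dctx);
    size_t readSz;

    while (!err && (readSz = fread(inBuf, 1, inSize, fin)) != 0) {
        ZSTD_inBuffer input = { inBuf, readSz, 0 };
        while (!err && input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            /* returns 0 when a frame is fully decoded and flushed,
             * >0 when more input or another call is needed, or an error code */
            size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
            if (ZSTD_isError(ret)) err = 1;
            else fwrite(outBuf, 1, output.pos, fout);
        }
    }
    ZSTD_freeDCtx(dctx);
    free(inBuf);
    free(outBuf);
    return err;
}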

◆ ZSTD_decompressStream_simpleArgs()

size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx dctx,
void *  dst,
size_t  dstCapacity,
size_t *  dstPos,
const void *  src,
size_t  srcSize,
size_t *  srcPos 
)

ZSTD_decompressStream_simpleArgs() : Same as ZSTD_decompressStream(), but using only integral types as arguments. This can be helpful for binders from dynamic languages which have trouble handling structures containing memory pointers.

◆ ZSTD_defaultCLevel()

int ZSTD_defaultCLevel ( void  )

default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+

◆ ZSTD_dParam_getBounds()

ZSTD_bounds ZSTD_dParam_getBounds ( ZSTD_dParameter  dParam)

ZSTD_dParam_getBounds() : All parameters must belong to an interval with lower and upper bounds, otherwise they will either trigger an error or be automatically clamped.

Returns
: a structure, ZSTD_bounds, which contains
  • an error status field, which must be tested using ZSTD_isError()
  • both lower and upper bounds, inclusive

◆ ZSTD_DStreamInSize()

size_t ZSTD_DStreamInSize ( void  )

recommended size for input buffer

◆ ZSTD_DStreamOutSize()

size_t ZSTD_DStreamOutSize ( void  )

recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances.

◆ ZSTD_endStream()

size_t ZSTD_endStream ( ZSTD_CStream zcs,
ZSTD_outBuffer output 
)

Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end).

◆ ZSTD_estimateCCtxSize()

size_t ZSTD_estimateCCtxSize ( int  maxCompressionLevel)

ZSTD_estimate*() : These functions make it possible to estimate memory usage of a future {D,C}Ctx, before its creation. This is useful in combination with ZSTD_initStatic(), which makes it possible to employ a static buffer for ZSTD_CCtx* state.

ZSTD_estimateCCtxSize() will provide a memory budget large enough to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2() associated with any compression level up to max specified one. The estimate will assume the input may be arbitrarily large, which is the worst case.

Note that the size estimation is specific for one-shot compression, it is not valid for streaming (see ZSTD_estimateCStreamSize*()) nor other potential ways of using a ZSTD_CCtx* state.

When srcSize can be bound by a known and rather "small" value, this knowledge can be used to provide a tighter budget estimation because the ZSTD_CCtx* state will need less memory for small inputs. This tighter estimation can be provided by employing more advanced functions ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.

Note : only single-threaded compression is supported. ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
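
As an illustration, the estimate can be paired with static allocation roughly as follows (sketch; ZSTD_initStaticCCtx() is the static-initialization entry point in the experimental section of zstd.h, and the workspace here is simply malloc'ed for brevity):

#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <zstd.h>

/* Sketch: place a CCtx for one-shot compression (levels up to maxLevel)
 * inside a caller-owned workspace of the estimated size. */
static ZSTD_CCtx* make_static_cctx(int maxLevel, void** workspaceOut)
{
    size_t const need = ZSTD_estimateCCtxSize(maxLevel);
    void* const workspace = malloc(need);      /* could be a static arena instead */
    ZSTD_CCtx* cctx;
    if (workspace == NULL) return NULL;
    cctx = ZSTD_initStaticCCtx(workspace, need);   /* NULL if the workspace is too small */
    if (cctx == NULL) { free(workspace); return NULL; }
    *workspaceOut = workspace;                 /* freed by the caller once the cctx is done */
    return cctx;
}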

◆ ZSTD_estimateCDictSize()

size_t ZSTD_estimateCDictSize ( size_t  dictSize,
int  compressionLevel 
)

ZSTD_estimate?DictSize() : ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). Note : dictionaries created by reference (ZSTD_dlm_byRef) are logically smaller.

◆ ZSTD_estimateCDictSize_advanced()

size_t ZSTD_estimateCDictSize_advanced ( size_t  dictSize,
ZSTD_compressionParameters  cParams,
ZSTD_dictLoadMethod_e  dictLoadMethod 
)

ZSTD_estimateCDictSize_advanced() : Estimate amount of memory that will be needed to create a dictionary with following arguments

◆ ZSTD_estimateCStreamSize()

size_t ZSTD_estimateCStreamSize ( int  maxCompressionLevel)

ZSTD_estimateCStreamSize() : ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression using any compression level up to the max specified one. It will also consider src size to be arbitrarily "large", which is a worst case scenario. If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. Note : CStream size estimation is only correct for single-threaded compression. ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. Size estimates assume that no external sequence producer is registered.

ZSTD_DStream memory budget depends on the frame's window size. This information can be passed manually, using ZSTD_estimateDStreamSize(), or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame(). Any frame requesting a window size larger than the specified maximum will be rejected. Note : if streaming is initialized with a ZSTD_init?Stream_usingDict() function, an internal ?Dict will be created, whose additional size is not estimated here. In that case, get the total size by adding ZSTD_estimate?DictSize.

◆ ZSTD_estimateDDictSize()

size_t ZSTD_estimateDDictSize ( size_t  dictSize,
ZSTD_dictLoadMethod_e  dictLoadMethod 
)

ZSTD_estimateDDictSize() : Estimate the amount of memory that will be needed to create a dictionary for decompression. Note : dictionaries created by reference (ZSTD_dlm_byRef) are smaller.

◆ ZSTD_findDecompressedSize()

unsigned long long ZSTD_findDecompressedSize ( const void *  src,
size_t  srcSize 
)

ZSTD_findDecompressedSize() : src should point to the start of a series of ZSTD encoded and/or skippable frames srcSize must be the exact size of this series (i.e. there should be a frame boundary at src + srcSize)

Returns
: - decompressed size of all data in all successive frames
  • if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
  • if an error occurred: ZSTD_CONTENTSIZE_ERROR

note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. When return==ZSTD_CONTENTSIZE_UNKNOWN, data to decompress could be any size. In which case, it's necessary to use streaming mode to decompress data. note 2 : decompressed size is always present when compression is done with ZSTD_compress() note 3 : decompressed size can be very large (64-bits value), potentially larger than what local system can handle as a single memory segment. In which case, it's necessary to use streaming mode to decompress data. note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. Always ensure result fits within application's authorized limits. Each application can set its own limits. note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to read each contained frame header. This is fast as most of the data is skipped, however it does mean that all frame data must be present and valid.

ZSTD_findDecompressedSize() : srcSize must be the exact length of some number of ZSTD compressed and/or skippable frames note: compatible with legacy mode

Returns
: decompressed size of the frames contained

◆ ZSTD_findFrameCompressedSize()

size_t ZSTD_findFrameCompressedSize ( const void *  src,
size_t  srcSize 
)

ZSTD_findFrameCompressedSize() : Requires v1.4.0+ src should point to the start of a ZSTD frame or skippable frame. srcSize must be >= first frame size

Returns
: the compressed size of the first frame starting at src, suitable to pass as srcSize to ZSTD_decompress or similar, or an error code if input is invalid Note 1: this method is called _find*() because it's not enough to read the header, it may have to scan through the frame's content, to reach its end. Note 2: this method also works with Skippable Frames. In which case, it returns the size of the complete skippable frame, which is always equal to its content size + 8 bytes for headers.

ZSTD_findFrameCompressedSize() : See docs in zstd.h Note: compatible with legacy mode

◆ ZSTD_flushStream()

size_t ZSTD_flushStream ( ZSTD_CStream zcs,
ZSTD_outBuffer output 
)

Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush).

ZSTD_flushStream() :

Returns
: amount of data remaining to flush

◆ ZSTD_frameHeaderSize()

size_t ZSTD_frameHeaderSize ( const void *  src,
size_t  srcSize 
)

ZSTD_frameHeaderSize() : srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.

Returns
: size of the Frame Header, or an error code (if srcSize is too small)


◆ ZSTD_freeCDict()

size_t ZSTD_freeCDict ( ZSTD_CDict CDict)

ZSTD_freeCDict() : Function frees memory allocated by ZSTD_createCDict(). If a NULL pointer is passed, no operation is performed.

◆ ZSTD_freeDDict()

size_t ZSTD_freeDDict ( ZSTD_DDict ddict)

ZSTD_freeDDict() : Function frees memory allocated with ZSTD_createDDict(). If a NULL pointer is passed, no operation is performed.

◆ ZSTD_fseBitCost()

size_t ZSTD_fseBitCost ( FSE_CTable const *  ctable,
unsigned const *  count,
unsigned const  max 
)

Returns the cost in bits of encoding the distribution in count using ctable. Returns an error if ctable cannot represent all the symbols in count.

◆ ZSTD_generateSequences()

size_t ZSTD_generateSequences ( ZSTD_CCtx zc,
ZSTD_Sequence outSeqs,
size_t  outSeqsCapacity,
const void *  src,
size_t  srcSize 
)

ZSTD_generateSequences() : WARNING: This function is meant for debugging and informational purposes ONLY! Its implementation is flawed, and it will be deleted in a future version. It is not guaranteed to succeed, as there are several cases where it will give up and fail. You should NOT use this function in production code.

This function is deprecated, and will be removed in a future version.

Generate sequences using ZSTD_compress2(), given a source buffer.

Parameters
zc - The compression context to be used for ZSTD_compress2(). Set any compression parameters you need on this context.
outSeqs - The output sequences buffer, of capacity outSeqsCapacity.
outSeqsCapacity - The size of the output sequences buffer. ZSTD_sequenceBound(srcSize) is an upper bound on the number of sequences that can be generated.
src - The source buffer to generate sequences from, of size srcSize.
srcSize - The size of the source buffer.

Each block will end with a dummy sequence with offset == 0, matchLength == 0, and litLength == length of the last literals. litLength may be == 0, and if so, the sequence (of: 0, ml: 0, ll: 0) simply acts as a block delimiter.

Returns
The number of sequences generated, necessarily less than ZSTD_sequenceBound(srcSize), or an error code that can be checked with ZSTD_isError().

◆ ZSTD_getBlockSize()

size_t ZSTD_getBlockSize ( const ZSTD_CCtx cctx)

Block level API (DEPRECATED)

This API is deprecated in favor of the regular compression API. You can get the frame header down to 2 bytes by setting:

  • ZSTD_c_format = ZSTD_f_zstd1_magicless
  • ZSTD_c_contentSizeFlag = 0
  • ZSTD_c_checksumFlag = 0
  • ZSTD_c_dictIDFlag = 0

This API is not as well tested as our normal API, so we recommend not using it. We will be removing it in a future version. If the normal API doesn't provide the functionality you need, please open a GitHub issue.

Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to take charge of the metadata needed to regenerate the data, such as compressed and content sizes.

A few rules to respect :

  • Compressing and decompressing require a context structure
    • Use ZSTD_createCCtx() and ZSTD_createDCtx()
  • It is necessary to init context before starting
    • compression : any ZSTD_compressBegin*() variant, including with dictionary
    • decompression : any ZSTD_decompressBegin*() variant, including with dictionary
  • Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
    • If input is larger than a block size, it's necessary to split input data into multiple blocks
    • For inputs larger than a single block, consider using regular ZSTD_compress() instead. Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
  • When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) ! ===> In which case, nothing is produced into dst !
    • User must test for such outcome and deal directly with uncompressed data
    • A block cannot be declared incompressible if the ZSTD_compressBlock() return value was != 0. Doing so would mess up the statistics history, leading to potential data corruption.
    • ZSTD_decompressBlock() doesn't accept uncompressed data as input !!
    • In case of multiple successive blocks, should some of them be uncompressed, decoder must be informed of their existence in order to follow proper history. Use ZSTD_insertBlock() for such a case.

◆ ZSTD_getcBlockSize()

size_t ZSTD_getcBlockSize ( const void *  src,
size_t  srcSize,
blockProperties_t bpPtr 
)

ZSTD_getcBlockSize() : Provides the size of compressed block from block header src

◆ ZSTD_getCParams()

ZSTD_compressionParameters ZSTD_getCParams ( int  compressionLevel,
unsigned long long  srcSizeHint,
size_t  dictSize 
)

ZSTD_getCParams() :

Returns
ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. estimatedSrcSize value is optional, select 0 if not known

ZSTD_getCParams() :

Returns
ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. Size values are optional, provide 0 if not known or unused

◆ ZSTD_getCParamsFromCDict()

ZSTD_compressionParameters ZSTD_getCParamsFromCDict ( const ZSTD_CDict cdict)

ZSTD_getCParamsFromCDict() : as the name implies

◆ ZSTD_getDecompressedSize()

unsigned long long ZSTD_getDecompressedSize ( const void *  src,
size_t  srcSize 
)

ZSTD_getDecompressedSize() (obsolete): This function is now obsolete, in favor of ZSTD_getFrameContentSize(). Both functions work the same way, but ZSTD_getDecompressedSize() blends "empty", "unknown" and "error" results to the same return value (0), while ZSTD_getFrameContentSize() gives them separate return values.

Returns
: decompressed size of src frame content if known and not empty, 0 otherwise.

ZSTD_getDecompressedSize() : compatible with legacy mode

Returns
: decompressed size if known, 0 otherwise note : 0 can mean any of the following :
  • frame content is empty
  • decompressed size field is not present in frame header
  • frame header unknown / not supported
  • frame header not complete (srcSize too small)

◆ ZSTD_getDictID_fromCDict()

unsigned ZSTD_getDictID_fromCDict ( const ZSTD_CDict cdict)

ZSTD_getDictID_fromCDict() : Requires v1.5.0+ Provides the dictID of the dictionary loaded into cdict. If

Returns
== 0, the dictionary is not conformant to Zstandard specification, or empty. Non-conformant dictionaries can still be loaded, but as content-only dictionaries.


◆ ZSTD_getDictID_fromDDict()

unsigned ZSTD_getDictID_fromDDict ( const ZSTD_DDict ddict)

ZSTD_getDictID_fromDDict() : Requires v1.4.0+ Provides the dictID of the dictionary loaded into ddict. If

Returns
== 0, the dictionary is not conformant to Zstandard specification, or empty. Non-conformant dictionaries can still be loaded, but as content-only dictionaries.


◆ ZSTD_getDictID_fromDict()

unsigned ZSTD_getDictID_fromDict ( const void *  dict,
size_t  dictSize 
)

ZSTD_getDictID_fromDict() : Requires v1.4.0+ Provides the dictID stored within dictionary. if

Returns
== 0, the dictionary is not conformant with Zstandard specification. It can still be loaded, but as a content-only dictionary.


◆ ZSTD_getDictID_fromFrame()

unsigned ZSTD_getDictID_fromFrame ( const void *  src,
size_t  srcSize 
)

ZSTD_getDictID_fromFrame() : Requires v1.4.0+ Provides the dictID required to decompress the frame stored within src. If

Returns
== 0, the dictID could not be decoded. This could be for one of the following reasons :
  • The frame does not require a dictionary to be decoded (most common case).
  • The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information. Note : this use case also happens when using a non-conformant dictionary.
  • srcSize is too small, and as a result, the frame header could not be decoded (only possible if srcSize < ZSTD_FRAMEHEADERSIZE_MAX).
  • This is not a Zstandard frame. When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.

ZSTD_getDictID_fromFrame() : Provides the dictID required to decompress frame stored within src. If

Returns
== 0, the dictID could not be decoded. This could for one of the following reasons :
  • The frame does not require a dictionary (most common case).
  • The frame was built with dictID intentionally removed. Needed dictionary is a hidden piece of information. Note : this use case also happens when using a non-conformant dictionary.
  • srcSize is too small, and as a result, frame header could not be decoded. Note : possible if srcSize < ZSTD_FRAMEHEADERSIZE_MAX.
  • This is not a Zstandard frame. When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.
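A brief sketch, assuming the standard public prototype of ZSTD_getDictID_fromFrame(); the lookupDDict() helper is hypothetical application-side code, not part of zstd:

#include <zstd.h>

/* hypothetical application-side lookup of a digested dictionary by its ID */
extern const ZSTD_DDict* lookupDDict(unsigned dictID);

static const ZSTD_DDict* selectDDict(const void* src, size_t srcSize)
{
    unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
    if (dictID == 0) return NULL;   /* no dictionary required, or dictID not decodable */
    return lookupDDict(dictID);
}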

◆ ZSTD_getErrorCode()

ZSTD_ErrorCode ZSTD_getErrorCode ( size_t  code)

ZSTD_getErrorCode() : convert a size_t function result into a proper ZSTD_ErrorCode enum

◆ ZSTD_getErrorName()

const char * ZSTD_getErrorName ( size_t  code)

ZSTD_getErrorName() : provides a readable error string from a size_t function result (useful for debugging)

◆ ZSTD_getErrorString()

const char * ZSTD_getErrorString ( ZSTD_ErrorCode  code)

ZSTD_getErrorString() : same as ZSTD_getErrorName(), but takes a ZSTD_ErrorCode enum argument instead of a size_t result
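A self-contained sketch of the usual error-handling pattern around size_t-returning zstd calls, using the stable public API (ZSTD_compress(), ZSTD_isError(), ZSTD_getErrorName()):

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "hello zstd";
    char dst[128];
    /* every size_t-returning zstd call should be checked with ZSTD_isError() */
    size_t const r = ZSTD_compress(dst, sizeof(dst), src, strlen(src), 3);
    if (ZSTD_isError(r)) {
        fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(r));
        return 1;
    }
    printf("compressed to %zu bytes\n", r);
    return 0;
}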

◆ ZSTD_getFrameContentSize()

unsigned long long ZSTD_getFrameContentSize ( const void *  src,
size_t  srcSize 
)

ZSTD_getFrameContentSize() : compatible with legacy mode

Returns
: decompressed size of the single frame pointed to by src if known, otherwise
  • ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
  • ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)

◆ ZSTD_getFrameHeader()

size_t ZSTD_getFrameHeader ( ZSTD_FrameHeader zfhPtr,
const void *  src,
size_t  srcSize 
)

ZSTD_getFrameHeader() : decode Frame Header into zfhPtr, or request a larger srcSize. Note : this function does not consume input, it only reads it.

Returns
: 0 => header is complete, zfhPtr is correctly filled, >0 => srcSize is too small, the return value is the wanted srcSize amount and zfhPtr is not filled, or an error code, which can be tested using ZSTD_isError()
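A usage sketch covering the three outcomes, assuming ZSTD_STATIC_LINKING_ONLY is defined so that the ZSTD_getFrameHeader() prototype and the ZSTD_FrameHeader fields referenced here (windowSize, dictID, checksumFlag) are visible:

#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

static size_t inspectFrame(const void* src, size_t srcSize)
{
    ZSTD_FrameHeader zfh;
    size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ZSTD_isError(r)) { printf("not a zstd frame: %s\n", ZSTD_getErrorName(r)); return r; }
    if (r > 0) { printf("need %zu bytes to decode the header\n", r); return r; }   /* srcSize too small */
    printf("windowSize=%llu dictID=%u checksum=%u\n", zfh.windowSize, zfh.dictID, zfh.checksumFlag);
    return 0;   /* header complete, zfh is filled */
}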

◆ ZSTD_getFrameHeader_advanced()

size_t ZSTD_getFrameHeader_advanced ( ZSTD_FrameHeader zfhPtr,
const void *  src,
size_t  srcSize,
ZSTD_format_e  format 
)

ZSTD_getFrameHeader_advanced() : same as ZSTD_getFrameHeader(), with added capability to select a format (like ZSTD_f_zstd1_magicless). Note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.

Returns
: 0 if zfhPtr is correctly filled, >0 if srcSize is too small (the value is the wanted srcSize amount), or an error code, which can be tested using ZSTD_isError()

◆ ZSTD_getLowestMatchIndex()

MEM_STATIC U32 ZSTD_getLowestMatchIndex ( const ZSTD_MatchState_t ms,
U32  curr,
unsigned  windowLog 
)

Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.

◆ ZSTD_getLowestPrefixIndex()

MEM_STATIC U32 ZSTD_getLowestPrefixIndex ( const ZSTD_MatchState_t ms,
U32  curr,
unsigned  windowLog 
)

Returns the lowest allowed match index in the prefix.

◆ ZSTD_getParams()

ZSTD_parameters ZSTD_getParams ( int  compressionLevel,
unsigned long long  srcSizeHint,
size_t  dictSize 
)

ZSTD_getParams() : same idea as ZSTD_getCParams(), but

Returns
a full ZSTD_parameters object instead of the sub-component ZSTD_compressionParameters. All fields of ZSTD_frameParameters are set to default values : contentSize=1, checksum=0, noDictID=0

◆ ZSTD_getSequenceLength()

MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength ( SeqStore_t const *  seqStore,
SeqDef const *  seq 
)

Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.

◆ ZSTD_initCStream()

size_t ZSTD_initCStream ( ZSTD_CStream zcs,
int  compressionLevel 
)

Equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);

Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API to compress with a dictionary.
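A minimal sketch of the replacement pattern using the advanced streaming API, assuming the stable public prototypes of ZSTD_CCtx_reset(), ZSTD_CCtx_setParameter() and ZSTD_compressStream2(); dstCap is assumed to be at least ZSTD_compressBound(srcSize), so the frame is flushed in a single call:

#include <zstd.h>

static size_t streamCompress(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                             const void* src, size_t srcSize, int level)
{
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCap, 0 };
    size_t remaining;
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    ZSTD_CCtx_refCDict(cctx, NULL);                                   /* clear any dictionary */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);    /* compress and finish the frame */
    if (ZSTD_isError(remaining)) return remaining;
    /* remaining == 0 here because dstCap >= ZSTD_compressBound(srcSize) */
    return out.pos;                                                   /* number of compressed bytes */
}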

◆ ZSTD_initCStream_advanced()

size_t ZSTD_initCStream_advanced ( ZSTD_CStream zcs,
const void *  dict,
size_t  dictSize,
ZSTD_parameters  params,
unsigned long long  pledgedSrcSize 
)

ZSTD_initCStream_advanced() : This function is DEPRECATED, and is equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_setParams(zcs, params);
ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);

dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. This prototype will generate compilation warnings.

◆ ZSTD_initCStream_internal()

size_t ZSTD_initCStream_internal ( ZSTD_CStream zcs,
const void *  dict,
size_t  dictSize,
const ZSTD_CDict cdict,
const ZSTD_CCtx_params params,
unsigned long long  pledgedSrcSize 
)

ZSTD_initCStream_internal() : Private use only (for lib/compress only; used by zstdmt_compress.c). Init streaming operation. Expects params to be valid. Must receive dict, or cdict, or neither, but not both.

Returns
: 0, or an error code

◆ ZSTD_initCStream_srcSize()

size_t ZSTD_initCStream_srcSize ( ZSTD_CStream zcs,
int  compressionLevel,
unsigned long long  pledgedSrcSize 
)

ZSTD_initCStream_srcSize() : This function is DEPRECATED, and equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);

pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. This prototype will generate compilation warnings.

◆ ZSTD_initCStream_usingCDict()

size_t ZSTD_initCStream_usingCDict ( ZSTD_CStream zcs,
const ZSTD_CDict cdict 
)

ZSTD_initCStream_usingCDict() : This function is DEPRECATED, and equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_refCDict(zcs, cdict);

note : cdict will just be referenced, and must outlive compression session This prototype will generate compilation warnings.

◆ ZSTD_initCStream_usingCDict_advanced()

size_t ZSTD_initCStream_usingCDict_advanced ( ZSTD_CStream zcs,
const ZSTD_CDict cdict,
ZSTD_frameParameters  fParams,
unsigned long long  pledgedSrcSize 
)

ZSTD_initCStream_usingCDict_advanced() : This function is DEPRECATED, and is equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_setFParams(zcs, fParams);
ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
ZSTD_CCtx_refCDict(zcs, cdict);

same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. This prototype will generate compilation warnings.

◆ ZSTD_initCStream_usingDict()

size_t ZSTD_initCStream_usingDict ( ZSTD_CStream zcs,
const void *  dict,
size_t  dictSize,
int  compressionLevel 
)

ZSTD_initCStream_usingDict() : This function is DEPRECATED, and is equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);

Creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. This prototype will generate compilation warnings.

◆ ZSTD_initDStream()

size_t ZSTD_initDStream ( ZSTD_DStream zds)

ZSTD_initDStream() : Initialize/reset DStream state for new decompression operation. Call before new decompression operation using same DStream.

Note : This function is redundant with the advanced API and equivalent to:

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
ZSTD_DCtx_refDDict(zds, NULL);

◆ ZSTD_initDStream_usingDDict()

size_t ZSTD_initDStream_usingDDict ( ZSTD_DStream zds,
const ZSTD_DDict ddict 
)

This function is deprecated, and is equivalent to:

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
ZSTD_DCtx_refDDict(zds, ddict);

note : ddict is referenced, it must outlive decompression session

◆ ZSTD_initDStream_usingDict()

size_t ZSTD_initDStream_usingDict ( ZSTD_DStream zds,
const void *  dict,
size_t  dictSize 
)

This function is deprecated, and is equivalent to:

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
ZSTD_DCtx_loadDictionary(zds, dict, dictSize);

note: no dictionary will be used if dict == NULL or dictSize < 8
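A matching sketch for the decompression side, using the advanced API calls the deprecated initializers expand to; dstCap is assumed large enough to hold the whole decompressed frame:

#include <zstd.h>

static size_t streamDecompress(ZSTD_DCtx* dctx, void* dst, size_t dstCap,
                               const void* src, size_t srcSize)
{
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCap, 0 };
    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);
    ZSTD_DCtx_refDDict(dctx, NULL);                        /* no dictionary */
    while (in.pos < in.size) {
        size_t const r = ZSTD_decompressStream(dctx, &out, &in);
        if (ZSTD_isError(r)) return r;
        if (r == 0) break;                                 /* frame fully decoded and flushed */
    }
    return out.pos;                                        /* number of decompressed bytes */
}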

◆ ZSTD_initStaticCCtx()

ZSTD_CCtx * ZSTD_initStaticCCtx ( void *  workspace,
size_t  workspaceSize 
)

ZSTD_initStatic*() : Initialize an object using a pre-allocated fixed-size buffer. workspace: The memory area to emplace the object into. Provided pointer must be 8-bytes aligned. Buffer must outlive object. workspaceSize: Use ZSTD_estimate*Size() to determine how large workspace must be to support target scenario.

Returns
: pointer to object (same address as workspace, just different type), or NULL if error (size too small, incorrect alignment, etc.)

Note : zstd will never resize nor malloc() when using a static buffer. If the object requires more memory than available, zstd will just error out (typically ZSTD_error_memory_allocation).
Note 2 : there is no corresponding "free" function. Since workspace is allocated externally, it must be freed externally too.
Note 3 : cParams : use ZSTD_getCParams() to convert a compression level into its associated cParams.
Limitation 1 : currently not compatible with internal dictionary creation, triggered by ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
Limitation 2 : a static cctx is currently not compatible with multi-threading.
Limitation 3 : a static dctx is incompatible with legacy support.
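A compact sketch of the static-allocation workflow, assuming ZSTD_STATIC_LINKING_ONLY is defined (ZSTD_estimateCCtxSize() and ZSTD_initStaticCCtx() are part of the static-only interface in stock zstd):

#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    int const level = 3;
    size_t const wkspSize = ZSTD_estimateCCtxSize(level);
    void* const wksp = malloc(wkspSize);                     /* must be 8-byte aligned; malloc() is */
    if (wksp == NULL) return 1;
    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
    if (cctx == NULL) { free(wksp); return 1; }              /* workspace too small or misaligned */
    {
        const char src[] = "static allocation example";
        char dst[128];
        size_t const csize = ZSTD_compressCCtx(cctx, dst, sizeof(dst), src, sizeof(src), level);
        printf("%s\n", ZSTD_isError(csize) ? ZSTD_getErrorName(csize) : "ok");
    }
    free(wksp);                                              /* no ZSTD_freeCCtx() for a static cctx */
    return 0;
}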

◆ ZSTD_initStaticCDict()

const ZSTD_CDict * ZSTD_initStaticCDict ( void *  workspace,
size_t  workspaceSize,
const void *  dict,
size_t  dictSize,
ZSTD_dictLoadMethod_e  dictLoadMethod,
ZSTD_dictContentType_e  dictContentType,
ZSTD_compressionParameters  cParams 
)

ZSTD_initStaticCDict() : Generate a digested dictionary in the provided memory area. workspace: The memory area to emplace the dictionary into. Provided pointer must be 8-bytes aligned. It must outlive dictionary usage. workspaceSize: Use ZSTD_estimateCDictSize() to determine how large workspace must be. cParams : use ZSTD_getCParams() to transform a compression level into its relevant cParams.

Returns
: pointer to ZSTD_CDict*, or NULL if error (size too small) Note : there is no corresponding "free" function. Since workspace was allocated externally, it must be freed externally.

◆ ZSTD_initStaticCStream()

ZSTD_CStream * ZSTD_initStaticCStream ( void *  workspace,
size_t  workspaceSize 
)

◆ ZSTD_initStaticDStream()

ZSTD_DStream * ZSTD_initStaticDStream ( void *  workspace,
size_t  workspaceSize 
)

same as ZSTD_initStaticDCtx()

◆ ZSTD_insertBlock()

size_t ZSTD_insertBlock ( ZSTD_DCtx dctx,
const void *  blockStart,
size_t  blockSize 
)

ZSTD_insertBlock() : insert an uncompressed src block into dctx history. Useful for multi-block decompression and for tracking uncompressed blocks.

◆ ZSTD_isDeterministicBuild()

int ZSTD_isDeterministicBuild ( void  )

ZSTD_isDeterministicBuild() : Returns 1 if the library is built using standard compilation flags, and participates in determinism guarantees with other builds of the same version. If this function returns 0, it means the library was compiled with non-standard compilation flags that change the output of the compressor. This is mainly used for Zstd's determinism test suite, which is only run when this function returns 1.

◆ ZSTD_isError()

unsigned ZSTD_isError ( size_t  code)

ZSTD_isError() : tells if a size_t function result is an error code. This symbol is required for external callers.

◆ ZSTD_isFrame()

unsigned ZSTD_isFrame ( const void *  buffer,
size_t  size 
)

ZSTD_isFrame() : Tells if the content of buffer starts with a valid Frame Identifier. Note : Frame Identifier is 4 bytes. If size < 4,

Returns
will always be 0. Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. Note 3 : Skippable Frame Identifiers are considered valid.

◆ ZSTD_isSkippableFrame()

unsigned ZSTD_isSkippableFrame ( const void *  buffer,
size_t  size 
)

ZSTD_isSkippableFrame() : Tells if the content of buffer starts with a valid Frame Identifier for a skippable frame. Note : Frame Identifier is 4 bytes. If size < 4,

Returns
will always be 0.

◆ ZSTD_ldm_adjustParameters()

void ZSTD_ldm_adjustParameters ( ldmParams_t params,
ZSTD_compressionParameters const *  cParams 
)

ZSTD_ldm_adjustParameters() : If the params->hashRateLog is not set, set it to its default value based on windowLog and params->hashLog.

Ensures that params->bucketSizeLog is <= params->hashLog (setting it to params->hashLog if it is not).

Ensures that the minMatchLength >= targetLength during optimal parsing.

◆ ZSTD_ldm_blockCompress()

size_t ZSTD_ldm_blockCompress ( RawSeqStore_t rawSeqStore,
ZSTD_MatchState_t ms,
SeqStore_t seqStore,
U32  rep[ZSTD_REP_NUM],
ZSTD_ParamSwitch_e  useRowMatchFinder,
void const *  src,
size_t  srcSize 
)

ZSTD_ldm_blockCompress():

Compresses a block using the predefined sequences, along with a secondary block compressor. The literals section of every sequence is passed to the secondary block compressor, and those sequences are interspersed with the predefined sequences. Returns the length of the last literals. Updates rawSeqStore.pos to indicate how many sequences have been consumed. rawSeqStore.seq may also be updated to split the last sequence between two blocks.

Returns
The length of the last literals.

NOTE: The source must be at most the maximum block size, but the predefined sequences can be any size, and may be longer than the block. In the case that they are longer than the block, the last sequences may need to be split into two. We handle that case correctly, and update rawSeqStore appropriately. NOTE: This function does not return any errors.

◆ ZSTD_ldm_generateSequences()

size_t ZSTD_ldm_generateSequences ( ldmState_t ldms,
RawSeqStore_t sequences,
ldmParams_t const *  params,
void const *  src,
size_t  srcSize 
)

ZSTD_ldm_generateSequences():

Generates the sequences using the long distance match finder. Generates long range matching sequences in sequences, which parse a prefix of the source. sequences must be large enough to store every sequence, which can be checked with ZSTD_ldm_getMaxNbSeq().

Returns
0 or an error code.

NOTE: The user must have called ZSTD_window_update() for all of the input they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. NOTE: This function returns an error if it runs out of space to store sequences.

◆ ZSTD_ldm_getMaxNbSeq()

size_t ZSTD_ldm_getMaxNbSeq ( ldmParams_t  params,
size_t  maxChunkSize 
)

ZSTD_ldm_getMaxNbSeq() : Return an upper bound on the number of sequences that can be produced by the long distance matcher, or 0 if LDM is disabled.

◆ ZSTD_ldm_getTableSize()

size_t ZSTD_ldm_getTableSize ( ldmParams_t  params)

ZSTD_ldm_getTableSize() : Estimate the space needed for long distance matching tables or 0 if LDM is disabled.

◆ ZSTD_ldm_skipSequences()

void ZSTD_ldm_skipSequences ( RawSeqStore_t rawSeqStore,
size_t  srcSize,
U32 const  minMatch 
)

ZSTD_ldm_skipSequences():

Skip past srcSize bytes worth of sequences in rawSeqStore. Avoids emitting matches less than minMatch bytes. Must be called for data that is not passed to ZSTD_ldm_blockCompress().

◆ ZSTD_loadDEntropy()

size_t ZSTD_loadDEntropy ( ZSTD_entropyDTables_t entropy,
const void *const  dict,
size_t const  dictSize 
)

ZSTD_loadDEntropy() : dict : must point at beginning of a valid zstd dictionary.

Returns
: size of dictionary header (size of magic number + dict ID + entropy tables)

◆ ZSTD_matchState_dictMode()

MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode ( const ZSTD_MatchState_t ms)

ZSTD_matchState_dictMode(): Inspects the provided matchState and figures out what dictMode should be passed to the compressor.

◆ ZSTD_maxCLevel()

int ZSTD_maxCLevel ( void  )

maximum compression level available

◆ ZSTD_maybeNullPtrAdd()

MEM_STATIC void* ZSTD_maybeNullPtrAdd ( void *  ptr,
ptrdiff_t  add 
)

Helper function to add to a pointer that works around C's undefined behavior of adding 0 to NULL.

Returns
ptr + add except it defines NULL + 0 == NULL.

◆ ZSTD_mergeBlockDelimiters()

size_t ZSTD_mergeBlockDelimiters ( ZSTD_Sequence sequences,
size_t  seqsSize 
)

ZSTD_mergeBlockDelimiters() : Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals by merging them into the literals of the next sequence.

As such, the final generated result has no explicit representation of block boundaries, and the final last literals segment is not represented in the sequences.

The output of this function can be fed into ZSTD_compressSequences() with CCtx setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters

Returns
: number of sequences left after merging

◆ ZSTD_minCLevel()

int ZSTD_minCLevel ( void  )

minimum negative compression level allowed, requires v1.4.0+

◆ ZSTD_overlapCopy8()

HINT_INLINE void ZSTD_overlapCopy8 ( BYTE **  op,
BYTE const **  ip,
size_t  offset 
)

ZSTD_overlapCopy8() : Copies 8 bytes from ip to op and updates op and ip where ip <= op. If the offset is < 8 then the offset is spread to at least 8 bytes.

Precondition: *ip <= *op Postcondition: *op - *ip >= 8

◆ ZSTD_readSkippableFrame()

size_t ZSTD_readSkippableFrame ( void *  dst,
size_t  dstCapacity,
unsigned *  magicVariant,
const void *  src,
size_t  srcSize 
)

ZSTD_readSkippableFrame() : Retrieves the content of a zstd skippable frame starting at @src, and writes it to the @dst buffer.

The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written, i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. It can be NULL if the caller is not interested in the magicVariant.

Returns an error if the destination buffer is not large enough, or if the frame is not a valid skippable frame.

Returns
: number of bytes written or a ZSTD error.

◆ ZSTD_reduceTable_internal()

FORCE_INLINE_TEMPLATE void ZSTD_reduceTable_internal ( U32 *const  table,
U32 const  size,
U32 const  reducerValue,
int const  preserveMark 
)

ZSTD_reduceTable() : reduce table indexes by reducerValue, or squash to zero. PreserveMark preserves "unsorted mark" for btlazy2 strategy. It must be set to a clear 0/1 value, to remove branch during inlining. Presume table size is a multiple of ZSTD_ROWSIZE to help auto-vectorization

◆ ZSTD_registerSequenceProducer()

void ZSTD_registerSequenceProducer ( ZSTD_CCtx cctx,
void *  sequenceProducerState,
ZSTD_sequenceProducer_F  sequenceProducer 
)

ZSTD_registerSequenceProducer() : Instruct zstd to use a block-level external sequence producer function.

The sequenceProducerState must be initialized by the caller, and the caller is responsible for managing its lifetime. This parameter is sticky across compressions. It will remain set until the user explicitly resets compression parameters.

Sequence producer registration is considered to be an "advanced parameter", part of the "advanced API". This means it will only have an effect on compression APIs which respect advanced parameters, such as compress2() and compressStream2(). Older compression APIs such as compressCCtx(), which predate the introduction of "advanced parameters", will ignore any external sequence producer setting.

The sequence producer can be "cleared" by registering a NULL function pointer. This removes all limitations described above in the "LIMITATIONS" section of the API docs.

The user is strongly encouraged to read the full API documentation (above) before calling this function.

◆ ZSTD_resetCStream()

size_t ZSTD_resetCStream ( ZSTD_CStream zcs,
unsigned long long  pledgedSrcSize 
)

ZSTD_resetCStream() : This function is DEPRECATED, and is equivalent to:

ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);

Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be explicitly specified.

Starts a new frame, using the same parameters as the previous frame. This is typically useful to skip the dictionary loading stage, since the dictionary is reused in place. Note that zcs must be initialized at least once before using ZSTD_resetCStream(). If pledgedSrcSize is not known at reset time, use the macro ZSTD_CONTENTSIZE_UNKNOWN. If pledgedSrcSize > 0, its value must be correct, as it will be written in the header and checked at the end. For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, but it will change to mean "empty" in a future version, so use the macro ZSTD_CONTENTSIZE_UNKNOWN instead.

Returns
: 0, or an error code (which can be tested using ZSTD_isError()) This prototype will generate compilation warnings.

◆ ZSTD_resetDStream()

size_t ZSTD_resetDStream ( ZSTD_DStream zds)

This function is deprecated, and is equivalent to:

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);

reuse decompression parameters from previous init; saves dictionary loading

◆ ZSTD_rollingHash_compute()

MEM_STATIC U64 ZSTD_rollingHash_compute ( void const *  buf,
size_t  size 
)

ZSTD_rollingHash_compute() : Compute the rolling hash value of the buffer.

◆ ZSTD_rollingHash_primePower()

MEM_STATIC U64 ZSTD_rollingHash_primePower ( U32  length)

ZSTD_rollingHash_primePower() : Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash over a window of length bytes.

◆ ZSTD_rollingHash_rotate()

MEM_STATIC U64 ZSTD_rollingHash_rotate ( U64  hash,
BYTE  toRemove,
BYTE  toAdd,
U64  primePower 
)

ZSTD_rollingHash_rotate() : Rotate the rolling hash by one byte.
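An illustrative sketch of the compute / primePower / rotate relationship for a generic polynomial rolling hash; the constant and exact formula here are examples for exposition and are not claimed to match the library's internal implementation:

#include <stdint.h>
#include <stddef.h>

#define ROLL_PRIME 0x9E3779B185EBCA87ULL   /* example 64-bit odd multiplier, not zstd's constant */

static uint64_t roll_primePower(size_t len)   /* prime^(len-1), used to drop the oldest byte */
{
    uint64_t p = 1;
    size_t i;
    for (i = 1; i < len; ++i) p *= ROLL_PRIME;
    return p;
}

static uint64_t roll_compute(const unsigned char* buf, size_t len)
{
    uint64_t h = 0;
    size_t i;
    for (i = 0; i < len; ++i) h = h * ROLL_PRIME + buf[i];
    return h;
}

static uint64_t roll_rotate(uint64_t hash, unsigned char toRemove, unsigned char toAdd, uint64_t primePower)
{
    hash -= (uint64_t)toRemove * primePower;   /* drop the contribution of the oldest byte */
    return hash * ROLL_PRIME + toAdd;          /* shift the window and add the newest byte */
}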

◆ ZSTD_searchMax()

FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax ( ZSTD_MatchState_t ms,
const BYTE *  ip,
const BYTE *  iend,
size_t *  offsetPtr,
U32 const  mls,
U32 const  rowLog,
searchMethod_e const  searchMethod,
ZSTD_dictMode_e const  dictMode 
)

Searches for the longest match at ip. Dispatches to the correct implementation function based on the (searchMethod, dictMode, mls, rowLog). We use switch statements here instead of using an indirect function call through a function pointer because after Spectre and Meltdown mitigations, indirect function calls can be very costly, especially in the kernel.

NOTE: dictMode and searchMethod should be templated, so those switch statements should be optimized out. Only the mls & rowLog switches should be left.

Parameters
  ms             The match state.
  ip             The position to search at.
  iend           The end of the input data.
  [out] offsetPtr  Stores the match offset into this pointer.
  mls            The minimum search length, in the range [4, 6].
  rowLog         The row log (if applicable), in the range [4, 6].
  searchMethod   The search method to use (templated).
  dictMode       The dictMode (templated).
Returns
The length of the longest match found, or < mls if no match is found. If a match is found its offset is stored in offsetPtr.

◆ ZSTD_sequenceBound()

size_t ZSTD_sequenceBound ( size_t  srcSize)

ZSTD_sequenceBound() : srcSize : size of the input buffer

Returns
: upper-bound for the number of sequences that can be generated from a buffer of srcSize bytes

note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).

◆ ZSTD_sizeof_CCtx()

size_t ZSTD_sizeof_CCtx ( const ZSTD_CCtx cctx)

ZSTD_sizeof_*() : Requires v1.4.0+ These functions give the current memory usage of selected object. Note that object memory usage can evolve (increase or decrease) over time.

◆ ZSTD_storeSeq()

HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq ( SeqStore_t seqStorePtr,
size_t  litLength,
const BYTE *  literals,
const BYTE *  litLimit,
U32  offBase,
size_t  matchLength 
)

ZSTD_storeSeq() : Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). @matchLength : must be >= MINMATCH Allowed to over-read literals up to litLimit.

◆ ZSTD_storeSeqOnly()

HINT_INLINE UNUSED_ATTR void ZSTD_storeSeqOnly ( SeqStore_t seqStorePtr,
size_t  litLength,
U32  offBase,
size_t  matchLength 
)

ZSTD_storeSeqOnly() : Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. Literals themselves are not copied, but @litPtr is updated. @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). @matchLength : must be >= MINMATCH

◆ ZSTD_toFlushNow()

size_t ZSTD_toFlushNow ( ZSTD_CCtx cctx)

ZSTD_toFlushNow() : Tell how many bytes are ready to be flushed immediately. Useful for multithreading scenarios (nbWorkers >= 1). Probe the oldest active job, defined as oldest job not yet entirely flushed, and check its output buffer.

Returns
: amount of data stored in oldest job and ready to be flushed immediately. if
== 0, it means either :
  • there is no active job (could be checked with ZSTD_frameProgression()), or
  • oldest job is still actively compressing data, but everything it has produced has also been flushed so far, therefore flush speed is limited by production speed of oldest job irrespective of the speed of concurrent (and newer) jobs.

◆ ZSTD_versionNumber()

unsigned ZSTD_versionNumber ( void  )

ZSTD_versionNumber() : Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE).

◆ ZSTD_versionString()

const char * ZSTD_versionString ( void  )

ZSTD_versionString() : Return runtime library version, like "1.4.5". Requires v1.3.0+.
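A small sketch decomposing the version number according to the formula above:

#include <stdio.h>
#include <zstd.h>

int main(void)
{
    unsigned const v = ZSTD_versionNumber();   /* MAJOR*100*100 + MINOR*100 + RELEASE */
    printf("zstd %u.%u.%u (reported as \"%s\")\n",
           v / 10000, (v / 100) % 100, v % 100, ZSTD_versionString());
    return 0;
}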

◆ ZSTD_wildcopy()

MEM_STATIC FORCE_INLINE_ATTR void ZSTD_wildcopy ( void *  dst,
const void *  src,
size_t  length,
ZSTD_overlap_e const  ovtype 
)

ZSTD_wildcopy() : Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)

Parameters
ovtypecontrols the overlap detection
  • ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
  • ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart. The src buffer must be before the dst buffer.

◆ ZSTD_window_canOverflowCorrect()

MEM_STATIC U32 ZSTD_window_canOverflowCorrect ( ZSTD_window_t const  window,
U32  cycleLog,
U32  maxDist,
U32  loadedDictEnd,
void const *  src 
)

ZSTD_window_canOverflowCorrect(): Returns non-zero if the indices are large enough for overflow correction to work correctly without impacting compression ratio.

◆ ZSTD_window_clear()

MEM_STATIC void ZSTD_window_clear ( ZSTD_window_t window)

ZSTD_window_clear(): Clears the window containing the history by simply setting it to empty.

◆ ZSTD_window_correctOverflow()

MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_window_correctOverflow ( ZSTD_window_t window,
U32  cycleLog,
U32  maxDist,
void const *  src 
)

ZSTD_window_correctOverflow(): Reduces the indices to protect from index overflow. Returns the correction made to the indices, which must be applied to every stored index.

The least significant cycleLog bits of the indices must remain the same, which may be 0. Every index up to maxDist in the past must be valid.

◆ ZSTD_window_enforceMaxDist()

MEM_STATIC void ZSTD_window_enforceMaxDist ( ZSTD_window_t window,
const void *  blockEnd,
U32  maxDist,
U32 *  loadedDictEndPtr,
const ZSTD_MatchState_t **  dictMatchStatePtr 
)

ZSTD_window_enforceMaxDist(): Updates lowLimit so that: (srcEnd - base) - lowLimit == maxDist + loadedDictEnd

It ensures index is valid as long as index >= lowLimit. This must be called before a block compression call.

loadedDictEnd is only defined if a dictionary is in use for current compression. As the name implies, loadedDictEnd represents the index at end of dictionary. The value lies within context's referential, it can be directly compared to blockEndIdx.

If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. This is because dictionaries are allowed to be referenced fully as long as the last byte of the dictionary is in the window. Once input has progressed beyond window size, dictionary cannot be referenced anymore.

In normal dict mode, the dictionary lies between lowLimit and dictLimit. In dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary is below them. forceWindow and dictMatchState are therefore incompatible.

◆ ZSTD_window_hasExtDict()

MEM_STATIC U32 ZSTD_window_hasExtDict ( ZSTD_window_t const  window)

ZSTD_window_hasExtDict(): Returns non-zero if the window has a non-empty extDict.

◆ ZSTD_window_needOverflowCorrection()

MEM_STATIC U32 ZSTD_window_needOverflowCorrection ( ZSTD_window_t const  window,
U32  cycleLog,
U32  maxDist,
U32  loadedDictEnd,
void const *  src,
void const *  srcEnd 
)

ZSTD_window_needOverflowCorrection(): Returns non-zero if the indices are getting too large and need overflow protection.

◆ ZSTD_window_update()

MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR U32 ZSTD_window_update ( ZSTD_window_t window,
const void *  src,
size_t  srcSize,
int  forceNonContiguous 
)

ZSTD_window_update(): Updates the window by appending [src, src + srcSize) to the window. If it is not contiguous, the current prefix becomes the extDict, and we forget about the old extDict. Handles overlap of the prefix and extDict. Returns non-zero if the segment is contiguous.

◆ ZSTD_wrappedPtrAdd()

MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR const void* ZSTD_wrappedPtrAdd ( const void *  ptr,
ptrdiff_t  add 
)

Helper function to perform a wrapped pointer add without triggering UBSAN.

Returns
ptr + add with wrapping

◆ ZSTD_wrappedPtrDiff()

MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR ptrdiff_t ZSTD_wrappedPtrDiff ( unsigned char const *  lhs,
unsigned char const *  rhs 
)

Helper function to perform a wrapped pointer difference without triggering UBSAN.

Returns
lhs - rhs with wrapping

◆ ZSTD_wrappedPtrSub()

MEM_STATIC ZSTD_ALLOW_POINTER_OVERFLOW_ATTR const void* ZSTD_wrappedPtrSub ( const void *  ptr,
ptrdiff_t  sub 
)

Helper function to perform a wrapped pointer subtraction without triggering UBSAN.

Returns
ptr - sub with wrapping
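For illustration, one common way to express such a wrapped difference is through uintptr_t arithmetic, which sidesteps the undefined behavior of subtracting pointers into different objects; this is a sketch of the general technique, not necessarily the library's exact implementation:

#include <stdint.h>
#include <stddef.h>

/* wrapped pointer difference via unsigned integer arithmetic (illustrative technique) */
static ptrdiff_t wrappedPtrDiff_sketch(const unsigned char* lhs, const unsigned char* rhs)
{
    return (ptrdiff_t)((uintptr_t)lhs - (uintptr_t)rhs);
}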

◆ ZSTD_writeSkippableFrame()

size_t ZSTD_writeSkippableFrame ( void *  dst,
size_t  dstCapacity,
const void *  src,
size_t  srcSize,
unsigned  magicVariant 
)

ZSTD_writeSkippableFrame() : Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.

Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.

Returns an error if destination buffer is not large enough, if the source size is not representable with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).

Returns
: number of bytes written or a ZSTD error.
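A round-trip sketch combining ZSTD_writeSkippableFrame() with ZSTD_readSkippableFrame() documented above, assuming ZSTD_STATIC_LINKING_ONLY is defined so both prototypes are visible:

#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    const char payload[] = "user metadata";
    char frame[128];
    char back[64];
    unsigned magicVariant = 0;

    size_t const fsize = ZSTD_writeSkippableFrame(frame, sizeof(frame), payload, sizeof(payload), 7);
    if (ZSTD_isError(fsize)) return 1;

    size_t const rsize = ZSTD_readSkippableFrame(back, sizeof(back), &magicVariant, frame, fsize);
    if (ZSTD_isError(rsize)) return 1;

    printf("round-tripped %zu bytes, magicVariant=%u\n", rsize, magicVariant);   /* expect 7 */
    return 0;
}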

◆ ZSTDMT_compressStream_generic()

size_t ZSTDMT_compressStream_generic ( ZSTDMT_CCtx mtctx,
ZSTD_outBuffer output,
ZSTD_inBuffer input,
ZSTD_EndDirective  endOp 
)

ZSTDMT_compressStream_generic() : Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() depending on the flush directive. Internal use only - exposed to be invoked from zstd_compress.c. Assumption : output and input are valid (pos <= size). Note : needs to be initialized using any ZSTD_initCStream*() variant.

Returns
: minimum amount of data still to be flushed, 0 if fully flushed, or an error code

◆ ZSTDMT_getFrameProgression()

ZSTD_frameProgression ZSTDMT_getFrameProgression ( ZSTDMT_CCtx mtctx)

ZSTDMT_getFrameProgression(): tells how much data has been consumed (input) and produced (output) for current frame. able to count progression inside worker threads.

◆ ZSTDMT_initCStream_internal()

size_t ZSTDMT_initCStream_internal ( ZSTDMT_CCtx mtctx,
const void *  dict,
size_t  dictSize,
ZSTD_dictContentType_e  dictContentType,
const ZSTD_CDict cdict,
ZSTD_CCtx_params  params,
unsigned long long  pledgedSrcSize 
)

ZSTDMT_initCStream_internal() : Private use only. Init streaming operation. expects params to be valid. must receive dict, or cdict, or none, but not both. mtctx can be freshly constructed or reused from a prior compression. If mtctx is reused, memory allocations from the prior compression may not be freed, even if they are not needed for the current compression.

Returns
: 0, or an error code

◆ ZSTDMT_toFlushNow()

size_t ZSTDMT_toFlushNow ( ZSTDMT_CCtx mtctx)

ZSTDMT_toFlushNow() Tell how many bytes are ready to be flushed immediately. Probe the oldest active job (not yet entirely flushed) and check its output buffer. If return 0, it means there is no active job, or, it means oldest job is still active, but everything produced has been flushed so far, therefore flushing is limited by speed of oldest job.

◆ ZSTDMT_updateCParams_whileCompressing()

void ZSTDMT_updateCParams_whileCompressing ( ZSTDMT_CCtx mtctx,
const ZSTD_CCtx_params cctxParams 
)

ZSTDMT_updateCParams_whileCompressing() : Updates only a selected set of compression parameters, to remain compatible with current frame. New parameters will be applied to next compression job.