Merge topic 'update-liblzma'
1e9c1d0c87 CTestCustom: Suppress scanbuild warning in liblzma
2461dd79e5 liblzma: Suppress MSVC warning parameter difference warning
9f77124ea8 liblzma: Drop checks for inline and restrict keywords
7a976ee742 Merge branch 'upstream-liblzma' into update-liblzma
352b8fa70d liblzma 2018-04-29 (b5be61cc)
021b54cab8 liblzma: Revise update script to get version 5.2.4
6b494f567a Merge branch 'upstream-liblzma' into update-liblzma
897b790d57 liblzma 2014-12-21 (265e5ffb)
...

Acked-by: Kitware Robot <kwrobot@kitware.com>
Merge-request: !2264
@@ -92,6 +92,7 @@ list(APPEND CTEST_CUSTOM_WARNING_EXCEPTION
"cmFortranLexer.cxx:[0-9]+:[0-9]+: warning: Call to 'realloc' has an allocation size of 0 bytes"
"testProcess.*warning: Dereference of null pointer .loaded from variable .invalidAddress.."
"liblzma/simple/x86.c:[0-9]+:[0-9]+: warning: The result of the '<<' expression is undefined"
"liblzma/common/index_encoder.c:[0-9]+:[0-9]+: warning: Value stored to .* during its initialization is never read"
"libuv/src/.*:[0-9]+:[0-9]+: warning: Dereference of null pointer"
"libuv/src/.*:[0-9]+:[0-9]+: warning: The left operand of '==' is a garbage value"
)
@@ -7,8 +7,8 @@ shopt -s dotglob
readonly name="liblzma"
readonly ownership="liblzma upstream <xz-devel@tukaani.org>"
readonly subtree="Utilities/cmliblzma"
readonly repo="http://git.tukaani.org/xz.git"
readonly tag="v5.0.8"
readonly repo="https://git.tukaani.org/xz.git"
readonly tag="v5.2.4"
readonly shortlog=false
readonly paths="
COPYING
@@ -24,6 +24,10 @@ extract_source () {
mv src/common .
mv src/liblzma .
rmdir src
rm liblzma/Makefile.*
rm liblzma/*/Makefile.*
rm liblzma/liblzma.map
rm liblzma/validate_map.sh
popd
}
@@ -6,22 +6,6 @@ include(CheckSymbolExists)
include(CheckTypeSize)
include(TestBigEndian)

CHECK_C_SOURCE_COMPILES(
"int test (void *restrict x);\nint main (void) {return 0;}"
HAVE_RESTRICT)

CHECK_C_SOURCE_COMPILES(
"typedef struct abc *d;\nint test (d __restrict x);\nint main (void) {return 0;}"
HAVE___RESTRICT)

CHECK_C_SOURCE_COMPILES(
"static inline int test (void) {return 0;}\nint main (void) {return test();}"
HAVE_INLINE)

CHECK_C_SOURCE_COMPILES (
"static __inline int test (void) {return 0;}\nint main (void) {return test();}"
HAVE___INLINE)

CHECK_INCLUDE_FILE(byteswap.h HAVE_BYTESWAP_H)
CHECK_INCLUDE_FILE(inttypes.h HAVE_INTTYPES_H)
CHECK_INCLUDE_FILE(limits.h HAVE_LIMITS_H)
@@ -95,7 +79,7 @@ CHECK_TYPE_SIZE("unsigned short" SIZE_OF_UNSIGNED_SHORT)
CHECK_TYPE_SIZE("unsigned" SIZE_OF_UNSIGNED)
CHECK_TYPE_SIZE("unsigned long" SIZE_OF_UNSIGNED_LONG)
CHECK_TYPE_SIZE("unsigned long long" SIZE_OF_UNSIGNED_LONG_LONG)
CHECK_TYPE_SIZE("size_t" SIZE_OF_SIZE_T)
CHECK_TYPE_SIZE("size_t" SIZEOF_SIZE_T)

CHECK_TYPE_SIZE("__int64" __INT64)
CHECK_TYPE_SIZE("unsigned __int64" UNSIGNED___INT64)
@@ -47,7 +47,7 @@ XZ Utils Licensing
naturally it is not legally required. Here is an example of a good
notice to put into "about box" or into documentation:

This software includes code from XZ Utils <http://tukaani.org/xz/>.
This software includes code from XZ Utils <https://tukaani.org/xz/>.

The following license texts are included in the following files:
- COPYING.LGPLv2.1: GNU Lesser General Public License version 2.1
@@ -17,7 +17,7 @@
#define MY_VERSION LZMA_VERSION_MAJOR,LZMA_VERSION_MINOR,LZMA_VERSION_PATCH,MY_BUILD

#define MY_FILENAME MY_NAME MY_SUFFIX
#define MY_COMPANY "The Tukaani Project <http://tukaani.org/>"
#define MY_COMPANY "The Tukaani Project <https://tukaani.org/>"
#define MY_PRODUCT PACKAGE_NAME " <" PACKAGE_URL ">"

LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
@@ -18,6 +18,7 @@

#if defined(_MSC_VER)
# pragma warning(push,1)
# pragma warning(disable: 4028) /* formal parameter different from decl */
# pragma warning(disable: 4142) /* benign redefinition of type */
# pragma warning(disable: 4761) /* integral size mismatch in argument */
#endif
@@ -124,9 +125,9 @@

// The code currently assumes that size_t is either 32-bit or 64-bit.
#ifndef SIZE_MAX
# if SIZE_OF_SIZE_T == 4
# if SIZEOF_SIZE_T == 4
# define SIZE_MAX UINT32_MAX
# elif SIZE_OF_SIZE_T == 8
# elif SIZEOF_SIZE_T == 8
# define SIZE_MAX UINT64_MAX
# else
# error size_t is not 32-bit or 64-bit
@@ -175,6 +176,16 @@ typedef unsigned char _Bool;
# include <memory.h>
#endif

// As of MSVC 2013, inline and restrict are supported with
// non-standard keywords.
#if defined(_WIN32) && defined(_MSC_VER)
# ifndef inline
# define inline __inline
# endif
# ifndef restrict
# define restrict __restrict
# endif
#endif
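
An illustrative sketch (not from the upstream sources): with the fallback
defines above, portable code can keep using the standard C99 keywords and
still build with MSVC, where they are mapped to __inline and __restrict.
The copy_bytes() helper below is hypothetical.

#include <stddef.h>
#include <stdint.h>

static inline void
copy_bytes(uint8_t *restrict dst, const uint8_t *restrict src, size_t n)
{
	size_t i;

	/* restrict promises dst and src do not alias, which lets the
	 * compiler vectorize this loop more aggressively. */
	for (i = 0; i < n; ++i)
		dst[i] = src[i];
}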

////////////
// Macros //
@@ -106,6 +106,17 @@
#endif


////////////////////////////////
// Compiler-specific features //
////////////////////////////////

// Newer Intel C compilers require immintrin.h for _bit_scan_reverse()
// and such functions.
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500)
# include <immintrin.h>
#endif


///////////////////
// Byte swapping //
///////////////////
@@ -329,8 +340,8 @@ unaligned_read32le(const uint8_t *buf)
static inline void
unaligned_write16be(uint8_t *buf, uint16_t num)
{
buf[0] = num >> 8;
buf[1] = num;
buf[0] = (uint8_t)(num >> 8);
buf[1] = (uint8_t)num;
return;
}
@@ -338,8 +349,8 @@ unaligned_write16be(uint8_t *buf, uint16_t num)
static inline void
unaligned_write16le(uint8_t *buf, uint16_t num)
{
buf[0] = num;
buf[1] = num >> 8;
buf[0] = (uint8_t)num;
buf[1] = (uint8_t)(num >> 8);
return;
}
@@ -347,10 +358,10 @@ unaligned_write16le(uint8_t *buf, uint16_t num)
static inline void
unaligned_write32be(uint8_t *buf, uint32_t num)
{
buf[0] = num >> 24;
buf[1] = num >> 16;
buf[2] = num >> 8;
buf[3] = num;
buf[0] = (uint8_t)(num >> 24);
buf[1] = (uint8_t)(num >> 16);
buf[2] = (uint8_t)(num >> 8);
buf[3] = (uint8_t)num;
return;
}
@@ -358,10 +369,10 @@ unaligned_write32be(uint8_t *buf, uint32_t num)
static inline void
unaligned_write32le(uint8_t *buf, uint32_t num)
{
buf[0] = num;
buf[1] = num >> 8;
buf[2] = num >> 16;
buf[3] = num >> 24;
buf[0] = (uint8_t)num;
buf[1] = (uint8_t)(num >> 8);
buf[2] = (uint8_t)(num >> 16);
buf[3] = (uint8_t)(num >> 24);
return;
}
@@ -29,7 +29,7 @@
@SIZE_OF_UNSIGNED_CODE@
@SIZE_OF_UNSIGNED_LONG_CODE@
@SIZE_OF_UNSIGNED_LONG_LONG_CODE@
@SIZE_OF_SIZE_T_CODE@
@SIZEOF_SIZE_T_CODE@

/*
* If we lack int64_t, define it to the first of __int64, int, long, and long long
@@ -180,32 +180,6 @@ typedef uint64_t uintmax_t;

#cmakedefine uintptr_t @uintptr_t@


#cmakedefine HAVE_RESTRICT
#cmakedefine HAVE___RESTRICT

#cmakedefine HAVE_INLINE
#cmakedefine HAVE___INLINE

#ifndef HAVE_RESTRICT
# ifdef HAVE___RESTRICT
# define LZMA_RESTRICT __restrict
# else
# define LZMA_RESTRICT
# endif
#else
# define LZMA_RESTRICT restrict
#endif /* HAVE_RESTRICT */

#ifndef HAVE_INLINE
# ifdef HAVE___INLINE
# define inline __inline
# else
# define inline
# endif
#endif /* HAVE_INLINE */


#cmakedefine WORDS_BIGENDIAN 1

#cmakedefine HAVE_BYTESWAP_H 1
@@ -82,12 +82,20 @@
# if !defined(UINT32_C) || !defined(UINT64_C) \
|| !defined(UINT32_MAX) || !defined(UINT64_MAX)
/*
* MSVC has no C99 support, and thus it cannot be used to
* compile liblzma. The liblzma API has to still be usable
* from MSVC, so we need to define the required standard
* integer types here.
* MSVC versions older than 2013 have no C99 support, and
* thus they cannot be used to compile liblzma. Using an
* existing liblzma.dll with old MSVC can work though(*),
* but we need to define the required standard integer
* types here in a MSVC-specific way.
*
* (*) If you do this, the existing liblzma.dll probably uses
* a different runtime library than your MSVC-built
* application. Mixing runtimes is generally bad, but
* in this case it should work as long as you avoid
* the few rarely-needed liblzma functions that allocate
* memory and expect the caller to free it using free().
*/
# if defined(_WIN32) && defined(_MSC_VER)
# if defined(_WIN32) && defined(_MSC_VER) && _MSC_VER < 1800
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
@@ -211,7 +219,11 @@
*/
#ifndef lzma_nothrow
# if defined(__cplusplus)
# define lzma_nothrow throw()
# if __cplusplus >= 201103L
# define lzma_nothrow noexcept
# else
# define lzma_nothrow throw()
# endif
# elif __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
# define lzma_nothrow __attribute__((__nothrow__))
# else
@@ -286,7 +298,7 @@ extern "C" {
#include "lzma/filter.h"
#include "lzma/bcj.h"
#include "lzma/delta.h"
#include "lzma/lzma.h"
#include "lzma/lzma12.h"

/* Container formats */
#include "lzma/container.h"
@@ -240,12 +240,12 @@ typedef enum {
|
||||
/**
|
||||
* \brief The `action' argument for lzma_code()
|
||||
*
|
||||
* After the first use of LZMA_SYNC_FLUSH, LZMA_FULL_FLUSH, or LZMA_FINISH,
|
||||
* the same `action' must is used until lzma_code() returns LZMA_STREAM_END.
|
||||
* Also, the amount of input (that is, strm->avail_in) must not be modified
|
||||
* by the application until lzma_code() returns LZMA_STREAM_END. Changing the
|
||||
* `action' or modifying the amount of input will make lzma_code() return
|
||||
* LZMA_PROG_ERROR.
|
||||
* After the first use of LZMA_SYNC_FLUSH, LZMA_FULL_FLUSH, LZMA_FULL_BARRIER,
|
||||
* or LZMA_FINISH, the same `action' must is used until lzma_code() returns
|
||||
* LZMA_STREAM_END. Also, the amount of input (that is, strm->avail_in) must
|
||||
* not be modified by the application until lzma_code() returns
|
||||
* LZMA_STREAM_END. Changing the `action' or modifying the amount of input
|
||||
* will make lzma_code() return LZMA_PROG_ERROR.
|
||||
*/
|
||||
typedef enum {
|
||||
LZMA_RUN = 0,
|
||||
@@ -293,7 +293,7 @@ typedef enum {
|
||||
*
|
||||
* All the input data going to the current Block must have
|
||||
* been given to the encoder (the last bytes can still be
|
||||
* pending in* next_in). Call lzma_code() with LZMA_FULL_FLUSH
|
||||
* pending in *next_in). Call lzma_code() with LZMA_FULL_FLUSH
|
||||
* until it returns LZMA_STREAM_END. Then continue normally
|
||||
* with LZMA_RUN or finish the Stream with LZMA_FINISH.
|
||||
*
|
||||
@@ -302,6 +302,29 @@ typedef enum {
|
||||
* no unfinished Block, no empty Block is created.
|
||||
*/
|
||||
|
||||
LZMA_FULL_BARRIER = 4,
|
||||
/**<
|
||||
* \brief Finish encoding of the current Block
|
||||
*
|
||||
* This is like LZMA_FULL_FLUSH except that this doesn't
|
||||
* necessarily wait until all the input has been made
|
||||
* available via the output buffer. That is, lzma_code()
|
||||
* might return LZMA_STREAM_END as soon as all the input
|
||||
* has been consumed (avail_in == 0).
|
||||
*
|
||||
* LZMA_FULL_BARRIER is useful with a threaded encoder if
|
||||
* one wants to split the .xz Stream into Blocks at specific
|
||||
* offsets but doesn't care if the output isn't flushed
|
||||
* immediately. Using LZMA_FULL_BARRIER allows keeping
|
||||
* the threads busy while LZMA_FULL_FLUSH would make
|
||||
* lzma_code() wait until all the threads have finished
|
||||
* until more data could be passed to the encoder.
|
||||
*
|
||||
* With a lzma_stream initialized with the single-threaded
|
||||
* lzma_stream_encoder() or lzma_easy_encoder(),
|
||||
* LZMA_FULL_BARRIER is an alias for LZMA_FULL_FLUSH.
|
||||
*/
|
||||
|
||||
LZMA_FINISH = 3
|
||||
/**<
|
||||
* \brief Finish the coding operation
|
||||
@@ -332,11 +355,19 @@ typedef enum {
|
||||
* malloc() and free(). C++ users should note that the custom memory
|
||||
* handling functions must not throw exceptions.
|
||||
*
|
||||
* liblzma doesn't make an internal copy of lzma_allocator. Thus, it is
|
||||
* OK to change these function pointers in the middle of the coding
|
||||
* process, but obviously it must be done carefully to make sure that the
|
||||
* replacement `free' can deallocate memory allocated by the earlier
|
||||
* `alloc' function(s).
|
||||
* Single-threaded mode only: liblzma doesn't make an internal copy of
|
||||
* lzma_allocator. Thus, it is OK to change these function pointers in
|
||||
* the middle of the coding process, but obviously it must be done
|
||||
* carefully to make sure that the replacement `free' can deallocate
|
||||
* memory allocated by the earlier `alloc' function(s).
|
||||
*
|
||||
* Multithreaded mode: liblzma might internally store pointers to the
|
||||
* lzma_allocator given via the lzma_stream structure. The application
|
||||
* must not change the allocator pointer in lzma_stream or the contents
|
||||
* of the pointed lzma_allocator structure until lzma_end() has been used
|
||||
* to free the memory associated with that lzma_stream. The allocation
|
||||
* functions might be called simultaneously from multiple threads, and
|
||||
* thus they must be thread safe.
|
||||
*/
|
||||
typedef struct {
|
||||
/**
|
||||
@@ -448,7 +479,8 @@ typedef struct lzma_internal_s lzma_internal;
|
||||
*
|
||||
* Application may modify the values of total_in and total_out as it wants.
|
||||
* They are updated by liblzma to match the amount of data read and
|
||||
* written, but aren't used for anything else.
|
||||
* written but aren't used for anything else except as a possible return
|
||||
* values from lzma_get_progress().
|
||||
*/
|
||||
typedef struct {
|
||||
const uint8_t *next_in; /**< Pointer to the next input byte. */
|
||||
@@ -464,8 +496,10 @@ typedef struct {
|
||||
*
|
||||
* In most cases this is NULL which makes liblzma use
|
||||
* the standard malloc() and free().
|
||||
*
|
||||
* \note In 5.0.x this is not a const pointer.
|
||||
*/
|
||||
lzma_allocator *allocator;
|
||||
const lzma_allocator *allocator;
|
||||
|
||||
/** Internal state is not visible to applications. */
|
||||
lzma_internal *internal;
|
||||
@@ -546,6 +580,25 @@ extern LZMA_API(lzma_ret) lzma_code(lzma_stream *strm, lzma_action action)
extern LZMA_API(void) lzma_end(lzma_stream *strm) lzma_nothrow;


/**
* \brief Get progress information
*
* In single-threaded mode, applications can get progress information from
* strm->total_in and strm->total_out. In multi-threaded mode this is less
* useful because a significant amount of both input and output data gets
* buffered internally by liblzma. This makes total_in and total_out give
* misleading information and also makes the progress indicator updates
* non-smooth.
*
* This function gives realistic progress information also in multi-threaded
* mode by taking into account the progress made by each thread. In
* single-threaded mode *progress_in and *progress_out are set to
* strm->total_in and strm->total_out, respectively.
*/
extern LZMA_API(void) lzma_get_progress(lzma_stream *strm,
uint64_t *progress_in, uint64_t *progress_out) lzma_nothrow;
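
A minimal usage sketch (not part of the upstream diff) for the function
declared above: a hypothetical report_progress() helper an application could
call between lzma_code() calls. It assumes the total input size is known.

#include <stdio.h>
#include <lzma.h>

static void
report_progress(lzma_stream *strm, uint64_t input_file_size)
{
	uint64_t progress_in;
	uint64_t progress_out;

	/* In threaded mode strm->total_in/total_out lag behind, so ask
	 * liblzma for per-thread-aware progress counters instead. */
	lzma_get_progress(strm, &progress_in, &progress_out);

	if (input_file_size != 0)
		printf("\r%3u%% (%llu in, %llu out)",
				(unsigned)(progress_in * 100 / input_file_size),
				(unsigned long long)progress_in,
				(unsigned long long)progress_out);

	fflush(stdout);
}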

/**
* \brief Get the memory usage of decoder filter chain
*
@@ -591,11 +644,16 @@ extern LZMA_API(uint64_t) lzma_memlimit_get(const lzma_stream *strm)
|
||||
* This function is supported only when *strm has been initialized with
|
||||
* a function that takes a memlimit argument.
|
||||
*
|
||||
* liblzma 5.2.3 and earlier has a bug where memlimit value of 0 causes
|
||||
* this function to do nothing (leaving the limit unchanged) and still
|
||||
* return LZMA_OK. Later versions treat 0 as if 1 had been specified (so
|
||||
* lzma_memlimit_get() will return 1 even if you specify 0 here).
|
||||
*
|
||||
* \return - LZMA_OK: New memory usage limit successfully set.
|
||||
* - LZMA_MEMLIMIT_ERROR: The new limit is too small.
|
||||
* The limit was not changed.
|
||||
* - LZMA_PROG_ERROR: Invalid arguments, e.g. *strm doesn't
|
||||
* support memory usage limit or memlimit was zero.
|
||||
* support memory usage limit.
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_memlimit_set(
|
||||
lzma_stream *strm, uint64_t memlimit) lzma_nothrow;
|
||||
|
||||
@@ -31,11 +31,16 @@ typedef struct {
|
||||
/**
|
||||
* \brief Block format version
|
||||
*
|
||||
* To prevent API and ABI breakages if new features are needed in
|
||||
* the Block field, a version number is used to indicate which
|
||||
* fields in this structure are in use. For now, version must always
|
||||
* be zero. With non-zero version, most Block related functions will
|
||||
* return LZMA_OPTIONS_ERROR.
|
||||
* To prevent API and ABI breakages when new features are needed,
|
||||
* a version number is used to indicate which fields in this
|
||||
* structure are in use:
|
||||
* - liblzma >= 5.0.0: version = 0 is supported.
|
||||
* - liblzma >= 5.1.4beta: Support for version = 1 was added,
|
||||
* which adds the ignore_check field.
|
||||
*
|
||||
* If version is greater than one, most Block related functions
|
||||
* will return LZMA_OPTIONS_ERROR (lzma_block_header_decode() works
|
||||
* with any version value).
|
||||
*
|
||||
* Read by:
|
||||
* - All functions that take pointer to lzma_block as argument,
|
||||
@@ -233,7 +238,28 @@ typedef struct {
|
||||
lzma_reserved_enum reserved_enum2;
|
||||
lzma_reserved_enum reserved_enum3;
|
||||
lzma_reserved_enum reserved_enum4;
|
||||
lzma_bool reserved_bool1;
|
||||
|
||||
/**
|
||||
* \brief A flag to Block decoder to not verify the Check field
|
||||
*
|
||||
* This field is supported by liblzma >= 5.1.4beta if .version >= 1.
|
||||
*
|
||||
* If this is set to true, the integrity check won't be calculated
|
||||
* and verified. Unless you know what you are doing, you should
|
||||
* leave this to false. (A reason to set this to true is when the
|
||||
* file integrity is verified externally anyway and you want to
|
||||
* speed up the decompression, which matters mostly when using
|
||||
* SHA-256 as the integrity check.)
|
||||
*
|
||||
* If .version >= 1, read by:
|
||||
* - lzma_block_decoder()
|
||||
* - lzma_block_buffer_decode()
|
||||
*
|
||||
* Written by (.version is ignored):
|
||||
* - lzma_block_header_decode() always sets this to false
|
||||
*/
|
||||
lzma_bool ignore_check;
|
||||
|
||||
lzma_bool reserved_bool2;
|
||||
lzma_bool reserved_bool3;
|
||||
lzma_bool reserved_bool4;
|
||||
@@ -310,10 +336,14 @@ extern LZMA_API(lzma_ret) lzma_block_header_encode(
|
||||
/**
|
||||
* \brief Decode Block Header
|
||||
*
|
||||
* block->version should be set to the highest value supported by the
|
||||
* application; currently the only possible version is zero. This function
|
||||
* will set version to the lowest value that still supports all the features
|
||||
* required by the Block Header.
|
||||
* block->version should (usually) be set to the highest value supported
|
||||
* by the application. If the application sets block->version to a value
|
||||
* higher than supported by the current liblzma version, this function will
|
||||
* downgrade block->version to the highest value supported by it. Thus one
|
||||
* should check the value of block->version after calling this function if
|
||||
* block->version was set to a non-zero value and the application doesn't
|
||||
* otherwise know that the liblzma version being used is new enough to
|
||||
* support the specified block->version.
|
||||
*
|
||||
* The size of the Block Header must have already been decoded with
|
||||
* lzma_block_header_size_decode() macro and stored to block->header_size.
|
||||
@@ -344,7 +374,7 @@ extern LZMA_API(lzma_ret) lzma_block_header_encode(
|
||||
* block->header_size is invalid or block->filters is NULL.
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_block_header_decode(lzma_block *block,
|
||||
lzma_allocator *allocator, const uint8_t *in)
|
||||
const lzma_allocator *allocator, const uint8_t *in)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
|
||||
@@ -493,7 +523,25 @@ extern LZMA_API(size_t) lzma_block_buffer_bound(size_t uncompressed_size)
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_block_buffer_encode(
|
||||
lzma_block *block, lzma_allocator *allocator,
|
||||
lzma_block *block, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
|
||||
/**
|
||||
* \brief Single-call uncompressed .xz Block encoder
|
||||
*
|
||||
* This is like lzma_block_buffer_encode() except this doesn't try to
|
||||
* compress the data and instead encodes the data using LZMA2 uncompressed
|
||||
* chunks. The required output buffer size can be determined with
|
||||
* lzma_block_buffer_bound().
|
||||
*
|
||||
* Since the data won't be compressed, this function ignores block->filters.
|
||||
* This function doesn't take lzma_allocator because this function doesn't
|
||||
* allocate any memory from the heap.
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_block_uncomp_encode(lzma_block *block,
|
||||
const uint8_t *in, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
@@ -527,7 +575,7 @@ extern LZMA_API(lzma_ret) lzma_block_buffer_encode(
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_block_buffer_decode(
|
||||
lzma_block *block, lzma_allocator *allocator,
|
||||
lzma_block *block, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
lzma_nothrow;
|
||||
|
||||
@@ -60,6 +60,131 @@
|
||||
#define LZMA_PRESET_EXTREME (UINT32_C(1) << 31)
|
||||
|
||||
|
||||
/**
|
||||
* \brief Multithreading options
|
||||
*/
|
||||
typedef struct {
|
||||
/**
|
||||
* \brief Flags
|
||||
*
|
||||
* Set this to zero if no flags are wanted.
|
||||
*
|
||||
* No flags are currently supported.
|
||||
*/
|
||||
uint32_t flags;
|
||||
|
||||
/**
|
||||
* \brief Number of worker threads to use
|
||||
*/
|
||||
uint32_t threads;
|
||||
|
||||
/**
|
||||
* \brief Maximum uncompressed size of a Block
|
||||
*
|
||||
* The encoder will start a new .xz Block every block_size bytes.
|
||||
* Using LZMA_FULL_FLUSH or LZMA_FULL_BARRIER with lzma_code()
|
||||
* the caller may tell liblzma to start a new Block earlier.
|
||||
*
|
||||
* With LZMA2, a recommended block size is 2-4 times the LZMA2
|
||||
* dictionary size. With very small dictionaries, it is recommended
|
||||
* to use at least 1 MiB block size for good compression ratio, even
|
||||
* if this is more than four times the dictionary size. Note that
|
||||
* these are only recommendations for typical use cases; feel free
|
||||
* to use other values. Just keep in mind that using a block size
|
||||
* less than the LZMA2 dictionary size is waste of RAM.
|
||||
*
|
||||
* Set this to 0 to let liblzma choose the block size depending
|
||||
* on the compression options. For LZMA2 it will be 3*dict_size
|
||||
* or 1 MiB, whichever is more.
|
||||
*
|
||||
* For each thread, about 3 * block_size bytes of memory will be
|
||||
* allocated. This may change in later liblzma versions. If so,
|
||||
* the memory usage will probably be reduced, not increased.
|
||||
*/
|
||||
uint64_t block_size;
|
||||
|
||||
/**
|
||||
* \brief Timeout to allow lzma_code() to return early
|
||||
*
|
||||
* Multithreading can make liblzma to consume input and produce
|
||||
* output in a very bursty way: it may first read a lot of input
|
||||
* to fill internal buffers, then no input or output occurs for
|
||||
* a while.
|
||||
*
|
||||
* In single-threaded mode, lzma_code() won't return until it has
|
||||
* either consumed all the input or filled the output buffer. If
|
||||
* this is done in multithreaded mode, it may cause a call
|
||||
* lzma_code() to take even tens of seconds, which isn't acceptable
|
||||
* in all applications.
|
||||
*
|
||||
* To avoid very long blocking times in lzma_code(), a timeout
|
||||
* (in milliseconds) may be set here. If lzma_code() would block
|
||||
* longer than this number of milliseconds, it will return with
|
||||
* LZMA_OK. Reasonable values are 100 ms or more. The xz command
|
||||
* line tool uses 300 ms.
|
||||
*
|
||||
* If long blocking times are fine for you, set timeout to a special
|
||||
* value of 0, which will disable the timeout mechanism and will make
|
||||
* lzma_code() block until all the input is consumed or the output
|
||||
* buffer has been filled.
|
||||
*
|
||||
* \note Even with a timeout, lzma_code() might sometimes take
|
||||
* somewhat long time to return. No timing guarantees
|
||||
* are made.
|
||||
*/
|
||||
uint32_t timeout;
|
||||
|
||||
/**
|
||||
* \brief Compression preset (level and possible flags)
|
||||
*
|
||||
* The preset is set just like with lzma_easy_encoder().
|
||||
* The preset is ignored if filters below is non-NULL.
|
||||
*/
|
||||
uint32_t preset;
|
||||
|
||||
/**
|
||||
* \brief Filter chain (alternative to a preset)
|
||||
*
|
||||
* If this is NULL, the preset above is used. Otherwise the preset
|
||||
* is ignored and the filter chain specified here is used.
|
||||
*/
|
||||
const lzma_filter *filters;
|
||||
|
||||
/**
|
||||
* \brief Integrity check type
|
||||
*
|
||||
* See check.h for available checks. The xz command line tool
|
||||
* defaults to LZMA_CHECK_CRC64, which is a good choice if you
|
||||
* are unsure.
|
||||
*/
|
||||
lzma_check check;
|
||||
|
||||
/*
|
||||
* Reserved space to allow possible future extensions without
|
||||
* breaking the ABI. You should not touch these, because the names
|
||||
* of these variables may change. These are and will never be used
|
||||
* with the currently supported options, so it is safe to leave these
|
||||
* uninitialized.
|
||||
*/
|
||||
lzma_reserved_enum reserved_enum1;
|
||||
lzma_reserved_enum reserved_enum2;
|
||||
lzma_reserved_enum reserved_enum3;
|
||||
uint32_t reserved_int1;
|
||||
uint32_t reserved_int2;
|
||||
uint32_t reserved_int3;
|
||||
uint32_t reserved_int4;
|
||||
uint64_t reserved_int5;
|
||||
uint64_t reserved_int6;
|
||||
uint64_t reserved_int7;
|
||||
uint64_t reserved_int8;
|
||||
void *reserved_ptr1;
|
||||
void *reserved_ptr2;
|
||||
void *reserved_ptr3;
|
||||
void *reserved_ptr4;
|
||||
|
||||
} lzma_mt;
|
||||
|
||||
|
||||
/**
|
||||
* \brief Calculate approximate memory usage of easy encoder
|
||||
*
|
||||
@@ -165,7 +290,8 @@ extern LZMA_API(lzma_ret) lzma_easy_encoder(
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_easy_buffer_encode(
|
||||
uint32_t preset, lzma_check check,
|
||||
lzma_allocator *allocator, const uint8_t *in, size_t in_size,
|
||||
const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size) lzma_nothrow;
|
||||
|
||||
|
||||
@@ -190,6 +316,49 @@ extern LZMA_API(lzma_ret) lzma_stream_encoder(lzma_stream *strm,
lzma_nothrow lzma_attr_warn_unused_result;


/**
* \brief Calculate approximate memory usage of multithreaded .xz encoder
*
* Since doing the encoding in threaded mode doesn't affect the memory
* requirements of single-threaded decompressor, you can use
* lzma_easy_decoder_memusage(options->preset) or
* lzma_raw_decoder_memusage(options->filters) to calculate
* the decompressor memory requirements.
*
* \param options Compression options
*
* \return Number of bytes of memory required for encoding with the
* given options. If an error occurs, for example due to
* unsupported preset or filter chain, UINT64_MAX is returned.
*/
extern LZMA_API(uint64_t) lzma_stream_encoder_mt_memusage(
const lzma_mt *options) lzma_nothrow lzma_attr_pure;


/**
* \brief Initialize multithreaded .xz Stream encoder
*
* This provides the functionality of lzma_easy_encoder() and
* lzma_stream_encoder() as a single function for multithreaded use.
*
* The supported actions for lzma_code() are LZMA_RUN, LZMA_FULL_FLUSH,
* LZMA_FULL_BARRIER, and LZMA_FINISH. Support for LZMA_SYNC_FLUSH might be
* added in the future.
*
* \param strm Pointer to properly prepared lzma_stream
* \param options Pointer to multithreaded compression options
*
* \return - LZMA_OK
* - LZMA_MEM_ERROR
* - LZMA_UNSUPPORTED_CHECK
* - LZMA_OPTIONS_ERROR
* - LZMA_PROG_ERROR
*/
extern LZMA_API(lzma_ret) lzma_stream_encoder_mt(
lzma_stream *strm, const lzma_mt *options)
lzma_nothrow lzma_attr_warn_unused_result;
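
A minimal initialization sketch (not part of the upstream diff) for the
threaded encoder declared above. The field names come from the lzma_mt
options structure earlier in this header; the hypothetical helper assumes
CRC64 and the default preset, and leaves error handling to the caller.

#include <lzma.h>

static lzma_ret
init_threaded_encoder(lzma_stream *strm)
{
	lzma_mt mt = {
		.flags = 0,              /* No flags are currently supported. */
		.block_size = 0,         /* Let liblzma pick the Block size. */
		.timeout = 300,          /* Same 300 ms timeout the xz tool uses. */
		.preset = LZMA_PRESET_DEFAULT,
		.filters = NULL,         /* NULL means the preset above is used. */
		.check = LZMA_CHECK_CRC64,
	};

	/* lzma_cputhreads() (lzma/hardware.h) returns 0 when the core
	 * count is unknown; fall back to one worker thread in that case. */
	mt.threads = lzma_cputhreads();
	if (mt.threads == 0)
		mt.threads = 1;

	/* After this, data is fed with lzma_code() using LZMA_RUN and the
	 * Stream is finished with LZMA_FINISH, as with the single-threaded
	 * encoder; LZMA_FULL_BARRIER may be used to start new Blocks. */
	return lzma_stream_encoder_mt(strm, &mt);
}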

/**
* \brief Initialize .lzma encoder (legacy file format)
*
@@ -269,7 +438,8 @@ extern LZMA_API(size_t) lzma_stream_buffer_bound(size_t uncompressed_size)
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_stream_buffer_encode(
|
||||
lzma_filter *filters, lzma_check check,
|
||||
lzma_allocator *allocator, const uint8_t *in, size_t in_size,
|
||||
const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
@@ -304,6 +474,30 @@ extern LZMA_API(lzma_ret) lzma_stream_buffer_encode(
|
||||
#define LZMA_TELL_ANY_CHECK UINT32_C(0x04)
|
||||
|
||||
|
||||
/**
|
||||
* This flag makes lzma_code() not calculate and verify the integrity check
|
||||
* of the compressed data in .xz files. This means that invalid integrity
|
||||
* check values won't be detected and LZMA_DATA_ERROR won't be returned in
|
||||
* such cases.
|
||||
*
|
||||
* This flag only affects the checks of the compressed data itself; the CRC32
|
||||
* values in the .xz headers will still be verified normally.
|
||||
*
|
||||
* Don't use this flag unless you know what you are doing. Possible reasons
|
||||
* to use this flag:
|
||||
*
|
||||
* - Trying to recover data from a corrupt .xz file.
|
||||
*
|
||||
* - Speeding up decompression, which matters mostly with SHA-256
|
||||
* or with files that have compressed extremely well. It's recommended
|
||||
* to not use this flag for this purpose unless the file integrity is
|
||||
* verified externally in some other way.
|
||||
*
|
||||
* Support for this flag was added in liblzma 5.1.4beta.
|
||||
*/
|
||||
#define LZMA_IGNORE_CHECK UINT32_C(0x10)
|
||||
|
||||
|
||||
/**
|
||||
* This flag enables decoding of concatenated files with file formats that
|
||||
* allow concatenating compressed files as is. From the formats currently
|
||||
@@ -326,7 +520,10 @@ extern LZMA_API(lzma_ret) lzma_stream_buffer_encode(
|
||||
*
|
||||
* \param strm Pointer to properly prepared lzma_stream
|
||||
* \param memlimit Memory usage limit as bytes. Use UINT64_MAX
|
||||
* to effectively disable the limiter.
|
||||
* to effectively disable the limiter. liblzma
|
||||
* 5.2.3 and earlier don't allow 0 here and return
|
||||
* LZMA_PROG_ERROR; later versions treat 0 as if 1
|
||||
* had been specified.
|
||||
* \param flags Bitwise-or of zero or more of the decoder flags:
|
||||
* LZMA_TELL_NO_CHECK, LZMA_TELL_UNSUPPORTED_CHECK,
|
||||
* LZMA_TELL_ANY_CHECK, LZMA_CONCATENATED
|
||||
@@ -350,7 +547,10 @@ extern LZMA_API(lzma_ret) lzma_stream_decoder(
|
||||
*
|
||||
* \param strm Pointer to properly prepared lzma_stream
|
||||
* \param memlimit Memory usage limit as bytes. Use UINT64_MAX
|
||||
* to effectively disable the limiter.
|
||||
* to effectively disable the limiter. liblzma
|
||||
* 5.2.3 and earlier don't allow 0 here and return
|
||||
* LZMA_PROG_ERROR; later versions treat 0 as if 1
|
||||
* had been specified.
|
||||
* \param flags Bitwise-or of flags, or zero for no flags.
|
||||
*
|
||||
* \return - LZMA_OK: Initialization was successful.
|
||||
@@ -366,9 +566,16 @@ extern LZMA_API(lzma_ret) lzma_auto_decoder(
|
||||
/**
|
||||
* \brief Initialize .lzma decoder (legacy file format)
|
||||
*
|
||||
* \param strm Pointer to properly prepared lzma_stream
|
||||
* \param memlimit Memory usage limit as bytes. Use UINT64_MAX
|
||||
* to effectively disable the limiter. liblzma
|
||||
* 5.2.3 and earlier don't allow 0 here and return
|
||||
* LZMA_PROG_ERROR; later versions treat 0 as if 1
|
||||
* had been specified.
|
||||
*
|
||||
* Valid `action' arguments to lzma_code() are LZMA_RUN and LZMA_FINISH.
|
||||
* There is no need to use LZMA_FINISH, but allowing it may simplify
|
||||
* certain types of applications.
|
||||
* There is no need to use LZMA_FINISH, but it's allowed because it may
|
||||
* simplify certain types of applications.
|
||||
*
|
||||
* \return - LZMA_OK
|
||||
* - LZMA_MEM_ERROR
|
||||
@@ -418,7 +625,8 @@ extern LZMA_API(lzma_ret) lzma_alone_decoder(
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_stream_buffer_decode(
|
||||
uint64_t *memlimit, uint32_t flags, lzma_allocator *allocator,
|
||||
uint64_t *memlimit, uint32_t flags,
|
||||
const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
@@ -116,8 +116,9 @@ extern LZMA_API(lzma_bool) lzma_filter_decoder_is_supported(lzma_vli id)
|
||||
* is not NULL.
|
||||
* - LZMA_PROG_ERROR: src or dest is NULL.
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_filters_copy(const lzma_filter *src,
|
||||
lzma_filter *dest, lzma_allocator *allocator) lzma_nothrow;
|
||||
extern LZMA_API(lzma_ret) lzma_filters_copy(
|
||||
const lzma_filter *src, lzma_filter *dest,
|
||||
const lzma_allocator *allocator) lzma_nothrow;
|
||||
|
||||
|
||||
/**
|
||||
@@ -256,7 +257,7 @@ extern LZMA_API(lzma_ret) lzma_filters_update(
|
||||
* won't necessarily meet that bound.)
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_raw_buffer_encode(
|
||||
const lzma_filter *filters, lzma_allocator *allocator,
|
||||
const lzma_filter *filters, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t in_size, uint8_t *out,
|
||||
size_t *out_pos, size_t out_size) lzma_nothrow;
|
||||
|
||||
@@ -280,7 +281,7 @@ extern LZMA_API(lzma_ret) lzma_raw_buffer_encode(
|
||||
* which no data is written to is out[out_size].
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_raw_buffer_decode(
|
||||
const lzma_filter *filters, lzma_allocator *allocator,
|
||||
const lzma_filter *filters, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size) lzma_nothrow;
|
||||
|
||||
@@ -356,7 +357,7 @@ extern LZMA_API(lzma_ret) lzma_properties_encode(
|
||||
* - LZMA_MEM_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_properties_decode(
|
||||
lzma_filter *filter, lzma_allocator *allocator,
|
||||
lzma_filter *filter, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size) lzma_nothrow;
|
||||
|
||||
|
||||
@@ -419,6 +420,6 @@ extern LZMA_API(lzma_ret) lzma_filter_flags_encode(const lzma_filter *filter,
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_filter_flags_decode(
|
||||
lzma_filter *filter, lzma_allocator *allocator,
|
||||
lzma_filter *filter, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
@@ -48,3 +48,17 @@
* of RAM on the specific operating system.
*/
extern LZMA_API(uint64_t) lzma_physmem(void) lzma_nothrow;


/**
* \brief Get the number of processor cores or threads
*
* This function may be useful when determining how many threads to use.
* If the hardware supports more than one thread per CPU core, the number
* of hardware threads is returned if that information is available.
*
* \brief On success, the number of available CPU threads or cores is
* returned. If this information isn't available or an error
* occurs, zero is returned.
*/
extern LZMA_API(uint32_t) lzma_cputhreads(void) lzma_nothrow;
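
A sketch (not part of the upstream diff) combining the two hardware queries
above the way an application might: start from lzma_cputhreads() and shrink
the worker count until lzma_stream_encoder_mt_memusage() (declared in
lzma/container.h, see the hunk earlier in this merge) fits in half of the
RAM reported by lzma_physmem(). The mt argument is assumed to already have
its preset or filter chain and check filled in.

#include <lzma.h>

static uint32_t
pick_thread_count(lzma_mt *mt)
{
	uint32_t threads = lzma_cputhreads();
	if (threads == 0)
		threads = 1;    /* Core count unknown; stay single-threaded. */

	/* lzma_physmem() returns 0 when the amount of RAM is unknown;
	 * in that case skip the memory-based capping entirely. */
	const uint64_t limit = lzma_physmem() / 2;

	for (; threads > 1; --threads) {
		mt->threads = threads;
		if (limit == 0
				|| lzma_stream_encoder_mt_memusage(mt) <= limit)
			break;
	}

	mt->threads = threads;
	return threads;
}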
@@ -303,7 +303,7 @@ extern LZMA_API(uint64_t) lzma_index_memused(const lzma_index *i)
|
||||
* \return On success, a pointer to an empty initialized lzma_index is
|
||||
* returned. If allocation fails, NULL is returned.
|
||||
*/
|
||||
extern LZMA_API(lzma_index *) lzma_index_init(lzma_allocator *allocator)
|
||||
extern LZMA_API(lzma_index *) lzma_index_init(const lzma_allocator *allocator)
|
||||
lzma_nothrow;
|
||||
|
||||
|
||||
@@ -312,8 +312,8 @@ extern LZMA_API(lzma_index *) lzma_index_init(lzma_allocator *allocator)
|
||||
*
|
||||
* If i is NULL, this does nothing.
|
||||
*/
|
||||
extern LZMA_API(void) lzma_index_end(lzma_index *i, lzma_allocator *allocator)
|
||||
lzma_nothrow;
|
||||
extern LZMA_API(void) lzma_index_end(
|
||||
lzma_index *i, const lzma_allocator *allocator) lzma_nothrow;
|
||||
|
||||
|
||||
/**
|
||||
@@ -341,7 +341,7 @@ extern LZMA_API(void) lzma_index_end(lzma_index *i, lzma_allocator *allocator)
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_index_append(
|
||||
lzma_index *i, lzma_allocator *allocator,
|
||||
lzma_index *i, const lzma_allocator *allocator,
|
||||
lzma_vli unpadded_size, lzma_vli uncompressed_size)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
@@ -564,8 +564,8 @@ extern LZMA_API(lzma_bool) lzma_index_iter_locate(
|
||||
* - LZMA_MEM_ERROR
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_index_cat(
|
||||
lzma_index *dest, lzma_index *src, lzma_allocator *allocator)
|
||||
extern LZMA_API(lzma_ret) lzma_index_cat(lzma_index *dest, lzma_index *src,
|
||||
const lzma_allocator *allocator)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
|
||||
@@ -575,7 +575,7 @@ extern LZMA_API(lzma_ret) lzma_index_cat(
|
||||
* \return A copy of the lzma_index, or NULL if memory allocation failed.
|
||||
*/
|
||||
extern LZMA_API(lzma_index *) lzma_index_dup(
|
||||
const lzma_index *i, lzma_allocator *allocator)
|
||||
const lzma_index *i, const lzma_allocator *allocator)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
|
||||
@@ -586,8 +586,7 @@ extern LZMA_API(lzma_index *) lzma_index_dup(
|
||||
* \param i Pointer to lzma_index which should be encoded.
|
||||
*
|
||||
* The valid `action' values for lzma_code() are LZMA_RUN and LZMA_FINISH.
|
||||
* It is enough to use only one of them (you can choose freely; use LZMA_RUN
|
||||
* to support liblzma versions older than 5.0.0).
|
||||
* It is enough to use only one of them (you can choose freely).
|
||||
*
|
||||
* \return - LZMA_OK: Initialization succeeded, continue with lzma_code().
|
||||
* - LZMA_MEM_ERROR
|
||||
@@ -610,16 +609,21 @@ extern LZMA_API(lzma_ret) lzma_index_encoder(
|
||||
* to a new lzma_index, which the application
|
||||
* has to later free with lzma_index_end().
|
||||
* \param memlimit How much memory the resulting lzma_index is
|
||||
* allowed to require.
|
||||
* allowed to require. liblzma 5.2.3 and earlier
|
||||
* don't allow 0 here and return LZMA_PROG_ERROR;
|
||||
* later versions treat 0 as if 1 had been specified.
|
||||
*
|
||||
* The valid `action' values for lzma_code() are LZMA_RUN and LZMA_FINISH.
|
||||
* It is enough to use only one of them (you can choose freely; use LZMA_RUN
|
||||
* to support liblzma versions older than 5.0.0).
|
||||
* Valid `action' arguments to lzma_code() are LZMA_RUN and LZMA_FINISH.
|
||||
* There is no need to use LZMA_FINISH, but it's allowed because it may
|
||||
* simplify certain types of applications.
|
||||
*
|
||||
* \return - LZMA_OK: Initialization succeeded, continue with lzma_code().
|
||||
* - LZMA_MEM_ERROR
|
||||
* - LZMA_MEMLIMIT_ERROR
|
||||
* - LZMA_PROG_ERROR
|
||||
*
|
||||
* liblzma 5.2.3 and older list also LZMA_MEMLIMIT_ERROR here
|
||||
* but that error code has never been possible from this
|
||||
* initialization function.
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_index_decoder(
|
||||
lzma_stream *strm, lzma_index **i, uint64_t memlimit)
|
||||
@@ -677,6 +681,6 @@ extern LZMA_API(lzma_ret) lzma_index_buffer_encode(const lzma_index *i,
|
||||
* - LZMA_PROG_ERROR
|
||||
*/
|
||||
extern LZMA_API(lzma_ret) lzma_index_buffer_decode(lzma_index **i,
|
||||
uint64_t *memlimit, lzma_allocator *allocator,
|
||||
uint64_t *memlimit, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size)
|
||||
lzma_nothrow;
|
||||
|
||||
@@ -37,7 +37,7 @@ typedef struct lzma_index_hash_s lzma_index_hash;
|
||||
* pointer than the index_hash that was given as an argument.
|
||||
*/
|
||||
extern LZMA_API(lzma_index_hash *) lzma_index_hash_init(
|
||||
lzma_index_hash *index_hash, lzma_allocator *allocator)
|
||||
lzma_index_hash *index_hash, const lzma_allocator *allocator)
|
||||
lzma_nothrow lzma_attr_warn_unused_result;
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ extern LZMA_API(lzma_index_hash *) lzma_index_hash_init(
|
||||
* \brief Deallocate lzma_index_hash structure
|
||||
*/
|
||||
extern LZMA_API(void) lzma_index_hash_end(
|
||||
lzma_index_hash *index_hash, lzma_allocator *allocator)
|
||||
lzma_index_hash *index_hash, const lzma_allocator *allocator)
|
||||
lzma_nothrow;
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* \file lzma/lzma.h
|
||||
* \file lzma/lzma12.h
|
||||
* \brief LZMA1 and LZMA2 filters
|
||||
*/
|
||||
|
||||
@@ -21,8 +21,8 @@
|
||||
* Version number split into components
|
||||
*/
|
||||
#define LZMA_VERSION_MAJOR 5
|
||||
#define LZMA_VERSION_MINOR 0
|
||||
#define LZMA_VERSION_PATCH 8
|
||||
#define LZMA_VERSION_MINOR 2
|
||||
#define LZMA_VERSION_PATCH 4
|
||||
#define LZMA_VERSION_STABILITY LZMA_VERSION_STABILITY_STABLE
|
||||
|
||||
#ifndef LZMA_VERSION_COMMIT
|
||||
|
||||
@@ -16,6 +16,9 @@
|
||||
extern LZMA_API(lzma_bool)
|
||||
lzma_check_is_supported(lzma_check type)
|
||||
{
|
||||
if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
|
||||
return false;
|
||||
|
||||
static const lzma_bool available_checks[LZMA_CHECK_ID_MAX + 1] = {
|
||||
true, // LZMA_CHECK_NONE
|
||||
|
||||
@@ -53,9 +56,6 @@ lzma_check_is_supported(lzma_check type)
|
||||
false, // Reserved
|
||||
};
|
||||
|
||||
if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
|
||||
return false;
|
||||
|
||||
return available_checks[(unsigned int)(type)];
|
||||
}
|
||||
|
||||
@@ -63,6 +63,9 @@ lzma_check_is_supported(lzma_check type)
|
||||
extern LZMA_API(uint32_t)
|
||||
lzma_check_size(lzma_check type)
|
||||
{
|
||||
if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
|
||||
return UINT32_MAX;
|
||||
|
||||
// See file-format.txt section 2.1.1.2.
|
||||
static const uint8_t check_sizes[LZMA_CHECK_ID_MAX + 1] = {
|
||||
0,
|
||||
@@ -73,9 +76,6 @@ lzma_check_size(lzma_check type)
|
||||
64, 64, 64
|
||||
};
|
||||
|
||||
if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
|
||||
return UINT32_MAX;
|
||||
|
||||
return check_sizes[(unsigned int)(type)];
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,53 @@
|
||||
|
||||
#include "common.h"
|
||||
|
||||
// If the function for external SHA-256 is missing, use the internal SHA-256
|
||||
// code. Due to how configure works, these defines can only get defined when
|
||||
// both a usable header and a type have already been found.
|
||||
#if !(defined(HAVE_CC_SHA256_INIT) \
|
||||
|| defined(HAVE_SHA256_INIT) \
|
||||
|| defined(HAVE_SHA256INIT))
|
||||
# define HAVE_INTERNAL_SHA256 1
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_INTERNAL_SHA256)
|
||||
// Nothing
|
||||
#elif defined(HAVE_COMMONCRYPTO_COMMONDIGEST_H)
|
||||
# include <CommonCrypto/CommonDigest.h>
|
||||
#elif defined(HAVE_SHA256_H)
|
||||
# include <sys/types.h>
|
||||
# include <sha256.h>
|
||||
#elif defined(HAVE_SHA2_H)
|
||||
# include <sys/types.h>
|
||||
# include <sha2.h>
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_INTERNAL_SHA256)
|
||||
/// State for the internal SHA-256 implementation
|
||||
typedef struct {
|
||||
/// Internal state
|
||||
uint32_t state[8];
|
||||
|
||||
/// Size of the message excluding padding
|
||||
uint64_t size;
|
||||
} lzma_sha256_state;
|
||||
#elif defined(HAVE_CC_SHA256_CTX)
|
||||
typedef CC_SHA256_CTX lzma_sha256_state;
|
||||
#elif defined(HAVE_SHA256_CTX)
|
||||
typedef SHA256_CTX lzma_sha256_state;
|
||||
#elif defined(HAVE_SHA2_CTX)
|
||||
typedef SHA2_CTX lzma_sha256_state;
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_INTERNAL_SHA256)
|
||||
// Nothing
|
||||
#elif defined(HAVE_CC_SHA256_INIT)
|
||||
# define LZMA_SHA256FUNC(x) CC_SHA256_ ## x
|
||||
#elif defined(HAVE_SHA256_INIT)
|
||||
# define LZMA_SHA256FUNC(x) SHA256_ ## x
|
||||
#elif defined(HAVE_SHA256INIT)
|
||||
# define LZMA_SHA256FUNC(x) SHA256 ## x
|
||||
#endif
|
||||
|
||||
// Index hashing needs the best possible hash function (preferably
|
||||
// a cryptographic hash) for maximum reliability.
|
||||
@@ -43,14 +90,7 @@ typedef struct {
|
||||
union {
|
||||
uint32_t crc32;
|
||||
uint64_t crc64;
|
||||
|
||||
struct {
|
||||
/// Internal state
|
||||
uint32_t state[8];
|
||||
|
||||
/// Size of the message excluding padding
|
||||
uint64_t size;
|
||||
} sha256;
|
||||
lzma_sha256_state sha256;
|
||||
} state;
|
||||
|
||||
} lzma_check_state;
|
||||
@@ -82,6 +122,8 @@ extern void lzma_check_update(lzma_check_state *check, lzma_check type,
|
||||
extern void lzma_check_finish(lzma_check_state *check, lzma_check type);
|
||||
|
||||
|
||||
#ifndef LZMA_SHA256FUNC
|
||||
|
||||
/// Prepare SHA-256 state for new input.
|
||||
extern void lzma_sha256_init(lzma_check_state *check);
|
||||
|
||||
@@ -92,4 +134,39 @@ extern void lzma_sha256_update(
|
||||
/// Finish the SHA-256 calculation and store the result to check->buffer.u8.
|
||||
extern void lzma_sha256_finish(lzma_check_state *check);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
static inline void
|
||||
lzma_sha256_init(lzma_check_state *check)
|
||||
{
|
||||
LZMA_SHA256FUNC(Init)(&check->state.sha256);
|
||||
}
|
||||
|
||||
|
||||
static inline void
|
||||
lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check)
|
||||
{
|
||||
#if defined(HAVE_CC_SHA256_INIT) && SIZE_MAX > UINT32_MAX
|
||||
// Darwin's CC_SHA256_Update takes uint32_t as the buffer size,
|
||||
// so use a loop to support size_t.
|
||||
while (size > UINT32_MAX) {
|
||||
LZMA_SHA256FUNC(Update)(&check->state.sha256, buf, UINT32_MAX);
|
||||
buf += UINT32_MAX;
|
||||
size -= UINT32_MAX;
|
||||
}
|
||||
#endif
|
||||
|
||||
LZMA_SHA256FUNC(Update)(&check->state.sha256, buf, size);
|
||||
}
|
||||
|
||||
|
||||
static inline void
|
||||
lzma_sha256_finish(lzma_check_state *check)
|
||||
{
|
||||
LZMA_SHA256FUNC(Final)(check->buffer.u8, &check->state.sha256);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -33,8 +33,6 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
|
||||
#endif
|
||||
|
||||
if (size > 8) {
|
||||
const uint8_t * limit;
|
||||
|
||||
// Fix the alignment, if needed. The if statement above
|
||||
// ensures that this won't read past the end of buf[].
|
||||
while ((uintptr_t)(buf) & 7) {
|
||||
@@ -43,7 +41,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
|
||||
}
|
||||
|
||||
// Calculate the position where to stop.
|
||||
limit = buf + (size & ~(size_t)(7));
|
||||
const uint8_t *const limit = buf + (size & ~(size_t)(7));
|
||||
|
||||
// Calculate how many bytes must be calculated separately
|
||||
// before returning the result.
|
||||
@@ -51,8 +49,6 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
|
||||
|
||||
// Calculate the CRC32 using the slice-by-eight algorithm.
|
||||
while (buf < limit) {
|
||||
uint32_t tmp;
|
||||
|
||||
crc ^= *(const uint32_t *)(buf);
|
||||
buf += 4;
|
||||
|
||||
@@ -61,7 +57,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
|
||||
^ lzma_crc32_table[5][C(crc)]
|
||||
^ lzma_crc32_table[4][D(crc)];
|
||||
|
||||
tmp = *(const uint32_t *)(buf);
|
||||
const uint32_t tmp = *(const uint32_t *)(buf);
|
||||
buf += 4;
|
||||
|
||||
// At least with some compilers, it is critical for
|
||||
|
||||
@@ -36,14 +36,12 @@ lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
|
||||
#endif
|
||||
|
||||
if (size > 4) {
|
||||
const uint8_t *limit;
|
||||
|
||||
while ((uintptr_t)(buf) & 3) {
|
||||
crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);
|
||||
--size;
|
||||
}
|
||||
|
||||
limit = buf + (size & ~(size_t)(3));
|
||||
const uint8_t *const limit = buf + (size & ~(size_t)(3));
|
||||
size &= (size_t)(3);
|
||||
|
||||
while (buf < limit) {
|
||||
|
||||
@@ -21,22 +21,22 @@
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Avoid bogus warnings in transform().
|
||||
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) || __GNUC__ > 4
|
||||
# pragma GCC diagnostic ignored "-Wuninitialized"
|
||||
#endif
|
||||
|
||||
#include "check.h"
|
||||
|
||||
// At least on x86, GCC is able to optimize this to a rotate instruction.
|
||||
#define rotr_32(num, amount) ((num) >> (amount) | (num) << (32 - (amount)))
|
||||
// Rotate a uint32_t. GCC can optimize this to a rotate instruction
|
||||
// at least on x86.
|
||||
static inline uint32_t
|
||||
rotr_32(uint32_t num, unsigned amount)
|
||||
{
|
||||
return (num >> amount) | (num << (32 - amount));
|
||||
}
|
||||
|
||||
#define blk0(i) (W[i] = data[i])
|
||||
#define blk0(i) (W[i] = conv32be(data[i]))
|
||||
#define blk2(i) (W[i & 15] += s1(W[(i - 2) & 15]) + W[(i - 7) & 15] \
|
||||
+ s0(W[(i - 15) & 15]))
|
||||
|
||||
#define Ch(x, y, z) (z ^ (x & (y ^ z)))
|
||||
#define Maj(x, y, z) ((x & y) | (z & (x | y)))
|
||||
#define Maj(x, y, z) ((x & (y ^ z)) + (y & z))
|
||||
|
||||
#define a(i) T[(0 - i) & 7]
|
||||
#define b(i) T[(1 - i) & 7]
|
||||
@@ -47,16 +47,17 @@
|
||||
#define g(i) T[(6 - i) & 7]
|
||||
#define h(i) T[(7 - i) & 7]
|
||||
|
||||
#define R(i) \
|
||||
h(i) += S1(e(i)) + Ch(e(i), f(i), g(i)) + SHA256_K[i + j] \
|
||||
+ (j ? blk2(i) : blk0(i)); \
|
||||
#define R(i, j, blk) \
|
||||
h(i) += S1(e(i)) + Ch(e(i), f(i), g(i)) + SHA256_K[i + j] + blk; \
|
||||
d(i) += h(i); \
|
||||
h(i) += S0(a(i)) + Maj(a(i), b(i), c(i))
|
||||
#define R0(i) R(i, 0, blk0(i))
|
||||
#define R2(i) R(i, j, blk2(i))
|
||||
|
||||
#define S0(x) (rotr_32(x, 2) ^ rotr_32(x, 13) ^ rotr_32(x, 22))
|
||||
#define S1(x) (rotr_32(x, 6) ^ rotr_32(x, 11) ^ rotr_32(x, 25))
|
||||
#define s0(x) (rotr_32(x, 7) ^ rotr_32(x, 18) ^ (x >> 3))
|
||||
#define s1(x) (rotr_32(x, 17) ^ rotr_32(x, 19) ^ (x >> 10))
|
||||
#define S0(x) rotr_32(x ^ rotr_32(x ^ rotr_32(x, 9), 11), 2)
|
||||
#define S1(x) rotr_32(x ^ rotr_32(x ^ rotr_32(x, 14), 5), 6)
|
||||
#define s0(x) (rotr_32(x ^ rotr_32(x, 11), 7) ^ (x >> 3))
|
||||
#define s1(x) (rotr_32(x ^ rotr_32(x, 2), 17) ^ (x >> 10))
|
||||
|
||||
|
||||
static const uint32_t SHA256_K[64] = {
|
||||
@@ -84,17 +85,22 @@ transform(uint32_t state[8], const uint32_t data[16])
|
||||
{
|
||||
uint32_t W[16];
|
||||
uint32_t T[8];
|
||||
unsigned int j;
|
||||
|
||||
// Copy state[] to working vars.
|
||||
memcpy(T, state, sizeof(T));
|
||||
|
||||
// 64 operations, partially loop unrolled
|
||||
for (j = 0; j < 64; j += 16) {
|
||||
R( 0); R( 1); R( 2); R( 3);
|
||||
R( 4); R( 5); R( 6); R( 7);
|
||||
R( 8); R( 9); R(10); R(11);
|
||||
R(12); R(13); R(14); R(15);
|
||||
// The first 16 operations unrolled
|
||||
R0( 0); R0( 1); R0( 2); R0( 3);
|
||||
R0( 4); R0( 5); R0( 6); R0( 7);
|
||||
R0( 8); R0( 9); R0(10); R0(11);
|
||||
R0(12); R0(13); R0(14); R0(15);
|
||||
|
||||
// The remaining 48 operations partially unrolled
|
||||
for (unsigned int j = 16; j < 64; j += 16) {
|
||||
R2( 0); R2( 1); R2( 2); R2( 3);
|
||||
R2( 4); R2( 5); R2( 6); R2( 7);
|
||||
R2( 8); R2( 9); R2(10); R2(11);
|
||||
R2(12); R2(13); R2(14); R2(15);
|
||||
}
|
||||
|
||||
// Add the working vars back into state[].
|
||||
@@ -112,19 +118,7 @@ transform(uint32_t state[8], const uint32_t data[16])
|
||||
static void
|
||||
process(lzma_check_state *check)
|
||||
{
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
transform(check->state.sha256.state, check->buffer.u32);
|
||||
|
||||
#else
|
||||
uint32_t data[16];
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < 16; ++i)
|
||||
data[i] = bswap32(check->buffer.u32[i]);
|
||||
|
||||
transform(check->state.sha256.state, data);
|
||||
#endif
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -174,8 +168,6 @@ lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check)
|
||||
extern void
|
||||
lzma_sha256_finish(lzma_check_state *check)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
// Add padding as described in RFC 3174 (it describes SHA-1 but
|
||||
// the same padding style is used for SHA-256 too).
|
||||
size_t pos = check->state.sha256.size & 0x3F;
|
||||
@@ -197,7 +189,7 @@ lzma_sha256_finish(lzma_check_state *check)
|
||||
|
||||
process(check);
|
||||
|
||||
for (i = 0; i < 8; ++i)
|
||||
for (size_t i = 0; i < 8; ++i)
|
||||
check->buffer.u32[i] = conv32be(check->state.sha256.state[i]);
|
||||
|
||||
return;
|
||||
|
||||
@@ -15,7 +15,7 @@
#include "lz_decoder.h"


struct lzma_coder_s {
typedef struct {
lzma_next_coder next;

enum {
@@ -46,17 +46,19 @@ struct lzma_coder_s {
/// Options decoded from the header needed to initialize
/// the LZMA decoder
lzma_options_lzma options;
};
} lzma_alone_coder;


static lzma_ret
alone_decode(lzma_coder *coder,
lzma_allocator *allocator lzma_attribute((__unused__)),
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size,
alone_decode(void *coder_ptr,
const lzma_allocator *allocator lzma_attribute((__unused__)),
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size,
lzma_action action)
{
lzma_alone_coder *coder = coder_ptr;

while (*out_pos < out_size
&& (coder->sequence == SEQ_CODE || *in_pos < in_size))
switch (coder->sequence) {
@@ -126,17 +128,19 @@ alone_decode(lzma_coder *coder,
// Fall through

case SEQ_CODER_INIT: {
lzma_ret ret;

lzma_filter_info filters[2] = {
{ 0, &lzma_lzma_decoder_init, &coder->options },
{ 0, NULL, NULL }
};

if (coder->memusage > coder->memlimit)
return LZMA_MEMLIMIT_ERROR;

ret = lzma_next_filter_init(&coder->next,
lzma_filter_info filters[2] = {
{
.init = &lzma_lzma_decoder_init,
.options = &coder->options,
}, {
.init = NULL,
}
};

const lzma_ret ret = lzma_next_filter_init(&coder->next,
allocator, filters);
if (ret != LZMA_OK)
return ret;
@@ -164,8 +168,9 @@ alone_decode(lzma_coder *coder,


static void
alone_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
alone_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
lzma_alone_coder *coder = coder_ptr;
lzma_next_end(&coder->next, allocator);
lzma_free(coder, allocator);
return;
@@ -173,9 +178,11 @@ alone_decoder_end(lzma_coder *coder, lzma_allocator *allocator)


static lzma_ret
alone_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
alone_decoder_memconfig(void *coder_ptr, uint64_t *memusage,
uint64_t *old_memlimit, uint64_t new_memlimit)
{
lzma_alone_coder *coder = coder_ptr;

*memusage = coder->memusage;
*old_memlimit = coder->memlimit;

@@ -191,34 +198,34 @@ alone_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,


extern lzma_ret
lzma_alone_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_alone_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
uint64_t memlimit, bool picky)
{
lzma_next_coder_init(&lzma_alone_decoder_init, next, allocator);

if (memlimit == 0)
return LZMA_PROG_ERROR;
lzma_alone_coder *coder = next->coder;

if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (next->coder == NULL)
if (coder == NULL) {
coder = lzma_alloc(sizeof(lzma_alone_coder), allocator);
if (coder == NULL)
return LZMA_MEM_ERROR;

next->coder = coder;
next->code = &alone_decode;
next->end = &alone_decoder_end;
next->memconfig = &alone_decoder_memconfig;
next->coder->next = LZMA_NEXT_CODER_INIT;
coder->next = LZMA_NEXT_CODER_INIT;
}

next->coder->sequence = SEQ_PROPERTIES;
next->coder->picky = picky;
next->coder->pos = 0;
next->coder->options.dict_size = 0;
next->coder->options.preset_dict = NULL;
next->coder->options.preset_dict_size = 0;
next->coder->uncompressed_size = 0;
next->coder->memlimit = memlimit;
next->coder->memusage = LZMA_MEMUSAGE_BASE;
coder->sequence = SEQ_PROPERTIES;
coder->picky = picky;
coder->pos = 0;
coder->options.dict_size = 0;
coder->options.preset_dict = NULL;
coder->options.preset_dict_size = 0;
coder->uncompressed_size = 0;
coder->memlimit = my_max(1, memlimit);
coder->memusage = LZMA_MEMUSAGE_BASE;

return LZMA_OK;
}
@@ -227,7 +234,7 @@ lzma_alone_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_alone_decoder(lzma_stream *strm, uint64_t memlimit)
{
lzma_next_strm_init2(lzma_alone_decoder_init, strm, memlimit, false);
lzma_next_strm_init(lzma_alone_decoder_init, strm, memlimit, false);

strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;

@@ -17,7 +17,7 @@


extern lzma_ret lzma_alone_decoder_init(
lzma_next_coder *next, lzma_allocator *allocator,
lzma_next_coder *next, const lzma_allocator *allocator,
uint64_t memlimit, bool picky);

#endif

@@ -17,7 +17,7 @@
#define ALONE_HEADER_SIZE (1 + 4 + 8)


struct lzma_coder_s {
typedef struct {
lzma_next_coder next;

enum {
@@ -27,17 +27,19 @@ struct lzma_coder_s {

size_t header_pos;
uint8_t header[ALONE_HEADER_SIZE];
};
} lzma_alone_coder;


static lzma_ret
alone_encode(lzma_coder *coder,
lzma_allocator *allocator lzma_attribute((__unused__)),
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size,
alone_encode(void *coder_ptr,
const lzma_allocator *allocator lzma_attribute((__unused__)),
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size,
lzma_action action)
{
lzma_alone_coder *coder = coder_ptr;

while (*out_pos < out_size)
switch (coder->sequence) {
case SEQ_HEADER:
@@ -65,8 +67,9 @@ alone_encode(lzma_coder *coder,


static void
alone_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
alone_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
lzma_alone_coder *coder = coder_ptr;
lzma_next_end(&coder->next, allocator);
lzma_free(coder, allocator);
return;
@@ -75,36 +78,31 @@ alone_encoder_end(lzma_coder *coder, lzma_allocator *allocator)

// At least for now, this is not used by any internal function.
static lzma_ret
alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
alone_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_options_lzma *options)
{
uint32_t d;

// Initialize the LZMA encoder.
const lzma_filter_info filters[2] = {
{ 0, &lzma_lzma_encoder_init, (void *)(options) },
{ 0, NULL, NULL }
};

lzma_next_coder_init(&alone_encoder_init, next, allocator);

if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (next->coder == NULL)
lzma_alone_coder *coder = next->coder;

if (coder == NULL) {
coder = lzma_alloc(sizeof(lzma_alone_coder), allocator);
if (coder == NULL)
return LZMA_MEM_ERROR;

next->coder = coder;
next->code = &alone_encode;
next->end = &alone_encoder_end;
next->coder->next = LZMA_NEXT_CODER_INIT;
coder->next = LZMA_NEXT_CODER_INIT;
}

// Basic initializations
next->coder->sequence = SEQ_HEADER;
next->coder->header_pos = 0;
coder->sequence = SEQ_HEADER;
coder->header_pos = 0;

// Encode the header:
// - Properties (1 byte)
if (lzma_lzma_lclppb_encode(options, next->coder->header))
if (lzma_lzma_lclppb_encode(options, coder->header))
return LZMA_OPTIONS_ERROR;

// - Dictionary size (4 bytes)
@@ -115,7 +113,7 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
// one is the next unless it is UINT32_MAX. While the header would
// allow any 32-bit integer, we do this to keep the decoder of liblzma
// accepting the resulting files.
d = options->dict_size - 1;
uint32_t d = options->dict_size - 1;
d |= d >> 2;
d |= d >> 3;
d |= d >> 4;
@@ -124,18 +122,28 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
if (d != UINT32_MAX)
++d;

unaligned_write32le(next->coder->header + 1, d);
unaligned_write32le(coder->header + 1, d);

// - Uncompressed size (always unknown and using EOPM)
memset(next->coder->header + 1 + 4, 0xFF, 8);
memset(coder->header + 1 + 4, 0xFF, 8);

return lzma_next_filter_init(&next->coder->next, allocator, filters);
// Initialize the LZMA encoder.
const lzma_filter_info filters[2] = {
{
.init = &lzma_lzma_encoder_init,
.options = (void *)(options),
}, {
.init = NULL,
}
};

return lzma_next_filter_init(&coder->next, allocator, filters);
}


/*
extern lzma_ret
lzma_alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_alone_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_options_alone *options)
{
lzma_next_coder_init(&alone_encoder_init, next, allocator, options);
@@ -146,7 +154,7 @@ lzma_alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_alone_encoder(lzma_stream *strm, const lzma_options_lzma *options)
{
lzma_next_strm_init1(alone_encoder_init, strm, options);
lzma_next_strm_init(alone_encoder_init, strm, options);

strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;

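For context on alone_encoder_init() above: the LZMA_Alone header stores the dictionary size as a plain 32-bit field, and the encoder rounds it up to the next value of the form 2^n or 2^n + 2^(n-1) so that liblzma's own decoder will accept the resulting file. A hedged sketch of that rounding; the >>8 and >>16 shifts fall outside the visible hunk and are assumed from upstream liblzma:

/* --- illustrative sketch, not part of this diff --- */
#include <stdint.h>

static uint32_t
round_alone_dict_size(uint32_t dict_size)
{
uint32_t d = dict_size - 1;
d |= d >> 2;
d |= d >> 3;
d |= d >> 4;
d |= d >> 8;  /* assumed, hidden by the hunk boundary */
d |= d >> 16; /* assumed, hidden by the hunk boundary */
if (d != UINT32_MAX)
++d;
return d; /* e.g. 5 MiB in -> 6 MiB (2^22 + 2^21) out */
}
/* --- end of sketch --- */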
@@ -14,7 +14,7 @@
#include "alone_decoder.h"


struct lzma_coder_s {
typedef struct {
/// Stream decoder or LZMA_Alone decoder
lzma_next_coder next;

@@ -26,15 +26,17 @@ struct lzma_coder_s {
SEQ_CODE,
SEQ_FINISH,
} sequence;
};
} lzma_auto_coder;


static lzma_ret
auto_decode(lzma_coder *coder, lzma_allocator *allocator,
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
auto_decode(void *coder_ptr, const lzma_allocator *allocator,
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action)
{
lzma_auto_coder *coder = coder_ptr;

switch (coder->sequence) {
case SEQ_INIT:
if (*in_pos >= in_size)
@@ -100,8 +102,9 @@ auto_decode(lzma_coder *coder, lzma_allocator *allocator,


static void
auto_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
auto_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
lzma_auto_coder *coder = coder_ptr;
lzma_next_end(&coder->next, allocator);
lzma_free(coder, allocator);
return;
@@ -109,8 +112,10 @@ auto_decoder_end(lzma_coder *coder, lzma_allocator *allocator)


static lzma_check
auto_decoder_get_check(const lzma_coder *coder)
auto_decoder_get_check(const void *coder_ptr)
{
const lzma_auto_coder *coder = coder_ptr;

// It is LZMA_Alone if get_check is NULL.
return coder->next.get_check == NULL ? LZMA_CHECK_NONE
: coder->next.get_check(coder->next.coder);
@@ -118,9 +123,11 @@ auto_decoder_get_check(const lzma_coder *coder)


static lzma_ret
auto_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
auto_decoder_memconfig(void *coder_ptr, uint64_t *memusage,
uint64_t *old_memlimit, uint64_t new_memlimit)
{
lzma_auto_coder *coder = coder_ptr;

lzma_ret ret;

if (coder->next.memconfig != NULL) {
@@ -132,7 +139,10 @@ auto_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
// the current memory usage.
*memusage = LZMA_MEMUSAGE_BASE;
*old_memlimit = coder->memlimit;

ret = LZMA_OK;
if (new_memlimit != 0 && new_memlimit < *memusage)
ret = LZMA_MEMLIMIT_ERROR;
}

if (ret == LZMA_OK && new_memlimit != 0)
@@ -143,32 +153,31 @@ auto_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,


static lzma_ret
auto_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
auto_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
uint64_t memlimit, uint32_t flags)
{
lzma_next_coder_init(&auto_decoder_init, next, allocator);

if (memlimit == 0)
return LZMA_PROG_ERROR;

if (flags & ~LZMA_SUPPORTED_FLAGS)
return LZMA_OPTIONS_ERROR;

if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (next->coder == NULL)
lzma_auto_coder *coder = next->coder;
if (coder == NULL) {
coder = lzma_alloc(sizeof(lzma_auto_coder), allocator);
if (coder == NULL)
return LZMA_MEM_ERROR;

next->coder = coder;
next->code = &auto_decode;
next->end = &auto_decoder_end;
next->get_check = &auto_decoder_get_check;
next->memconfig = &auto_decoder_memconfig;
next->coder->next = LZMA_NEXT_CODER_INIT;
coder->next = LZMA_NEXT_CODER_INIT;
}

next->coder->memlimit = memlimit;
next->coder->flags = flags;
next->coder->sequence = SEQ_INIT;
coder->memlimit = my_max(1, memlimit);
coder->flags = flags;
coder->sequence = SEQ_INIT;

return LZMA_OK;
}
@@ -177,7 +186,7 @@ auto_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_auto_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
{
lzma_next_strm_init2(auto_decoder_init, strm, memlimit, flags);
lzma_next_strm_init(auto_decoder_init, strm, memlimit, flags);

strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;

@@ -14,13 +14,10 @@


extern LZMA_API(lzma_ret)
lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
lzma_block_buffer_decode(lzma_block *block, const lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
lzma_next_coder block_decoder;
lzma_ret ret;

if (in_pos == NULL || (in == NULL && *in_pos != in_size)
|| *in_pos > in_size || out_pos == NULL
|| (out == NULL && *out_pos != out_size)
@@ -28,8 +25,9 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
return LZMA_PROG_ERROR;

// Initialize the Block decoder.
block_decoder = LZMA_NEXT_CODER_INIT;
ret = lzma_block_decoder_init(&block_decoder, allocator, block);
lzma_next_coder block_decoder = LZMA_NEXT_CODER_INIT;
lzma_ret ret = lzma_block_decoder_init(
&block_decoder, allocator, block);

if (ret == LZMA_OK) {
// Save the positions so that we can restore them in case

@@ -10,6 +10,7 @@
//
///////////////////////////////////////////////////////////////////////////////

#include "block_buffer_encoder.h"
#include "block_encoder.h"
#include "filter_encoder.h"
#include "lzma2_encoder.h"
@@ -28,11 +29,9 @@
+ LZMA_CHECK_SIZE_MAX + 3) & ~3)


static lzma_vli
lzma2_bound(lzma_vli uncompressed_size)
static uint64_t
lzma2_bound(uint64_t uncompressed_size)
{
lzma_vli overhead;

// Prevent integer overflow in overhead calculation.
if (uncompressed_size > COMPRESSED_SIZE_MAX)
return 0;
@@ -41,7 +40,7 @@ lzma2_bound(lzma_vli uncompressed_size)
// uncompressed_size up to the next multiple of LZMA2_CHUNK_MAX,
// multiply by the size of per-chunk header, and add one byte for
// the end marker.
overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
const uint64_t overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
/ LZMA2_CHUNK_MAX)
* LZMA2_HEADER_UNCOMPRESSED + 1;

@@ -53,30 +52,36 @@ lzma2_bound(lzma_vli uncompressed_size)
}


extern LZMA_API(size_t)
lzma_block_buffer_bound(size_t uncompressed_size)
extern uint64_t
lzma_block_buffer_bound64(uint64_t uncompressed_size)
{
// For now, if the data doesn't compress, we always use uncompressed
// chunks of LZMA2. In future we may use Subblock filter too, but
// but for simplicity we probably will still use the same bound
// calculation even though Subblock filter would have slightly less
// overhead.
lzma_vli lzma2_size = lzma2_bound(uncompressed_size);
// If the data doesn't compress, we always use uncompressed
// LZMA2 chunks.
uint64_t lzma2_size = lzma2_bound(uncompressed_size);
if (lzma2_size == 0)
return 0;

// Take Block Padding into account.
lzma2_size = (lzma2_size + 3) & ~LZMA_VLI_C(3);
lzma2_size = (lzma2_size + 3) & ~UINT64_C(3);

#if SIZE_MAX < LZMA_VLI_MAX
// Catch the possible integer overflow on 32-bit systems. There's no
// overflow on 64-bit systems, because lzma2_bound() already takes
// No risk of integer overflow because lzma2_bound() already takes
// into account the size of the headers in the Block.
if (SIZE_MAX - HEADERS_BOUND < lzma2_size)
return HEADERS_BOUND + lzma2_size;
}


extern LZMA_API(size_t)
lzma_block_buffer_bound(size_t uncompressed_size)
{
uint64_t ret = lzma_block_buffer_bound64(uncompressed_size);

#if SIZE_MAX < UINT64_MAX
// Catch the possible integer overflow on 32-bit systems.
if (ret > SIZE_MAX)
return 0;
#endif

return HEADERS_BOUND + lzma2_size;
return ret;
}


@@ -84,17 +89,12 @@ static lzma_ret
block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
size_t in_pos = 0;
uint8_t control = 0x01; // Dictionary reset
lzma_filter *filters_orig;

// TODO: Figure out if the last filter is LZMA2 or Subblock and use
// that filter to encode the uncompressed chunks.

// Use LZMA2 uncompressed chunks. We wouldn't need a dictionary at
// all, but LZMA2 always requires a dictionary, so use the minimum
// value to minimize memory usage of the decoder.
lzma_options_lzma lzma2 = { LZMA_DICT_SIZE_MIN };
lzma_options_lzma lzma2 = {
.dict_size = LZMA_DICT_SIZE_MIN,
};

lzma_filter filters[2];
filters[0].id = LZMA_FILTER_LZMA2;
@@ -103,7 +103,7 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,

// Set the above filter options to *block temporarily so that we can
// encode the Block Header.
filters_orig = block->filters;
lzma_filter *filters_orig = block->filters;
block->filters = filters;

if (lzma_block_header_size(block) != LZMA_OK) {
@@ -132,17 +132,18 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
*out_pos += block->header_size;

// Encode the data using LZMA2 uncompressed chunks.
size_t in_pos = 0;
uint8_t control = 0x01; // Dictionary reset

while (in_pos < in_size) {
size_t copy_size;

// Control byte: Indicate uncompressed chunk, of which
// the first resets the dictionary.
out[(*out_pos)++] = control;
control = 0x02; // No dictionary reset

// Size of the uncompressed chunk
copy_size = my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
const size_t copy_size
= my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
out[(*out_pos)++] = (copy_size - 1) >> 8;
out[(*out_pos)++] = (copy_size - 1) & 0xFF;

@@ -163,27 +164,18 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,


static lzma_ret
block_encode_normal(lzma_block *block, lzma_allocator *allocator,
block_encode_normal(lzma_block *block, const lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
size_t out_start;
lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
lzma_ret ret;

// Find out the size of the Block Header.
block->compressed_size = lzma2_bound(in_size);
if (block->compressed_size == 0)
return LZMA_DATA_ERROR;

block->uncompressed_size = in_size;
return_if_error(lzma_block_header_size(block));

// Reserve space for the Block Header and skip it for now.
if (out_size - *out_pos <= block->header_size)
return LZMA_BUF_ERROR;

out_start = *out_pos;
const size_t out_start = *out_pos;
*out_pos += block->header_size;

// Limit out_size so that we stop encoding if the output would grow
@@ -193,7 +185,8 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,

// TODO: In many common cases this could be optimized to use
// significantly less memory.
ret = lzma_raw_encoder_init(
lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
lzma_ret ret = lzma_raw_encoder_init(
&raw_encoder, allocator, block->filters);

if (ret == LZMA_OK) {
@@ -227,15 +220,12 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
}


extern LZMA_API(lzma_ret)
lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
static lzma_ret
block_buffer_encode(lzma_block *block, const lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
uint8_t *out, size_t *out_pos, size_t out_size,
bool try_to_compress)
{
size_t check_size;
lzma_ret ret;
size_t i;

// Validate the arguments.
if (block == NULL || (in == NULL && in_size != 0) || out == NULL
|| out_pos == NULL || *out_pos > out_size)
@@ -243,11 +233,11 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,

// The contents of the structure may depend on the version so
// check the version before validating the contents of *block.
if (block->version != 0)
if (block->version > 1)
return LZMA_OPTIONS_ERROR;

if ((unsigned int)(block->check) > LZMA_CHECK_ID_MAX
|| block->filters == NULL)
|| (try_to_compress && block->filters == NULL))
return LZMA_PROG_ERROR;

if (!lzma_check_is_supported(block->check))
@@ -259,7 +249,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
out_size -= (out_size - *out_pos) & 3;

// Get the size of the Check field.
check_size = lzma_check_size(block->check);
const size_t check_size = lzma_check_size(block->check);
assert(check_size != UINT32_MAX);

// Reserve space for the Check field.
@@ -268,9 +258,19 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,

out_size -= check_size;

// Initialize block->uncompressed_size and calculate the worst-case
// value for block->compressed_size.
block->uncompressed_size = in_size;
block->compressed_size = lzma2_bound(in_size);
if (block->compressed_size == 0)
return LZMA_DATA_ERROR;

// Do the actual compression.
ret = block_encode_normal(block, allocator,
in, in_size, out, out_pos, out_size);
lzma_ret ret = LZMA_BUF_ERROR;
if (try_to_compress)
ret = block_encode_normal(block, allocator,
in, in_size, out, out_pos, out_size);

if (ret != LZMA_OK) {
// If the error was something else than output buffer
// becoming full, return the error now.
@@ -291,7 +291,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
// Block Padding. No buffer overflow here, because we already adjusted
// out_size so that (out_size - out_start) is a multiple of four.
// Thus, if the buffer is full, the loop body can never run.
for (i = (size_t)(block->compressed_size); i & 3; ++i) {
for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) {
assert(*out_pos < out_size);
out[(*out_pos)++] = 0x00;
}
@@ -313,3 +313,25 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,

return LZMA_OK;
}


extern LZMA_API(lzma_ret)
lzma_block_buffer_encode(lzma_block *block, const lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
return block_buffer_encode(block, allocator,
in, in_size, out, out_pos, out_size, true);
}


extern LZMA_API(lzma_ret)
lzma_block_uncomp_encode(lzma_block *block,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
// It won't allocate any memory from heap so no need
// for lzma_allocator.
return block_buffer_encode(block, NULL,
in, in_size, out, out_pos, out_size, false);
}

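To make the bound calculation in block_buffer_encoder.c concrete: when a Block falls back to uncompressed LZMA2 chunks, each started chunk pays one chunk header and the chunk sequence ends with a one-byte marker, which is what lzma2_bound() adds on top of the payload. A sketch under assumed LZMA2 constants (64 KiB uncompressed chunks, 3-byte chunk headers); the real constants come from the LZMA2 encoder headers, not from this diff:

/* --- illustrative sketch, not part of this diff --- */
#include <stdint.h>

#define CHUNK_MAX (UINT64_C(1) << 16) /* assumed uncompressed chunk size */
#define CHUNK_HEADER UINT64_C(3)      /* assumed control byte + 16-bit size */

static uint64_t
uncompressed_lzma2_bound(uint64_t size)
{
/* One header per started chunk, plus the end-of-stream marker byte. */
const uint64_t overhead =
((size + CHUNK_MAX - 1) / CHUNK_MAX) * CHUNK_HEADER + 1;
return size + overhead;
}
/* --- end of sketch --- */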
24	Utilities/cmliblzma/liblzma/common/block_buffer_encoder.h	Normal file
@@ -0,0 +1,24 @@
///////////////////////////////////////////////////////////////////////////////
//
/// \file block_buffer_encoder.h
/// \brief Single-call .xz Block encoder
//
// Author: Lasse Collin
//
// This file has been put into the public domain.
// You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_BLOCK_BUFFER_ENCODER_H
#define LZMA_BLOCK_BUFFER_ENCODER_H

#include "common.h"


/// uint64_t version of lzma_block_buffer_bound(). It is used by
/// stream_encoder_mt.c. Probably the original lzma_block_buffer_bound()
/// should have been 64-bit, but fixing it would break the ABI.
extern uint64_t lzma_block_buffer_bound64(uint64_t uncompressed_size);

#endif
@@ -15,7 +15,7 @@
#include "check.h"


struct lzma_coder_s {
typedef struct {
enum {
SEQ_CODE,
SEQ_PADDING,
@@ -45,7 +45,10 @@ struct lzma_coder_s {

/// Check of the uncompressed data
lzma_check_state check;
};

/// True if the integrity check won't be calculated and verified.
bool ignore_check;
} lzma_block_coder;


static inline bool
@@ -71,11 +74,13 @@ is_size_valid(lzma_vli size, lzma_vli reference)


static lzma_ret
block_decode(lzma_coder *coder, lzma_allocator *allocator,
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
block_decode(void *coder_ptr, const lzma_allocator *allocator,
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action)
{
lzma_block_coder *coder = coder_ptr;

switch (coder->sequence) {
case SEQ_CODE: {
const size_t in_start = *in_pos;
@@ -97,8 +102,9 @@ block_decode(lzma_coder *coder, lzma_allocator *allocator,
coder->block->uncompressed_size))
return LZMA_DATA_ERROR;

lzma_check_update(&coder->check, coder->block->check,
out + out_start, out_used);
if (!coder->ignore_check)
lzma_check_update(&coder->check, coder->block->check,
out + out_start, out_used);

if (ret != LZMA_STREAM_END)
return ret;
@@ -140,7 +146,9 @@ block_decode(lzma_coder *coder, lzma_allocator *allocator,
if (coder->block->check == LZMA_CHECK_NONE)
return LZMA_STREAM_END;

lzma_check_finish(&coder->check, coder->block->check);
if (!coder->ignore_check)
lzma_check_finish(&coder->check, coder->block->check);

coder->sequence = SEQ_CHECK;

// Fall through
@@ -155,7 +163,8 @@ block_decode(lzma_coder *coder, lzma_allocator *allocator,
// Validate the Check only if we support it.
// coder->check.buffer may be uninitialized
// when the Check ID is not supported.
if (lzma_check_is_supported(coder->block->check)
if (!coder->ignore_check
&& lzma_check_is_supported(coder->block->check)
&& memcmp(coder->block->raw_check,
coder->check.buffer.u8,
check_size) != 0)
@@ -170,8 +179,9 @@ block_decode(lzma_coder *coder, lzma_allocator *allocator,


static void
block_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
block_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
lzma_block_coder *coder = coder_ptr;
lzma_next_end(&coder->next, allocator);
lzma_free(coder, allocator);
return;
@@ -179,7 +189,7 @@ block_decoder_end(lzma_coder *coder, lzma_allocator *allocator)


extern lzma_ret
lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_block_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
lzma_block *block)
{
lzma_next_coder_init(&lzma_block_decoder_init, next, allocator);
@@ -191,27 +201,29 @@ lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|| !lzma_vli_is_valid(block->uncompressed_size))
return LZMA_PROG_ERROR;

// Allocate and initialize *next->coder if needed.
if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (next->coder == NULL)
// Allocate *next->coder if needed.
lzma_block_coder *coder = next->coder;
if (coder == NULL) {
coder = lzma_alloc(sizeof(lzma_block_coder), allocator);
if (coder == NULL)
return LZMA_MEM_ERROR;

next->coder = coder;
next->code = &block_decode;
next->end = &block_decoder_end;
next->coder->next = LZMA_NEXT_CODER_INIT;
coder->next = LZMA_NEXT_CODER_INIT;
}

// Basic initializations
next->coder->sequence = SEQ_CODE;
next->coder->block = block;
next->coder->compressed_size = 0;
next->coder->uncompressed_size = 0;
coder->sequence = SEQ_CODE;
coder->block = block;
coder->compressed_size = 0;
coder->uncompressed_size = 0;

// If Compressed Size is not known, we calculate the maximum allowed
// value so that encoded size of the Block (including Block Padding)
// is still a valid VLI and a multiple of four.
next->coder->compressed_limit
coder->compressed_limit
= block->compressed_size == LZMA_VLI_UNKNOWN
? (LZMA_VLI_MAX & ~LZMA_VLI_C(3))
- block->header_size
@@ -221,11 +233,14 @@ lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
// Initialize the check. It's caller's problem if the Check ID is not
// supported, and the Block decoder cannot verify the Check field.
// Caller can test lzma_check_is_supported(block->check).
next->coder->check_pos = 0;
lzma_check_init(&next->coder->check, block->check);
coder->check_pos = 0;
lzma_check_init(&coder->check, block->check);

coder->ignore_check = block->version >= 1
? block->ignore_check : false;

// Initialize the filter chain.
return lzma_raw_decoder_init(&next->coder->next, allocator,
return lzma_raw_decoder_init(&coder->next, allocator,
block->filters);
}

@@ -233,7 +248,7 @@ lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret)
lzma_block_decoder(lzma_stream *strm, lzma_block *block)
{
lzma_next_strm_init1(lzma_block_decoder_init, strm, block);
lzma_next_strm_init(lzma_block_decoder_init, strm, block);

strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;

@@ -17,6 +17,6 @@


extern lzma_ret lzma_block_decoder_init(lzma_next_coder *next,
lzma_allocator *allocator, lzma_block *block);
const lzma_allocator *allocator, lzma_block *block);

#endif

@@ -15,7 +15,7 @@
#include "check.h"


struct lzma_coder_s {
typedef struct {
/// The filters in the chain; initialized with lzma_raw_decoder_init().
lzma_next_coder next;

@@ -41,15 +41,17 @@ struct lzma_coder_s {

/// Check of the uncompressed data
lzma_check_state check;
};
} lzma_block_coder;


static lzma_ret
block_encode(lzma_coder *coder, lzma_allocator *allocator,
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
block_encode(void *coder_ptr, const lzma_allocator *allocator,
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action)
{
lzma_block_coder *coder = coder_ptr;

// Check that our amount of input stays in proper limits.
if (LZMA_VLI_MAX - coder->uncompressed_size < in_size - *in_pos)
return LZMA_DATA_ERROR;
@@ -134,8 +136,9 @@ block_encode(lzma_coder *coder, lzma_allocator *allocator,


static void
block_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
block_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
lzma_block_coder *coder = coder_ptr;
lzma_next_end(&coder->next, allocator);
lzma_free(coder, allocator);
return;
@@ -143,10 +146,12 @@ block_encoder_end(lzma_coder *coder, lzma_allocator *allocator)


static lzma_ret
block_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
block_encoder_update(void *coder_ptr, const lzma_allocator *allocator,
const lzma_filter *filters lzma_attribute((__unused__)),
const lzma_filter *reversed_filters)
{
lzma_block_coder *coder = coder_ptr;

if (coder->sequence != SEQ_CODE)
return LZMA_PROG_ERROR;

@@ -156,7 +161,7 @@ block_encoder_update(lzma_coder *coder, lzma_allocator *allocator,


extern lzma_ret
lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_block_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
lzma_block *block)
{
lzma_next_coder_init(&lzma_block_encoder_init, next, allocator);
@@ -166,7 +171,7 @@ lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,

// The contents of the structure may depend on the version so
// check the version first.
if (block->version != 0)
if (block->version > 1)
return LZMA_OPTIONS_ERROR;

// If the Check ID is not supported, we cannot calculate the check and
@@ -178,37 +183,38 @@ lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
return LZMA_UNSUPPORTED_CHECK;

// Allocate and initialize *next->coder if needed.
if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (next->coder == NULL)
lzma_block_coder *coder = next->coder;
if (coder == NULL) {
coder = lzma_alloc(sizeof(lzma_block_coder), allocator);
if (coder == NULL)
return LZMA_MEM_ERROR;

next->coder = coder;
next->code = &block_encode;
next->end = &block_encoder_end;
next->update = &block_encoder_update;
next->coder->next = LZMA_NEXT_CODER_INIT;
coder->next = LZMA_NEXT_CODER_INIT;
}

// Basic initializations
next->coder->sequence = SEQ_CODE;
next->coder->block = block;
next->coder->compressed_size = 0;
next->coder->uncompressed_size = 0;
next->coder->pos = 0;
coder->sequence = SEQ_CODE;
coder->block = block;
coder->compressed_size = 0;
coder->uncompressed_size = 0;
coder->pos = 0;

// Initialize the check
lzma_check_init(&next->coder->check, block->check);
lzma_check_init(&coder->check, block->check);

// Initialize the requested filters.
return lzma_raw_encoder_init(&next->coder->next, allocator,
block->filters);
return lzma_raw_encoder_init(&coder->next, allocator, block->filters);
}


extern LZMA_API(lzma_ret)
lzma_block_encoder(lzma_stream *strm, lzma_block *block)
{
lzma_next_strm_init1(lzma_block_encoder_init, strm, block);
lzma_next_strm_init(lzma_block_encoder_init, strm, block);

strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true;

@@ -42,6 +42,6 @@


extern lzma_ret lzma_block_encoder_init(lzma_next_coder *next,
lzma_allocator *allocator, lzma_block *block);
const lzma_allocator *allocator, lzma_block *block);

#endif

@@ -15,14 +15,12 @@


static void
free_properties(lzma_block *block, lzma_allocator *allocator)
free_properties(lzma_block *block, const lzma_allocator *allocator)
{
size_t i;

// Free allocated filter options. The last array member is not
// touched after the initialization in the beginning of
// lzma_block_header_decode(), so we don't need to touch that here.
for (i = 0; i < LZMA_FILTERS_MAX; ++i) {
for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) {
lzma_free(block->filters[i].options, allocator);
block->filters[i].id = LZMA_VLI_UNKNOWN;
block->filters[i].options = NULL;
@@ -34,15 +32,8 @@ free_properties(lzma_block *block, lzma_allocator *allocator)

extern LZMA_API(lzma_ret)
lzma_block_header_decode(lzma_block *block,
lzma_allocator *allocator, const uint8_t *in)
const lzma_allocator *allocator, const uint8_t *in)
{
const size_t filter_count = (in[1] & 3) + 1;
size_t in_size;
size_t i;

// Start after the Block Header Size and Block Flags fields.
size_t in_pos = 2;

// NOTE: We consider the header to be corrupt not only when the
// CRC32 doesn't match, but also when variable-length integers
// are invalid or over 63 bits, or if the header is too small
@@ -50,13 +41,21 @@ lzma_block_header_decode(lzma_block *block,

// Initialize the filter options array. This way the caller can
// safely free() the options even if an error occurs in this function.
for (i = 0; i <= LZMA_FILTERS_MAX; ++i) {
for (size_t i = 0; i <= LZMA_FILTERS_MAX; ++i) {
block->filters[i].id = LZMA_VLI_UNKNOWN;
block->filters[i].options = NULL;
}

// Always zero for now.
block->version = 0;
// Versions 0 and 1 are supported. If a newer version was specified,
// we need to downgrade it.
if (block->version > 1)
block->version = 1;

// This isn't a Block Header option, but since the decompressor will
// read it if version >= 1, it's better to initialize it here than
// to expect the caller to do it since in almost all cases this
// should be false.
block->ignore_check = false;

// Validate Block Header Size and Check type. The caller must have
// already set these, so it is a programming error if this test fails.
@@ -65,7 +64,7 @@ lzma_block_header_decode(lzma_block *block,
return LZMA_PROG_ERROR;

// Exclude the CRC32 field.
in_size = block->header_size - 4;
const size_t in_size = block->header_size - 4;

// Verify CRC32
if (lzma_crc32(in, in_size, 0) != unaligned_read32le(in + in_size))
@@ -75,6 +74,9 @@ lzma_block_header_decode(lzma_block *block,
if (in[1] & 0x3C)
return LZMA_OPTIONS_ERROR;

// Start after the Block Header Size and Block Flags fields.
size_t in_pos = 2;

// Compressed Size
if (in[1] & 0x40) {
return_if_error(lzma_vli_decode(&block->compressed_size,
@@ -96,7 +98,8 @@ lzma_block_header_decode(lzma_block *block,
block->uncompressed_size = LZMA_VLI_UNKNOWN;

// Filter Flags
for (i = 0; i < filter_count; ++i) {
const size_t filter_count = (in[1] & 3) + 1;
for (size_t i = 0; i < filter_count; ++i) {
const lzma_ret ret = lzma_filter_flags_decode(
&block->filters[i], allocator,
in, &in_pos, in_size);

@@ -17,14 +17,12 @@
extern LZMA_API(lzma_ret)
lzma_block_header_size(lzma_block *block)
{
size_t i;
if (block->version > 1)
return LZMA_OPTIONS_ERROR;

// Block Header Size + Block Flags + CRC32.
uint32_t size = 1 + 1 + 4;

if (block->version != 0)
return LZMA_OPTIONS_ERROR;

// Compressed Size
if (block->compressed_size != LZMA_VLI_UNKNOWN) {
const uint32_t add = lzma_vli_size(block->compressed_size);
@@ -47,13 +45,12 @@ lzma_block_header_size(lzma_block *block)
if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
return LZMA_PROG_ERROR;

for (i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
uint32_t add;

for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
// Don't allow too many filters.
if (i == LZMA_FILTERS_MAX)
return LZMA_PROG_ERROR;

uint32_t add;
return_if_error(lzma_filter_flags_size(&add,
block->filters + i));

@@ -76,23 +73,20 @@ lzma_block_header_size(lzma_block *block)
extern LZMA_API(lzma_ret)
lzma_block_header_encode(const lzma_block *block, uint8_t *out)
{
size_t out_size;
size_t out_pos = 2;
size_t filter_count = 0;

// Validate everything but filters.
if (lzma_block_unpadded_size(block) == 0
|| !lzma_vli_is_valid(block->uncompressed_size))
return LZMA_PROG_ERROR;

// Indicate the size of the buffer _excluding_ the CRC32 field.
out_size = block->header_size - 4;
const size_t out_size = block->header_size - 4;

// Store the Block Header Size.
out[0] = out_size / 4;

// We write Block Flags in pieces.
out[1] = 0x00;
size_t out_pos = 2;

// Compressed Size
if (block->compressed_size != LZMA_VLI_UNKNOWN) {
@@ -114,6 +108,7 @@ lzma_block_header_encode(const lzma_block *block, uint8_t *out)
if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
return LZMA_PROG_ERROR;

size_t filter_count = 0;
do {
// There can be a maximum of four filters.
if (filter_count == LZMA_FILTERS_MAX)

@@ -17,14 +17,11 @@
extern LZMA_API(lzma_ret)
lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
{
uint32_t container_size;
lzma_vli compressed_size;

// Validate everything but Uncompressed Size and filters.
if (lzma_block_unpadded_size(block) == 0)
return LZMA_PROG_ERROR;

container_size = block->header_size
const uint32_t container_size = block->header_size
+ lzma_check_size(block->check);

// Validate that Compressed Size will be greater than zero.
@@ -34,7 +31,7 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
// Calculate what Compressed Size is supposed to be.
// If Compressed Size was present in Block Header,
// compare that the new value matches it.
compressed_size = unpadded_size - container_size;
const lzma_vli compressed_size = unpadded_size - container_size;
if (block->compressed_size != LZMA_VLI_UNKNOWN
&& block->compressed_size != compressed_size)
return LZMA_DATA_ERROR;
@@ -48,15 +45,13 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
extern LZMA_API(lzma_vli)
lzma_block_unpadded_size(const lzma_block *block)
{
lzma_vli unpadded_size;

// Validate the values that we are interested in i.e. all but
// Uncompressed Size and the filters.
//
// NOTE: This function is used for validation too, so it is
// essential that these checks are always done even if
// Compressed Size is unknown.
if (block == NULL || block->version != 0
if (block == NULL || block->version > 1
|| block->header_size < LZMA_BLOCK_HEADER_SIZE_MIN
|| block->header_size > LZMA_BLOCK_HEADER_SIZE_MAX
|| (block->header_size & 3)
@@ -71,7 +66,7 @@ lzma_block_unpadded_size(const lzma_block *block)
return LZMA_VLI_UNKNOWN;

// Calculate Unpadded Size and validate it.
unpadded_size = block->compressed_size
const lzma_vli unpadded_size = block->compressed_size
+ block->header_size
+ lzma_check_size(block->check);

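The two functions above are inverses of one identity: Unpadded Size is the Block Header plus the compressed data plus the Check field, without Block Padding, so recovering Compressed Size is a subtraction once the container size is known. A minimal sketch of that relationship (field names simplified; not code from this commit):

/* --- illustrative sketch, not part of this diff --- */
#include <stdint.h>

static uint64_t
compressed_size_from_unpadded(uint64_t unpadded_size,
uint32_t header_size, uint32_t check_size)
{
const uint64_t container_size = (uint64_t)header_size + check_size;
return unpadded_size - container_size; /* caller validates it is > 0 */
}
/* --- end of sketch --- */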
@@ -36,14 +36,14 @@ lzma_version_string(void)
///////////////////////

extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1)
lzma_alloc(size_t size, lzma_allocator *allocator)
lzma_alloc(size_t size, const lzma_allocator *allocator)
{
void *ptr;

// Some malloc() variants return NULL if called with size == 0.
if (size == 0)
size = 1;

void *ptr;

if (allocator != NULL && allocator->alloc != NULL)
ptr = allocator->alloc(allocator->opaque, 1, size);
else
@@ -53,8 +53,29 @@ lzma_alloc(size_t size, lzma_allocator *allocator)
}


extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1)
lzma_alloc_zero(size_t size, const lzma_allocator *allocator)
{
// Some calloc() variants return NULL if called with size == 0.
if (size == 0)
size = 1;

void *ptr;

if (allocator != NULL && allocator->alloc != NULL) {
ptr = allocator->alloc(allocator->opaque, 1, size);
if (ptr != NULL)
memzero(ptr, size);
} else {
ptr = calloc(1, size);
}

return ptr;
}


extern void
lzma_free(void *ptr, lzma_allocator *allocator)
lzma_free(void *ptr, const lzma_allocator *allocator)
{
if (allocator != NULL && allocator->free != NULL)
allocator->free(allocator->opaque, ptr);
@@ -70,9 +91,9 @@ lzma_free(void *ptr, lzma_allocator *allocator)
//////////

extern size_t
lzma_bufcpy(const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size)
lzma_bufcpy(const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size)
{
const size_t in_avail = in_size - *in_pos;
const size_t out_avail = out_size - *out_pos;
@@ -88,7 +109,7 @@ lzma_bufcpy(const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,


extern lzma_ret
lzma_next_filter_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_next_filter_init(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_filter_info *filters)
{
lzma_next_coder_init(filters[0].init, next, allocator);
@@ -99,7 +120,7 @@ lzma_next_filter_init(lzma_next_coder *next, lzma_allocator *allocator,


extern lzma_ret
lzma_next_filter_update(lzma_next_coder *next, lzma_allocator *allocator,
lzma_next_filter_update(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_filter *reversed_filters)
{
// Check that the application isn't trying to change the Filter ID.
@@ -117,7 +138,7 @@ lzma_next_filter_update(lzma_next_coder *next, lzma_allocator *allocator,


extern void
lzma_next_end(lzma_next_coder *next, lzma_allocator *allocator)
lzma_next_end(lzma_next_coder *next, const lzma_allocator *allocator)
{
if (next->init != (uintptr_t)(NULL)) {
// To avoid tiny end functions that simply call
@@ -156,10 +177,8 @@ lzma_strm_init(lzma_stream *strm)
strm->internal->next = LZMA_NEXT_CODER_INIT;
}

strm->internal->supported_actions[LZMA_RUN] = false;
strm->internal->supported_actions[LZMA_SYNC_FLUSH] = false;
strm->internal->supported_actions[LZMA_FULL_FLUSH] = false;
strm->internal->supported_actions[LZMA_FINISH] = false;
memzero(strm->internal->supported_actions,
sizeof(strm->internal->supported_actions));
strm->internal->sequence = ISEQ_RUN;
strm->internal->allow_buf_error = false;

@@ -173,16 +192,12 @@ lzma_strm_init(lzma_stream *strm)
extern LZMA_API(lzma_ret)
lzma_code(lzma_stream *strm, lzma_action action)
{
size_t in_pos = 0;
size_t out_pos = 0;
lzma_ret ret;

// Sanity checks
if ((strm->next_in == NULL && strm->avail_in != 0)
|| (strm->next_out == NULL && strm->avail_out != 0)
|| strm->internal == NULL
|| strm->internal->next.code == NULL
|| (unsigned int)(action) > LZMA_FINISH
|| (unsigned int)(action) > LZMA_ACTION_MAX
|| !strm->internal->supported_actions[action])
return LZMA_PROG_ERROR;

@@ -217,6 +232,10 @@ lzma_code(lzma_stream *strm, lzma_action action)
case LZMA_FINISH:
strm->internal->sequence = ISEQ_FINISH;
break;

case LZMA_FULL_BARRIER:
strm->internal->sequence = ISEQ_FULL_BARRIER;
break;
}

break;
@@ -244,6 +263,13 @@ lzma_code(lzma_stream *strm, lzma_action action)

break;

case ISEQ_FULL_BARRIER:
if (action != LZMA_FULL_BARRIER
|| strm->internal->avail_in != strm->avail_in)
return LZMA_PROG_ERROR;

break;

case ISEQ_END:
return LZMA_STREAM_END;

@@ -252,7 +278,9 @@ lzma_code(lzma_stream *strm, lzma_action action)
return LZMA_PROG_ERROR;
}

ret = strm->internal->next.code(
size_t in_pos = 0;
size_t out_pos = 0;
lzma_ret ret = strm->internal->next.code(
strm->internal->next.coder, strm->allocator,
strm->next_in, &in_pos, strm->avail_in,
strm->next_out, &out_pos, strm->avail_out, action);
@@ -267,7 +295,9 @@ lzma_code(lzma_stream *strm, lzma_action action)

strm->internal->avail_in = strm->avail_in;

switch (ret) {
// Cast is needed to silence a warning about LZMA_TIMED_OUT, which
// isn't part of lzma_ret enumeration.
switch ((unsigned int)(ret)) {
case LZMA_OK:
// Don't return LZMA_BUF_ERROR when it happens the first time.
// This is to avoid returning LZMA_BUF_ERROR when avail_out
@@ -283,9 +313,16 @@ lzma_code(lzma_stream *strm, lzma_action action)
}
break;

case LZMA_TIMED_OUT:
strm->internal->allow_buf_error = false;
ret = LZMA_OK;
break;

case LZMA_STREAM_END:
if (strm->internal->sequence == ISEQ_SYNC_FLUSH
|| strm->internal->sequence == ISEQ_FULL_FLUSH)
|| strm->internal->sequence == ISEQ_FULL_FLUSH
|| strm->internal->sequence
== ISEQ_FULL_BARRIER)
strm->internal->sequence = ISEQ_RUN;
else
strm->internal->sequence = ISEQ_END;
@@ -325,6 +362,22 @@ lzma_end(lzma_stream *strm)
}


extern LZMA_API(void)
lzma_get_progress(lzma_stream *strm,
uint64_t *progress_in, uint64_t *progress_out)
{
if (strm->internal->next.get_progress != NULL) {
strm->internal->next.get_progress(strm->internal->next.coder,
progress_in, progress_out);
} else {
*progress_in = strm->total_in;
*progress_out = strm->total_out;
}

return;
}


extern LZMA_API(lzma_check)
lzma_get_check(const lzma_stream *strm)
{
@@ -382,8 +435,10 @@ lzma_memlimit_set(lzma_stream *strm, uint64_t new_memlimit)
|| strm->internal->next.memconfig == NULL)
return LZMA_PROG_ERROR;

if (new_memlimit != 0 && new_memlimit < LZMA_MEMUSAGE_BASE)
return LZMA_MEMLIMIT_ERROR;
// Zero is a special value that cannot be used as an actual limit.
// If 0 was specified, use 1 instead.
if (new_memlimit == 0)
new_memlimit = 1;

return strm->internal->next.memconfig(strm->internal->next.coder,
&memusage, &old_memlimit, new_memlimit);

@@ -48,6 +48,13 @@
|
||||
#define LZMA_BUFFER_SIZE 4096
|
||||
|
||||
|
||||
/// Maximum number of worker threads within one multithreaded component.
|
||||
/// The limit exists solely to make it simpler to prevent integer overflows
|
||||
/// when allocating structures etc. This should be big enough for now...
|
||||
/// the code won't scale anywhere close to this number anyway.
|
||||
#define LZMA_THREADS_MAX 16384
|
||||
|
||||
|
||||
/// Starting value for memory usage estimates. Instead of calculating size
|
||||
/// of _every_ structure and taking into account malloc() overhead etc., we
|
||||
/// add a base size to all memory usage estimates. It's not very accurate
|
||||
@@ -65,12 +72,20 @@
|
||||
( LZMA_TELL_NO_CHECK \
|
||||
| LZMA_TELL_UNSUPPORTED_CHECK \
|
||||
| LZMA_TELL_ANY_CHECK \
|
||||
| LZMA_IGNORE_CHECK \
|
||||
| LZMA_CONCATENATED )
|
||||
|
||||
|
||||
/// Type of encoder/decoder specific data; the actual structure is defined
|
||||
/// differently in different coders.
|
||||
typedef struct lzma_coder_s lzma_coder;
|
||||
/// Largest valid lzma_action value as unsigned integer.
|
||||
#define LZMA_ACTION_MAX ((unsigned int)(LZMA_FULL_BARRIER))
|
||||
|
||||
|
||||
/// Special return value (lzma_ret) to indicate that a timeout was reached
|
||||
/// and lzma_code() must not return LZMA_BUF_ERROR. This is converted to
|
||||
/// LZMA_OK in lzma_code(). This is not in the lzma_ret enumeration because
|
||||
/// there's no need to have it in the public API.
|
||||
#define LZMA_TIMED_OUT 32
|
||||
|
||||
|
||||
typedef struct lzma_next_coder_s lzma_next_coder;
|
||||
|
||||
@@ -79,7 +94,7 @@ typedef struct lzma_filter_info_s lzma_filter_info;

/// Type of a function used to initialize a filter encoder or decoder
typedef lzma_ret (*lzma_init_function)(
lzma_next_coder *next, lzma_allocator *allocator,
lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_filter_info *filters);

/// Type of a function to do some kind of coding work (filters, Stream,
@@ -87,15 +102,15 @@ typedef lzma_ret (*lzma_init_function)(
/// input and output buffers, but for simplicity they still use this same
/// function prototype.
typedef lzma_ret (*lzma_code_function)(
lzma_coder *coder, lzma_allocator *allocator,
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size,
void *coder, const lzma_allocator *allocator,
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size,
lzma_action action);

/// Type of a function to free the memory allocated for the coder
typedef void (*lzma_end_function)(
lzma_coder *coder, lzma_allocator *allocator);
void *coder, const lzma_allocator *allocator);


/// Raw coder validates and converts an array of lzma_filter structures to
@@ -118,7 +133,7 @@ struct lzma_filter_info_s {
/// Hold data and function pointers of the next filter in the chain.
struct lzma_next_coder_s {
/// Pointer to coder-specific data
lzma_coder *coder;
void *coder;

/// Filter ID. This is LZMA_VLI_UNKNOWN when this structure doesn't
/// point to a filter coder.
@@ -138,35 +153,41 @@ struct lzma_next_coder_s {
/// lzma_next_coder.coder.
lzma_end_function end;

/// Pointer to a function to get progress information. If this is NULL,
/// lzma_stream.total_in and .total_out are used instead.
void (*get_progress)(void *coder,
uint64_t *progress_in, uint64_t *progress_out);

/// Pointer to function to return the type of the integrity check.
/// Most coders won't support this.
lzma_check (*get_check)(const lzma_coder *coder);
lzma_check (*get_check)(const void *coder);

/// Pointer to function to get and/or change the memory usage limit.
/// If new_memlimit == 0, the limit is not changed.
lzma_ret (*memconfig)(lzma_coder *coder, uint64_t *memusage,
lzma_ret (*memconfig)(void *coder, uint64_t *memusage,
uint64_t *old_memlimit, uint64_t new_memlimit);

/// Update the filter-specific options or the whole filter chain
/// in the encoder.
lzma_ret (*update)(lzma_coder *coder, lzma_allocator *allocator,
lzma_ret (*update)(void *coder, const lzma_allocator *allocator,
const lzma_filter *filters,
const lzma_filter *reversed_filters);
};


/// Constant to initialize lzma_next_coder structure
static const lzma_next_coder LZMA_NEXT_CODER_INIT =
{
NULL,
LZMA_VLI_UNKNOWN,
(uintptr_t)(NULL),
NULL,
NULL,
NULL,
NULL,
NULL,
};
/// Macro to initialize lzma_next_coder structure
#define LZMA_NEXT_CODER_INIT \
(lzma_next_coder){ \
.coder = NULL, \
.init = (uintptr_t)(NULL), \
.id = LZMA_VLI_UNKNOWN, \
.code = NULL, \
.end = NULL, \
.get_progress = NULL, \
.get_check = NULL, \
.memconfig = NULL, \
.update = NULL, \
}


/// Internal data for lzma_strm_init, lzma_code, and lzma_end. A pointer to
@@ -184,6 +205,7 @@ struct lzma_internal_s {
ISEQ_SYNC_FLUSH,
ISEQ_FULL_FLUSH,
ISEQ_FINISH,
ISEQ_FULL_BARRIER,
ISEQ_END,
ISEQ_ERROR,
} sequence;
@@ -194,7 +216,7 @@ struct lzma_internal_s {
size_t avail_in;

/// Indicates which lzma_action values are allowed by next.code.
bool supported_actions[4];
bool supported_actions[LZMA_ACTION_MAX + 1];

/// If true, lzma_code will return LZMA_BUF_ERROR if no progress was
/// made (no input consumed and no output produced by next.code).
@@ -203,15 +225,21 @@ struct lzma_internal_s {


/// Allocates memory
extern void *lzma_alloc(size_t size, lzma_allocator *allocator)
extern void *lzma_alloc(size_t size, const lzma_allocator *allocator)
lzma_attribute((__malloc__)) lzma_attr_alloc_size(1);

/// Allocates memory and zeroes it (like calloc()). This can be faster
/// than lzma_alloc() + memzero() while being backward compatible with
/// custom allocators.
extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1)
lzma_alloc_zero(size_t size, const lzma_allocator *allocator);

/// Frees memory
extern void lzma_free(void *ptr, lzma_allocator *allocator);
extern void lzma_free(void *ptr, const lzma_allocator *allocator);


/// Allocates strm->internal if it is NULL, and initializes *strm and
/// strm->internal. This function is only called via lzma_next_strm_init2 macro.
/// strm->internal. This function is only called via lzma_next_strm_init macro.
extern lzma_ret lzma_strm_init(lzma_stream *strm);

/// Initializes the next filter in the chain, if any. This takes care of
@@ -219,24 +247,26 @@ extern lzma_ret lzma_strm_init(lzma_stream *strm);
/// than the filter being initialized now. This way the actual filter
/// initialization functions don't need to use lzma_next_coder_init macro.
extern lzma_ret lzma_next_filter_init(lzma_next_coder *next,
lzma_allocator *allocator, const lzma_filter_info *filters);
const lzma_allocator *allocator,
const lzma_filter_info *filters);

/// Update the next filter in the chain, if any. This checks that
/// the application is not trying to change the Filter IDs.
extern lzma_ret lzma_next_filter_update(
lzma_next_coder *next, lzma_allocator *allocator,
lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_filter *reversed_filters);

/// Frees the memory allocated for next->coder either using next->end or,
/// if next->end is NULL, using lzma_free.
extern void lzma_next_end(lzma_next_coder *next, lzma_allocator *allocator);
extern void lzma_next_end(lzma_next_coder *next,
const lzma_allocator *allocator);


/// Copy as much data as possible from in[] to out[] and update *in_pos
/// and *out_pos accordingly. Returns the number of bytes copied.
extern size_t lzma_bufcpy(const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
size_t in_size, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size);
extern size_t lzma_bufcpy(const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size);


/// \brief Return if expression doesn't evaluate to LZMA_OK
@@ -269,37 +299,15 @@ do { \
/// (The function being called will use lzma_next_coder_init()). If
/// initialization fails, memory that wasn't freed by func() is freed
/// along strm->internal.
#define lzma_next_strm_init1(func, strm, arg1) \
#define lzma_next_strm_init(func, strm, ...) \
do { \
lzma_ret ret_; \
return_if_error(lzma_strm_init(strm)); \
ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1); \
if (ret_ != LZMA_OK) { \
lzma_end(strm); \
return ret_; \
} \
} while (0)

#define lzma_next_strm_init2(func, strm, arg1, arg2) \
do { \
lzma_ret ret_; \
return_if_error(lzma_strm_init(strm)); \
ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2); \
if (ret_ != LZMA_OK) { \
lzma_end(strm); \
return ret_; \
} \
} while (0)

#define lzma_next_strm_init3(func, strm, arg1, arg2, arg3) \
do { \
lzma_ret ret_; \
return_if_error(lzma_strm_init(strm)); \
ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2, arg3); \
if (ret_ != LZMA_OK) { \
lzma_end(strm); \
return ret_; \
} \
return_if_error(lzma_strm_init(strm)); \
const lzma_ret ret_ = func(&(strm)->internal->next, \
(strm)->allocator, __VA_ARGS__); \
if (ret_ != LZMA_OK) { \
lzma_end(strm); \
return ret_; \
} \
} while (0)

#endif

@@ -15,8 +15,8 @@

extern LZMA_API(lzma_ret)
lzma_easy_buffer_encode(uint32_t preset, lzma_check check,
lzma_allocator *allocator, const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
const lzma_allocator *allocator, const uint8_t *in,
size_t in_size, uint8_t *out, size_t *out_pos, size_t out_size)
{
lzma_options_easy opt_easy;
if (lzma_easy_preset(&opt_easy, preset))

@@ -11,7 +11,6 @@
///////////////////////////////////////////////////////////////////////////////

#include "easy_preset.h"
#include "stream_encoder.h"


extern LZMA_API(lzma_ret)

@@ -14,30 +14,27 @@


extern LZMA_API(lzma_ret)
lzma_raw_buffer_decode(const lzma_filter *filters, lzma_allocator *allocator,
lzma_raw_buffer_decode(
const lzma_filter *filters, const lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
lzma_next_coder next = LZMA_NEXT_CODER_INIT;
size_t in_start;
size_t out_start;
lzma_ret ret;

// Validate what isn't validated later in filter_common.c.
if (in == NULL || in_pos == NULL || *in_pos > in_size || out == NULL
|| out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR;

// Initialize the decoder.
lzma_next_coder next = LZMA_NEXT_CODER_INIT;
return_if_error(lzma_raw_decoder_init(&next, allocator, filters));

// Store the positions so that we can restore them if something
// goes wrong.
in_start = *in_pos;
out_start = *out_pos;
const size_t in_start = *in_pos;
const size_t out_start = *out_pos;

// Do the actual decoding and free decoder's memory.
ret = next.code(next.coder, allocator, in, in_pos, in_size,
lzma_ret ret = next.code(next.coder, allocator, in, in_pos, in_size,
out, out_pos, out_size, LZMA_FINISH);

if (ret == LZMA_STREAM_END) {

@@ -14,29 +14,27 @@


extern LZMA_API(lzma_ret)
lzma_raw_buffer_encode(const lzma_filter *filters, lzma_allocator *allocator,
const uint8_t *in, size_t in_size, uint8_t *out,
size_t *out_pos, size_t out_size)
lzma_raw_buffer_encode(
const lzma_filter *filters, const lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size)
{
lzma_next_coder next = LZMA_NEXT_CODER_INIT;
size_t out_start;
size_t in_pos = 0;
lzma_ret ret;

// Validate what isn't validated later in filter_common.c.
if ((in == NULL && in_size != 0) || out == NULL
|| out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR;

// Initialize the encoder
lzma_next_coder next = LZMA_NEXT_CODER_INIT;
return_if_error(lzma_raw_encoder_init(&next, allocator, filters));

// Store the output position so that we can restore it if
// something goes wrong.
out_start = *out_pos;
const size_t out_start = *out_pos;

// Do the actual encoding and free coder's memory.
ret = next.code(next.coder, allocator, in, &in_pos, in_size,
size_t in_pos = 0;
lzma_ret ret = next.code(next.coder, allocator, in, &in_pos, in_size,
out, out_pos, out_size, LZMA_FINISH);
lzma_next_end(&next, allocator);


@@ -36,101 +36,100 @@ static const struct {
|
||||
} features[] = {
|
||||
#if defined (HAVE_ENCODER_LZMA1) || defined(HAVE_DECODER_LZMA1)
|
||||
{
|
||||
LZMA_FILTER_LZMA1,
|
||||
sizeof(lzma_options_lzma),
|
||||
false,
|
||||
true,
|
||||
true,
|
||||
.id = LZMA_FILTER_LZMA1,
|
||||
.options_size = sizeof(lzma_options_lzma),
|
||||
.non_last_ok = false,
|
||||
.last_ok = true,
|
||||
.changes_size = true,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_LZMA2) || defined(HAVE_DECODER_LZMA2)
|
||||
{
|
||||
LZMA_FILTER_LZMA2,
|
||||
sizeof(lzma_options_lzma),
|
||||
false,
|
||||
true,
|
||||
true,
|
||||
.id = LZMA_FILTER_LZMA2,
|
||||
.options_size = sizeof(lzma_options_lzma),
|
||||
.non_last_ok = false,
|
||||
.last_ok = true,
|
||||
.changes_size = true,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_X86) || defined(HAVE_DECODER_X86)
|
||||
{
|
||||
LZMA_FILTER_X86,
|
||||
sizeof(lzma_options_bcj),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_X86,
|
||||
.options_size = sizeof(lzma_options_bcj),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_POWERPC) || defined(HAVE_DECODER_POWERPC)
|
||||
{
|
||||
LZMA_FILTER_POWERPC,
|
||||
sizeof(lzma_options_bcj),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_POWERPC,
|
||||
.options_size = sizeof(lzma_options_bcj),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_IA64) || defined(HAVE_DECODER_IA64)
|
||||
{
|
||||
LZMA_FILTER_IA64,
|
||||
sizeof(lzma_options_bcj),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_IA64,
|
||||
.options_size = sizeof(lzma_options_bcj),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_ARM) || defined(HAVE_DECODER_ARM)
|
||||
{
|
||||
LZMA_FILTER_ARM,
|
||||
sizeof(lzma_options_bcj),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_ARM,
|
||||
.options_size = sizeof(lzma_options_bcj),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_ARMTHUMB) || defined(HAVE_DECODER_ARMTHUMB)
|
||||
{
|
||||
LZMA_FILTER_ARMTHUMB,
|
||||
sizeof(lzma_options_bcj),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_ARMTHUMB,
|
||||
.options_size = sizeof(lzma_options_bcj),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_SPARC) || defined(HAVE_DECODER_SPARC)
|
||||
{
|
||||
LZMA_FILTER_SPARC,
|
||||
sizeof(lzma_options_bcj),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_SPARC,
|
||||
.options_size = sizeof(lzma_options_bcj),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
#if defined(HAVE_ENCODER_DELTA) || defined(HAVE_DECODER_DELTA)
|
||||
{
|
||||
LZMA_FILTER_DELTA,
|
||||
sizeof(lzma_options_delta),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
.id = LZMA_FILTER_DELTA,
|
||||
.options_size = sizeof(lzma_options_delta),
|
||||
.non_last_ok = true,
|
||||
.last_ok = false,
|
||||
.changes_size = false,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
LZMA_VLI_UNKNOWN
|
||||
.id = LZMA_VLI_UNKNOWN
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_filters_copy(const lzma_filter *src, lzma_filter *dest,
|
||||
lzma_allocator *allocator)
|
||||
const lzma_allocator *allocator)
|
||||
{
|
||||
size_t i;
|
||||
lzma_ret ret;
|
||||
|
||||
if (src == NULL || dest == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
lzma_ret ret;
|
||||
size_t i;
|
||||
for (i = 0; src[i].id != LZMA_VLI_UNKNOWN; ++i) {
|
||||
// There must be a maximum of four filters plus
|
||||
// the array terminator.
|
||||
@@ -194,6 +193,10 @@ error:
|
||||
static lzma_ret
|
||||
validate_chain(const lzma_filter *filters, size_t *count)
|
||||
{
|
||||
// There must be at least one filter.
|
||||
if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
// Number of non-last filters that may change the size of the data
|
||||
// significantly (that is, more than 1-2 % or so).
|
||||
size_t changes_size_count = 0;
|
||||
@@ -207,11 +210,6 @@ validate_chain(const lzma_filter *filters, size_t *count)
|
||||
bool last_ok = false;
|
||||
|
||||
size_t i = 0;
|
||||
|
||||
// There must be at least one filter.
|
||||
if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
do {
|
||||
size_t j;
|
||||
for (j = 0; filters[i].id != features[j].id; ++j)
|
||||
@@ -241,21 +239,18 @@ validate_chain(const lzma_filter *filters, size_t *count)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_raw_coder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *options,
|
||||
lzma_filter_find coder_find, bool is_encoder)
|
||||
{
|
||||
lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
|
||||
size_t count;
|
||||
size_t i;
|
||||
lzma_ret ret;
|
||||
|
||||
// Do some basic validation and get the number of filters.
|
||||
size_t count;
|
||||
return_if_error(validate_chain(options, &count));
|
||||
|
||||
// Set the filter functions and copy the options pointer.
|
||||
lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
|
||||
if (is_encoder) {
|
||||
for (i = 0; i < count; ++i) {
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
// The order of the filters is reversed in the
|
||||
// encoder. It allows more efficient handling
|
||||
// of the uncompressed data.
|
||||
@@ -271,7 +266,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
filters[j].options = options[i].options;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < count; ++i) {
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
const lzma_filter_coder *const fc
|
||||
= coder_find(options[i].id);
|
||||
if (fc == NULL || fc->init == NULL)
|
||||
@@ -288,7 +283,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
filters[count].init = NULL;
|
||||
|
||||
// Initialize the filters.
|
||||
ret = lzma_next_filter_init(next, allocator, filters);
|
||||
const lzma_ret ret = lzma_next_filter_init(next, allocator, filters);
|
||||
if (ret != LZMA_OK)
|
||||
lzma_next_end(next, allocator);
|
||||
|
||||
@@ -300,9 +295,6 @@ extern uint64_t
|
||||
lzma_raw_coder_memusage(lzma_filter_find coder_find,
|
||||
const lzma_filter *filters)
|
||||
{
|
||||
uint64_t total = 0;
|
||||
size_t i = 0;
|
||||
|
||||
// The chain has to have at least one filter.
|
||||
{
|
||||
size_t tmp;
|
||||
@@ -310,6 +302,9 @@ lzma_raw_coder_memusage(lzma_filter_find coder_find,
|
||||
return UINT64_MAX;
|
||||
}
|
||||
|
||||
uint64_t total = 0;
|
||||
size_t i = 0;
|
||||
|
||||
do {
|
||||
const lzma_filter_coder *const fc
|
||||
= coder_find(filters[i].id);
|
||||
|
||||
@@ -36,7 +36,7 @@ typedef const lzma_filter_coder *(*lzma_filter_find)(lzma_vli id);
|
||||
|
||||
|
||||
extern lzma_ret lzma_raw_coder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *filters,
|
||||
lzma_filter_find coder_find, bool is_encoder);
|
||||
|
||||
|
||||
@@ -35,7 +35,8 @@ typedef struct {
|
||||
/// \return - LZMA_OK: Properties decoded successfully.
|
||||
/// - LZMA_OPTIONS_ERROR: Unsupported properties
|
||||
/// - LZMA_MEM_ERROR: Memory allocation failed.
|
||||
lzma_ret (*props_decode)(void **options, lzma_allocator *allocator,
|
||||
lzma_ret (*props_decode)(
|
||||
void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size);
|
||||
|
||||
} lzma_filter_decoder;
|
||||
@@ -44,74 +45,74 @@ typedef struct {
|
||||
static const lzma_filter_decoder decoders[] = {
|
||||
#ifdef HAVE_DECODER_LZMA1
|
||||
{
|
||||
LZMA_FILTER_LZMA1,
|
||||
&lzma_lzma_decoder_init,
|
||||
&lzma_lzma_decoder_memusage,
|
||||
&lzma_lzma_props_decode,
|
||||
.id = LZMA_FILTER_LZMA1,
|
||||
.init = &lzma_lzma_decoder_init,
|
||||
.memusage = &lzma_lzma_decoder_memusage,
|
||||
.props_decode = &lzma_lzma_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_LZMA2
|
||||
{
|
||||
LZMA_FILTER_LZMA2,
|
||||
&lzma_lzma2_decoder_init,
|
||||
&lzma_lzma2_decoder_memusage,
|
||||
&lzma_lzma2_props_decode,
|
||||
.id = LZMA_FILTER_LZMA2,
|
||||
.init = &lzma_lzma2_decoder_init,
|
||||
.memusage = &lzma_lzma2_decoder_memusage,
|
||||
.props_decode = &lzma_lzma2_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_X86
|
||||
{
|
||||
LZMA_FILTER_X86,
|
||||
&lzma_simple_x86_decoder_init,
|
||||
NULL,
|
||||
&lzma_simple_props_decode,
|
||||
.id = LZMA_FILTER_X86,
|
||||
.init = &lzma_simple_x86_decoder_init,
|
||||
.memusage = NULL,
|
||||
.props_decode = &lzma_simple_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_POWERPC
|
||||
{
|
||||
LZMA_FILTER_POWERPC,
|
||||
&lzma_simple_powerpc_decoder_init,
|
||||
NULL,
|
||||
&lzma_simple_props_decode,
|
||||
.id = LZMA_FILTER_POWERPC,
|
||||
.init = &lzma_simple_powerpc_decoder_init,
|
||||
.memusage = NULL,
|
||||
.props_decode = &lzma_simple_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_IA64
|
||||
{
|
||||
LZMA_FILTER_IA64,
|
||||
&lzma_simple_ia64_decoder_init,
|
||||
NULL,
|
||||
&lzma_simple_props_decode,
|
||||
.id = LZMA_FILTER_IA64,
|
||||
.init = &lzma_simple_ia64_decoder_init,
|
||||
.memusage = NULL,
|
||||
.props_decode = &lzma_simple_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_ARM
|
||||
{
|
||||
LZMA_FILTER_ARM,
|
||||
&lzma_simple_arm_decoder_init,
|
||||
NULL,
|
||||
&lzma_simple_props_decode,
|
||||
.id = LZMA_FILTER_ARM,
|
||||
.init = &lzma_simple_arm_decoder_init,
|
||||
.memusage = NULL,
|
||||
.props_decode = &lzma_simple_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_ARMTHUMB
|
||||
{
|
||||
LZMA_FILTER_ARMTHUMB,
|
||||
&lzma_simple_armthumb_decoder_init,
|
||||
NULL,
|
||||
&lzma_simple_props_decode,
|
||||
.id = LZMA_FILTER_ARMTHUMB,
|
||||
.init = &lzma_simple_armthumb_decoder_init,
|
||||
.memusage = NULL,
|
||||
.props_decode = &lzma_simple_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_SPARC
|
||||
{
|
||||
LZMA_FILTER_SPARC,
|
||||
&lzma_simple_sparc_decoder_init,
|
||||
NULL,
|
||||
&lzma_simple_props_decode,
|
||||
.id = LZMA_FILTER_SPARC,
|
||||
.init = &lzma_simple_sparc_decoder_init,
|
||||
.memusage = NULL,
|
||||
.props_decode = &lzma_simple_props_decode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_DECODER_DELTA
|
||||
{
|
||||
LZMA_FILTER_DELTA,
|
||||
&lzma_delta_decoder_init,
|
||||
&lzma_delta_coder_memusage,
|
||||
&lzma_delta_props_decode,
|
||||
.id = LZMA_FILTER_DELTA,
|
||||
.init = &lzma_delta_decoder_init,
|
||||
.memusage = &lzma_delta_coder_memusage,
|
||||
.props_decode = &lzma_delta_props_decode,
|
||||
},
|
||||
#endif
|
||||
};
|
||||
@@ -120,8 +121,7 @@ static const lzma_filter_decoder decoders[] = {
|
||||
static const lzma_filter_decoder *
|
||||
decoder_find(lzma_vli id)
|
||||
{
|
||||
size_t i;
|
||||
for (i = 0; i < ARRAY_SIZE(decoders); ++i)
|
||||
for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i)
|
||||
if (decoders[i].id == id)
|
||||
return decoders + i;
|
||||
|
||||
@@ -137,7 +137,7 @@ lzma_filter_decoder_is_supported(lzma_vli id)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_raw_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_raw_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *options)
|
||||
{
|
||||
return lzma_raw_coder_init(next, allocator,
|
||||
@@ -148,7 +148,7 @@ lzma_raw_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_raw_decoder(lzma_stream *strm, const lzma_filter *options)
|
||||
{
|
||||
lzma_next_strm_init1(lzma_raw_decoder_init, strm, options);
|
||||
lzma_next_strm_init(lzma_raw_decoder_init, strm, options);
|
||||
|
||||
strm->internal->supported_actions[LZMA_RUN] = true;
|
||||
strm->internal->supported_actions[LZMA_FINISH] = true;
|
||||
@@ -166,14 +166,13 @@ lzma_raw_decoder_memusage(const lzma_filter *filters)
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_properties_decode(lzma_filter *filter, lzma_allocator *allocator,
|
||||
lzma_properties_decode(lzma_filter *filter, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size)
|
||||
{
|
||||
const lzma_filter_decoder *const fd = decoder_find(filter->id);
|
||||
|
||||
// Make it always NULL so that the caller can always safely free() it.
|
||||
filter->options = NULL;
|
||||
|
||||
const lzma_filter_decoder *const fd = decoder_find(filter->id);
|
||||
if (fd == NULL)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
|
||||
extern lzma_ret lzma_raw_decoder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *options);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -30,11 +30,11 @@ typedef struct {
|
||||
/// invalid, UINT64_MAX is returned.
|
||||
uint64_t (*memusage)(const void *options);
|
||||
|
||||
/// Calculates the minimum sane size for Blocks (or other types of
|
||||
/// chunks) to which the input data can be split to make
|
||||
/// multithreaded encoding possible. If this is NULL, it is assumed
|
||||
/// that the encoder is fast enough with single thread.
|
||||
lzma_vli (*chunk_size)(const void *options);
|
||||
/// Calculates the recommended Uncompressed Size for .xz Blocks to
|
||||
/// which the input data can be split to make multithreaded
|
||||
/// encoding possible. If this is NULL, it is assumed that
|
||||
/// the encoder is fast enough with single thread.
|
||||
uint64_t (*block_size)(const void *options);
|
||||
|
||||
/// Tells the size of the Filter Properties field. If options are
|
||||
/// invalid, UINT32_MAX is returned. If this is NULL, props_size_fixed
|
||||
@@ -56,101 +56,95 @@ typedef struct {
|
||||
static const lzma_filter_encoder encoders[] = {
|
||||
#ifdef HAVE_ENCODER_LZMA1
|
||||
{
|
||||
LZMA_FILTER_LZMA1,
|
||||
&lzma_lzma_encoder_init,
|
||||
&lzma_lzma_encoder_memusage,
|
||||
NULL, // FIXME
|
||||
NULL,
|
||||
5,
|
||||
&lzma_lzma_props_encode,
|
||||
.id = LZMA_FILTER_LZMA1,
|
||||
.init = &lzma_lzma_encoder_init,
|
||||
.memusage = &lzma_lzma_encoder_memusage,
|
||||
.block_size = NULL, // FIXME
|
||||
.props_size_get = NULL,
|
||||
.props_size_fixed = 5,
|
||||
.props_encode = &lzma_lzma_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_LZMA2
|
||||
{
|
||||
LZMA_FILTER_LZMA2,
|
||||
&lzma_lzma2_encoder_init,
|
||||
&lzma_lzma2_encoder_memusage,
|
||||
NULL, // FIXME
|
||||
NULL,
|
||||
1,
|
||||
&lzma_lzma2_props_encode,
|
||||
.id = LZMA_FILTER_LZMA2,
|
||||
.init = &lzma_lzma2_encoder_init,
|
||||
.memusage = &lzma_lzma2_encoder_memusage,
|
||||
.block_size = &lzma_lzma2_block_size, // FIXME
|
||||
.props_size_get = NULL,
|
||||
.props_size_fixed = 1,
|
||||
.props_encode = &lzma_lzma2_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_X86
|
||||
{
|
||||
LZMA_FILTER_X86,
|
||||
&lzma_simple_x86_encoder_init,
|
||||
NULL,
|
||||
NULL,
|
||||
&lzma_simple_props_size,
|
||||
0,
|
||||
&lzma_simple_props_encode,
|
||||
.id = LZMA_FILTER_X86,
|
||||
.init = &lzma_simple_x86_encoder_init,
|
||||
.memusage = NULL,
|
||||
.block_size = NULL,
|
||||
.props_size_get = &lzma_simple_props_size,
|
||||
.props_encode = &lzma_simple_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_POWERPC
|
||||
{
|
||||
LZMA_FILTER_POWERPC,
|
||||
&lzma_simple_powerpc_encoder_init,
|
||||
NULL,
|
||||
NULL,
|
||||
&lzma_simple_props_size,
|
||||
0,
|
||||
&lzma_simple_props_encode,
|
||||
.id = LZMA_FILTER_POWERPC,
|
||||
.init = &lzma_simple_powerpc_encoder_init,
|
||||
.memusage = NULL,
|
||||
.block_size = NULL,
|
||||
.props_size_get = &lzma_simple_props_size,
|
||||
.props_encode = &lzma_simple_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_IA64
|
||||
{
|
||||
LZMA_FILTER_IA64,
|
||||
&lzma_simple_ia64_encoder_init,
|
||||
NULL,
|
||||
NULL,
|
||||
&lzma_simple_props_size,
|
||||
0,
|
||||
&lzma_simple_props_encode,
|
||||
.id = LZMA_FILTER_IA64,
|
||||
.init = &lzma_simple_ia64_encoder_init,
|
||||
.memusage = NULL,
|
||||
.block_size = NULL,
|
||||
.props_size_get = &lzma_simple_props_size,
|
||||
.props_encode = &lzma_simple_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_ARM
|
||||
{
|
||||
LZMA_FILTER_ARM,
|
||||
&lzma_simple_arm_encoder_init,
|
||||
NULL,
|
||||
NULL,
|
||||
&lzma_simple_props_size,
|
||||
0,
|
||||
&lzma_simple_props_encode,
|
||||
.id = LZMA_FILTER_ARM,
|
||||
.init = &lzma_simple_arm_encoder_init,
|
||||
.memusage = NULL,
|
||||
.block_size = NULL,
|
||||
.props_size_get = &lzma_simple_props_size,
|
||||
.props_encode = &lzma_simple_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_ARMTHUMB
|
||||
{
|
||||
LZMA_FILTER_ARMTHUMB,
|
||||
&lzma_simple_armthumb_encoder_init,
|
||||
NULL,
|
||||
NULL,
|
||||
&lzma_simple_props_size,
|
||||
0,
|
||||
&lzma_simple_props_encode,
|
||||
.id = LZMA_FILTER_ARMTHUMB,
|
||||
.init = &lzma_simple_armthumb_encoder_init,
|
||||
.memusage = NULL,
|
||||
.block_size = NULL,
|
||||
.props_size_get = &lzma_simple_props_size,
|
||||
.props_encode = &lzma_simple_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_SPARC
|
||||
{
|
||||
LZMA_FILTER_SPARC,
|
||||
&lzma_simple_sparc_encoder_init,
|
||||
NULL,
|
||||
NULL,
|
||||
&lzma_simple_props_size,
|
||||
0,
|
||||
&lzma_simple_props_encode,
|
||||
.id = LZMA_FILTER_SPARC,
|
||||
.init = &lzma_simple_sparc_encoder_init,
|
||||
.memusage = NULL,
|
||||
.block_size = NULL,
|
||||
.props_size_get = &lzma_simple_props_size,
|
||||
.props_encode = &lzma_simple_props_encode,
|
||||
},
|
||||
#endif
|
||||
#ifdef HAVE_ENCODER_DELTA
|
||||
{
|
||||
LZMA_FILTER_DELTA,
|
||||
&lzma_delta_encoder_init,
|
||||
&lzma_delta_coder_memusage,
|
||||
NULL,
|
||||
NULL,
|
||||
1,
|
||||
&lzma_delta_props_encode,
|
||||
.id = LZMA_FILTER_DELTA,
|
||||
.init = &lzma_delta_encoder_init,
|
||||
.memusage = &lzma_delta_coder_memusage,
|
||||
.block_size = NULL,
|
||||
.props_size_get = NULL,
|
||||
.props_size_fixed = 1,
|
||||
.props_encode = &lzma_delta_props_encode,
|
||||
},
|
||||
#endif
|
||||
};
|
||||
@@ -159,8 +153,7 @@ static const lzma_filter_encoder encoders[] = {
|
||||
static const lzma_filter_encoder *
|
||||
encoder_find(lzma_vli id)
|
||||
{
|
||||
size_t i;
|
||||
for (i = 0; i < ARRAY_SIZE(encoders); ++i)
|
||||
for (size_t i = 0; i < ARRAY_SIZE(encoders); ++i)
|
||||
if (encoders[i].id == id)
|
||||
return encoders + i;
|
||||
|
||||
@@ -178,10 +171,6 @@ lzma_filter_encoder_is_supported(lzma_vli id)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
|
||||
{
|
||||
size_t i;
|
||||
size_t count = 1;
|
||||
lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1];
|
||||
|
||||
if (strm->internal->next.update == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
@@ -191,10 +180,12 @@ lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
|
||||
|
||||
// The actual filter chain in the encoder is reversed. Some things
|
||||
// still want the normal order chain, so we provide both.
|
||||
size_t count = 1;
|
||||
while (filters[count].id != LZMA_VLI_UNKNOWN)
|
||||
++count;
|
||||
|
||||
for (i = 0; i < count; ++i)
|
||||
lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1];
|
||||
for (size_t i = 0; i < count; ++i)
|
||||
reversed_filters[count - i - 1] = filters[i];
|
||||
|
||||
reversed_filters[count].id = LZMA_VLI_UNKNOWN;
|
||||
@@ -205,7 +196,7 @@ lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_raw_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_raw_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *options)
|
||||
{
|
||||
return lzma_raw_coder_init(next, allocator,
|
||||
@@ -216,7 +207,7 @@ lzma_raw_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_raw_encoder(lzma_stream *strm, const lzma_filter *options)
|
||||
{
|
||||
lzma_next_strm_init3(lzma_raw_coder_init, strm, options,
|
||||
lzma_next_strm_init(lzma_raw_coder_init, strm, options,
|
||||
(lzma_filter_find)(&encoder_find), true);
|
||||
|
||||
strm->internal->supported_actions[LZMA_RUN] = true;
|
||||
@@ -235,20 +226,19 @@ lzma_raw_encoder_memusage(const lzma_filter *filters)
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
extern LZMA_API(lzma_vli)
|
||||
lzma_chunk_size(const lzma_filter *filters)
|
||||
extern uint64_t
|
||||
lzma_mt_block_size(const lzma_filter *filters)
|
||||
{
|
||||
lzma_vli max = 0;
|
||||
uint64_t max = 0;
|
||||
|
||||
for (size_t i = 0; filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
|
||||
const lzma_filter_encoder *const fe
|
||||
= encoder_find(filters[i].id);
|
||||
if (fe->chunk_size != NULL) {
|
||||
const lzma_vli size
|
||||
= fe->chunk_size(filters[i].options);
|
||||
if (size == LZMA_VLI_UNKNOWN)
|
||||
return LZMA_VLI_UNKNOWN;
|
||||
if (fe->block_size != NULL) {
|
||||
const uint64_t size
|
||||
= fe->block_size(filters[i].options);
|
||||
if (size == 0)
|
||||
return 0;
|
||||
|
||||
if (size > max)
|
||||
max = size;
|
||||
@@ -257,7 +247,6 @@ lzma_chunk_size(const lzma_filter *filters)
|
||||
|
||||
return max;
|
||||
}
|
||||
*/
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
|
||||
@@ -16,12 +16,12 @@
|
||||
#include "common.h"
|
||||
|
||||
|
||||
// FIXME: Might become a part of the public API once finished.
|
||||
// extern lzma_vli lzma_chunk_size(const lzma_filter *filters);
|
||||
// FIXME: Might become a part of the public API.
|
||||
extern uint64_t lzma_mt_block_size(const lzma_filter *filters);
|
||||
|
||||
|
||||
extern lzma_ret lzma_raw_encoder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *filters);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -15,12 +15,9 @@
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_filter_flags_decode(
|
||||
lzma_filter *filter, lzma_allocator *allocator,
|
||||
lzma_filter *filter, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size)
|
||||
{
|
||||
lzma_vli props_size;
|
||||
lzma_ret ret;
|
||||
|
||||
// Set the pointer to NULL so the caller can always safely free it.
|
||||
filter->options = NULL;
|
||||
|
||||
@@ -32,6 +29,7 @@ lzma_filter_flags_decode(
|
||||
return LZMA_DATA_ERROR;
|
||||
|
||||
// Size of Properties
|
||||
lzma_vli props_size;
|
||||
return_if_error(lzma_vli_decode(&props_size, NULL,
|
||||
in, in_pos, in_size));
|
||||
|
||||
@@ -39,7 +37,7 @@ lzma_filter_flags_decode(
|
||||
if (in_size - *in_pos < props_size)
|
||||
return LZMA_DATA_ERROR;
|
||||
|
||||
ret = lzma_properties_decode(
|
||||
const lzma_ret ret = lzma_properties_decode(
|
||||
filter, allocator, in + *in_pos, props_size);
|
||||
|
||||
*in_pos += props_size;
|
||||
|
||||
@@ -31,8 +31,6 @@ extern LZMA_API(lzma_ret)
|
||||
lzma_filter_flags_encode(const lzma_filter *filter,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
{
|
||||
uint32_t props_size;
|
||||
|
||||
// Filter ID
|
||||
if (filter->id >= LZMA_FILTER_RESERVED_START)
|
||||
return LZMA_PROG_ERROR;
|
||||
@@ -41,6 +39,7 @@ lzma_filter_flags_encode(const lzma_filter *filter,
|
||||
out, out_pos, out_size));
|
||||
|
||||
// Size of Properties
|
||||
uint32_t props_size;
|
||||
return_if_error(lzma_properties_size(&props_size, filter));
|
||||
return_if_error(lzma_vli_encode(props_size, NULL,
|
||||
out, out_pos, out_size));
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
/// \file stream_encoder.h
|
||||
/// \brief Encodes .xz Streams
|
||||
/// \file hardware_cputhreads.c
|
||||
/// \brief Get the number of CPU threads or cores
|
||||
//
|
||||
// Author: Lasse Collin
|
||||
//
|
||||
@@ -10,14 +10,13 @@
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef LZMA_STREAM_ENCODER_H
|
||||
#define LZMA_STREAM_ENCODER_H
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#include "tuklib_cpucores.h"
|
||||
|
||||
extern lzma_ret lzma_stream_encoder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
const lzma_filter *filters, lzma_check check);
|
||||
|
||||
#endif
|
||||
extern LZMA_API(uint32_t)
|
||||
lzma_cputhreads(void)
|
||||
{
|
||||
return tuklib_cpucores();
|
||||
}
|
||||
@@ -191,8 +191,8 @@ index_tree_init(index_tree *tree)
|
||||
|
||||
/// Helper for index_tree_end()
|
||||
static void
|
||||
index_tree_node_end(index_tree_node *node, lzma_allocator *allocator,
|
||||
void (*free_func)(void *node, lzma_allocator *allocator))
|
||||
index_tree_node_end(index_tree_node *node, const lzma_allocator *allocator,
|
||||
void (*free_func)(void *node, const lzma_allocator *allocator))
|
||||
{
|
||||
// The tree won't ever be very huge, so recursion should be fine.
|
||||
// 20 levels in the tree is likely quite a lot already in practice.
|
||||
@@ -202,22 +202,21 @@ index_tree_node_end(index_tree_node *node, lzma_allocator *allocator,
|
||||
if (node->right != NULL)
|
||||
index_tree_node_end(node->right, allocator, free_func);
|
||||
|
||||
if (free_func != NULL)
|
||||
free_func(node, allocator);
|
||||
|
||||
lzma_free(node, allocator);
|
||||
free_func(node, allocator);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/// Free the memory allocated for a tree. If free_func is not NULL,
|
||||
/// it is called on each node before freeing the node. This is used
|
||||
/// to free the Record groups from each index_stream before freeing
|
||||
/// the index_stream itself.
|
||||
/// Free the memory allocated for a tree. Each node is freed using the
|
||||
/// given free_func which is either &lzma_free or &index_stream_end.
|
||||
/// The latter is used to free the Record groups from each index_stream
|
||||
/// before freeing the index_stream itself.
|
||||
static void
|
||||
index_tree_end(index_tree *tree, lzma_allocator *allocator,
|
||||
void (*free_func)(void *node, lzma_allocator *allocator))
|
||||
index_tree_end(index_tree *tree, const lzma_allocator *allocator,
|
||||
void (*free_func)(void *node, const lzma_allocator *allocator))
|
||||
{
|
||||
assert(free_func != NULL);
|
||||
|
||||
if (tree->root != NULL)
|
||||
index_tree_node_end(tree->root, allocator, free_func);
|
||||
|
||||
@@ -230,7 +229,6 @@ index_tree_end(index_tree *tree, lzma_allocator *allocator,
|
||||
static void
|
||||
index_tree_append(index_tree *tree, index_tree_node *node)
|
||||
{
|
||||
uint32_t up;
|
||||
node->parent = tree->rightmost;
|
||||
node->left = NULL;
|
||||
node->right = NULL;
|
||||
@@ -259,10 +257,8 @@ index_tree_append(index_tree *tree, index_tree_node *node)
|
||||
// and thus know the state of the tree just by looking at the node
|
||||
// count. From the node count we can calculate how many steps to go
|
||||
// up in the tree to find the rotation root.
|
||||
up = tree->count ^ (UINT32_C(1) << bsr32(tree->count));
|
||||
uint32_t up = tree->count ^ (UINT32_C(1) << bsr32(tree->count));
|
||||
if (up != 0) {
|
||||
index_tree_node *pivot;
|
||||
|
||||
// Locate the root node for the rotation.
|
||||
up = ctz32(tree->count) + 2;
|
||||
do {
|
||||
@@ -270,7 +266,7 @@ index_tree_append(index_tree *tree, index_tree_node *node)
|
||||
} while (--up > 0);
|
||||
|
||||
// Rotate left using node as the rotation root.
|
||||
pivot = node->right;
|
||||
index_tree_node *pivot = node->right;
|
||||
|
||||
if (node->parent == NULL) {
|
||||
tree->root = pivot;
|
||||
@@ -342,8 +338,8 @@ index_tree_locate(const index_tree *tree, lzma_vli target)
|
||||
/// Allocate and initialize a new Stream using the given base offsets.
|
||||
static index_stream *
|
||||
index_stream_init(lzma_vli compressed_base, lzma_vli uncompressed_base,
|
||||
lzma_vli stream_number, lzma_vli block_number_base,
|
||||
lzma_allocator *allocator)
|
||||
uint32_t stream_number, lzma_vli block_number_base,
|
||||
const lzma_allocator *allocator)
|
||||
{
|
||||
index_stream *s = lzma_alloc(sizeof(index_stream), allocator);
|
||||
if (s == NULL)
|
||||
@@ -371,16 +367,17 @@ index_stream_init(lzma_vli compressed_base, lzma_vli uncompressed_base,
|
||||
|
||||
/// Free the memory allocated for a Stream and its Record groups.
|
||||
static void
|
||||
index_stream_end(void *node, lzma_allocator *allocator)
|
||||
index_stream_end(void *node, const lzma_allocator *allocator)
|
||||
{
|
||||
index_stream *s = node;
|
||||
index_tree_end(&s->groups, allocator, NULL);
|
||||
index_tree_end(&s->groups, allocator, &lzma_free);
|
||||
lzma_free(s, allocator);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
static lzma_index *
|
||||
index_init_plain(lzma_allocator *allocator)
|
||||
index_init_plain(const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_index *i = lzma_alloc(sizeof(lzma_index), allocator);
|
||||
if (i != NULL) {
|
||||
@@ -398,15 +395,13 @@ index_init_plain(lzma_allocator *allocator)
|
||||
|
||||
|
||||
extern LZMA_API(lzma_index *)
|
||||
lzma_index_init(lzma_allocator *allocator)
|
||||
lzma_index_init(const lzma_allocator *allocator)
|
||||
{
|
||||
index_stream *s;
|
||||
|
||||
lzma_index *i = index_init_plain(allocator);
|
||||
if (i == NULL)
|
||||
return NULL;
|
||||
|
||||
s = index_stream_init(0, 0, 1, 0, allocator);
|
||||
index_stream *s = index_stream_init(0, 0, 1, 0, allocator);
|
||||
if (s == NULL) {
|
||||
lzma_free(i, allocator);
|
||||
return NULL;
|
||||
@@ -419,7 +414,7 @@ lzma_index_init(lzma_allocator *allocator)
|
||||
|
||||
|
||||
extern LZMA_API(void)
|
||||
lzma_index_end(lzma_index *i, lzma_allocator *allocator)
|
||||
lzma_index_end(lzma_index *i, const lzma_allocator *allocator)
|
||||
{
|
||||
// NOTE: If you modify this function, check also the bottom
|
||||
// of lzma_index_cat().
|
||||
@@ -605,8 +600,6 @@ lzma_index_padding_size(const lzma_index *i)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
|
||||
{
|
||||
index_stream *s;
|
||||
|
||||
if (i == NULL || stream_flags == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
@@ -614,7 +607,7 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
|
||||
return_if_error(lzma_stream_flags_compare(
|
||||
stream_flags, stream_flags));
|
||||
|
||||
s = (index_stream *)(i->streams.rightmost);
|
||||
index_stream *s = (index_stream *)(i->streams.rightmost);
|
||||
s->stream_flags = *stream_flags;
|
||||
|
||||
return LZMA_OK;
|
||||
@@ -624,17 +617,14 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding)
|
||||
{
|
||||
index_stream *s;
|
||||
lzma_vli old_stream_padding;
|
||||
|
||||
if (i == NULL || stream_padding > LZMA_VLI_MAX
|
||||
|| (stream_padding & 3) != 0)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
s = (index_stream *)(i->streams.rightmost);
|
||||
index_stream *s = (index_stream *)(i->streams.rightmost);
|
||||
|
||||
// Check that the new value won't make the file grow too big.
|
||||
old_stream_padding = s->stream_padding;
|
||||
const lzma_vli old_stream_padding = s->stream_padding;
|
||||
s->stream_padding = 0;
|
||||
if (lzma_index_file_size(i) + stream_padding > LZMA_VLI_MAX) {
|
||||
s->stream_padding = old_stream_padding;
|
||||
@@ -647,29 +637,23 @@ lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding)
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_append(lzma_index *i, lzma_allocator *allocator,
|
||||
lzma_index_append(lzma_index *i, const lzma_allocator *allocator,
|
||||
lzma_vli unpadded_size, lzma_vli uncompressed_size)
|
||||
{
|
||||
index_stream *s;
|
||||
index_group *g;
|
||||
lzma_vli compressed_base;
|
||||
lzma_vli uncompressed_base;
|
||||
uint32_t index_list_size_add;
|
||||
|
||||
// Validate.
|
||||
if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN
|
||||
|| unpadded_size > UNPADDED_SIZE_MAX
|
||||
|| uncompressed_size > LZMA_VLI_MAX)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
s = (index_stream *)(i->streams.rightmost);
|
||||
g = (index_group *)(s->groups.rightmost);
|
||||
index_stream *s = (index_stream *)(i->streams.rightmost);
|
||||
index_group *g = (index_group *)(s->groups.rightmost);
|
||||
|
||||
compressed_base = g == NULL ? 0
|
||||
const lzma_vli compressed_base = g == NULL ? 0
|
||||
: vli_ceil4(g->records[g->last].unpadded_sum);
|
||||
uncompressed_base = g == NULL ? 0
|
||||
const lzma_vli uncompressed_base = g == NULL ? 0
|
||||
: g->records[g->last].uncompressed_sum;
|
||||
index_list_size_add = lzma_vli_size(unpadded_size)
|
||||
const uint32_t index_list_size_add = lzma_vli_size(unpadded_size)
|
||||
+ lzma_vli_size(uncompressed_size);
|
||||
|
||||
// Check that the file size will stay within limits.
|
||||
@@ -780,10 +764,9 @@ index_cat_helper(const index_cat_info *info, index_stream *this)
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_cat(lzma_index *LZMA_RESTRICT dest, lzma_index *LZMA_RESTRICT src,
|
||||
lzma_allocator *allocator)
|
||||
lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
|
||||
const lzma_allocator *allocator)
|
||||
{
|
||||
index_cat_info info;
|
||||
const lzma_vli dest_file_size = lzma_index_file_size(dest);
|
||||
|
||||
// Check that we don't exceed the file size limits.
|
||||
@@ -813,12 +796,10 @@ lzma_index_cat(lzma_index *LZMA_RESTRICT dest, lzma_index *LZMA_RESTRICT src,
|
||||
index_stream *s = (index_stream *)(dest->streams.rightmost);
|
||||
index_group *g = (index_group *)(s->groups.rightmost);
|
||||
if (g != NULL && g->last + 1 < g->allocated) {
|
||||
index_group *newg;
|
||||
|
||||
assert(g->node.left == NULL);
|
||||
assert(g->node.right == NULL);
|
||||
|
||||
newg = lzma_alloc(sizeof(index_group)
|
||||
index_group *newg = lzma_alloc(sizeof(index_group)
|
||||
+ (g->last + 1)
|
||||
* sizeof(index_record),
|
||||
allocator);
|
||||
@@ -848,17 +829,21 @@ lzma_index_cat(lzma_index *LZMA_RESTRICT dest, lzma_index *LZMA_RESTRICT src,
|
||||
s->groups.rightmost = &newg->node;
|
||||
|
||||
lzma_free(g, allocator);
|
||||
|
||||
// NOTE: newg isn't leaked here because
|
||||
// newg == (void *)&newg->node.
|
||||
}
|
||||
}
|
||||
|
||||
// Add all the Streams from src to dest. Update the base offsets
|
||||
// of each Stream from src.
|
||||
info.uncompressed_size = dest->uncompressed_size;
|
||||
info.file_size = dest_file_size;
|
||||
info.stream_number_add = dest->streams.count;
|
||||
info.block_number_add = dest->record_count;
|
||||
info.streams = &dest->streams;
|
||||
|
||||
const index_cat_info info = {
|
||||
.uncompressed_size = dest->uncompressed_size,
|
||||
.file_size = dest_file_size,
|
||||
.stream_number_add = dest->streams.count,
|
||||
.block_number_add = dest->record_count,
|
||||
.streams = &dest->streams,
|
||||
};
|
||||
index_cat_helper(&info, (index_stream *)(src->streams.root));
|
||||
|
||||
// Update info about all the combined Streams.
|
||||
@@ -877,26 +862,18 @@ lzma_index_cat(lzma_index *LZMA_RESTRICT dest, lzma_index *LZMA_RESTRICT src,
|
||||
|
||||
/// Duplicate an index_stream.
|
||||
static index_stream *
|
||||
index_dup_stream(const index_stream *src, lzma_allocator *allocator)
|
||||
index_dup_stream(const index_stream *src, const lzma_allocator *allocator)
|
||||
{
|
||||
index_stream *dest;
|
||||
index_group *destg;
|
||||
index_group *srcg;
|
||||
size_t i = 0;
|
||||
|
||||
// Catch a somewhat theoretical integer overflow.
|
||||
if (src->record_count > PREALLOC_MAX)
|
||||
return NULL;
|
||||
|
||||
// Allocate and initialize a new Stream.
|
||||
dest = index_stream_init(src->node.compressed_base,
|
||||
index_stream *dest = index_stream_init(src->node.compressed_base,
|
||||
src->node.uncompressed_base, src->number,
|
||||
src->block_number_base, allocator);
|
||||
|
||||
// Return immediately if allocation failed or if there are
|
||||
// no groups to duplicate.
|
||||
if (dest == NULL || src->groups.leftmost == NULL)
|
||||
return dest;
|
||||
if (dest == NULL)
|
||||
return NULL;
|
||||
|
||||
// Copy the overall information.
|
||||
dest->record_count = src->record_count;
|
||||
@@ -904,10 +881,14 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
|
||||
dest->stream_flags = src->stream_flags;
|
||||
dest->stream_padding = src->stream_padding;
|
||||
|
||||
// Return if there are no groups to duplicate.
|
||||
if (src->groups.leftmost == NULL)
|
||||
return dest;
|
||||
|
||||
// Allocate memory for the Records. We put all the Records into
|
||||
// a single group. It's simplest and also tends to make
|
||||
// lzma_index_locate() a little bit faster with very big Indexes.
|
||||
destg = lzma_alloc(sizeof(index_group)
|
||||
index_group *destg = lzma_alloc(sizeof(index_group)
|
||||
+ src->record_count * sizeof(index_record),
|
||||
allocator);
|
||||
if (destg == NULL) {
|
||||
@@ -923,7 +904,8 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
|
||||
destg->last = src->record_count - 1;
|
||||
|
||||
// Go through all the groups in src and copy the Records into destg.
|
||||
srcg = (index_group *)(src->groups.leftmost);
|
||||
const index_group *srcg = (const index_group *)(src->groups.leftmost);
|
||||
size_t i = 0;
|
||||
do {
|
||||
memcpy(destg->records + i, srcg->records,
|
||||
(srcg->last + 1) * sizeof(index_record));
|
||||
@@ -941,11 +923,8 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
|
||||
|
||||
|
||||
extern LZMA_API(lzma_index *)
|
||||
lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
|
||||
lzma_index_dup(const lzma_index *src, const lzma_allocator *allocator)
|
||||
{
|
||||
index_stream *srcstream;
|
||||
index_stream *deststream;
|
||||
|
||||
// Allocate the base structure (no initial Stream).
|
||||
lzma_index *dest = index_init_plain(allocator);
|
||||
if (dest == NULL)
|
||||
@@ -958,9 +937,11 @@ lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
|
||||
dest->index_list_size = src->index_list_size;
|
||||
|
||||
// Copy the Streams and the groups in them.
|
||||
srcstream = (index_stream *)(src->streams.leftmost);
|
||||
const index_stream *srcstream
|
||||
= (const index_stream *)(src->streams.leftmost);
|
||||
do {
|
||||
deststream = index_dup_stream(srcstream, allocator);
|
||||
index_stream *deststream = index_dup_stream(
|
||||
srcstream, allocator);
|
||||
if (deststream == NULL) {
|
||||
lzma_index_end(dest, allocator);
|
||||
return NULL;
|
||||
@@ -1031,6 +1012,8 @@ iter_set_info(lzma_index_iter *iter)
|
||||
iter->internal[ITER_GROUP].p = NULL;
|
||||
}
|
||||
|
||||
// NOTE: lzma_index_iter.stream.number is lzma_vli but we use uint32_t
|
||||
// internally.
|
||||
iter->stream.number = stream->number;
|
||||
iter->stream.block_count = stream->record_count;
|
||||
iter->stream.compressed_offset = stream->node.compressed_base;
|
||||
@@ -1119,19 +1102,14 @@ lzma_index_iter_rewind(lzma_index_iter *iter)
|
||||
extern LZMA_API(lzma_bool)
|
||||
lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode)
|
||||
{
|
||||
const lzma_index *i;
|
||||
const index_stream *stream;
|
||||
const index_group *group;
|
||||
size_t record;
|
||||
|
||||
// Catch unsupported mode values.
|
||||
if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK)
|
||||
return true;
|
||||
|
||||
i = iter->internal[ITER_INDEX].p;
|
||||
stream = iter->internal[ITER_STREAM].p;
|
||||
group = NULL;
|
||||
record = iter->internal[ITER_RECORD].s;
|
||||
const lzma_index *i = iter->internal[ITER_INDEX].p;
|
||||
const index_stream *stream = iter->internal[ITER_STREAM].p;
|
||||
const index_group *group = NULL;
|
||||
size_t record = iter->internal[ITER_RECORD].s;
|
||||
|
||||
// If we are being asked for the next Stream, leave group to NULL
|
||||
// so that the rest of this function thinks that this Stream
|
||||
@@ -1231,10 +1209,6 @@ again:
|
||||
extern LZMA_API(lzma_bool)
|
||||
lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
|
||||
{
|
||||
const index_stream *stream;
|
||||
const index_group *group;
|
||||
size_t left, right;
|
||||
|
||||
const lzma_index *i = iter->internal[ITER_INDEX].p;
|
||||
|
||||
// If the target is past the end of the file, return immediately.
|
||||
@@ -1242,12 +1216,12 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
|
||||
return true;
|
||||
|
||||
// Locate the Stream containing the target offset.
|
||||
stream = index_tree_locate(&i->streams, target);
|
||||
const index_stream *stream = index_tree_locate(&i->streams, target);
|
||||
assert(stream != NULL);
|
||||
target -= stream->node.uncompressed_base;
|
||||
|
||||
// Locate the group containing the target offset.
|
||||
group = index_tree_locate(&stream->groups, target);
|
||||
const index_group *group = index_tree_locate(&stream->groups, target);
|
||||
assert(group != NULL);
|
||||
|
||||
// Use binary search to locate the exact Record. It is the first
|
||||
@@ -1255,8 +1229,8 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
|
||||
// This is because we want the rightmost Record that fulfills the
|
||||
// search criterion. It is possible that there are empty Blocks;
|
||||
// we don't want to return them.
|
||||
left = 0;
|
||||
right = group->last;
|
||||
size_t left = 0;
|
||||
size_t right = group->last;
|
||||
|
||||
while (left < right) {
|
||||
const size_t pos = left + (right - left) / 2;
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include "check.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
enum {
|
||||
SEQ_INDICATOR,
|
||||
SEQ_COUNT,
|
||||
@@ -50,18 +50,20 @@ struct lzma_coder_s {
|
||||
|
||||
/// CRC32 of the List of Records field
|
||||
uint32_t crc32;
|
||||
};
|
||||
} lzma_index_coder;
|
||||
|
||||
|
||||
static lzma_ret
|
||||
index_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
index_decode(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size,
|
||||
uint8_t *LZMA_RESTRICT out lzma_attribute((__unused__)),
|
||||
size_t *LZMA_RESTRICT out_pos lzma_attribute((__unused__)),
|
||||
uint8_t *restrict out lzma_attribute((__unused__)),
|
||||
size_t *restrict out_pos lzma_attribute((__unused__)),
|
||||
size_t out_size lzma_attribute((__unused__)),
|
||||
lzma_action action lzma_attribute((__unused__)))
|
||||
{
|
||||
lzma_index_coder *coder = coder_ptr;
|
||||
|
||||
// Similar optimization as in index_encoder.c
|
||||
const size_t in_start = *in_pos;
|
||||
lzma_ret ret = LZMA_OK;
|
||||
@@ -207,8 +209,9 @@ out:
|
||||
|
||||
|
||||
static void
|
||||
index_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
index_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_index_coder *coder = coder_ptr;
|
||||
lzma_index_end(coder->index, allocator);
|
||||
lzma_free(coder, allocator);
|
||||
return;
|
||||
@@ -216,9 +219,11 @@ index_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
index_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
|
||||
index_decoder_memconfig(void *coder_ptr, uint64_t *memusage,
|
||||
uint64_t *old_memlimit, uint64_t new_memlimit)
|
||||
{
|
||||
lzma_index_coder *coder = coder_ptr;
|
||||
|
||||
*memusage = lzma_index_memusage(1, coder->count);
|
||||
*old_memlimit = coder->memlimit;
|
||||
|
||||
@@ -234,7 +239,7 @@ index_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
|
||||
|
||||
|
||||
static lzma_ret
|
||||
index_decoder_reset(lzma_coder *coder, lzma_allocator *allocator,
|
||||
index_decoder_reset(lzma_index_coder *coder, const lzma_allocator *allocator,
|
||||
lzma_index **i, uint64_t memlimit)
|
||||
{
|
||||
// Remember the pointer given by the application. We will set it
|
||||
@@ -251,7 +256,7 @@ index_decoder_reset(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
// Initialize the rest.
|
||||
coder->sequence = SEQ_INDICATOR;
|
||||
coder->memlimit = memlimit;
|
||||
coder->memlimit = my_max(1, memlimit);
|
||||
coder->count = 0; // Needs to be initialized due to _memconfig().
|
||||
coder->pos = 0;
|
||||
coder->crc32 = 0;
|
||||
@@ -261,35 +266,37 @@ index_decoder_reset(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
static lzma_ret
|
||||
index_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
index_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
lzma_index **i, uint64_t memlimit)
|
||||
{
|
||||
lzma_next_coder_init(&index_decoder_init, next, allocator);
|
||||
|
||||
if (i == NULL || memlimit == 0)
|
||||
if (i == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
if (next->coder == NULL) {
|
||||
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (next->coder == NULL)
|
||||
lzma_index_coder *coder = next->coder;
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_index_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
next->coder = coder;
|
||||
next->code = &index_decode;
|
||||
next->end = &index_decoder_end;
|
||||
next->memconfig = &index_decoder_memconfig;
|
||||
next->coder->index = NULL;
|
||||
coder->index = NULL;
|
||||
} else {
|
||||
lzma_index_end(next->coder->index, allocator);
|
||||
lzma_index_end(coder->index, allocator);
|
||||
}
|
||||
|
||||
return index_decoder_reset(next->coder, allocator, i, memlimit);
|
||||
return index_decoder_reset(coder, allocator, i, memlimit);
|
||||
}
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit)
|
||||
{
|
||||
lzma_next_strm_init2(index_decoder_init, strm, i, memlimit);
|
||||
lzma_next_strm_init(index_decoder_init, strm, i, memlimit);
|
||||
|
||||
strm->internal->supported_actions[LZMA_RUN] = true;
|
||||
strm->internal->supported_actions[LZMA_FINISH] = true;
|
||||
@@ -299,27 +306,25 @@ lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit)
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_buffer_decode(
|
||||
lzma_index **i, uint64_t *memlimit, lzma_allocator *allocator,
|
||||
lzma_index_buffer_decode(lzma_index **i, uint64_t *memlimit,
|
||||
const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size)
|
||||
{
|
||||
lzma_coder coder;
|
||||
lzma_ret ret;
|
||||
|
||||
// Store the input start position so that we can restore it in case
|
||||
// of an error.
|
||||
const size_t in_start = *in_pos;
|
||||
|
||||
// Sanity checks
|
||||
if (i == NULL || memlimit == NULL
|
||||
|| in == NULL || in_pos == NULL || *in_pos > in_size)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
// Initialize the decoder.
|
||||
lzma_index_coder coder;
|
||||
return_if_error(index_decoder_reset(&coder, allocator, i, *memlimit));
|
||||
|
||||
// Store the input start position so that we can restore it in case
|
||||
// of an error.
|
||||
const size_t in_start = *in_pos;
|
||||
|
||||
// Do the actual decoding.
|
||||
ret = index_decode(&coder, allocator, in, in_pos, in_size,
|
||||
lzma_ret ret = index_decode(&coder, allocator, in, in_pos, in_size,
|
||||
NULL, NULL, 0, LZMA_RUN);
|
||||
|
||||
if (ret == LZMA_STREAM_END) {
|
||||
|
||||
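The index_decoder.c changes above move the coder to the void *coder_ptr convention and keep the application-supplied memlimit sane with my_max(1, memlimit). For orientation, here is a hedged sketch of how an application might drive this decoder through the public lzma_index_decoder()/lzma_code() entry points; the file handling and buffer size are illustrative assumptions, not part of this diff.

#include <stdio.h>
#include <lzma.h>

/* Hedged sketch: decode an Index field that starts at the current
 * position of `f`. `memlimit` is chosen by the caller. */
static lzma_index *
read_index(FILE *f, uint64_t memlimit)
{
	lzma_index *idx = NULL;
	lzma_stream strm = LZMA_STREAM_INIT;

	if (lzma_index_decoder(&strm, &idx, memlimit) != LZMA_OK)
		return NULL;

	uint8_t buf[4096];
	lzma_ret ret = LZMA_OK;

	while (ret == LZMA_OK) {
		const size_t n = fread(buf, 1, sizeof(buf), f);
		if (n == 0)
			break;	// premature end of input or read error

		strm.next_in = buf;
		strm.avail_in = n;
		ret = lzma_code(&strm, LZMA_RUN);
	}

	lzma_end(&strm);

	// On success the decoder has stored the new lzma_index in idx.
	return ret == LZMA_STREAM_END ? idx : NULL;
}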
@@ -15,7 +15,7 @@
|
||||
#include "check.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
enum {
|
||||
SEQ_INDICATOR,
|
||||
SEQ_COUNT,
|
||||
@@ -37,19 +37,21 @@ struct lzma_coder_s {
|
||||
|
||||
/// CRC32 of the List of Records field
|
||||
uint32_t crc32;
|
||||
};
|
||||
} lzma_index_coder;
|
||||
|
||||
|
||||
static lzma_ret
|
||||
index_encode(lzma_coder *coder,
|
||||
lzma_allocator *allocator lzma_attribute((__unused__)),
|
||||
const uint8_t *LZMA_RESTRICT in lzma_attribute((__unused__)),
|
||||
size_t *LZMA_RESTRICT in_pos lzma_attribute((__unused__)),
|
||||
index_encode(void *coder_ptr,
|
||||
const lzma_allocator *allocator lzma_attribute((__unused__)),
|
||||
const uint8_t *restrict in lzma_attribute((__unused__)),
|
||||
size_t *restrict in_pos lzma_attribute((__unused__)),
|
||||
size_t in_size lzma_attribute((__unused__)),
|
||||
uint8_t *LZMA_RESTRICT out, size_t *LZMA_RESTRICT out_pos,
|
||||
uint8_t *restrict out, size_t *restrict out_pos,
|
||||
size_t out_size,
|
||||
lzma_action action lzma_attribute((__unused__)))
|
||||
{
|
||||
lzma_index_coder *coder = coder_ptr;
|
||||
|
||||
// Position where to start calculating CRC32. The idea is that we
|
||||
// need to call lzma_crc32() only once per call to index_encode().
|
||||
const size_t out_start = *out_pos;
|
||||
@@ -159,7 +161,7 @@ out:
|
||||
|
||||
|
||||
static void
|
||||
index_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
index_encoder_end(void *coder, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_free(coder, allocator);
|
||||
return;
|
||||
@@ -167,7 +169,7 @@ index_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static void
|
||||
index_encoder_reset(lzma_coder *coder, const lzma_index *i)
|
||||
index_encoder_reset(lzma_index_coder *coder, const lzma_index *i)
|
||||
{
|
||||
lzma_index_iter_init(&coder->iter, i);
|
||||
|
||||
@@ -181,7 +183,7 @@ index_encoder_reset(lzma_coder *coder, const lzma_index *i)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_index_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_index *i)
|
||||
{
|
||||
lzma_next_coder_init(&lzma_index_encoder_init, next, allocator);
|
||||
@@ -190,7 +192,7 @@ lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
if (next->coder == NULL) {
|
||||
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
next->coder = lzma_alloc(sizeof(lzma_index_coder), allocator);
|
||||
if (next->coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
@@ -207,7 +209,7 @@ lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_index_encoder(lzma_stream *strm, const lzma_index *i)
|
||||
{
|
||||
lzma_next_strm_init1(lzma_index_encoder_init, strm, i);
|
||||
lzma_next_strm_init(lzma_index_encoder_init, strm, i);
|
||||
|
||||
strm->internal->supported_actions[LZMA_RUN] = true;
|
||||
strm->internal->supported_actions[LZMA_FINISH] = true;
|
||||
@@ -220,10 +222,6 @@ extern LZMA_API(lzma_ret)
|
||||
lzma_index_buffer_encode(const lzma_index *i,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
{
|
||||
lzma_coder coder;
|
||||
size_t out_start;
|
||||
lzma_ret ret;
|
||||
|
||||
// Validate the arguments.
|
||||
if (i == NULL || out == NULL || out_pos == NULL || *out_pos > out_size)
|
||||
return LZMA_PROG_ERROR;
|
||||
@@ -234,12 +232,13 @@ lzma_index_buffer_encode(const lzma_index *i,
|
||||
|
||||
// The Index encoder needs just one small data structure so we can
|
||||
// allocate it on stack.
|
||||
lzma_index_coder coder;
|
||||
index_encoder_reset(&coder, i);
|
||||
|
||||
// Do the actual encoding. This should never fail, but store
|
||||
// the original *out_pos just in case.
|
||||
out_start = *out_pos;
|
||||
ret = index_encode(&coder, NULL, NULL, NULL, 0,
|
||||
const size_t out_start = *out_pos;
|
||||
lzma_ret ret = index_encode(&coder, NULL, NULL, NULL, 0,
|
||||
out, out_pos, out_size, LZMA_RUN);
|
||||
|
||||
if (ret == LZMA_STREAM_END) {
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
|
||||
extern lzma_ret lzma_index_encoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, const lzma_index *i);
|
||||
const lzma_allocator *allocator, const lzma_index *i);
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
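The single-call helpers shown above, lzma_index_buffer_encode() and lzma_index_buffer_decode(), now keep their small lzma_index_coder on the stack. Below is a hedged round-trip sketch of those two public calls; the malloc-based allocation and the UINT64_MAX memlimit are illustrative assumptions.

#include <stdint.h>
#include <stdlib.h>
#include <lzma.h>

/* Illustrative: serialize an Index to memory and parse it back.
 * `idx` is assumed to be a valid lzma_index built elsewhere. */
static lzma_ret
index_round_trip(const lzma_index *idx, const lzma_allocator *allocator)
{
	// lzma_index_size() gives the encoded size of the Index field.
	const lzma_vli size = lzma_index_size(idx);
	uint8_t *buf = malloc((size_t)(size));
	if (buf == NULL)
		return LZMA_MEM_ERROR;

	size_t out_pos = 0;
	lzma_ret ret = lzma_index_buffer_encode(idx, buf, &out_pos,
			(size_t)(size));

	if (ret == LZMA_OK) {
		lzma_index *parsed = NULL;
		uint64_t memlimit = UINT64_MAX;
		size_t in_pos = 0;

		ret = lzma_index_buffer_decode(&parsed, &memlimit, allocator,
				buf, &in_pos, out_pos);

		lzma_index_end(parsed, allocator);
	}

	free(buf);
	return ret;
}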
@@ -70,7 +70,8 @@ struct lzma_index_hash_s {
|
||||
|
||||
|
||||
extern LZMA_API(lzma_index_hash *)
|
||||
lzma_index_hash_init(lzma_index_hash *index_hash, lzma_allocator *allocator)
|
||||
lzma_index_hash_init(lzma_index_hash *index_hash,
|
||||
const lzma_allocator *allocator)
|
||||
{
|
||||
if (index_hash == NULL) {
|
||||
index_hash = lzma_alloc(sizeof(lzma_index_hash), allocator);
|
||||
@@ -101,7 +102,8 @@ lzma_index_hash_init(lzma_index_hash *index_hash, lzma_allocator *allocator)
|
||||
|
||||
|
||||
extern LZMA_API(void)
|
||||
lzma_index_hash_end(lzma_index_hash *index_hash, lzma_allocator *allocator)
|
||||
lzma_index_hash_end(lzma_index_hash *index_hash,
|
||||
const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_free(index_hash, allocator);
|
||||
return;
|
||||
@@ -124,14 +126,13 @@ static lzma_ret
|
||||
hash_append(lzma_index_hash_info *info, lzma_vli unpadded_size,
|
||||
lzma_vli uncompressed_size)
|
||||
{
|
||||
const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
|
||||
|
||||
info->blocks_size += vli_ceil4(unpadded_size);
|
||||
info->uncompressed_size += uncompressed_size;
|
||||
info->index_list_size += lzma_vli_size(unpadded_size)
|
||||
+ lzma_vli_size(uncompressed_size);
|
||||
++info->count;
|
||||
|
||||
const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
|
||||
lzma_check_update(&info->check, LZMA_CHECK_BEST,
|
||||
(const uint8_t *)(sizes), sizeof(sizes));
|
||||
|
||||
@@ -174,9 +175,6 @@ extern LZMA_API(lzma_ret)
|
||||
lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
|
||||
size_t *in_pos, size_t in_size)
|
||||
{
|
||||
size_t in_start;
|
||||
lzma_ret ret;
|
||||
|
||||
// Catch zero input buffer here, because in contrast to Index encoder
|
||||
// and decoder functions, applications call this function directly
|
||||
// instead of via lzma_code(), which does the buffer checking.
|
||||
@@ -186,8 +184,8 @@ lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
|
||||
// NOTE: This function has many similarities to index_encode() and
|
||||
// index_decode() functions found from index_encoder.c and
|
||||
// index_decoder.c. See the comments especially in index_encoder.c.
|
||||
in_start = *in_pos;
|
||||
ret = LZMA_OK;
|
||||
const size_t in_start = *in_pos;
|
||||
lzma_ret ret = LZMA_OK;
|
||||
|
||||
while (*in_pos < in_size)
|
||||
switch (index_hash->sequence) {
|
||||
|
||||
Utilities/cmliblzma/liblzma/common/memcmplen.h (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
/// \file memcmplen.h
|
||||
/// \brief Optimized comparison of two buffers
|
||||
//
|
||||
// Author: Lasse Collin
|
||||
//
|
||||
// This file has been put into the public domain.
|
||||
// You can do whatever you want with this file.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef LZMA_MEMCMPLEN_H
|
||||
#define LZMA_MEMCMPLEN_H
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#ifdef HAVE_IMMINTRIN_H
|
||||
# include <immintrin.h>
|
||||
#endif
|
||||
|
||||
|
||||
/// Find out how many equal bytes the two buffers have.
|
||||
///
|
||||
/// \param buf1 First buffer
|
||||
/// \param buf2 Second buffer
|
||||
/// \param len How many bytes have already been compared and will
|
||||
/// be assumed to match
|
||||
/// \param limit How many bytes to compare at most, including the
|
||||
/// already-compared bytes. This must be significantly
|
||||
/// smaller than UINT32_MAX to avoid integer overflows.
|
||||
/// Up to LZMA_MEMCMPLEN_EXTRA bytes may be read past
|
||||
/// the specified limit from both buf1 and buf2.
|
||||
///
|
||||
/// \return Number of equal bytes in the buffers is returned.
|
||||
/// This is always at least len and at most limit.
|
||||
///
|
||||
/// \note LZMA_MEMCMPLEN_EXTRA defines how many extra bytes may be read.
|
||||
/// It's rounded up to 2^n. This extra amount needs to be
|
||||
/// allocated in the buffers being used. It needs to be
|
||||
/// initialized too to keep Valgrind quiet.
|
||||
static inline uint32_t lzma_attribute((__always_inline__))
|
||||
lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
|
||||
uint32_t len, uint32_t limit)
|
||||
{
|
||||
assert(len <= limit);
|
||||
assert(limit <= UINT32_MAX / 2);
|
||||
|
||||
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
|
||||
&& ((TUKLIB_GNUC_REQ(3, 4) && defined(__x86_64__)) \
|
||||
|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
|
||||
|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
|
||||
|| (defined(_MSC_VER) && defined(_M_X64)))
|
||||
// NOTE: This will use 64-bit unaligned access which
|
||||
// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit, but
|
||||
// it's convenient here at least as long as it's x86-64 only.
|
||||
//
|
||||
// I keep this x86-64 only for now since that's where I know this
|
||||
// to be a good method. This may be fine on other 64-bit CPUs too.
|
||||
// On big endian one should use xor instead of subtraction and switch
|
||||
// to __builtin_clzll().
|
||||
#define LZMA_MEMCMPLEN_EXTRA 8
|
||||
while (len < limit) {
|
||||
const uint64_t x = *(const uint64_t *)(buf1 + len)
|
||||
- *(const uint64_t *)(buf2 + len);
|
||||
if (x != 0) {
|
||||
# if defined(_M_X64) // MSVC or Intel C compiler on Windows
|
||||
unsigned long tmp;
|
||||
_BitScanForward64(&tmp, x);
|
||||
len += (uint32_t)tmp >> 3;
|
||||
# else // GCC, clang, or Intel C compiler
|
||||
len += (uint32_t)__builtin_ctzll(x) >> 3;
|
||||
# endif
|
||||
return my_min(len, limit);
|
||||
}
|
||||
|
||||
len += 8;
|
||||
}
|
||||
|
||||
return limit;
|
||||
|
||||
#elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
|
||||
&& defined(HAVE__MM_MOVEMASK_EPI8) \
|
||||
&& ((defined(__GNUC__) && defined(__SSE2_MATH__)) \
|
||||
|| (defined(__INTEL_COMPILER) && defined(__SSE2__)) \
|
||||
|| (defined(_MSC_VER) && defined(_M_IX86_FP) \
|
||||
&& _M_IX86_FP >= 2))
|
||||
// NOTE: Like above, this will use 128-bit unaligned access which
|
||||
// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit.
|
||||
//
|
||||
// SSE2 version for 32-bit and 64-bit x86. On x86-64 the above
|
||||
// version is sometimes significantly faster and sometimes
|
||||
// slightly slower than this SSE2 version, so this SSE2
|
||||
// version isn't used on x86-64.
|
||||
# define LZMA_MEMCMPLEN_EXTRA 16
|
||||
while (len < limit) {
|
||||
const uint32_t x = 0xFFFF ^ _mm_movemask_epi8(_mm_cmpeq_epi8(
|
||||
_mm_loadu_si128((const __m128i *)(buf1 + len)),
|
||||
_mm_loadu_si128((const __m128i *)(buf2 + len))));
|
||||
|
||||
if (x != 0) {
|
||||
# if defined(__INTEL_COMPILER)
|
||||
len += _bit_scan_forward(x);
|
||||
# elif defined(_MSC_VER)
|
||||
unsigned long tmp;
|
||||
_BitScanForward(&tmp, x);
|
||||
len += tmp;
|
||||
# else
|
||||
len += __builtin_ctz(x);
|
||||
# endif
|
||||
return my_min(len, limit);
|
||||
}
|
||||
|
||||
len += 16;
|
||||
}
|
||||
|
||||
return limit;
|
||||
|
||||
#elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) && !defined(WORDS_BIGENDIAN)
|
||||
// Generic 32-bit little endian method
|
||||
# define LZMA_MEMCMPLEN_EXTRA 4
|
||||
while (len < limit) {
|
||||
uint32_t x = *(const uint32_t *)(buf1 + len)
|
||||
- *(const uint32_t *)(buf2 + len);
|
||||
if (x != 0) {
|
||||
if ((x & 0xFFFF) == 0) {
|
||||
len += 2;
|
||||
x >>= 16;
|
||||
}
|
||||
|
||||
if ((x & 0xFF) == 0)
|
||||
++len;
|
||||
|
||||
return my_min(len, limit);
|
||||
}
|
||||
|
||||
len += 4;
|
||||
}
|
||||
|
||||
return limit;
|
||||
|
||||
#elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) && defined(WORDS_BIGENDIAN)
|
||||
// Generic 32-bit big endian method
|
||||
# define LZMA_MEMCMPLEN_EXTRA 4
|
||||
while (len < limit) {
|
||||
uint32_t x = *(const uint32_t *)(buf1 + len)
|
||||
^ *(const uint32_t *)(buf2 + len);
|
||||
if (x != 0) {
|
||||
if ((x & 0xFFFF0000) == 0) {
|
||||
len += 2;
|
||||
x <<= 16;
|
||||
}
|
||||
|
||||
if ((x & 0xFF000000) == 0)
|
||||
++len;
|
||||
|
||||
return my_min(len, limit);
|
||||
}
|
||||
|
||||
len += 4;
|
||||
}
|
||||
|
||||
return limit;
|
||||
|
||||
#else
|
||||
// Simple portable version that doesn't use unaligned access.
|
||||
# define LZMA_MEMCMPLEN_EXTRA 0
|
||||
while (len < limit && buf1[len] == buf2[len])
|
||||
++len;
|
||||
|
||||
return len;
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
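lzma_memcmplen() above is an internal helper (memcmplen.h is not installed), so the sketch below assumes it is compiled inside liblzma. It only illustrates the contract from the doc comment: `len` bytes are taken as already matched, at most `limit` bytes are compared, and both buffers must carry LZMA_MEMCMPLEN_EXTRA initialized padding past the highest compared offset. The function and variable names are made up.

#include "memcmplen.h"

/* Hypothetical match-finder helper; dict must have LZMA_MEMCMPLEN_EXTRA
 * readable (and initialized) bytes beyond cur + nice_len. */
static uint32_t
match_length(const uint8_t *dict, uint32_t cur, uint32_t candidate,
		uint32_t already_matched, uint32_t nice_len)
{
	return lzma_memcmplen(dict + candidate, dict + cur,
			already_matched, nice_len);
}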
Utilities/cmliblzma/liblzma/common/outqueue.c (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
/// \file outqueue.c
|
||||
/// \brief Output queue handling in multithreaded coding
|
||||
//
|
||||
// Author: Lasse Collin
|
||||
//
|
||||
// This file has been put into the public domain.
|
||||
// You can do whatever you want with this file.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "outqueue.h"
|
||||
|
||||
|
||||
/// This is to ease integer overflow checking: We may allocate up to
|
||||
/// 2 * LZMA_THREADS_MAX buffers and we need some extra memory for other
|
||||
/// data structures (that's the second /2).
|
||||
#define BUF_SIZE_MAX (UINT64_MAX / LZMA_THREADS_MAX / 2 / 2)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
get_options(uint64_t *bufs_alloc_size, uint32_t *bufs_count,
|
||||
uint64_t buf_size_max, uint32_t threads)
|
||||
{
|
||||
if (threads > LZMA_THREADS_MAX || buf_size_max > BUF_SIZE_MAX)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
// The number of buffers is twice the number of threads.
|
||||
// This wastes RAM but keeps the threads busy when buffers
|
||||
// finish out of order.
|
||||
//
|
||||
// NOTE: If this is changed, update BUF_SIZE_MAX too.
|
||||
*bufs_count = threads * 2;
|
||||
*bufs_alloc_size = *bufs_count * buf_size_max;
|
||||
|
||||
return LZMA_OK;
|
||||
}
|
||||
|
||||
|
||||
extern uint64_t
|
||||
lzma_outq_memusage(uint64_t buf_size_max, uint32_t threads)
|
||||
{
|
||||
uint64_t bufs_alloc_size;
|
||||
uint32_t bufs_count;
|
||||
|
||||
if (get_options(&bufs_alloc_size, &bufs_count, buf_size_max, threads)
|
||||
!= LZMA_OK)
|
||||
return UINT64_MAX;
|
||||
|
||||
return sizeof(lzma_outq) + bufs_count * sizeof(lzma_outbuf)
|
||||
+ bufs_alloc_size;
|
||||
}
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_outq_init(lzma_outq *outq, const lzma_allocator *allocator,
|
||||
uint64_t buf_size_max, uint32_t threads)
|
||||
{
|
||||
uint64_t bufs_alloc_size;
|
||||
uint32_t bufs_count;
|
||||
|
||||
// Set bufs_count and bufs_alloc_size.
|
||||
return_if_error(get_options(&bufs_alloc_size, &bufs_count,
|
||||
buf_size_max, threads));
|
||||
|
||||
// Allocate memory if needed.
|
||||
if (outq->buf_size_max != buf_size_max
|
||||
|| outq->bufs_allocated != bufs_count) {
|
||||
lzma_outq_end(outq, allocator);
|
||||
|
||||
#if SIZE_MAX < UINT64_MAX
|
||||
if (bufs_alloc_size > SIZE_MAX)
|
||||
return LZMA_MEM_ERROR;
|
||||
#endif
|
||||
|
||||
outq->bufs = lzma_alloc(bufs_count * sizeof(lzma_outbuf),
|
||||
allocator);
|
||||
outq->bufs_mem = lzma_alloc((size_t)(bufs_alloc_size),
|
||||
allocator);
|
||||
|
||||
if (outq->bufs == NULL || outq->bufs_mem == NULL) {
|
||||
lzma_outq_end(outq, allocator);
|
||||
return LZMA_MEM_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize the rest of the main structure. Initialization of
|
||||
// outq->bufs[] is done when they are actually needed.
|
||||
outq->buf_size_max = (size_t)(buf_size_max);
|
||||
outq->bufs_allocated = bufs_count;
|
||||
outq->bufs_pos = 0;
|
||||
outq->bufs_used = 0;
|
||||
outq->read_pos = 0;
|
||||
|
||||
return LZMA_OK;
|
||||
}
|
||||
|
||||
|
||||
extern void
|
||||
lzma_outq_end(lzma_outq *outq, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_free(outq->bufs, allocator);
|
||||
outq->bufs = NULL;
|
||||
|
||||
lzma_free(outq->bufs_mem, allocator);
|
||||
outq->bufs_mem = NULL;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
extern lzma_outbuf *
|
||||
lzma_outq_get_buf(lzma_outq *outq)
|
||||
{
|
||||
// Caller must have checked it with lzma_outq_has_buf().
|
||||
assert(outq->bufs_used < outq->bufs_allocated);
|
||||
|
||||
// Initialize the new buffer.
|
||||
lzma_outbuf *buf = &outq->bufs[outq->bufs_pos];
|
||||
buf->buf = outq->bufs_mem + outq->bufs_pos * outq->buf_size_max;
|
||||
buf->size = 0;
|
||||
buf->finished = false;
|
||||
|
||||
// Update the queue state.
|
||||
if (++outq->bufs_pos == outq->bufs_allocated)
|
||||
outq->bufs_pos = 0;
|
||||
|
||||
++outq->bufs_used;
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
|
||||
extern bool
|
||||
lzma_outq_is_readable(const lzma_outq *outq)
|
||||
{
|
||||
uint32_t i = outq->bufs_pos - outq->bufs_used;
|
||||
if (outq->bufs_pos < outq->bufs_used)
|
||||
i += outq->bufs_allocated;
|
||||
|
||||
return outq->bufs[i].finished;
|
||||
}
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_outq_read(lzma_outq *restrict outq, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size,
|
||||
lzma_vli *restrict unpadded_size,
|
||||
lzma_vli *restrict uncompressed_size)
|
||||
{
|
||||
// There must be at least one buffer from which to read.
|
||||
if (outq->bufs_used == 0)
|
||||
return LZMA_OK;
|
||||
|
||||
// Get the buffer.
|
||||
uint32_t i = outq->bufs_pos - outq->bufs_used;
|
||||
if (outq->bufs_pos < outq->bufs_used)
|
||||
i += outq->bufs_allocated;
|
||||
|
||||
lzma_outbuf *buf = &outq->bufs[i];
|
||||
|
||||
// If it isn't finished yet, we cannot read from it.
|
||||
if (!buf->finished)
|
||||
return LZMA_OK;
|
||||
|
||||
// Copy from the buffer to output.
|
||||
lzma_bufcpy(buf->buf, &outq->read_pos, buf->size,
|
||||
out, out_pos, out_size);
|
||||
|
||||
// Return if we didn't get all the data from the buffer.
|
||||
if (outq->read_pos < buf->size)
|
||||
return LZMA_OK;
|
||||
|
||||
// The buffer was finished. Tell the caller its size information.
|
||||
*unpadded_size = buf->unpadded_size;
|
||||
*uncompressed_size = buf->uncompressed_size;
|
||||
|
||||
// Free this buffer for further use.
|
||||
--outq->bufs_used;
|
||||
outq->read_pos = 0;
|
||||
|
||||
return LZMA_STREAM_END;
|
||||
}
|
||||
Utilities/cmliblzma/liblzma/common/outqueue.h (new file, 156 lines)
@@ -0,0 +1,156 @@
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
/// \file outqueue.h
|
||||
/// \brief Output queue handling in multithreaded coding
|
||||
//
|
||||
// Author: Lasse Collin
|
||||
//
|
||||
// This file has been put into the public domain.
|
||||
// You can do whatever you want with this file.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "common.h"
|
||||
|
||||
|
||||
/// Output buffer for a single thread
|
||||
typedef struct {
|
||||
/// Pointer to the output buffer of lzma_outq.buf_size_max bytes
|
||||
uint8_t *buf;
|
||||
|
||||
/// Amount of data written to buf
|
||||
size_t size;
|
||||
|
||||
/// Additional size information
|
||||
lzma_vli unpadded_size;
|
||||
lzma_vli uncompressed_size;
|
||||
|
||||
/// True when no more data will be written into this buffer.
|
||||
///
|
||||
/// \note This is read by another thread and thus access
|
||||
/// to this variable needs a mutex.
|
||||
bool finished;
|
||||
|
||||
} lzma_outbuf;
|
||||
|
||||
|
||||
typedef struct {
|
||||
/// Array of buffers that are used cyclically.
|
||||
lzma_outbuf *bufs;
|
||||
|
||||
/// Memory allocated for all the buffers
|
||||
uint8_t *bufs_mem;
|
||||
|
||||
/// Amount of buffer space available in each buffer
|
||||
size_t buf_size_max;
|
||||
|
||||
/// Number of buffers allocated
|
||||
uint32_t bufs_allocated;
|
||||
|
||||
/// Position in the bufs array. The next buffer to be taken
|
||||
/// into use is bufs[bufs_pos].
|
||||
uint32_t bufs_pos;
|
||||
|
||||
/// Number of buffers in use
|
||||
uint32_t bufs_used;
|
||||
|
||||
/// Position in the buffer in lzma_outq_read()
|
||||
size_t read_pos;
|
||||
|
||||
} lzma_outq;
|
||||
|
||||
|
||||
/**
|
||||
* \brief Calculate the memory usage of an output queue
|
||||
*
|
||||
* \return Approximate memory usage in bytes or UINT64_MAX on error.
|
||||
*/
|
||||
extern uint64_t lzma_outq_memusage(uint64_t buf_size_max, uint32_t threads);
|
||||
|
||||
|
||||
/// \brief Initialize an output queue
|
||||
///
|
||||
/// \param outq Pointer to an output queue. Before calling
|
||||
/// this function the first time, *outq should
|
||||
/// have been zeroed with memzero() so that this
|
||||
/// function knows that there are no previous
|
||||
/// allocations to free.
|
||||
/// \param allocator Pointer to allocator or NULL
|
||||
/// \param buf_size_max Maximum amount of data that a single buffer
|
||||
/// in the queue may need to store.
|
||||
/// \param threads Number of buffers that may be in use
|
||||
/// concurrently. Note that more than this number
|
||||
/// of buffers will actually get allocated to
|
||||
/// improve performance when buffers finish
|
||||
/// out of order.
|
||||
///
|
||||
/// \return - LZMA_OK
|
||||
/// - LZMA_MEM_ERROR
|
||||
///
|
||||
extern lzma_ret lzma_outq_init(
|
||||
lzma_outq *outq, const lzma_allocator *allocator,
|
||||
uint64_t buf_size_max, uint32_t threads);
|
||||
|
||||
|
||||
/// \brief Free the memory associated with the output queue
|
||||
extern void lzma_outq_end(lzma_outq *outq, const lzma_allocator *allocator);
|
||||
|
||||
|
||||
/// \brief Get a new buffer
|
||||
///
|
||||
/// lzma_outq_has_buf() must be used to check that there is a buffer
|
||||
/// available before calling lzma_outq_get_buf().
|
||||
///
|
||||
extern lzma_outbuf *lzma_outq_get_buf(lzma_outq *outq);
|
||||
|
||||
|
||||
/// \brief Test if there is data ready to be read
|
||||
///
|
||||
/// Call to this function must be protected with the same mutex that
|
||||
/// is used to protect lzma_outbuf.finished.
|
||||
///
|
||||
extern bool lzma_outq_is_readable(const lzma_outq *outq);
|
||||
|
||||
|
||||
/// \brief Read finished data
|
||||
///
|
||||
/// \param outq Pointer to an output queue
|
||||
/// \param out Beginning of the output buffer
|
||||
/// \param out_pos The next byte will be written to
|
||||
/// out[*out_pos].
|
||||
/// \param out_size Size of the out buffer; the first byte into
|
||||
/// which no data is written to is out[out_size].
|
||||
/// \param unpadded_size Unpadded Size from the Block encoder
|
||||
/// \param uncompressed_size Uncompressed Size from the Block encoder
|
||||
///
|
||||
/// \return - LZMA: All OK. Either no data was available or the buffer
|
||||
/// being read didn't become empty yet.
|
||||
/// - LZMA_STREAM_END: The buffer being read was finished.
|
||||
/// *unpadded_size and *uncompressed_size were set.
|
||||
///
|
||||
/// \note This reads lzma_outbuf.finished variables and thus call
|
||||
/// to this function needs to be protected with a mutex.
|
||||
///
|
||||
extern lzma_ret lzma_outq_read(lzma_outq *restrict outq,
|
||||
uint8_t *restrict out, size_t *restrict out_pos,
|
||||
size_t out_size, lzma_vli *restrict unpadded_size,
|
||||
lzma_vli *restrict uncompressed_size);
|
||||
|
||||
|
||||
/// \brief Test if there is at least one buffer free
|
||||
///
|
||||
/// This must be used before getting a new buffer with lzma_outq_get_buf().
|
||||
///
|
||||
static inline bool
|
||||
lzma_outq_has_buf(const lzma_outq *outq)
|
||||
{
|
||||
return outq->bufs_used < outq->bufs_allocated;
|
||||
}
|
||||
|
||||
|
||||
/// \brief Test if the queue is completely empty
|
||||
static inline bool
|
||||
lzma_outq_is_empty(const lzma_outq *outq)
|
||||
{
|
||||
return outq->bufs_used == 0;
|
||||
}
|
||||
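The output queue declared above is an internal API used by the multithreaded Stream encoder. The following is a hedged sketch of the intended call pattern only; in real use stream_encoder_mt.c drives it from several threads and protects the `finished` flag with a mutex, and the buffer size and thread count below are arbitrary.

#include "outqueue.h"

/* Hedged single-threaded sketch of the lzma_outq call pattern. */
static lzma_ret
outq_example(const lzma_allocator *allocator,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	lzma_outq outq;
	memzero(&outq, sizeof(outq));	// required before the first init

	// Allocates two buffers per "thread" (see get_options() above).
	lzma_ret ret = lzma_outq_init(&outq, allocator, 1U << 20, 4);
	if (ret != LZMA_OK)
		return ret;

	if (lzma_outq_has_buf(&outq)) {
		lzma_outbuf *buf = lzma_outq_get_buf(&outq);

		// A worker would fill buf->buf and set these fields, then
		// set finished = true while holding the shared mutex.
		buf->size = 0;
		buf->unpadded_size = 0;
		buf->uncompressed_size = 0;
		buf->finished = true;
	}

	lzma_vli unpadded = 0;
	lzma_vli uncompressed = 0;
	if (lzma_outq_is_readable(&outq))
		ret = lzma_outq_read(&outq, out, out_pos, out_size,
				&unpadded, &uncompressed);

	lzma_outq_end(&outq, allocator);
	return ret;
}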
@@ -15,13 +15,10 @@
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
|
||||
lzma_allocator *allocator,
|
||||
const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos, size_t out_size)
|
||||
{
|
||||
lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT;
|
||||
lzma_ret ret;
|
||||
|
||||
// Sanity checks
|
||||
if (in_pos == NULL || (in == NULL && *in_pos != in_size)
|
||||
|| *in_pos > in_size || out_pos == NULL
|
||||
@@ -36,7 +33,8 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
|
||||
// Initialize the Stream decoder.
|
||||
// TODO: We need something to tell the decoder that it can use the
|
||||
// output buffer as workspace, and thus save significant amount of RAM.
|
||||
ret = lzma_stream_decoder_init(
|
||||
lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT;
|
||||
lzma_ret ret = lzma_stream_decoder_init(
|
||||
&stream_decoder, allocator, *memlimit, flags);
|
||||
|
||||
if (ret == LZMA_OK) {
|
||||
|
||||
@@ -42,13 +42,10 @@ lzma_stream_buffer_bound(size_t uncompressed_size)
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
|
||||
lzma_allocator *allocator, const uint8_t *in, size_t in_size,
|
||||
const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t in_size,
|
||||
uint8_t *out, size_t *out_pos_ptr, size_t out_size)
|
||||
{
|
||||
lzma_stream_flags stream_flags = { 0 };
|
||||
lzma_block block = { 0 };
|
||||
size_t out_pos;
|
||||
|
||||
// Sanity checks
|
||||
if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX
|
||||
|| (in == NULL && in_size != 0) || out == NULL
|
||||
@@ -65,7 +62,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
|
||||
|
||||
// Use a local copy. We update *out_pos_ptr only if everything
|
||||
// succeeds.
|
||||
out_pos = *out_pos_ptr;
|
||||
size_t out_pos = *out_pos_ptr;
|
||||
|
||||
// Check that there's enough space for both Stream Header and
|
||||
// Stream Footer.
|
||||
@@ -77,7 +74,10 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
|
||||
out_size -= LZMA_STREAM_HEADER_SIZE;
|
||||
|
||||
// Encode the Stream Header.
|
||||
stream_flags.check = check;
|
||||
lzma_stream_flags stream_flags = {
|
||||
.version = 0,
|
||||
.check = check,
|
||||
};
|
||||
|
||||
if (lzma_stream_header_encode(&stream_flags, out + out_pos)
|
||||
!= LZMA_OK)
|
||||
@@ -86,8 +86,11 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
|
||||
out_pos += LZMA_STREAM_HEADER_SIZE;
|
||||
|
||||
// Encode a Block but only if there is at least one byte of input.
|
||||
block.check = check;
|
||||
block.filters = filters;
|
||||
lzma_block block = {
|
||||
.version = 0,
|
||||
.check = check,
|
||||
.filters = filters,
|
||||
};
|
||||
|
||||
if (in_size > 0)
|
||||
return_if_error(lzma_block_buffer_encode(&block, allocator,
|
||||
@@ -95,8 +98,6 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
|
||||
|
||||
// Index
|
||||
{
|
||||
lzma_ret ret;
|
||||
|
||||
// Create an Index. It will have one Record if there was
|
||||
// at least one byte of input to encode. Otherwise the
|
||||
// Index will be empty.
|
||||
@@ -104,7 +105,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
|
||||
if (i == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
ret = LZMA_OK;
|
||||
lzma_ret ret = LZMA_OK;
|
||||
|
||||
if (in_size > 0)
|
||||
ret = lzma_index_append(i, allocator,
|
||||
|
||||
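lzma_stream_buffer_encode() above takes the filter chain, check type, and a single output buffer. Below is a hedged sketch of calling it with a one-filter LZMA2 chain; the preset, the CRC64 check, and the malloc-based output buffer are illustrative assumptions.

#include <stdlib.h>
#include <lzma.h>

/* Illustrative single-call encoding of one input buffer into a .xz Stream. */
static lzma_ret
compress_buffer(const uint8_t *in, size_t in_size,
		uint8_t **out, size_t *out_size)
{
	lzma_options_lzma opt;
	if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
		return LZMA_OPTIONS_ERROR;

	lzma_filter filters[] = {
		{ .id = LZMA_FILTER_LZMA2, .options = &opt },
		{ .id = LZMA_VLI_UNKNOWN, .options = NULL },
	};

	// Worst-case size of the whole .xz Stream for this input.
	const size_t bound = lzma_stream_buffer_bound(in_size);
	*out = malloc(bound);
	if (*out == NULL)
		return LZMA_MEM_ERROR;

	*out_size = 0;
	return lzma_stream_buffer_encode(filters, LZMA_CHECK_CRC64, NULL,
			in, in_size, *out, out_size, bound);
}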
@@ -14,7 +14,7 @@
|
||||
#include "block_decoder.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
enum {
|
||||
SEQ_STREAM_HEADER,
|
||||
SEQ_BLOCK_HEADER,
|
||||
@@ -57,6 +57,10 @@ struct lzma_coder_s {
|
||||
/// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
|
||||
bool tell_any_check;
|
||||
|
||||
/// If true, we will tell the Block decoder to skip calculating
|
||||
/// and verifying the integrity check.
|
||||
bool ignore_check;
|
||||
|
||||
/// If true, we will decode concatenated Streams that possibly have
|
||||
/// Stream Padding between or after them. LZMA_STREAM_END is returned
|
||||
/// once the application isn't giving us any new input, and we aren't
|
||||
@@ -76,11 +80,11 @@ struct lzma_coder_s {
|
||||
/// Buffer to hold Stream Header, Block Header, and Stream Footer.
|
||||
/// Block Header has biggest maximum size.
|
||||
uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
|
||||
};
|
||||
} lzma_stream_coder;
|
||||
|
||||
|
||||
static lzma_ret
|
||||
stream_decoder_reset(lzma_coder *coder, lzma_allocator *allocator)
|
||||
stream_decoder_reset(lzma_stream_coder *coder, const lzma_allocator *allocator)
|
||||
{
|
||||
// Initialize the Index hash used to verify the Index.
|
||||
coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
|
||||
@@ -96,18 +100,18 @@ stream_decoder_reset(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
size_t in_size, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
|
||||
stream_decode(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size, lzma_action action)
|
||||
{
|
||||
lzma_stream_coder *coder = coder_ptr;
|
||||
|
||||
// When decoding the actual Block, it may be able to produce more
|
||||
// output even if we don't give it any new input.
|
||||
while (true)
|
||||
switch (coder->sequence) {
|
||||
case SEQ_STREAM_HEADER: {
|
||||
lzma_ret ret;
|
||||
|
||||
// Copy the Stream Header to the internal buffer.
|
||||
lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
|
||||
LZMA_STREAM_HEADER_SIZE);
|
||||
@@ -119,7 +123,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
coder->pos = 0;
|
||||
|
||||
// Decode the Stream Header.
|
||||
ret = lzma_stream_header_decode(
|
||||
const lzma_ret ret = lzma_stream_header_decode(
|
||||
&coder->stream_flags, coder->buffer);
|
||||
if (ret != LZMA_OK)
|
||||
return ret == LZMA_FORMAT_ERROR && !coder->first_stream
|
||||
@@ -156,11 +160,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
// Fall through
|
||||
|
||||
case SEQ_BLOCK_HEADER: {
|
||||
lzma_filter filters[LZMA_FILTERS_MAX + 1];
|
||||
uint64_t memusage;
|
||||
lzma_ret ret;
|
||||
size_t i;
|
||||
|
||||
if (*in_pos >= in_size)
|
||||
return LZMA_OK;
|
||||
|
||||
@@ -189,20 +188,28 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
coder->pos = 0;
|
||||
|
||||
// Version 0 is currently the only possible version.
|
||||
coder->block_options.version = 0;
|
||||
// Version 1 is needed to support the .ignore_check option.
|
||||
coder->block_options.version = 1;
|
||||
|
||||
// Set up a buffer to hold the filter chain. Block Header
|
||||
// decoder will initialize all members of this array so
|
||||
// we don't need to do it here.
|
||||
lzma_filter filters[LZMA_FILTERS_MAX + 1];
|
||||
coder->block_options.filters = filters;
|
||||
|
||||
// Decode the Block Header.
|
||||
return_if_error(lzma_block_header_decode(&coder->block_options,
|
||||
allocator, coder->buffer));
|
||||
|
||||
// If LZMA_IGNORE_CHECK was used, this flag needs to be set.
|
||||
// It has to be set after lzma_block_header_decode() because
|
||||
// it always resets this to false.
|
||||
coder->block_options.ignore_check = coder->ignore_check;
|
||||
|
||||
// Check the memory usage limit.
|
||||
memusage = lzma_raw_decoder_memusage(filters);
|
||||
const uint64_t memusage = lzma_raw_decoder_memusage(filters);
|
||||
lzma_ret ret;
|
||||
|
||||
if (memusage == UINT64_MAX) {
|
||||
// One or more unknown Filter IDs.
|
||||
ret = LZMA_OPTIONS_ERROR;
|
||||
@@ -228,7 +235,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
// Free the allocated filter options since they are needed
|
||||
// only to initialize the Block decoder.
|
||||
for (i = 0; i < LZMA_FILTERS_MAX; ++i)
|
||||
for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i)
|
||||
lzma_free(filters[i].options, allocator);
|
||||
|
||||
coder->block_options.filters = NULL;
|
||||
@@ -264,8 +271,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
}
|
||||
|
||||
case SEQ_INDEX: {
|
||||
lzma_ret ret;
|
||||
|
||||
// If we don't have any input, don't call
|
||||
// lzma_index_hash_decode() since it would return
|
||||
// LZMA_BUF_ERROR, which we must not do here.
|
||||
@@ -274,7 +279,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
// Decode the Index and compare it to the hash calculated
|
||||
// from the sizes of the Blocks (if any).
|
||||
ret = lzma_index_hash_decode(coder->index_hash,
|
||||
const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
|
||||
in, in_pos, in_size);
|
||||
if (ret != LZMA_STREAM_END)
|
||||
return ret;
|
||||
@@ -285,9 +290,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
// Fall through
|
||||
|
||||
case SEQ_STREAM_FOOTER: {
|
||||
lzma_stream_flags footer_flags;
|
||||
lzma_ret ret;
|
||||
|
||||
// Copy the Stream Footer to the internal buffer.
|
||||
lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
|
||||
LZMA_STREAM_HEADER_SIZE);
|
||||
@@ -301,7 +303,8 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
// Decode the Stream Footer. The decoder gives
|
||||
// LZMA_FORMAT_ERROR if the magic bytes don't match,
|
||||
// so convert that return code to LZMA_DATA_ERROR.
|
||||
ret = lzma_stream_footer_decode(
|
||||
lzma_stream_flags footer_flags;
|
||||
const lzma_ret ret = lzma_stream_footer_decode(
|
||||
&footer_flags, coder->buffer);
|
||||
if (ret != LZMA_OK)
|
||||
return ret == LZMA_FORMAT_ERROR
|
||||
@@ -374,8 +377,9 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
static void
|
||||
stream_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
stream_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_stream_coder *coder = coder_ptr;
|
||||
lzma_next_end(&coder->block_decoder, allocator);
|
||||
lzma_index_hash_end(coder->index_hash, allocator);
|
||||
lzma_free(coder, allocator);
|
||||
@@ -384,16 +388,19 @@ stream_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_check
|
||||
stream_decoder_get_check(const lzma_coder *coder)
|
||||
stream_decoder_get_check(const void *coder_ptr)
|
||||
{
|
||||
const lzma_stream_coder *coder = coder_ptr;
|
||||
return coder->stream_flags.check;
|
||||
}
|
||||
|
||||
|
||||
static lzma_ret
|
||||
stream_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
|
||||
stream_decoder_memconfig(void *coder_ptr, uint64_t *memusage,
|
||||
uint64_t *old_memlimit, uint64_t new_memlimit)
|
||||
{
|
||||
lzma_stream_coder *coder = coder_ptr;
|
||||
|
||||
*memusage = coder->memusage;
|
||||
*old_memlimit = coder->memlimit;
|
||||
|
||||
@@ -409,48 +416,49 @@ stream_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_stream_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_stream_decoder_init(
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
uint64_t memlimit, uint32_t flags)
|
||||
{
|
||||
lzma_next_coder_init(&lzma_stream_decoder_init, next, allocator);
|
||||
|
||||
if (memlimit == 0)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
if (flags & ~LZMA_SUPPORTED_FLAGS)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
if (next->coder == NULL) {
|
||||
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (next->coder == NULL)
|
||||
lzma_stream_coder *coder = next->coder;
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_stream_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
next->coder = coder;
|
||||
next->code = &stream_decode;
|
||||
next->end = &stream_decoder_end;
|
||||
next->get_check = &stream_decoder_get_check;
|
||||
next->memconfig = &stream_decoder_memconfig;
|
||||
|
||||
next->coder->block_decoder = LZMA_NEXT_CODER_INIT;
|
||||
next->coder->index_hash = NULL;
|
||||
coder->block_decoder = LZMA_NEXT_CODER_INIT;
|
||||
coder->index_hash = NULL;
|
||||
}
|
||||
|
||||
next->coder->memlimit = memlimit;
|
||||
next->coder->memusage = LZMA_MEMUSAGE_BASE;
|
||||
next->coder->tell_no_check = (flags & LZMA_TELL_NO_CHECK) != 0;
|
||||
next->coder->tell_unsupported_check
|
||||
coder->memlimit = my_max(1, memlimit);
|
||||
coder->memusage = LZMA_MEMUSAGE_BASE;
|
||||
coder->tell_no_check = (flags & LZMA_TELL_NO_CHECK) != 0;
|
||||
coder->tell_unsupported_check
|
||||
= (flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
|
||||
next->coder->tell_any_check = (flags & LZMA_TELL_ANY_CHECK) != 0;
|
||||
next->coder->concatenated = (flags & LZMA_CONCATENATED) != 0;
|
||||
next->coder->first_stream = true;
|
||||
coder->tell_any_check = (flags & LZMA_TELL_ANY_CHECK) != 0;
|
||||
coder->ignore_check = (flags & LZMA_IGNORE_CHECK) != 0;
|
||||
coder->concatenated = (flags & LZMA_CONCATENATED) != 0;
|
||||
coder->first_stream = true;
|
||||
|
||||
return stream_decoder_reset(next->coder, allocator);
|
||||
return stream_decoder_reset(coder, allocator);
|
||||
}
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
|
||||
{
|
||||
lzma_next_strm_init2(lzma_stream_decoder_init, strm, memlimit, flags);
|
||||
lzma_next_strm_init(lzma_stream_decoder_init, strm, memlimit, flags);
|
||||
|
||||
strm->internal->supported_actions[LZMA_RUN] = true;
|
||||
strm->internal->supported_actions[LZMA_FINISH] = true;
|
||||
|
||||
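The stream_decoder.c hunks above add the ignore_check handling (which needs Block format version 1) and clamp the stored limit with my_max(1, memlimit). From the application side these options are just flag bits to the public initializer; a minimal hedged sketch, with the lzma_code() loop omitted:

#include <lzma.h>

/* Illustrative initialization only. */
static lzma_ret
init_decoder(lzma_stream *strm, uint64_t memlimit)
{
	// LZMA_CONCATENATED decodes multiple Streams and Stream Padding;
	// LZMA_IGNORE_CHECK (handled by the new code above) skips
	// verifying the integrity check while still parsing its field.
	return lzma_stream_decoder(strm, memlimit,
			LZMA_CONCATENATED | LZMA_IGNORE_CHECK);
}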
@@ -15,7 +15,8 @@
|
||||
|
||||
#include "common.h"
|
||||
|
||||
extern lzma_ret lzma_stream_decoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, uint64_t memlimit, uint32_t flags);
|
||||
extern lzma_ret lzma_stream_decoder_init(
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
uint64_t memlimit, uint32_t flags);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -10,12 +10,11 @@
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "stream_encoder.h"
|
||||
#include "block_encoder.h"
|
||||
#include "index_encoder.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
enum {
|
||||
SEQ_STREAM_HEADER,
|
||||
SEQ_BLOCK_INIT,
|
||||
@@ -26,7 +25,7 @@ struct lzma_coder_s {
|
||||
} sequence;
|
||||
|
||||
/// True if Block encoder has been initialized by
|
||||
/// lzma_stream_encoder_init() or stream_encoder_update()
|
||||
/// stream_encoder_init() or stream_encoder_update()
|
||||
/// and thus doesn't need to be initialized in stream_encode().
|
||||
bool block_encoder_is_initialized;
|
||||
|
||||
@@ -56,11 +55,11 @@ struct lzma_coder_s {
|
||||
/// Buffer to hold Stream Header, Block Header, and Stream Footer.
|
||||
/// Block Header has biggest maximum size.
|
||||
uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
|
||||
};
|
||||
} lzma_stream_coder;
|
||||
|
||||
|
||||
static lzma_ret
|
||||
block_encoder_init(lzma_coder *coder, lzma_allocator *allocator)
|
||||
block_encoder_init(lzma_stream_coder *coder, const lzma_allocator *allocator)
|
||||
{
|
||||
// Prepare the Block options. Even though Block encoder doesn't need
|
||||
// compressed_size, uncompressed_size, and header_size to be
|
||||
@@ -79,11 +78,13 @@ block_encoder_init(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
size_t in_size, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
|
||||
stream_encode(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size, lzma_action action)
|
||||
{
|
||||
lzma_stream_coder *coder = coder_ptr;
|
||||
|
||||
// Main loop
|
||||
while (*out_pos < out_size)
|
||||
switch (coder->sequence) {
|
||||
@@ -126,7 +127,7 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
}
|
||||
|
||||
// Initialize the Block encoder unless it was already
|
||||
// initialized by lzma_stream_encoder_init() or
|
||||
// initialized by stream_encoder_init() or
|
||||
// stream_encoder_update().
|
||||
if (!coder->block_encoder_is_initialized)
|
||||
return_if_error(block_encoder_init(coder, allocator));
|
||||
@@ -147,13 +148,12 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
}
|
||||
|
||||
case SEQ_BLOCK_ENCODE: {
|
||||
lzma_vli unpadded_size;
|
||||
|
||||
static const lzma_action convert[4] = {
|
||||
static const lzma_action convert[LZMA_ACTION_MAX + 1] = {
|
||||
LZMA_RUN,
|
||||
LZMA_SYNC_FLUSH,
|
||||
LZMA_FINISH,
|
||||
LZMA_FINISH,
|
||||
LZMA_FINISH,
|
||||
};
|
||||
|
||||
const lzma_ret ret = coder->block_encoder.code(
|
||||
@@ -164,7 +164,7 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
return ret;
|
||||
|
||||
// Add a new Index Record.
|
||||
unpadded_size = lzma_block_unpadded_size(
|
||||
const lzma_vli unpadded_size = lzma_block_unpadded_size(
|
||||
&coder->block_options);
|
||||
assert(unpadded_size != 0);
|
||||
return_if_error(lzma_index_append(coder->index, allocator,
|
||||
@@ -176,12 +176,6 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
}
|
||||
|
||||
case SEQ_INDEX_ENCODE: {
|
||||
const lzma_stream_flags stream_flags = {
|
||||
0,
|
||||
lzma_index_size(coder->index),
|
||||
coder->block_options.check,
|
||||
};
|
||||
|
||||
// Call the Index encoder. It doesn't take any input, so
|
||||
// those pointers can be NULL.
|
||||
const lzma_ret ret = coder->index_encoder.code(
|
||||
@@ -192,6 +186,11 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
return ret;
|
||||
|
||||
// Encode the Stream Footer into coder->buffer.
|
||||
const lzma_stream_flags stream_flags = {
|
||||
.version = 0,
|
||||
.backward_size = lzma_index_size(coder->index),
|
||||
.check = coder->block_options.check,
|
||||
};
|
||||
|
||||
if (lzma_stream_footer_encode(&stream_flags, coder->buffer)
|
||||
!= LZMA_OK)
|
||||
@@ -212,15 +211,15 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
static void
|
||||
stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
stream_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
size_t i;
|
||||
lzma_stream_coder *coder = coder_ptr;
|
||||
|
||||
lzma_next_end(&coder->block_encoder, allocator);
|
||||
lzma_next_end(&coder->index_encoder, allocator);
|
||||
lzma_index_end(coder->index, allocator);
|
||||
|
||||
for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
|
||||
for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
|
||||
lzma_free(coder->filters[i].options, allocator);
|
||||
|
||||
lzma_free(coder, allocator);
|
||||
@@ -229,22 +228,20 @@ stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
|
||||
stream_encoder_update(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const lzma_filter *filters,
|
||||
const lzma_filter *reversed_filters)
|
||||
{
|
||||
size_t i;
|
||||
lzma_stream_coder *coder = coder_ptr;
|
||||
|
||||
if (coder->sequence <= SEQ_BLOCK_INIT) {
|
||||
lzma_ret ret;
|
||||
|
||||
// There is no incomplete Block waiting to be finished,
|
||||
// thus we can change the whole filter chain. Start by
|
||||
// trying to initialize the Block encoder with the new
|
||||
// chain. This way we detect if the chain is valid.
|
||||
coder->block_encoder_is_initialized = false;
|
||||
coder->block_options.filters = (lzma_filter *)(filters);
|
||||
ret = block_encoder_init(coder, allocator);
|
||||
const lzma_ret ret = block_encoder_init(coder, allocator);
|
||||
coder->block_options.filters = coder->filters;
|
||||
if (ret != LZMA_OK)
|
||||
return ret;
|
||||
@@ -264,62 +261,66 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
|
||||
}
|
||||
|
||||
// Free the copy of the old chain and make a copy of the new chain.
|
||||
for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
|
||||
for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
|
||||
lzma_free(coder->filters[i].options, allocator);
|
||||
|
||||
return lzma_filters_copy(filters, coder->filters, allocator);
|
||||
}
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
static lzma_ret
|
||||
stream_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter *filters, lzma_check check)
|
||||
{
|
||||
lzma_stream_flags stream_flags = { 0, 0, check };
|
||||
|
||||
lzma_next_coder_init(&lzma_stream_encoder_init, next, allocator);
|
||||
lzma_next_coder_init(&stream_encoder_init, next, allocator);
|
||||
|
||||
if (filters == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
if (next->coder == NULL) {
|
||||
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (next->coder == NULL)
|
||||
lzma_stream_coder *coder = next->coder;
|
||||
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_stream_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
next->coder = coder;
|
||||
next->code = &stream_encode;
|
||||
next->end = &stream_encoder_end;
|
||||
next->update = &stream_encoder_update;
|
||||
|
||||
next->coder->filters[0].id = LZMA_VLI_UNKNOWN;
|
||||
next->coder->block_encoder = LZMA_NEXT_CODER_INIT;
|
||||
next->coder->index_encoder = LZMA_NEXT_CODER_INIT;
|
||||
next->coder->index = NULL;
|
||||
coder->filters[0].id = LZMA_VLI_UNKNOWN;
|
||||
coder->block_encoder = LZMA_NEXT_CODER_INIT;
|
||||
coder->index_encoder = LZMA_NEXT_CODER_INIT;
|
||||
coder->index = NULL;
|
||||
}
|
||||
|
||||
// Basic initializations
|
||||
next->coder->sequence = SEQ_STREAM_HEADER;
|
||||
next->coder->block_options.version = 0;
|
||||
next->coder->block_options.check = check;
|
||||
coder->sequence = SEQ_STREAM_HEADER;
|
||||
coder->block_options.version = 0;
|
||||
coder->block_options.check = check;
|
||||
|
||||
// Initialize the Index
|
||||
lzma_index_end(next->coder->index, allocator);
|
||||
next->coder->index = lzma_index_init(allocator);
|
||||
if (next->coder->index == NULL)
|
||||
lzma_index_end(coder->index, allocator);
|
||||
coder->index = lzma_index_init(allocator);
|
||||
if (coder->index == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
// Encode the Stream Header
|
||||
lzma_stream_flags stream_flags = {
|
||||
.version = 0,
|
||||
.check = check,
|
||||
};
|
||||
return_if_error(lzma_stream_header_encode(
|
||||
&stream_flags, next->coder->buffer));
|
||||
&stream_flags, coder->buffer));
|
||||
|
||||
next->coder->buffer_pos = 0;
|
||||
next->coder->buffer_size = LZMA_STREAM_HEADER_SIZE;
|
||||
coder->buffer_pos = 0;
|
||||
coder->buffer_size = LZMA_STREAM_HEADER_SIZE;
|
||||
|
||||
// Initialize the Block encoder. This way we detect unsupported
|
||||
// filter chains when initializing the Stream encoder instead of
|
||||
// giving an error after Stream Header has already written out.
|
||||
return stream_encoder_update(
|
||||
next->coder, allocator, filters, NULL);
|
||||
return stream_encoder_update(coder, allocator, filters, NULL);
|
||||
}
|
||||
|
||||
|
||||
@@ -327,11 +328,12 @@ extern LZMA_API(lzma_ret)
|
||||
lzma_stream_encoder(lzma_stream *strm,
|
||||
const lzma_filter *filters, lzma_check check)
|
||||
{
|
||||
lzma_next_strm_init2(lzma_stream_encoder_init, strm, filters, check);
|
||||
lzma_next_strm_init(stream_encoder_init, strm, filters, check);
|
||||
|
||||
strm->internal->supported_actions[LZMA_RUN] = true;
|
||||
strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
|
||||
strm->internal->supported_actions[LZMA_FULL_FLUSH] = true;
|
||||
strm->internal->supported_actions[LZMA_FULL_BARRIER] = true;
|
||||
strm->internal->supported_actions[LZMA_FINISH] = true;
|
||||
|
||||
return LZMA_OK;
|
||||
|
||||
Utilities/cmliblzma/liblzma/common/stream_encoder_mt.c (new file, 1143 lines; diff not shown because it is too large)
@@ -30,15 +30,13 @@ stream_flags_decode(lzma_stream_flags *options, const uint8_t *in)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
|
||||
{
|
||||
uint32_t crc;
|
||||
|
||||
// Magic
|
||||
if (memcmp(in, lzma_header_magic, sizeof(lzma_header_magic)) != 0)
|
||||
return LZMA_FORMAT_ERROR;
|
||||
|
||||
// Verify the CRC32 so we can distinguish between corrupt
|
||||
// and unsupported files.
|
||||
crc = lzma_crc32(in + sizeof(lzma_header_magic),
|
||||
const uint32_t crc = lzma_crc32(in + sizeof(lzma_header_magic),
|
||||
LZMA_STREAM_FLAGS_SIZE, 0);
|
||||
if (crc != unaligned_read32le(in + sizeof(lzma_header_magic)
|
||||
+ LZMA_STREAM_FLAGS_SIZE))
|
||||
@@ -61,15 +59,13 @@ lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_footer_decode(lzma_stream_flags *options, const uint8_t *in)
|
||||
{
|
||||
uint32_t crc;
|
||||
|
||||
// Magic
|
||||
if (memcmp(in + sizeof(uint32_t) * 2 + LZMA_STREAM_FLAGS_SIZE,
|
||||
lzma_footer_magic, sizeof(lzma_footer_magic)) != 0)
|
||||
return LZMA_FORMAT_ERROR;
|
||||
|
||||
// CRC32
|
||||
crc = lzma_crc32(in + sizeof(uint32_t),
|
||||
const uint32_t crc = lzma_crc32(in + sizeof(uint32_t),
|
||||
sizeof(uint32_t) + LZMA_STREAM_FLAGS_SIZE, 0);
|
||||
if (crc != unaligned_read32le(in))
|
||||
return LZMA_DATA_ERROR;
|
||||
|
||||
@@ -29,8 +29,6 @@ stream_flags_encode(const lzma_stream_flags *options, uint8_t *out)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
|
||||
{
|
||||
uint32_t crc;
|
||||
|
||||
assert(sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE
|
||||
+ 4 == LZMA_STREAM_HEADER_SIZE);
|
||||
|
||||
@@ -45,7 +43,7 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
// CRC32 of the Stream Header
|
||||
crc = lzma_crc32(out + sizeof(lzma_header_magic),
|
||||
const uint32_t crc = lzma_crc32(out + sizeof(lzma_header_magic),
|
||||
LZMA_STREAM_FLAGS_SIZE, 0);
|
||||
|
||||
unaligned_write32le(out + sizeof(lzma_header_magic)
|
||||
@@ -58,8 +56,6 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
|
||||
{
|
||||
uint32_t crc;
|
||||
|
||||
assert(2 * 4 + LZMA_STREAM_FLAGS_SIZE + sizeof(lzma_footer_magic)
|
||||
== LZMA_STREAM_HEADER_SIZE);
|
||||
|
||||
@@ -77,7 +73,7 @@ lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
// CRC32
|
||||
crc = lzma_crc32(
|
||||
const uint32_t crc = lzma_crc32(
|
||||
out + 4, 4 + LZMA_STREAM_FLAGS_SIZE, 0);
|
||||
|
||||
unaligned_write32le(out, crc);
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_vli_decode(lzma_vli *LZMA_RESTRICT vli, size_t *vli_pos,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
lzma_vli_decode(lzma_vli *restrict vli, size_t *vli_pos,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size)
|
||||
{
|
||||
// If we haven't been given vli_pos, work in single-call mode.
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
extern LZMA_API(lzma_ret)
|
||||
lzma_vli_encode(lzma_vli vli, size_t *vli_pos,
|
||||
uint8_t *LZMA_RESTRICT out, size_t *LZMA_RESTRICT out_pos,
|
||||
uint8_t *restrict out, size_t *restrict out_pos,
|
||||
size_t out_size)
|
||||
{
|
||||
// If we haven't been given vli_pos, work in single-call mode.
|
||||
|
||||
@@ -16,11 +16,10 @@
|
||||
extern LZMA_API(uint32_t)
|
||||
lzma_vli_size(lzma_vli vli)
|
||||
{
|
||||
uint32_t i = 0;
|
||||
|
||||
if (vli > LZMA_VLI_MAX)
|
||||
return 0;
|
||||
|
||||
uint32_t i = 0;
|
||||
do {
|
||||
vli >>= 7;
|
||||
++i;
|
||||
|
||||
@@ -15,8 +15,9 @@


static void
delta_coder_end(lzma_coder *coder, lzma_allocator *allocator)
delta_coder_end(void *coder_ptr, const lzma_allocator *allocator)
{
lzma_delta_coder *coder = coder_ptr;
lzma_next_end(&coder->next, allocator);
lzma_free(coder, allocator);
return;
@@ -24,20 +25,21 @@ delta_coder_end(lzma_coder *coder, lzma_allocator *allocator)


extern lzma_ret
lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_delta_coder_init(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_filter_info *filters)
{
const lzma_options_delta *opt;

// Allocate memory for the decoder if needed.
if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (next->coder == NULL)
lzma_delta_coder *coder = next->coder;
if (coder == NULL) {
coder = lzma_alloc(sizeof(lzma_delta_coder), allocator);
if (coder == NULL)
return LZMA_MEM_ERROR;

next->coder = coder;

// End function is the same for encoder and decoder.
next->end = &delta_coder_end;
next->coder->next = LZMA_NEXT_CODER_INIT;
coder->next = LZMA_NEXT_CODER_INIT;
}

// Validate the options.
@@ -45,16 +47,15 @@ lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
return LZMA_OPTIONS_ERROR;

// Set the delta distance.
opt = filters[0].options;
next->coder->distance = opt->dist;
const lzma_options_delta *opt = filters[0].options;
coder->distance = opt->dist;

// Initialize the rest of the variables.
next->coder->pos = 0;
memzero(next->coder->history, LZMA_DELTA_DIST_MAX);
coder->pos = 0;
memzero(coder->history, LZMA_DELTA_DIST_MAX);

// Initialize the next decoder in the chain, if any.
return lzma_next_filter_init(&next->coder->next,
allocator, filters + 1);
return lzma_next_filter_init(&coder->next, allocator, filters + 1);
}


@@ -68,5 +69,5 @@ lzma_delta_coder_memusage(const void *options)
|| opt->dist > LZMA_DELTA_DIST_MAX)
return UINT64_MAX;

return sizeof(lzma_coder);
return sizeof(lzma_delta_coder);
}

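The Delta filter set up by lzma_delta_coder_init() above is a simple byte-wise predictor: with distance dist (1..256), the encoder stores each byte minus the byte dist positions earlier, and the decoder adds it back. A self-contained sketch of the transform on a plain buffer (illustration only; the real coder streams through a 256-byte history instead):

#include <stdint.h>
#include <stddef.h>

static void delta_encode_sketch(uint8_t *buf, size_t size, size_t dist)
{
    // Walk backwards so buf[i - dist] is still the original byte.
    for (size_t i = size; i-- > dist; )
        buf[i] = (uint8_t)(buf[i] - buf[i - dist]);
}

static void delta_decode_sketch(uint8_t *buf, size_t size, size_t dist)
{
    // Walk forwards so buf[i - dist] has already been reconstructed.
    for (size_t i = dist; i < size; ++i)
        buf[i] = (uint8_t)(buf[i] + buf[i - dist]);
}

Running the encode sketch and then the decode sketch with the same dist restores the original buffer, which is a quick way to sanity-check a distance before wiring it into lzma_options_delta.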
@@ -15,12 +15,11 @@
|
||||
|
||||
|
||||
static void
|
||||
decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size)
|
||||
decode_buffer(lzma_delta_coder *coder, uint8_t *buffer, size_t size)
|
||||
{
|
||||
size_t i;
|
||||
const size_t distance = coder->distance;
|
||||
|
||||
for (i = 0; i < size; ++i) {
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
buffer[i] += coder->history[(distance + coder->pos) & 0xFF];
|
||||
coder->history[coder->pos-- & 0xFF] = buffer[i];
|
||||
}
|
||||
@@ -28,17 +27,18 @@ decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
delta_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
size_t in_size, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
|
||||
delta_decode(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size, lzma_action action)
|
||||
{
|
||||
const size_t out_start = *out_pos;
|
||||
lzma_ret ret;
|
||||
lzma_delta_coder *coder = coder_ptr;
|
||||
|
||||
assert(coder->next.code != NULL);
|
||||
|
||||
ret = coder->next.code(coder->next.coder, allocator,
|
||||
const size_t out_start = *out_pos;
|
||||
|
||||
const lzma_ret ret = coder->next.code(coder->next.coder, allocator,
|
||||
in, in_pos, in_size, out, out_pos, out_size,
|
||||
action);
|
||||
|
||||
@@ -49,7 +49,7 @@ delta_decode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_delta_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_delta_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters)
|
||||
{
|
||||
next->code = &delta_decode;
|
||||
@@ -58,15 +58,14 @@ lzma_delta_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_delta_props_decode(void **options, lzma_allocator *allocator,
|
||||
lzma_delta_props_decode(void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size)
|
||||
{
|
||||
lzma_options_delta *opt;
|
||||
|
||||
if (props_size != 1)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
opt = lzma_alloc(sizeof(lzma_options_delta), allocator);
|
||||
lzma_options_delta *opt
|
||||
= lzma_alloc(sizeof(lzma_options_delta), allocator);
|
||||
if (opt == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
|
||||
@@ -16,10 +16,11 @@
|
||||
#include "delta_common.h"
|
||||
|
||||
extern lzma_ret lzma_delta_decoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, const lzma_filter_info *filters);
|
||||
const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters);
|
||||
|
||||
extern lzma_ret lzma_delta_props_decode(
|
||||
void **options, lzma_allocator *allocator,
|
||||
void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -18,13 +18,12 @@
|
||||
/// is the first filter in the chain (and thus the last filter in the
|
||||
/// encoder's filter stack).
|
||||
static void
|
||||
copy_and_encode(lzma_coder *coder,
|
||||
const uint8_t *LZMA_RESTRICT in, uint8_t *LZMA_RESTRICT out, size_t size)
|
||||
copy_and_encode(lzma_delta_coder *coder,
|
||||
const uint8_t *restrict in, uint8_t *restrict out, size_t size)
|
||||
{
|
||||
size_t i;
|
||||
const size_t distance = coder->distance;
|
||||
|
||||
for (i = 0; i < size; ++i) {
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
const uint8_t tmp = coder->history[
|
||||
(distance + coder->pos) & 0xFF];
|
||||
coder->history[coder->pos-- & 0xFF] = in[i];
|
||||
@@ -36,12 +35,11 @@ copy_and_encode(lzma_coder *coder,
|
||||
/// Encodes the data in place. This is used when we are the last filter
|
||||
/// in the chain (and thus non-last filter in the encoder's filter stack).
|
||||
static void
|
||||
encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size)
|
||||
encode_in_place(lzma_delta_coder *coder, uint8_t *buffer, size_t size)
|
||||
{
|
||||
size_t i;
|
||||
const size_t distance = coder->distance;
|
||||
|
||||
for (i = 0; i < size; ++i) {
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
const uint8_t tmp = coder->history[
|
||||
(distance + coder->pos) & 0xFF];
|
||||
coder->history[coder->pos-- & 0xFF] = buffer[i];
|
||||
@@ -51,11 +49,13 @@ encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
delta_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
size_t in_size, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size, lzma_action action)
|
||||
delta_encode(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size, lzma_action action)
|
||||
{
|
||||
lzma_delta_coder *coder = coder_ptr;
|
||||
|
||||
lzma_ret ret;
|
||||
|
||||
if (coder->next.code == NULL) {
|
||||
@@ -86,10 +86,12 @@ delta_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
static lzma_ret
|
||||
delta_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
|
||||
delta_encoder_update(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const lzma_filter *filters_null lzma_attribute((__unused__)),
|
||||
const lzma_filter *reversed_filters)
|
||||
{
|
||||
lzma_delta_coder *coder = coder_ptr;
|
||||
|
||||
// Delta doesn't and will never support changing the options in
|
||||
// the middle of encoding. If the app tries to change them, we
|
||||
// simply ignore them.
|
||||
@@ -99,7 +101,7 @@ delta_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_delta_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_delta_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters)
|
||||
{
|
||||
next->code = &delta_encode;
|
||||
@@ -111,13 +113,12 @@ lzma_delta_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
extern lzma_ret
|
||||
lzma_delta_props_encode(const void *options, uint8_t *out)
|
||||
{
|
||||
const lzma_options_delta *opt = options;
|
||||
|
||||
// The caller must have already validated the options, so it's
|
||||
// LZMA_PROG_ERROR if they are invalid.
|
||||
if (lzma_delta_coder_memusage(options) == UINT64_MAX)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
const lzma_options_delta *opt = options;
|
||||
out[0] = opt->dist - LZMA_DELTA_DIST_MIN;
|
||||
|
||||
return LZMA_OK;
|
||||
|
||||
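For reference, the single properties byte written by lzma_delta_props_encode() above is just the distance biased by LZMA_DELTA_DIST_MIN (1), so distances 1..256 map to 0..255. A small round-trip sketch, assuming a distance already validated to that range:

#include <stdint.h>

static uint8_t delta_props_encode_sketch(uint32_t dist)  // dist in 1..256
{
    return (uint8_t)(dist - 1);     // dist - LZMA_DELTA_DIST_MIN
}

static uint32_t delta_props_decode_sketch(uint8_t props)
{
    return (uint32_t)props + 1;     // back to 1..256
}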
@@ -16,7 +16,8 @@
|
||||
#include "delta_common.h"
|
||||
|
||||
extern lzma_ret lzma_delta_encoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, const lzma_filter_info *filters);
|
||||
const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters);
|
||||
|
||||
extern lzma_ret lzma_delta_props_encode(const void *options, uint8_t *out);
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
#include "delta_common.h"
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
/// Next coder in the chain
|
||||
lzma_next_coder next;
|
||||
|
||||
@@ -27,11 +27,11 @@ struct lzma_coder_s {
|
||||
|
||||
/// Buffer to hold history of the original data
|
||||
uint8_t history[LZMA_DELTA_DIST_MAX];
|
||||
};
|
||||
} lzma_delta_coder;
|
||||
|
||||
|
||||
extern lzma_ret lzma_delta_coder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -16,4 +16,4 @@ URL: @PACKAGE_URL@
Version: @PACKAGE_VERSION@
Cflags: -I${includedir}
Libs: -L${libdir} -llzma
Libs.private: @PTHREAD_CFLAGS@ @PTHREAD_LIBS@
Libs.private: @PTHREAD_CFLAGS@ @LIBS@

@@ -20,7 +20,7 @@
|
||||
#include "lz_decoder.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
/// Dictionary (history buffer)
|
||||
lzma_dict dict;
|
||||
|
||||
@@ -48,7 +48,7 @@ struct lzma_coder_s {
|
||||
size_t size;
|
||||
uint8_t buffer[LZMA_BUFFER_SIZE];
|
||||
} temp;
|
||||
};
|
||||
} lzma_coder;
|
||||
|
||||
|
||||
static void
|
||||
@@ -64,22 +64,18 @@ lz_decoder_reset(lzma_coder *coder)
|
||||
|
||||
static lzma_ret
|
||||
decode_buffer(lzma_coder *coder,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
size_t in_size, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size)
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size)
|
||||
{
|
||||
while (true) {
|
||||
size_t copy_size;
|
||||
size_t dict_start;
|
||||
lzma_ret ret;
|
||||
|
||||
// Wrap the dictionary if needed.
|
||||
if (coder->dict.pos == coder->dict.size)
|
||||
coder->dict.pos = 0;
|
||||
|
||||
// Store the current dictionary position. It is needed to know
|
||||
// where to start copying to the out[] buffer.
|
||||
dict_start = coder->dict.pos;
|
||||
const size_t dict_start = coder->dict.pos;
|
||||
|
||||
// Calculate how much we allow coder->lz.code() to decode.
|
||||
// It must not decode past the end of the dictionary
|
||||
@@ -90,13 +86,13 @@ decode_buffer(lzma_coder *coder,
|
||||
coder->dict.size - coder->dict.pos);
|
||||
|
||||
// Call the coder->lz.code() to do the actual decoding.
|
||||
ret = coder->lz.code(
|
||||
const lzma_ret ret = coder->lz.code(
|
||||
coder->lz.coder, &coder->dict,
|
||||
in, in_pos, in_size);
|
||||
|
||||
// Copy the decoded data from the dictionary to the out[]
|
||||
// buffer.
|
||||
copy_size = coder->dict.pos - dict_start;
|
||||
const size_t copy_size = coder->dict.pos - dict_start;
|
||||
assert(copy_size <= out_size - *out_pos);
|
||||
memcpy(out + *out_pos, coder->dict.buf + dict_start,
|
||||
copy_size);
|
||||
@@ -129,13 +125,15 @@ decode_buffer(lzma_coder *coder,
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lz_decode(lzma_coder *coder,
|
||||
lzma_allocator *allocator lzma_attribute((__unused__)),
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
size_t in_size, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size,
|
||||
lz_decode(void *coder_ptr,
|
||||
const lzma_allocator *allocator lzma_attribute((__unused__)),
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size,
|
||||
lzma_action action)
|
||||
{
|
||||
lzma_coder *coder = coder_ptr;
|
||||
|
||||
if (coder->next.code == NULL)
|
||||
return decode_buffer(coder, in, in_pos, in_size,
|
||||
out, out_pos, out_size);
|
||||
@@ -143,15 +141,13 @@ lz_decode(lzma_coder *coder,
|
||||
// We aren't the last coder in the chain, we need to decode
|
||||
// our input to a temporary buffer.
|
||||
while (*out_pos < out_size) {
|
||||
lzma_ret ret;
|
||||
|
||||
// Fill the temporary buffer if it is empty.
|
||||
if (!coder->next_finished
|
||||
&& coder->temp.pos == coder->temp.size) {
|
||||
coder->temp.pos = 0;
|
||||
coder->temp.size = 0;
|
||||
|
||||
ret = coder->next.code(
|
||||
const lzma_ret ret = coder->next.code(
|
||||
coder->next.coder,
|
||||
allocator, in, in_pos, in_size,
|
||||
coder->temp.buffer, &coder->temp.size,
|
||||
@@ -173,7 +169,7 @@ lz_decode(lzma_coder *coder,
|
||||
return LZMA_OK;
|
||||
}
|
||||
|
||||
ret = decode_buffer(coder, coder->temp.buffer,
|
||||
const lzma_ret ret = decode_buffer(coder, coder->temp.buffer,
|
||||
&coder->temp.pos, coder->temp.size,
|
||||
out, out_pos, out_size);
|
||||
|
||||
@@ -190,8 +186,10 @@ lz_decode(lzma_coder *coder,
|
||||
|
||||
|
||||
static void
|
||||
lz_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
lz_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_coder *coder = coder_ptr;
|
||||
|
||||
lzma_next_end(&coder->next, allocator);
|
||||
lzma_free(coder->dict.buf, allocator);
|
||||
|
||||
@@ -206,32 +204,33 @@ lz_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_lz_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters,
|
||||
lzma_ret (*lz_init)(lzma_lz_decoder *lz,
|
||||
lzma_allocator *allocator, const void *options,
|
||||
const lzma_allocator *allocator, const void *options,
|
||||
lzma_lz_options *lz_options))
|
||||
{
|
||||
lzma_lz_options lz_options;
|
||||
|
||||
// Allocate the base structure if it isn't already allocated.
|
||||
if (next->coder == NULL) {
|
||||
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (next->coder == NULL)
|
||||
lzma_coder *coder = next->coder;
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
next->coder = coder;
|
||||
next->code = &lz_decode;
|
||||
next->end = &lz_decoder_end;
|
||||
|
||||
next->coder->dict.buf = NULL;
|
||||
next->coder->dict.size = 0;
|
||||
next->coder->lz = LZMA_LZ_DECODER_INIT;
|
||||
next->coder->next = LZMA_NEXT_CODER_INIT;
|
||||
coder->dict.buf = NULL;
|
||||
coder->dict.size = 0;
|
||||
coder->lz = LZMA_LZ_DECODER_INIT;
|
||||
coder->next = LZMA_NEXT_CODER_INIT;
|
||||
}
|
||||
|
||||
// Allocate and initialize the LZ-based decoder. It will also give
|
||||
// us the dictionary size.
|
||||
return_if_error(lz_init(&next->coder->lz, allocator,
|
||||
lzma_lz_options lz_options;
|
||||
return_if_error(lz_init(&coder->lz, allocator,
|
||||
filters[0].options, &lz_options));
|
||||
|
||||
// If the dictionary size is very small, increase it to 4096 bytes.
|
||||
@@ -255,14 +254,14 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lz_options.dict_size = (lz_options.dict_size + 15) & ~((size_t)(15));
|
||||
|
||||
// Allocate and initialize the dictionary.
|
||||
if (next->coder->dict.size != lz_options.dict_size) {
|
||||
lzma_free(next->coder->dict.buf, allocator);
|
||||
next->coder->dict.buf
|
||||
if (coder->dict.size != lz_options.dict_size) {
|
||||
lzma_free(coder->dict.buf, allocator);
|
||||
coder->dict.buf
|
||||
= lzma_alloc(lz_options.dict_size, allocator);
|
||||
if (next->coder->dict.buf == NULL)
|
||||
if (coder->dict.buf == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
next->coder->dict.size = lz_options.dict_size;
|
||||
coder->dict.size = lz_options.dict_size;
|
||||
}
|
||||
|
||||
lz_decoder_reset(next->coder);
|
||||
@@ -275,21 +274,20 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
const size_t copy_size = my_min(lz_options.preset_dict_size,
|
||||
lz_options.dict_size);
|
||||
const size_t offset = lz_options.preset_dict_size - copy_size;
|
||||
memcpy(next->coder->dict.buf, lz_options.preset_dict + offset,
|
||||
memcpy(coder->dict.buf, lz_options.preset_dict + offset,
|
||||
copy_size);
|
||||
next->coder->dict.pos = copy_size;
|
||||
next->coder->dict.full = copy_size;
|
||||
coder->dict.pos = copy_size;
|
||||
coder->dict.full = copy_size;
|
||||
}
|
||||
|
||||
// Miscellaneous initializations
|
||||
next->coder->next_finished = false;
|
||||
next->coder->this_finished = false;
|
||||
next->coder->temp.pos = 0;
|
||||
next->coder->temp.size = 0;
|
||||
coder->next_finished = false;
|
||||
coder->this_finished = false;
|
||||
coder->temp.pos = 0;
|
||||
coder->temp.size = 0;
|
||||
|
||||
// Initialize the next filter in the chain, if any.
|
||||
return lzma_next_filter_init(&next->coder->next, allocator,
|
||||
filters + 1);
|
||||
return lzma_next_filter_init(&coder->next, allocator, filters + 1);
|
||||
}
|
||||
|
||||
|
||||
@@ -301,7 +299,8 @@ lzma_lz_decoder_memusage(size_t dictionary_size)
|
||||
|
||||
|
||||
extern void
|
||||
lzma_lz_decoder_uncompressed(lzma_coder *coder, lzma_vli uncompressed_size)
|
||||
lzma_lz_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
|
||||
{
|
||||
lzma_coder *coder = coder_ptr;
|
||||
coder->lz.set_uncompressed(coder->lz.coder, uncompressed_size);
|
||||
}
|
||||
|
||||
@@ -53,45 +53,45 @@ typedef struct {
|
||||
|
||||
typedef struct {
|
||||
/// Data specific to the LZ-based decoder
|
||||
lzma_coder *coder;
|
||||
void *coder;
|
||||
|
||||
/// Function to decode from in[] to *dict
|
||||
lzma_ret (*code)(lzma_coder *LZMA_RESTRICT coder,
|
||||
lzma_dict *LZMA_RESTRICT dict, const uint8_t *LZMA_RESTRICT in,
|
||||
size_t *LZMA_RESTRICT in_pos, size_t in_size);
|
||||
lzma_ret (*code)(void *coder,
|
||||
lzma_dict *restrict dict, const uint8_t *restrict in,
|
||||
size_t *restrict in_pos, size_t in_size);
|
||||
|
||||
void (*reset)(lzma_coder *coder, const void *options);
|
||||
void (*reset)(void *coder, const void *options);
|
||||
|
||||
/// Set the uncompressed size
|
||||
void (*set_uncompressed)(lzma_coder *coder,
|
||||
lzma_vli uncompressed_size);
|
||||
void (*set_uncompressed)(void *coder, lzma_vli uncompressed_size);
|
||||
|
||||
/// Free allocated resources
|
||||
void (*end)(lzma_coder *coder, lzma_allocator *allocator);
|
||||
void (*end)(void *coder, const lzma_allocator *allocator);
|
||||
|
||||
} lzma_lz_decoder;
|
||||
|
||||
|
||||
static const lzma_lz_decoder LZMA_LZ_DECODER_INIT =
|
||||
{
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
};
|
||||
#define LZMA_LZ_DECODER_INIT \
|
||||
(lzma_lz_decoder){ \
|
||||
.coder = NULL, \
|
||||
.code = NULL, \
|
||||
.reset = NULL, \
|
||||
.set_uncompressed = NULL, \
|
||||
.end = NULL, \
|
||||
}
|
||||
|
||||
|
||||
extern lzma_ret lzma_lz_decoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, const lzma_filter_info *filters,
|
||||
const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters,
|
||||
lzma_ret (*lz_init)(lzma_lz_decoder *lz,
|
||||
lzma_allocator *allocator, const void *options,
|
||||
const lzma_allocator *allocator, const void *options,
|
||||
lzma_lz_options *lz_options));
|
||||
|
||||
extern uint64_t lzma_lz_decoder_memusage(size_t dictionary_size);
|
||||
|
||||
extern void lzma_lz_decoder_uncompressed(
|
||||
lzma_coder *coder, lzma_vli uncompressed_size);
|
||||
void *coder, lzma_vli uncompressed_size);
|
||||
|
||||
|
||||
//////////////////////
|
||||
@@ -151,15 +151,13 @@ dict_repeat(lzma_dict *dict, uint32_t distance, uint32_t *len)
|
||||
dict->pos += left;
|
||||
|
||||
} else {
|
||||
uint32_t copy_pos;
|
||||
uint32_t copy_size;
|
||||
|
||||
// The bigger the dictionary, the more rare this
|
||||
// case occurs. We need to "wrap" the dict, thus
|
||||
// we might need two memcpy() to copy all the data.
|
||||
assert(dict->full == dict->size);
|
||||
copy_pos = dict->pos - distance - 1 + dict->size;
|
||||
copy_size = dict->size - copy_pos;
|
||||
const uint32_t copy_pos
|
||||
= dict->pos - distance - 1 + dict->size;
|
||||
uint32_t copy_size = dict->size - copy_pos;
|
||||
|
||||
if (copy_size < left) {
|
||||
memmove(dict->buf + dict->pos, dict->buf + copy_pos,
|
||||
@@ -202,9 +200,9 @@ dict_put(lzma_dict *dict, uint8_t byte)
|
||||
|
||||
/// Copies arbitrary amount of data into the dictionary.
|
||||
static inline void
|
||||
dict_write(lzma_dict *LZMA_RESTRICT dict, const uint8_t *LZMA_RESTRICT in,
|
||||
size_t *LZMA_RESTRICT in_pos, size_t in_size,
|
||||
size_t *LZMA_RESTRICT left)
|
||||
dict_write(lzma_dict *restrict dict, const uint8_t *restrict in,
|
||||
size_t *restrict in_pos, size_t in_size,
|
||||
size_t *restrict left)
|
||||
{
|
||||
// NOTE: If we are being given more data than the size of the
|
||||
// dictionary, it could be possible to optimize the LZ decoder
|
||||
|
||||
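The lz_decoder changes above keep the decoded history in a circular dictionary buffer; dict_repeat() copies an LZ77 match from distance + 1 bytes back, wrapping around the end of the buffer when needed. A simplified, self-contained sketch of that copy (illustration only; it assumes the buffer is already full of valid history and skips the full/limit bookkeeping the real code does):

#include <stdint.h>
#include <stddef.h>

static void dict_repeat_sketch(uint8_t *buf, size_t size, size_t *pos,
        size_t distance, size_t len)
{
    while (len-- > 0) {
        // Match distances are zero-based, so the source byte sits
        // distance + 1 positions behind the write position.
        const size_t back = (*pos + size - distance - 1) % size;
        buf[*pos] = buf[back];
        *pos = (*pos + 1) % size;
    }
}

Copying byte by byte matters here: the match may overlap the bytes it is producing (distance smaller than len), which is how LZ coding expresses runs.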
@@ -20,8 +20,10 @@
|
||||
# include "lz_encoder_hash_table.h"
|
||||
#endif
|
||||
|
||||
#include "memcmplen.h"
|
||||
|
||||
struct lzma_coder_s {
|
||||
|
||||
typedef struct {
|
||||
/// LZ-based encoder e.g. LZMA
|
||||
lzma_lz_encoder lz;
|
||||
|
||||
@@ -30,7 +32,7 @@ struct lzma_coder_s {
|
||||
|
||||
/// Next coder in the chain
|
||||
lzma_next_coder next;
|
||||
};
|
||||
} lzma_coder;
|
||||
|
||||
|
||||
/// \brief Moves the data in the input window to free space for new data
|
||||
@@ -43,18 +45,16 @@ struct lzma_coder_s {
|
||||
static void
|
||||
move_window(lzma_mf *mf)
|
||||
{
|
||||
uint32_t move_offset;
|
||||
size_t move_size;
|
||||
|
||||
// Align the move to a multiple of 16 bytes. Some LZ-based encoders
|
||||
// like LZMA use the lowest bits of mf->read_pos to know the
|
||||
// alignment of the uncompressed data. We also get better speed
|
||||
// for memmove() with aligned buffers.
|
||||
assert(mf->read_pos > mf->keep_size_before);
|
||||
move_offset = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
|
||||
const uint32_t move_offset
|
||||
= (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
|
||||
|
||||
assert(mf->write_pos > move_offset);
|
||||
move_size = mf->write_pos - move_offset;
|
||||
const size_t move_size = mf->write_pos - move_offset;
|
||||
|
||||
assert(move_offset + move_size <= mf->size);
|
||||
|
||||
@@ -78,12 +78,10 @@ move_window(lzma_mf *mf)
|
||||
/// This function must not be called once it has returned LZMA_STREAM_END.
|
||||
///
|
||||
static lzma_ret
|
||||
fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
|
||||
size_t *in_pos, size_t in_size, lzma_action action)
|
||||
fill_window(lzma_coder *coder, const lzma_allocator *allocator,
|
||||
const uint8_t *in, size_t *in_pos, size_t in_size,
|
||||
lzma_action action)
|
||||
{
|
||||
size_t write_pos;
|
||||
lzma_ret ret;
|
||||
|
||||
assert(coder->mf.read_pos <= coder->mf.write_pos);
|
||||
|
||||
// Move the sliding window if needed.
|
||||
@@ -93,7 +91,8 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
|
||||
// Maybe this is ugly, but lzma_mf uses uint32_t for most things
|
||||
// (which I find cleanest), but we need size_t here when filling
|
||||
// the history window.
|
||||
write_pos = coder->mf.write_pos;
|
||||
size_t write_pos = coder->mf.write_pos;
|
||||
lzma_ret ret;
|
||||
if (coder->next.code == NULL) {
|
||||
// Not using a filter, simply memcpy() as much as possible.
|
||||
lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
|
||||
@@ -111,6 +110,12 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
|
||||
|
||||
coder->mf.write_pos = write_pos;
|
||||
|
||||
// Silence Valgrind. lzma_memcmplen() can read extra bytes
|
||||
// and Valgrind will give warnings if those bytes are uninitialized
|
||||
// because Valgrind cannot see that the values of the uninitialized
|
||||
// bytes are eventually ignored.
|
||||
memzero(coder->mf.buffer + write_pos, LZMA_MEMCMPLEN_EXTRA);
|
||||
|
||||
// If end of stream has been reached or flushing completed, we allow
|
||||
// the encoder to process all the input (that is, read_pos is allowed
|
||||
// to reach write_pos). Otherwise we keep keep_size_after bytes
|
||||
@@ -134,7 +139,7 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
|
||||
&& coder->mf.read_pos < coder->mf.read_limit) {
|
||||
// Match finder may update coder->pending and expects it to
|
||||
// start from zero, so use a temporary variable.
|
||||
const size_t pending = coder->mf.pending;
|
||||
const uint32_t pending = coder->mf.pending;
|
||||
coder->mf.pending = 0;
|
||||
|
||||
// Rewind read_pos so that the match finder can hash
|
||||
@@ -152,16 +157,16 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lz_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
lz_encode(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size,
|
||||
uint8_t *LZMA_RESTRICT out, size_t *LZMA_RESTRICT out_pos,
|
||||
uint8_t *restrict out, size_t *restrict out_pos,
|
||||
size_t out_size, lzma_action action)
|
||||
{
|
||||
lzma_coder *coder = coder_ptr;
|
||||
|
||||
while (*out_pos < out_size
|
||||
&& (*in_pos < in_size || action != LZMA_RUN)) {
|
||||
lzma_ret ret;
|
||||
|
||||
// Read more data to coder->mf.buffer if needed.
|
||||
if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
|
||||
>= coder->mf.read_limit)
|
||||
@@ -169,7 +174,7 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
in, in_pos, in_size, action));
|
||||
|
||||
// Encode
|
||||
ret = coder->lz.code(coder->lz.coder,
|
||||
const lzma_ret ret = coder->lz.code(coder->lz.coder,
|
||||
&coder->mf, out, out_pos, out_size);
|
||||
if (ret != LZMA_OK) {
|
||||
// Setting this to LZMA_RUN for cases when we are
|
||||
@@ -185,17 +190,9 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
static bool
|
||||
lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
|
||||
lz_encoder_prepare(lzma_mf *mf, const lzma_allocator *allocator,
|
||||
const lzma_lz_options *lz_options)
|
||||
{
|
||||
bool is_bt;
|
||||
uint32_t new_count;
|
||||
uint32_t reserve;
|
||||
uint32_t old_size;
|
||||
uint32_t hash_bytes;
|
||||
uint32_t hs;
|
||||
uint32_t old_count;
|
||||
|
||||
// For now, the dictionary size is limited to 1.5 GiB. This may grow
|
||||
// in the future if needed, but it needs a little more work than just
|
||||
// changing this check.
|
||||
@@ -221,14 +218,14 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
|
||||
// to size_t.
|
||||
// - Memory usage calculation needs something too, e.g. use uint64_t
|
||||
// for mf->size.
|
||||
reserve = lz_options->dict_size / 2;
|
||||
uint32_t reserve = lz_options->dict_size / 2;
|
||||
if (reserve > (UINT32_C(1) << 30))
|
||||
reserve /= 2;
|
||||
|
||||
reserve += (lz_options->before_size + lz_options->match_len_max
|
||||
+ lz_options->after_size) / 2 + (UINT32_C(1) << 19);
|
||||
|
||||
old_size = mf->size;
|
||||
const uint32_t old_size = mf->size;
|
||||
mf->size = mf->keep_size_before + reserve + mf->keep_size_after;
|
||||
|
||||
// Deallocate the old history buffer if it exists but has different
|
||||
@@ -298,11 +295,12 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
|
||||
|
||||
// Calculate the sizes of mf->hash and mf->son and check that
|
||||
// nice_len is big enough for the selected match finder.
|
||||
hash_bytes = lz_options->match_finder & 0x0F;
|
||||
const uint32_t hash_bytes = lz_options->match_finder & 0x0F;
|
||||
if (hash_bytes > mf->nice_len)
|
||||
return true;
|
||||
|
||||
is_bt = (lz_options->match_finder & 0x10) != 0;
|
||||
const bool is_bt = (lz_options->match_finder & 0x10) != 0;
|
||||
uint32_t hs;
|
||||
|
||||
if (hash_bytes == 2) {
|
||||
hs = 0xFFFF;
|
||||
@@ -338,25 +336,22 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
|
||||
hs += HASH_4_SIZE;
|
||||
*/
|
||||
|
||||
// If the above code calculating hs is modified, make sure that
|
||||
// this assertion stays valid (UINT32_MAX / 5 is not strictly the
|
||||
// exact limit). If it doesn't, you need to calculate that
|
||||
// hash_size_sum + sons_count cannot overflow.
|
||||
assert(hs < UINT32_MAX / 5);
|
||||
|
||||
old_count = mf->hash_size_sum + mf->sons_count;
|
||||
mf->hash_size_sum = hs;
|
||||
const uint32_t old_hash_count = mf->hash_count;
|
||||
const uint32_t old_sons_count = mf->sons_count;
|
||||
mf->hash_count = hs;
|
||||
mf->sons_count = mf->cyclic_size;
|
||||
if (is_bt)
|
||||
mf->sons_count *= 2;
|
||||
|
||||
new_count = mf->hash_size_sum + mf->sons_count;
|
||||
|
||||
// Deallocate the old hash array if it exists and has different size
|
||||
// than what is needed now.
|
||||
if (old_count != new_count) {
|
||||
if (old_hash_count != mf->hash_count
|
||||
|| old_sons_count != mf->sons_count) {
|
||||
lzma_free(mf->hash, allocator);
|
||||
mf->hash = NULL;
|
||||
|
||||
lzma_free(mf->son, allocator);
|
||||
mf->son = NULL;
|
||||
}
|
||||
|
||||
// Maximum number of match finder cycles
|
||||
@@ -373,16 +368,23 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
|
||||
|
||||
|
||||
static bool
|
||||
lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
|
||||
lz_encoder_init(lzma_mf *mf, const lzma_allocator *allocator,
|
||||
const lzma_lz_options *lz_options)
|
||||
{
|
||||
size_t alloc_count;
|
||||
|
||||
// Allocate the history buffer.
|
||||
if (mf->buffer == NULL) {
|
||||
mf->buffer = lzma_alloc(mf->size, allocator);
|
||||
// lzma_memcmplen() is used for the dictionary buffer
|
||||
// so we need to allocate a few extra bytes to prevent
|
||||
// it from reading past the end of the buffer.
|
||||
mf->buffer = lzma_alloc(mf->size + LZMA_MEMCMPLEN_EXTRA,
|
||||
allocator);
|
||||
if (mf->buffer == NULL)
|
||||
return true;
|
||||
|
||||
// Keep Valgrind happy with lzma_memcmplen() and initialize
|
||||
// the extra bytes whose value may get read but which will
|
||||
// effectively get ignored.
|
||||
memzero(mf->buffer + mf->size, LZMA_MEMCMPLEN_EXTRA);
|
||||
}
|
||||
|
||||
// Use cyclic_size as initial mf->offset. This allows
|
||||
@@ -396,44 +398,49 @@ lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
|
||||
mf->write_pos = 0;
|
||||
mf->pending = 0;
|
||||
|
||||
// Allocate match finder's hash array.
|
||||
alloc_count = mf->hash_size_sum + mf->sons_count;
|
||||
|
||||
#if UINT32_MAX >= SIZE_MAX / 4
|
||||
// Check for integer overflow. (Huge dictionaries are not
|
||||
// possible on 32-bit CPU.)
|
||||
if (alloc_count > SIZE_MAX / sizeof(uint32_t))
|
||||
if (mf->hash_count > SIZE_MAX / sizeof(uint32_t)
|
||||
|| mf->sons_count > SIZE_MAX / sizeof(uint32_t))
|
||||
return true;
|
||||
#endif
|
||||
|
||||
// Allocate and initialize the hash table. Since EMPTY_HASH_VALUE
|
||||
// is zero, we can use lzma_alloc_zero() or memzero() for mf->hash.
|
||||
//
|
||||
// We don't need to initialize mf->son, but not doing that may
|
||||
// make Valgrind complain in normalization (see normalize() in
|
||||
// lz_encoder_mf.c). Skipping the initialization is *very* good
|
||||
// when big dictionary is used but only small amount of data gets
|
||||
// actually compressed: most of the mf->son won't get actually
|
||||
// allocated by the kernel, so we avoid wasting RAM and improve
|
||||
// initialization speed a lot.
|
||||
if (mf->hash == NULL) {
|
||||
mf->hash = lzma_alloc(alloc_count * sizeof(uint32_t),
|
||||
mf->hash = lzma_alloc_zero(mf->hash_count * sizeof(uint32_t),
|
||||
allocator);
|
||||
if (mf->hash == NULL)
|
||||
mf->son = lzma_alloc(mf->sons_count * sizeof(uint32_t),
|
||||
allocator);
|
||||
|
||||
if (mf->hash == NULL || mf->son == NULL) {
|
||||
lzma_free(mf->hash, allocator);
|
||||
mf->hash = NULL;
|
||||
|
||||
lzma_free(mf->son, allocator);
|
||||
mf->son = NULL;
|
||||
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
for (uint32_t i = 0; i < mf->hash_count; ++i)
|
||||
mf->hash[i] = EMPTY_HASH_VALUE;
|
||||
*/
|
||||
memzero(mf->hash, mf->hash_count * sizeof(uint32_t));
|
||||
}
|
||||
|
||||
mf->son = mf->hash + mf->hash_size_sum;
|
||||
mf->cyclic_pos = 0;
|
||||
|
||||
// Initialize the hash table. Since EMPTY_HASH_VALUE is zero, we
|
||||
// can use memset().
|
||||
/*
|
||||
for (uint32_t i = 0; i < hash_size_sum; ++i)
|
||||
mf->hash[i] = EMPTY_HASH_VALUE;
|
||||
*/
|
||||
memzero(mf->hash, (size_t)(mf->hash_size_sum) * sizeof(uint32_t));
|
||||
|
||||
// We don't need to initialize mf->son, but not doing that will
|
||||
// make Valgrind complain in normalization (see normalize() in
|
||||
// lz_encoder_mf.c).
|
||||
//
|
||||
// Skipping this initialization is *very* good when big dictionary is
|
||||
// used but only small amount of data gets actually compressed: most
|
||||
// of the mf->hash won't get actually allocated by the kernel, so
|
||||
// we avoid wasting RAM and improve initialization speed a lot.
|
||||
//memzero(mf->son, (size_t)(mf->sons_count) * sizeof(uint32_t));
|
||||
|
||||
// Handle preset dictionary.
|
||||
if (lz_options->preset_dict != NULL
|
||||
&& lz_options->preset_dict_size > 0) {
|
||||
@@ -457,24 +464,32 @@ extern uint64_t
|
||||
lzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
|
||||
{
|
||||
// Old buffers must not exist when calling lz_encoder_prepare().
|
||||
lzma_mf mf = { NULL };
|
||||
lzma_mf mf = {
|
||||
.buffer = NULL,
|
||||
.hash = NULL,
|
||||
.son = NULL,
|
||||
.hash_count = 0,
|
||||
.sons_count = 0,
|
||||
};
|
||||
|
||||
// Setup the size information into mf.
|
||||
if (lz_encoder_prepare(&mf, NULL, lz_options))
|
||||
return UINT64_MAX;
|
||||
|
||||
// Calculate the memory usage.
|
||||
return (uint64_t)(mf.hash_size_sum + mf.sons_count)
|
||||
* sizeof(uint32_t)
|
||||
+ (uint64_t)(mf.size) + sizeof(lzma_coder);
|
||||
return ((uint64_t)(mf.hash_count) + mf.sons_count) * sizeof(uint32_t)
|
||||
+ mf.size + sizeof(lzma_coder);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
lz_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
lz_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_coder *coder = coder_ptr;
|
||||
|
||||
lzma_next_end(&coder->next, allocator);
|
||||
|
||||
lzma_free(coder->mf.son, allocator);
|
||||
lzma_free(coder->mf.hash, allocator);
|
||||
lzma_free(coder->mf.buffer, allocator);
|
||||
|
||||
@@ -489,10 +504,12 @@ lz_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lz_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
|
||||
lz_encoder_update(void *coder_ptr, const lzma_allocator *allocator,
|
||||
const lzma_filter *filters_null lzma_attribute((__unused__)),
|
||||
const lzma_filter *reversed_filters)
|
||||
{
|
||||
lzma_coder *coder = coder_ptr;
|
||||
|
||||
if (coder->lz.options_update == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
@@ -505,58 +522,63 @@ lz_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_lz_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters,
|
||||
lzma_ret (*lz_init)(lzma_lz_encoder *lz,
|
||||
lzma_allocator *allocator, const void *options,
|
||||
const lzma_allocator *allocator, const void *options,
|
||||
lzma_lz_options *lz_options))
|
||||
{
|
||||
lzma_lz_options lz_options;
|
||||
|
||||
#ifdef HAVE_SMALL
|
||||
// We need that the CRC32 table has been initialized.
|
||||
lzma_crc32_init();
|
||||
#endif
|
||||
|
||||
// Allocate and initialize the base data structure.
|
||||
if (next->coder == NULL) {
|
||||
next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (next->coder == NULL)
|
||||
lzma_coder *coder = next->coder;
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
next->coder = coder;
|
||||
next->code = &lz_encode;
|
||||
next->end = &lz_encoder_end;
|
||||
next->update = &lz_encoder_update;
|
||||
|
||||
next->coder->lz.coder = NULL;
|
||||
next->coder->lz.code = NULL;
|
||||
next->coder->lz.end = NULL;
|
||||
coder->lz.coder = NULL;
|
||||
coder->lz.code = NULL;
|
||||
coder->lz.end = NULL;
|
||||
|
||||
next->coder->mf.buffer = NULL;
|
||||
next->coder->mf.hash = NULL;
|
||||
next->coder->mf.hash_size_sum = 0;
|
||||
next->coder->mf.sons_count = 0;
|
||||
// mf.size is initialized to silence Valgrind
|
||||
// when used on optimized binaries (GCC may reorder
|
||||
// code in a way that Valgrind gets unhappy).
|
||||
coder->mf.buffer = NULL;
|
||||
coder->mf.size = 0;
|
||||
coder->mf.hash = NULL;
|
||||
coder->mf.son = NULL;
|
||||
coder->mf.hash_count = 0;
|
||||
coder->mf.sons_count = 0;
|
||||
|
||||
next->coder->next = LZMA_NEXT_CODER_INIT;
|
||||
coder->next = LZMA_NEXT_CODER_INIT;
|
||||
}
|
||||
|
||||
// Initialize the LZ-based encoder.
|
||||
return_if_error(lz_init(&next->coder->lz, allocator,
|
||||
lzma_lz_options lz_options;
|
||||
return_if_error(lz_init(&coder->lz, allocator,
|
||||
filters[0].options, &lz_options));
|
||||
|
||||
// Setup the size information into next->coder->mf and deallocate
|
||||
// Setup the size information into coder->mf and deallocate
|
||||
// old buffers if they have wrong size.
|
||||
if (lz_encoder_prepare(&next->coder->mf, allocator, &lz_options))
|
||||
if (lz_encoder_prepare(&coder->mf, allocator, &lz_options))
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
// Allocate new buffers if needed, and do the rest of
|
||||
// the initialization.
|
||||
if (lz_encoder_init(&next->coder->mf, allocator, &lz_options))
|
||||
if (lz_encoder_init(&coder->mf, allocator, &lz_options))
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
// Initialize the next filter in the chain, if any.
|
||||
return lzma_next_filter_init(&next->coder->next, allocator,
|
||||
filters + 1);
|
||||
return lzma_next_filter_init(&coder->next, allocator, filters + 1);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -119,7 +119,7 @@ struct lzma_mf_s {
|
||||
lzma_action action;
|
||||
|
||||
/// Number of elements in hash[]
|
||||
uint32_t hash_size_sum;
|
||||
uint32_t hash_count;
|
||||
|
||||
/// Number of elements in son[]
|
||||
uint32_t sons_count;
|
||||
@@ -191,19 +191,18 @@ typedef struct {
|
||||
|
||||
typedef struct {
|
||||
/// Data specific to the LZ-based encoder
|
||||
lzma_coder *coder;
|
||||
void *coder;
|
||||
|
||||
/// Function to encode from *dict to out[]
|
||||
lzma_ret (*code)(lzma_coder *LZMA_RESTRICT coder,
|
||||
lzma_mf *LZMA_RESTRICT mf, uint8_t *LZMA_RESTRICT out,
|
||||
size_t *LZMA_RESTRICT out_pos, size_t out_size);
|
||||
lzma_ret (*code)(void *coder,
|
||||
lzma_mf *restrict mf, uint8_t *restrict out,
|
||||
size_t *restrict out_pos, size_t out_size);
|
||||
|
||||
/// Free allocated resources
|
||||
void (*end)(lzma_coder *coder, lzma_allocator *allocator);
|
||||
void (*end)(void *coder, const lzma_allocator *allocator);
|
||||
|
||||
/// Update the options in the middle of the encoding.
|
||||
lzma_ret (*options_update)(lzma_coder *coder,
|
||||
const lzma_filter *filter);
|
||||
lzma_ret (*options_update)(void *coder, const lzma_filter *filter);
|
||||
|
||||
} lzma_lz_encoder;
|
||||
|
||||
@@ -218,7 +217,7 @@ typedef struct {
|
||||
|
||||
|
||||
/// Get pointer to the first byte not ran through the match finder
|
||||
static inline uint8_t *
|
||||
static inline const uint8_t *
|
||||
mf_ptr(const lzma_mf *mf)
|
||||
{
|
||||
return mf->buffer + mf->read_pos;
|
||||
@@ -296,10 +295,10 @@ mf_read(lzma_mf *mf, uint8_t *out, size_t *out_pos, size_t out_size,
|
||||
|
||||
|
||||
extern lzma_ret lzma_lz_encoder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters,
|
||||
lzma_ret (*lz_init)(lzma_lz_encoder *lz,
|
||||
lzma_allocator *allocator, const void *options,
|
||||
const lzma_allocator *allocator, const void *options,
|
||||
lzma_lz_options *lz_options));
|
||||
|
||||
|
||||
|
||||
@@ -39,22 +39,25 @@
// Endianness doesn't matter in hash_2_calc() (no effect on the output).
#ifdef TUKLIB_FAST_UNALIGNED_ACCESS
# define hash_2_calc() \
hash_value = *(const uint16_t *)(cur)
const uint32_t hash_value = *(const uint16_t *)(cur)
#else
# define hash_2_calc() \
hash_value = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
const uint32_t hash_value \
= (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
#endif

#define hash_3_calc() \
temp = hash_table[cur[0]] ^ cur[1]; \
hash_2_value = temp & HASH_2_MASK; \
hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask
const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \
const uint32_t hash_2_value = temp & HASH_2_MASK; \
const uint32_t hash_value \
= (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask

#define hash_4_calc() \
temp = hash_table[cur[0]] ^ cur[1]; \
hash_2_value = temp & HASH_2_MASK; \
hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \
const uint32_t hash_2_value = temp & HASH_2_MASK; \
const uint32_t hash_3_value \
= (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
^ (hash_table[cur[3]] << 5)) & mf->hash_mask

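To make the macro rewrite above concrete, here is hash_3_calc() expressed as a plain function showing what the new const declarations compute. The 256-entry table parameter stands in for the byte-spreading table the match finder uses (in liblzma it comes from the CRC32 table), and mask corresponds to mf->hash_mask; both names are placeholders for this sketch only:

#include <stdint.h>

static uint32_t hash_3_sketch(const uint32_t table[256],
        const uint8_t *cur, uint32_t mask)
{
    const uint32_t temp = table[cur[0]] ^ cur[1];
    // hash_2_value would be (temp & HASH_2_MASK); the 3-byte hash is:
    return (temp ^ ((uint32_t)cur[2] << 8)) & mask;
}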
@@ -13,6 +13,7 @@
|
||||
|
||||
#include "lz_encoder.h"
|
||||
#include "lz_encoder_hash.h"
|
||||
#include "memcmplen.h"
|
||||
|
||||
|
||||
/// \brief Find matches starting from the current byte
|
||||
@@ -32,9 +33,8 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
|
||||
|
||||
if (count > 0) {
|
||||
#ifndef NDEBUG
|
||||
uint32_t i;
|
||||
// Validate the matches.
|
||||
for (i = 0; i < count; ++i) {
|
||||
for (uint32_t i = 0; i < count; ++i) {
|
||||
assert(matches[i].len <= mf->nice_len);
|
||||
assert(matches[i].dist < mf->read_pos);
|
||||
assert(memcmp(mf_ptr(mf) - 1,
|
||||
@@ -50,9 +50,6 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
|
||||
// If a match of maximum search length was found, try to
|
||||
// extend the match to maximum possible length.
|
||||
if (len_best == mf->nice_len) {
|
||||
uint8_t *p1;
|
||||
uint8_t *p2;
|
||||
|
||||
// The limit for the match length is either the
|
||||
// maximum match length supported by the LZ-based
|
||||
// encoder or the number of bytes left in the
|
||||
@@ -63,15 +60,13 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
|
||||
|
||||
// Pointer to the byte we just ran through
|
||||
// the match finder.
|
||||
p1 = mf_ptr(mf) - 1;
|
||||
const uint8_t *p1 = mf_ptr(mf) - 1;
|
||||
|
||||
// Pointer to the beginning of the match. We need -1
|
||||
// here because the match distances are zero based.
|
||||
p2 = p1 - matches[count - 1].dist - 1;
|
||||
const uint8_t *p2 = p1 - matches[count - 1].dist - 1;
|
||||
|
||||
while (len_best < limit
|
||||
&& p1[len_best] == p2[len_best])
|
||||
++len_best;
|
||||
len_best = lzma_memcmplen(p1, p2, len_best, limit);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,36 +107,35 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
|
||||
static void
|
||||
normalize(lzma_mf *mf)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t subvalue;
|
||||
uint32_t count;
|
||||
uint32_t *hash;
|
||||
|
||||
assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS);
|
||||
|
||||
// In future we may not want to touch the lowest bits, because there
|
||||
// may be match finders that use larger resolution than one byte.
|
||||
subvalue = (MUST_NORMALIZE_POS - mf->cyclic_size);
|
||||
const uint32_t subvalue
|
||||
= (MUST_NORMALIZE_POS - mf->cyclic_size);
|
||||
// & (~(UINT32_C(1) << 10) - 1);
|
||||
|
||||
count = mf->hash_size_sum + mf->sons_count;
|
||||
hash = mf->hash;
|
||||
|
||||
for (i = 0; i < count; ++i) {
|
||||
for (uint32_t i = 0; i < mf->hash_count; ++i) {
|
||||
// If the distance is greater than the dictionary size,
|
||||
// we can simply mark the hash element as empty.
|
||||
//
|
||||
// NOTE: Only the first mf->hash_size_sum elements are
|
||||
// initialized for sure. There may be uninitialized elements
|
||||
// in mf->son. Since we go through both mf->hash and
|
||||
// mf->son here in normalization, Valgrind may complain
|
||||
// that the "if" below depends on uninitialized value. In
|
||||
// this case it is safe to ignore the warning. See also the
|
||||
// comments in lz_encoder_init() in lz_encoder.c.
|
||||
if (hash[i] <= subvalue)
|
||||
hash[i] = EMPTY_HASH_VALUE;
|
||||
if (mf->hash[i] <= subvalue)
|
||||
mf->hash[i] = EMPTY_HASH_VALUE;
|
||||
else
|
||||
hash[i] -= subvalue;
|
||||
mf->hash[i] -= subvalue;
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < mf->sons_count; ++i) {
|
||||
// Do the same for mf->son.
|
||||
//
|
||||
// NOTE: There may be uninitialized elements in mf->son.
|
||||
// Valgrind may complain that the "if" below depends on
|
||||
// an uninitialized value. In this case it is safe to ignore
|
||||
// the warning. See also the comments in lz_encoder_init()
|
||||
// in lz_encoder.c.
|
||||
if (mf->son[i] <= subvalue)
|
||||
mf->son[i] = EMPTY_HASH_VALUE;
|
||||
else
|
||||
mf->son[i] -= subvalue;
|
||||
}
|
||||
|
||||
// Update offset to match the new locations.
|
||||
@@ -204,14 +198,15 @@ move_pending(lzma_mf *mf)
|
||||
move_pending(mf); \
|
||||
ret_op; \
|
||||
} \
|
||||
cur = mf_ptr(mf); \
|
||||
pos = mf->read_pos + mf->offset
|
||||
const uint8_t *cur = mf_ptr(mf); \
|
||||
const uint32_t pos = mf->read_pos + mf->offset
|
||||
|
||||
|
||||
/// Header for find functions. "return 0" indicates that zero matches
|
||||
/// were found.
|
||||
#define header_find(is_bt, len_min) \
|
||||
header(is_bt, len_min, return 0)
|
||||
header(is_bt, len_min, return 0); \
|
||||
uint32_t matches_count = 0
|
||||
|
||||
|
||||
/// Header for a loop in a skip function. "continue" tells to skip the rest
|
||||
@@ -268,19 +263,15 @@ hc_find_func(
|
||||
|
||||
while (true) {
|
||||
const uint32_t delta = pos - cur_match;
|
||||
const uint8_t *pb;
|
||||
if (depth-- == 0 || delta >= cyclic_size)
|
||||
return matches;
|
||||
|
||||
pb = cur - delta;
|
||||
const uint8_t *const pb = cur - delta;
|
||||
cur_match = son[cyclic_pos - delta
|
||||
+ (delta > cyclic_pos ? cyclic_size : 0)];
|
||||
|
||||
if (pb[len_best] == cur[len_best] && pb[0] == cur[0]) {
|
||||
uint32_t len = 0;
|
||||
while (++len != len_limit)
|
||||
if (pb[len] != cur[len])
|
||||
break;
|
||||
uint32_t len = lzma_memcmplen(pb, cur, 1, len_limit);
|
||||
|
||||
if (len_best < len) {
|
||||
len_best = len;
|
||||
@@ -313,27 +304,21 @@ do { \
|
||||
extern uint32_t
|
||||
lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches)
|
||||
{
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
|
||||
uint32_t delta2, cur_match;
|
||||
uint32_t len_best = 2;
|
||||
uint32_t matches_count = 0;
|
||||
|
||||
header_find(false, 3);
|
||||
|
||||
hash_3_calc();
|
||||
|
||||
delta2 = pos - mf->hash[hash_2_value];
|
||||
cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
const uint32_t delta2 = pos - mf->hash[hash_2_value];
|
||||
const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
|
||||
|
||||
uint32_t len_best = 2;
|
||||
|
||||
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
|
||||
for ( ; len_best != len_limit; ++len_best)
|
||||
if (*(cur + len_best - delta2) != cur[len_best])
|
||||
break;
|
||||
len_best = lzma_memcmplen(cur - delta2, cur,
|
||||
len_best, len_limit);
|
||||
|
||||
matches[0].len = len_best;
|
||||
matches[0].dist = delta2 - 1;
|
||||
@@ -353,22 +338,18 @@ extern void
|
||||
lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
|
||||
{
|
||||
do {
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
|
||||
uint32_t cur_match;
|
||||
|
||||
if (mf_avail(mf) < 3) {
|
||||
move_pending(mf);
|
||||
continue;
|
||||
}
|
||||
|
||||
cur = mf_ptr(mf);
|
||||
pos = mf->read_pos + mf->offset;
|
||||
const uint8_t *cur = mf_ptr(mf);
|
||||
const uint32_t pos = mf->read_pos + mf->offset;
|
||||
|
||||
hash_3_calc();
|
||||
|
||||
cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
const uint32_t cur_match
|
||||
= mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
|
||||
@@ -384,25 +365,21 @@ lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
|
||||
extern uint32_t
|
||||
lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
|
||||
{
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
|
||||
uint32_t delta2, delta3, cur_match;
|
||||
uint32_t len_best = 1;
|
||||
uint32_t matches_count = 0;
|
||||
|
||||
header_find(false, 4);
|
||||
|
||||
hash_4_calc();
|
||||
|
||||
delta2 = pos - mf->hash[hash_2_value];
|
||||
delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
|
||||
cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
uint32_t delta2 = pos - mf->hash[hash_2_value];
|
||||
const uint32_t delta3
|
||||
= pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
|
||||
const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value ] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
|
||||
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
|
||||
|
||||
uint32_t len_best = 1;
|
||||
|
||||
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
|
||||
len_best = 2;
|
||||
matches[0].len = 2;
|
||||
@@ -418,9 +395,8 @@ lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
|
||||
}
|
||||
|
||||
if (matches_count != 0) {
|
||||
for ( ; len_best != len_limit; ++len_best)
|
||||
if (*(cur + len_best - delta2) != cur[len_best])
|
||||
break;
|
||||
len_best = lzma_memcmplen(cur - delta2, cur,
|
||||
len_best, len_limit);
|
||||
|
||||
matches[matches_count - 1].len = len_best;
|
||||
|
||||
@@ -441,22 +417,18 @@ extern void
|
||||
lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount)
|
||||
{
|
||||
do {
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
|
||||
uint32_t cur_match;
|
||||
|
||||
if (mf_avail(mf) < 4) {
|
||||
move_pending(mf);
|
||||
continue;
|
||||
}
|
||||
|
||||
cur = mf_ptr(mf);
|
||||
pos = mf->read_pos + mf->offset;
|
||||
const uint8_t *cur = mf_ptr(mf);
|
||||
const uint32_t pos = mf->read_pos + mf->offset;
|
||||
|
||||
hash_4_calc();
|
||||
|
||||
cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
const uint32_t cur_match
|
||||
= mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
|
||||
@@ -494,10 +466,6 @@ bt_find_func(
|
||||
uint32_t len1 = 0;
|
||||
|
||||
while (true) {
|
||||
uint32_t *pair;
|
||||
const uint8_t *pb;
|
||||
uint32_t len;
|
||||
|
||||
const uint32_t delta = pos - cur_match;
|
||||
if (depth-- == 0 || delta >= cyclic_size) {
|
||||
*ptr0 = EMPTY_HASH_VALUE;
|
||||
@@ -505,17 +473,15 @@ bt_find_func(
|
||||
return matches;
|
||||
}
|
||||
|
||||
pair = son + ((cyclic_pos - delta
|
||||
uint32_t *const pair = son + ((cyclic_pos - delta
|
||||
+ (delta > cyclic_pos ? cyclic_size : 0))
|
||||
<< 1);
|
||||
|
||||
pb = cur - delta;
|
||||
len = my_min(len0, len1);
|
||||
const uint8_t *const pb = cur - delta;
|
||||
uint32_t len = my_min(len0, len1);
|
||||
|
||||
if (pb[len] == cur[len]) {
|
||||
while (++len != len_limit)
|
||||
if (pb[len] != cur[len])
|
||||
break;
|
||||
len = lzma_memcmplen(pb, cur, len + 1, len_limit);
|
||||
|
||||
if (len_best < len) {
|
||||
len_best = len;
|
||||
@@ -564,10 +530,6 @@ bt_skip_func(
|
||||
uint32_t len1 = 0;
|
||||
|
||||
while (true) {
|
||||
uint32_t *pair;
|
||||
const uint8_t *pb;
|
||||
uint32_t len;
|
||||
|
||||
const uint32_t delta = pos - cur_match;
|
||||
if (depth-- == 0 || delta >= cyclic_size) {
|
||||
*ptr0 = EMPTY_HASH_VALUE;
|
||||
@@ -575,16 +537,14 @@ bt_skip_func(
|
||||
return;
|
||||
}
|
||||
|
||||
pair = son + ((cyclic_pos - delta
|
||||
uint32_t *pair = son + ((cyclic_pos - delta
|
||||
+ (delta > cyclic_pos ? cyclic_size : 0))
|
||||
<< 1);
|
||||
pb = cur - delta;
|
||||
len = my_min(len0, len1);
|
||||
const uint8_t *pb = cur - delta;
|
||||
uint32_t len = my_min(len0, len1);
|
||||
|
||||
if (pb[len] == cur[len]) {
|
||||
while (++len != len_limit)
|
||||
if (pb[len] != cur[len])
|
||||
break;
|
||||
len = lzma_memcmplen(pb, cur, len + 1, len_limit);
|
||||
|
||||
if (len == len_limit) {
|
||||
*ptr1 = pair[0];
|
||||
@@ -626,17 +586,11 @@ do { \
|
||||
extern uint32_t
|
||||
lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches)
|
||||
{
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t hash_value; /* hash_2_calc */
|
||||
uint32_t cur_match;
|
||||
uint32_t matches_count = 0;
|
||||
|
||||
header_find(true, 2);
|
||||
|
||||
hash_2_calc();
|
||||
|
||||
cur_match = mf->hash[hash_value];
|
||||
const uint32_t cur_match = mf->hash[hash_value];
|
||||
mf->hash[hash_value] = pos;
|
||||
|
||||
bt_find(1);
|
||||
@@ -647,16 +601,11 @@ extern void
|
||||
lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
|
||||
{
|
||||
do {
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t hash_value; /* hash_2_calc */
|
||||
uint32_t cur_match;
|
||||
|
||||
header_skip(true, 2);
|
||||
|
||||
hash_2_calc();
|
||||
|
||||
cur_match = mf->hash[hash_value];
|
||||
const uint32_t cur_match = mf->hash[hash_value];
|
||||
mf->hash[hash_value] = pos;
|
||||
|
||||
bt_skip();
|
||||
@@ -670,27 +619,21 @@ lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
|
||||
extern uint32_t
|
||||
lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches)
|
||||
{
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
|
||||
uint32_t delta2, cur_match;
|
||||
uint32_t len_best = 2;
|
||||
uint32_t matches_count = 0;
|
||||
|
||||
header_find(true, 3);
|
||||
|
||||
hash_3_calc();
|
||||
|
||||
delta2 = pos - mf->hash[hash_2_value];
|
||||
cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
const uint32_t delta2 = pos - mf->hash[hash_2_value];
|
||||
const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
|
||||
|
||||
uint32_t len_best = 2;
|
||||
|
||||
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
|
||||
for ( ; len_best != len_limit; ++len_best)
|
||||
if (*(cur + len_best - delta2) != cur[len_best])
|
||||
break;
|
||||
len_best = lzma_memcmplen(
|
||||
cur, cur - delta2, len_best, len_limit);
|
||||
|
||||
matches[0].len = len_best;
|
||||
matches[0].dist = delta2 - 1;
|
||||
@@ -710,16 +653,12 @@ extern void
|
||||
lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
|
||||
{
|
||||
do {
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
|
||||
uint32_t cur_match;
|
||||
|
||||
header_skip(true, 3);
|
||||
|
||||
hash_3_calc();
|
||||
|
||||
cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
const uint32_t cur_match
|
||||
= mf->hash[FIX_3_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
|
||||
@@ -735,25 +674,21 @@ lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
|
||||
extern uint32_t
|
||||
lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches)
|
||||
{
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
|
||||
uint32_t delta2, delta3, cur_match;
|
||||
uint32_t len_best = 1;
|
||||
uint32_t matches_count = 0;
|
||||
|
||||
header_find(true, 4);
|
||||
|
||||
hash_4_calc();
|
||||
|
||||
delta2 = pos - mf->hash[hash_2_value];
|
||||
delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
|
||||
cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
uint32_t delta2 = pos - mf->hash[hash_2_value];
|
||||
const uint32_t delta3
|
||||
= pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
|
||||
const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
|
||||
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
|
||||
|
||||
uint32_t len_best = 1;
|
||||
|
||||
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
|
||||
len_best = 2;
|
||||
matches[0].len = 2;
|
||||
@@ -769,9 +704,8 @@ lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches)
|
||||
}
|
||||
|
||||
if (matches_count != 0) {
|
||||
for ( ; len_best != len_limit; ++len_best)
|
||||
if (*(cur + len_best - delta2) != cur[len_best])
|
||||
break;
|
||||
len_best = lzma_memcmplen(
|
||||
cur, cur - delta2, len_best, len_limit);
|
||||
|
||||
matches[matches_count - 1].len = len_best;
|
||||
|
||||
@@ -792,16 +726,12 @@ extern void
|
||||
lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount)
|
||||
{
|
||||
do {
|
||||
const uint8_t *cur;
|
||||
uint32_t pos;
|
||||
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
|
||||
uint32_t cur_match;
|
||||
|
||||
header_skip(true, 4);
|
||||
|
||||
hash_4_calc();
|
||||
|
||||
cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
const uint32_t cur_match
|
||||
= mf->hash[FIX_4_HASH_SIZE + hash_value];
|
||||
|
||||
mf->hash[hash_2_value] = pos;
|
||||
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
|
||||
|
||||
@@ -14,15 +14,15 @@
#ifndef LZMA_FASTPOS_H
#define LZMA_FASTPOS_H

// LZMA encodes match distances (positions) by storing the highest two
// bits using a six-bit value [0, 63], and then the missing lower bits.
// Dictionary size is also stored using this encoding in the new .lzma
// LZMA encodes match distances by storing the highest two bits using
// a six-bit value [0, 63], and then the missing lower bits.
// Dictionary size is also stored using this encoding in the .xz
// file format header.
//
// fastpos.h provides a way to quickly find out the correct six-bit
// values. The following table gives some examples of this encoding:
//
// pos return
// dist return
// 0 0
// 1 1
// 2 2
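As an illustrative aside, not part of the upstream patch: the sketch below recomputes the six-bit distance slot described in the comment above, using a plain highest-set-bit loop in place of liblzma's bsr32() and lookup-table helpers so it stands alone. The names dist_slot and samples exist only in this example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t dist_slot(uint32_t dist)
{
	if (dist < 4)
		return dist;    /* slots 0-3 are the distances themselves */

	uint32_t top = 31;
	while (((dist >> top) & 1) == 0)
		--top;          /* index of the highest set bit */

	/* The highest bit picks the slot pair, the bit below it picks one. */
	return 2 * top + ((dist >> (top - 1)) & 1);
}

int main(void)
{
	const uint32_t samples[] = { 0, 1, 2, 3, 4, 5, 96, 127, 128, 191, 192 };
	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
		printf("dist %6u -> slot %2u\n",
				(unsigned)samples[i], (unsigned)dist_slot(samples[i]));
	return 0;
}

For instance, distances 128 through 191 all print slot 14, matching the description of the encoding above.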
@@ -48,10 +48,10 @@
|
||||
// Provided functions or macros
|
||||
// ----------------------------
|
||||
//
|
||||
// get_pos_slot(pos) is the basic version. get_pos_slot_2(pos)
|
||||
// assumes that pos >= FULL_DISTANCES, thus the result is at least
|
||||
// FULL_DISTANCES_BITS * 2. Using get_pos_slot(pos) instead of
|
||||
// get_pos_slot_2(pos) would give the same result, but get_pos_slot_2(pos)
|
||||
// get_dist_slot(dist) is the basic version. get_dist_slot_2(dist)
|
||||
// assumes that dist >= FULL_DISTANCES, thus the result is at least
|
||||
// FULL_DISTANCES_BITS * 2. Using get_dist_slot(dist) instead of
|
||||
// get_dist_slot_2(dist) would give the same result, but get_dist_slot_2(dist)
|
||||
// should be tiny bit faster due to the assumption being made.
|
||||
//
|
||||
//
|
||||
@@ -75,16 +75,15 @@
|
||||
// on all systems I have tried. The size optimized version is sometimes
|
||||
// slightly faster, but sometimes it is a lot slower.
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#ifdef HAVE_SMALL
|
||||
# define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos))
|
||||
# define get_dist_slot(dist) \
|
||||
((dist) <= 4 ? (dist) : get_dist_slot_2(dist))
|
||||
|
||||
static inline uint32_t
|
||||
get_pos_slot_2(uint32_t pos)
|
||||
get_dist_slot_2(uint32_t dist)
|
||||
{
|
||||
const uint32_t i = bsr32(pos);
|
||||
return (i + i) + ((pos >> (i - 1)) & 1);
|
||||
const uint32_t i = bsr32(dist);
|
||||
return (i + i) + ((dist >> (i - 1)) & 1);
|
||||
}
|
||||
|
||||
|
||||
@@ -101,39 +100,39 @@ extern const uint8_t lzma_fastpos[1 << FASTPOS_BITS];
|
||||
#define fastpos_limit(extra, n) \
|
||||
(UINT32_C(1) << (FASTPOS_BITS + fastpos_shift(extra, n)))
|
||||
|
||||
#define fastpos_result(pos, extra, n) \
|
||||
lzma_fastpos[(pos) >> fastpos_shift(extra, n)] \
|
||||
#define fastpos_result(dist, extra, n) \
|
||||
lzma_fastpos[(dist) >> fastpos_shift(extra, n)] \
|
||||
+ 2 * fastpos_shift(extra, n)
|
||||
|
||||
|
||||
static inline uint32_t
|
||||
get_pos_slot(uint32_t pos)
|
||||
get_dist_slot(uint32_t dist)
|
||||
{
|
||||
// If it is small enough, we can pick the result directly from
|
||||
// the precalculated table.
|
||||
if (pos < fastpos_limit(0, 0))
|
||||
return lzma_fastpos[pos];
|
||||
if (dist < fastpos_limit(0, 0))
|
||||
return lzma_fastpos[dist];
|
||||
|
||||
if (pos < fastpos_limit(0, 1))
|
||||
return fastpos_result(pos, 0, 1);
|
||||
if (dist < fastpos_limit(0, 1))
|
||||
return fastpos_result(dist, 0, 1);
|
||||
|
||||
return fastpos_result(pos, 0, 2);
|
||||
return fastpos_result(dist, 0, 2);
|
||||
}
|
||||
|
||||
|
||||
#ifdef FULL_DISTANCES_BITS
|
||||
static inline uint32_t
|
||||
get_pos_slot_2(uint32_t pos)
|
||||
get_dist_slot_2(uint32_t dist)
|
||||
{
|
||||
assert(pos >= FULL_DISTANCES);
|
||||
assert(dist >= FULL_DISTANCES);
|
||||
|
||||
if (pos < fastpos_limit(FULL_DISTANCES_BITS - 1, 0))
|
||||
return fastpos_result(pos, FULL_DISTANCES_BITS - 1, 0);
|
||||
if (dist < fastpos_limit(FULL_DISTANCES_BITS - 1, 0))
|
||||
return fastpos_result(dist, FULL_DISTANCES_BITS - 1, 0);
|
||||
|
||||
if (pos < fastpos_limit(FULL_DISTANCES_BITS - 1, 1))
|
||||
return fastpos_result(pos, FULL_DISTANCES_BITS - 1, 1);
|
||||
if (dist < fastpos_limit(FULL_DISTANCES_BITS - 1, 1))
|
||||
return fastpos_result(dist, FULL_DISTANCES_BITS - 1, 1);
|
||||
|
||||
return fastpos_result(pos, FULL_DISTANCES_BITS - 1, 2);
|
||||
return fastpos_result(dist, FULL_DISTANCES_BITS - 1, 2);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
#include "lzma_decoder.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
enum sequence {
|
||||
SEQ_CONTROL,
|
||||
SEQ_UNCOMPRESSED_1,
|
||||
@@ -50,14 +50,16 @@ struct lzma_coder_s {
|
||||
bool need_dictionary_reset;
|
||||
|
||||
lzma_options_lzma options;
|
||||
};
|
||||
} lzma_lzma2_coder;
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma2_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dict,
|
||||
const uint8_t *LZMA_RESTRICT in, size_t *LZMA_RESTRICT in_pos,
|
||||
lzma2_decode(void *coder_ptr, lzma_dict *restrict dict,
|
||||
const uint8_t *restrict in, size_t *restrict in_pos,
|
||||
size_t in_size)
|
||||
{
|
||||
lzma_lzma2_coder *restrict coder = coder_ptr;
|
||||
|
||||
// With SEQ_LZMA it is possible that no new input is needed to do
|
||||
// some progress. The rest of the sequences assume that there is
|
||||
// at least one byte of input.
|
||||
@@ -209,8 +211,10 @@ lzma2_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dict,
|
||||
|
||||
|
||||
static void
|
||||
lzma2_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
lzma2_decoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_lzma2_coder *coder = coder_ptr;
|
||||
|
||||
assert(coder->lzma.end == NULL);
|
||||
lzma_free(coder->lzma.coder, allocator);
|
||||
|
||||
@@ -221,34 +225,36 @@ lzma2_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
lzma2_decoder_init(lzma_lz_decoder *lz, const lzma_allocator *allocator,
|
||||
const void *opt, lzma_lz_options *lz_options)
|
||||
{
|
||||
const lzma_options_lzma *options = opt;
|
||||
|
||||
if (lz->coder == NULL) {
|
||||
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (lz->coder == NULL)
|
||||
lzma_lzma2_coder *coder = lz->coder;
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_lzma2_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
lz->coder = coder;
|
||||
lz->code = &lzma2_decode;
|
||||
lz->end = &lzma2_decoder_end;
|
||||
|
||||
lz->coder->lzma = LZMA_LZ_DECODER_INIT;
|
||||
coder->lzma = LZMA_LZ_DECODER_INIT;
|
||||
}
|
||||
|
||||
lz->coder->sequence = SEQ_CONTROL;
|
||||
lz->coder->need_properties = true;
|
||||
lz->coder->need_dictionary_reset = options->preset_dict == NULL
|
||||
const lzma_options_lzma *options = opt;
|
||||
|
||||
coder->sequence = SEQ_CONTROL;
|
||||
coder->need_properties = true;
|
||||
coder->need_dictionary_reset = options->preset_dict == NULL
|
||||
|| options->preset_dict_size == 0;
|
||||
|
||||
return lzma_lzma_decoder_create(&lz->coder->lzma,
|
||||
return lzma_lzma_decoder_create(&coder->lzma,
|
||||
allocator, options, lz_options);
|
||||
}
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma2_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_lzma2_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters)
|
||||
{
|
||||
// LZMA2 can only be the last filter in the chain. This is enforced
|
||||
@@ -263,17 +269,15 @@ lzma_lzma2_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
extern uint64_t
|
||||
lzma_lzma2_decoder_memusage(const void *options)
|
||||
{
|
||||
return sizeof(lzma_coder)
|
||||
return sizeof(lzma_lzma2_coder)
|
||||
+ lzma_lzma_decoder_memusage_nocheck(options);
|
||||
}
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
|
||||
lzma_lzma2_props_decode(void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size)
|
||||
{
|
||||
lzma_options_lzma *opt;
|
||||
|
||||
if (props_size != 1)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
@@ -285,7 +289,8 @@ lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
|
||||
if (props[0] > 40)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
|
||||
lzma_options_lzma *opt = lzma_alloc(
|
||||
sizeof(lzma_options_lzma), allocator);
|
||||
if (opt == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
|
||||
@@ -17,12 +17,13 @@
|
||||
#include "common.h"
|
||||
|
||||
extern lzma_ret lzma_lzma2_decoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, const lzma_filter_info *filters);
|
||||
const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters);
|
||||
|
||||
extern uint64_t lzma_lzma2_decoder_memusage(const void *options);
|
||||
|
||||
extern lzma_ret lzma_lzma2_props_decode(
|
||||
void **options, lzma_allocator *allocator,
|
||||
void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
#include "lzma2_encoder.h"
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
enum {
|
||||
SEQ_INIT,
|
||||
SEQ_LZMA_ENCODE,
|
||||
@@ -27,7 +27,7 @@ struct lzma_coder_s {
|
||||
} sequence;
|
||||
|
||||
/// LZMA encoder
|
||||
lzma_coder *lzma;
|
||||
void *lzma;
|
||||
|
||||
/// LZMA options currently in use.
|
||||
lzma_options_lzma opt_cur;
|
||||
@@ -48,20 +48,19 @@ struct lzma_coder_s {
|
||||
|
||||
/// Buffer to hold the chunk header and LZMA compressed data
|
||||
uint8_t buf[LZMA2_HEADER_MAX + LZMA2_CHUNK_MAX];
|
||||
};
|
||||
} lzma_lzma2_coder;
|
||||
|
||||
|
||||
static void
|
||||
lzma2_header_lzma(lzma_coder *coder)
|
||||
lzma2_header_lzma(lzma_lzma2_coder *coder)
|
||||
{
|
||||
size_t pos;
|
||||
size_t size;
|
||||
|
||||
assert(coder->uncompressed_size > 0);
|
||||
assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX);
|
||||
assert(coder->compressed_size > 0);
|
||||
assert(coder->compressed_size <= LZMA2_CHUNK_MAX);
|
||||
|
||||
size_t pos;
|
||||
|
||||
if (coder->need_properties) {
|
||||
pos = 0;
|
||||
|
||||
@@ -82,7 +81,7 @@ lzma2_header_lzma(lzma_coder *coder)
|
||||
coder->buf_pos = pos;
|
||||
|
||||
// Uncompressed size
|
||||
size = coder->uncompressed_size - 1;
|
||||
size_t size = coder->uncompressed_size - 1;
|
||||
coder->buf[pos++] += size >> 16;
|
||||
coder->buf[pos++] = (size >> 8) & 0xFF;
|
||||
coder->buf[pos++] = size & 0xFF;
|
||||
@@ -109,7 +108,7 @@ lzma2_header_lzma(lzma_coder *coder)
|
||||
|
||||
|
||||
static void
|
||||
lzma2_header_uncompressed(lzma_coder *coder)
|
||||
lzma2_header_uncompressed(lzma_lzma2_coder *coder)
|
||||
{
|
||||
assert(coder->uncompressed_size > 0);
|
||||
assert(coder->uncompressed_size <= LZMA2_CHUNK_MAX);
|
||||
@@ -134,10 +133,12 @@ lzma2_header_uncompressed(lzma_coder *coder)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma2_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
uint8_t *LZMA_RESTRICT out, size_t *LZMA_RESTRICT out_pos,
|
||||
lzma2_encode(void *coder_ptr, lzma_mf *restrict mf,
|
||||
uint8_t *restrict out, size_t *restrict out_pos,
|
||||
size_t out_size)
|
||||
{
|
||||
lzma_lzma2_coder *restrict coder = coder_ptr;
|
||||
|
||||
while (*out_pos < out_size)
|
||||
switch (coder->sequence) {
|
||||
case SEQ_INIT:
|
||||
@@ -163,9 +164,6 @@ lzma2_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
// Fall through
|
||||
|
||||
case SEQ_LZMA_ENCODE: {
|
||||
uint32_t read_start;
|
||||
lzma_ret ret;
|
||||
|
||||
// Calculate how much more uncompressed data this chunk
|
||||
// could accept.
|
||||
const uint32_t left = LZMA2_UNCOMPRESSED_MAX
|
||||
@@ -186,10 +184,10 @@ lzma2_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
|
||||
// Save the start position so that we can update
|
||||
// coder->uncompressed_size.
|
||||
read_start = mf->read_pos - mf->read_ahead;
|
||||
const uint32_t read_start = mf->read_pos - mf->read_ahead;
|
||||
|
||||
// Call the LZMA encoder until the chunk is finished.
|
||||
ret = lzma_lzma_encode(coder->lzma, mf,
|
||||
const lzma_ret ret = lzma_lzma_encode(coder->lzma, mf,
|
||||
coder->buf + LZMA2_HEADER_MAX,
|
||||
&coder->compressed_size,
|
||||
LZMA2_CHUNK_MAX, limit);
|
||||
@@ -266,8 +264,9 @@ lzma2_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
|
||||
|
||||
static void
|
||||
lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
lzma2_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
|
||||
{
|
||||
lzma_lzma2_coder *coder = coder_ptr;
|
||||
lzma_free(coder->lzma, allocator);
|
||||
lzma_free(coder, allocator);
|
||||
return;
|
||||
@@ -275,9 +274,9 @@ lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
|
||||
lzma2_encoder_options_update(void *coder_ptr, const lzma_filter *filter)
|
||||
{
|
||||
lzma_options_lzma *opt;
|
||||
lzma_lzma2_coder *coder = coder_ptr;
|
||||
|
||||
// New options can be set only when there is no incomplete chunk.
|
||||
// This is the case at the beginning of the raw stream and right
|
||||
@@ -287,7 +286,7 @@ lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
|
||||
|
||||
// Look if there are new options. At least for now,
|
||||
// only lc/lp/pb can be changed.
|
||||
opt = filter->options;
|
||||
const lzma_options_lzma *opt = filter->options;
|
||||
if (coder->opt_cur.lc != opt->lc || coder->opt_cur.lp != opt->lp
|
||||
|| coder->opt_cur.pb != opt->pb) {
|
||||
// Validate the options.
|
||||
@@ -310,36 +309,38 @@ lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma2_encoder_init(lzma_lz_encoder *lz, lzma_allocator *allocator,
|
||||
lzma2_encoder_init(lzma_lz_encoder *lz, const lzma_allocator *allocator,
|
||||
const void *options, lzma_lz_options *lz_options)
|
||||
{
|
||||
if (options == NULL)
|
||||
return LZMA_PROG_ERROR;
|
||||
|
||||
if (lz->coder == NULL) {
|
||||
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
if (lz->coder == NULL)
|
||||
lzma_lzma2_coder *coder = lz->coder;
|
||||
if (coder == NULL) {
|
||||
coder = lzma_alloc(sizeof(lzma_lzma2_coder), allocator);
|
||||
if (coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
lz->coder = coder;
|
||||
lz->code = &lzma2_encode;
|
||||
lz->end = &lzma2_encoder_end;
|
||||
lz->options_update = &lzma2_encoder_options_update;
|
||||
|
||||
lz->coder->lzma = NULL;
|
||||
coder->lzma = NULL;
|
||||
}
|
||||
|
||||
lz->coder->opt_cur = *(const lzma_options_lzma *)(options);
|
||||
coder->opt_cur = *(const lzma_options_lzma *)(options);
|
||||
|
||||
lz->coder->sequence = SEQ_INIT;
|
||||
lz->coder->need_properties = true;
|
||||
lz->coder->need_state_reset = false;
|
||||
lz->coder->need_dictionary_reset
|
||||
= lz->coder->opt_cur.preset_dict == NULL
|
||||
|| lz->coder->opt_cur.preset_dict_size == 0;
|
||||
coder->sequence = SEQ_INIT;
|
||||
coder->need_properties = true;
|
||||
coder->need_state_reset = false;
|
||||
coder->need_dictionary_reset
|
||||
= coder->opt_cur.preset_dict == NULL
|
||||
|| coder->opt_cur.preset_dict_size == 0;
|
||||
|
||||
// Initialize LZMA encoder
|
||||
return_if_error(lzma_lzma_encoder_create(&lz->coder->lzma, allocator,
|
||||
&lz->coder->opt_cur, lz_options));
|
||||
return_if_error(lzma_lzma_encoder_create(&coder->lzma, allocator,
|
||||
&coder->opt_cur, lz_options));
|
||||
|
||||
// Make sure that we will always have enough history available in
|
||||
// case we need to use uncompressed chunks. They are used when the
|
||||
@@ -355,7 +356,7 @@ lzma2_encoder_init(lzma_lz_encoder *lz, lzma_allocator *allocator,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma2_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_lzma2_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters)
|
||||
{
|
||||
return lzma_lz_encoder_init(
|
||||
@@ -370,7 +371,7 @@ lzma_lzma2_encoder_memusage(const void *options)
|
||||
if (lzma_mem == UINT64_MAX)
|
||||
return UINT64_MAX;
|
||||
|
||||
return sizeof(lzma_coder) + lzma_mem;
|
||||
return sizeof(lzma_lzma2_coder) + lzma_mem;
|
||||
}
|
||||
|
||||
|
||||
@@ -393,7 +394,17 @@ lzma_lzma2_props_encode(const void *options, uint8_t *out)
if (d == UINT32_MAX)
out[0] = 40;
else
out[0] = get_pos_slot(d + 1) - 24;
out[0] = get_dist_slot(d + 1) - 24;

return LZMA_OK;
}


extern uint64_t
lzma_lzma2_block_size(const void *options)
{
const lzma_options_lzma *const opt = options;

// Use at least 1 MiB to keep compression ratio better.
return my_max((uint64_t)(opt->dict_size) * 3, UINT64_C(1) << 20);
}

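As an illustrative aside, not part of the upstream patch: the snippet below only evaluates the renamed expression get_dist_slot(d + 1) - 24 from the hunk above for one sample value. How d is derived from the dictionary size happens before the lines shown here, so the value below is an assumed, already-rounded example, and the loop-based dist_slot is a stand-in for the real get_dist_slot.

#include <stdint.h>
#include <stdio.h>

static uint32_t dist_slot(uint32_t dist)
{
	if (dist < 4)
		return dist;
	uint32_t top = 31;
	while (((dist >> top) & 1) == 0)
		--top;
	return 2 * top + ((dist >> (top - 1)) & 1);
}

int main(void)
{
	/* Assumed example: d chosen so that d + 1 is 2^26 (a 64 MiB dictionary). */
	const uint32_t d = (UINT32_C(1) << 26) - 1;
	const unsigned props = (unsigned)(dist_slot(d + 1) - 24);
	printf("d + 1 = %u -> props byte %u\n", (unsigned)(d + 1), props);
	return 0;    /* prints 67108864 -> 28; d == UINT32_MAX maps to 40 above */
}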
@@ -31,11 +31,13 @@
|
||||
|
||||
|
||||
extern lzma_ret lzma_lzma2_encoder_init(
|
||||
lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters);
|
||||
|
||||
extern uint64_t lzma_lzma2_encoder_memusage(const void *options);
|
||||
|
||||
extern lzma_ret lzma_lzma2_props_encode(const void *options, uint8_t *out);
|
||||
|
||||
extern uint64_t lzma_lzma2_block_size(const void *options);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -129,15 +129,12 @@ static inline void
|
||||
literal_init(probability (*probs)[LITERAL_CODER_SIZE],
|
||||
uint32_t lc, uint32_t lp)
|
||||
{
|
||||
uint32_t coders;
|
||||
uint32_t i, j;
|
||||
|
||||
assert(lc + lp <= LZMA_LCLP_MAX);
|
||||
|
||||
coders = 1U << (lc + lp);
|
||||
const uint32_t coders = 1U << (lc + lp);
|
||||
|
||||
for (i = 0; i < coders; ++i)
|
||||
for (j = 0; j < LITERAL_CODER_SIZE; ++j)
|
||||
for (uint32_t i = 0; i < coders; ++i)
|
||||
for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j)
|
||||
bit_reset(probs[i][j]);
|
||||
|
||||
return;
|
||||
@@ -174,53 +171,54 @@ literal_init(probability (*probs)[LITERAL_CODER_SIZE],
|
||||
// Match distance //
|
||||
////////////////////
|
||||
|
||||
// Different set of probabilities is used for match distances that have very
|
||||
// Different sets of probabilities are used for match distances that have very
|
||||
// short match length: Lengths of 2, 3, and 4 bytes have a separate set of
|
||||
// probabilities for each length. The matches with longer length use a shared
|
||||
// set of probabilities.
|
||||
#define LEN_TO_POS_STATES 4
|
||||
#define DIST_STATES 4
|
||||
|
||||
// Macro to get the index of the appropriate probability array.
|
||||
#define get_len_to_pos_state(len) \
|
||||
((len) < LEN_TO_POS_STATES + MATCH_LEN_MIN \
|
||||
#define get_dist_state(len) \
|
||||
((len) < DIST_STATES + MATCH_LEN_MIN \
|
||||
? (len) - MATCH_LEN_MIN \
|
||||
: LEN_TO_POS_STATES - 1)
|
||||
: DIST_STATES - 1)
|
||||
|
||||
// The highest two bits of a match distance (pos slot) are encoded using six
|
||||
// bits. See fastpos.h for more explanation.
|
||||
#define POS_SLOT_BITS 6
|
||||
#define POS_SLOTS (1 << POS_SLOT_BITS)
|
||||
// The highest two bits of a match distance (distance slot) are encoded
|
||||
// using six bits. See fastpos.h for more explanation.
|
||||
#define DIST_SLOT_BITS 6
|
||||
#define DIST_SLOTS (1 << DIST_SLOT_BITS)
|
||||
|
||||
// Match distances up to 127 are fully encoded using probabilities. Since
// the highest two bits (pos slot) are always encoded using six bits, the
// distances 0-3 don't need any additional bits to encode, since the pos
// slot itself is the same as the actual distance. START_POS_MODEL_INDEX
// indicates the first pos slot where at least one additional bit is needed.
#define START_POS_MODEL_INDEX 4
// the highest two bits (distance slot) are always encoded using six bits,
// the distances 0-3 don't need any additional bits to encode, since the
// distance slot itself is the same as the actual distance. DIST_MODEL_START
// indicates the first distance slot where at least one additional bit is
// needed.
#define DIST_MODEL_START 4

// Match distances greater than 127 are encoded in three pieces:
// - pos slot: the highest two bits
// - distance slot: the highest two bits
// - direct bits: 2-26 bits below the highest two bits
// - alignment bits: four lowest bits
//
// Direct bits don't use any probabilities.
//
// The pos slot value of 14 is for distances 128-191 (see the table in
// The distance slot value of 14 is for distances 128-191 (see the table in
// fastpos.h to understand why).
#define END_POS_MODEL_INDEX 14
#define DIST_MODEL_END 14

// Pos slots that indicate a distance <= 127.
#define FULL_DISTANCES_BITS (END_POS_MODEL_INDEX / 2)
// Distance slots that indicate a distance <= 127.
#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)

// For match distances greater than 127, only the highest two bits and the
// lowest four bits (alignment) is encoded using probabilities.
#define ALIGN_BITS 4
#define ALIGN_TABLE_SIZE (1 << ALIGN_BITS)
#define ALIGN_MASK (ALIGN_TABLE_SIZE - 1)
#define ALIGN_SIZE (1 << ALIGN_BITS)
#define ALIGN_MASK (ALIGN_SIZE - 1)

// LZMA remembers the four most recent match distances. Reusing these distances
// tends to take less space than re-encoding the actual distance value.
#define REP_DISTANCES 4
#define REPS 4

#endif

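As an illustrative aside, not part of the upstream patch: the sketch below splits one sample distance into the three pieces the renamed comments describe (distance slot, direct bits, alignment bits) and then reassembles it. The footer_bits and base arithmetic mirrors what match() in lzma_encoder.c does later in this patch; the constants are redefined locally and the loop-based dist_slot replaces bsr32() so the example stands alone.

#include <stdint.h>
#include <stdio.h>

#define DIST_MODEL_END 14
#define ALIGN_BITS 4
#define ALIGN_MASK ((UINT32_C(1) << ALIGN_BITS) - 1)

static uint32_t dist_slot(uint32_t dist)
{
	uint32_t top = 31;
	while (((dist >> top) & 1) == 0)
		--top;
	return 2 * top + ((dist >> (top - 1)) & 1);
}

int main(void)
{
	const uint32_t distance = 70000;   /* arbitrary sample, well above 127 */
	const uint32_t slot = dist_slot(distance);

	if (slot < DIST_MODEL_END) {
		/* Distances up to 127 never reach the direct/align split. */
		printf("distance %u stays in slot %u\n",
				(unsigned)distance, (unsigned)slot);
		return 0;
	}

	const uint32_t footer_bits = (slot >> 1) - 1;
	const uint32_t base = (2 | (slot & 1)) << footer_bits;
	const uint32_t reduced = distance - base;
	const uint32_t direct = reduced >> ALIGN_BITS;  /* footer_bits - 4 bits */
	const uint32_t align = reduced & ALIGN_MASK;    /* lowest four bits */

	printf("distance %u -> slot %u, direct %u, align %u\n",
			(unsigned)distance, (unsigned)slot,
			(unsigned)direct, (unsigned)align);

	/* Reassembling the pieces must give the original distance back. */
	return (base + (direct << ALIGN_BITS) + align) == distance ? 0 : 1;
}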
@@ -16,6 +16,12 @@
|
||||
#include "lzma_decoder.h"
|
||||
#include "range_decoder.h"
|
||||
|
||||
// The macros unroll loops with switch statements.
|
||||
// Silence warnings about missing fall-through comments.
|
||||
#if TUKLIB_GNUC_REQ(7, 0)
|
||||
# pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef HAVE_SMALL
|
||||
|
||||
@@ -114,33 +120,33 @@ do { \
|
||||
case seq ## _CHOICE: \
|
||||
rc_if_0(ld.choice, seq ## _CHOICE) { \
|
||||
rc_update_0(ld.choice); \
|
||||
rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW0); \
|
||||
rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW1); \
|
||||
rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW2); \
|
||||
rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW0); \
|
||||
rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW1); \
|
||||
rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW2); \
|
||||
target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \
|
||||
} else { \
|
||||
rc_update_1(ld.choice); \
|
||||
case seq ## _CHOICE2: \
|
||||
rc_if_0(ld.choice2, seq ## _CHOICE2) { \
|
||||
rc_update_0(ld.choice2); \
|
||||
rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
|
||||
rc_bit_case(ld.mid[pos_state][symbol], , , \
|
||||
seq ## _MID0); \
|
||||
rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
|
||||
rc_bit_case(ld.mid[pos_state][symbol], , , \
|
||||
seq ## _MID1); \
|
||||
rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
|
||||
rc_bit_case(ld.mid[pos_state][symbol], , , \
|
||||
seq ## _MID2); \
|
||||
target = symbol - LEN_MID_SYMBOLS \
|
||||
+ MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \
|
||||
} else { \
|
||||
rc_update_1(ld.choice2); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH0); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH1); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH2); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH3); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH4); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH5); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH6); \
|
||||
rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH7); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH0); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH1); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH2); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH3); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH4); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH5); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH6); \
|
||||
rc_bit_case(ld.high[symbol], , , seq ## _HIGH7); \
|
||||
target = symbol - LEN_HIGH_SYMBOLS \
|
||||
+ MATCH_LEN_MIN \
|
||||
+ LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \
|
||||
@@ -161,7 +167,7 @@ typedef struct {
|
||||
} lzma_length_decoder;
|
||||
|
||||
|
||||
struct lzma_coder_s {
|
||||
typedef struct {
|
||||
///////////////////
|
||||
// Probabilities //
|
||||
///////////////////
|
||||
@@ -193,15 +199,15 @@ struct lzma_coder_s {
|
||||
/// Probability tree for the highest two bits of the match distance.
|
||||
/// There is a separate probability tree for match lengths of
|
||||
/// 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
|
||||
probability pos_slot[LEN_TO_POS_STATES][POS_SLOTS];
|
||||
probability dist_slot[DIST_STATES][DIST_SLOTS];
|
||||
|
||||
/// Probability trees for additional bits for match distance when the
|
||||
/// distance is in the range [4, 127].
|
||||
probability pos_special[FULL_DISTANCES - END_POS_MODEL_INDEX];
|
||||
probability pos_special[FULL_DISTANCES - DIST_MODEL_END];
|
||||
|
||||
/// Probability tree for the lowest four bits of a match distance
|
||||
/// that is equal to or greater than 128.
|
||||
probability pos_align[ALIGN_TABLE_SIZE];
|
||||
probability pos_align[ALIGN_SIZE];
|
||||
|
||||
/// Length of a normal match
|
||||
lzma_length_decoder match_len_decoder;
|
||||
@@ -245,8 +251,8 @@ struct lzma_coder_s {
|
||||
SEQ_LITERAL_WRITE,
|
||||
SEQ_IS_REP,
|
||||
seq_len(SEQ_MATCH_LEN),
|
||||
seq_6(SEQ_POS_SLOT),
|
||||
SEQ_POS_MODEL,
|
||||
seq_6(SEQ_DIST_SLOT),
|
||||
SEQ_DIST_MODEL,
|
||||
SEQ_DIRECT,
|
||||
seq_4(SEQ_ALIGN),
|
||||
SEQ_EOPM,
|
||||
@@ -277,14 +283,27 @@ struct lzma_coder_s {
|
||||
/// If decoding a literal: match byte.
|
||||
/// If decoding a match: length of the match.
|
||||
uint32_t len;
|
||||
};
|
||||
} lzma_lzma1_decoder;
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
const uint8_t *LZMA_RESTRICT in,
|
||||
size_t *LZMA_RESTRICT in_pos, size_t in_size)
|
||||
lzma_decode(void *coder_ptr, lzma_dict *restrict dictptr,
|
||||
const uint8_t *restrict in,
|
||||
size_t *restrict in_pos, size_t in_size)
|
||||
{
|
||||
lzma_lzma1_decoder *restrict coder = coder_ptr;
|
||||
|
||||
////////////////////
|
||||
// Initialization //
|
||||
////////////////////
|
||||
|
||||
{
|
||||
const lzma_ret ret = rc_read_init(
|
||||
&coder->rc, in, in_pos, in_size);
|
||||
if (ret != LZMA_STREAM_END)
|
||||
return ret;
|
||||
}
|
||||
|
||||
///////////////
|
||||
// Variables //
|
||||
///////////////
|
||||
@@ -331,16 +350,6 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos)
|
||||
dict.limit = dict.pos + (size_t)(coder->uncompressed_size);
|
||||
|
||||
////////////////////
|
||||
// Initialization //
|
||||
////////////////////
|
||||
|
||||
if (!rc_read_init(&coder->rc, in, in_pos, in_size))
|
||||
return LZMA_OK;
|
||||
|
||||
rc = coder->rc;
|
||||
rc_in_pos = *in_pos;
|
||||
|
||||
// The main decoder loop. The "switch" is used to restart the decoder at
|
||||
// correct location. Once restarted, the "switch" is no longer used.
|
||||
switch (coder->sequence)
|
||||
@@ -356,21 +365,6 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
break;
|
||||
|
||||
rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) {
|
||||
static const lzma_lzma_state next_state[] = {
|
||||
STATE_LIT_LIT,
|
||||
STATE_LIT_LIT,
|
||||
STATE_LIT_LIT,
|
||||
STATE_LIT_LIT,
|
||||
STATE_MATCH_LIT_LIT,
|
||||
STATE_REP_LIT_LIT,
|
||||
STATE_SHORTREP_LIT_LIT,
|
||||
STATE_MATCH_LIT,
|
||||
STATE_REP_LIT,
|
||||
STATE_SHORTREP_LIT,
|
||||
STATE_MATCH_LIT,
|
||||
STATE_REP_LIT
|
||||
};
|
||||
|
||||
rc_update_0(coder->is_match[state][pos_state]);
|
||||
|
||||
// It's a literal i.e. a single 8-bit byte.
|
||||
@@ -388,21 +382,16 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
rc_bit(probs[symbol], , , SEQ_LITERAL);
|
||||
} while (symbol < (1 << 8));
|
||||
#else
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL0);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL1);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL2);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL3);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL4);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL5);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL6);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL7);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL0);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL1);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL2);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL3);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL4);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL5);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL6);
|
||||
rc_bit_case(probs[symbol], , , SEQ_LITERAL7);
|
||||
#endif
|
||||
} else {
|
||||
#ifndef HAVE_SMALL
|
||||
uint32_t match_bit;
|
||||
uint32_t subcoder_index;
|
||||
#endif
|
||||
|
||||
// Decode literal with match byte.
|
||||
//
|
||||
// We store the byte we compare against
|
||||
@@ -441,6 +430,8 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
} while (symbol < (1 << 8));
|
||||
#else
|
||||
// Unroll the loop.
|
||||
uint32_t match_bit;
|
||||
uint32_t subcoder_index;
|
||||
|
||||
# define d(seq) \
|
||||
case seq: \
|
||||
@@ -474,6 +465,20 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
// Use a lookup table to update to literal state,
|
||||
// since compared to other state updates, this would
|
||||
// need two branches.
|
||||
static const lzma_lzma_state next_state[] = {
|
||||
STATE_LIT_LIT,
|
||||
STATE_LIT_LIT,
|
||||
STATE_LIT_LIT,
|
||||
STATE_LIT_LIT,
|
||||
STATE_MATCH_LIT_LIT,
|
||||
STATE_REP_LIT_LIT,
|
||||
STATE_SHORTREP_LIT_LIT,
|
||||
STATE_MATCH_LIT,
|
||||
STATE_REP_LIT,
|
||||
STATE_SHORTREP_LIT,
|
||||
STATE_MATCH_LIT,
|
||||
STATE_REP_LIT
|
||||
};
|
||||
state = next_state[state];
|
||||
|
||||
case SEQ_LITERAL_WRITE:
|
||||
@@ -509,28 +514,28 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
|
||||
// Prepare to decode the highest two bits of the
|
||||
// match distance.
|
||||
probs = coder->pos_slot[get_len_to_pos_state(len)];
|
||||
probs = coder->dist_slot[get_dist_state(len)];
|
||||
symbol = 1;
|
||||
|
||||
#ifdef HAVE_SMALL
|
||||
case SEQ_POS_SLOT:
|
||||
case SEQ_DIST_SLOT:
|
||||
do {
|
||||
rc_bit(probs[symbol], , , SEQ_POS_SLOT);
|
||||
} while (symbol < POS_SLOTS);
|
||||
rc_bit(probs[symbol], , , SEQ_DIST_SLOT);
|
||||
} while (symbol < DIST_SLOTS);
|
||||
#else
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT0);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT1);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT2);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT3);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT4);
|
||||
rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT5);
|
||||
rc_bit_case(probs[symbol], , , SEQ_DIST_SLOT0);
|
||||
rc_bit_case(probs[symbol], , , SEQ_DIST_SLOT1);
|
||||
rc_bit_case(probs[symbol], , , SEQ_DIST_SLOT2);
|
||||
rc_bit_case(probs[symbol], , , SEQ_DIST_SLOT3);
|
||||
rc_bit_case(probs[symbol], , , SEQ_DIST_SLOT4);
|
||||
rc_bit_case(probs[symbol], , , SEQ_DIST_SLOT5);
|
||||
#endif
|
||||
// Get rid of the highest bit that was needed for
|
||||
// indexing of the probability array.
|
||||
symbol -= POS_SLOTS;
|
||||
symbol -= DIST_SLOTS;
|
||||
assert(symbol <= 63);
|
||||
|
||||
if (symbol < START_POS_MODEL_INDEX) {
|
||||
if (symbol < DIST_MODEL_START) {
|
||||
// Match distances [0, 3] have only two bits.
|
||||
rep0 = symbol;
|
||||
} else {
|
||||
@@ -540,7 +545,7 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
assert(limit >= 1 && limit <= 30);
|
||||
rep0 = 2 + (symbol & 1);
|
||||
|
||||
if (symbol < END_POS_MODEL_INDEX) {
|
||||
if (symbol < DIST_MODEL_END) {
|
||||
// Prepare to decode the low bits for
|
||||
// a distance of [4, 127].
|
||||
assert(limit <= 5);
|
||||
@@ -560,38 +565,38 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
- symbol - 1;
|
||||
symbol = 1;
|
||||
offset = 0;
|
||||
case SEQ_POS_MODEL:
|
||||
case SEQ_DIST_MODEL:
|
||||
#ifdef HAVE_SMALL
|
||||
do {
|
||||
rc_bit(probs[symbol], ,
|
||||
rep0 += 1 << offset,
|
||||
SEQ_POS_MODEL);
|
||||
SEQ_DIST_MODEL);
|
||||
} while (++offset < limit);
|
||||
#else
|
||||
switch (limit) {
|
||||
case 5:
|
||||
assert(offset == 0);
|
||||
rc_bit(probs[symbol], 0,
|
||||
rc_bit(probs[symbol], ,
|
||||
rep0 += 1,
|
||||
SEQ_POS_MODEL);
|
||||
SEQ_DIST_MODEL);
|
||||
++offset;
|
||||
--limit;
|
||||
case 4:
|
||||
rc_bit(probs[symbol], 0,
|
||||
rc_bit(probs[symbol], ,
|
||||
rep0 += 1 << offset,
|
||||
SEQ_POS_MODEL);
|
||||
SEQ_DIST_MODEL);
|
||||
++offset;
|
||||
--limit;
|
||||
case 3:
|
||||
rc_bit(probs[symbol], 0,
|
||||
rc_bit(probs[symbol], ,
|
||||
rep0 += 1 << offset,
|
||||
SEQ_POS_MODEL);
|
||||
SEQ_DIST_MODEL);
|
||||
++offset;
|
||||
--limit;
|
||||
case 2:
|
||||
rc_bit(probs[symbol], 0,
|
||||
rc_bit(probs[symbol], ,
|
||||
rep0 += 1 << offset,
|
||||
SEQ_POS_MODEL);
|
||||
SEQ_DIST_MODEL);
|
||||
++offset;
|
||||
--limit;
|
||||
case 1:
|
||||
@@ -601,9 +606,9 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
// rc_bit_last() here to omit
|
||||
// the unneeded updating of
|
||||
// "symbol".
|
||||
rc_bit_last(probs[symbol], 0,
|
||||
rc_bit_last(probs[symbol], ,
|
||||
rep0 += 1 << offset,
|
||||
SEQ_POS_MODEL);
|
||||
SEQ_DIST_MODEL);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
@@ -635,19 +640,19 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
} while (++offset < ALIGN_BITS);
|
||||
#else
|
||||
case SEQ_ALIGN0:
|
||||
rc_bit(coder->pos_align[symbol], 0,
|
||||
rc_bit(coder->pos_align[symbol], ,
|
||||
rep0 += 1, SEQ_ALIGN0);
|
||||
case SEQ_ALIGN1:
|
||||
rc_bit(coder->pos_align[symbol], 0,
|
||||
rc_bit(coder->pos_align[symbol], ,
|
||||
rep0 += 2, SEQ_ALIGN1);
|
||||
case SEQ_ALIGN2:
|
||||
rc_bit(coder->pos_align[symbol], 0,
|
||||
rc_bit(coder->pos_align[symbol], ,
|
||||
rep0 += 4, SEQ_ALIGN2);
|
||||
case SEQ_ALIGN3:
|
||||
// Like in SEQ_POS_MODEL, we don't
|
||||
// Like in SEQ_DIST_MODEL, we don't
|
||||
// need "symbol" for anything else
|
||||
// than indexing the probability array.
|
||||
rc_bit_last(coder->pos_align[symbol], 0,
|
||||
rc_bit_last(coder->pos_align[symbol], ,
|
||||
rep0 += 8, SEQ_ALIGN3);
|
||||
#endif
|
||||
|
||||
@@ -732,11 +737,9 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
// is stored to rep0 and rep1, rep2 and rep3
|
||||
// are updated accordingly.
|
||||
rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) {
|
||||
uint32_t distance;
|
||||
|
||||
rc_update_0(coder->is_rep1[state]);
|
||||
|
||||
distance = rep1;
|
||||
const uint32_t distance = rep1;
|
||||
rep1 = rep0;
|
||||
rep0 = distance;
|
||||
|
||||
@@ -745,23 +748,19 @@ lzma_decode(lzma_coder *LZMA_RESTRICT coder, lzma_dict *LZMA_RESTRICT dictptr,
|
||||
case SEQ_IS_REP2:
|
||||
rc_if_0(coder->is_rep2[state],
|
||||
SEQ_IS_REP2) {
|
||||
uint32_t distance;
|
||||
|
||||
rc_update_0(coder->is_rep2[
|
||||
state]);
|
||||
|
||||
distance = rep2;
|
||||
const uint32_t distance = rep2;
|
||||
rep2 = rep1;
|
||||
rep1 = rep0;
|
||||
rep0 = distance;
|
||||
|
||||
} else {
|
||||
uint32_t distance;
|
||||
|
||||
rc_update_1(coder->is_rep2[
|
||||
state]);
|
||||
|
||||
distance = rep3;
|
||||
const uint32_t distance = rep3;
|
||||
rep3 = rep2;
|
||||
rep2 = rep1;
|
||||
rep1 = rep0;
|
||||
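As an illustrative aside, not part of the upstream patch: the is_rep1/is_rep2 branches above rotate the four remembered distances so that the reused one becomes rep0. The helper below performs the same move-to-front rotation on a small array, which may be easier to follow than the unrolled rep0..rep3 form; the names use_rep and reps exist only in this example.

#include <stdint.h>
#include <stdio.h>

/* Rotate the reused distance to the front, like the rep1/rep2/rep3 cases. */
static void use_rep(uint32_t reps[4], unsigned which)
{
	const uint32_t distance = reps[which];
	for (unsigned i = which; i > 0; --i)
		reps[i] = reps[i - 1];    /* shift newer entries down one step */
	reps[0] = distance;               /* the reused distance becomes rep0 */
}

int main(void)
{
	uint32_t reps[4] = { 10, 20, 30, 40 };
	use_rep(reps, 2);                 /* same effect as the rep2 branch */
	printf("%u %u %u %u\n", (unsigned)reps[0], (unsigned)reps[1],
			(unsigned)reps[2], (unsigned)reps[3]);
	return 0;                         /* prints: 30 10 20 40 */
}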
@@ -849,26 +848,17 @@ out:
|
||||
|
||||
|
||||
static void
|
||||
lzma_decoder_uncompressed(lzma_coder *coder, lzma_vli uncompressed_size)
|
||||
lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
|
||||
{
|
||||
lzma_lzma1_decoder *coder = coder_ptr;
|
||||
coder->uncompressed_size = uncompressed_size;
|
||||
}
|
||||
|
||||
/*
|
||||
extern void
|
||||
lzma_lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
|
||||
{
|
||||
// This is hack.
|
||||
(*(lzma_coder **)(coder))->uncompressed_size = uncompressed_size;
|
||||
}
|
||||
*/
|
||||
|
||||
static void
|
||||
lzma_decoder_reset(lzma_coder *coder, const void *opt)
|
||||
lzma_decoder_reset(void *coder_ptr, const void *opt)
|
||||
{
|
||||
uint32_t i, j, pos_state;
|
||||
uint32_t num_pos_states;
|
||||
|
||||
lzma_lzma1_decoder *coder = coder_ptr;
|
||||
const lzma_options_lzma *options = opt;
|
||||
|
||||
// NOTE: We assume that lc/lp/pb are valid since they were
|
||||
@@ -895,8 +885,8 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
|
||||
rc_reset(coder->rc);
|
||||
|
||||
// Bit and bittree decoders
|
||||
for (i = 0; i < STATES; ++i) {
|
||||
for (j = 0; j <= coder->pos_mask; ++j) {
|
||||
for (uint32_t i = 0; i < STATES; ++i) {
|
||||
for (uint32_t j = 0; j <= coder->pos_mask; ++j) {
|
||||
bit_reset(coder->is_match[i][j]);
|
||||
bit_reset(coder->is_rep0_long[i][j]);
|
||||
}
|
||||
@@ -907,22 +897,22 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
|
||||
bit_reset(coder->is_rep2[i]);
|
||||
}
|
||||
|
||||
for (i = 0; i < LEN_TO_POS_STATES; ++i)
|
||||
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
|
||||
for (uint32_t i = 0; i < DIST_STATES; ++i)
|
||||
bittree_reset(coder->dist_slot[i], DIST_SLOT_BITS);
|
||||
|
||||
for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
|
||||
for (uint32_t i = 0; i < FULL_DISTANCES - DIST_MODEL_END; ++i)
|
||||
bit_reset(coder->pos_special[i]);
|
||||
|
||||
bittree_reset(coder->pos_align, ALIGN_BITS);
|
||||
|
||||
// Len decoders (also bit/bittree)
|
||||
num_pos_states = 1U << options->pb;
|
||||
const uint32_t num_pos_states = 1U << options->pb;
|
||||
bit_reset(coder->match_len_decoder.choice);
|
||||
bit_reset(coder->match_len_decoder.choice2);
|
||||
bit_reset(coder->rep_len_decoder.choice);
|
||||
bit_reset(coder->rep_len_decoder.choice2);
|
||||
|
||||
for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
|
||||
for (uint32_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
|
||||
bittree_reset(coder->match_len_decoder.low[pos_state],
|
||||
LEN_LOW_BITS);
|
||||
bittree_reset(coder->match_len_decoder.mid[pos_state],
|
||||
@@ -949,13 +939,11 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
lzma_lzma_decoder_create(lzma_lz_decoder *lz, const lzma_allocator *allocator,
|
||||
const void *opt, lzma_lz_options *lz_options)
|
||||
{
|
||||
const lzma_options_lzma *options = opt;
|
||||
|
||||
if (lz->coder == NULL) {
|
||||
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
|
||||
lz->coder = lzma_alloc(sizeof(lzma_lzma1_decoder), allocator);
|
||||
if (lz->coder == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
@@ -966,6 +954,7 @@ lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
|
||||
// All dictionary sizes are OK here. LZ decoder will take care of
|
||||
// the special cases.
|
||||
const lzma_options_lzma *options = opt;
|
||||
lz_options->dict_size = options->dict_size;
|
||||
lz_options->preset_dict = options->preset_dict;
|
||||
lz_options->preset_dict_size = options->preset_dict_size;
|
||||
@@ -978,7 +967,7 @@ lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
/// initialization (lzma_lzma_decoder_init() passes function pointer to
|
||||
/// the LZ initialization).
|
||||
static lzma_ret
|
||||
lzma_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
lzma_decoder_init(lzma_lz_decoder *lz, const lzma_allocator *allocator,
|
||||
const void *options, lzma_lz_options *lz_options)
|
||||
{
|
||||
if (!is_lclppb_valid(options))
|
||||
@@ -995,7 +984,7 @@ lzma_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
|
||||
lzma_lzma_decoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters)
|
||||
{
|
||||
// LZMA can only be the last filter in the chain. This is enforced
|
||||
@@ -1027,7 +1016,8 @@ extern uint64_t
|
||||
lzma_lzma_decoder_memusage_nocheck(const void *options)
|
||||
{
|
||||
const lzma_options_lzma *const opt = options;
|
||||
return sizeof(lzma_coder) + lzma_lz_decoder_memusage(opt->dict_size);
|
||||
return sizeof(lzma_lzma1_decoder)
|
||||
+ lzma_lz_decoder_memusage(opt->dict_size);
|
||||
}
|
||||
|
||||
|
||||
@@ -1042,15 +1032,14 @@ lzma_lzma_decoder_memusage(const void *options)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma_props_decode(void **options, lzma_allocator *allocator,
|
||||
lzma_lzma_props_decode(void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size)
|
||||
{
|
||||
lzma_options_lzma *opt;
|
||||
|
||||
if (props_size != 5)
|
||||
return LZMA_OPTIONS_ERROR;
|
||||
|
||||
opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
|
||||
lzma_options_lzma *opt
|
||||
= lzma_alloc(sizeof(lzma_options_lzma), allocator);
|
||||
if (opt == NULL)
|
||||
return LZMA_MEM_ERROR;
|
||||
|
||||
|
||||
@@ -19,12 +19,13 @@
|
||||
|
||||
/// Allocates and initializes LZMA decoder
|
||||
extern lzma_ret lzma_lzma_decoder_init(lzma_next_coder *next,
|
||||
lzma_allocator *allocator, const lzma_filter_info *filters);
|
||||
const lzma_allocator *allocator,
|
||||
const lzma_filter_info *filters);
|
||||
|
||||
extern uint64_t lzma_lzma_decoder_memusage(const void *options);
|
||||
|
||||
extern lzma_ret lzma_lzma_props_decode(
|
||||
void **options, lzma_allocator *allocator,
|
||||
void **options, const lzma_allocator *allocator,
|
||||
const uint8_t *props, size_t props_size);
|
||||
|
||||
|
||||
@@ -40,7 +41,7 @@ extern bool lzma_lzma_lclppb_decode(
|
||||
/// Allocate and setup function pointers only. This is used by LZMA1 and
|
||||
/// LZMA2 decoders.
|
||||
extern lzma_ret lzma_lzma_decoder_create(
|
||||
lzma_lz_decoder *lz, lzma_allocator *allocator,
|
||||
lzma_lz_decoder *lz, const lzma_allocator *allocator,
|
||||
const void *opt, lzma_lz_options *lz_options);
|
||||
|
||||
/// Gets memory usage without validating lc/lp/pb. This is used by LZMA2
|
||||
|
||||
@@ -28,14 +28,11 @@ literal_matched(lzma_range_encoder *rc, probability *subcoder,
|
||||
symbol += UINT32_C(1) << 8;
|
||||
|
||||
do {
|
||||
uint32_t match_bit;
|
||||
uint32_t subcoder_index;
|
||||
uint32_t bit;
|
||||
|
||||
match_byte <<= 1;
|
||||
match_bit = match_byte & offset;
|
||||
subcoder_index = offset + match_bit + (symbol >> 8);
|
||||
bit = (symbol >> 7) & 1;
|
||||
const uint32_t match_bit = match_byte & offset;
|
||||
const uint32_t subcoder_index
|
||||
= offset + match_bit + (symbol >> 8);
|
||||
const uint32_t bit = (symbol >> 7) & 1;
|
||||
rc_bit(rc, &subcoder[subcoder_index], bit);
|
||||
|
||||
symbol <<= 1;
|
||||
@@ -46,7 +43,7 @@ literal_matched(lzma_range_encoder *rc, probability *subcoder,
|
||||
|
||||
|
||||
static inline void
|
||||
literal(lzma_coder *coder, lzma_mf *mf, uint32_t position)
|
||||
literal(lzma_lzma1_encoder *coder, lzma_mf *mf, uint32_t position)
|
||||
{
|
||||
// Locate the literal byte to be encoded and the subcoder.
|
||||
const uint8_t cur_byte = mf->buffer[
|
||||
@@ -80,19 +77,16 @@ literal(lzma_coder *coder, lzma_mf *mf, uint32_t position)
|
||||
static void
|
||||
length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state)
|
||||
{
|
||||
uint32_t a0, a1, b0, b1;
|
||||
uint32_t *prices;
|
||||
uint32_t i;
|
||||
|
||||
const uint32_t table_size = lc->table_size;
|
||||
lc->counters[pos_state] = table_size;
|
||||
|
||||
a0 = rc_bit_0_price(lc->choice);
|
||||
a1 = rc_bit_1_price(lc->choice);
|
||||
b0 = a1 + rc_bit_0_price(lc->choice2);
|
||||
b1 = a1 + rc_bit_1_price(lc->choice2);
|
||||
prices = lc->prices[pos_state];
|
||||
const uint32_t a0 = rc_bit_0_price(lc->choice);
|
||||
const uint32_t a1 = rc_bit_1_price(lc->choice);
|
||||
const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2);
|
||||
const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2);
|
||||
uint32_t *const prices = lc->prices[pos_state];
|
||||
|
||||
uint32_t i;
|
||||
for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i)
|
||||
prices[i] = a0 + rc_bittree_price(lc->low[pos_state],
|
||||
LEN_LOW_BITS, i);
|
||||
@@ -146,39 +140,36 @@ length(lzma_range_encoder *rc, lzma_length_encoder *lc,
|
||||
///////////
|
||||
|
||||
static inline void
|
||||
match(lzma_coder *coder, const uint32_t pos_state,
|
||||
match(lzma_lzma1_encoder *coder, const uint32_t pos_state,
|
||||
const uint32_t distance, const uint32_t len)
|
||||
{
|
||||
uint32_t pos_slot;
|
||||
uint32_t len_to_pos_state;
|
||||
|
||||
update_match(coder->state);
|
||||
|
||||
length(&coder->rc, &coder->match_len_encoder, pos_state, len,
|
||||
coder->fast_mode);
|
||||
|
||||
pos_slot = get_pos_slot(distance);
|
||||
len_to_pos_state = get_len_to_pos_state(len);
|
||||
rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state],
|
||||
POS_SLOT_BITS, pos_slot);
|
||||
const uint32_t dist_slot = get_dist_slot(distance);
|
||||
const uint32_t dist_state = get_dist_state(len);
|
||||
rc_bittree(&coder->rc, coder->dist_slot[dist_state],
|
||||
DIST_SLOT_BITS, dist_slot);
|
||||
|
||||
if (pos_slot >= START_POS_MODEL_INDEX) {
|
||||
const uint32_t footer_bits = (pos_slot >> 1) - 1;
|
||||
const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
|
||||
const uint32_t pos_reduced = distance - base;
|
||||
if (dist_slot >= DIST_MODEL_START) {
|
||||
const uint32_t footer_bits = (dist_slot >> 1) - 1;
|
||||
const uint32_t base = (2 | (dist_slot & 1)) << footer_bits;
|
||||
const uint32_t dist_reduced = distance - base;
|
||||
|
||||
if (pos_slot < END_POS_MODEL_INDEX) {
|
||||
// Careful here: base - pos_slot - 1 can be -1, but
|
||||
if (dist_slot < DIST_MODEL_END) {
|
||||
// Careful here: base - dist_slot - 1 can be -1, but
|
||||
// rc_bittree_reverse starts at probs[1], not probs[0].
|
||||
rc_bittree_reverse(&coder->rc,
|
||||
coder->pos_special + base - pos_slot - 1,
|
||||
footer_bits, pos_reduced);
|
||||
coder->dist_special + base - dist_slot - 1,
|
||||
footer_bits, dist_reduced);
|
||||
} else {
|
||||
rc_direct(&coder->rc, pos_reduced >> ALIGN_BITS,
|
||||
rc_direct(&coder->rc, dist_reduced >> ALIGN_BITS,
|
||||
footer_bits - ALIGN_BITS);
|
||||
rc_bittree_reverse(
|
||||
&coder->rc, coder->pos_align,
|
||||
ALIGN_BITS, pos_reduced & ALIGN_MASK);
|
||||
&coder->rc, coder->dist_align,
|
||||
ALIGN_BITS, dist_reduced & ALIGN_MASK);
|
||||
++coder->align_price_count;
|
||||
}
|
||||
}
|
||||
@@ -196,7 +187,7 @@ match(lzma_coder *coder, const uint32_t pos_state,
|
||||
////////////////////
|
||||
|
||||
static inline void
|
||||
rep_match(lzma_coder *coder, const uint32_t pos_state,
|
||||
rep_match(lzma_lzma1_encoder *coder, const uint32_t pos_state,
|
||||
const uint32_t rep, const uint32_t len)
|
||||
{
|
||||
if (rep == 0) {
|
||||
@@ -240,7 +231,7 @@ rep_match(lzma_coder *coder, const uint32_t pos_state,
|
||||
//////////
|
||||
|
||||
static void
|
||||
encode_symbol(lzma_coder *coder, lzma_mf *mf,
|
||||
encode_symbol(lzma_lzma1_encoder *coder, lzma_mf *mf,
|
||||
uint32_t back, uint32_t len, uint32_t position)
|
||||
{
|
||||
const uint32_t pos_state = position & coder->pos_mask;
|
||||
@@ -256,7 +247,7 @@ encode_symbol(lzma_coder *coder, lzma_mf *mf,
|
||||
rc_bit(&coder->rc,
|
||||
&coder->is_match[coder->state][pos_state], 1);
|
||||
|
||||
if (back < REP_DISTANCES) {
|
||||
if (back < REPS) {
|
||||
// It's a repeated match i.e. the same distance
|
||||
// has been used earlier.
|
||||
rc_bit(&coder->rc, &coder->is_rep[coder->state], 1);
|
||||
@@ -264,7 +255,7 @@ encode_symbol(lzma_coder *coder, lzma_mf *mf,
|
||||
} else {
|
||||
// Normal match
|
||||
rc_bit(&coder->rc, &coder->is_rep[coder->state], 0);
|
||||
match(coder, pos_state, back - REP_DISTANCES, len);
|
||||
match(coder, pos_state, back - REPS, len);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -274,7 +265,7 @@ encode_symbol(lzma_coder *coder, lzma_mf *mf,
|
||||
|
||||
|
||||
static bool
|
||||
encode_init(lzma_coder *coder, lzma_mf *mf)
|
||||
encode_init(lzma_lzma1_encoder *coder, lzma_mf *mf)
|
||||
{
|
||||
assert(mf_position(mf) == 0);
|
||||
|
||||
@@ -302,7 +293,7 @@ encode_init(lzma_coder *coder, lzma_mf *mf)
|
||||
|
||||
|
||||
static void
|
||||
encode_eopm(lzma_coder *coder, uint32_t position)
|
||||
encode_eopm(lzma_lzma1_encoder *coder, uint32_t position)
|
||||
{
|
||||
const uint32_t pos_state = position & coder->pos_mask;
|
||||
rc_bit(&coder->rc, &coder->is_match[coder->state][pos_state], 1);
|
||||
@@ -318,23 +309,18 @@ encode_eopm(lzma_coder *coder, uint32_t position)
|
||||
|
||||
|
||||
extern lzma_ret
|
||||
lzma_lzma_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
uint8_t *LZMA_RESTRICT out, size_t *LZMA_RESTRICT out_pos,
|
||||
lzma_lzma_encode(lzma_lzma1_encoder *restrict coder, lzma_mf *restrict mf,
|
||||
uint8_t *restrict out, size_t *restrict out_pos,
|
||||
size_t out_size, uint32_t limit)
|
||||
{
|
||||
uint32_t position;
|
||||
|
||||
// Initialize the stream if no data has been encoded yet.
|
||||
if (!coder->is_initialized && !encode_init(coder, mf))
|
||||
return LZMA_OK;
|
||||
|
||||
// Get the lowest bits of the uncompressed offset from the LZ layer.
|
||||
position = mf_position(mf);
|
||||
uint32_t position = mf_position(mf);
|
||||
|
||||
while (true) {
|
||||
uint32_t len;
|
||||
uint32_t back;
|
||||
|
||||
// Encode pending bits, if any. Calling this before encoding
|
||||
// the next symbol is needed only with plain LZMA, since
|
||||
// LZMA2 always provides big enough buffer to flush
|
||||
@@ -367,12 +353,14 @@ lzma_lzma_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
|
||||
// Get optimal match (repeat position and length).
|
||||
// Value ranges for pos:
|
||||
// - [0, REP_DISTANCES): repeated match
|
||||
// - [REP_DISTANCES, UINT32_MAX):
|
||||
// match at (pos - REP_DISTANCES)
|
||||
// - [0, REPS): repeated match
|
||||
// - [REPS, UINT32_MAX):
|
||||
// match at (pos - REPS)
|
||||
// - UINT32_MAX: not a match but a literal
|
||||
// Value ranges for len:
|
||||
// - [MATCH_LEN_MIN, MATCH_LEN_MAX]
|
||||
uint32_t len;
|
||||
uint32_t back;
|
||||
|
||||
if (coder->fast_mode)
|
||||
lzma_lzma_optimum_fast(coder, mf, &back, &len);
|
||||
@@ -414,8 +402,8 @@ lzma_lzma_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
|
||||
|
||||
static lzma_ret
|
||||
lzma_encode(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
|
||||
uint8_t *LZMA_RESTRICT out, size_t *LZMA_RESTRICT out_pos,
|
||||
lzma_encode(void *coder, lzma_mf *restrict mf,
uint8_t *restrict out, size_t *restrict out_pos,
size_t out_size)
{
// Plain LZMA has no support for sync-flushing.
@@ -465,12 +453,10 @@ static void
length_encoder_reset(lzma_length_encoder *lencoder,
const uint32_t num_pos_states, const bool fast_mode)
{
size_t pos_state;

bit_reset(lencoder->choice);
bit_reset(lencoder->choice2);

for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
for (size_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS);
bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS);
}
@@ -478,7 +464,7 @@ length_encoder_reset(lzma_length_encoder *lencoder,
bittree_reset(lencoder->high, LEN_HIGH_BITS);

if (!fast_mode)
for (pos_state = 0; pos_state < num_pos_states;
for (uint32_t pos_state = 0; pos_state < num_pos_states;
++pos_state)
length_update_prices(lencoder, pos_state);

@@ -487,10 +473,9 @@ length_encoder_reset(lzma_length_encoder *lencoder,


extern lzma_ret
lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
lzma_lzma_encoder_reset(lzma_lzma1_encoder *coder,
const lzma_options_lzma *options)
{
size_t i, j;

if (!is_options_valid(options))
return LZMA_OPTIONS_ERROR;

@@ -503,14 +488,14 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)

// State
coder->state = STATE_LIT_LIT;
for (i = 0; i < REP_DISTANCES; ++i)
for (size_t i = 0; i < REPS; ++i)
coder->reps[i] = 0;

literal_init(coder->literal, options->lc, options->lp);

// Bit encoders
for (i = 0; i < STATES; ++i) {
for (j = 0; j <= coder->pos_mask; ++j) {
for (size_t i = 0; i < STATES; ++i) {
for (size_t j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]);
}
@@ -521,14 +506,14 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
bit_reset(coder->is_rep2[i]);
}

for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]);
for (size_t i = 0; i < FULL_DISTANCES - DIST_MODEL_END; ++i)
bit_reset(coder->dist_special[i]);

// Bit tree encoders
for (i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
for (size_t i = 0; i < DIST_STATES; ++i)
bittree_reset(coder->dist_slot[i], DIST_SLOT_BITS);

bittree_reset(coder->pos_align, ALIGN_BITS);
bittree_reset(coder->dist_align, ALIGN_BITS);

// Length encoders
length_encoder_reset(&coder->match_len_encoder,
@@ -561,20 +546,18 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)


extern lzma_ret
lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
lzma_lzma_encoder_create(void **coder_ptr,
const lzma_allocator *allocator,
const lzma_options_lzma *options, lzma_lz_options *lz_options)
{
lzma_coder *coder;
uint32_t log_size = 0;

// Allocate lzma_coder if it wasn't already allocated.
// Allocate lzma_lzma1_encoder if it wasn't already allocated.
if (*coder_ptr == NULL) {
*coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator);
*coder_ptr = lzma_alloc(sizeof(lzma_lzma1_encoder), allocator);
if (*coder_ptr == NULL)
return LZMA_MEM_ERROR;
}

coder = *coder_ptr;
lzma_lzma1_encoder *coder = *coder_ptr;

// Set compression mode. We haven't validated the options yet,
// but it's OK here, since nothing bad happens with invalid
@@ -590,6 +573,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,

// Set dist_table_size.
// Round the dictionary size up to next 2^n.
uint32_t log_size = 0;
while ((UINT32_C(1) << log_size) < options->dict_size)
++log_size;

@@ -622,7 +606,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,


static lzma_ret
lzma_encoder_init(lzma_lz_encoder *lz, lzma_allocator *allocator,
lzma_encoder_init(lzma_lz_encoder *lz, const lzma_allocator *allocator,
const void *options, lzma_lz_options *lz_options)
{
lz->code = &lzma_encode;
@@ -632,7 +616,7 @@ lzma_encoder_init(lzma_lz_encoder *lz, lzma_allocator *allocator,


extern lzma_ret
lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_lzma_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_filter_info *filters)
{
return lzma_lz_encoder_init(
@@ -643,19 +627,17 @@ lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern uint64_t
lzma_lzma_encoder_memusage(const void *options)
{
lzma_lz_options lz_options;
uint64_t lz_memusage;

if (!is_options_valid(options))
return UINT64_MAX;

lzma_lz_options lz_options;
set_lz_options(&lz_options, options);

lz_memusage = lzma_lz_encoder_memusage(&lz_options);
const uint64_t lz_memusage = lzma_lz_encoder_memusage(&lz_options);
if (lz_memusage == UINT64_MAX)
return UINT64_MAX;

return (uint64_t)(sizeof(lzma_coder)) + lz_memusage;
return (uint64_t)(sizeof(lzma_lzma1_encoder)) + lz_memusage;
}

@@ -17,8 +17,12 @@
#include "common.h"


typedef struct lzma_lzma1_encoder_s lzma_lzma1_encoder;


extern lzma_ret lzma_lzma_encoder_init(lzma_next_coder *next,
lzma_allocator *allocator, const lzma_filter_info *filters);
const lzma_allocator *allocator,
const lzma_filter_info *filters);


extern uint64_t lzma_lzma_encoder_memusage(const void *options);
@@ -35,18 +39,18 @@ extern bool lzma_lzma_lclppb_encode(

/// Initializes raw LZMA encoder; this is used by LZMA2.
extern lzma_ret lzma_lzma_encoder_create(
lzma_coder **coder_ptr, lzma_allocator *allocator,
void **coder_ptr, const lzma_allocator *allocator,
const lzma_options_lzma *options, lzma_lz_options *lz_options);


/// Resets an already initialized LZMA encoder; this is used by LZMA2.
extern lzma_ret lzma_lzma_encoder_reset(
lzma_coder *coder, const lzma_options_lzma *options);
lzma_lzma1_encoder *coder, const lzma_options_lzma *options);


extern lzma_ret lzma_lzma_encode(lzma_coder *LZMA_RESTRICT coder,
lzma_mf *LZMA_RESTRICT mf, uint8_t *LZMA_RESTRICT out,
size_t *LZMA_RESTRICT out_pos, size_t out_size,
extern lzma_ret lzma_lzma_encode(lzma_lzma1_encoder *restrict coder,
lzma_mf *restrict mf, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size,
uint32_t read_limit);

#endif

@@ -10,6 +10,7 @@
///////////////////////////////////////////////////////////////////////////////

#include "lzma_encoder_private.h"
#include "memcmplen.h"


#define change_pair(small_dist, big_dist) \
@@ -17,17 +18,10 @@


extern void
lzma_lzma_optimum_fast(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
uint32_t *LZMA_RESTRICT back_res, uint32_t *LZMA_RESTRICT len_res)
lzma_lzma_optimum_fast(lzma_lzma1_encoder *restrict coder,
lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res)
{
const uint8_t *buf;
uint32_t buf_avail;
uint32_t i;
uint32_t rep_len = 0;
uint32_t rep_index = 0;
uint32_t back_main = 0;
uint32_t limit;

const uint32_t nice_len = mf->nice_len;

uint32_t len_main;
@@ -40,8 +34,8 @@ lzma_lzma_optimum_fast(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT m
matches_count = coder->matches_count;
}

buf = mf_ptr(mf) - 1;
buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
const uint8_t *buf = mf_ptr(mf) - 1;
const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);

if (buf_avail < 2) {
// There's not enough input left to encode a match.
@@ -51,9 +45,10 @@ lzma_lzma_optimum_fast(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT m
}

// Look for repeated matches; scan the previous four match distances
for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t len;
uint32_t rep_len = 0;
uint32_t rep_index = 0;

for (uint32_t i = 0; i < REPS; ++i) {
// Pointer to the beginning of the match candidate
const uint8_t *const buf_back = buf - coder->reps[i] - 1;

@@ -64,8 +59,8 @@ lzma_lzma_optimum_fast(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT m

// The first two bytes matched.
// Calculate the length of the match.
for (len = 2; len < buf_avail
&& buf[len] == buf_back[len]; ++len) ;
const uint32_t len = lzma_memcmplen(
buf, buf_back, 2, buf_avail);

// If we have found a repeated match that is at least
// nice_len long, return it immediately.
@@ -85,13 +80,13 @@ lzma_lzma_optimum_fast(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT m
// We didn't find a long enough repeated match. Encode it as a normal
// match if the match length is at least nice_len.
if (len_main >= nice_len) {
*back_res = coder->matches[matches_count - 1].dist
+ REP_DISTANCES;
*back_res = coder->matches[matches_count - 1].dist + REPS;
*len_res = len_main;
mf_skip(mf, len_main - 1);
return;
}

uint32_t back_main = 0;
if (len_main >= 2) {
back_main = coder->matches[matches_count - 1].dist;

@@ -158,27 +153,17 @@ lzma_lzma_optimum_fast(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT m
// the old buf pointer instead of recalculating it with mf_ptr().
++buf;

limit = len_main - 1;
const uint32_t limit = my_max(2, len_main - 1);

for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t len;

const uint8_t *const buf_back = buf - coder->reps[i] - 1;

if (not_equal_16(buf, buf_back))
continue;

for (len = 2; len < limit
&& buf[len] == buf_back[len]; ++len) ;

if (len >= limit) {
for (uint32_t i = 0; i < REPS; ++i) {
if (memcmp(buf, buf - coder->reps[i] - 1, limit) == 0) {
*back_res = UINT32_MAX;
*len_res = 1;
return;
}
}

*back_res = back_main + REP_DISTANCES;
*back_res = back_main + REPS;
*len_res = len_main;
mf_skip(mf, len_main - 2);
return;

@@ -11,6 +11,7 @@

#include "lzma_encoder_private.h"
#include "fastpos.h"
#include "memcmplen.h"


////////////
@@ -18,7 +19,7 @@
////////////

static uint32_t
get_literal_price(const lzma_coder *const coder, const uint32_t pos,
get_literal_price(const lzma_lzma1_encoder *const coder, const uint32_t pos,
const uint32_t prev_byte, const bool match_mode,
uint32_t match_byte, uint32_t symbol)
{
@@ -35,15 +36,12 @@ get_literal_price(const lzma_coder *const coder, const uint32_t pos,
symbol += UINT32_C(1) << 8;

do {
uint32_t match_bit;
uint32_t subcoder_index;
uint32_t bit;

match_byte <<= 1;

match_bit = match_byte & offset;
subcoder_index = offset + match_bit + (symbol >> 8);
bit = (symbol >> 7) & 1;
const uint32_t match_bit = match_byte & offset;
const uint32_t subcoder_index
= offset + match_bit + (symbol >> 8);
const uint32_t bit = (symbol >> 7) & 1;
price += rc_bit_price(subcoder[subcoder_index], bit);

symbol <<= 1;
@@ -67,7 +65,7 @@ get_len_price(const lzma_length_encoder *const lencoder,


static inline uint32_t
get_short_rep_price(const lzma_coder *const coder,
get_short_rep_price(const lzma_lzma1_encoder *const coder,
const lzma_lzma_state state, const uint32_t pos_state)
{
return rc_bit_0_price(coder->is_rep0[state])
@@ -76,7 +74,7 @@ get_short_rep_price(const lzma_coder *const coder,


static inline uint32_t
get_pure_rep_price(const lzma_coder *const coder, const uint32_t rep_index,
get_pure_rep_price(const lzma_lzma1_encoder *const coder, const uint32_t rep_index,
const lzma_lzma_state state, uint32_t pos_state)
{
uint32_t price;
@@ -101,7 +99,7 @@ get_pure_rep_price(const lzma_coder *const coder, const uint32_t rep_index,


static inline uint32_t
get_rep_price(const lzma_coder *const coder, const uint32_t rep_index,
get_rep_price(const lzma_lzma1_encoder *const coder, const uint32_t rep_index,
const uint32_t len, const lzma_lzma_state state,
const uint32_t pos_state)
{
@@ -111,18 +109,18 @@ get_rep_price(const lzma_coder *const coder, const uint32_t rep_index,


static inline uint32_t
get_pos_len_price(const lzma_coder *const coder, const uint32_t pos,
get_dist_len_price(const lzma_lzma1_encoder *const coder, const uint32_t dist,
const uint32_t len, const uint32_t pos_state)
{
const uint32_t len_to_pos_state = get_len_to_pos_state(len);
const uint32_t dist_state = get_dist_state(len);
uint32_t price;

if (pos < FULL_DISTANCES) {
price = coder->distances_prices[len_to_pos_state][pos];
if (dist < FULL_DISTANCES) {
price = coder->dist_prices[dist_state][dist];
} else {
const uint32_t pos_slot = get_pos_slot_2(pos);
price = coder->pos_slot_prices[len_to_pos_state][pos_slot]
+ coder->align_prices[pos & ALIGN_MASK];
const uint32_t dist_slot = get_dist_slot_2(dist);
price = coder->dist_slot_prices[dist_state][dist_slot]
+ coder->align_prices[dist & ALIGN_MASK];
}

price += get_len_price(&coder->match_len_encoder, len, pos_state);
@@ -132,59 +130,53 @@ get_pos_len_price(const lzma_coder *const coder, const uint32_t pos,


static void
fill_distances_prices(lzma_coder *coder)
fill_dist_prices(lzma_lzma1_encoder *coder)
{
uint32_t len_to_pos_state;
uint32_t pos_slot;
uint32_t i;
for (uint32_t dist_state = 0; dist_state < DIST_STATES; ++dist_state) {

for (len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state) {
uint32_t *const dist_slot_prices
= coder->dist_slot_prices[dist_state];

uint32_t *const pos_slot_prices
= coder->pos_slot_prices[len_to_pos_state];

// Price to encode the pos_slot.
for (pos_slot = 0;
pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] = rc_bittree_price(
coder->pos_slot[len_to_pos_state],
POS_SLOT_BITS, pos_slot);
// Price to encode the dist_slot.
for (uint32_t dist_slot = 0;
dist_slot < coder->dist_table_size; ++dist_slot)
dist_slot_prices[dist_slot] = rc_bittree_price(
coder->dist_slot[dist_state],
DIST_SLOT_BITS, dist_slot);

// For matches with distance >= FULL_DISTANCES, add the price
// of the direct bits part of the match distance. (Align bits
// are handled by fill_align_prices()).
for (pos_slot = END_POS_MODEL_INDEX;
pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] += rc_direct_price(
((pos_slot >> 1) - 1) - ALIGN_BITS);
for (uint32_t dist_slot = DIST_MODEL_END;
dist_slot < coder->dist_table_size;
++dist_slot)
dist_slot_prices[dist_slot] += rc_direct_price(
((dist_slot >> 1) - 1) - ALIGN_BITS);

// Distances in the range [0, 3] are fully encoded with
// pos_slot, so they are used for coder->distances_prices
// dist_slot, so they are used for coder->dist_prices
// as is.
for (i = 0; i < START_POS_MODEL_INDEX; ++i)
coder->distances_prices[len_to_pos_state][i]
= pos_slot_prices[i];
for (uint32_t i = 0; i < DIST_MODEL_START; ++i)
coder->dist_prices[dist_state][i]
= dist_slot_prices[i];
}

// Distances in the range [4, 127] depend on pos_slot and pos_special.
// We do this in a loop separate from the above loop to avoid
// redundant calls to get_pos_slot().
for (i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
const uint32_t pos_slot = get_pos_slot(i);
const uint32_t footer_bits = ((pos_slot >> 1) - 1);
const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
// Distances in the range [4, 127] depend on dist_slot and
// dist_special. We do this in a loop separate from the above
// loop to avoid redundant calls to get_dist_slot().
for (uint32_t i = DIST_MODEL_START; i < FULL_DISTANCES; ++i) {
const uint32_t dist_slot = get_dist_slot(i);
const uint32_t footer_bits = ((dist_slot >> 1) - 1);
const uint32_t base = (2 | (dist_slot & 1)) << footer_bits;
const uint32_t price = rc_bittree_reverse_price(
coder->pos_special + base - pos_slot - 1,
coder->dist_special + base - dist_slot - 1,
footer_bits, i - base);

for (len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state)
coder->distances_prices[len_to_pos_state][i]
= price + coder->pos_slot_prices[
len_to_pos_state][pos_slot];
for (uint32_t dist_state = 0; dist_state < DIST_STATES;
++dist_state)
coder->dist_prices[dist_state][i]
= price + coder->dist_slot_prices[
dist_state][dist_slot];
}

coder->match_price_count = 0;
@@ -193,12 +185,11 @@ fill_distances_prices(lzma_coder *coder)


static void
fill_align_prices(lzma_coder *coder)
fill_align_prices(lzma_lzma1_encoder *coder)
{
uint32_t i;
for (i = 0; i < ALIGN_TABLE_SIZE; ++i)
for (uint32_t i = 0; i < ALIGN_SIZE; ++i)
coder->align_prices[i] = rc_bittree_reverse_price(
coder->pos_align, ALIGN_BITS, i);
coder->dist_align, ALIGN_BITS, i);

coder->align_price_count = 0;
return;
@@ -230,18 +221,15 @@ make_short_rep(lzma_optimal *optimal)


static void
backward(lzma_coder *LZMA_RESTRICT coder, uint32_t *LZMA_RESTRICT len_res,
uint32_t *LZMA_RESTRICT back_res, uint32_t cur)
backward(lzma_lzma1_encoder *restrict coder, uint32_t *restrict len_res,
uint32_t *restrict back_res, uint32_t cur)
{
coder->opts_end_index = cur;

uint32_t pos_mem = coder->opts[cur].pos_prev;
uint32_t back_mem = coder->opts[cur].back_prev;

coder->opts_end_index = cur;

do {
const uint32_t pos_prev = pos_mem;
const uint32_t back_cur = back_mem;

if (coder->opts[cur].prev_1_is_literal) {
make_literal(&coder->opts[pos_mem]);
coder->opts[pos_mem].pos_prev = pos_mem - 1;
@@ -256,6 +244,9 @@ backward(lzma_coder *LZMA_RESTRICT coder, uint32_t *LZMA_RESTRICT len_res,
}
}

const uint32_t pos_prev = pos_mem;
const uint32_t back_cur = back_mem;

back_mem = coder->opts[pos_prev].back_prev;
pos_mem = coder->opts[pos_prev].pos_prev;

@@ -278,27 +269,10 @@ backward(lzma_coder *LZMA_RESTRICT coder, uint32_t *LZMA_RESTRICT len_res,
//////////

static inline uint32_t
helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
uint32_t *LZMA_RESTRICT back_res, uint32_t *LZMA_RESTRICT len_res,
helper1(lzma_lzma1_encoder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position)
{
uint32_t buf_avail;
const uint8_t *buf;
uint32_t rep_lens[REP_DISTANCES];
uint32_t rep_max_index = 0;
uint32_t i;

uint8_t current_byte;
uint8_t match_byte;

uint32_t pos_state;
uint32_t match_price;
uint32_t rep_match_price;
uint32_t len_end;
uint32_t len;

uint32_t normal_match_price;

const uint32_t nice_len = mf->nice_len;

uint32_t len_main;
@@ -312,18 +286,19 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
matches_count = coder->matches_count;
}

buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) {
*back_res = UINT32_MAX;
*len_res = 1;
return UINT32_MAX;
}

buf = mf_ptr(mf) - 1;
const uint8_t *const buf = mf_ptr(mf) - 1;

for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t len_test;
uint32_t rep_lens[REPS];
uint32_t rep_max_index = 0;

for (uint32_t i = 0; i < REPS; ++i) {
const uint8_t *const buf_back = buf - coder->reps[i] - 1;

if (not_equal_16(buf, buf_back)) {
@@ -331,12 +306,9 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
continue;
}

for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test];
++len_test) ;
rep_lens[i] = lzma_memcmplen(buf, buf_back, 2, buf_avail);

rep_lens[i] = len_test;
if (len_test > rep_lens[rep_max_index])
if (rep_lens[i] > rep_lens[rep_max_index])
rep_max_index = i;
}

@@ -349,15 +321,14 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,


if (len_main >= nice_len) {
*back_res = coder->matches[matches_count - 1].dist
+ REP_DISTANCES;
*back_res = coder->matches[matches_count - 1].dist + REPS;
*len_res = len_main;
mf_skip(mf, len_main - 1);
return UINT32_MAX;
}

current_byte = *buf;
match_byte = *(buf - coder->reps[0] - 1);
const uint8_t current_byte = *buf;
const uint8_t match_byte = *(buf - coder->reps[0] - 1);

if (len_main < 2 && current_byte != match_byte
&& rep_lens[rep_max_index] < 2) {
@@ -368,7 +339,7 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,

coder->opts[0].state = coder->state;

pos_state = position & coder->pos_mask;
const uint32_t pos_state = position & coder->pos_mask;

coder->opts[1].price = rc_bit_0_price(
coder->is_match[coder->state][pos_state])
@@ -378,9 +349,9 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,

make_literal(&coder->opts[1]);

match_price = rc_bit_1_price(
const uint32_t match_price = rc_bit_1_price(
coder->is_match[coder->state][pos_state]);
rep_match_price = match_price
const uint32_t rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[coder->state]);

if (match_byte == current_byte) {
@@ -394,7 +365,7 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
}
}

len_end = my_max(len_main, rep_lens[rep_max_index]);
const uint32_t len_end = my_max(len_main, rep_lens[rep_max_index]);

if (len_end < 2) {
*back_res = coder->opts[1].back_prev;
@@ -404,23 +375,21 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,

coder->opts[1].pos_prev = 0;

for (i = 0; i < REP_DISTANCES; ++i)
for (uint32_t i = 0; i < REPS; ++i)
coder->opts[0].backs[i] = coder->reps[i];

len = len_end;
uint32_t len = len_end;
do {
coder->opts[len].price = RC_INFINITY_PRICE;
} while (--len >= 2);


for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t price;

for (uint32_t i = 0; i < REPS; ++i) {
uint32_t rep_len = rep_lens[i];
if (rep_len < 2)
continue;

price = rep_match_price + get_pure_rep_price(
const uint32_t price = rep_match_price + get_pure_rep_price(
coder, i, coder->state, pos_state);

do {
@@ -439,7 +408,7 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
}


normal_match_price = match_price
const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[coder->state]);

len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2;
@@ -451,14 +420,13 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
for(; ; ++len) {
const uint32_t dist = coder->matches[i].dist;
const uint32_t cur_and_len_price = normal_match_price
+ get_pos_len_price(coder,
+ get_dist_len_price(coder,
dist, len, pos_state);

if (cur_and_len_price < coder->opts[len].price) {
coder->opts[len].price = cur_and_len_price;
coder->opts[len].pos_prev = 0;
coder->opts[len].back_prev
= dist + REP_DISTANCES;
coder->opts[len].back_prev = dist + REPS;
coder->opts[len].prev_1_is_literal = false;
}

@@ -473,7 +441,7 @@ helper1(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,


static inline uint32_t
helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
helper2(lzma_lzma1_encoder *coder, uint32_t *reps, const uint8_t *buf,
uint32_t len_end, uint32_t position, const uint32_t cur,
const uint32_t nice_len, const uint32_t buf_avail_full)
{
@@ -481,19 +449,6 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
uint32_t new_len = coder->longest_match_length;
uint32_t pos_prev = coder->opts[cur].pos_prev;
lzma_lzma_state state;
uint32_t buf_avail;
uint32_t rep_index;
uint32_t i;

uint32_t cur_price;
uint8_t current_byte;
uint8_t match_byte;
uint32_t pos_state;
uint32_t cur_and_1_price;
bool next_is_literal = false;
uint32_t match_price;
uint32_t rep_match_price;
uint32_t start_len = 2;

if (coder->opts[cur].prev_1_is_literal) {
--pos_prev;
@@ -501,7 +456,7 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (coder->opts[cur].prev_2) {
state = coder->opts[coder->opts[cur].pos_prev_2].state;

if (coder->opts[cur].back_prev_2 < REP_DISTANCES)
if (coder->opts[cur].back_prev_2 < REPS)
update_long_rep(state);
else
update_match(state);
@@ -530,48 +485,49 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
update_long_rep(state);
} else {
pos = coder->opts[cur].back_prev;
if (pos < REP_DISTANCES)
if (pos < REPS)
update_long_rep(state);
else
update_match(state);
}

if (pos < REP_DISTANCES) {
uint32_t i;

if (pos < REPS) {
reps[0] = coder->opts[pos_prev].backs[pos];

uint32_t i;
for (i = 1; i <= pos; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1];

for (; i < REP_DISTANCES; ++i)
for (; i < REPS; ++i)
reps[i] = coder->opts[pos_prev].backs[i];

} else {
reps[0] = pos - REP_DISTANCES;
reps[0] = pos - REPS;

for (i = 1; i < REP_DISTANCES; ++i)
for (uint32_t i = 1; i < REPS; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1];
}
}

coder->opts[cur].state = state;

for (i = 0; i < REP_DISTANCES; ++i)
for (uint32_t i = 0; i < REPS; ++i)
coder->opts[cur].backs[i] = reps[i];

cur_price = coder->opts[cur].price;
const uint32_t cur_price = coder->opts[cur].price;

current_byte = *buf;
match_byte = *(buf - reps[0] - 1);
const uint8_t current_byte = *buf;
const uint8_t match_byte = *(buf - reps[0] - 1);

pos_state = position & coder->pos_mask;
const uint32_t pos_state = position & coder->pos_mask;

cur_and_1_price = cur_price
const uint32_t cur_and_1_price = cur_price
+ rc_bit_0_price(coder->is_match[state][pos_state])
+ get_literal_price(coder, position, buf[-1],
!is_literal_state(state), match_byte, current_byte);

bool next_is_literal = false;

if (cur_and_1_price < coder->opts[cur + 1].price) {
coder->opts[cur + 1].price = cur_and_1_price;
coder->opts[cur + 1].pos_prev = cur;
@@ -579,9 +535,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
next_is_literal = true;
}

match_price = cur_price
const uint32_t match_price = cur_price
+ rc_bit_1_price(coder->is_match[state][pos_state]);
rep_match_price = match_price
const uint32_t rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[state]);

if (match_byte == current_byte
@@ -602,40 +558,31 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (buf_avail_full < 2)
return len_end;

buf_avail = my_min(buf_avail_full, nice_len);
const uint32_t buf_avail = my_min(buf_avail_full, nice_len);

if (!next_is_literal && match_byte != current_byte) { // speed optimization
// try literal + rep0
const uint8_t *const buf_back = buf - reps[0] - 1;
const uint32_t limit = my_min(buf_avail_full, nice_len + 1);

uint32_t len_test = 1;
while (len_test < limit && buf[len_test] == buf_back[len_test])
++len_test;

--len_test;
const uint32_t len_test = lzma_memcmplen(buf, buf_back, 1, limit) - 1;

if (len_test >= 2) {
uint32_t pos_state_next;
uint32_t next_rep_match_price;
uint32_t offset;
uint32_t cur_and_len_price;

lzma_lzma_state state_2 = state;
update_literal(state_2);

pos_state_next = (position + 1) & coder->pos_mask;
next_rep_match_price = cur_and_1_price
const uint32_t pos_state_next = (position + 1) & coder->pos_mask;
const uint32_t next_rep_match_price = cur_and_1_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);

//for (; len_test >= 2; --len_test) {
offset = cur + 1 + len_test;
const uint32_t offset = cur + 1 + len_test;

while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;

cur_and_len_price = next_rep_match_price
const uint32_t cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test,
state_2, pos_state_next);

@@ -651,23 +598,20 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
}


for (rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
uint32_t len_test, len_test_2, len_test_temp;
uint32_t price, limit;
uint32_t start_len = 2; // speed optimization

for (uint32_t rep_index = 0; rep_index < REPS; ++rep_index) {
const uint8_t *const buf_back = buf - reps[rep_index] - 1;
if (not_equal_16(buf, buf_back))
continue;

for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test];
++len_test) ;
uint32_t len_test = lzma_memcmplen(buf, buf_back, 2, buf_avail);

while (len_end < cur + len_test)
coder->opts[++len_end].price = RC_INFINITY_PRICE;

len_test_temp = len_test;
price = rep_match_price + get_pure_rep_price(
const uint32_t len_test_temp = len_test;
const uint32_t price = rep_match_price + get_pure_rep_price(
coder, rep_index, state, pos_state);

do {
@@ -689,8 +633,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
start_len = len_test + 1;


len_test_2 = len_test + 1;
limit = my_min(buf_avail_full,
uint32_t len_test_2 = len_test + 1;
const uint32_t limit = my_min(buf_avail_full,
len_test_2 + nice_len);
for (; len_test_2 < limit
&& buf[len_test_2] == buf_back[len_test_2];
@@ -699,18 +643,12 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1;

if (len_test_2 >= 2) {
uint32_t pos_state_next;
uint32_t cur_and_len_literal_price;
uint32_t next_rep_match_price;
uint32_t offset;
uint32_t cur_and_len_price;

lzma_lzma_state state_2 = state;
update_long_rep(state_2);

pos_state_next = (position + len_test) & coder->pos_mask;
uint32_t pos_state_next = (position + len_test) & coder->pos_mask;

cur_and_len_literal_price = price
const uint32_t cur_and_len_literal_price = price
+ get_len_price(&coder->rep_len_encoder,
len_test, pos_state)
+ rc_bit_0_price(coder->is_match[state_2][pos_state_next])
@@ -722,17 +660,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,

pos_state_next = (position + len_test + 1) & coder->pos_mask;

next_rep_match_price = cur_and_len_literal_price
const uint32_t next_rep_match_price = cur_and_len_literal_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);

//for(; len_test_2 >= 2; len_test_2--) {
offset = cur + len_test + 1 + len_test_2;
const uint32_t offset = cur + len_test + 1 + len_test_2;

while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;

cur_and_len_price = next_rep_match_price
const uint32_t cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test_2,
state_2, pos_state_next);

@@ -763,29 +701,27 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,


if (new_len >= start_len) {
uint32_t len_test;
uint32_t i = 0;

const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[state]);

while (len_end < cur + new_len)
coder->opts[++len_end].price = RC_INFINITY_PRICE;

uint32_t i = 0;
while (start_len > coder->matches[i].len)
++i;

for (len_test = start_len; ; ++len_test) {
for (uint32_t len_test = start_len; ; ++len_test) {
const uint32_t cur_back = coder->matches[i].dist;
uint32_t cur_and_len_price = normal_match_price
+ get_pos_len_price(coder,
+ get_dist_len_price(coder,
cur_back, len_test, pos_state);

if (cur_and_len_price < coder->opts[cur + len_test].price) {
coder->opts[cur + len_test].price = cur_and_len_price;
coder->opts[cur + len_test].pos_prev = cur;
coder->opts[cur + len_test].back_prev
= cur_back + REP_DISTANCES;
= cur_back + REPS;
coder->opts[cur + len_test].prev_1_is_literal = false;
}

@@ -803,16 +739,12 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1;

if (len_test_2 >= 2) {
uint32_t pos_state_next;
uint32_t cur_and_len_literal_price;
uint32_t next_rep_match_price;
uint32_t offset;

lzma_lzma_state state_2 = state;
update_match(state_2);
pos_state_next = (position + len_test) & coder->pos_mask;
uint32_t pos_state_next
= (position + len_test) & coder->pos_mask;

cur_and_len_literal_price = cur_and_len_price
const uint32_t cur_and_len_literal_price = cur_and_len_price
+ rc_bit_0_price(
coder->is_match[state_2][pos_state_next])
+ get_literal_price(coder,
@@ -825,14 +757,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
update_literal(state_2);
pos_state_next = (pos_state_next + 1) & coder->pos_mask;

next_rep_match_price
const uint32_t next_rep_match_price
= cur_and_len_literal_price
+ rc_bit_1_price(
coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]);

// for(; len_test_2 >= 2; --len_test_2) {
offset = cur + len_test + 1 + len_test_2;
const uint32_t offset = cur + len_test + 1 + len_test_2;

while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE;
@@ -849,7 +781,7 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
coder->opts[offset].prev_2 = true;
coder->opts[offset].pos_prev_2 = cur;
coder->opts[offset].back_prev_2
= cur_back + REP_DISTANCES;
= cur_back + REPS;
}
//}
}
@@ -865,14 +797,11 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,


extern void
lzma_lzma_optimum_normal(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
uint32_t *LZMA_RESTRICT back_res, uint32_t *LZMA_RESTRICT len_res,
lzma_lzma_optimum_normal(lzma_lzma1_encoder *restrict coder,
lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position)
{
uint32_t reps[REP_DISTANCES];
uint32_t len_end;
uint32_t cur;

// If we have symbols pending, return the next pending symbol.
if (coder->opts_end_index != coder->opts_current_index) {
assert(mf->read_ahead > 0);
@@ -889,9 +818,9 @@ lzma_lzma_optimum_normal(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT
// In liblzma they were moved into this single place.
if (mf->read_ahead == 0) {
if (coder->match_price_count >= (1 << 7))
fill_distances_prices(coder);
fill_dist_prices(coder);

if (coder->align_price_count >= ALIGN_TABLE_SIZE)
if (coder->align_price_count >= ALIGN_SIZE)
fill_align_prices(coder);
}

@@ -899,13 +828,14 @@ lzma_lzma_optimum_normal(lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT
// the original function into two pieces makes it at least a little
// more readable, since those two parts don't share many variables.

len_end = helper1(coder, mf, back_res, len_res, position);
uint32_t len_end = helper1(coder, mf, back_res, len_res, position);
if (len_end == UINT32_MAX)
return;


uint32_t reps[REPS];
memcpy(reps, coder->reps, sizeof(reps));

uint32_t cur;
for (cur = 1; cur < len_end; ++cur) {
assert(cur < OPTS);

@@ -2,6 +2,7 @@
//
/// \file lzma_encoder_presets.c
/// \brief Encoder presets
/// \note xz needs this even when only decoding is enabled.
//
// Author: Lasse Collin
//
@@ -16,10 +17,6 @@
extern LZMA_API(lzma_bool)
lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
{
static const uint8_t dict_pow2[]
= { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
static const uint8_t depths[] = { 4, 8, 24, 48 };

const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK;
const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK;
const uint32_t supported_flags = LZMA_PRESET_EXTREME;
@@ -34,12 +31,15 @@ lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
options->lp = LZMA_LP_DEFAULT;
options->pb = LZMA_PB_DEFAULT;

static const uint8_t dict_pow2[]
= { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
options->dict_size = UINT32_C(1) << dict_pow2[level];

if (level <= 3) {
options->mode = LZMA_MODE_FAST;
options->mf = level == 0 ? LZMA_MF_HC3 : LZMA_MF_HC4;
options->nice_len = level <= 1 ? 128 : 273;
static const uint8_t depths[] = { 4, 8, 24, 48 };
options->depth = depths[level];
} else {
options->mode = LZMA_MODE_NORMAL;

@@ -64,12 +64,12 @@ typedef struct {
uint32_t pos_prev; // pos_next;
uint32_t back_prev;

uint32_t backs[REP_DISTANCES];
uint32_t backs[REPS];

} lzma_optimal;


struct lzma_coder_s {
struct lzma_lzma1_encoder_s {
/// Range encoder
lzma_range_encoder rc;

@@ -77,7 +77,7 @@ struct lzma_coder_s {
lzma_lzma_state state;

/// The four most recent match distances
uint32_t reps[REP_DISTANCES];
uint32_t reps[REPS];

/// Array of match candidates
lzma_match matches[MATCH_LEN_MAX + 1];
@@ -112,9 +112,9 @@ struct lzma_coder_s {
probability is_rep1[STATES];
probability is_rep2[STATES];
probability is_rep0_long[STATES][POS_STATES_MAX];
probability pos_slot[LEN_TO_POS_STATES][POS_SLOTS];
probability pos_special[FULL_DISTANCES - END_POS_MODEL_INDEX];
probability pos_align[ALIGN_TABLE_SIZE];
probability dist_slot[DIST_STATES][DIST_SLOTS];
probability dist_special[FULL_DISTANCES - DIST_MODEL_END];
probability dist_align[ALIGN_SIZE];

// These are the same as in lzma_decoder.c except that the encoders
// include also price tables.
@@ -122,12 +122,12 @@ struct lzma_coder_s {
lzma_length_encoder rep_len_encoder;

// Price tables
uint32_t pos_slot_prices[LEN_TO_POS_STATES][POS_SLOTS];
uint32_t distances_prices[LEN_TO_POS_STATES][FULL_DISTANCES];
uint32_t dist_slot_prices[DIST_STATES][DIST_SLOTS];
uint32_t dist_prices[DIST_STATES][FULL_DISTANCES];
uint32_t dist_table_size;
uint32_t match_price_count;

uint32_t align_prices[ALIGN_TABLE_SIZE];
uint32_t align_prices[ALIGN_SIZE];
uint32_t align_price_count;

// Optimal
@@ -138,11 +138,11 @@ struct lzma_coder_s {


extern void lzma_lzma_optimum_fast(
lzma_coder *LZMA_RESTRICT coder, lzma_mf *LZMA_RESTRICT mf,
uint32_t *LZMA_RESTRICT back_res, uint32_t *LZMA_RESTRICT len_res);
lzma_lzma1_encoder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res);

extern void lzma_lzma_optimum_normal(lzma_coder *LZMA_RESTRICT coder,
lzma_mf *LZMA_RESTRICT mf, uint32_t *LZMA_RESTRICT back_res,
uint32_t *LZMA_RESTRICT len_res, uint32_t position);
extern void lzma_lzma_optimum_normal(lzma_lzma1_encoder *restrict coder,
lzma_mf *restrict mf, uint32_t *restrict back_res,
uint32_t *restrict len_res, uint32_t position);

#endif

@@ -40,11 +40,8 @@
// This does the same for a complete bit tree.
// (A tree represented as an array.)
#define bittree_reset(probs, bit_levels) \
do { \
uint32_t bt_i; \
for (bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
bit_reset((probs)[bt_i]); \
} while (0)
for (uint32_t bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
bit_reset((probs)[bt_i])


//////////////////////

@@ -25,20 +25,26 @@ typedef struct {


/// Reads the first five bytes to initialize the range decoder.
static inline bool
rc_read_init(lzma_range_decoder *rc, const uint8_t *LZMA_RESTRICT in,
size_t *LZMA_RESTRICT in_pos, size_t in_size)
static inline lzma_ret
rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,
size_t *restrict in_pos, size_t in_size)
{
while (rc->init_bytes_left > 0) {
if (*in_pos == in_size)
return false;
return LZMA_OK;

// The first byte is always 0x00. It could have been omitted
// in LZMA2 but it wasn't, so one byte is wasted in every
// LZMA2 chunk.
if (rc->init_bytes_left == 5 && in[*in_pos] != 0x00)
return LZMA_DATA_ERROR;

rc->code = (rc->code << 8) | in[*in_pos];
++*in_pos;
--rc->init_bytes_left;
}

return true;
return LZMA_STREAM_END;
}
Some files were not shown because too many files have changed in this diff