diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 558cc49..6c998ea 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -164,8 +164,10 @@ jobs:
           -DANDROID_PLATFORM=android-33 \
           -DANDROID_ABI=arm64-v8a ../..
           cmake --build .
+          ${{ steps.setup-ndk.outputs.ndk-path }}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip kptools
           mv kptools kptools-android
+
       - name: Release
         uses: ncipollo/release-action@v1.14.0
         with:
diff --git a/kernel/include/kpmodule.h b/kernel/include/kpmodule.h
index 6553a08..21908d3 100644
--- a/kernel/include/kpmodule.h
+++ b/kernel/include/kpmodule.h
@@ -12,7 +12,7 @@ __attribute__((section(".kpm.info"), unused, aligned(1))) = #name "=" info

 #define KPM_NAME_LEN 32
-#define KPM_VERSION_LEN 12
+#define KPM_VERSION_LEN 32
 #define KPM_LICENSE_LEN 32
 #define KPM_AUTHOR_LEN 32
 #define KPM_DESCRIPTION_LEN 512
@@ -41,4 +41,4 @@ typedef long (*mod_exitcall_t)(void *reserved);
 #define KPM_EXIT(fn) \
     static mod_exitcall_t __kpm_exitcall_##fn __attribute__((__used__)) __attribute__((__section__(".kpm.exit"))) = fn

-#endif
\ No newline at end of file
+#endif
diff --git a/tools/.gitignore b/tools/.gitignore
index a448f1b..67cf4bb 100644
--- a/tools/.gitignore
+++ b/tools/.gitignore
@@ -27,6 +27,7 @@ build/*
 preset.h
 kptools
+kptools.exe

 #
-build_android.sh
\ No newline at end of file
+build_android.sh
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 487a67d..68780a7 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -1,8 +1,11 @@
 cmake_minimum_required(VERSION 3.5)
 project(kptools)

-include_directories(${CMAKE_CURRENT_BINARY_DIR})
+file(GLOB_RECURSE LIB_SOURCES "lib/*.c")

+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+add_definitions(-DZSTD_DISABLE_ASM)
+add_definitions(-DXZ_DEC_ANY_CHECK)
 set(SOURCES
     image.c
     kallsym.c
@@ -13,13 +16,29 @@ set(SOURCES
     symbol.c
     kpm.c
     common.c
-    sha256.c
+    bootimg.c
+)
+
+add_executable(
+    kptools
+    ${SOURCES}
+    ${LIB_SOURCES}
 )

-add_executable(kptools ${SOURCES})
+file(GLOB_RECURSE ALL_HEADERS "lib/*.h")
+set(LIB_INCLUDE_DIRS "")
+foreach(_header_file ${ALL_HEADERS})
+    get_filename_component(_dir ${_header_file} DIRECTORY)
+    list(APPEND LIB_INCLUDE_DIRS ${_dir})
+endforeach()
+list(REMOVE_DUPLICATES LIB_INCLUDE_DIRS)
+target_include_directories(kptools PRIVATE ${LIB_INCLUDE_DIRS})
+target_compile_definitions(kptools PRIVATE BZ_NO_STDIO)

 find_package(ZLIB REQUIRED)
+target_include_directories(kptools PRIVATE ${ZLIB_INCLUDE_DIRS})
+
 if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static")
     # Explicitly specify the static library path
@@ -28,6 +47,4 @@
 elseif(CMAKE_SYSTEM_NAME STREQUAL "Android")
     target_link_libraries(kptools PRIVATE ${ZLIB_LIBRARIES})
 else()
     target_link_libraries(kptools PRIVATE ${ZLIB_LIBRARIES})
-endif()
-
-target_include_directories(kptools PRIVATE ${ZLIB_INCLUDE_DIRS})
\ No newline at end of file
+endif()
\ No newline at end of file
diff --git a/tools/Makefile b/tools/Makefile
index 7b57cd9..3740852 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -1,25 +1,30 @@
-
 CFLAGS = -std=c11 -Wall -Wextra -Wno-unused -Wno-unused-parameter
+CFLAGS += -I. -I./lib -DBZ_NO_STDIO -DXZ_DEC_ANY_CHECK
 LDFLAGS = -static -lz
+
 ifdef DEBUG
-	CFLAGS += -DDEBUG -g
+    CFLAGS += -DDEBUG -g
 endif

-objs := image.o kallsym.o kptools.o order.o insn.o patch.o symbol.o kpm.o common.o
-objs += sha256.o
+OBJS := image.o kallsym.o kptools.o order.o insn.o patch.o symbol.o kpm.o common.o bootimg.o
+
+LIB_SRCS := $(wildcard lib/*.c) $(wildcard lib/*/*.c)
+LIB_OBJS := $(LIB_SRCS:.c=.o)
+
+ALL_OBJS := $(OBJS) $(LIB_OBJS)

 .PHONY: all
 all: kptools

-.PHONY: kptools
-kptools: ${objs}
-	${CC} -o $@ $^ $(LDFLAGS)
+kptools: $(ALL_OBJS)
+	$(CC) -o $@ $^ $(LDFLAGS)
+
 %.o : %.c
 	$(CC) -c $(CFLAGS) $(CPPFLAGS) $< -o $@

 .PHONY: clean
 clean:
-	rm -rf preset.h
 	rm -rf kptools
-	find . -name "*.o" | xargs rm -f
\ No newline at end of file
+	rm -f $(ALL_OBJS)
diff --git a/tools/bootimg.c b/tools/bootimg.c
new file mode 100644
index 0000000..6d0910e
--- /dev/null
+++ b/tools/bootimg.c
@@ -0,0 +1,1050 @@
+// build deps: zlib-devel, liblz4-devel, liblzma-devel
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <zlib.h>
+#include "bootimg.h"
+#include "common.h"
+#include "lib/lz4/lz4.h"
+#include "lib/lz4/lz4frame.h"
+#include "lib/lz4/lz4hc.h"
+#include "lib/bz2/bzlib.h"
+#include "lib/xz/xz.h"
+#include "lib/sha/sha256.h"
+#include "lib/sha/sha1.h"
+// #include "lib/zstd/zstd.h"
+
+static uint64_t XXH_swap64(uint64_t x) {
+    return ((x << 56) & 0xff00000000000000ULL) |
+           ((x << 40) & 0x00ff000000000000ULL) |
+           ((x << 24) & 0x0000ff0000000000ULL) |
+           ((x << 8) & 0x000000ff00000000ULL) |
+           ((x >> 8) & 0x00000000ff000000ULL) |
+           ((x >> 24) & 0x0000000000ff0000ULL) |
+           ((x >> 40) & 0x000000000000ff00ULL) |
+           ((x >> 56) & 0x00000000000000ffULL);
+}
+
+static uint32_t XXH_swap32(uint32_t x) {
+    return ((x >> 24) & 0x000000FF) |
+           ((x >> 8) & 0x0000FF00) |
+           ((x << 8) & 0x00FF0000) |
+           ((x << 24) & 0xFF000000);
+}
+
+static uint32_t fdt32_to_cpu(uint32_t val) {
+    return ((val << 24) & 0xff000000) |
+           ((val << 8) & 0x00ff0000) |
+           ((val >> 8) & 0x0000ff00) |
+           ((val >> 24) & 0x000000ff);
+}
+
+static void *my_memmem(const void *haystack, size_t haystacklen,
+                       const void *needle, size_t needlelen) {
+    if (needlelen == 0) return (void *)haystack;
+    if (haystacklen < needlelen) return NULL;
+
+    const char *h = (const char *)haystack;
+    const char *n = (const char *)needle;
+    size_t i;
+
+    for (i = 0; i <= haystacklen - needlelen; i++) {
+        if (h[i] == n[0] && memcmp(&h[i], n, needlelen) == 0) {
+            return (void *)&h[i];
+        }
+    }
+    return NULL;
+}
+
+// Classify hdr.id: 1 = no checksum stored, 2 = 32-byte SHA-256, 0 = 20-byte SHA-1
+int is_sha256(uint32_t id[8]) {
+    if ((id[0] | id[1] | id[2] | id[3] | id[4] | id[5]) == 0) {
+        return 1;
+    }
+    if (id[6] != 0 || id[7] != 0) {
+        return 2;
+    }
+
+    return 0;
+}
+
+static int find_dtb_offset(const uint8_t *buf, unsigned int sz) {
+    if (!buf || sz < sizeof(struct fdt_header)) return -1;
+    const uint8_t *curr = buf;
+    const uint8_t *end = buf + sz;
+
+    while (curr < end - sizeof(struct fdt_header)) {
+        curr = my_memmem(curr, end - curr, DTB_MAGIC, 4);
+        if (curr == NULL) return -1;
+
+        struct fdt_header *fdt_hdr = (struct fdt_header *)curr;
+        uint32_t totalsize = fdt32_to_cpu(fdt_hdr->totalsize);
+        uint32_t off_dt_struct = fdt32_to_cpu(fdt_hdr->off_dt_struct);
+        if (totalsize > (uint32_t)(end - curr) || totalsize <= 0x48) {
+            curr += 4;
+            continue;
+        }
+
+        // the first struct tag must be FDT_BEGIN_NODE (0x00000001)
+        if (curr + off_dt_struct + 4 <= end) {
+            uint32_t *tag = (uint32_t *)(curr + off_dt_struct);
+            if (fdt32_to_cpu(*tag) == 0x00000001) {
+                return (int)(curr - buf);
+            }
+        }
+        curr += 4;
+    }
+    return -1;
+}
+
+int write_data_to_file(const char *path, const void *data,
size_t size) { + FILE *fp = fopen(path, "wb"); + if (!fp) return -1; + fwrite(data, 1, size, fp); + fclose(fp); + chmod(path, 0644); + return 0; +} + +int compress_gzip(const uint8_t *in_data, size_t in_size, uint8_t **out_data, uint32_t *out_size) { + z_stream strm = {0}; + if (deflateInit2(&strm, 9, Z_DEFLATED, 16 + MAX_WBITS, 8, Z_DEFAULT_STRATEGY) != Z_OK) + return -1; + + uint32_t max_out_size = deflateBound(&strm, in_size); + *out_data = malloc(max_out_size); + if (!*out_data) { deflateEnd(&strm); return -1; } + + strm.next_in = (Bytef *)in_data; + strm.avail_in = in_size; + strm.next_out = *out_data; + strm.avail_out = max_out_size; + + int ret = deflate(&strm, Z_FINISH); + if (ret != Z_STREAM_END) { + free(*out_data); + deflateEnd(&strm); + return -2; + } + + *out_size = strm.total_out; + deflateEnd(&strm); + return 0; +} +int decompress_gzip(const uint8_t *in_data, size_t in_size, const char *out_path) { + z_stream strm = {0}; + strm.next_in = (Bytef *)in_data; + strm.avail_in = in_size; + + if (inflateInit2(&strm, 16 + MAX_WBITS) != Z_OK) return -1; + + FILE *out = fopen(out_path, "wb"); + if (!out) { inflateEnd(&strm); return -1; } + + uint8_t out_buf[40960]; // 40KB buffer + int ret; + do { + strm.next_out = out_buf; + strm.avail_out = sizeof(out_buf); + ret = inflate(&strm, Z_NO_FLUSH); + if (ret < 0 && ret != Z_STREAM_END) { + tools_logi("Error: Gzip inflate failed (err: %d)\n", ret); + fclose(out); + inflateEnd(&strm); + return -2; + } + size_t have = sizeof(out_buf) - strm.avail_out; + fwrite(out_buf, 1, have, out); + } while (ret != Z_STREAM_END); + + fclose(out); + chmod(out_path, 0644); + inflateEnd(&strm); + return 0; +} + +int compress_lz4(const uint8_t *in_data, size_t in_size, uint8_t **out_data, uint32_t *out_size) { + + size_t max_out_size = LZ4F_compressFrameBound(in_size, NULL); + + *out_data = (uint8_t *)malloc(max_out_size); + if (!*out_data) { + return -1; + } + + // 2. 
use default config (NULL -> default LZ4F_preferences_t) + // LZ4F_compressFrame will produce a standard LZ4 frame (magic number 0x184D2204) + size_t compressed_size = LZ4F_compressFrame(*out_data, max_out_size, in_data, in_size, NULL); + + if (LZ4F_isError(compressed_size)) { + free(*out_data); + return -2; + } + + *out_size = (uint32_t)compressed_size; + return 0; +} +int compress_lz4_le( + const uint8_t *in_data, + size_t in_size, + uint8_t **out_data, + uint32_t *out_size, + compress_head k_head +) { + + size_t max_blocks = + (in_size + LZ4_BLOCK_SIZE - 1) / LZ4_BLOCK_SIZE; + // uint32_t max_blocks_1 = + // ((uint32_t)k_head.magic[4]) + // | ((uint32_t)k_head.magic[5] << 8) + // | ((uint32_t)k_head.magic[6] << 16) + // | ((uint32_t)k_head.magic[7] << 24); + tools_logi("Calculated max blocks: %zu\n", max_blocks); + size_t max_out = + sizeof(uint32_t) + // MAGIC + max_blocks * (sizeof(uint32_t) + // block header + LZ4_compressBound(LZ4_BLOCK_SIZE)); + + uint8_t *out = malloc(max_out); + if (!out) { + tools_loge("Failed to allocate memory for LZ4 compression,size: %zu\n",max_out); + return -2; + } + + size_t out_off = 0; + + /* write MAGIC */ + uint32_t magic = LZ4_MAGIC; + memcpy(out + out_off, &magic, sizeof(magic)); + out_off += sizeof(magic); + + size_t in_off = 0; + + while (in_off < in_size) { + size_t chunk_size = in_size - in_off; + if (chunk_size > LZ4_BLOCK_SIZE) + chunk_size = LZ4_BLOCK_SIZE; + + int max_dst = LZ4_compressBound((int)chunk_size); + + int compressed = LZ4_compress_HC( + (const char *)(in_data + in_off), + (char *)(out + out_off + sizeof(uint32_t)), + (int)chunk_size, + max_dst, + LZ4HC_CLEVEL + ); + + if (compressed <= 0) { + tools_loge("LZ4 compression failed for block at offset %zu\n", in_off); + free(out); + return -3; + } + + /* write compressed block size */ + uint32_t csz = (uint32_t)compressed; + memcpy(out + out_off, &csz, sizeof(csz)); + out_off += sizeof(uint32_t); + + /* advance over compressed data */ + out_off += compressed; + in_off += chunk_size; + } + + *out_data = out; + *out_size = (uint32_t)out_off; + return 0; +} + +// int compress_zstd(const uint8_t *in_data, size_t in_size, uint8_t **out_data, uint32_t *out_size) { +// size_t const max_out_size = ZSTD_compressBound(in_size); +// *out_data = malloc(max_out_size); +// if (!*out_data) return -1; +// size_t const cSize = ZSTD_compress(*out_data, max_out_size, in_data, in_size, 3); + +// if (ZSTD_isError(cSize)) { +// free(*out_data); +// return -2; +// } +// *out_size = (uint32_t)cSize; +// return 0; +// } +// int decompress_zstd(const uint8_t *src, size_t srcSize, uint8_t **dst, uint32_t *dstSize) { +// unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize); +// if (rSize == ZSTD_CONTENTSIZE_ERROR) return -1; +// if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) return -2; + +// *dst = malloc((size_t)rSize); +// if (!*dst) return -3; + +// size_t const dSize = ZSTD_decompress(*dst, (size_t)rSize, src, srcSize); + +// if (ZSTD_isError(dSize)) { +// free(*dst); +// return -4; +// } +// *dstSize = (uint32_t)dSize; +// return 0; +// } + +int decompress_xz(const uint8_t *src, size_t srcSize, uint8_t **dst, uint32_t *dstSize) { + + xz_crc32_init(); + + + struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0); + if (s == NULL) return -1; + + + uint32_t dstCapacity = 128 * 1024 * 1024; + *dst = (uint8_t *)malloc(dstCapacity); + if (!*dst) { + xz_dec_end(s); + return -1; + } + + struct xz_buf b; + b.in = src; + b.in_pos = 0; + b.in_size = srcSize; + b.out = *dst; + b.out_pos = 0; + b.out_size = dstCapacity; 
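+    // xz-embedded in XZ_SINGLE mode is strictly one-shot: the entire compressed
+    // stream must already be in b.in, and b.out must be large enough to hold the
+    // whole decoded kernel (hence the fixed 128 MiB capacity above); on success
+    // xz_dec_run() returns XZ_STREAM_END rather than XZ_OK.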
+ enum xz_ret ret = xz_dec_run(s, &b); + + if (ret != XZ_STREAM_END) { + tools_loge("XZ Decompression failed: %d\n", ret); + free(*dst); + xz_dec_end(s); + return -1; + } + + *dstSize = (uint32_t)b.out_pos; + xz_dec_end(s); + return 0; +} + +int decompress_lzma(const uint8_t *src, size_t srcSize, uint8_t **dst, uint32_t *dstSize) { + xz_crc32_init(); + + struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0); + if (!s) return -1; + + uint32_t dstCapacity = 128 * 1024 * 1024; + *dst = (uint8_t *)malloc(dstCapacity); + if (!*dst) { + xz_dec_end(s); + return -1; + } + + struct xz_buf b; + b.in = src; + b.in_pos = 0; + b.in_size = srcSize; + b.out = *dst; + b.out_pos = 0; + b.out_size = dstCapacity; + + enum xz_ret ret = xz_dec_run(s, &b); + + if (ret != XZ_STREAM_END) { + tools_loge("Your xz-embedded version only supports XZ container (Method 6).\n"); + free(*dst); + xz_dec_end(s); + return -1; + } + + *dstSize = (uint32_t)b.out_pos; + xz_dec_end(s); + return 0; +} + +int auto_depress(const uint8_t *data, size_t size, const char *out_path) { + if (size < 4) return -1; + compress_head k_head; + memcpy(&k_head, data, sizeof(k_head)); + int method = detect_compress_method(k_head); + tools_logi("Auto-detect compression method: %d\n", method); + + if (method == 1) { //Gzip + tools_logi("Detected GZIP compressed kernel.\n"); + if (decompress_gzip(data, size, out_path) == 0) { + tools_logi(" Decompressed to %s\n", out_path); + return 0; + } else { + tools_logi(" Gzip decompression failed.\n"); + return -1; + } + } + + + if (method == 2) { + tools_logi(" Detected LZ4 Frame. Decompressing with lz4frame...\n"); + LZ4F_decompressionContext_t dctx; + LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION); + + size_t dstCapacity = 64 * 1024 * 1024; + void* dst = malloc(dstCapacity); + if (!dst) return -1; + + size_t consumedSize = size; + size_t producedSize = dstCapacity; + + size_t ret = LZ4F_decompress(dctx, dst, &producedSize, data, &consumedSize, NULL); + + if (LZ4F_isError(ret)) { + tools_loge("LZ4 Decompression failed: %s\n", LZ4F_getErrorName(ret)); + free(dst); + LZ4F_freeDecompressionContext(dctx); + return -1; + } else { + tools_logi("Decompressed: %zu bytes\n", producedSize); + write_data_to_file(out_path, (uint8_t*)dst, (uint32_t)producedSize); + free(dst); + LZ4F_freeDecompressionContext(dctx); + return 0; + } + } + + if (method == 3) { + tools_logi("Probing LZ4 Legacy (block-based)...\n"); + + const uint8_t *p = (const uint8_t *)data; + const uint8_t *end = p + size; + + + if (size < 4) + goto not_lz4; + + + if (*(uint32_t *)p != LZ4_MAGIC) + goto not_lz4; + + p += 4; + + size_t out_cap = 64 * 1024 * 1024; + uint8_t *out = malloc(out_cap); + if (!out) + goto not_lz4; + + size_t out_off = 0; + + uint8_t *block_out = malloc(LZ4_BLOCK_SIZE); + if (!block_out) { + free(out); + goto not_lz4; + } + + int decoded_any = 0; + while (1) { + uint32_t block_size; + + if (p + 4 > end) + break; + + memcpy(&block_size, p, 4); + p += 4; + + if (block_size == 0) + break; + + if (block_size > LZ4_compressBound(LZ4_BLOCK_SIZE)) + goto fail; + + if (p + block_size > end) + goto fail; + + int decoded = LZ4_decompress_safe( + (const char *)p, + (char *)block_out, + (int)block_size, + LZ4_BLOCK_SIZE + ); + + if (decoded < 0) + goto fail; + + decoded_any = 1; + + if (out_off + (size_t)decoded > out_cap) { + size_t new_cap = out_cap * 2; + while (new_cap < out_off + (size_t)decoded) + new_cap *= 2; + + uint8_t *tmp = realloc(out, new_cap); + if (!tmp) + goto fail; + + out = tmp; + out_cap = new_cap; + } + + memcpy(out + 
out_off, block_out, (size_t)decoded); + out_off += (size_t)decoded; + + p += block_size; + } + + + if (!decoded_any) + goto fail; + + tools_logi("LZ4 block decompressed: %zu bytes\n", out_off); + write_data_to_file(out_path, out, (uint32_t)out_off); + + free(block_out); + free(out); + return 0; + + fail: + free(block_out); + free(out); + + not_lz4: + + tools_logi("Not LZ4 block format, fallback.\n"); + } + + // till now no kernel use this + // if (method == 4) { + // tools_logi("Detected ZSTD compressed kernel. Decompressing...\n"); + // unsigned long long const rSize = ZSTD_getFrameContentSize(data, size); + // if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN) { + // tools_loge("Not a valid Zstd frame or size unknown.\n"); + // return -1; + // } + + // uint8_t *dst = malloc((size_t)rSize); + // if (!dst) return -1; + + // size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, data, size); + + // if (ZSTD_isError(dSize)) { + // tools_loge(" Zstd Decompression failed: %s\n", ZSTD_getErrorName(dSize)); + // free(dst); + // return -1; + // } else { + // tools_logi(" Decompressed: %zu bytes\n", dSize); + // write_data_to_file(out_path, dst, (uint32_t)dSize); + // free(dst); + // return 0; + // } + // } + + if (method == 5) { // BZIP2 + tools_logi("Detected BZIP2. Decompressing...\n"); + + + unsigned int dstCapacity = 64 * 1024 * 1024; + void* dst = malloc(dstCapacity); + if (!dst) { + tools_loge("Failed to allocate memory for BZIP2 decompression.\n"); + return -1; + } + + unsigned int producedSize = dstCapacity; + unsigned int consumedSize = (unsigned int)size; + + int ret = BZ2_bzBuffToBuffDecompress((char*)dst, &producedSize, (char*)data, consumedSize, 0, 0); + + if (ret != BZ_OK) { + tools_loge(" BZIP2 Decompression failed with error code: %d\n", ret); + free(dst); + return -1; + } + + tools_logi(" BZIP2 Decompressed: %u bytes\n", producedSize); + write_data_to_file(out_path, (uint8_t*)dst, producedSize); + free(dst); + return 0; + } + + if (method == 6) { // XZ + tools_logi(" Detected XZ format. Decompressing...\n"); + + uint8_t *xz_dst = NULL; + uint32_t xz_size = 0; + + if (decompress_xz(data, size, &xz_dst, &xz_size) == 0) { + tools_logi("XZ Decompressed: %u bytes\n", xz_size); + write_data_to_file(out_path, xz_dst, xz_size); + free(xz_dst); + return 0; + } else { + tools_loge(" XZ Decompression failed.\n"); + return -1; + } + } + + if (method == 7) { // LZMA Legacy + tools_logi("Detected Legacy LZMA format. 
Decompressing...\n"); + + uint8_t *lzma_dst = NULL; + uint32_t lzma_size = 0; + + if (decompress_lzma(data, size, &lzma_dst, &lzma_size) == 0) { + tools_logi(" LZMA Decompressed: %u bytes\n", lzma_size); + write_data_to_file(out_path, lzma_dst, lzma_size); + free(lzma_dst); + return 0; + } else { + tools_loge(" LZMA Decompression failed.\n"); + return -1; + } + } + + tools_logi("Treating as Raw Kernel (or unknown format).\n"); + if (write_data_to_file(out_path, data, size) == 0) { + tools_logi(" Saved raw kernel to %s\n", out_path); + return 0; + } + + return -1; +} + +int extract_kernel(const char *bootimg_path) { + FILE *fp = fopen(bootimg_path, "rb"); + if (!fp) { + tools_logi("Error: Cannot open %s\n", bootimg_path); + return -1; + } + + struct boot_img_hdr hdr; + fread(&hdr, sizeof(hdr), 1, fp); + + if (memcmp(hdr.magic, "ANDROID!", 8) != 0) { + tools_logi("Error: Invalid boot image magic.\n"); + fclose(fp); + return -2; + } + + uint32_t page_size = hdr.page_size; + uint32_t kernel_offset = page_size; // Kernel starts after the first page + if (hdr.unused[0] >= 3) { + kernel_offset = 4096; + } + if (hdr.unused[0] > 10) { + kernel_offset = page_size; + } + + tools_logi("Kernel size: %d,Header Version: %d, Offset: %d\n", hdr.kernel_size, hdr.unused[0], kernel_offset); + + uint8_t *kernel_data = malloc(hdr.kernel_size); + if (!kernel_data) { + fclose(fp); + return -3; + } + + fseek(fp, kernel_offset, SEEK_SET); + fread(kernel_data, 1, hdr.kernel_size, fp); + fclose(fp); + + int res = auto_depress(kernel_data, hdr.kernel_size, "kernel"); + + free(kernel_data); + return res; +} + +int detect_compress_method(compress_head data) { + // 1. GZIP / ZOPFLI (1F 8B) + if (data.magic[0] == 0x1F && data.magic[1] == 0x8B) return 1; + if (data.magic[0] == 0x1F && data.magic[1] == 0x9E) return 1; + // 2. LZ4 (04 22 4D 18 is Frame ) + if (data.magic[0] == 0x04 && data.magic[1] == 0x22 && + data.magic[2] == 0x4D && data.magic[3] == 0x18) return 2; + + if (data.magic[0] == 0x03 && data.magic[1] == 0x21 && + data.magic[2] == 0x4C && data.magic[3] == 0x18) return 2; + + // LZ4 Legacy (02 21 4C 18) + if (data.magic[0] == 0x02 && data.magic[1] == 0x21 && + data.magic[2] == 0x4C && data.magic[3] == 0x18) return 3; + + // 3. ZSTD 28 B5 2F FD + if (data.magic[0] == 0x28 && data.magic[1] == 0xB5 && + data.magic[2] == 0x2F && data.magic[3] == 0xFD) return 4; + + // 4. BZIP2 (BZh) - 42 5A 68 + if (data.magic[0] == 0x42 && data.magic[1] == 0x5A && + data.magic[2] == 0x68) return 5; + + // 5. XZ - FD 37 7A 58 5A 00 + if (data.magic[0] == 0xFD && data.magic[1] == 0x37 && + data.magic[2] == 0x7A && data.magic[3] == 0x58) return 6; + + // 6. 
LZMA - 5D 00 00 + if (data.magic[0] == 0x5D && data.magic[1] == 0x00 && + data.magic[2] == 0x00) return 7; + + return 0; // Raw Kernel +} + +int repack_bootimg(const char *orig_boot_path, + const char *new_kernel_path, + const char *out_boot_path) { + tools_logi(" Starting automatic repack...\n"); + + FILE *f_orig = fopen(orig_boot_path, "rb"); + if (!f_orig) return -1; + + struct boot_img_hdr hdr; + struct avb_footer avb; + uint32_t extracted_size = 0; + fread(&hdr, sizeof(hdr), 1, f_orig); + + if (memcmp(hdr.magic, "ANDROID!", 8) != 0) { + tools_logi("Not a valid Android Boot Image.\n"); + fclose(f_orig); + return -2; + } + + fseek(f_orig, 0, SEEK_END); + long total_size = ftell(f_orig); + + uint32_t avb_size = 0; + //uint8_t *foot_buf = NULL; + //foot_buf = malloc(64); + fseek(f_orig, total_size-sizeof(avb), SEEK_SET); + fread(&avb, sizeof(avb), 1, f_orig); + + uint32_t header_ver = hdr.unused[0]; + if (header_ver > 10){header_ver = 0;extracted_size = hdr.unused[0];} + //if (header_ver == 0){tools_loge_exit("we don't support this device any more\n");} + uint32_t page_size = (header_ver >= 3) ? 4096 : hdr.page_size; + uint32_t fmt_size = (header_ver >= 3) ? hdr.kernel_addr : hdr.ramdisk_size; + + tools_logi("Header Version: %u, Page Size: %u, fmt_size: %u\n", header_ver, page_size,fmt_size); + + uint8_t *old_k_full = malloc(hdr.kernel_size); + fseek(f_orig, page_size, SEEK_SET); + fread(old_k_full, 1, hdr.kernel_size, f_orig); + + + compress_head k_head; + memcpy(&k_head, old_k_full, sizeof(k_head)); + int method = detect_compress_method(k_head); + + // DTB (v1/v2) + uint8_t *extracted_dtb = NULL; + uint32_t dtb_size = 0; + if (header_ver < 3) { + int dtb_off = find_dtb_offset(old_k_full, hdr.kernel_size); + if (dtb_off > 0) { + dtb_size = hdr.kernel_size - dtb_off; + extracted_dtb = malloc(dtb_size); + memcpy(extracted_dtb, old_k_full + dtb_off, dtb_size); + tools_logi("Detected DTB appended to kernel. 
Size: %u\n", dtb_size);
+        }
+    }
+    free(old_k_full);
+
+    FILE *f_new_k = fopen(new_kernel_path, "rb");
+    if (!f_new_k) { fclose(f_orig); if (extracted_dtb) free(extracted_dtb); return -3; }
+    fseek(f_new_k, 0, SEEK_END);
+    uint32_t raw_k_size = ftell(f_new_k);
+    fseek(f_new_k, 0, SEEK_SET);
+    uint8_t *raw_k_buf = malloc(raw_k_size);
+    fread(raw_k_buf, 1, raw_k_size, f_new_k);
+    fclose(f_new_k);
+
+    uint8_t *final_k_buf = raw_k_buf;
+    uint32_t final_k_size = raw_k_size;
+    uint8_t *compressed_buf = NULL;
+
+    if (method == 1) {
+        tools_logi("Compressing new kernel with GZIP...\n");
+        if (compress_gzip(raw_k_buf, raw_k_size, &compressed_buf, &final_k_size) == 0) {
+            final_k_buf = compressed_buf;
+        }
+    }
+    if (method == 2) {
+        tools_logi("Compressing new kernel with LZ4...\n");
+        if (compress_lz4(raw_k_buf, raw_k_size, &compressed_buf, &final_k_size) == 0) {
+            final_k_buf = compressed_buf;
+        }
+    }
+    if (method == 3) {
+        tools_logi("Compressing new kernel with LZ4 Legacy...\n");
+        if (compress_lz4_le(raw_k_buf, raw_k_size, &compressed_buf, &final_k_size, k_head) == 0) {
+            final_k_buf = compressed_buf;
+        }
+    }
+    if (method == 4) {
+        tools_logi("Kernel uses zstd, which is not supported yet; please report it to the devs.\n");
+        return -1;
+    }
+
+    if (method == 5) { // BZIP2
+        tools_logi("Compressing new kernel with BZIP2 (level 9)...\n");
+
+        unsigned int max_out_size = (unsigned int)(raw_k_size * 1.01) + 600;
+        // assign to the outer compressed_buf (no local shadow) so the final cleanup frees it
+        compressed_buf = (uint8_t *)malloc(max_out_size);
+        if (!compressed_buf) return -1;
+
+        unsigned int final_size = max_out_size;
+        unsigned int source_size = (unsigned int)raw_k_size;
+
+        int ret = BZ2_bzBuffToBuffCompress((char *)compressed_buf, &final_size, (char *)raw_k_buf, source_size, 9, 0, 30);
+
+        if (ret == BZ_OK) {
+            final_k_buf = compressed_buf;
+            final_k_size = final_size;
+            tools_logi("BZIP2 compression complete. Size: %u bytes\n", final_k_size);
+        } else {
+            tools_loge("BZIP2 compression failed: %d\n", ret);
+            free(compressed_buf);
+            return -1;
+        }
+    }
+    if (method == 6 || method == 7) {
+        tools_logi("Original was XZ/LZMA. Repacking as GZIP for compatibility...\n");
+        // use the outer compressed_buf/final_k_size here: re-declaring them locally
+        // would shadow the outer variables and leave hdr.kernel_size computed from
+        // the raw (uncompressed) size below
+        if (compress_gzip(raw_k_buf, raw_k_size, &compressed_buf, &final_k_size) == 0) {
+            final_k_buf = compressed_buf;
+            method = 1;
+            tools_logi("Repacked as GZIP. New size: %u bytes\n", final_k_size);
+        } else {
+            tools_loge("GZIP compression failed during XZ-to-GZIP conversion.\n");
+            return -1;
+        }
+    }
+    tools_logi("Final kernel size after compression (if applied): %u bytes\n", final_k_size);
+    uint32_t old_k_aligned = ALIGN(hdr.kernel_size, page_size);
+    uint32_t rest_data_offset = page_size + old_k_aligned;
+    uint32_t rest_data_size = (total_size > rest_data_offset) ?
(total_size - rest_data_offset) : 0; + hdr.kernel_size = final_k_size + dtb_size; + uint32_t checksum_aligned = ALIGN(fmt_size , page_size); + uint8_t *rest_buf_tmp = NULL; + uint8_t *rest_buf = NULL; + uint32_t rest_buf_offset = 0; + if (rest_data_size > 0) { + rest_buf_tmp = malloc(rest_data_size); + fseek(f_orig, rest_data_offset, SEEK_SET); + fread(rest_buf_tmp, 1, rest_data_size-sizeof(avb), f_orig); + for (int32_t i = (int32_t)rest_data_size - 1; i >= 0; i--) { + if (rest_buf_tmp[i] != 0) { + rest_buf_offset = (uint32_t)(i + 1); + break; + } + } + if (rest_buf_offset > rest_data_size / 3 * 2){ + tools_logi("warning: overload size of rest data, kptools may crash,Rest data size: %u bytes, Actual used size: %u bytes\n", rest_data_size, rest_buf_offset); + + rest_buf = rest_buf_tmp; + rest_data_size = rest_buf_offset + sizeof(avb); + + }else{ + rest_buf = malloc(rest_buf_offset); + memcpy(rest_buf, rest_buf_tmp, rest_buf_offset); + tools_logi("Rest data size: %u bytes, Actual used size: %u bytes\n", rest_data_size, rest_buf_offset); + rest_data_size = rest_buf_offset; + free(rest_buf_tmp); + } + + } + fclose(f_orig); + + uint32_t use_sha256 = is_sha256(hdr.id); + int len = use_sha256 ? SHA256_BLOCK_SIZE : SHA1_BLOCK_SIZE; + BYTE buf[len]; + if (use_sha256 != 1 || header_ver <= 3) { + + if (use_sha256){ + SHA256_CTX ctx; + sha256_init(&ctx); + sha256_update(&ctx, (const BYTE *)final_k_buf, hdr.kernel_size); + sha256_update(&ctx, (const BYTE *)&hdr.kernel_size, 4); + sha256_update(&ctx, (const BYTE *)rest_buf, fmt_size); + sha256_update(&ctx, (const BYTE *)&fmt_size, sizeof(fmt_size)); + + sha256_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, hdr.second_size); + sha256_update(&ctx, (const BYTE *)&hdr.second_size, 4); + tools_logi("second_size=%d\n",hdr.second_size); + if (hdr.second_size > 0){ + checksum_aligned += ALIGN(hdr.second_size , page_size); + } + //to do extra data + if (extracted_size) { + tools_logi("extracted_size=%d\n",extracted_size); + sha256_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, page_size); + sha256_update(&ctx, (const BYTE *)&extracted_size, 4); + checksum_aligned += ALIGN(extracted_size , page_size); + } + if (header_ver == 1 || header_ver == 2){ + tools_logi("recovery_dtbo_size=%d\n",hdr.recovery_dtbo_size); + sha256_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, hdr.recovery_dtbo_size); + sha256_update(&ctx, (const BYTE *)&hdr.recovery_dtbo_size, 4); + checksum_aligned += ALIGN(hdr.recovery_dtbo_size , page_size); + } + if (header_ver == 2){ + tools_logi("dtb_size=%d\n",hdr.dtb_size); + sha256_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, hdr.dtb_size); + sha256_update(&ctx, (const BYTE *)&hdr.dtb_size, 4); + } + + sha256_final(&ctx, buf); + memcpy(hdr.id, buf, 32); + }else{ + SHA1_CTX ctx; + sha1_init(&ctx); + sha1_update(&ctx, (const BYTE *)final_k_buf, hdr.kernel_size); + sha1_update(&ctx, (const BYTE *)&hdr.kernel_size, 4); + sha1_update(&ctx, (const BYTE *)rest_buf, fmt_size); + sha1_update(&ctx, (const BYTE *)&fmt_size, sizeof(fmt_size)); + + + sha1_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, hdr.second_size); + sha1_update(&ctx, (const BYTE *)&hdr.second_size, 4); + if (hdr.second_size > 0){ + checksum_aligned += ALIGN(hdr.second_size , page_size); + } + tools_logi("second_size=%d,offset=%d\n",hdr.second_size, checksum_aligned+rest_data_offset); + //to do extra data + if (extracted_size) { + tools_logi("extracted_size=%d,offset=%d\n",extracted_size, checksum_aligned+rest_data_offset); + sha1_update(&ctx, 
(const BYTE *)rest_buf + checksum_aligned, page_size); + sha1_update(&ctx, (const BYTE *)&extracted_size, 4); + checksum_aligned += ALIGN(extracted_size , page_size); + } + + if (header_ver == 1 || header_ver == 2){ + tools_logi("recovery_dtbo_size=%d,offset=%d\n",hdr.recovery_dtbo_size, checksum_aligned+rest_data_offset); + sha1_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, hdr.recovery_dtbo_size); + sha1_update(&ctx, (const BYTE *)&hdr.recovery_dtbo_size, 4); + checksum_aligned += ALIGN(hdr.recovery_dtbo_size , page_size); + } + + if (header_ver == 2){ + tools_logi("dtb_size=%d,dtb_addr=%lu\n",hdr.dtb_size,hdr.dtb_addr); + sha1_update(&ctx, (const BYTE *)rest_buf + checksum_aligned, hdr.dtb_size); + sha1_update(&ctx, (const BYTE *)&hdr.dtb_size, 4); + } + + sha1_final(&ctx, buf); + memcpy(hdr.id, buf, 20); + } + } + FILE *f_out = fopen(out_boot_path, "wb"); + if (!f_out) { return -4; } + + + fwrite(&hdr, sizeof(hdr), 1, f_out); + fseek(f_out, page_size, SEEK_SET); + + + fwrite(final_k_buf, 1, final_k_size, f_out); + if (extracted_dtb) { + fwrite(extracted_dtb, 1, dtb_size, f_out); + } + tools_logi("dtb_size=%d\n",dtb_size); + + uint32_t new_k_total_aligned = ALIGN(hdr.kernel_size, page_size); + fseek(f_out, page_size + new_k_total_aligned, SEEK_SET); + //tools_logi("rest_data_size=%d,total_size=%d,rest_data_offset=%d,now=%d\n",rest_data_size , total_size , rest_data_offset,page_size + new_k_total_aligned); + //const uint8_t avb_magic[] = "AVB0"; + uint8_t avb_sig[] = { + 0x41,0x56,0x42,0x30, + 0x00,0x00,0x00,0x01, + 0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00, + 0x00,0x00,0x00 + }; + + + if (rest_buf) { + uint8_t *avb_ptr = my_memmem(rest_buf, rest_data_size, avb_sig, sizeof(avb_sig)); + + if (!avb_ptr) { + avb_sig[18] = 0x01; // Try next version + avb_ptr = my_memmem(rest_buf, rest_data_size, avb_sig, sizeof(avb_sig)); + } + if (!avb_ptr) { + avb_sig[18] = 0x02; // Try next version + avb_ptr = my_memmem(rest_buf, rest_data_size, avb_sig, sizeof(avb_sig)); + } + if (avb_ptr) { + uint8_t *last_avb = NULL; + uint8_t *search_ptr = avb_ptr; + while (search_ptr) { + last_avb = search_ptr; + tools_logi("Found AVB footer in rest data.%p\n", search_ptr); + uint32_t offset = (uint32_t)(search_ptr - rest_buf) + sizeof(avb_sig); + if (offset >= rest_data_size) + break; + + search_ptr = my_memmem( + rest_buf + offset, + rest_data_size - offset, + avb_sig, + sizeof(avb_sig) + ); + } + avb_ptr = last_avb; + + } + + if (avb_ptr) { + size_t avb_offset = avb_ptr - rest_buf; + tools_logi("avb_offset=%zu\n",avb_offset); + uint32_t avb_size = page_size + avb_offset + new_k_total_aligned; + avb.data_size1 = XXH_swap32(avb_size); + avb.data_size2 = XXH_swap32(avb_size); + } + if (rest_data_size > total_size - page_size - new_k_total_aligned){ + total_size = ALIGN(page_size + new_k_total_aligned + rest_data_size, page_size); // when rest data is larger than original, we need to expand the total size to fit it + fwrite(rest_buf, 1, total_size - page_size - new_k_total_aligned -sizeof(avb), f_out); + fwrite(&avb, sizeof(avb), 1, f_out); + }else{ + fwrite(rest_buf, 1, rest_data_size, f_out); + } + } + + + + long current_pos = ftell(f_out); + + // Padding + //tools_logi("current_post=%d,total_size=%d\n",current_pos,total_size); + if (current_pos < total_size - sizeof(avb)) { + uint32_t padding = total_size - current_pos - sizeof(avb); + uint8_t *zero_pad = calloc(1, padding); + fwrite(zero_pad, 1, padding, f_out); + free(zero_pad); + fwrite(&avb, sizeof(avb), 1, f_out); + } + + fclose(f_out); + if 
(compressed_buf) free(compressed_buf);
+    if (extracted_dtb) free(extracted_dtb);
+    free(raw_k_buf);
+    if (rest_buf) free(rest_buf);
+
+    tools_logi("Repack completed: %s\n", out_boot_path);
+    return 0;
+}
+int cacluate_sha1(const char *file) {
+    FILE *fp = fopen(file, "rb");
+    if (!fp) {
+        return -1;
+    }
+    SHA1_CTX ctx;
+    sha1_init(&ctx);
+
+    uint8_t *buffer = malloc(409600);
+    size_t bytesRead;
+    // read full 409600-byte chunks; sizeof(buffer) would only be the pointer size
+    while ((bytesRead = fread(buffer, 1, 409600, fp)) > 0) {
+        sha1_update(&ctx, buffer, bytesRead);
+    }
+    fclose(fp);
+    uint8_t hash[SHA1_BLOCK_SIZE];
+    sha1_final(&ctx, hash);
+    free(buffer);
+    for (int i = 0; i < 20; i++) {
+        printf("%02x", hash[i]);
+    }
+    return 0;
+}
diff --git a/tools/bootimg.h b/tools/bootimg.h
new file mode 100644
index 0000000..c32dbc9
--- /dev/null
+++ b/tools/bootimg.h
@@ -0,0 +1,86 @@
+
+#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define PAGE_SIZE_DEFAULT 4096
+
+#define LZ4_MAGIC 0x184c2102
+#define LZ4_BLOCK_SIZE 0x800000
+#define LZ4HC_CLEVEL 12
+#define AVB_FOOTER_SIZE 64
+
+struct boot_img_hdr {
+    uint8_t magic[8]; // "ANDROID!"
+    uint32_t kernel_size;
+    uint32_t kernel_addr; // from header v3 on, this field holds ramdisk_size
+    uint32_t ramdisk_size;
+    uint32_t ramdisk_addr;
+    uint32_t second_size;
+    uint32_t second_addr;
+    uint32_t tags_addr;
+    uint32_t page_size; // 4096
+    uint32_t unused[2];
+    uint8_t name[16];
+    uint8_t cmdline[512];
+    uint32_t id[8];
+    uint8_t extra_cmdline[1024];
+
+    // v2
+    uint32_t recovery_dtbo_size;
+    uint64_t recovery_dtbo_offset;
+
+    // v3
+    uint32_t dtb_size;
+    uint64_t dtb_addr;
+};
+struct kernel_hdr {
+    uint32_t code0;       // Executable code
+    uint32_t code1;       // Executable code
+    uint64_t text_offset; // Image load offset, little endian
+    uint64_t image_size;  // Effective Image size, little endian
+    uint64_t flags;       // kernel flags, little endian
+    uint64_t res2;        // reserved
+    uint64_t res3;        // reserved
+    uint64_t res4;        // reserved
+    uint32_t magic;       // Magic number, "ARM\x64"
+    uint32_t res5;        // reserved
+};
+
+typedef struct {
+    uint8_t magic[8];
+} compress_head;
+
+#define DTB_MAGIC "\xd0\x0d\xfe\xed"
+
+struct fdt_header {
+    uint32_t magic;
+    uint32_t totalsize;
+    uint32_t off_dt_struct;
+    uint32_t off_dt_strings;
+    uint32_t off_mem_rsvmap;
+    uint32_t version;
+    uint32_t last_comp_version;
+    uint32_t boot_cpuid_phys;
+    uint32_t size_dt_strings;
+    uint32_t size_dt_struct;
+};
+struct avb_footer {
+    uint32_t reverse[16];
+    /* 0x00 */ uint32_t magic;         /* ("AVBf") */
+    /* 0x04 */ uint32_t version;       /* 0x00000001 */
+    /* 0x08 */ uint64_t reserved1;     /* 0x0000000000000000 */
+    /* 0x10 */ uint32_t data_size1;    /* 0x00022FC000000000 */
+    /* 0x10 */ uint32_t data_size_1;   /* 0x00022FC000000000 */
+    /* 0x16 */ uint32_t data_size2;    /* same as data_size1 */
+    /* 0x16 */ uint32_t data_size_2;   /* same as data_size1 */
+    /* 0x20 */ uint64_t unknown_field; /* 0x0000000000000940 */
+    /* 0x30 */ uint8_t padding[24];
+} __attribute__((packed));
+
+int repack_bootimg(const char *orig_boot_path,
+                   const char *new_kernel_path,
+                   const char *out_boot_path);
+int extract_kernel(const char *bootimg_path);
+
+int detect_compress_method(compress_head data);
+int cacluate_sha1(const char *file);
\ No newline at end of file
diff --git a/tools/kallsym.c b/tools/kallsym.c
index 5b7d206..152171d 100644
--- a/tools/kallsym.c
+++ b/tools/kallsym.c
@@ -270,7 +270,8 @@ static int try_find_arm64_relo_table(kallsym_t *info, char *img, int32_t imglen)
         uint64_t r_offset = uint_unpack(img + cand, 8, info->is_be);
         uint64_t r_info = uint_unpack(img + cand + 8, 8, info->is_be);
         uint64_t r_addend = uint_unpack(img + cand + 16, 8, info->is_be);
-        if ((r_offset & 0xffff000000000000) == 0xffff000000000000 && r_info == 0x403) {
+        uint32_t r_type = r_info & 0xffffffff;
+        if ((r_offset & 0xffff000000000000) == 0xffff000000000000 && (r_type == 0x101 || r_type == 0x403)) {
             if (!(r_addend & 0xfff) && r_addend >= min_va && r_addend < kernel_va) kernel_va = r_addend;
             cand += 24;
             rela_num++;
@@ -330,6 +331,11 @@ static int try_find_arm64_relo_table(kallsym_t *info, char *img, int32_t imglen)
             return -1;
         }

+        uint32_t r_type = r_info & 0xffffffff;
+        if (r_type == 0x101) {
+            r_addend += kernel_va;
+        }
+
         uint64_t value = uint_unpack(img + offset, 8, info->is_be);
         if (value == r_addend) continue;
         *(uint64_t *)(img + offset) = value + r_addend;
diff --git a/tools/kptools.c b/tools/kptools.c
index a322141..2f0affb 100644
--- a/tools/kptools.c
+++ b/tools/kptools.c
@@ -16,7 +16,7 @@
 #include
 #include "../version"

-
+#include "bootimg.h"
 #include "preset.h"
 #include "image.h"
 #include "order.h"
@@ -47,7 +47,7 @@ void print_usage(char **argv)
            "    -l, --list    Print all patch information of the kernel image if (-i) is specified.\n"
            "                  Print extra item information if (-M) is specified.\n"
            "                  Print KernelPatch image information if (-k) is specified.\n"
-
+           "Unpack kernel: unpack <boot.img>\nRepack kernel: repack <boot.img>\n"
            "Options:\n"
            "    -i, --image PATH    Kernel image path.\n"
            "    -k, --kpimg PATH    KernelPatch image path.\n"
@@ -74,7 +74,23 @@ int main(int argc, char *argv[])
 {
     version = (MAJOR << 16) + (MINOR << 8) + PATCH;
     program_name = argv[0];
-
+    if (argc > 2) {
+        if (strcmp(argv[1], "unpack") == 0) {
+            set_log_enable(true);
+            return extract_kernel(argv[2]);
+        }
+        if (strcmp(argv[1], "unpacknolog") == 0) {
+            return extract_kernel(argv[2]);
+        }
+        if (strcmp(argv[1], "repack") == 0) {
+            set_log_enable(true);
+            return repack_bootimg(argv[2], "kernel", "new-boot.img");
+        }
+        if (strcmp(argv[1], "sha1") == 0) {
+            return cacluate_sha1(argv[2]);
+        }
+    }
     struct option longopts[] = { { "help", no_argument, NULL, 'h' },
                                  { "version", no_argument, NULL, 'v' },
diff --git a/tools/lib/bz2/blocksort.c b/tools/lib/bz2/blocksort.c
new file mode 100644
index 0000000..92d81fe
--- /dev/null
+++ b/tools/lib/bz2/blocksort.c
@@ -0,0 +1,1094 @@
+
+/*-------------------------------------------------------------*/
+/*--- Block sorting machinery                               ---*/
+/*---                                           blocksort.c ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+   This file is part of bzip2/libbzip2, a program and library for
+   lossless, block-sorting data compression.
+
+   bzip2/libbzip2 version 1.0.8 of 13 July 2019
+   Copyright (C) 1996-2019 Julian Seward <jseward@acm.org>
+
+   Please read the WARNING, DISCLAIMER and PATENTS sections in the
+   README file.
+
+   This program is released under the terms of the license contained
+   in the file LICENSE.
+ ------------------------------------------------------------------ */ + + +#include "bzlib_private.h" + +/*---------------------------------------------*/ +/*--- Fallback O(N log(N)^2) sorting ---*/ +/*--- algorithm, for repetitive blocks ---*/ +/*---------------------------------------------*/ + +/*---------------------------------------------*/ +static +__inline__ +void fallbackSimpleSort ( UInt32* fmap, + UInt32* eclass, + Int32 lo, + Int32 hi ) +{ + Int32 i, j, tmp; + UInt32 ec_tmp; + + if (lo == hi) return; + + if (hi - lo > 3) { + for ( i = hi-4; i >= lo; i-- ) { + tmp = fmap[i]; + ec_tmp = eclass[tmp]; + for ( j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4 ) + fmap[j-4] = fmap[j]; + fmap[j-4] = tmp; + } + } + + for ( i = hi-1; i >= lo; i-- ) { + tmp = fmap[i]; + ec_tmp = eclass[tmp]; + for ( j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++ ) + fmap[j-1] = fmap[j]; + fmap[j-1] = tmp; + } +} + + +/*---------------------------------------------*/ +#define fswap(zz1, zz2) \ + { Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; } + +#define fvswap(zzp1, zzp2, zzn) \ +{ \ + Int32 yyp1 = (zzp1); \ + Int32 yyp2 = (zzp2); \ + Int32 yyn = (zzn); \ + while (yyn > 0) { \ + fswap(fmap[yyp1], fmap[yyp2]); \ + yyp1++; yyp2++; yyn--; \ + } \ +} + + +#define fmin(a,b) ((a) < (b)) ? (a) : (b) + +#define fpush(lz,hz) { stackLo[sp] = lz; \ + stackHi[sp] = hz; \ + sp++; } + +#define fpop(lz,hz) { sp--; \ + lz = stackLo[sp]; \ + hz = stackHi[sp]; } + +#define FALLBACK_QSORT_SMALL_THRESH 10 +#define FALLBACK_QSORT_STACK_SIZE 100 + + +static +void fallbackQSort3 ( UInt32* fmap, + UInt32* eclass, + Int32 loSt, + Int32 hiSt ) +{ + Int32 unLo, unHi, ltLo, gtHi, n, m; + Int32 sp, lo, hi; + UInt32 med, r, r3; + Int32 stackLo[FALLBACK_QSORT_STACK_SIZE]; + Int32 stackHi[FALLBACK_QSORT_STACK_SIZE]; + + r = 0; + + sp = 0; + fpush ( loSt, hiSt ); + + while (sp > 0) { + + AssertH ( sp < FALLBACK_QSORT_STACK_SIZE - 1, 1004 ); + + fpop ( lo, hi ); + if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) { + fallbackSimpleSort ( fmap, eclass, lo, hi ); + continue; + } + + /* Random partitioning. Median of 3 sometimes fails to + avoid bad cases. Median of 9 seems to help but + looks rather expensive. This too seems to work but + is cheaper. Guidance for the magic constants + 7621 and 32768 is taken from Sedgewick's algorithms + book, chapter 35. 
+ */ + r = ((r * 7621) + 1) % 32768; + r3 = r % 3; + if (r3 == 0) med = eclass[fmap[lo]]; else + if (r3 == 1) med = eclass[fmap[(lo+hi)>>1]]; else + med = eclass[fmap[hi]]; + + unLo = ltLo = lo; + unHi = gtHi = hi; + + while (1) { + while (1) { + if (unLo > unHi) break; + n = (Int32)eclass[fmap[unLo]] - (Int32)med; + if (n == 0) { + fswap(fmap[unLo], fmap[ltLo]); + ltLo++; unLo++; + continue; + }; + if (n > 0) break; + unLo++; + } + while (1) { + if (unLo > unHi) break; + n = (Int32)eclass[fmap[unHi]] - (Int32)med; + if (n == 0) { + fswap(fmap[unHi], fmap[gtHi]); + gtHi--; unHi--; + continue; + }; + if (n < 0) break; + unHi--; + } + if (unLo > unHi) break; + fswap(fmap[unLo], fmap[unHi]); unLo++; unHi--; + } + + AssertD ( unHi == unLo-1, "fallbackQSort3(2)" ); + + if (gtHi < ltLo) continue; + + n = fmin(ltLo-lo, unLo-ltLo); fvswap(lo, unLo-n, n); + m = fmin(hi-gtHi, gtHi-unHi); fvswap(unLo, hi-m+1, m); + + n = lo + unLo - ltLo - 1; + m = hi - (gtHi - unHi) + 1; + + if (n - lo > hi - m) { + fpush ( lo, n ); + fpush ( m, hi ); + } else { + fpush ( m, hi ); + fpush ( lo, n ); + } + } +} + +#undef fmin +#undef fpush +#undef fpop +#undef fswap +#undef fvswap +#undef FALLBACK_QSORT_SMALL_THRESH +#undef FALLBACK_QSORT_STACK_SIZE + + +/*---------------------------------------------*/ +/* Pre: + nblock > 0 + eclass exists for [0 .. nblock-1] + ((UChar*)eclass) [0 .. nblock-1] holds block + ptr exists for [0 .. nblock-1] + + Post: + ((UChar*)eclass) [0 .. nblock-1] holds block + All other areas of eclass destroyed + fmap [0 .. nblock-1] holds sorted order + bhtab [ 0 .. 2+(nblock/32) ] destroyed +*/ + +#define SET_BH(zz) bhtab[(zz) >> 5] |= ((UInt32)1 << ((zz) & 31)) +#define CLEAR_BH(zz) bhtab[(zz) >> 5] &= ~((UInt32)1 << ((zz) & 31)) +#define ISSET_BH(zz) (bhtab[(zz) >> 5] & ((UInt32)1 << ((zz) & 31))) +#define WORD_BH(zz) bhtab[(zz) >> 5] +#define UNALIGNED_BH(zz) ((zz) & 0x01f) + +static +void fallbackSort ( UInt32* fmap, + UInt32* eclass, + UInt32* bhtab, + Int32 nblock, + Int32 verb ) +{ + Int32 ftab[257]; + Int32 ftabCopy[256]; + Int32 H, i, j, k, l, r, cc, cc1; + Int32 nNotDone; + Int32 nBhtab; + UChar* eclass8 = (UChar*)eclass; + + /*-- + Initial 1-char radix sort to generate + initial fmap and initial BH bits. + --*/ + if (verb >= 4) + VPrintf0 ( " bucket sorting ...\n" ); + for (i = 0; i < 257; i++) ftab[i] = 0; + for (i = 0; i < nblock; i++) ftab[eclass8[i]]++; + for (i = 0; i < 256; i++) ftabCopy[i] = ftab[i]; + for (i = 1; i < 257; i++) ftab[i] += ftab[i-1]; + + for (i = 0; i < nblock; i++) { + j = eclass8[i]; + k = ftab[j] - 1; + ftab[j] = k; + fmap[k] = i; + } + + nBhtab = 2 + (nblock / 32); + for (i = 0; i < nBhtab; i++) bhtab[i] = 0; + for (i = 0; i < 256; i++) SET_BH(ftab[i]); + + /*-- + Inductively refine the buckets. Kind-of an + "exponential radix sort" (!), inspired by the + Manber-Myers suffix array construction algorithm. 
+ --*/ + + /*-- set sentinel bits for block-end detection --*/ + for (i = 0; i < 32; i++) { + SET_BH(nblock + 2*i); + CLEAR_BH(nblock + 2*i + 1); + } + + /*-- the log(N) loop --*/ + H = 1; + while (1) { + + if (verb >= 4) + VPrintf1 ( " depth %6d has ", H ); + + j = 0; + for (i = 0; i < nblock; i++) { + if (ISSET_BH(i)) j = i; + k = fmap[i] - H; if (k < 0) k += nblock; + eclass[k] = j; + } + + nNotDone = 0; + r = -1; + while (1) { + + /*-- find the next non-singleton bucket --*/ + k = r + 1; + while (ISSET_BH(k) && UNALIGNED_BH(k)) k++; + if (ISSET_BH(k)) { + while (WORD_BH(k) == 0xffffffff) k += 32; + while (ISSET_BH(k)) k++; + } + l = k - 1; + if (l >= nblock) break; + while (!ISSET_BH(k) && UNALIGNED_BH(k)) k++; + if (!ISSET_BH(k)) { + while (WORD_BH(k) == 0x00000000) k += 32; + while (!ISSET_BH(k)) k++; + } + r = k - 1; + if (r >= nblock) break; + + /*-- now [l, r] bracket current bucket --*/ + if (r > l) { + nNotDone += (r - l + 1); + fallbackQSort3 ( fmap, eclass, l, r ); + + /*-- scan bucket and generate header bits-- */ + cc = -1; + for (i = l; i <= r; i++) { + cc1 = eclass[fmap[i]]; + if (cc != cc1) { SET_BH(i); cc = cc1; }; + } + } + } + + if (verb >= 4) + VPrintf1 ( "%6d unresolved strings\n", nNotDone ); + + H *= 2; + if (H > nblock || nNotDone == 0) break; + } + + /*-- + Reconstruct the original block in + eclass8 [0 .. nblock-1], since the + previous phase destroyed it. + --*/ + if (verb >= 4) + VPrintf0 ( " reconstructing block ...\n" ); + j = 0; + for (i = 0; i < nblock; i++) { + while (ftabCopy[j] == 0) j++; + ftabCopy[j]--; + eclass8[fmap[i]] = (UChar)j; + } + AssertH ( j < 256, 1005 ); +} + +#undef SET_BH +#undef CLEAR_BH +#undef ISSET_BH +#undef WORD_BH +#undef UNALIGNED_BH + + +/*---------------------------------------------*/ +/*--- The main, O(N^2 log(N)) sorting ---*/ +/*--- algorithm. Faster for "normal" ---*/ +/*--- non-repetitive blocks. 
---*/ +/*---------------------------------------------*/ + +/*---------------------------------------------*/ +static +__inline__ +Bool mainGtU ( UInt32 i1, + UInt32 i2, + UChar* block, + UInt16* quadrant, + UInt32 nblock, + Int32* budget ) +{ + Int32 k; + UChar c1, c2; + UInt16 s1, s2; + + AssertD ( i1 != i2, "mainGtU" ); + /* 1 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 2 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 3 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 4 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 5 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 6 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 7 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 8 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 9 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 10 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 11 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + /* 12 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + + k = nblock + 8; + + do { + /* 1 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 2 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 3 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 4 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 5 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 6 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 7 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + /* 8 */ + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + + if (i1 >= nblock) i1 -= nblock; + if (i2 >= nblock) i2 -= nblock; + + k -= 8; + (*budget)--; + } + while (k >= 0); + + return False; +} + + +/*---------------------------------------------*/ +/*-- + Knuth's increments seem to work better + than Incerpi-Sedgewick here. Possibly + because the number of elems to sort is + usually small, typically <= 20. 
+--*/ +static +Int32 incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280, + 9841, 29524, 88573, 265720, + 797161, 2391484 }; + +static +void mainSimpleSort ( UInt32* ptr, + UChar* block, + UInt16* quadrant, + Int32 nblock, + Int32 lo, + Int32 hi, + Int32 d, + Int32* budget ) +{ + Int32 i, j, h, bigN, hp; + UInt32 v; + + bigN = hi - lo + 1; + if (bigN < 2) return; + + hp = 0; + while (incs[hp] < bigN) hp++; + hp--; + + for (; hp >= 0; hp--) { + h = incs[hp]; + + i = lo + h; + while (True) { + + /*-- copy 1 --*/ + if (i > hi) break; + v = ptr[i]; + j = i; + while ( mainGtU ( + ptr[j-h]+d, v+d, block, quadrant, nblock, budget + ) ) { + ptr[j] = ptr[j-h]; + j = j - h; + if (j <= (lo + h - 1)) break; + } + ptr[j] = v; + i++; + + /*-- copy 2 --*/ + if (i > hi) break; + v = ptr[i]; + j = i; + while ( mainGtU ( + ptr[j-h]+d, v+d, block, quadrant, nblock, budget + ) ) { + ptr[j] = ptr[j-h]; + j = j - h; + if (j <= (lo + h - 1)) break; + } + ptr[j] = v; + i++; + + /*-- copy 3 --*/ + if (i > hi) break; + v = ptr[i]; + j = i; + while ( mainGtU ( + ptr[j-h]+d, v+d, block, quadrant, nblock, budget + ) ) { + ptr[j] = ptr[j-h]; + j = j - h; + if (j <= (lo + h - 1)) break; + } + ptr[j] = v; + i++; + + if (*budget < 0) return; + } + } +} + + +/*---------------------------------------------*/ +/*-- + The following is an implementation of + an elegant 3-way quicksort for strings, + described in a paper "Fast Algorithms for + Sorting and Searching Strings", by Robert + Sedgewick and Jon L. Bentley. +--*/ + +#define mswap(zz1, zz2) \ + { Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; } + +#define mvswap(zzp1, zzp2, zzn) \ +{ \ + Int32 yyp1 = (zzp1); \ + Int32 yyp2 = (zzp2); \ + Int32 yyn = (zzn); \ + while (yyn > 0) { \ + mswap(ptr[yyp1], ptr[yyp2]); \ + yyp1++; yyp2++; yyn--; \ + } \ +} + +static +__inline__ +UChar mmed3 ( UChar a, UChar b, UChar c ) +{ + UChar t; + if (a > b) { t = a; a = b; b = t; }; + if (b > c) { + b = c; + if (a > b) b = a; + } + return b; +} + +#define mmin(a,b) ((a) < (b)) ? 
(a) : (b) + +#define mpush(lz,hz,dz) { stackLo[sp] = lz; \ + stackHi[sp] = hz; \ + stackD [sp] = dz; \ + sp++; } + +#define mpop(lz,hz,dz) { sp--; \ + lz = stackLo[sp]; \ + hz = stackHi[sp]; \ + dz = stackD [sp]; } + + +#define mnextsize(az) (nextHi[az]-nextLo[az]) + +#define mnextswap(az,bz) \ + { Int32 tz; \ + tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \ + tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \ + tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; } + + +#define MAIN_QSORT_SMALL_THRESH 20 +#define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT) +#define MAIN_QSORT_STACK_SIZE 100 + +static +void mainQSort3 ( UInt32* ptr, + UChar* block, + UInt16* quadrant, + Int32 nblock, + Int32 loSt, + Int32 hiSt, + Int32 dSt, + Int32* budget ) +{ + Int32 unLo, unHi, ltLo, gtHi, n, m, med; + Int32 sp, lo, hi, d; + + Int32 stackLo[MAIN_QSORT_STACK_SIZE]; + Int32 stackHi[MAIN_QSORT_STACK_SIZE]; + Int32 stackD [MAIN_QSORT_STACK_SIZE]; + + Int32 nextLo[3]; + Int32 nextHi[3]; + Int32 nextD [3]; + + sp = 0; + mpush ( loSt, hiSt, dSt ); + + while (sp > 0) { + + AssertH ( sp < MAIN_QSORT_STACK_SIZE - 2, 1001 ); + + mpop ( lo, hi, d ); + if (hi - lo < MAIN_QSORT_SMALL_THRESH || + d > MAIN_QSORT_DEPTH_THRESH) { + mainSimpleSort ( ptr, block, quadrant, nblock, lo, hi, d, budget ); + if (*budget < 0) return; + continue; + } + + med = (Int32) + mmed3 ( block[ptr[ lo ]+d], + block[ptr[ hi ]+d], + block[ptr[ (lo+hi)>>1 ]+d] ); + + unLo = ltLo = lo; + unHi = gtHi = hi; + + while (True) { + while (True) { + if (unLo > unHi) break; + n = ((Int32)block[ptr[unLo]+d]) - med; + if (n == 0) { + mswap(ptr[unLo], ptr[ltLo]); + ltLo++; unLo++; continue; + }; + if (n > 0) break; + unLo++; + } + while (True) { + if (unLo > unHi) break; + n = ((Int32)block[ptr[unHi]+d]) - med; + if (n == 0) { + mswap(ptr[unHi], ptr[gtHi]); + gtHi--; unHi--; continue; + }; + if (n < 0) break; + unHi--; + } + if (unLo > unHi) break; + mswap(ptr[unLo], ptr[unHi]); unLo++; unHi--; + } + + AssertD ( unHi == unLo-1, "mainQSort3(2)" ); + + if (gtHi < ltLo) { + mpush(lo, hi, d+1 ); + continue; + } + + n = mmin(ltLo-lo, unLo-ltLo); mvswap(lo, unLo-n, n); + m = mmin(hi-gtHi, gtHi-unHi); mvswap(unLo, hi-m+1, m); + + n = lo + unLo - ltLo - 1; + m = hi - (gtHi - unHi) + 1; + + nextLo[0] = lo; nextHi[0] = n; nextD[0] = d; + nextLo[1] = m; nextHi[1] = hi; nextD[1] = d; + nextLo[2] = n+1; nextHi[2] = m-1; nextD[2] = d+1; + + if (mnextsize(0) < mnextsize(1)) mnextswap(0,1); + if (mnextsize(1) < mnextsize(2)) mnextswap(1,2); + if (mnextsize(0) < mnextsize(1)) mnextswap(0,1); + + AssertD (mnextsize(0) >= mnextsize(1), "mainQSort3(8)" ); + AssertD (mnextsize(1) >= mnextsize(2), "mainQSort3(9)" ); + + mpush (nextLo[0], nextHi[0], nextD[0]); + mpush (nextLo[1], nextHi[1], nextD[1]); + mpush (nextLo[2], nextHi[2], nextD[2]); + } +} + +#undef mswap +#undef mvswap +#undef mpush +#undef mpop +#undef mmin +#undef mnextsize +#undef mnextswap +#undef MAIN_QSORT_SMALL_THRESH +#undef MAIN_QSORT_DEPTH_THRESH +#undef MAIN_QSORT_STACK_SIZE + + +/*---------------------------------------------*/ +/* Pre: + nblock > N_OVERSHOOT + block32 exists for [0 .. nblock-1 +N_OVERSHOOT] + ((UChar*)block32) [0 .. nblock-1] holds block + ptr exists for [0 .. nblock-1] + + Post: + ((UChar*)block32) [0 .. nblock-1] holds block + All other areas of block32 destroyed + ftab [0 .. 65536 ] destroyed + ptr [0 .. 
nblock-1] holds sorted order + if (*budget < 0), sorting was abandoned +*/ + +#define BIGFREQ(b) (ftab[((b)+1) << 8] - ftab[(b) << 8]) +#define SETMASK (1 << 21) +#define CLEARMASK (~(SETMASK)) + +static +void mainSort ( UInt32* ptr, + UChar* block, + UInt16* quadrant, + UInt32* ftab, + Int32 nblock, + Int32 verb, + Int32* budget ) +{ + Int32 i, j, k, ss, sb; + Int32 runningOrder[256]; + Bool bigDone[256]; + Int32 copyStart[256]; + Int32 copyEnd [256]; + UChar c1; + Int32 numQSorted; + UInt16 s; + if (verb >= 4) VPrintf0 ( " main sort initialise ...\n" ); + + /*-- set up the 2-byte frequency table --*/ + for (i = 65536; i >= 0; i--) ftab[i] = 0; + + j = block[0] << 8; + i = nblock-1; + for (; i >= 3; i -= 4) { + quadrant[i] = 0; + j = (j >> 8) | ( ((UInt16)block[i]) << 8); + ftab[j]++; + quadrant[i-1] = 0; + j = (j >> 8) | ( ((UInt16)block[i-1]) << 8); + ftab[j]++; + quadrant[i-2] = 0; + j = (j >> 8) | ( ((UInt16)block[i-2]) << 8); + ftab[j]++; + quadrant[i-3] = 0; + j = (j >> 8) | ( ((UInt16)block[i-3]) << 8); + ftab[j]++; + } + for (; i >= 0; i--) { + quadrant[i] = 0; + j = (j >> 8) | ( ((UInt16)block[i]) << 8); + ftab[j]++; + } + + /*-- (emphasises close relationship of block & quadrant) --*/ + for (i = 0; i < BZ_N_OVERSHOOT; i++) { + block [nblock+i] = block[i]; + quadrant[nblock+i] = 0; + } + + if (verb >= 4) VPrintf0 ( " bucket sorting ...\n" ); + + /*-- Complete the initial radix sort --*/ + for (i = 1; i <= 65536; i++) ftab[i] += ftab[i-1]; + + s = block[0] << 8; + i = nblock-1; + for (; i >= 3; i -= 4) { + s = (s >> 8) | (block[i] << 8); + j = ftab[s] -1; + ftab[s] = j; + ptr[j] = i; + s = (s >> 8) | (block[i-1] << 8); + j = ftab[s] -1; + ftab[s] = j; + ptr[j] = i-1; + s = (s >> 8) | (block[i-2] << 8); + j = ftab[s] -1; + ftab[s] = j; + ptr[j] = i-2; + s = (s >> 8) | (block[i-3] << 8); + j = ftab[s] -1; + ftab[s] = j; + ptr[j] = i-3; + } + for (; i >= 0; i--) { + s = (s >> 8) | (block[i] << 8); + j = ftab[s] -1; + ftab[s] = j; + ptr[j] = i; + } + + /*-- + Now ftab contains the first loc of every small bucket. + Calculate the running order, from smallest to largest + big bucket. + --*/ + for (i = 0; i <= 255; i++) { + bigDone [i] = False; + runningOrder[i] = i; + } + + { + Int32 vv; + Int32 h = 1; + do h = 3 * h + 1; while (h <= 256); + do { + h = h / 3; + for (i = h; i <= 255; i++) { + vv = runningOrder[i]; + j = i; + while ( BIGFREQ(runningOrder[j-h]) > BIGFREQ(vv) ) { + runningOrder[j] = runningOrder[j-h]; + j = j - h; + if (j <= (h - 1)) goto zero; + } + zero: + runningOrder[j] = vv; + } + } while (h != 1); + } + + /*-- + The main sorting loop. + --*/ + + numQSorted = 0; + + for (i = 0; i <= 255; i++) { + + /*-- + Process big buckets, starting with the least full. + Basically this is a 3-step process in which we call + mainQSort3 to sort the small buckets [ss, j], but + also make a big effort to avoid the calls if we can. + --*/ + ss = runningOrder[i]; + + /*-- + Step 1: + Complete the big bucket [ss] by quicksorting + any unsorted small buckets [ss, j], for j != ss. + Hopefully previous pointer-scanning phases have already + completed many of the small buckets [ss, j], so + we don't have to sort them at all. + --*/ + for (j = 0; j <= 255; j++) { + if (j != ss) { + sb = (ss << 8) + j; + if ( ! 
(ftab[sb] & SETMASK) ) { + Int32 lo = ftab[sb] & CLEARMASK; + Int32 hi = (ftab[sb+1] & CLEARMASK) - 1; + if (hi > lo) { + if (verb >= 4) + VPrintf4 ( " qsort [0x%x, 0x%x] " + "done %d this %d\n", + ss, j, numQSorted, hi - lo + 1 ); + mainQSort3 ( + ptr, block, quadrant, nblock, + lo, hi, BZ_N_RADIX, budget + ); + numQSorted += (hi - lo + 1); + if (*budget < 0) return; + } + } + ftab[sb] |= SETMASK; + } + } + + AssertH ( !bigDone[ss], 1006 ); + + /*-- + Step 2: + Now scan this big bucket [ss] so as to synthesise the + sorted order for small buckets [t, ss] for all t, + including, magically, the bucket [ss,ss] too. + This will avoid doing Real Work in subsequent Step 1's. + --*/ + { + for (j = 0; j <= 255; j++) { + copyStart[j] = ftab[(j << 8) + ss] & CLEARMASK; + copyEnd [j] = (ftab[(j << 8) + ss + 1] & CLEARMASK) - 1; + } + for (j = ftab[ss << 8] & CLEARMASK; j < copyStart[ss]; j++) { + k = ptr[j]-1; if (k < 0) k += nblock; + c1 = block[k]; + if (!bigDone[c1]) + ptr[ copyStart[c1]++ ] = k; + } + for (j = (ftab[(ss+1) << 8] & CLEARMASK) - 1; j > copyEnd[ss]; j--) { + k = ptr[j]-1; if (k < 0) k += nblock; + c1 = block[k]; + if (!bigDone[c1]) + ptr[ copyEnd[c1]-- ] = k; + } + } + + AssertH ( (copyStart[ss]-1 == copyEnd[ss]) + || + /* Extremely rare case missing in bzip2-1.0.0 and 1.0.1. + Necessity for this case is demonstrated by compressing + a sequence of approximately 48.5 million of character + 251; 1.0.0/1.0.1 will then die here. */ + (copyStart[ss] == 0 && copyEnd[ss] == nblock-1), + 1007 ) + + for (j = 0; j <= 255; j++) ftab[(j << 8) + ss] |= SETMASK; + + /*-- + Step 3: + The [ss] big bucket is now done. Record this fact, + and update the quadrant descriptors. Remember to + update quadrants in the overshoot area too, if + necessary. The "if (i < 255)" test merely skips + this updating for the last bucket processed, since + updating for the last bucket is pointless. + + The quadrant array provides a way to incrementally + cache sort orderings, as they appear, so as to + make subsequent comparisons in fullGtU() complete + faster. For repetitive blocks this makes a big + difference (but not big enough to be able to avoid + the fallback sorting mechanism, exponential radix sort). + + The precise meaning is: at all times: + + for 0 <= i < nblock and 0 <= j <= nblock + + if block[i] != block[j], + + then the relative values of quadrant[i] and + quadrant[j] are meaningless. + + else { + if quadrant[i] < quadrant[j] + then the string starting at i lexicographically + precedes the string starting at j + + else if quadrant[i] > quadrant[j] + then the string starting at j lexicographically + precedes the string starting at i + + else + the relative ordering of the strings starting + at i and j has not yet been determined. + } + --*/ + bigDone[ss] = True; + + if (i < 255) { + Int32 bbStart = ftab[ss << 8] & CLEARMASK; + Int32 bbSize = (ftab[(ss+1) << 8] & CLEARMASK) - bbStart; + Int32 shifts = 0; + + while ((bbSize >> shifts) > 65534) shifts++; + + for (j = bbSize-1; j >= 0; j--) { + Int32 a2update = ptr[bbStart + j]; + UInt16 qVal = (UInt16)(j >> shifts); + quadrant[a2update] = qVal; + if (a2update < BZ_N_OVERSHOOT) + quadrant[a2update + nblock] = qVal; + } + AssertH ( ((bbSize-1) >> shifts) <= 65535, 1002 ); + } + + } + + if (verb >= 4) + VPrintf3 ( " %d pointers, %d sorted, %d scanned\n", + nblock, numQSorted, nblock - numQSorted ); +} + +#undef BIGFREQ +#undef SETMASK +#undef CLEARMASK + + +/*---------------------------------------------*/ +/* Pre: + nblock > 0 + arr2 exists for [0 .. 
nblock-1 +N_OVERSHOOT] + ((UChar*)arr2) [0 .. nblock-1] holds block + arr1 exists for [0 .. nblock-1] + + Post: + ((UChar*)arr2) [0 .. nblock-1] holds block + All other areas of block destroyed + ftab [ 0 .. 65536 ] destroyed + arr1 [0 .. nblock-1] holds sorted order +*/ +void BZ2_blockSort ( EState* s ) +{ + UInt32* ptr = s->ptr; + UChar* block = s->block; + UInt32* ftab = s->ftab; + Int32 nblock = s->nblock; + Int32 verb = s->verbosity; + Int32 wfact = s->workFactor; + UInt16* quadrant; + Int32 budget; + Int32 budgetInit; + Int32 i; + + if (nblock < 10000) { + fallbackSort ( s->arr1, s->arr2, ftab, nblock, verb ); + } else { + /* Calculate the location for quadrant, remembering to get + the alignment right. Assumes that &(block[0]) is at least + 2-byte aligned -- this should be ok since block is really + the first section of arr2. + */ + i = nblock+BZ_N_OVERSHOOT; + if (i & 1) i++; + quadrant = (UInt16*)(&(block[i])); + + /* (wfact-1) / 3 puts the default-factor-30 + transition point at very roughly the same place as + with v0.1 and v0.9.0. + Not that it particularly matters any more, since the + resulting compressed stream is now the same regardless + of whether or not we use the main sort or fallback sort. + */ + if (wfact < 1 ) wfact = 1; + if (wfact > 100) wfact = 100; + budgetInit = nblock * ((wfact-1) / 3); + budget = budgetInit; + + mainSort ( ptr, block, quadrant, ftab, nblock, verb, &budget ); + if (verb >= 3) + VPrintf3 ( " %d work, %d block, ratio %5.2f\n", + budgetInit - budget, + nblock, + (float)(budgetInit - budget) / + (float)(nblock==0 ? 1 : nblock) ); + if (budget < 0) { + if (verb >= 2) + VPrintf0 ( " too repetitive; using fallback" + " sorting algorithm\n" ); + fallbackSort ( s->arr1, s->arr2, ftab, nblock, verb ); + } + } + + s->origPtr = -1; + for (i = 0; i < s->nblock; i++) + if (ptr[i] == 0) + { s->origPtr = i; break; }; + + AssertH( s->origPtr != -1, 1003 ); +} + + +/*-------------------------------------------------------------*/ +/*--- end blocksort.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/bz2/bzlib.c b/tools/lib/bz2/bzlib.c new file mode 100644 index 0000000..6c68b7b --- /dev/null +++ b/tools/lib/bz2/bzlib.c @@ -0,0 +1,1587 @@ + +/*-------------------------------------------------------------*/ +/*--- Library top-level functions. ---*/ +/*--- bzlib.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ + This file is part of bzip2/libbzip2, a program and library for + lossless, block-sorting data compression. + + bzip2/libbzip2 version 1.0.8 of 13 July 2019 + Copyright (C) 1996-2019 Julian Seward + + Please read the WARNING, DISCLAIMER and PATENTS sections in the + README file. + + This program is released under the terms of the license contained + in the file LICENSE. + ------------------------------------------------------------------ */ + +/* CHANGES + 0.9.0 -- original version. + 0.9.0a/b -- no changes in this file. + 0.9.0c -- made zero-length BZ_FLUSH work correctly in bzCompress(). + fixed bzWrite/bzRead to ignore zero-length requests. + fixed bzread to correctly handle read requests after EOF. + wrong parameter order in call to bzDecompressInit in + bzBuffToBuffDecompress. Fixed. 
+*/ + +#include "bzlib_private.h" + + +/*---------------------------------------------------*/ +/*--- Compression stuff ---*/ +/*---------------------------------------------------*/ + + +/*---------------------------------------------------*/ +#ifndef BZ_NO_STDIO +void BZ2_bz__AssertH__fail ( int errcode ) +{ + fprintf(stderr, + "\n\nbzip2/libbzip2: internal error number %d.\n" + "This is a bug in bzip2/libbzip2, %s.\n" + "Please report it to: bzip2-devel@sourceware.org. If this happened\n" + "when you were using some program which uses libbzip2 as a\n" + "component, you should also report this bug to the author(s)\n" + "of that program. Please make an effort to report this bug;\n" + "timely and accurate bug reports eventually lead to higher\n" + "quality software. Thanks.\n\n", + errcode, + BZ2_bzlibVersion() + ); + + if (errcode == 1007) { + fprintf(stderr, + "\n*** A special note about internal error number 1007 ***\n" + "\n" + "Experience suggests that a common cause of i.e. 1007\n" + "is unreliable memory or other hardware. The 1007 assertion\n" + "just happens to cross-check the results of huge numbers of\n" + "memory reads/writes, and so acts (unintendedly) as a stress\n" + "test of your memory system.\n" + "\n" + "I suggest the following: try compressing the file again,\n" + "possibly monitoring progress in detail with the -vv flag.\n" + "\n" + "* If the error cannot be reproduced, and/or happens at different\n" + " points in compression, you may have a flaky memory system.\n" + " Try a memory-test program. I have used Memtest86\n" + " (www.memtest86.com). At the time of writing it is free (GPLd).\n" + " Memtest86 tests memory much more thorougly than your BIOSs\n" + " power-on test, and may find failures that the BIOS doesn't.\n" + "\n" + "* If the error can be repeatably reproduced, this is a bug in\n" + " bzip2, and I would very much like to hear about it. 
Please\n" + " let me know, and, ideally, save a copy of the file causing the\n" + " problem -- without which I will be unable to investigate it.\n" + "\n" + ); + } + + exit(3); +} +#endif + + +/*---------------------------------------------------*/ +static +int bz_config_ok ( void ) +{ + if (sizeof(int) != 4) return 0; + if (sizeof(short) != 2) return 0; + if (sizeof(char) != 1) return 0; + return 1; +} + + +/*---------------------------------------------------*/ +static +void* default_bzalloc ( void* opaque, Int32 items, Int32 size ) +{ + void* v = malloc ( items * size ); + return v; +} + +static +void default_bzfree ( void* opaque, void* addr ) +{ + if (addr != NULL) free ( addr ); +} + + +/*---------------------------------------------------*/ +static +void prepare_new_block ( EState* s ) +{ + Int32 i; + s->nblock = 0; + s->numZ = 0; + s->state_out_pos = 0; + BZ_INITIALISE_CRC ( s->blockCRC ); + for (i = 0; i < 256; i++) s->inUse[i] = False; + s->blockNo++; +} + + +/*---------------------------------------------------*/ +static +void init_RL ( EState* s ) +{ + s->state_in_ch = 256; + s->state_in_len = 0; +} + + +static +Bool isempty_RL ( EState* s ) +{ + if (s->state_in_ch < 256 && s->state_in_len > 0) + return False; else + return True; +} + + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzCompressInit) + ( bz_stream* strm, + int blockSize100k, + int verbosity, + int workFactor ) +{ + Int32 n; + EState* s; + + if (!bz_config_ok()) return BZ_CONFIG_ERROR; + + if (strm == NULL || + blockSize100k < 1 || blockSize100k > 9 || + workFactor < 0 || workFactor > 250) + return BZ_PARAM_ERROR; + + if (workFactor == 0) workFactor = 30; + if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc; + if (strm->bzfree == NULL) strm->bzfree = default_bzfree; + + s = BZALLOC( sizeof(EState) ); + if (s == NULL) return BZ_MEM_ERROR; + s->strm = strm; + + s->arr1 = NULL; + s->arr2 = NULL; + s->ftab = NULL; + + n = 100000 * blockSize100k; + s->arr1 = BZALLOC( n * sizeof(UInt32) ); + s->arr2 = BZALLOC( (n+BZ_N_OVERSHOOT) * sizeof(UInt32) ); + s->ftab = BZALLOC( 65537 * sizeof(UInt32) ); + + if (s->arr1 == NULL || s->arr2 == NULL || s->ftab == NULL) { + if (s->arr1 != NULL) BZFREE(s->arr1); + if (s->arr2 != NULL) BZFREE(s->arr2); + if (s->ftab != NULL) BZFREE(s->ftab); + if (s != NULL) BZFREE(s); + return BZ_MEM_ERROR; + } + + s->blockNo = 0; + s->state = BZ_S_INPUT; + s->mode = BZ_M_RUNNING; + s->combinedCRC = 0; + s->blockSize100k = blockSize100k; + s->nblockMAX = 100000 * blockSize100k - 19; + s->verbosity = verbosity; + s->workFactor = workFactor; + + s->block = (UChar*)s->arr2; + s->mtfv = (UInt16*)s->arr1; + s->zbits = NULL; + s->ptr = (UInt32*)s->arr1; + + strm->state = s; + strm->total_in_lo32 = 0; + strm->total_in_hi32 = 0; + strm->total_out_lo32 = 0; + strm->total_out_hi32 = 0; + init_RL ( s ); + prepare_new_block ( s ); + return BZ_OK; +} + + +/*---------------------------------------------------*/ +static +void add_pair_to_block ( EState* s ) +{ + Int32 i; + UChar ch = (UChar)(s->state_in_ch); + for (i = 0; i < s->state_in_len; i++) { + BZ_UPDATE_CRC( s->blockCRC, ch ); + } + s->inUse[s->state_in_ch] = True; + switch (s->state_in_len) { + case 1: + s->block[s->nblock] = (UChar)ch; s->nblock++; + break; + case 2: + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = (UChar)ch; s->nblock++; + break; + case 3: + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = (UChar)ch; s->nblock++; + 
break; + default: + s->inUse[s->state_in_len-4] = True; + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = (UChar)ch; s->nblock++; + s->block[s->nblock] = ((UChar)(s->state_in_len-4)); + s->nblock++; + break; + } +} + + +/*---------------------------------------------------*/ +static +void flush_RL ( EState* s ) +{ + if (s->state_in_ch < 256) add_pair_to_block ( s ); + init_RL ( s ); +} + + +/*---------------------------------------------------*/ +#define ADD_CHAR_TO_BLOCK(zs,zchh0) \ +{ \ + UInt32 zchh = (UInt32)(zchh0); \ + /*-- fast track the common case --*/ \ + if (zchh != zs->state_in_ch && \ + zs->state_in_len == 1) { \ + UChar ch = (UChar)(zs->state_in_ch); \ + BZ_UPDATE_CRC( zs->blockCRC, ch ); \ + zs->inUse[zs->state_in_ch] = True; \ + zs->block[zs->nblock] = (UChar)ch; \ + zs->nblock++; \ + zs->state_in_ch = zchh; \ + } \ + else \ + /*-- general, uncommon cases --*/ \ + if (zchh != zs->state_in_ch || \ + zs->state_in_len == 255) { \ + if (zs->state_in_ch < 256) \ + add_pair_to_block ( zs ); \ + zs->state_in_ch = zchh; \ + zs->state_in_len = 1; \ + } else { \ + zs->state_in_len++; \ + } \ +} + + +/*---------------------------------------------------*/ +static +Bool copy_input_until_stop ( EState* s ) +{ + Bool progress_in = False; + + if (s->mode == BZ_M_RUNNING) { + + /*-- fast track the common case --*/ + while (True) { + /*-- block full? --*/ + if (s->nblock >= s->nblockMAX) break; + /*-- no input? --*/ + if (s->strm->avail_in == 0) break; + progress_in = True; + ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) ); + s->strm->next_in++; + s->strm->avail_in--; + s->strm->total_in_lo32++; + if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++; + } + + } else { + + /*-- general, uncommon case --*/ + while (True) { + /*-- block full? --*/ + if (s->nblock >= s->nblockMAX) break; + /*-- no input? --*/ + if (s->strm->avail_in == 0) break; + /*-- flush/finish end? --*/ + if (s->avail_in_expect == 0) break; + progress_in = True; + ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) ); + s->strm->next_in++; + s->strm->avail_in--; + s->strm->total_in_lo32++; + if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++; + s->avail_in_expect--; + } + } + return progress_in; +} + + +/*---------------------------------------------------*/ +static +Bool copy_output_until_stop ( EState* s ) +{ + Bool progress_out = False; + + while (True) { + + /*-- no output space? --*/ + if (s->strm->avail_out == 0) break; + + /*-- block done? 
--*/ + if (s->state_out_pos >= s->numZ) break; + + progress_out = True; + *(s->strm->next_out) = s->zbits[s->state_out_pos]; + s->state_out_pos++; + s->strm->avail_out--; + s->strm->next_out++; + s->strm->total_out_lo32++; + if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; + } + + return progress_out; +} + + +/*---------------------------------------------------*/ +static +Bool handle_compress ( bz_stream* strm ) +{ + Bool progress_in = False; + Bool progress_out = False; + EState* s = strm->state; + + while (True) { + + if (s->state == BZ_S_OUTPUT) { + progress_out |= copy_output_until_stop ( s ); + if (s->state_out_pos < s->numZ) break; + if (s->mode == BZ_M_FINISHING && + s->avail_in_expect == 0 && + isempty_RL(s)) break; + prepare_new_block ( s ); + s->state = BZ_S_INPUT; + if (s->mode == BZ_M_FLUSHING && + s->avail_in_expect == 0 && + isempty_RL(s)) break; + } + + if (s->state == BZ_S_INPUT) { + progress_in |= copy_input_until_stop ( s ); + if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) { + flush_RL ( s ); + BZ2_compressBlock ( s, (Bool)(s->mode == BZ_M_FINISHING) ); + s->state = BZ_S_OUTPUT; + } + else + if (s->nblock >= s->nblockMAX) { + BZ2_compressBlock ( s, False ); + s->state = BZ_S_OUTPUT; + } + else + if (s->strm->avail_in == 0) { + break; + } + } + + } + + return progress_in || progress_out; +} + + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzCompress) ( bz_stream *strm, int action ) +{ + Bool progress; + EState* s; + if (strm == NULL) return BZ_PARAM_ERROR; + s = strm->state; + if (s == NULL) return BZ_PARAM_ERROR; + if (s->strm != strm) return BZ_PARAM_ERROR; + + preswitch: + switch (s->mode) { + + case BZ_M_IDLE: + return BZ_SEQUENCE_ERROR; + + case BZ_M_RUNNING: + if (action == BZ_RUN) { + progress = handle_compress ( strm ); + return progress ? 
BZ_RUN_OK : BZ_PARAM_ERROR; + } + else + if (action == BZ_FLUSH) { + s->avail_in_expect = strm->avail_in; + s->mode = BZ_M_FLUSHING; + goto preswitch; + } + else + if (action == BZ_FINISH) { + s->avail_in_expect = strm->avail_in; + s->mode = BZ_M_FINISHING; + goto preswitch; + } + else + return BZ_PARAM_ERROR; + + case BZ_M_FLUSHING: + if (action != BZ_FLUSH) return BZ_SEQUENCE_ERROR; + if (s->avail_in_expect != s->strm->avail_in) + return BZ_SEQUENCE_ERROR; + progress = handle_compress ( strm ); + if (s->avail_in_expect > 0 || !isempty_RL(s) || + s->state_out_pos < s->numZ) return BZ_FLUSH_OK; + s->mode = BZ_M_RUNNING; + return BZ_RUN_OK; + + case BZ_M_FINISHING: + if (action != BZ_FINISH) return BZ_SEQUENCE_ERROR; + if (s->avail_in_expect != s->strm->avail_in) + return BZ_SEQUENCE_ERROR; + progress = handle_compress ( strm ); + if (!progress) return BZ_SEQUENCE_ERROR; + if (s->avail_in_expect > 0 || !isempty_RL(s) || + s->state_out_pos < s->numZ) return BZ_FINISH_OK; + s->mode = BZ_M_IDLE; + return BZ_STREAM_END; + } + return BZ_OK; /*--not reached--*/ +} + + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzCompressEnd) ( bz_stream *strm ) +{ + EState* s; + if (strm == NULL) return BZ_PARAM_ERROR; + s = strm->state; + if (s == NULL) return BZ_PARAM_ERROR; + if (s->strm != strm) return BZ_PARAM_ERROR; + + if (s->arr1 != NULL) BZFREE(s->arr1); + if (s->arr2 != NULL) BZFREE(s->arr2); + if (s->ftab != NULL) BZFREE(s->ftab); + BZFREE(strm->state); + + strm->state = NULL; + + return BZ_OK; +} + + +/*---------------------------------------------------*/ +/*--- Decompression stuff ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzDecompressInit) + ( bz_stream* strm, + int verbosity, + int small ) +{ + DState* s; + + if (!bz_config_ok()) return BZ_CONFIG_ERROR; + + if (strm == NULL) return BZ_PARAM_ERROR; + if (small != 0 && small != 1) return BZ_PARAM_ERROR; + if (verbosity < 0 || verbosity > 4) return BZ_PARAM_ERROR; + + if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc; + if (strm->bzfree == NULL) strm->bzfree = default_bzfree; + + s = BZALLOC( sizeof(DState) ); + if (s == NULL) return BZ_MEM_ERROR; + s->strm = strm; + strm->state = s; + s->state = BZ_X_MAGIC_1; + s->bsLive = 0; + s->bsBuff = 0; + s->calculatedCombinedCRC = 0; + strm->total_in_lo32 = 0; + strm->total_in_hi32 = 0; + strm->total_out_lo32 = 0; + strm->total_out_hi32 = 0; + s->smallDecompress = (Bool)small; + s->ll4 = NULL; + s->ll16 = NULL; + s->tt = NULL; + s->currBlockNo = 0; + s->verbosity = verbosity; + + return BZ_OK; +} + + +/*---------------------------------------------------*/ +/* Return True iff data corruption is discovered. + Returns False if there is no problem. +*/ +static +Bool unRLE_obuf_to_output_FAST ( DState* s ) +{ + UChar k1; + + if (s->blockRandomised) { + + while (True) { + /* try to finish existing run */ + while (True) { + if (s->strm->avail_out == 0) return False; + if (s->state_out_len == 0) break; + *( (UChar*)(s->strm->next_out) ) = s->state_out_ch; + BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch ); + s->state_out_len--; + s->strm->next_out++; + s->strm->avail_out--; + s->strm->total_out_lo32++; + if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; + } + + /* can a new run be started? */ + if (s->nblock_used == s->save_nblock+1) return False; + + /* Only caused by corrupt data stream? 
*/ + if (s->nblock_used > s->save_nblock+1) + return True; + + s->state_out_len = 1; + s->state_out_ch = s->k0; + BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + s->state_out_len = 2; + BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + s->state_out_len = 3; + BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + s->state_out_len = ((Int32)k1) + 4; + BZ_GET_FAST(s->k0); BZ_RAND_UPD_MASK; + s->k0 ^= BZ_RAND_MASK; s->nblock_used++; + } + + } else { + + /* restore */ + UInt32 c_calculatedBlockCRC = s->calculatedBlockCRC; + UChar c_state_out_ch = s->state_out_ch; + Int32 c_state_out_len = s->state_out_len; + Int32 c_nblock_used = s->nblock_used; + Int32 c_k0 = s->k0; + UInt32* c_tt = s->tt; + UInt32 c_tPos = s->tPos; + char* cs_next_out = s->strm->next_out; + unsigned int cs_avail_out = s->strm->avail_out; + Int32 ro_blockSize100k = s->blockSize100k; + /* end restore */ + + UInt32 avail_out_INIT = cs_avail_out; + Int32 s_save_nblockPP = s->save_nblock+1; + unsigned int total_out_lo32_old; + + while (True) { + + /* try to finish existing run */ + if (c_state_out_len > 0) { + while (True) { + if (cs_avail_out == 0) goto return_notr; + if (c_state_out_len == 1) break; + *( (UChar*)(cs_next_out) ) = c_state_out_ch; + BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch ); + c_state_out_len--; + cs_next_out++; + cs_avail_out--; + } + s_state_out_len_eq_one: + { + if (cs_avail_out == 0) { + c_state_out_len = 1; goto return_notr; + }; + *( (UChar*)(cs_next_out) ) = c_state_out_ch; + BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch ); + cs_next_out++; + cs_avail_out--; + } + } + /* Only caused by corrupt data stream? */ + if (c_nblock_used > s_save_nblockPP) + return True; + + /* can a new run be started? 
*/ + if (c_nblock_used == s_save_nblockPP) { + c_state_out_len = 0; goto return_notr; + }; + c_state_out_ch = c_k0; + BZ_GET_FAST_C(k1); c_nblock_used++; + if (k1 != c_k0) { + c_k0 = k1; goto s_state_out_len_eq_one; + }; + if (c_nblock_used == s_save_nblockPP) + goto s_state_out_len_eq_one; + + c_state_out_len = 2; + BZ_GET_FAST_C(k1); c_nblock_used++; + if (c_nblock_used == s_save_nblockPP) continue; + if (k1 != c_k0) { c_k0 = k1; continue; }; + + c_state_out_len = 3; + BZ_GET_FAST_C(k1); c_nblock_used++; + if (c_nblock_used == s_save_nblockPP) continue; + if (k1 != c_k0) { c_k0 = k1; continue; }; + + BZ_GET_FAST_C(k1); c_nblock_used++; + c_state_out_len = ((Int32)k1) + 4; + BZ_GET_FAST_C(c_k0); c_nblock_used++; + } + + return_notr: + total_out_lo32_old = s->strm->total_out_lo32; + s->strm->total_out_lo32 += (avail_out_INIT - cs_avail_out); + if (s->strm->total_out_lo32 < total_out_lo32_old) + s->strm->total_out_hi32++; + + /* save */ + s->calculatedBlockCRC = c_calculatedBlockCRC; + s->state_out_ch = c_state_out_ch; + s->state_out_len = c_state_out_len; + s->nblock_used = c_nblock_used; + s->k0 = c_k0; + s->tt = c_tt; + s->tPos = c_tPos; + s->strm->next_out = cs_next_out; + s->strm->avail_out = cs_avail_out; + /* end save */ + } + return False; +} + + + +/*---------------------------------------------------*/ +__inline__ Int32 BZ2_indexIntoF ( Int32 indx, Int32 *cftab ) +{ + Int32 nb, na, mid; + nb = 0; + na = 256; + do { + mid = (nb + na) >> 1; + if (indx >= cftab[mid]) nb = mid; else na = mid; + } + while (na - nb != 1); + return nb; +} + + +/*---------------------------------------------------*/ +/* Return True iff data corruption is discovered. + Returns False if there is no problem. +*/ +static +Bool unRLE_obuf_to_output_SMALL ( DState* s ) +{ + UChar k1; + + if (s->blockRandomised) { + + while (True) { + /* try to finish existing run */ + while (True) { + if (s->strm->avail_out == 0) return False; + if (s->state_out_len == 0) break; + *( (UChar*)(s->strm->next_out) ) = s->state_out_ch; + BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch ); + s->state_out_len--; + s->strm->next_out++; + s->strm->avail_out--; + s->strm->total_out_lo32++; + if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; + } + + /* can a new run be started? */ + if (s->nblock_used == s->save_nblock+1) return False; + + /* Only caused by corrupt data stream? 
*/ + if (s->nblock_used > s->save_nblock+1) + return True; + + s->state_out_len = 1; + s->state_out_ch = s->k0; + BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + s->state_out_len = 2; + BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + s->state_out_len = 3; + BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; + k1 ^= BZ_RAND_MASK; s->nblock_used++; + s->state_out_len = ((Int32)k1) + 4; + BZ_GET_SMALL(s->k0); BZ_RAND_UPD_MASK; + s->k0 ^= BZ_RAND_MASK; s->nblock_used++; + } + + } else { + + while (True) { + /* try to finish existing run */ + while (True) { + if (s->strm->avail_out == 0) return False; + if (s->state_out_len == 0) break; + *( (UChar*)(s->strm->next_out) ) = s->state_out_ch; + BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch ); + s->state_out_len--; + s->strm->next_out++; + s->strm->avail_out--; + s->strm->total_out_lo32++; + if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; + } + + /* can a new run be started? */ + if (s->nblock_used == s->save_nblock+1) return False; + + /* Only caused by corrupt data stream? */ + if (s->nblock_used > s->save_nblock+1) + return True; + + s->state_out_len = 1; + s->state_out_ch = s->k0; + BZ_GET_SMALL(k1); s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + s->state_out_len = 2; + BZ_GET_SMALL(k1); s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + s->state_out_len = 3; + BZ_GET_SMALL(k1); s->nblock_used++; + if (s->nblock_used == s->save_nblock+1) continue; + if (k1 != s->k0) { s->k0 = k1; continue; }; + + BZ_GET_SMALL(k1); s->nblock_used++; + s->state_out_len = ((Int32)k1) + 4; + BZ_GET_SMALL(s->k0); s->nblock_used++; + } + + } +} + + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzDecompress) ( bz_stream *strm ) +{ + Bool corrupt; + DState* s; + if (strm == NULL) return BZ_PARAM_ERROR; + s = strm->state; + if (s == NULL) return BZ_PARAM_ERROR; + if (s->strm != strm) return BZ_PARAM_ERROR; + + while (True) { + if (s->state == BZ_X_IDLE) return BZ_SEQUENCE_ERROR; + if (s->state == BZ_X_OUTPUT) { + if (s->smallDecompress) + corrupt = unRLE_obuf_to_output_SMALL ( s ); else + corrupt = unRLE_obuf_to_output_FAST ( s ); + if (corrupt) return BZ_DATA_ERROR; + if (s->nblock_used == s->save_nblock+1 && s->state_out_len == 0) { + BZ_FINALISE_CRC ( s->calculatedBlockCRC ); + if (s->verbosity >= 3) + VPrintf2 ( " {0x%08x, 0x%08x}", s->storedBlockCRC, + s->calculatedBlockCRC ); + if (s->verbosity >= 2) VPrintf0 ( "]" ); + if (s->calculatedBlockCRC != s->storedBlockCRC) + return BZ_DATA_ERROR; + s->calculatedCombinedCRC + = (s->calculatedCombinedCRC << 1) | + (s->calculatedCombinedCRC >> 31); + s->calculatedCombinedCRC ^= s->calculatedBlockCRC; + s->state = BZ_X_BLKHDR_1; + } else { + return BZ_OK; + } + } + if (s->state >= BZ_X_MAGIC_1) { + Int32 r = BZ2_decompress ( s ); + if (r == BZ_STREAM_END) { + if (s->verbosity >= 3) + VPrintf2 ( "\n combined CRCs: stored = 0x%08x, computed = 0x%08x", + s->storedCombinedCRC, s->calculatedCombinedCRC ); + if 
(s->calculatedCombinedCRC != s->storedCombinedCRC) + return BZ_DATA_ERROR; + return r; + } + if (s->state != BZ_X_OUTPUT) return r; + } + } + + AssertH ( 0, 6001 ); + + return 0; /*NOTREACHED*/ +} + + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzDecompressEnd) ( bz_stream *strm ) +{ + DState* s; + if (strm == NULL) return BZ_PARAM_ERROR; + s = strm->state; + if (s == NULL) return BZ_PARAM_ERROR; + if (s->strm != strm) return BZ_PARAM_ERROR; + + if (s->tt != NULL) BZFREE(s->tt); + if (s->ll16 != NULL) BZFREE(s->ll16); + if (s->ll4 != NULL) BZFREE(s->ll4); + + BZFREE(strm->state); + strm->state = NULL; + + return BZ_OK; +} + + +#ifndef BZ_NO_STDIO +/*---------------------------------------------------*/ +/*--- File I/O stuff ---*/ +/*---------------------------------------------------*/ + +#define BZ_SETERR(eee) \ +{ \ + if (bzerror != NULL) *bzerror = eee; \ + if (bzf != NULL) bzf->lastErr = eee; \ +} + +typedef + struct { + FILE* handle; + Char buf[BZ_MAX_UNUSED]; + Int32 bufN; + Bool writing; + bz_stream strm; + Int32 lastErr; + Bool initialisedOk; + } + bzFile; + + +/*---------------------------------------------*/ +static Bool myfeof ( FILE* f ) +{ + Int32 c = fgetc ( f ); + if (c == EOF) return True; + ungetc ( c, f ); + return False; +} + + +/*---------------------------------------------------*/ +BZFILE* BZ_API(BZ2_bzWriteOpen) + ( int* bzerror, + FILE* f, + int blockSize100k, + int verbosity, + int workFactor ) +{ + Int32 ret; + bzFile* bzf = NULL; + + BZ_SETERR(BZ_OK); + + if (f == NULL || + (blockSize100k < 1 || blockSize100k > 9) || + (workFactor < 0 || workFactor > 250) || + (verbosity < 0 || verbosity > 4)) + { BZ_SETERR(BZ_PARAM_ERROR); return NULL; }; + + if (ferror(f)) + { BZ_SETERR(BZ_IO_ERROR); return NULL; }; + + bzf = malloc ( sizeof(bzFile) ); + if (bzf == NULL) + { BZ_SETERR(BZ_MEM_ERROR); return NULL; }; + + BZ_SETERR(BZ_OK); + bzf->initialisedOk = False; + bzf->bufN = 0; + bzf->handle = f; + bzf->writing = True; + bzf->strm.bzalloc = NULL; + bzf->strm.bzfree = NULL; + bzf->strm.opaque = NULL; + + if (workFactor == 0) workFactor = 30; + ret = BZ2_bzCompressInit ( &(bzf->strm), blockSize100k, + verbosity, workFactor ); + if (ret != BZ_OK) + { BZ_SETERR(ret); free(bzf); return NULL; }; + + bzf->strm.avail_in = 0; + bzf->initialisedOk = True; + return bzf; +} + + + +/*---------------------------------------------------*/ +void BZ_API(BZ2_bzWrite) + ( int* bzerror, + BZFILE* b, + void* buf, + int len ) +{ + Int32 n, n2, ret; + bzFile* bzf = (bzFile*)b; + + BZ_SETERR(BZ_OK); + if (bzf == NULL || buf == NULL || len < 0) + { BZ_SETERR(BZ_PARAM_ERROR); return; }; + if (!(bzf->writing)) + { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; + if (ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return; }; + + if (len == 0) + { BZ_SETERR(BZ_OK); return; }; + + bzf->strm.avail_in = len; + bzf->strm.next_in = buf; + + while (True) { + bzf->strm.avail_out = BZ_MAX_UNUSED; + bzf->strm.next_out = bzf->buf; + ret = BZ2_bzCompress ( &(bzf->strm), BZ_RUN ); + if (ret != BZ_RUN_OK) + { BZ_SETERR(ret); return; }; + + if (bzf->strm.avail_out < BZ_MAX_UNUSED) { + n = BZ_MAX_UNUSED - bzf->strm.avail_out; + n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar), + n, bzf->handle ); + if (n != n2 || ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return; }; + } + + if (bzf->strm.avail_in == 0) + { BZ_SETERR(BZ_OK); return; }; + } +} + + +/*---------------------------------------------------*/ +void BZ_API(BZ2_bzWriteClose) + ( int* bzerror, + BZFILE* b, + int abandon, + unsigned 
int* nbytes_in, + unsigned int* nbytes_out ) +{ + BZ2_bzWriteClose64 ( bzerror, b, abandon, + nbytes_in, NULL, nbytes_out, NULL ); +} + + +void BZ_API(BZ2_bzWriteClose64) + ( int* bzerror, + BZFILE* b, + int abandon, + unsigned int* nbytes_in_lo32, + unsigned int* nbytes_in_hi32, + unsigned int* nbytes_out_lo32, + unsigned int* nbytes_out_hi32 ) +{ + Int32 n, n2, ret; + bzFile* bzf = (bzFile*)b; + + if (bzf == NULL) + { BZ_SETERR(BZ_OK); return; }; + if (!(bzf->writing)) + { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; + if (ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return; }; + + if (nbytes_in_lo32 != NULL) *nbytes_in_lo32 = 0; + if (nbytes_in_hi32 != NULL) *nbytes_in_hi32 = 0; + if (nbytes_out_lo32 != NULL) *nbytes_out_lo32 = 0; + if (nbytes_out_hi32 != NULL) *nbytes_out_hi32 = 0; + + if ((!abandon) && bzf->lastErr == BZ_OK) { + while (True) { + bzf->strm.avail_out = BZ_MAX_UNUSED; + bzf->strm.next_out = bzf->buf; + ret = BZ2_bzCompress ( &(bzf->strm), BZ_FINISH ); + if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END) + { BZ_SETERR(ret); return; }; + + if (bzf->strm.avail_out < BZ_MAX_UNUSED) { + n = BZ_MAX_UNUSED - bzf->strm.avail_out; + n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar), + n, bzf->handle ); + if (n != n2 || ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return; }; + } + + if (ret == BZ_STREAM_END) break; + } + } + + if ( !abandon && !ferror ( bzf->handle ) ) { + fflush ( bzf->handle ); + if (ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return; }; + } + + if (nbytes_in_lo32 != NULL) + *nbytes_in_lo32 = bzf->strm.total_in_lo32; + if (nbytes_in_hi32 != NULL) + *nbytes_in_hi32 = bzf->strm.total_in_hi32; + if (nbytes_out_lo32 != NULL) + *nbytes_out_lo32 = bzf->strm.total_out_lo32; + if (nbytes_out_hi32 != NULL) + *nbytes_out_hi32 = bzf->strm.total_out_hi32; + + BZ_SETERR(BZ_OK); + BZ2_bzCompressEnd ( &(bzf->strm) ); + free ( bzf ); +} + + +/*---------------------------------------------------*/ +BZFILE* BZ_API(BZ2_bzReadOpen) + ( int* bzerror, + FILE* f, + int verbosity, + int small, + void* unused, + int nUnused ) +{ + bzFile* bzf = NULL; + int ret; + + BZ_SETERR(BZ_OK); + + if (f == NULL || + (small != 0 && small != 1) || + (verbosity < 0 || verbosity > 4) || + (unused == NULL && nUnused != 0) || + (unused != NULL && (nUnused < 0 || nUnused > BZ_MAX_UNUSED))) + { BZ_SETERR(BZ_PARAM_ERROR); return NULL; }; + + if (ferror(f)) + { BZ_SETERR(BZ_IO_ERROR); return NULL; }; + + bzf = malloc ( sizeof(bzFile) ); + if (bzf == NULL) + { BZ_SETERR(BZ_MEM_ERROR); return NULL; }; + + BZ_SETERR(BZ_OK); + + bzf->initialisedOk = False; + bzf->handle = f; + bzf->bufN = 0; + bzf->writing = False; + bzf->strm.bzalloc = NULL; + bzf->strm.bzfree = NULL; + bzf->strm.opaque = NULL; + + while (nUnused > 0) { + bzf->buf[bzf->bufN] = *((UChar*)(unused)); bzf->bufN++; + unused = ((void*)( 1 + ((UChar*)(unused)) )); + nUnused--; + } + + ret = BZ2_bzDecompressInit ( &(bzf->strm), verbosity, small ); + if (ret != BZ_OK) + { BZ_SETERR(ret); free(bzf); return NULL; }; + + bzf->strm.avail_in = bzf->bufN; + bzf->strm.next_in = bzf->buf; + + bzf->initialisedOk = True; + return bzf; +} + + +/*---------------------------------------------------*/ +void BZ_API(BZ2_bzReadClose) ( int *bzerror, BZFILE *b ) +{ + bzFile* bzf = (bzFile*)b; + + BZ_SETERR(BZ_OK); + if (bzf == NULL) + { BZ_SETERR(BZ_OK); return; }; + + if (bzf->writing) + { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; + + if (bzf->initialisedOk) + (void)BZ2_bzDecompressEnd ( &(bzf->strm) ); + free ( bzf ); +} + + 
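[editor's note] The stdio wrappers above compose in the usual open/write/close pattern. A minimal sketch of the write path follows; it is illustrative only — the helper name `write_bz2_file` is made up, and kptools itself builds this library with -DBZ_NO_STDIO, which compiles these wrappers out entirely.

#include <stdio.h>
#include "bzlib.h"

/* Compress `len` bytes from `data` into the file at `path`.
   Sketch only: error paths are simplified. */
static int write_bz2_file(const char *path, const void *data, int len)
{
    int bzerr;
    FILE *f = fopen(path, "wb");
    if (f == NULL) return -1;

    /* blockSize100k=9 (900k blocks), verbosity=0, workFactor=0 => default 30 */
    BZFILE *bz = BZ2_bzWriteOpen(&bzerr, f, 9, 0, 0);
    if (bzerr != BZ_OK) { fclose(f); return -1; }

    BZ2_bzWrite(&bzerr, bz, (void *)data, len);
    if (bzerr != BZ_OK) {
        /* abandon=1: discard the partial stream rather than finishing it */
        BZ2_bzWriteClose(NULL, bz, 1, NULL, NULL);
        fclose(f);
        return -1;
    }

    /* abandon=0: flushes the final block and writes the stream trailer */
    BZ2_bzWriteClose(&bzerr, bz, 0, NULL, NULL);
    fclose(f);
    return (bzerr == BZ_OK) ? 0 : -1;
}

Note that BZ2_bzWriteClose frees the BZFILE but deliberately leaves the FILE* open, so the caller still owns the fclose.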
+/*---------------------------------------------------*/ +int BZ_API(BZ2_bzRead) + ( int* bzerror, + BZFILE* b, + void* buf, + int len ) +{ + Int32 n, ret; + bzFile* bzf = (bzFile*)b; + + BZ_SETERR(BZ_OK); + + if (bzf == NULL || buf == NULL || len < 0) + { BZ_SETERR(BZ_PARAM_ERROR); return 0; }; + + if (bzf->writing) + { BZ_SETERR(BZ_SEQUENCE_ERROR); return 0; }; + + if (len == 0) + { BZ_SETERR(BZ_OK); return 0; }; + + bzf->strm.avail_out = len; + bzf->strm.next_out = buf; + + while (True) { + + if (ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return 0; }; + + if (bzf->strm.avail_in == 0 && !myfeof(bzf->handle)) { + n = fread ( bzf->buf, sizeof(UChar), + BZ_MAX_UNUSED, bzf->handle ); + if (ferror(bzf->handle)) + { BZ_SETERR(BZ_IO_ERROR); return 0; }; + bzf->bufN = n; + bzf->strm.avail_in = bzf->bufN; + bzf->strm.next_in = bzf->buf; + } + + ret = BZ2_bzDecompress ( &(bzf->strm) ); + + if (ret != BZ_OK && ret != BZ_STREAM_END) + { BZ_SETERR(ret); return 0; }; + + if (ret == BZ_OK && myfeof(bzf->handle) && + bzf->strm.avail_in == 0 && bzf->strm.avail_out > 0) + { BZ_SETERR(BZ_UNEXPECTED_EOF); return 0; }; + + if (ret == BZ_STREAM_END) + { BZ_SETERR(BZ_STREAM_END); + return len - bzf->strm.avail_out; }; + if (bzf->strm.avail_out == 0) + { BZ_SETERR(BZ_OK); return len; }; + + } + + return 0; /*not reached*/ +} + + +/*---------------------------------------------------*/ +void BZ_API(BZ2_bzReadGetUnused) + ( int* bzerror, + BZFILE* b, + void** unused, + int* nUnused ) +{ + bzFile* bzf = (bzFile*)b; + if (bzf == NULL) + { BZ_SETERR(BZ_PARAM_ERROR); return; }; + if (bzf->lastErr != BZ_STREAM_END) + { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; + if (unused == NULL || nUnused == NULL) + { BZ_SETERR(BZ_PARAM_ERROR); return; }; + + BZ_SETERR(BZ_OK); + *nUnused = bzf->strm.avail_in; + *unused = bzf->strm.next_in; +} +#endif + + +/*---------------------------------------------------*/ +/*--- Misc convenience stuff ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzBuffToBuffCompress) + ( char* dest, + unsigned int* destLen, + char* source, + unsigned int sourceLen, + int blockSize100k, + int verbosity, + int workFactor ) +{ + bz_stream strm; + int ret; + + if (dest == NULL || destLen == NULL || + source == NULL || + blockSize100k < 1 || blockSize100k > 9 || + verbosity < 0 || verbosity > 4 || + workFactor < 0 || workFactor > 250) + return BZ_PARAM_ERROR; + + if (workFactor == 0) workFactor = 30; + strm.bzalloc = NULL; + strm.bzfree = NULL; + strm.opaque = NULL; + ret = BZ2_bzCompressInit ( &strm, blockSize100k, + verbosity, workFactor ); + if (ret != BZ_OK) return ret; + + strm.next_in = source; + strm.next_out = dest; + strm.avail_in = sourceLen; + strm.avail_out = *destLen; + + ret = BZ2_bzCompress ( &strm, BZ_FINISH ); + if (ret == BZ_FINISH_OK) goto output_overflow; + if (ret != BZ_STREAM_END) goto errhandler; + + /* normal termination */ + *destLen -= strm.avail_out; + BZ2_bzCompressEnd ( &strm ); + return BZ_OK; + + output_overflow: + BZ2_bzCompressEnd ( &strm ); + return BZ_OUTBUFF_FULL; + + errhandler: + BZ2_bzCompressEnd ( &strm ); + return ret; +} + + +/*---------------------------------------------------*/ +int BZ_API(BZ2_bzBuffToBuffDecompress) + ( char* dest, + unsigned int* destLen, + char* source, + unsigned int sourceLen, + int small, + int verbosity ) +{ + bz_stream strm; + int ret; + + if (dest == NULL || destLen == NULL || + source == NULL || + (small != 0 && small != 1) || + verbosity < 0 || 
verbosity > 4)
+ return BZ_PARAM_ERROR;
+
+ strm.bzalloc = NULL;
+ strm.bzfree = NULL;
+ strm.opaque = NULL;
+ ret = BZ2_bzDecompressInit ( &strm, verbosity, small );
+ if (ret != BZ_OK) return ret;
+
+ strm.next_in = source;
+ strm.next_out = dest;
+ strm.avail_in = sourceLen;
+ strm.avail_out = *destLen;
+
+ ret = BZ2_bzDecompress ( &strm );
+ if (ret == BZ_OK) goto output_overflow_or_eof;
+ if (ret != BZ_STREAM_END) goto errhandler;
+
+ /* normal termination */
+ *destLen -= strm.avail_out;
+ BZ2_bzDecompressEnd ( &strm );
+ return BZ_OK;
+
+ output_overflow_or_eof:
+ if (strm.avail_out > 0) {
+ BZ2_bzDecompressEnd ( &strm );
+ return BZ_UNEXPECTED_EOF;
+ } else {
+ BZ2_bzDecompressEnd ( &strm );
+ return BZ_OUTBUFF_FULL;
+ };
+
+ errhandler:
+ BZ2_bzDecompressEnd ( &strm );
+ return ret;
+}
+
+
+/*---------------------------------------------------*/
+/*--
+ Code contributed by Yoshioka Tsuneo (tsuneo@rr.iij4u.or.jp)
+ to support better zlib compatibility.
+ This code is not _officially_ part of libbzip2 (yet);
+ I haven't tested it, documented it, or considered the
+ threading-safeness of it.
+ If this code breaks, please contact both Yoshioka and me.
+--*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+/*--
+ return version like "0.9.5d, 4-Sept-1999".
+--*/
+const char * BZ_API(BZ2_bzlibVersion)(void)
+{
+ return BZ_VERSION;
+}
+
+
+#ifndef BZ_NO_STDIO
+/*---------------------------------------------------*/
+
+#if defined(_WIN32) || defined(OS2) || defined(MSDOS)
+# include <fcntl.h>
+# include <io.h>
+# define SET_BINARY_MODE(file) setmode(fileno(file),O_BINARY)
+#else
+# define SET_BINARY_MODE(file)
+#endif
+static
+BZFILE * bzopen_or_bzdopen
+ ( const char *path, /* no use when bzdopen */
+ int fd, /* no use when bzdopen */
+ const char *mode,
+ int open_mode) /* bzopen: 0, bzdopen:1 */
+{
+ int bzerr;
+ char unused[BZ_MAX_UNUSED];
+ int blockSize100k = 9;
+ int writing = 0;
+ char mode2[10] = "";
+ FILE *fp = NULL;
+ BZFILE *bzfp = NULL;
+ int verbosity = 0;
+ int workFactor = 30;
+ int smallMode = 0;
+ int nUnused = 0;
+
+ if (mode == NULL) return NULL;
+ while (*mode) {
+ switch (*mode) {
+ case 'r':
+ writing = 0; break;
+ case 'w':
+ writing = 1; break;
+ case 's':
+ smallMode = 1; break;
+ default:
+ if (isdigit((unsigned char)(*mode))) {
+ blockSize100k = *mode-BZ_HDR_0;
+ }
+ }
+ mode++;
+ }
+ strcat(mode2, writing ? "w" : "r" );
+ strcat(mode2,"b"); /* binary mode */
+
+ if (open_mode==0) {
+ if (path==NULL || strcmp(path,"")==0) {
+ fp = (writing ? stdout : stdin);
+ SET_BINARY_MODE(fp);
+ } else {
+ fp = fopen(path,mode2);
+ }
+ } else {
+#ifdef BZ_STRICT_ANSI
+ fp = NULL;
+#else
+ fp = fdopen(fd,mode2);
+#endif
+ }
+ if (fp == NULL) return NULL;
+
+ if (writing) {
+ /* Guard against total chaos and anarchy -- JRS */
+ if (blockSize100k < 1) blockSize100k = 1;
+ if (blockSize100k > 9) blockSize100k = 9;
+ bzfp = BZ2_bzWriteOpen(&bzerr,fp,blockSize100k,
+ verbosity,workFactor);
+ } else {
+ bzfp = BZ2_bzReadOpen(&bzerr,fp,verbosity,smallMode,
+ unused,nUnused);
+ }
+ if (bzfp == NULL) {
+ if (fp != stdin && fp != stdout) fclose(fp);
+ return NULL;
+ }
+ return bzfp;
+}
+
+
+/*---------------------------------------------------*/
+/*--
+ open file for read or write.
+ ex) bzopen("file","w9")
+ case path="" or NULL => use stdin or stdout.
+--*/
+BZFILE * BZ_API(BZ2_bzopen)
+ ( const char *path,
+ const char *mode )
+{
+ return bzopen_or_bzdopen(path,-1,mode,/*bzopen*/0);
+}
+
+
+/*---------------------------------------------------*/
+BZFILE * BZ_API(BZ2_bzdopen)
+ ( int fd,
+ const char *mode )
+{
+ return bzopen_or_bzdopen(NULL,fd,mode,/*bzdopen*/1);
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzread) (BZFILE* b, void* buf, int len )
+{
+ int bzerr, nread;
+ if (((bzFile*)b)->lastErr == BZ_STREAM_END) return 0;
+ nread = BZ2_bzRead(&bzerr,b,buf,len);
+ if (bzerr == BZ_OK || bzerr == BZ_STREAM_END) {
+ return nread;
+ } else {
+ return -1;
+ }
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzwrite) (BZFILE* b, void* buf, int len )
+{
+ int bzerr;
+
+ BZ2_bzWrite(&bzerr,b,buf,len);
+ if(bzerr == BZ_OK){
+ return len;
+ }else{
+ return -1;
+ }
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzflush) (BZFILE *b)
+{
+ /* do nothing now... */
+ return 0;
+}
+
+
+/*---------------------------------------------------*/
+void BZ_API(BZ2_bzclose) (BZFILE* b)
+{
+ int bzerr;
+ FILE *fp;
+
+ if (b==NULL) {return;}
+ fp = ((bzFile *)b)->handle;
+ if(((bzFile*)b)->writing){
+ BZ2_bzWriteClose(&bzerr,b,0,NULL,NULL);
+ if(bzerr != BZ_OK){
+ BZ2_bzWriteClose(NULL,b,1,NULL,NULL);
+ }
+ }else{
+ BZ2_bzReadClose(&bzerr,b);
+ }
+ if(fp!=stdin && fp!=stdout){
+ fclose(fp);
+ }
+}
+
+
+/*---------------------------------------------------*/
+/*--
+ return last error code
+--*/
+static const char *bzerrorstrings[] = {
+ "OK"
+ ,"SEQUENCE_ERROR"
+ ,"PARAM_ERROR"
+ ,"MEM_ERROR"
+ ,"DATA_ERROR"
+ ,"DATA_ERROR_MAGIC"
+ ,"IO_ERROR"
+ ,"UNEXPECTED_EOF"
+ ,"OUTBUFF_FULL"
+ ,"CONFIG_ERROR"
+ ,"???" /* for future */
+ ,"???" /* for future */
+ ,"???" /* for future */
+ ,"???" /* for future */
+ ,"???" /* for future */
+ ,"???" /* for future */
+};
+
+
+const char * BZ_API(BZ2_bzerror) (BZFILE *b, int *errnum)
+{
+ int err = ((bzFile *)b)->lastErr;
+
+ if(err>0) err = 0;
+ *errnum = err;
+ return bzerrorstrings[err*-1];
+}
+#endif
+
+#ifdef BZ_NO_STDIO
+#include <stdio.h>
+#include <stdlib.h>
+
+// Called when an assertion inside bzip2 fails
+void BZ2_bz__AssertH__fail ( int errcode ) {
+ fprintf(stderr, "\n\nbzip2/libbz2: internal error number %d.\n", errcode);
+ exit(3);
+}
+
+// Called when bzip2 hits an internal error
+void bz_internal_error ( int errcode ) {
+ BZ2_bz__AssertH__fail(errcode);
+}
+#endif
+
+/*-------------------------------------------------------------*/
+/*--- end bzlib.c ---*/
+/*-------------------------------------------------------------*/
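[editor's note] Since this tree builds libbz2 with -DBZ_NO_STDIO, the file-oriented API above is compiled out and the one-shot buffer helpers are the practical entry points. A minimal round-trip sketch, assuming the input fits the fixed buffers — the helper name `bz2_roundtrip` and the buffer sizes are illustrative, not part of the patch:

#include <string.h>
#include "bzlib.h"

/* Compress then decompress a buffer and verify the result.
   Assumes srcLen <= sizeof(unpacked). */
static int bz2_roundtrip(char *src, unsigned int srcLen)
{
    /* worst case for compression is roughly srcLen + srcLen/100 + 600 bytes */
    static char packed[70000], unpacked[65536];
    unsigned int packedLen = sizeof(packed);
    unsigned int unpackedLen = sizeof(unpacked);

    if (BZ2_bzBuffToBuffCompress(packed, &packedLen, src, srcLen,
                                 9, 0, 0) != BZ_OK)   /* level 9, quiet, default workFactor */
        return -1;
    if (BZ2_bzBuffToBuffDecompress(unpacked, &unpackedLen, packed, packedLen,
                                   0, 0) != BZ_OK)    /* fast (non-small) decoder */
        return -1;

    return (unpackedLen == srcLen && memcmp(unpacked, src, srcLen) == 0) ? 0 : -1;
}

On success *destLen is shrunk to the actual output size, which is why packedLen is passed back into the decompress call.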
diff --git a/tools/lib/bz2/bzlib.h b/tools/lib/bz2/bzlib.h
new file mode 100644
index 0000000..ac3f1ba
--- /dev/null
+++ b/tools/lib/bz2/bzlib.h
@@ -0,0 +1,285 @@
+
+/*-------------------------------------------------------------*/
+/*--- Public header file for the library. ---*/
+/*--- bzlib.h ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+ This file is part of bzip2/libbzip2, a program and library for
+ lossless, block-sorting data compression.
+
+ bzip2/libbzip2 version 1.0.8 of 13 July 2019
+ Copyright (C) 1996-2019 Julian Seward
+
+ Please read the WARNING, DISCLAIMER and PATENTS sections in the
+ README file.
+
+ This program is released under the terms of the license contained
+ in the file LICENSE.
+ ------------------------------------------------------------------ */
+
+
+#ifndef _BZLIB_H
+#define _BZLIB_H
+
+#ifndef BZ_NO_STDIO
+/* Need a definition for FILE */
+#include <stdio.h>
+#endif
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define BZ_RUN 0
+#define BZ_FLUSH 1
+#define BZ_FINISH 2
+
+#define BZ_OK 0
+#define BZ_RUN_OK 1
+#define BZ_FLUSH_OK 2
+#define BZ_FINISH_OK 3
+#define BZ_STREAM_END 4
+#define BZ_SEQUENCE_ERROR (-1)
+#define BZ_PARAM_ERROR (-2)
+#define BZ_MEM_ERROR (-3)
+#define BZ_DATA_ERROR (-4)
+#define BZ_DATA_ERROR_MAGIC (-5)
+#define BZ_IO_ERROR (-6)
+#define BZ_UNEXPECTED_EOF (-7)
+#define BZ_OUTBUFF_FULL (-8)
+#define BZ_CONFIG_ERROR (-9)
+
+typedef
+ struct {
+ char *next_in;
+ unsigned int avail_in;
+ unsigned int total_in_lo32;
+ unsigned int total_in_hi32;
+
+ char *next_out;
+ unsigned int avail_out;
+ unsigned int total_out_lo32;
+ unsigned int total_out_hi32;
+
+ void *state;
+
+ void *(*bzalloc)(void *,int,int);
+ void (*bzfree)(void *,void *);
+ void *opaque;
+ }
+ bz_stream;
+
+
+#ifndef BZ_IMPORT
+#define BZ_EXPORT
+#endif
+
+#ifdef _WIN32
+# ifdef small
+ /* windows.h define small to char */
+# undef small
+# endif
+# ifdef BZ_EXPORT
+# define BZ_API(func) WINAPI func
+# define BZ_EXTERN extern
+# else
+ /* import windows dll dynamically */
+# define BZ_API(func) (WINAPI * func)
+# define BZ_EXTERN
+# endif
+#else
+# define BZ_API(func) func
+# define BZ_EXTERN extern
+#endif
+
+
+/*-- Core (low-level) library functions --*/
+
+BZ_EXTERN int BZ_API(BZ2_bzCompressInit) (
+ bz_stream* strm,
+ int blockSize100k,
+ int verbosity,
+ int workFactor
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzCompress) (
+ bz_stream* strm,
+ int action
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzCompressEnd) (
+ bz_stream* strm
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzDecompressInit) (
+ bz_stream *strm,
+ int verbosity,
+ int small
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzDecompress) (
+ bz_stream* strm
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzDecompressEnd) (
+ bz_stream *strm
+ );
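[editor's note] The core stream functions declared above are the only part of this API kptools can call under BZ_NO_STDIO. A sketch of driving them for single-shot compression — the helper `bz2_stream_compress` is hypothetical, not part of this header:

#include "bzlib.h"

/* One-shot compress via the low-level stream API.
   Returns the number of bytes written to dst, or -1 on error. */
static int bz2_stream_compress(char *dst, unsigned int dstLen,
                               char *src, unsigned int srcLen)
{
    bz_stream s;
    int ret;
    unsigned int produced;

    s.bzalloc = NULL;   /* NULL => library falls back to malloc/free */
    s.bzfree = NULL;
    s.opaque = NULL;
    if (BZ2_bzCompressInit(&s, 9, 0, 0) != BZ_OK) return -1;

    s.next_in = src;   s.avail_in = srcLen;
    s.next_out = dst;  s.avail_out = dstLen;

    /* All input is present up front, so drive straight to BZ_FINISH:
       BZ_FINISH_OK means "call again", BZ_STREAM_END means done. */
    do {
        ret = BZ2_bzCompress(&s, BZ_FINISH);
    } while (ret == BZ_FINISH_OK && s.avail_out > 0);

    produced = dstLen - s.avail_out;
    BZ2_bzCompressEnd(&s);
    return (ret == BZ_STREAM_END) ? (int)produced : -1;
}

Exiting the loop with BZ_FINISH_OK and no output space left signals an undersized destination buffer, which maps to the BZ_OUTBUFF_FULL case in the one-shot wrappers.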
+
+
+
+/*-- High(er) level library functions --*/
+
+#ifndef BZ_NO_STDIO
+#define BZ_MAX_UNUSED 5000
+
+typedef void BZFILE;
+
+BZ_EXTERN BZFILE* BZ_API(BZ2_bzReadOpen) (
+ int* bzerror,
+ FILE* f,
+ int verbosity,
+ int small,
+ void* unused,
+ int nUnused
+ );
+
+BZ_EXTERN void BZ_API(BZ2_bzReadClose) (
+ int* bzerror,
+ BZFILE* b
+ );
+
+BZ_EXTERN void BZ_API(BZ2_bzReadGetUnused) (
+ int* bzerror,
+ BZFILE* b,
+ void** unused,
+ int* nUnused
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzRead) (
+ int* bzerror,
+ BZFILE* b,
+ void* buf,
+ int len
+ );
+
+BZ_EXTERN BZFILE* BZ_API(BZ2_bzWriteOpen) (
+ int* bzerror,
+ FILE* f,
+ int blockSize100k,
+ int verbosity,
+ int workFactor
+ );
+
+BZ_EXTERN void BZ_API(BZ2_bzWrite) (
+ int* bzerror,
+ BZFILE* b,
+ void* buf,
+ int len
+ );
+
+BZ_EXTERN void BZ_API(BZ2_bzWriteClose) (
+ int* bzerror,
+ BZFILE* b,
+ int abandon,
+ unsigned int* nbytes_in,
+ unsigned int* nbytes_out
+ );
+
+BZ_EXTERN void BZ_API(BZ2_bzWriteClose64) (
+ int* bzerror,
+ BZFILE* b,
+ int abandon,
+ unsigned int* nbytes_in_lo32,
+ unsigned int* nbytes_in_hi32,
+ unsigned int* nbytes_out_lo32,
+ unsigned int* nbytes_out_hi32
+ );
+#endif
+
+
+/*-- Utility functions --*/
+
+BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffCompress) (
+ char* dest,
+ unsigned int* destLen,
+ char* source,
+ unsigned int sourceLen,
+ int blockSize100k,
+ int verbosity,
+ int workFactor
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffDecompress) (
+ char* dest,
+ unsigned int* destLen,
+ char* source,
+ unsigned int sourceLen,
+ int small,
+ int verbosity
+ );
+
+
+/*--
+ Code contributed by Yoshioka Tsuneo (tsuneo@rr.iij4u.or.jp)
+ to support better zlib compatibility.
+ This code is not _officially_ part of libbzip2 (yet);
+ I haven't tested it, documented it, or considered the
+ threading-safeness of it.
+ If this code breaks, please contact both Yoshioka and me.
+--*/
+
+BZ_EXTERN const char * BZ_API(BZ2_bzlibVersion) (
+ void
+ );
+
+#ifndef BZ_NO_STDIO
+BZ_EXTERN BZFILE * BZ_API(BZ2_bzopen) (
+ const char *path,
+ const char *mode
+ );
+
+BZ_EXTERN BZFILE * BZ_API(BZ2_bzdopen) (
+ int fd,
+ const char *mode
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzread) (
+ BZFILE* b,
+ void* buf,
+ int len
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzwrite) (
+ BZFILE* b,
+ void* buf,
+ int len
+ );
+
+BZ_EXTERN int BZ_API(BZ2_bzflush) (
+ BZFILE* b
+ );
+
+BZ_EXTERN void BZ_API(BZ2_bzclose) (
+ BZFILE* b
+ );
+
+BZ_EXTERN const char * BZ_API(BZ2_bzerror) (
+ BZFILE *b,
+ int *errnum
+ );
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/*-------------------------------------------------------------*/
+/*--- end bzlib.h ---*/
+/*-------------------------------------------------------------*/
diff --git a/tools/lib/bz2/bzlib_private.h b/tools/lib/bz2/bzlib_private.h
new file mode 100644
index 0000000..3755a6f
--- /dev/null
+++ b/tools/lib/bz2/bzlib_private.h
@@ -0,0 +1,509 @@
+
+/*-------------------------------------------------------------*/
+/*--- Private header file for the library. ---*/
+/*--- bzlib_private.h ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+ This file is part of bzip2/libbzip2, a program and library for
+ lossless, block-sorting data compression.
+
+ bzip2/libbzip2 version 1.0.8 of 13 July 2019
+ Copyright (C) 1996-2019 Julian Seward
+
+ Please read the WARNING, DISCLAIMER and PATENTS sections in the
+ README file.
+
+ This program is released under the terms of the license contained
+ in the file LICENSE.
+ ------------------------------------------------------------------ */
+
+
+#ifndef _BZLIB_PRIVATE_H
+#define _BZLIB_PRIVATE_H
+
+#include <stdlib.h>
+
+#ifndef BZ_NO_STDIO
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "bzlib.h"
+
+
+
+/*-- General stuff.
--*/ + +#define BZ_VERSION "1.0.8, 13-Jul-2019" + +typedef char Char; +typedef unsigned char Bool; +typedef unsigned char UChar; +typedef int Int32; +typedef unsigned int UInt32; +typedef short Int16; +typedef unsigned short UInt16; + +#define True ((Bool)1) +#define False ((Bool)0) + +#ifndef __GNUC__ +#define __inline__ /* */ +#endif + +#ifndef BZ_NO_STDIO + +extern void BZ2_bz__AssertH__fail ( int errcode ); +#define AssertH(cond,errcode) \ + { if (!(cond)) BZ2_bz__AssertH__fail ( errcode ); } + +#if BZ_DEBUG +#define AssertD(cond,msg) \ + { if (!(cond)) { \ + fprintf ( stderr, \ + "\n\nlibbzip2(debug build): internal error\n\t%s\n", msg );\ + exit(1); \ + }} +#else +#define AssertD(cond,msg) /* */ +#endif + +#define VPrintf0(zf) \ + fprintf(stderr,zf) +#define VPrintf1(zf,za1) \ + fprintf(stderr,zf,za1) +#define VPrintf2(zf,za1,za2) \ + fprintf(stderr,zf,za1,za2) +#define VPrintf3(zf,za1,za2,za3) \ + fprintf(stderr,zf,za1,za2,za3) +#define VPrintf4(zf,za1,za2,za3,za4) \ + fprintf(stderr,zf,za1,za2,za3,za4) +#define VPrintf5(zf,za1,za2,za3,za4,za5) \ + fprintf(stderr,zf,za1,za2,za3,za4,za5) + +#else + +extern void bz_internal_error ( int errcode ); +#define AssertH(cond,errcode) \ + { if (!(cond)) bz_internal_error ( errcode ); } +#define AssertD(cond,msg) do { } while (0) +#define VPrintf0(zf) do { } while (0) +#define VPrintf1(zf,za1) do { } while (0) +#define VPrintf2(zf,za1,za2) do { } while (0) +#define VPrintf3(zf,za1,za2,za3) do { } while (0) +#define VPrintf4(zf,za1,za2,za3,za4) do { } while (0) +#define VPrintf5(zf,za1,za2,za3,za4,za5) do { } while (0) + +#endif + + +#define BZALLOC(nnn) (strm->bzalloc)(strm->opaque,(nnn),1) +#define BZFREE(ppp) (strm->bzfree)(strm->opaque,(ppp)) + + +/*-- Header bytes. --*/ + +#define BZ_HDR_B 0x42 /* 'B' */ +#define BZ_HDR_Z 0x5a /* 'Z' */ +#define BZ_HDR_h 0x68 /* 'h' */ +#define BZ_HDR_0 0x30 /* '0' */ + +/*-- Constants for the back end. --*/ + +#define BZ_MAX_ALPHA_SIZE 258 +#define BZ_MAX_CODE_LEN 23 + +#define BZ_RUNA 0 +#define BZ_RUNB 1 + +#define BZ_N_GROUPS 6 +#define BZ_G_SIZE 50 +#define BZ_N_ITERS 4 + +#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE)) + + + +/*-- Stuff for randomising repetitive blocks. --*/ + +extern Int32 BZ2_rNums[512]; + +#define BZ_RAND_DECLS \ + Int32 rNToGo; \ + Int32 rTPos \ + +#define BZ_RAND_INIT_MASK \ + s->rNToGo = 0; \ + s->rTPos = 0 \ + +#define BZ_RAND_MASK ((s->rNToGo == 1) ? 1 : 0) + +#define BZ_RAND_UPD_MASK \ + if (s->rNToGo == 0) { \ + s->rNToGo = BZ2_rNums[s->rTPos]; \ + s->rTPos++; \ + if (s->rTPos == 512) s->rTPos = 0; \ + } \ + s->rNToGo--; + + + +/*-- Stuff for doing CRCs. --*/ + +extern UInt32 BZ2_crc32Table[256]; + +#define BZ_INITIALISE_CRC(crcVar) \ +{ \ + crcVar = 0xffffffffL; \ +} + +#define BZ_FINALISE_CRC(crcVar) \ +{ \ + crcVar = ~(crcVar); \ +} + +#define BZ_UPDATE_CRC(crcVar,cha) \ +{ \ + crcVar = (crcVar << 8) ^ \ + BZ2_crc32Table[(crcVar >> 24) ^ \ + ((UChar)cha)]; \ +} + + + +/*-- States and modes for compression. --*/ + +#define BZ_M_IDLE 1 +#define BZ_M_RUNNING 2 +#define BZ_M_FLUSHING 3 +#define BZ_M_FINISHING 4 + +#define BZ_S_OUTPUT 1 +#define BZ_S_INPUT 2 + +#define BZ_N_RADIX 2 +#define BZ_N_QSORT 12 +#define BZ_N_SHELL 18 +#define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2) + + + + +/*-- Structure holding all the compression-side stuff. 
--*/ + +typedef + struct { + /* pointer back to the struct bz_stream */ + bz_stream* strm; + + /* mode this stream is in, and whether inputting */ + /* or outputting data */ + Int32 mode; + Int32 state; + + /* remembers avail_in when flush/finish requested */ + UInt32 avail_in_expect; + + /* for doing the block sorting */ + UInt32* arr1; + UInt32* arr2; + UInt32* ftab; + Int32 origPtr; + + /* aliases for arr1 and arr2 */ + UInt32* ptr; + UChar* block; + UInt16* mtfv; + UChar* zbits; + + /* for deciding when to use the fallback sorting algorithm */ + Int32 workFactor; + + /* run-length-encoding of the input */ + UInt32 state_in_ch; + Int32 state_in_len; + BZ_RAND_DECLS; + + /* input and output limits and current posns */ + Int32 nblock; + Int32 nblockMAX; + Int32 numZ; + Int32 state_out_pos; + + /* map of bytes used in block */ + Int32 nInUse; + Bool inUse[256]; + UChar unseqToSeq[256]; + + /* the buffer for bit stream creation */ + UInt32 bsBuff; + Int32 bsLive; + + /* block and combined CRCs */ + UInt32 blockCRC; + UInt32 combinedCRC; + + /* misc administratium */ + Int32 verbosity; + Int32 blockNo; + Int32 blockSize100k; + + /* stuff for coding the MTF values */ + Int32 nMTF; + Int32 mtfFreq [BZ_MAX_ALPHA_SIZE]; + UChar selector [BZ_MAX_SELECTORS]; + UChar selectorMtf[BZ_MAX_SELECTORS]; + + UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + Int32 code [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + Int32 rfreq [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + /* second dimension: only 3 needed; 4 makes index calculations faster */ + UInt32 len_pack[BZ_MAX_ALPHA_SIZE][4]; + + } + EState; + + + +/*-- externs for compression. --*/ + +extern void +BZ2_blockSort ( EState* ); + +extern void +BZ2_compressBlock ( EState*, Bool ); + +extern void +BZ2_bsInitWrite ( EState* ); + +extern void +BZ2_hbAssignCodes ( Int32*, UChar*, Int32, Int32, Int32 ); + +extern void +BZ2_hbMakeCodeLengths ( UChar*, Int32*, Int32, Int32 ); + + + +/*-- states for decompression. --*/ + +#define BZ_X_IDLE 1 +#define BZ_X_OUTPUT 2 + +#define BZ_X_MAGIC_1 10 +#define BZ_X_MAGIC_2 11 +#define BZ_X_MAGIC_3 12 +#define BZ_X_MAGIC_4 13 +#define BZ_X_BLKHDR_1 14 +#define BZ_X_BLKHDR_2 15 +#define BZ_X_BLKHDR_3 16 +#define BZ_X_BLKHDR_4 17 +#define BZ_X_BLKHDR_5 18 +#define BZ_X_BLKHDR_6 19 +#define BZ_X_BCRC_1 20 +#define BZ_X_BCRC_2 21 +#define BZ_X_BCRC_3 22 +#define BZ_X_BCRC_4 23 +#define BZ_X_RANDBIT 24 +#define BZ_X_ORIGPTR_1 25 +#define BZ_X_ORIGPTR_2 26 +#define BZ_X_ORIGPTR_3 27 +#define BZ_X_MAPPING_1 28 +#define BZ_X_MAPPING_2 29 +#define BZ_X_SELECTOR_1 30 +#define BZ_X_SELECTOR_2 31 +#define BZ_X_SELECTOR_3 32 +#define BZ_X_CODING_1 33 +#define BZ_X_CODING_2 34 +#define BZ_X_CODING_3 35 +#define BZ_X_MTF_1 36 +#define BZ_X_MTF_2 37 +#define BZ_X_MTF_3 38 +#define BZ_X_MTF_4 39 +#define BZ_X_MTF_5 40 +#define BZ_X_MTF_6 41 +#define BZ_X_ENDHDR_2 42 +#define BZ_X_ENDHDR_3 43 +#define BZ_X_ENDHDR_4 44 +#define BZ_X_ENDHDR_5 45 +#define BZ_X_ENDHDR_6 46 +#define BZ_X_CCRC_1 47 +#define BZ_X_CCRC_2 48 +#define BZ_X_CCRC_3 49 +#define BZ_X_CCRC_4 50 + + + +/*-- Constants for the fast MTF decoder. --*/ + +#define MTFA_SIZE 4096 +#define MTFL_SIZE 16 + + + +/*-- Structure holding all the decompression-side stuff. 
--*/ + +typedef + struct { + /* pointer back to the struct bz_stream */ + bz_stream* strm; + + /* state indicator for this stream */ + Int32 state; + + /* for doing the final run-length decoding */ + UChar state_out_ch; + Int32 state_out_len; + Bool blockRandomised; + BZ_RAND_DECLS; + + /* the buffer for bit stream reading */ + UInt32 bsBuff; + Int32 bsLive; + + /* misc administratium */ + Int32 blockSize100k; + Bool smallDecompress; + Int32 currBlockNo; + Int32 verbosity; + + /* for undoing the Burrows-Wheeler transform */ + Int32 origPtr; + UInt32 tPos; + Int32 k0; + Int32 unzftab[256]; + Int32 nblock_used; + Int32 cftab[257]; + Int32 cftabCopy[257]; + + /* for undoing the Burrows-Wheeler transform (FAST) */ + UInt32 *tt; + + /* for undoing the Burrows-Wheeler transform (SMALL) */ + UInt16 *ll16; + UChar *ll4; + + /* stored and calculated CRCs */ + UInt32 storedBlockCRC; + UInt32 storedCombinedCRC; + UInt32 calculatedBlockCRC; + UInt32 calculatedCombinedCRC; + + /* map of bytes used in block */ + Int32 nInUse; + Bool inUse[256]; + Bool inUse16[16]; + UChar seqToUnseq[256]; + + /* for decoding the MTF values */ + UChar mtfa [MTFA_SIZE]; + Int32 mtfbase[256 / MTFL_SIZE]; + UChar selector [BZ_MAX_SELECTORS]; + UChar selectorMtf[BZ_MAX_SELECTORS]; + UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + + Int32 limit [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + Int32 base [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + Int32 perm [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + Int32 minLens[BZ_N_GROUPS]; + + /* save area for scalars in the main decompress code */ + Int32 save_i; + Int32 save_j; + Int32 save_t; + Int32 save_alphaSize; + Int32 save_nGroups; + Int32 save_nSelectors; + Int32 save_EOB; + Int32 save_groupNo; + Int32 save_groupPos; + Int32 save_nextSym; + Int32 save_nblockMAX; + Int32 save_nblock; + Int32 save_es; + Int32 save_N; + Int32 save_curr; + Int32 save_zt; + Int32 save_zn; + Int32 save_zvec; + Int32 save_zj; + Int32 save_gSel; + Int32 save_gMinlen; + Int32* save_gLimit; + Int32* save_gBase; + Int32* save_gPerm; + + } + DState; + + + +/*-- Macros for decompression. --*/ + +#define BZ_GET_FAST(cccc) \ + /* c_tPos is unsigned, hence test < 0 is pointless. */ \ + if (s->tPos >= (UInt32)100000 * (UInt32)s->blockSize100k) return True; \ + s->tPos = s->tt[s->tPos]; \ + cccc = (UChar)(s->tPos & 0xff); \ + s->tPos >>= 8; + +#define BZ_GET_FAST_C(cccc) \ + /* c_tPos is unsigned, hence test < 0 is pointless. */ \ + if (c_tPos >= (UInt32)100000 * (UInt32)ro_blockSize100k) return True; \ + c_tPos = c_tt[c_tPos]; \ + cccc = (UChar)(c_tPos & 0xff); \ + c_tPos >>= 8; + +#define SET_LL4(i,n) \ + { if (((i) & 0x1) == 0) \ + s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0xf0) | (n); else \ + s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0x0f) | ((n) << 4); \ + } + +#define GET_LL4(i) \ + ((((UInt32)(s->ll4[(i) >> 1])) >> (((i) << 2) & 0x4)) & 0xF) + +#define SET_LL(i,n) \ + { s->ll16[i] = (UInt16)(n & 0x0000ffff); \ + SET_LL4(i, n >> 16); \ + } + +#define GET_LL(i) \ + (((UInt32)s->ll16[i]) | (GET_LL4(i) << 16)) + +#define BZ_GET_SMALL(cccc) \ + /* c_tPos is unsigned, hence test < 0 is pointless. */ \ + if (s->tPos >= (UInt32)100000 * (UInt32)s->blockSize100k) return True; \ + cccc = BZ2_indexIntoF ( s->tPos, s->cftab ); \ + s->tPos = GET_LL(s->tPos); + + +/*-- externs for decompression. 
--*/ + +extern Int32 +BZ2_indexIntoF ( Int32, Int32* ); + +extern Int32 +BZ2_decompress ( DState* ); + +extern void +BZ2_hbCreateDecodeTables ( Int32*, Int32*, Int32*, UChar*, + Int32, Int32, Int32 ); + + +#endif + + +/*-- BZ_NO_STDIO seems to make NULL disappear on some platforms. --*/ + +#ifdef BZ_NO_STDIO +#ifndef NULL +#define NULL 0 +#endif +#endif + + +/*-------------------------------------------------------------*/ +/*--- end bzlib_private.h ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/bz2/compress.c b/tools/lib/bz2/compress.c new file mode 100644 index 0000000..2dc5dc1 --- /dev/null +++ b/tools/lib/bz2/compress.c @@ -0,0 +1,672 @@ + +/*-------------------------------------------------------------*/ +/*--- Compression machinery (not incl block sorting) ---*/ +/*--- compress.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ + This file is part of bzip2/libbzip2, a program and library for + lossless, block-sorting data compression. + + bzip2/libbzip2 version 1.0.8 of 13 July 2019 + Copyright (C) 1996-2019 Julian Seward + + Please read the WARNING, DISCLAIMER and PATENTS sections in the + README file. + + This program is released under the terms of the license contained + in the file LICENSE. + ------------------------------------------------------------------ */ + + +/* CHANGES + 0.9.0 -- original version. + 0.9.0a/b -- no changes in this file. + 0.9.0c -- changed setting of nGroups in sendMTFValues() + so as to do a bit better on small files +*/ + +#include "bzlib_private.h" + + +/*---------------------------------------------------*/ +/*--- Bit stream I/O ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +void BZ2_bsInitWrite ( EState* s ) +{ + s->bsLive = 0; + s->bsBuff = 0; +} + + +/*---------------------------------------------------*/ +static +void bsFinishWrite ( EState* s ) +{ + while (s->bsLive > 0) { + s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24); + s->numZ++; + s->bsBuff <<= 8; + s->bsLive -= 8; + } +} + + +/*---------------------------------------------------*/ +#define bsNEEDW(nz) \ +{ \ + while (s->bsLive >= 8) { \ + s->zbits[s->numZ] \ + = (UChar)(s->bsBuff >> 24); \ + s->numZ++; \ + s->bsBuff <<= 8; \ + s->bsLive -= 8; \ + } \ +} + + +/*---------------------------------------------------*/ +static +__inline__ +void bsW ( EState* s, Int32 n, UInt32 v ) +{ + bsNEEDW ( n ); + s->bsBuff |= (v << (32 - s->bsLive - n)); + s->bsLive += n; +} + + +/*---------------------------------------------------*/ +static +void bsPutUInt32 ( EState* s, UInt32 u ) +{ + bsW ( s, 8, (u >> 24) & 0xffL ); + bsW ( s, 8, (u >> 16) & 0xffL ); + bsW ( s, 8, (u >> 8) & 0xffL ); + bsW ( s, 8, u & 0xffL ); +} + + +/*---------------------------------------------------*/ +static +void bsPutUChar ( EState* s, UChar c ) +{ + bsW( s, 8, (UInt32)c ); +} + + +/*---------------------------------------------------*/ +/*--- The back end proper ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +static +void makeMaps_e ( EState* s ) +{ + Int32 i; + s->nInUse = 0; + for (i = 0; i < 256; i++) + if (s->inUse[i]) { + s->unseqToSeq[i] = s->nInUse; + s->nInUse++; + } +} + + +/*---------------------------------------------------*/ +static +void generateMTFValues ( EState* s ) +{ + UChar yy[256]; + Int32 i, j; + Int32 zPend; + 
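+   /* zPend counts a pending run of zero MTF values (repeats of the byte
+      currently at the front of the MTF list); it is flushed below as a
+      string of RUNA/RUNB symbols. */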
Int32 wr; + Int32 EOB; + + /* + After sorting (eg, here), + s->arr1 [ 0 .. s->nblock-1 ] holds sorted order, + and + ((UChar*)s->arr2) [ 0 .. s->nblock-1 ] + holds the original block data. + + The first thing to do is generate the MTF values, + and put them in + ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ]. + Because there are strictly fewer or equal MTF values + than block values, ptr values in this area are overwritten + with MTF values only when they are no longer needed. + + The final compressed bitstream is generated into the + area starting at + (UChar*) (&((UChar*)s->arr2)[s->nblock]) + + These storage aliases are set up in bzCompressInit(), + except for the last one, which is arranged in + compressBlock(). + */ + UInt32* ptr = s->ptr; + UChar* block = s->block; + UInt16* mtfv = s->mtfv; + + makeMaps_e ( s ); + EOB = s->nInUse+1; + + for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0; + + wr = 0; + zPend = 0; + for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i; + + for (i = 0; i < s->nblock; i++) { + UChar ll_i; + AssertD ( wr <= i, "generateMTFValues(1)" ); + j = ptr[i]-1; if (j < 0) j += s->nblock; + ll_i = s->unseqToSeq[block[j]]; + AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" ); + + if (yy[0] == ll_i) { + zPend++; + } else { + + if (zPend > 0) { + zPend--; + while (True) { + if (zPend & 1) { + mtfv[wr] = BZ_RUNB; wr++; + s->mtfFreq[BZ_RUNB]++; + } else { + mtfv[wr] = BZ_RUNA; wr++; + s->mtfFreq[BZ_RUNA]++; + } + if (zPend < 2) break; + zPend = (zPend - 2) / 2; + }; + zPend = 0; + } + { + register UChar rtmp; + register UChar* ryy_j; + register UChar rll_i; + rtmp = yy[1]; + yy[1] = yy[0]; + ryy_j = &(yy[1]); + rll_i = ll_i; + while ( rll_i != rtmp ) { + register UChar rtmp2; + ryy_j++; + rtmp2 = rtmp; + rtmp = *ryy_j; + *ryy_j = rtmp2; + }; + yy[0] = rtmp; + j = ryy_j - &(yy[0]); + mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++; + } + + } + } + + if (zPend > 0) { + zPend--; + while (True) { + if (zPend & 1) { + mtfv[wr] = BZ_RUNB; wr++; + s->mtfFreq[BZ_RUNB]++; + } else { + mtfv[wr] = BZ_RUNA; wr++; + s->mtfFreq[BZ_RUNA]++; + } + if (zPend < 2) break; + zPend = (zPend - 2) / 2; + }; + zPend = 0; + } + + mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++; + + s->nMTF = wr; +} + + +/*---------------------------------------------------*/ +#define BZ_LESSER_ICOST 0 +#define BZ_GREATER_ICOST 15 + +static +void sendMTFValues ( EState* s ) +{ + Int32 v, t, i, j, gs, ge, totc, bt, bc, iter; + Int32 nSelectors, alphaSize, minLen, maxLen, selCtr; + Int32 nGroups, nBytes; + + /*-- + UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + is a global since the decoder also needs it. + + Int32 code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + Int32 rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + are also globals only used in this proc. + Made global to keep stack frame size small. 
+ --*/ + + + UInt16 cost[BZ_N_GROUPS]; + Int32 fave[BZ_N_GROUPS]; + + UInt16* mtfv = s->mtfv; + + if (s->verbosity >= 3) + VPrintf3( " %d in block, %d after MTF & 1-2 coding, " + "%d+2 syms in use\n", + s->nblock, s->nMTF, s->nInUse ); + + alphaSize = s->nInUse+2; + for (t = 0; t < BZ_N_GROUPS; t++) + for (v = 0; v < alphaSize; v++) + s->len[t][v] = BZ_GREATER_ICOST; + + /*--- Decide how many coding tables to use ---*/ + AssertH ( s->nMTF > 0, 3001 ); + if (s->nMTF < 200) nGroups = 2; else + if (s->nMTF < 600) nGroups = 3; else + if (s->nMTF < 1200) nGroups = 4; else + if (s->nMTF < 2400) nGroups = 5; else + nGroups = 6; + + /*--- Generate an initial set of coding tables ---*/ + { + Int32 nPart, remF, tFreq, aFreq; + + nPart = nGroups; + remF = s->nMTF; + gs = 0; + while (nPart > 0) { + tFreq = remF / nPart; + ge = gs-1; + aFreq = 0; + while (aFreq < tFreq && ge < alphaSize-1) { + ge++; + aFreq += s->mtfFreq[ge]; + } + + if (ge > gs + && nPart != nGroups && nPart != 1 + && ((nGroups-nPart) % 2 == 1)) { + aFreq -= s->mtfFreq[ge]; + ge--; + } + + if (s->verbosity >= 3) + VPrintf5( " initial group %d, [%d .. %d], " + "has %d syms (%4.1f%%)\n", + nPart, gs, ge, aFreq, + (100.0 * (float)aFreq) / (float)(s->nMTF) ); + + for (v = 0; v < alphaSize; v++) + if (v >= gs && v <= ge) + s->len[nPart-1][v] = BZ_LESSER_ICOST; else + s->len[nPart-1][v] = BZ_GREATER_ICOST; + + nPart--; + gs = ge+1; + remF -= aFreq; + } + } + + /*--- + Iterate up to BZ_N_ITERS times to improve the tables. + ---*/ + for (iter = 0; iter < BZ_N_ITERS; iter++) { + + for (t = 0; t < BZ_N_GROUPS; t++) fave[t] = 0; + + for (t = 0; t < nGroups; t++) + for (v = 0; v < alphaSize; v++) + s->rfreq[t][v] = 0; + + /*--- + Set up an auxiliary length table which is used to fast-track + the common case (nGroups == 6). + ---*/ + if (nGroups == 6) { + for (v = 0; v < alphaSize; v++) { + s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v]; + s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v]; + s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v]; + } + } + + nSelectors = 0; + totc = 0; + gs = 0; + while (True) { + + /*--- Set group start & end marks. --*/ + if (gs >= s->nMTF) break; + ge = gs + BZ_G_SIZE - 1; + if (ge >= s->nMTF) ge = s->nMTF-1; + + /*-- + Calculate the cost of this group as coded + by each of the coding tables. 
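+      A table's cost is simply the sum of its code lengths len[t][.] over
+      the group's symbols, i.e. the number of bits the group would occupy
+      if coded with that table.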
+ --*/ + for (t = 0; t < BZ_N_GROUPS; t++) cost[t] = 0; + + if (nGroups == 6 && 50 == ge-gs+1) { + /*--- fast track the common case ---*/ + register UInt32 cost01, cost23, cost45; + register UInt16 icv; + cost01 = cost23 = cost45 = 0; + +# define BZ_ITER(nn) \ + icv = mtfv[gs+(nn)]; \ + cost01 += s->len_pack[icv][0]; \ + cost23 += s->len_pack[icv][1]; \ + cost45 += s->len_pack[icv][2]; \ + + BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4); + BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9); + BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14); + BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19); + BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24); + BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29); + BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34); + BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39); + BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44); + BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49); + +# undef BZ_ITER + + cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16; + cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16; + cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16; + + } else { + /*--- slow version which correctly handles all situations ---*/ + for (i = gs; i <= ge; i++) { + UInt16 icv = mtfv[i]; + for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv]; + } + } + + /*-- + Find the coding table which is best for this group, + and record its identity in the selector table. + --*/ + bc = 999999999; bt = -1; + for (t = 0; t < nGroups; t++) + if (cost[t] < bc) { bc = cost[t]; bt = t; }; + totc += bc; + fave[bt]++; + s->selector[nSelectors] = bt; + nSelectors++; + + /*-- + Increment the symbol frequencies for the selected table. + --*/ + if (nGroups == 6 && 50 == ge-gs+1) { + /*--- fast track the common case ---*/ + +# define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++ + + BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4); + BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9); + BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14); + BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19); + BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24); + BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29); + BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34); + BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39); + BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44); + BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49); + +# undef BZ_ITUR + + } else { + /*--- slow version which correctly handles all situations ---*/ + for (i = gs; i <= ge; i++) + s->rfreq[bt][ mtfv[i] ]++; + } + + gs = ge+1; + } + if (s->verbosity >= 3) { + VPrintf2 ( " pass %d: size is %d, grp uses are ", + iter+1, totc/8 ); + for (t = 0; t < nGroups; t++) + VPrintf1 ( "%d ", fave[t] ); + VPrintf0 ( "\n" ); + } + + /*-- + Recompute the tables based on the accumulated frequencies. + --*/ + /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See + comment in huffman.c for details. */ + for (t = 0; t < nGroups; t++) + BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]), + alphaSize, 17 /*20*/ ); + } + + + AssertH( nGroups < 8, 3002 ); + AssertH( nSelectors < 32768 && + nSelectors <= BZ_MAX_SELECTORS, + 3003 ); + + + /*--- Compute MTF values for the selectors. 
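+      The selector sequence gets the same move-to-front treatment as the
+      block's byte values, so runs of a single table's selectors shrink to
+      runs of zeroes ahead of the unary encoding below.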
---*/ + { + UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp; + for (i = 0; i < nGroups; i++) pos[i] = i; + for (i = 0; i < nSelectors; i++) { + ll_i = s->selector[i]; + j = 0; + tmp = pos[j]; + while ( ll_i != tmp ) { + j++; + tmp2 = tmp; + tmp = pos[j]; + pos[j] = tmp2; + }; + pos[0] = tmp; + s->selectorMtf[i] = j; + } + }; + + /*--- Assign actual codes for the tables. --*/ + for (t = 0; t < nGroups; t++) { + minLen = 32; + maxLen = 0; + for (i = 0; i < alphaSize; i++) { + if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; + if (s->len[t][i] < minLen) minLen = s->len[t][i]; + } + AssertH ( !(maxLen > 17 /*20*/ ), 3004 ); + AssertH ( !(minLen < 1), 3005 ); + BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]), + minLen, maxLen, alphaSize ); + } + + /*--- Transmit the mapping table. ---*/ + { + Bool inUse16[16]; + for (i = 0; i < 16; i++) { + inUse16[i] = False; + for (j = 0; j < 16; j++) + if (s->inUse[i * 16 + j]) inUse16[i] = True; + } + + nBytes = s->numZ; + for (i = 0; i < 16; i++) + if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0); + + for (i = 0; i < 16; i++) + if (inUse16[i]) + for (j = 0; j < 16; j++) { + if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0); + } + + if (s->verbosity >= 3) + VPrintf1( " bytes: mapping %d, ", s->numZ-nBytes ); + } + + /*--- Now the selectors. ---*/ + nBytes = s->numZ; + bsW ( s, 3, nGroups ); + bsW ( s, 15, nSelectors ); + for (i = 0; i < nSelectors; i++) { + for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1); + bsW(s,1,0); + } + if (s->verbosity >= 3) + VPrintf1( "selectors %d, ", s->numZ-nBytes ); + + /*--- Now the coding tables. ---*/ + nBytes = s->numZ; + + for (t = 0; t < nGroups; t++) { + Int32 curr = s->len[t][0]; + bsW ( s, 5, curr ); + for (i = 0; i < alphaSize; i++) { + while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ }; + while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ }; + bsW ( s, 1, 0 ); + } + } + + if (s->verbosity >= 3) + VPrintf1 ( "code lengths %d, ", s->numZ-nBytes ); + + /*--- And finally, the block data proper ---*/ + nBytes = s->numZ; + selCtr = 0; + gs = 0; + while (True) { + if (gs >= s->nMTF) break; + ge = gs + BZ_G_SIZE - 1; + if (ge >= s->nMTF) ge = s->nMTF-1; + AssertH ( s->selector[selCtr] < nGroups, 3006 ); + + if (nGroups == 6 && 50 == ge-gs+1) { + /*--- fast track the common case ---*/ + UInt16 mtfv_i; + UChar* s_len_sel_selCtr + = &(s->len[s->selector[selCtr]][0]); + Int32* s_code_sel_selCtr + = &(s->code[s->selector[selCtr]][0]); + +# define BZ_ITAH(nn) \ + mtfv_i = mtfv[gs+(nn)]; \ + bsW ( s, \ + s_len_sel_selCtr[mtfv_i], \ + s_code_sel_selCtr[mtfv_i] ) + + BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4); + BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9); + BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14); + BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19); + BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24); + BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29); + BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34); + BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39); + BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44); + BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49); + +# undef BZ_ITAH + + } else { + /*--- slow version which correctly handles all situations ---*/ + for (i = gs; i <= ge; i++) { + bsW ( s, + s->len [s->selector[selCtr]] [mtfv[i]], + s->code [s->selector[selCtr]] [mtfv[i]] ); + } + } + + + gs = ge+1; + selCtr++; + } + AssertH( selCtr == nSelectors, 3007 ); 
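+
+   /* Worked bounds for the 3003 assertion: BZ_MAX_SELECTORS = 2 + 900000/BZ_G_SIZE
+      = 18002, and even a maximal 900k block emits at most ceil((900000+1)/50) = 18001
+      groups, so nSelectors also fits the 15-bit field written above (18002 < 32768).
+      Each selectorMtf value j went out as j 1-bits followed by a 0-bit, e.g. 3 -> 1110. */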
+ + if (s->verbosity >= 3) + VPrintf1( "codes %d\n", s->numZ-nBytes ); +} + + +/*---------------------------------------------------*/ +void BZ2_compressBlock ( EState* s, Bool is_last_block ) +{ + if (s->nblock > 0) { + + BZ_FINALISE_CRC ( s->blockCRC ); + s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31); + s->combinedCRC ^= s->blockCRC; + if (s->blockNo > 1) s->numZ = 0; + + if (s->verbosity >= 2) + VPrintf4( " block %d: crc = 0x%08x, " + "combined CRC = 0x%08x, size = %d\n", + s->blockNo, s->blockCRC, s->combinedCRC, s->nblock ); + + BZ2_blockSort ( s ); + } + + s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]); + + /*-- If this is the first block, create the stream header. --*/ + if (s->blockNo == 1) { + BZ2_bsInitWrite ( s ); + bsPutUChar ( s, BZ_HDR_B ); + bsPutUChar ( s, BZ_HDR_Z ); + bsPutUChar ( s, BZ_HDR_h ); + bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) ); + } + + if (s->nblock > 0) { + + bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 ); + bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 ); + bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 ); + + /*-- Now the block's CRC, so it is in a known place. --*/ + bsPutUInt32 ( s, s->blockCRC ); + + /*-- + Now a single bit indicating (non-)randomisation. + As of version 0.9.5, we use a better sorting algorithm + which makes randomisation unnecessary. So always set + the randomised bit to 'no'. Of course, the decoder + still needs to be able to handle randomised blocks + so as to maintain backwards compatibility with + older versions of bzip2. + --*/ + bsW(s,1,0); + + bsW ( s, 24, s->origPtr ); + generateMTFValues ( s ); + sendMTFValues ( s ); + } + + + /*-- If this is the last block, add the stream trailer. --*/ + if (is_last_block) { + + bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 ); + bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 ); + bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 ); + bsPutUInt32 ( s, s->combinedCRC ); + if (s->verbosity >= 2) + VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC ); + bsFinishWrite ( s ); + } +} + + +/*-------------------------------------------------------------*/ +/*--- end compress.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/bz2/crctable.c b/tools/lib/bz2/crctable.c new file mode 100644 index 0000000..2b33c25 --- /dev/null +++ b/tools/lib/bz2/crctable.c @@ -0,0 +1,104 @@ + +/*-------------------------------------------------------------*/ +/*--- Table for doing CRCs ---*/ +/*--- crctable.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ + This file is part of bzip2/libbzip2, a program and library for + lossless, block-sorting data compression. + + bzip2/libbzip2 version 1.0.8 of 13 July 2019 + Copyright (C) 1996-2019 Julian Seward + + Please read the WARNING, DISCLAIMER and PATENTS sections in the + README file. + + This program is released under the terms of the license contained + in the file LICENSE. + ------------------------------------------------------------------ */ + + +#include "bzlib_private.h" + +/*-- + I think this is an implementation of the AUTODIN-II, + Ethernet & FDDI 32-bit CRC standard. Vaguely derived + from code by Rob Warnock, in Section 51 of the + comp.compression FAQ. +--*/ + +UInt32 BZ2_crc32Table[256] = { + + /*-- Ugly, innit? 
--*/ + + 0x00000000L, 0x04c11db7L, 0x09823b6eL, 0x0d4326d9L, + 0x130476dcL, 0x17c56b6bL, 0x1a864db2L, 0x1e475005L, + 0x2608edb8L, 0x22c9f00fL, 0x2f8ad6d6L, 0x2b4bcb61L, + 0x350c9b64L, 0x31cd86d3L, 0x3c8ea00aL, 0x384fbdbdL, + 0x4c11db70L, 0x48d0c6c7L, 0x4593e01eL, 0x4152fda9L, + 0x5f15adacL, 0x5bd4b01bL, 0x569796c2L, 0x52568b75L, + 0x6a1936c8L, 0x6ed82b7fL, 0x639b0da6L, 0x675a1011L, + 0x791d4014L, 0x7ddc5da3L, 0x709f7b7aL, 0x745e66cdL, + 0x9823b6e0L, 0x9ce2ab57L, 0x91a18d8eL, 0x95609039L, + 0x8b27c03cL, 0x8fe6dd8bL, 0x82a5fb52L, 0x8664e6e5L, + 0xbe2b5b58L, 0xbaea46efL, 0xb7a96036L, 0xb3687d81L, + 0xad2f2d84L, 0xa9ee3033L, 0xa4ad16eaL, 0xa06c0b5dL, + 0xd4326d90L, 0xd0f37027L, 0xddb056feL, 0xd9714b49L, + 0xc7361b4cL, 0xc3f706fbL, 0xceb42022L, 0xca753d95L, + 0xf23a8028L, 0xf6fb9d9fL, 0xfbb8bb46L, 0xff79a6f1L, + 0xe13ef6f4L, 0xe5ffeb43L, 0xe8bccd9aL, 0xec7dd02dL, + 0x34867077L, 0x30476dc0L, 0x3d044b19L, 0x39c556aeL, + 0x278206abL, 0x23431b1cL, 0x2e003dc5L, 0x2ac12072L, + 0x128e9dcfL, 0x164f8078L, 0x1b0ca6a1L, 0x1fcdbb16L, + 0x018aeb13L, 0x054bf6a4L, 0x0808d07dL, 0x0cc9cdcaL, + 0x7897ab07L, 0x7c56b6b0L, 0x71159069L, 0x75d48ddeL, + 0x6b93dddbL, 0x6f52c06cL, 0x6211e6b5L, 0x66d0fb02L, + 0x5e9f46bfL, 0x5a5e5b08L, 0x571d7dd1L, 0x53dc6066L, + 0x4d9b3063L, 0x495a2dd4L, 0x44190b0dL, 0x40d816baL, + 0xaca5c697L, 0xa864db20L, 0xa527fdf9L, 0xa1e6e04eL, + 0xbfa1b04bL, 0xbb60adfcL, 0xb6238b25L, 0xb2e29692L, + 0x8aad2b2fL, 0x8e6c3698L, 0x832f1041L, 0x87ee0df6L, + 0x99a95df3L, 0x9d684044L, 0x902b669dL, 0x94ea7b2aL, + 0xe0b41de7L, 0xe4750050L, 0xe9362689L, 0xedf73b3eL, + 0xf3b06b3bL, 0xf771768cL, 0xfa325055L, 0xfef34de2L, + 0xc6bcf05fL, 0xc27dede8L, 0xcf3ecb31L, 0xcbffd686L, + 0xd5b88683L, 0xd1799b34L, 0xdc3abdedL, 0xd8fba05aL, + 0x690ce0eeL, 0x6dcdfd59L, 0x608edb80L, 0x644fc637L, + 0x7a089632L, 0x7ec98b85L, 0x738aad5cL, 0x774bb0ebL, + 0x4f040d56L, 0x4bc510e1L, 0x46863638L, 0x42472b8fL, + 0x5c007b8aL, 0x58c1663dL, 0x558240e4L, 0x51435d53L, + 0x251d3b9eL, 0x21dc2629L, 0x2c9f00f0L, 0x285e1d47L, + 0x36194d42L, 0x32d850f5L, 0x3f9b762cL, 0x3b5a6b9bL, + 0x0315d626L, 0x07d4cb91L, 0x0a97ed48L, 0x0e56f0ffL, + 0x1011a0faL, 0x14d0bd4dL, 0x19939b94L, 0x1d528623L, + 0xf12f560eL, 0xf5ee4bb9L, 0xf8ad6d60L, 0xfc6c70d7L, + 0xe22b20d2L, 0xe6ea3d65L, 0xeba91bbcL, 0xef68060bL, + 0xd727bbb6L, 0xd3e6a601L, 0xdea580d8L, 0xda649d6fL, + 0xc423cd6aL, 0xc0e2d0ddL, 0xcda1f604L, 0xc960ebb3L, + 0xbd3e8d7eL, 0xb9ff90c9L, 0xb4bcb610L, 0xb07daba7L, + 0xae3afba2L, 0xaafbe615L, 0xa7b8c0ccL, 0xa379dd7bL, + 0x9b3660c6L, 0x9ff77d71L, 0x92b45ba8L, 0x9675461fL, + 0x8832161aL, 0x8cf30badL, 0x81b02d74L, 0x857130c3L, + 0x5d8a9099L, 0x594b8d2eL, 0x5408abf7L, 0x50c9b640L, + 0x4e8ee645L, 0x4a4ffbf2L, 0x470cdd2bL, 0x43cdc09cL, + 0x7b827d21L, 0x7f436096L, 0x7200464fL, 0x76c15bf8L, + 0x68860bfdL, 0x6c47164aL, 0x61043093L, 0x65c52d24L, + 0x119b4be9L, 0x155a565eL, 0x18197087L, 0x1cd86d30L, + 0x029f3d35L, 0x065e2082L, 0x0b1d065bL, 0x0fdc1becL, + 0x3793a651L, 0x3352bbe6L, 0x3e119d3fL, 0x3ad08088L, + 0x2497d08dL, 0x2056cd3aL, 0x2d15ebe3L, 0x29d4f654L, + 0xc5a92679L, 0xc1683bceL, 0xcc2b1d17L, 0xc8ea00a0L, + 0xd6ad50a5L, 0xd26c4d12L, 0xdf2f6bcbL, 0xdbee767cL, + 0xe3a1cbc1L, 0xe760d676L, 0xea23f0afL, 0xeee2ed18L, + 0xf0a5bd1dL, 0xf464a0aaL, 0xf9278673L, 0xfde69bc4L, + 0x89b8fd09L, 0x8d79e0beL, 0x803ac667L, 0x84fbdbd0L, + 0x9abc8bd5L, 0x9e7d9662L, 0x933eb0bbL, 0x97ffad0cL, + 0xafb010b1L, 0xab710d06L, 0xa6322bdfL, 0xa2f33668L, + 0xbcb4666dL, 0xb8757bdaL, 0xb5365d03L, 0xb1f740b4L +}; + + +/*-------------------------------------------------------------*/ +/*--- end 
crctable.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/bz2/decompress.c b/tools/lib/bz2/decompress.c new file mode 100644 index 0000000..a1a0bac --- /dev/null +++ b/tools/lib/bz2/decompress.c @@ -0,0 +1,652 @@ + +/*-------------------------------------------------------------*/ +/*--- Decompression machinery ---*/ +/*--- decompress.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ + This file is part of bzip2/libbzip2, a program and library for + lossless, block-sorting data compression. + + bzip2/libbzip2 version 1.0.8 of 13 July 2019 + Copyright (C) 1996-2019 Julian Seward + + Please read the WARNING, DISCLAIMER and PATENTS sections in the + README file. + + This program is released under the terms of the license contained + in the file LICENSE. + ------------------------------------------------------------------ */ + + +#include "bzlib_private.h" + + +/*---------------------------------------------------*/ +static +void makeMaps_d ( DState* s ) +{ + Int32 i; + s->nInUse = 0; + for (i = 0; i < 256; i++) + if (s->inUse[i]) { + s->seqToUnseq[s->nInUse] = i; + s->nInUse++; + } +} + + +/*---------------------------------------------------*/ +#define RETURN(rrr) \ + { retVal = rrr; goto save_state_and_return; }; + +#define GET_BITS(lll,vvv,nnn) \ + case lll: s->state = lll; \ + while (True) { \ + if (s->bsLive >= nnn) { \ + UInt32 v; \ + v = (s->bsBuff >> \ + (s->bsLive-nnn)) & ((1 << nnn)-1); \ + s->bsLive -= nnn; \ + vvv = v; \ + break; \ + } \ + if (s->strm->avail_in == 0) RETURN(BZ_OK); \ + s->bsBuff \ + = (s->bsBuff << 8) | \ + ((UInt32) \ + (*((UChar*)(s->strm->next_in)))); \ + s->bsLive += 8; \ + s->strm->next_in++; \ + s->strm->avail_in--; \ + s->strm->total_in_lo32++; \ + if (s->strm->total_in_lo32 == 0) \ + s->strm->total_in_hi32++; \ + } + +#define GET_UCHAR(lll,uuu) \ + GET_BITS(lll,uuu,8) + +#define GET_BIT(lll,uuu) \ + GET_BITS(lll,uuu,1) + +/*---------------------------------------------------*/ +#define GET_MTF_VAL(label1,label2,lval) \ +{ \ + if (groupPos == 0) { \ + groupNo++; \ + if (groupNo >= nSelectors) \ + RETURN(BZ_DATA_ERROR); \ + groupPos = BZ_G_SIZE; \ + gSel = s->selector[groupNo]; \ + gMinlen = s->minLens[gSel]; \ + gLimit = &(s->limit[gSel][0]); \ + gPerm = &(s->perm[gSel][0]); \ + gBase = &(s->base[gSel][0]); \ + } \ + groupPos--; \ + zn = gMinlen; \ + GET_BITS(label1, zvec, zn); \ + while (1) { \ + if (zn > 20 /* the longest code */) \ + RETURN(BZ_DATA_ERROR); \ + if (zvec <= gLimit[zn]) break; \ + zn++; \ + GET_BIT(label2, zj); \ + zvec = (zvec << 1) | zj; \ + }; \ + if (zvec - gBase[zn] < 0 \ + || zvec - gBase[zn] >= BZ_MAX_ALPHA_SIZE) \ + RETURN(BZ_DATA_ERROR); \ + lval = gPerm[zvec - gBase[zn]]; \ +} + + +/*---------------------------------------------------*/ +Int32 BZ2_decompress ( DState* s ) +{ + UChar uc; + Int32 retVal; + Int32 minLen, maxLen; + bz_stream* strm = s->strm; + + /* stuff that needs to be saved/restored */ + Int32 i; + Int32 j; + Int32 t; + Int32 alphaSize; + Int32 nGroups; + Int32 nSelectors; + Int32 EOB; + Int32 groupNo; + Int32 groupPos; + Int32 nextSym; + Int32 nblockMAX; + Int32 nblock; + Int32 es; + Int32 N; + Int32 curr; + Int32 zt; + Int32 zn; + Int32 zvec; + Int32 zj; + Int32 gSel; + Int32 gMinlen; + Int32* gLimit; + Int32* gBase; + Int32* gPerm; + + if (s->state == BZ_X_MAGIC_1) { + /*initialise the save area*/ + s->save_i = 0; + s->save_j = 0; + s->save_t = 0; + s->save_alphaSize 
= 0; + s->save_nGroups = 0; + s->save_nSelectors = 0; + s->save_EOB = 0; + s->save_groupNo = 0; + s->save_groupPos = 0; + s->save_nextSym = 0; + s->save_nblockMAX = 0; + s->save_nblock = 0; + s->save_es = 0; + s->save_N = 0; + s->save_curr = 0; + s->save_zt = 0; + s->save_zn = 0; + s->save_zvec = 0; + s->save_zj = 0; + s->save_gSel = 0; + s->save_gMinlen = 0; + s->save_gLimit = NULL; + s->save_gBase = NULL; + s->save_gPerm = NULL; + } + + /*restore from the save area*/ + i = s->save_i; + j = s->save_j; + t = s->save_t; + alphaSize = s->save_alphaSize; + nGroups = s->save_nGroups; + nSelectors = s->save_nSelectors; + EOB = s->save_EOB; + groupNo = s->save_groupNo; + groupPos = s->save_groupPos; + nextSym = s->save_nextSym; + nblockMAX = s->save_nblockMAX; + nblock = s->save_nblock; + es = s->save_es; + N = s->save_N; + curr = s->save_curr; + zt = s->save_zt; + zn = s->save_zn; + zvec = s->save_zvec; + zj = s->save_zj; + gSel = s->save_gSel; + gMinlen = s->save_gMinlen; + gLimit = s->save_gLimit; + gBase = s->save_gBase; + gPerm = s->save_gPerm; + + retVal = BZ_OK; + + switch (s->state) { + + GET_UCHAR(BZ_X_MAGIC_1, uc); + if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC); + + GET_UCHAR(BZ_X_MAGIC_2, uc); + if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC); + + GET_UCHAR(BZ_X_MAGIC_3, uc) + if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC); + + GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8) + if (s->blockSize100k < (BZ_HDR_0 + 1) || + s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC); + s->blockSize100k -= BZ_HDR_0; + + if (s->smallDecompress) { + s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) ); + s->ll4 = BZALLOC( + ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) + ); + if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR); + } else { + s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) ); + if (s->tt == NULL) RETURN(BZ_MEM_ERROR); + } + + GET_UCHAR(BZ_X_BLKHDR_1, uc); + + if (uc == 0x17) goto endhdr_2; + if (uc != 0x31) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_BLKHDR_2, uc); + if (uc != 0x41) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_BLKHDR_3, uc); + if (uc != 0x59) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_BLKHDR_4, uc); + if (uc != 0x26) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_BLKHDR_5, uc); + if (uc != 0x53) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_BLKHDR_6, uc); + if (uc != 0x59) RETURN(BZ_DATA_ERROR); + + s->currBlockNo++; + if (s->verbosity >= 2) + VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo ); + + s->storedBlockCRC = 0; + GET_UCHAR(BZ_X_BCRC_1, uc); + s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); + GET_UCHAR(BZ_X_BCRC_2, uc); + s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); + GET_UCHAR(BZ_X_BCRC_3, uc); + s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); + GET_UCHAR(BZ_X_BCRC_4, uc); + s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); + + GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1); + + s->origPtr = 0; + GET_UCHAR(BZ_X_ORIGPTR_1, uc); + s->origPtr = (s->origPtr << 8) | ((Int32)uc); + GET_UCHAR(BZ_X_ORIGPTR_2, uc); + s->origPtr = (s->origPtr << 8) | ((Int32)uc); + GET_UCHAR(BZ_X_ORIGPTR_3, uc); + s->origPtr = (s->origPtr << 8) | ((Int32)uc); + + if (s->origPtr < 0) + RETURN(BZ_DATA_ERROR); + if (s->origPtr > 10 + 100000*s->blockSize100k) + RETURN(BZ_DATA_ERROR); + + /*--- Receive the mapping table ---*/ + for (i = 0; i < 16; i++) { + GET_BIT(BZ_X_MAPPING_1, uc); + if (uc == 1) + s->inUse16[i] = True; else + s->inUse16[i] = False; + } + + for (i = 0; i < 256; i++) s->inUse[i] = False; + + for (i 
= 0; i < 16; i++) + if (s->inUse16[i]) + for (j = 0; j < 16; j++) { + GET_BIT(BZ_X_MAPPING_2, uc); + if (uc == 1) s->inUse[i * 16 + j] = True; + } + makeMaps_d ( s ); + if (s->nInUse == 0) RETURN(BZ_DATA_ERROR); + alphaSize = s->nInUse+2; + + /*--- Now the selectors ---*/ + GET_BITS(BZ_X_SELECTOR_1, nGroups, 3); + if (nGroups < 2 || nGroups > BZ_N_GROUPS) RETURN(BZ_DATA_ERROR); + GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15); + if (nSelectors < 1) RETURN(BZ_DATA_ERROR); + for (i = 0; i < nSelectors; i++) { + j = 0; + while (True) { + GET_BIT(BZ_X_SELECTOR_3, uc); + if (uc == 0) break; + j++; + if (j >= nGroups) RETURN(BZ_DATA_ERROR); + } + /* Having more than BZ_MAX_SELECTORS doesn't make much sense + since they will never be used, but some implementations might + "round up" the number of selectors, so just ignore those. */ + if (i < BZ_MAX_SELECTORS) + s->selectorMtf[i] = j; + } + if (nSelectors > BZ_MAX_SELECTORS) + nSelectors = BZ_MAX_SELECTORS; + + /*--- Undo the MTF values for the selectors. ---*/ + { + UChar pos[BZ_N_GROUPS], tmp, v; + for (v = 0; v < nGroups; v++) pos[v] = v; + + for (i = 0; i < nSelectors; i++) { + v = s->selectorMtf[i]; + tmp = pos[v]; + while (v > 0) { pos[v] = pos[v-1]; v--; } + pos[0] = tmp; + s->selector[i] = tmp; + } + } + + /*--- Now the coding tables ---*/ + for (t = 0; t < nGroups; t++) { + GET_BITS(BZ_X_CODING_1, curr, 5); + for (i = 0; i < alphaSize; i++) { + while (True) { + if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR); + GET_BIT(BZ_X_CODING_2, uc); + if (uc == 0) break; + GET_BIT(BZ_X_CODING_3, uc); + if (uc == 0) curr++; else curr--; + } + s->len[t][i] = curr; + } + } + + /*--- Create the Huffman decoding tables ---*/ + for (t = 0; t < nGroups; t++) { + minLen = 32; + maxLen = 0; + for (i = 0; i < alphaSize; i++) { + if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; + if (s->len[t][i] < minLen) minLen = s->len[t][i]; + } + BZ2_hbCreateDecodeTables ( + &(s->limit[t][0]), + &(s->base[t][0]), + &(s->perm[t][0]), + &(s->len[t][0]), + minLen, maxLen, alphaSize + ); + s->minLens[t] = minLen; + } + + /*--- Now the MTF values ---*/ + + EOB = s->nInUse+1; + nblockMAX = 100000 * s->blockSize100k; + groupNo = -1; + groupPos = 0; + + for (i = 0; i <= 255; i++) s->unzftab[i] = 0; + + /*-- MTF init --*/ + { + Int32 ii, jj, kk; + kk = MTFA_SIZE-1; + for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) { + for (jj = MTFL_SIZE-1; jj >= 0; jj--) { + s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj); + kk--; + } + s->mtfbase[ii] = kk + 1; + } + } + /*-- end MTF init --*/ + + nblock = 0; + GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym); + + while (True) { + + if (nextSym == EOB) break; + + if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) { + + es = -1; + N = 1; + do { + /* Check that N doesn't get too big, so that es doesn't + go negative. The maximum value that can be + RUNA/RUNB encoded is equal to the block size (post + the initial RLE), viz, 900k, so bounding N at 2 + million should guard against overflow without + rejecting any legitimate inputs. 
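+            Concretely, each RUNA adds N and each RUNB adds 2N before N
+            doubles, so with N held below 2*1024*1024 the accumulated es
+            stays under 2^23 and cannot wrap an Int32.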
*/ + if (N >= 2*1024*1024) RETURN(BZ_DATA_ERROR); + if (nextSym == BZ_RUNA) es = es + (0+1) * N; else + if (nextSym == BZ_RUNB) es = es + (1+1) * N; + N = N * 2; + GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym); + } + while (nextSym == BZ_RUNA || nextSym == BZ_RUNB); + + es++; + uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ]; + s->unzftab[uc] += es; + + if (s->smallDecompress) + while (es > 0) { + if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); + s->ll16[nblock] = (UInt16)uc; + nblock++; + es--; + } + else + while (es > 0) { + if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); + s->tt[nblock] = (UInt32)uc; + nblock++; + es--; + }; + + continue; + + } else { + + if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); + + /*-- uc = MTF ( nextSym-1 ) --*/ + { + Int32 ii, jj, kk, pp, lno, off; + UInt32 nn; + nn = (UInt32)(nextSym - 1); + + if (nn < MTFL_SIZE) { + /* avoid general-case expense */ + pp = s->mtfbase[0]; + uc = s->mtfa[pp+nn]; + while (nn > 3) { + Int32 z = pp+nn; + s->mtfa[(z) ] = s->mtfa[(z)-1]; + s->mtfa[(z)-1] = s->mtfa[(z)-2]; + s->mtfa[(z)-2] = s->mtfa[(z)-3]; + s->mtfa[(z)-3] = s->mtfa[(z)-4]; + nn -= 4; + } + while (nn > 0) { + s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--; + }; + s->mtfa[pp] = uc; + } else { + /* general case */ + lno = nn / MTFL_SIZE; + off = nn % MTFL_SIZE; + pp = s->mtfbase[lno] + off; + uc = s->mtfa[pp]; + while (pp > s->mtfbase[lno]) { + s->mtfa[pp] = s->mtfa[pp-1]; pp--; + }; + s->mtfbase[lno]++; + while (lno > 0) { + s->mtfbase[lno]--; + s->mtfa[s->mtfbase[lno]] + = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1]; + lno--; + } + s->mtfbase[0]--; + s->mtfa[s->mtfbase[0]] = uc; + if (s->mtfbase[0] == 0) { + kk = MTFA_SIZE-1; + for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) { + for (jj = MTFL_SIZE-1; jj >= 0; jj--) { + s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj]; + kk--; + } + s->mtfbase[ii] = kk + 1; + } + } + } + } + /*-- end uc = MTF ( nextSym-1 ) --*/ + + s->unzftab[s->seqToUnseq[uc]]++; + if (s->smallDecompress) + s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else + s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]); + nblock++; + + GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym); + continue; + } + } + + /* Now we know what nblock is, we can do a better sanity + check on s->origPtr. + */ + if (s->origPtr < 0 || s->origPtr >= nblock) + RETURN(BZ_DATA_ERROR); + + /*-- Set up cftab to facilitate generation of T^(-1) --*/ + /* Check: unzftab entries in range. */ + for (i = 0; i <= 255; i++) { + if (s->unzftab[i] < 0 || s->unzftab[i] > nblock) + RETURN(BZ_DATA_ERROR); + } + /* Actually generate cftab. */ + s->cftab[0] = 0; + for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1]; + for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1]; + /* Check: cftab entries in range. */ + for (i = 0; i <= 256; i++) { + if (s->cftab[i] < 0 || s->cftab[i] > nblock) { + /* s->cftab[i] can legitimately be == nblock */ + RETURN(BZ_DATA_ERROR); + } + } + /* Check: cftab entries non-descending. 
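+      Equal neighbours are legal (they just mean a byte value never occurs
+      in the block); only a strict decrease indicates corrupt data.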
*/ + for (i = 1; i <= 256; i++) { + if (s->cftab[i-1] > s->cftab[i]) { + RETURN(BZ_DATA_ERROR); + } + } + + s->state_out_len = 0; + s->state_out_ch = 0; + BZ_INITIALISE_CRC ( s->calculatedBlockCRC ); + s->state = BZ_X_OUTPUT; + if (s->verbosity >= 2) VPrintf0 ( "rt+rld" ); + + if (s->smallDecompress) { + + /*-- Make a copy of cftab, used in generation of T --*/ + for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i]; + + /*-- compute the T vector --*/ + for (i = 0; i < nblock; i++) { + uc = (UChar)(s->ll16[i]); + SET_LL(i, s->cftabCopy[uc]); + s->cftabCopy[uc]++; + } + + /*-- Compute T^(-1) by pointer reversal on T --*/ + i = s->origPtr; + j = GET_LL(i); + do { + Int32 tmp = GET_LL(j); + SET_LL(j, i); + i = j; + j = tmp; + } + while (i != s->origPtr); + + s->tPos = s->origPtr; + s->nblock_used = 0; + if (s->blockRandomised) { + BZ_RAND_INIT_MASK; + BZ_GET_SMALL(s->k0); s->nblock_used++; + BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; + } else { + BZ_GET_SMALL(s->k0); s->nblock_used++; + } + + } else { + + /*-- compute the T^(-1) vector --*/ + for (i = 0; i < nblock; i++) { + uc = (UChar)(s->tt[i] & 0xff); + s->tt[s->cftab[uc]] |= (i << 8); + s->cftab[uc]++; + } + + s->tPos = s->tt[s->origPtr] >> 8; + s->nblock_used = 0; + if (s->blockRandomised) { + BZ_RAND_INIT_MASK; + BZ_GET_FAST(s->k0); s->nblock_used++; + BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; + } else { + BZ_GET_FAST(s->k0); s->nblock_used++; + } + + } + + RETURN(BZ_OK); + + + + endhdr_2: + + GET_UCHAR(BZ_X_ENDHDR_2, uc); + if (uc != 0x72) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_ENDHDR_3, uc); + if (uc != 0x45) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_ENDHDR_4, uc); + if (uc != 0x38) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_ENDHDR_5, uc); + if (uc != 0x50) RETURN(BZ_DATA_ERROR); + GET_UCHAR(BZ_X_ENDHDR_6, uc); + if (uc != 0x90) RETURN(BZ_DATA_ERROR); + + s->storedCombinedCRC = 0; + GET_UCHAR(BZ_X_CCRC_1, uc); + s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); + GET_UCHAR(BZ_X_CCRC_2, uc); + s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); + GET_UCHAR(BZ_X_CCRC_3, uc); + s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); + GET_UCHAR(BZ_X_CCRC_4, uc); + s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); + + s->state = BZ_X_IDLE; + RETURN(BZ_STREAM_END); + + default: AssertH ( False, 4001 ); + } + + AssertH ( False, 4002 ); + + save_state_and_return: + + s->save_i = i; + s->save_j = j; + s->save_t = t; + s->save_alphaSize = alphaSize; + s->save_nGroups = nGroups; + s->save_nSelectors = nSelectors; + s->save_EOB = EOB; + s->save_groupNo = groupNo; + s->save_groupPos = groupPos; + s->save_nextSym = nextSym; + s->save_nblockMAX = nblockMAX; + s->save_nblock = nblock; + s->save_es = es; + s->save_N = N; + s->save_curr = curr; + s->save_zt = zt; + s->save_zn = zn; + s->save_zvec = zvec; + s->save_zj = zj; + s->save_gSel = gSel; + s->save_gMinlen = gMinlen; + s->save_gLimit = gLimit; + s->save_gBase = gBase; + s->save_gPerm = gPerm; + + return retVal; +} + + +/*-------------------------------------------------------------*/ +/*--- end decompress.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/bz2/huffman.c b/tools/lib/bz2/huffman.c new file mode 100644 index 0000000..43a1899 --- /dev/null +++ b/tools/lib/bz2/huffman.c @@ -0,0 +1,205 @@ + +/*-------------------------------------------------------------*/ +/*--- Huffman coding low-level stuff ---*/ +/*--- huffman.c ---*/ 
+/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ + This file is part of bzip2/libbzip2, a program and library for + lossless, block-sorting data compression. + + bzip2/libbzip2 version 1.0.8 of 13 July 2019 + Copyright (C) 1996-2019 Julian Seward + + Please read the WARNING, DISCLAIMER and PATENTS sections in the + README file. + + This program is released under the terms of the license contained + in the file LICENSE. + ------------------------------------------------------------------ */ + + +#include "bzlib_private.h" + +/*---------------------------------------------------*/ +#define WEIGHTOF(zz0) ((zz0) & 0xffffff00) +#define DEPTHOF(zz1) ((zz1) & 0x000000ff) +#define MYMAX(zz2,zz3) ((zz2) > (zz3) ? (zz2) : (zz3)) + +#define ADDWEIGHTS(zw1,zw2) \ + (WEIGHTOF(zw1)+WEIGHTOF(zw2)) | \ + (1 + MYMAX(DEPTHOF(zw1),DEPTHOF(zw2))) + +#define UPHEAP(z) \ +{ \ + Int32 zz, tmp; \ + zz = z; tmp = heap[zz]; \ + while (weight[tmp] < weight[heap[zz >> 1]]) { \ + heap[zz] = heap[zz >> 1]; \ + zz >>= 1; \ + } \ + heap[zz] = tmp; \ +} + +#define DOWNHEAP(z) \ +{ \ + Int32 zz, yy, tmp; \ + zz = z; tmp = heap[zz]; \ + while (True) { \ + yy = zz << 1; \ + if (yy > nHeap) break; \ + if (yy < nHeap && \ + weight[heap[yy+1]] < weight[heap[yy]]) \ + yy++; \ + if (weight[tmp] < weight[heap[yy]]) break; \ + heap[zz] = heap[yy]; \ + zz = yy; \ + } \ + heap[zz] = tmp; \ +} + + +/*---------------------------------------------------*/ +void BZ2_hbMakeCodeLengths ( UChar *len, + Int32 *freq, + Int32 alphaSize, + Int32 maxLen ) +{ + /*-- + Nodes and heap entries run from 1. Entry 0 + for both the heap and nodes is a sentinel. + --*/ + Int32 nNodes, nHeap, n1, n2, i, j, k; + Bool tooLong; + + Int32 heap [ BZ_MAX_ALPHA_SIZE + 2 ]; + Int32 weight [ BZ_MAX_ALPHA_SIZE * 2 ]; + Int32 parent [ BZ_MAX_ALPHA_SIZE * 2 ]; + + for (i = 0; i < alphaSize; i++) + weight[i+1] = (freq[i] == 0 ? 1 : freq[i]) << 8; + + while (True) { + + nNodes = alphaSize; + nHeap = 0; + + heap[0] = 0; + weight[0] = 0; + parent[0] = -2; + + for (i = 1; i <= alphaSize; i++) { + parent[i] = -1; + nHeap++; + heap[nHeap] = i; + UPHEAP(nHeap); + } + + AssertH( nHeap < (BZ_MAX_ALPHA_SIZE+2), 2001 ); + + while (nHeap > 1) { + n1 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP(1); + n2 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP(1); + nNodes++; + parent[n1] = parent[n2] = nNodes; + weight[nNodes] = ADDWEIGHTS(weight[n1], weight[n2]); + parent[nNodes] = -1; + nHeap++; + heap[nHeap] = nNodes; + UPHEAP(nHeap); + } + + AssertH( nNodes < (BZ_MAX_ALPHA_SIZE * 2), 2002 ); + + tooLong = False; + for (i = 1; i <= alphaSize; i++) { + j = 0; + k = i; + while (parent[k] >= 0) { k = parent[k]; j++; } + len[i-1] = j; + if (j > maxLen) tooLong = True; + } + + if (! tooLong) break; + + /* 17 Oct 04: keep-going condition for the following loop used + to be 'i < alphaSize', which missed the last element, + theoretically leading to the possibility of the compressor + looping. However, this count-scaling step is only needed if + one of the generated Huffman code words is longer than + maxLen, which up to and including version 1.0.2 was 20 bits, + which is extremely unlikely. In version 1.0.3 maxLen was + changed to 17 bits, which has minimal effect on compression + ratio, but does mean this scaling step is used from time to + time, enough to verify that it works. + + This means that bzip2-1.0.3 and later will only produce + Huffman codes with a maximum length of 17 bits. 
However, in + order to preserve backwards compatibility with bitstreams + produced by versions pre-1.0.3, the decompressor must still + handle lengths of up to 20. */ + + for (i = 1; i <= alphaSize; i++) { + j = weight[i] >> 8; + j = 1 + (j / 2); + weight[i] = j << 8; + } + } +} + + +/*---------------------------------------------------*/ +void BZ2_hbAssignCodes ( Int32 *code, + UChar *length, + Int32 minLen, + Int32 maxLen, + Int32 alphaSize ) +{ + Int32 n, vec, i; + + vec = 0; + for (n = minLen; n <= maxLen; n++) { + for (i = 0; i < alphaSize; i++) + if (length[i] == n) { code[i] = vec; vec++; }; + vec <<= 1; + } +} + + +/*---------------------------------------------------*/ +void BZ2_hbCreateDecodeTables ( Int32 *limit, + Int32 *base, + Int32 *perm, + UChar *length, + Int32 minLen, + Int32 maxLen, + Int32 alphaSize ) +{ + Int32 pp, i, j, vec; + + pp = 0; + for (i = minLen; i <= maxLen; i++) + for (j = 0; j < alphaSize; j++) + if (length[j] == i) { perm[pp] = j; pp++; }; + + for (i = 0; i < BZ_MAX_CODE_LEN; i++) base[i] = 0; + for (i = 0; i < alphaSize; i++) base[length[i]+1]++; + + for (i = 1; i < BZ_MAX_CODE_LEN; i++) base[i] += base[i-1]; + + for (i = 0; i < BZ_MAX_CODE_LEN; i++) limit[i] = 0; + vec = 0; + + for (i = minLen; i <= maxLen; i++) { + vec += (base[i+1] - base[i]); + limit[i] = vec-1; + vec <<= 1; + } + for (i = minLen + 1; i <= maxLen; i++) + base[i] = ((limit[i-1] + 1) << 1) - base[i]; +} + + +/*-------------------------------------------------------------*/ +/*--- end huffman.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/bz2/randtable.c b/tools/lib/bz2/randtable.c new file mode 100644 index 0000000..bdc6d4a --- /dev/null +++ b/tools/lib/bz2/randtable.c @@ -0,0 +1,84 @@ + +/*-------------------------------------------------------------*/ +/*--- Table for randomising repetitive blocks ---*/ +/*--- randtable.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ + This file is part of bzip2/libbzip2, a program and library for + lossless, block-sorting data compression. + + bzip2/libbzip2 version 1.0.8 of 13 July 2019 + Copyright (C) 1996-2019 Julian Seward + + Please read the WARNING, DISCLAIMER and PATENTS sections in the + README file. + + This program is released under the terms of the license contained + in the file LICENSE. 
+ ------------------------------------------------------------------ */ + + +#include "bzlib_private.h" + + +/*---------------------------------------------*/ +Int32 BZ2_rNums[512] = { + 619, 720, 127, 481, 931, 816, 813, 233, 566, 247, + 985, 724, 205, 454, 863, 491, 741, 242, 949, 214, + 733, 859, 335, 708, 621, 574, 73, 654, 730, 472, + 419, 436, 278, 496, 867, 210, 399, 680, 480, 51, + 878, 465, 811, 169, 869, 675, 611, 697, 867, 561, + 862, 687, 507, 283, 482, 129, 807, 591, 733, 623, + 150, 238, 59, 379, 684, 877, 625, 169, 643, 105, + 170, 607, 520, 932, 727, 476, 693, 425, 174, 647, + 73, 122, 335, 530, 442, 853, 695, 249, 445, 515, + 909, 545, 703, 919, 874, 474, 882, 500, 594, 612, + 641, 801, 220, 162, 819, 984, 589, 513, 495, 799, + 161, 604, 958, 533, 221, 400, 386, 867, 600, 782, + 382, 596, 414, 171, 516, 375, 682, 485, 911, 276, + 98, 553, 163, 354, 666, 933, 424, 341, 533, 870, + 227, 730, 475, 186, 263, 647, 537, 686, 600, 224, + 469, 68, 770, 919, 190, 373, 294, 822, 808, 206, + 184, 943, 795, 384, 383, 461, 404, 758, 839, 887, + 715, 67, 618, 276, 204, 918, 873, 777, 604, 560, + 951, 160, 578, 722, 79, 804, 96, 409, 713, 940, + 652, 934, 970, 447, 318, 353, 859, 672, 112, 785, + 645, 863, 803, 350, 139, 93, 354, 99, 820, 908, + 609, 772, 154, 274, 580, 184, 79, 626, 630, 742, + 653, 282, 762, 623, 680, 81, 927, 626, 789, 125, + 411, 521, 938, 300, 821, 78, 343, 175, 128, 250, + 170, 774, 972, 275, 999, 639, 495, 78, 352, 126, + 857, 956, 358, 619, 580, 124, 737, 594, 701, 612, + 669, 112, 134, 694, 363, 992, 809, 743, 168, 974, + 944, 375, 748, 52, 600, 747, 642, 182, 862, 81, + 344, 805, 988, 739, 511, 655, 814, 334, 249, 515, + 897, 955, 664, 981, 649, 113, 974, 459, 893, 228, + 433, 837, 553, 268, 926, 240, 102, 654, 459, 51, + 686, 754, 806, 760, 493, 403, 415, 394, 687, 700, + 946, 670, 656, 610, 738, 392, 760, 799, 887, 653, + 978, 321, 576, 617, 626, 502, 894, 679, 243, 440, + 680, 879, 194, 572, 640, 724, 926, 56, 204, 700, + 707, 151, 457, 449, 797, 195, 791, 558, 945, 679, + 297, 59, 87, 824, 713, 663, 412, 693, 342, 606, + 134, 108, 571, 364, 631, 212, 174, 643, 304, 329, + 343, 97, 430, 751, 497, 314, 983, 374, 822, 928, + 140, 206, 73, 263, 980, 736, 876, 478, 430, 305, + 170, 514, 364, 692, 829, 82, 855, 953, 676, 246, + 369, 970, 294, 750, 807, 827, 150, 790, 288, 923, + 804, 378, 215, 828, 592, 281, 565, 555, 710, 82, + 896, 831, 547, 261, 524, 462, 293, 465, 502, 56, + 661, 821, 976, 991, 658, 869, 905, 758, 745, 193, + 768, 550, 608, 933, 378, 286, 215, 979, 792, 961, + 61, 688, 793, 644, 986, 403, 106, 366, 905, 644, + 372, 567, 466, 434, 645, 210, 389, 550, 919, 135, + 780, 773, 635, 389, 707, 100, 626, 958, 165, 504, + 920, 176, 193, 713, 857, 265, 203, 50, 668, 108, + 645, 990, 626, 197, 510, 357, 358, 850, 858, 364, + 936, 638 +}; + + +/*-------------------------------------------------------------*/ +/*--- end randtable.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/tools/lib/lz4/lz4.c b/tools/lib/lz4/lz4.c new file mode 100644 index 0000000..a1f02e7 --- /dev/null +++ b/tools/lib/lz4/lz4.c @@ -0,0 +1,2832 @@ +/* + LZ4 - Fast LZ compression algorithm + Copyright (c) Yann Collet. All rights reserved. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ + +/*-************************************ +* Tuning parameters +**************************************/ +/* + * LZ4_HEAPMODE : + * Select how stateless compression functions like `LZ4_compress_default()` + * allocate memory for their hash table, + * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). + */ +#ifndef LZ4_HEAPMODE +# define LZ4_HEAPMODE 0 +#endif + +/* + * LZ4_ACCELERATION_DEFAULT : + * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 + */ +#define LZ4_ACCELERATION_DEFAULT 1 +/* + * LZ4_ACCELERATION_MAX : + * Any "acceleration" value higher than this threshold + * get treated as LZ4_ACCELERATION_MAX instead (fix #876) + */ +#define LZ4_ACCELERATION_MAX 65537 + + +/*-************************************ +* CPU Feature Detection +**************************************/ +/* LZ4_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets which assembly generation depends on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ +# if defined(__GNUC__) && \ + ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ + || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ + || (defined(__riscv) && defined(__riscv_zicclsm)) ) +# define LZ4_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER) +# define LZ4_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/* + * LZ4_FORCE_SW_BITCOUNT + * Define this parameter if your target system or compiler does not support hardware bit count + */ +#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */ +# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */ +# define LZ4_FORCE_SW_BITCOUNT +#endif + + + +/*-************************************ +* Dependency +**************************************/ +/* + * LZ4_SRC_INCLUDED: + * Amalgamation flag, whether lz4.c is included + */ +#ifndef LZ4_SRC_INCLUDED +# define LZ4_SRC_INCLUDED 1 +#endif + +#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ +#endif + +#ifndef LZ4_STATIC_LINKING_ONLY +# define LZ4_STATIC_LINKING_ONLY +#endif +#include "lz4.h" +/* see also "memory routines" below */ + + +/*-************************************ +* Compiler Options +**************************************/ +#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */ +# include /* only present in VS2005+ */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */ +# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */ +# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */ +# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */ +#endif /* _MSC_VER */ + +#ifndef LZ4_FORCE_INLINE +# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */ +# define LZ4_FORCE_INLINE static __forceinline +# else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# if defined (__GNUC__) || defined (__clang__) +# define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define LZ4_FORCE_INLINE static inline +# endif +# else +# define LZ4_FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +# endif /* _MSC_VER */ +#endif /* LZ4_FORCE_INLINE */ + +/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE + * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8, + * together with a simple 8-byte copy loop as a fall-back path. + * However, this optimization hurts the decompression speed by >30%, + * because the execution does not go to the optimized loop + * for typical compressible data, and all of the preamble checks + * before going to the fall-back path become useless overhead. + * This optimization happens only with the -O3 flag, and -O2 generates + * a simple 8-byte copy loop. + * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8 + * functions are annotated with __attribute__((optimize("O2"))), + * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute + * of LZ4_wildCopy8 does not affect the compression speed. 
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+#  define LZ4_FORCE_O2  __attribute__((optimize("O2")))
+#  undef LZ4_FORCE_INLINE
+#  define LZ4_FORCE_INLINE  static __inline __attribute__((optimize("O2"),always_inline))
+#else
+#  define LZ4_FORCE_O2
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
+#else
+#  define expect(expr,value)    (expr)
+#endif
+
+#ifndef likely
+#define likely(expr)     expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
+#define unlikely(expr)   expect((expr) != 0, 0)
+#endif
+
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST  /* can be externally provided */
+#  define LZ4_ALIGN_TEST 1
+#endif
+
+
+/*-************************************
+*  Memory routines
+**************************************/
+
+/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
+ *  Disable relatively high-level LZ4/HC functions that use dynamic memory
+ *  allocation functions (malloc(), calloc(), free()).
+ *
+ *  Note that this is a compile-time switch. And since it disables
+ *  public/stable LZ4 v1 API functions, we don't recommend using this
+ *  symbol to generate a library for distribution.
+ *
+ *  The following public functions are removed when this symbol is defined.
+ *  - lz4   : LZ4_createStream, LZ4_freeStream,
+ *            LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)
+ *  - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
+ *            LZ4_createHC (deprecated), LZ4_freeHC (deprecated)
+ *  - lz4frame, lz4file : All LZ4F_* functions
+ */
+#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+#  define ALLOC(s)          lz4_error_memory_allocation_is_disabled
+#  define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
+#  define FREEMEM(p)        lz4_error_memory_allocation_is_disabled
+#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
+/* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void  LZ4_free(void* p);
+#  define ALLOC(s)          LZ4_malloc(s)
+#  define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+#  define FREEMEM(p)        LZ4_free(p)
+#else
+#  include <stdlib.h>   /* malloc, calloc, free */
+#  define ALLOC(s)          malloc(s)
+#  define ALLOC_AND_ZERO(s) calloc(1,s)
+#  define FREEMEM(p)        free(p)
+#endif
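+
+/* (Editorial sketch, not part of upstream lz4.c: when LZ4_USER_MEMORY_FUNCTIONS
+ * is defined, the project must provide the three symbols declared above at link
+ * time, e.g. by forwarding to a hypothetical arena allocator:
+ *
+ *     void* LZ4_malloc(size_t s)           { return my_arena_alloc(s); }
+ *     void* LZ4_calloc(size_t n, size_t s) { return my_arena_alloc_zeroed(n, s); }
+ *     void  LZ4_free(void* p)              { my_arena_free(p); }
+ * )
+ */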
+#if !LZ4_FREESTANDING
+#  include <string.h>   /* memset, memcpy */
+#endif
+#if !defined(LZ4_memset)
+#  define LZ4_memset(p,v,s) memset((p),(v),(s))
+#endif
+#define MEM_INIT(p,v,s)   LZ4_memset((p),(v),(s))
+
+
+/*-************************************
+*  Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
+#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS  4
+#define ML_MASK  ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/*-************************************
+*  Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+#  include <assert.h>
+#else
+#  ifndef assert
+#    define assert(condition) ((void)0)
+#  endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+#  include <stdio.h>
+   static int g_debuglog_enable = 1;
+#  define DEBUGLOG(l, ...) {                                  \
+        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {          \
+            fprintf(stderr, __FILE__ " %i: ", __LINE__);      \
+            fprintf(stderr, __VA_ARGS__);                     \
+            fprintf(stderr, " \n");                           \
+    }   }
+#else
+#  define DEBUGLOG(l, ...) {}    /* disabled */
+#endif
+
+static int LZ4_isAligned(const void* ptr, size_t alignment)
+{
+    return ((size_t)ptr & (alignment -1)) == 0;
+}
+
+
+/*-************************************
+*  Types
+**************************************/
+#include <limits.h>
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  include <stdint.h>
+   typedef unsigned char BYTE;  /* uint8_t not necessarily blessed to alias arbitrary type */
+   typedef uint16_t U16;
+   typedef uint32_t U32;
+   typedef  int32_t S32;
+   typedef uint64_t U64;
+   typedef uintptr_t uptrval;
+#else
+#  if UINT_MAX != 4294967295UL
+#    error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+#  endif
+   typedef unsigned char       BYTE;
+   typedef unsigned short      U16;
+   typedef unsigned int        U32;
+   typedef   signed int        S32;
+   typedef unsigned long long  U64;
+   typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
+#endif
+
+#if defined(__x86_64__)
+   typedef U64    reg_t;   /* 64-bits in x32 mode */
+#else
+   typedef size_t reg_t;   /* 32-bits in x32 mode */
+#endif
+
+typedef enum {
+    notLimited = 0,
+    limitedOutput = 1,
+    fillOutput = 2
+} limitedOutput_directive;
+
+
+/*-************************************
+*  Reading and writing into memory
+**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
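+ *
+ * (Editorial note, not upstream text: with a constant size argument, the
+ * wrapper below is expected to lower to a single fixed-width load/store
+ * instead of a libc call, e.g. LZ4_memcpy(&val, memPtr, sizeof(val)) inside
+ * the LZ4_read16/LZ4_read32 helpers further down.)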
+ */ +#if !defined(LZ4_memcpy) +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size) +# else +# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size) +# endif +#endif + +#if !defined(LZ4_memmove) +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4_memmove __builtin_memmove +# else +# define LZ4_memmove memmove +# endif +#endif + +static unsigned LZ4_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__)) +#elif defined(_MSC_VER) +#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop)) +#endif + +#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2) +/* lie to the compiler about data alignment; use with caution */ + +static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; } +static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; } +static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; } + +static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } +static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } + +#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16; +LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32; +LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST; + +static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; } +static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; } +static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; } + +static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; } +static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; } + +#else /* safe and portable access using memcpy() */ + +static U16 LZ4_read16(const void* memPtr) +{ + U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U32 LZ4_read32(const void* memPtr) +{ + U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; +} + +static reg_t LZ4_read_ARCH(const void* memPtr) +{ + reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; +} + +static void LZ4_write16(void* memPtr, U16 value) +{ + LZ4_memcpy(memPtr, &value, sizeof(value)); +} + +static void LZ4_write32(void* memPtr, U32 value) +{ + LZ4_memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* LZ4_FORCE_MEMORY_ACCESS */ + + +static U16 LZ4_readLE16(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read16(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)((U16)p[0] | (p[1]<<8)); + } +} + +#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT +static U32 LZ4_readLE32(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read32(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24); + } +} +#endif + +static void LZ4_writeLE16(void* memPtr, U16 value) +{ + if (LZ4_isLittleEndian()) { + LZ4_write16(memPtr, value); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE) value; + p[1] = 
(BYTE)(value>>8);
+    }
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+LZ4_FORCE_INLINE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+    BYTE* d = (BYTE*)dstPtr;
+    const BYTE* s = (const BYTE*)srcPtr;
+    BYTE* const e = (BYTE*)dstEnd;
+
+    do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+}
+
+static const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
+static const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};
+
+
+#ifndef LZ4_FAST_DEC_LOOP
+#  if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+#    define LZ4_FAST_DEC_LOOP 1
+#  elif defined(__aarch64__) && defined(__APPLE__)
+#    define LZ4_FAST_DEC_LOOP 1
+#  elif defined(__aarch64__) && !defined(__clang__)
+     /* On non-Apple aarch64, we disable this optimization for clang because
+      * on certain mobile chipsets, performance is reduced with clang. For
+      * more information refer to https://github.com/lz4/lz4/pull/707 */
+#    define LZ4_FAST_DEC_LOOP 1
+#  else
+#    define LZ4_FAST_DEC_LOOP 0
+#  endif
+#endif
+
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+    assert(srcPtr + offset == dstPtr);
+    if (offset < 8) {
+        LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
+        dstPtr[0] = srcPtr[0];
+        dstPtr[1] = srcPtr[1];
+        dstPtr[2] = srcPtr[2];
+        dstPtr[3] = srcPtr[3];
+        srcPtr += inc32table[offset];
+        LZ4_memcpy(dstPtr+4, srcPtr, 4);
+        srcPtr -= dec64table[offset];
+        dstPtr += 8;
+    } else {
+        LZ4_memcpy(dstPtr, srcPtr, 8);
+        dstPtr += 8;
+        srcPtr += 8;
+    }
+
+    while (dstPtr < dstEnd) { LZ4_memcpy(dstPtr, srcPtr, 8); dstPtr += 8; srcPtr += 8; }
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_INLINE void
+LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+    BYTE* d = (BYTE*)dstPtr;
+    const BYTE* s = (const BYTE*)srcPtr;
+    BYTE* const e = (BYTE*)dstEnd;
+
+    do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
+
+/* LZ4_memcpy_using_offset()  presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 12 bytes available to write after dstEnd */
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+    BYTE v[8];
+
+    assert(dstEnd >= dstPtr + MINMATCH);
+
+    switch(offset) {
+    case 1:
+        MEM_INIT(v, *srcPtr, 8);
+        break;
+    case 2:
+        LZ4_memcpy(v, srcPtr, 2);
+        LZ4_memcpy(&v[2], srcPtr, 2);
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
+#  pragma warning(push)
+#  pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
+#endif
+        LZ4_memcpy(&v[4], v, 4);
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
+#  pragma warning(pop)
+#endif
+        break;
+    case 4:
+        LZ4_memcpy(v, srcPtr, 4);
+        LZ4_memcpy(&v[4], srcPtr, 4);
+        break;
+    default:
+        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+        return;
+    }
+
+    LZ4_memcpy(dstPtr, v, 8);
+    dstPtr += 8;
+    while (dstPtr < dstEnd) {
+        LZ4_memcpy(dstPtr, v, 8);
+        dstPtr += 8;
+    }
+}
+#endif
+
+
+/*-************************************
+*  Common functions
+**************************************/
+static unsigned LZ4_NbCommonBytes (reg_t val)
+{
+    assert(val != 0);
+    if (LZ4_isLittleEndian()) {
+        if (sizeof(val) == 8) {
+#  if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)
+/*-*************************************************************************************************
+* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.
+* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
+* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.
+****************************************************************************************************/
+#    if defined(__clang__) && (__clang_major__ < 10)
+       /* Avoid undefined clang-cl intrinsics issue.
+        * See https://github.com/lz4/lz4/pull/1017 for details.
*/ + return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3; +# else + /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */ + return (unsigned)_tzcnt_u64(val) >> 3; +# endif +# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64(&r, (U64)val); + return (unsigned)r >> 3; +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctzll((U64)val) >> 3; +# else + const U64 m = 0x0101010101010101ULL; + val ^= val - 1; + return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56); +# endif + } else /* 32 bits */ { +# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)r >> 3; +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctz((U32)val) >> 3; +# else + const U32 m = 0x01010101; + return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; +# endif + } + } else /* Big Endian CPU */ { + if (sizeof(val)==8) { +# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clzll((U64)val) >> 3; +# else +#if 1 + /* this method is probably faster, + * but adds a 128 bytes lookup table */ + static const unsigned char ctz7_tab[128] = { + 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + }; + U64 const mask = 0x0101010101010101ULL; + U64 const t = (((val >> 8) - mask) | val) & mask; + return ctz7_tab[(t * 0x0080402010080402ULL) >> 57]; +#else + /* this method doesn't consume memory space like the previous one, + * but it contains several branches, + * that may end up slowing execution */ + static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. + Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. + Note that this code path is never triggered in 32-bits mode. 
*/
+            unsigned r;
+            if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
+            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+            r += (!val);
+            return r;
+#endif
+#  endif
+        } else /* 32 bits */ {
+#  if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+    ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+    !defined(LZ4_FORCE_SW_BITCOUNT)
+            return (unsigned)__builtin_clz((U32)val) >> 3;
+#  else
+            val >>= 8;
+            val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+              (val + 0x00FF0000)) >> 24;
+            return (unsigned)val ^ 3;
+#  endif
+        }
+    }
+}
+
+
+#define STEPSIZE sizeof(reg_t)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+{
+    const BYTE* const pStart = pIn;
+
+    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+        if (!diff) {
+            pIn+=STEPSIZE; pMatch+=STEPSIZE;
+        } else {
+            return LZ4_NbCommonBytes(diff);
+    }   }
+
+    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
+        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+        pIn += LZ4_NbCommonBytes(diff);
+        return (unsigned)(pIn - pStart);
+    }
+
+    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+    return (unsigned)(pIn - pStart);
+}
+
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+*  Local Constants
+**************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
+static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression run slower on incompressible data */
+
+
+/*-************************************
+*  Local Structures and types
+**************************************/
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict        : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ *                   being compressed are valid and refer to the preceding
+ *                   content (of length ctx->dictSize), which is available
+ *                   contiguously preceding in memory the content currently
+ *                   being compressed.
+ * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
+ *                   else in memory, starting at ctx->dictionary with length
+ *                   ctx->dictSize.
+ * - usingDictCtx  : Everything concerning the preceding content is
+ *                   in a separate context, pointed to by ctx->dictCtx.
+ *                   ctx->dictionary, ctx->dictSize, and table entries
+ *                   in the current context that refer to positions
+ *                   preceding the beginning of the current compression are
+ *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ *                   ->dictSize describe the location and size of the preceding
+ *                   content, and matches are found by looking in the ctx
+ *                   ->dictCtx->hashTable.
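+ *
+ * (Editorial illustration, not upstream text: a dictionary prepared with
+ *  LZ4_loadDict() on its own LZ4_stream_t and attached via
+ *  LZ4_attach_dictionary() is consulted in usingDictCtx mode for the first
+ *  block; afterwards the freshly compressed block itself becomes the history,
+ *  handled through withPrefix64k or usingExtDict.)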
+ */ +typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + + +/*-************************************ +* Local Utils +**************************************/ +int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } +const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } +int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } +int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); } + + +/*-**************************************** +* Internal Definitions, used only in Tests +*******************************************/ +#if defined (__cplusplus) +extern "C" { +#endif + +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize); + +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize); +int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, + int compressedSize, int targetOutputSize, int dstCapacity, + const void* dictStart, size_t dictSize); +#if defined (__cplusplus) +} +#endif + +/*-****************************** +* Compression functions +********************************/ +LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType) +{ + if (tableType == byU16) + return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); + else + return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); +} + +LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) +{ + const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; + if (LZ4_isLittleEndian()) { + const U64 prime5bytes = 889523592379ULL; + return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); + } else { + const U64 prime8bytes = 11400714785074694791ULL; + return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); + } +} + +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) +{ + if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); + +#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT + return LZ4_hash4(LZ4_readLE32(p), tableType); +#else + return LZ4_hash4(LZ4_read32(p), tableType); +#endif +} + +LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: { /* illegal! */ assert(0); return; } + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; } + } +} + +LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: /* fallthrough */ + case byPtr: { /* illegal! 
*/ assert(0); return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } + case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } + } +} + +/* LZ4_putPosition*() : only used in byPtr mode */ +LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, + void* tableBase, tableType_t const tableType) +{ + const BYTE** const hashTable = (const BYTE**)tableBase; + assert(tableType == byPtr); (void)tableType; + hashTable[h] = p; +} + +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType) +{ + U32 const h = LZ4_hashPosition(p, tableType); + LZ4_putPositionOnHash(p, h, tableBase, tableType); +} + +/* LZ4_getIndexOnHash() : + * Index of match position registered in hash table. + * hash position must be calculated by using base+index, or dictBase+index. + * Assumption 1 : only valid if tableType == byU32 or byU16. + * Assumption 2 : h is presumed valid (within limits of hash table) + */ +LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); + if (tableType == byU32) { + const U32* const hashTable = (const U32*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-2))); + return hashTable[h]; + } + if (tableType == byU16) { + const U16* const hashTable = (const U16*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-1))); + return hashTable[h]; + } + assert(0); return 0; /* forbidden case */ +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + assert(tableType == byPtr); (void)tableType; + { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } +} + +LZ4_FORCE_INLINE const BYTE* +LZ4_getPosition(const BYTE* p, + const void* tableBase, tableType_t tableType) +{ + U32 const h = LZ4_hashPosition(p, tableType); + return LZ4_getPositionOnHash(h, tableBase, tableType); +} + +LZ4_FORCE_INLINE void +LZ4_prepareTable(LZ4_stream_t_internal* const cctx, + const int inputSize, + const tableType_t tableType) { + /* If the table hasn't been used, it's guaranteed to be zeroed out, and is + * therefore safe to use no matter what mode we're in. Otherwise, we figure + * out if it's safe to leave as is or whether it needs to be reset. + */ + if ((tableType_t)cctx->tableType != clearedTable) { + assert(inputSize >= 0); + if ((tableType_t)cctx->tableType != tableType + || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) + || ((tableType == byU32) && cctx->currentOffset > 1 GB) + || tableType == byPtr + || inputSize >= 4 KB) + { + DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", (void*)cctx); + MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); + cctx->currentOffset = 0; + cctx->tableType = (U32)clearedTable; + } else { + DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); + } + } + + /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, + * is faster than compressing without a gap. + * However, compressing with currentOffset == 0 is faster still, + * so we preserve that case. + */ + if (cctx->currentOffset != 0 && tableType == byU32) { + DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); + cctx->currentOffset += 64 KB; + } + + /* Finally, clear history */ + cctx->dictCtx = NULL; + cctx->dictionary = NULL; + cctx->dictSize = 0; +} + +/** LZ4_compress_generic_validated() : + * inlined, to ensure branches are decided at compilation time. 
+ *  The following conditions are presumed already validated:
+ *  - source != NULL
+ *  - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+                 LZ4_stream_t_internal* const cctx,
+                 const char* const source,
+                 char* const dest,
+                 const int inputSize,
+                 int*  inputConsumed, /* only written when outputDirective == fillOutput */
+                 const int maxOutputSize,
+                 const limitedOutput_directive outputDirective,
+                 const tableType_t tableType,
+                 const dict_directive dictDirective,
+                 const dictIssue_directive dictIssue,
+                 const int acceleration)
+{
+    int result;
+    const BYTE* ip = (const BYTE*)source;
+
+    U32 const startIndex = cctx->currentOffset;
+    const BYTE* base = (const BYTE*)source - startIndex;
+    const BYTE* lowLimit;
+
+    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+    const BYTE* const dictionary =
+        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+    const U32 dictSize =
+        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+    const U32 dictDelta =
+        (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with indexes in current context */
+
+    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
+    const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
+    const BYTE* anchor = (const BYTE*) source;
+    const BYTE* const iend = ip + inputSize;
+    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
+    const BYTE* const matchlimit = iend - LASTLITERALS;
+
+    /* the dictCtx currentOffset is indexed on the start of the dictionary,
+     * while a dictionary in the current context precedes the currentOffset */
+    const BYTE* dictBase = (dictionary == NULL) ? NULL :
+                           (dictDirective == usingDictCtx) ?
+                            dictionary + dictSize - dictCtx->currentOffset :
+                            dictionary + dictSize - startIndex;
+
+    BYTE* op = (BYTE*) dest;
+    BYTE* const olimit = op + maxOutputSize;
+
+    U32 offset = 0;
+    U32 forwardH;
+
+    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+    assert(ip != NULL);
+    if (tableType == byU16) assert(inputSize<LZ4_64Klimit);  /* Size too large (not within 64K limit) */
+    if (tableType == byPtr) assert(dictDirective==noDict);   /* only supported use case with byPtr */
+    /* If init conditions are not met, we don't have to mark stream
+     * as having dirty context, since no action was taken yet */
+    assert(acceleration >= 1);
+
+    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+    /* Update context state */
+    if (dictDirective == usingDictCtx) {
+        /* Subsequent linked blocks can't use the dictionary. */
+        /* Instead, they use the block we just compressed. */
+        cctx->dictCtx = NULL;
+        cctx->dictSize = (U32)inputSize;
+    } else {
+        cctx->dictSize += (U32)inputSize;
+    }
+    cctx->currentOffset += (U32)inputSize;
+    cctx->tableType = (U32)tableType;
+
+    if (inputSize<LZ4_minLength) goto _last_literals;          /* Input too small, no compression (all literals) */
+
+    /* First Byte */
+    {   U32 const h = LZ4_hashPosition(ip, tableType);
+        if (tableType == byPtr) {
+            LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
+        } else {
+            LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
+    }   }
+    ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+    /* Main Loop */
+    for ( ; ; ) {
+        const BYTE* match;
+        BYTE* token;
+        const BYTE* filledIp;
+
+        /* Find a match */
+        if (tableType == byPtr) {
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);
+
+            } while ( (match+LZ4_DISTANCE_MAX < ip)
+                   || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+        } else {   /* byU32, byU16 */
+
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                U32 const current = (U32)(forwardIp - base);
+                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+                assert(matchIndex <= current);
+                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                if (dictDirective == usingDictCtx) {
+                    if (matchIndex < startIndex) {
+                        /* there was no match, try the dictionary */
+                        assert(tableType == byU32);
+                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                        match = dictBase + matchIndex;
+                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
+                        lowLimit = dictionary;
+                    } else {
+                        match = base + matchIndex;
+                        lowLimit = (const BYTE*)source;
+                    }
+                } else if (dictDirective == usingExtDict) {
+                    if (matchIndex < startIndex) {
+                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
+                        assert(startIndex - matchIndex >= MINMATCH);
+                        assert(dictBase);
+                        match = dictBase + matchIndex;
+                        lowLimit = dictionary;
+                    } else {
+                        match = base + matchIndex;
+                        lowLimit = (const BYTE*)source;
+                    }
+                } else {   /* single continuous memory segment */
+                    match = base + matchIndex;
+                }
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+                DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex, current - matchIndex);
+                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }    /* match outside of valid area */
+                assert(matchIndex < current);
+                if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+                  && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+                    continue;
+                }   /* too far */
+                assert((current - matchIndex) <= LZ4_DISTANCE_MAX);  /* match now expected within distance */
+
+                if (LZ4_read32(match) == LZ4_read32(ip)) {
+                    if (maybe_extMem) offset = current - matchIndex;
+                    break;   /* match found */
+                }
+
+            } while(1);
+        }
+
+        /* Catch up */
+        filledIp = ip;
+        assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */
+        if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
+            do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
+        }
+
+        /* Encode Literals */
+        {   unsigned const litLength = (unsigned)(ip - anchor);
+            token = op++;
+            if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
+                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+            }
+            if ((outputDirective == fillOutput) &&
+                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+                op--;
+                goto _last_literals;
+            }
+            if (litLength >= RUN_MASK) {
+                unsigned len = litLength - RUN_MASK;
+                *token = (RUN_MASK<<ML_BITS);
+                for(; len >= 255 ; len-=255) *op++ = 255;
+                *op++ = (BYTE)len;
+            }
+            else *token = (BYTE)(litLength<<ML_BITS);
+
+            /* Copy Literals */
+            LZ4_wildCopy8(op, anchor, op+litLength);
+            op+=litLength;
+            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
+        }
+
+_next_match:
+        /* at this stage, the following variables must be correctly set :
+         * - ip : at start of LZ copy operation (match)
+         * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+         * - offset : if maybe_extMem==1 (constant)
+         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+         */
+
+        if ((outputDirective == fillOutput) &&
+            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+            /* the match was too close to the end, rewind and go to last literals */
+            op = token;
+            goto _last_literals;
+        }
+
+        /* Encode Offset */
+        if (maybe_extMem) {   /* static test */
+            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+            LZ4_writeLE16(op, (U16)offset); op+=2;
+        } else {
+            DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
+            assert(ip-match <= LZ4_DISTANCE_MAX);
+            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+        }
+
+        /* Encode MatchLength */
+        {   unsigned matchCode;
+
+            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+              && (lowLimit==dictionary) /* match within extDict */ ) {
+                const BYTE* limit = ip + (dictEnd-match);
+                assert(dictEnd > match);
+                if (limit > matchlimit) limit = matchlimit;
+                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+                ip += (size_t)matchCode + MINMATCH;
+                if (ip==limit) {
+                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
+                    matchCode += more;
+                    ip += more;
+                }
+                DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
+            } else {
+                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+                ip += (size_t)matchCode + MINMATCH;
+                DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
+            }
+
+            if ((outputDirective) &&    /* Check output buffer overflow */
+                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+                if (outputDirective == fillOutput) {
+                    /* Match description too long : reduce it */
+                    U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+                    ip -= matchCode - newMatchCode;
+                    assert(newMatchCode < matchCode);
+                    matchCode = newMatchCode;
+                    if (unlikely(ip <= filledIp)) {
+                        /* We have already filled up to filledIp so if ip ends up less than filledIp
+                         * we have positions in the hash table beyond the current position. This is
+                         * a problem if we reuse the hash table. So we have to remove these positions
+                         * from the hash table.
+                         */
+                        const BYTE* ptr;
+                        DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+                        for (ptr = ip; ptr <= filledIp; ++ptr) {
+                            U32 const h = LZ4_hashPosition(ptr, tableType);
+                            LZ4_clearHash(h, cctx->hashTable, tableType);
+                        }
+                    }
+                } else {
+                    assert(outputDirective == limitedOutput);
+                    return 0;   /* cannot compress within `dst` budget.
Stored indexes in hash table are nonetheless fine */ + } + } + if (matchCode >= ML_MASK) { + *token += ML_MASK; + matchCode -= ML_MASK; + LZ4_write32(op, 0xFFFFFFFF); + while (matchCode >= 4*255) { + op+=4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4*255; + } + op += matchCode / 255; + *op++ = (BYTE)(matchCode % 255); + } else + *token += (BYTE)(matchCode); + } + /* Ensure we have enough space for the last literals. */ + assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit)); + + anchor = ip; + + /* Test end of chunk */ + if (ip >= mflimitPlusOne) break; + + /* Fill table */ + { U32 const h = LZ4_hashPosition(ip-2, tableType); + if (tableType == byPtr) { + LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr); + } else { + U32 const idx = (U32)((ip-2) - base); + LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType); + } } + + /* Test next position */ + if (tableType == byPtr) { + + match = LZ4_getPosition(ip, cctx->hashTable, tableType); + LZ4_putPosition(ip, cctx->hashTable, tableType); + if ( (match+LZ4_DISTANCE_MAX >= ip) + && (LZ4_read32(match) == LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + } else { /* byU32, byU16 */ + + U32 const h = LZ4_hashPosition(ip, tableType); + U32 const current = (U32)(ip-base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex < current); + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + assert(tableType == byU32); + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + matchIndex += dictDelta; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + assert(dictBase); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else { /* single memory segment */ + match = base + matchIndex; + } + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + assert(matchIndex < current); + if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) + && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current)) + && (LZ4_read32(match) == LZ4_read32(ip)) ) { + token=op++; + *token=0; + if (maybe_extMem) offset = current - matchIndex; + DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", + (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); + goto _next_match; + } + } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + + } + +_last_literals: + /* Encode Last Literals */ + { size_t lastRun = (size_t)(iend - anchor); + if ( (outputDirective) && /* Check output buffer overflow */ + (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { + if (outputDirective == fillOutput) { + /* adapt lastRun to fill 'dst' */ + assert(olimit >= op); + lastRun = (size_t)(olimit-op) - 1/*token*/; + lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/ + } else { + assert(outputDirective == limitedOutput); + return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */
+            }
+        }
+        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+        if (lastRun >= RUN_MASK) {
+            size_t accumulator = lastRun - RUN_MASK;
+            *op++ = RUN_MASK << ML_BITS;
+            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+            *op++ = (BYTE) accumulator;
+        } else {
+            *op++ = (BYTE)(lastRun<<ML_BITS);
+        }
+        LZ4_memcpy(op, anchor, lastRun);
+        ip = anchor + lastRun;
+        op += lastRun;
+    }
+
+    if (outputDirective == fillOutput) {
+        *inputConsumed = (int) (((const char*)ip)-source);
+    }
+    result = (int)(((char*)op) - dest);
+    assert(result > 0);
+    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
+    return result;
+}
+
+/** LZ4_compress_generic() :
+ *  inlined, to ensure branches are decided at compilation time;
+ *  takes care of src == (NULL, 0)
+ *  and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+                 LZ4_stream_t_internal* const cctx,
+                 const char* const src,
+                 char* const dst,
+                 const int srcSize,
+                 int *inputConsumed, /* only written when outputDirective == fillOutput */
+                 const int dstCapacity,
+                 const limitedOutput_directive outputDirective,
+                 const tableType_t tableType,
+                 const dict_directive dictDirective,
+                 const dictIssue_directive dictIssue,
+                 const int acceleration)
+{
+    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+                srcSize, dstCapacity);
+
+    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }  /* Unsupported srcSize, too large (or negative) */
+    if (srcSize == 0) {   /* src == NULL supported if srcSize == 0 */
+        if (outputDirective != notLimited && dstCapacity <= 0) return 0;  /* no output, can't write anything */
+        DEBUGLOG(5, "Generating an empty block");
+        assert(outputDirective == notLimited || dstCapacity >= 1);
+        assert(dst != NULL);
+        dst[0] = 0;
+        if (outputDirective == fillOutput) {
+            assert (inputConsumed != NULL);
+            *inputConsumed = 0;
+        }
+        return 1;
+    }
+    assert(src != NULL);
+
+    return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+                inputConsumed, /* only written into if outputDirective == fillOutput */
+                dstCapacity, outputDirective,
+                tableType, dictDirective, dictIssue, acceleration);
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
+    assert(ctx != NULL);
+    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+        if (inputSize < LZ4_64Klimit) {
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+        } else {
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+        }
+    } else {
+        if (inputSize < LZ4_64Klimit) {
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+        } else {
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+        }
+    }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ *  A variant of LZ4_compress_fast_extState().
+ *
+ *  Using this variant avoids an expensive initialization step.
It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of + * "correctly initialized"). + */ +int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) +{ + LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse; + if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; + if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; + assert(ctx != NULL); + + if (dstCapacity >= LZ4_compressBound(srcSize)) { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } +} + + +int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration) +{ + int result; +#if (LZ4_HEAPMODE) + LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctxPtr == NULL) return 0; +#else + LZ4_stream_t ctx; + LZ4_stream_t* const ctxPtr = &ctx; +#endif + result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration); + +#if (LZ4_HEAPMODE) + FREEMEM(ctxPtr); +#endif + return result; +} + + +int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1); +} + + +/* Note!: This function leaves the stream in an unclean/broken state! + * It is not safe to subsequently use the same state with a _fastReset() or + * _continue() call without resetting it. 
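+ *
+ * (Editorial sketch, not upstream text. Typical LZ4_compress_destSize() usage,
+ * where srcSizePtr is an in/out parameter:
+ *
+ *     int srcSize = (int)inLen;                    // in: bytes available
+ *     int cSize = LZ4_compress_destSize(in, out, &srcSize, (int)outCap);
+ *     // out: cSize = compressed bytes written, srcSize = input bytes consumed
+ * )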
*/ +static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration) +{ + void* const s = LZ4_initStream(state, sizeof (*state)); + assert(s != NULL); (void)s; + + if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ + return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration); + } else { + if (*srcSizePtr < LZ4_64Klimit) { + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration); + } else { + tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration); + } } +} + +int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration) +{ + int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration); + /* clean the state on exit */ + LZ4_initStream(state, sizeof (LZ4_stream_t)); + return r; +} + + +int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) +{ +#if (LZ4_HEAPMODE) + LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctx == NULL) return 0; +#else + LZ4_stream_t ctxBody; + LZ4_stream_t* const ctx = &ctxBody; +#endif + + int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1); + +#if (LZ4_HEAPMODE) + FREEMEM(ctx); +#endif + return result; +} + + + +/*-****************************** +* Streaming functions +********************************/ + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_stream_t* LZ4_createStream(void) +{ + LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); + LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal)); + DEBUGLOG(4, "LZ4_createStream %p", (void*)lz4s); + if (lz4s == NULL) return NULL; + LZ4_initStream(lz4s, sizeof(*lz4s)); + return lz4s; +} +#endif + +static size_t LZ4_stream_t_alignment(void) +{ +#if LZ4_ALIGN_TEST + typedef struct { char c; LZ4_stream_t t; } t_a; + return sizeof(t_a) - sizeof(LZ4_stream_t); +#else + return 1; /* effectively disabled */ +#endif +} + +LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) +{ + DEBUGLOG(5, "LZ4_initStream"); + if (buffer == NULL) { return NULL; } + if (size < sizeof(LZ4_stream_t)) { return NULL; } + if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL; + MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal)); + return (LZ4_stream_t*)buffer; +} + +/* resetStream is now deprecated, + * prefer initStream() which is more general */ +void LZ4_resetStream (LZ4_stream_t* LZ4_stream) +{ + DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", (void*)LZ4_stream); + MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal)); +} + +void LZ4_resetStream_fast(LZ4_stream_t* ctx) { + LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); +} + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +int LZ4_freeStream (LZ4_stream_t* LZ4_stream) +{ + if (!LZ4_stream) return 0; /* support free on NULL */ + DEBUGLOG(5, "LZ4_freeStream %p", (void*)LZ4_stream); + FREEMEM(LZ4_stream); + return (0); +} +#endif + + +typedef enum { 
_ld_fast, _ld_slow } LoadDict_mode_e; +#define HASH_UNIT sizeof(reg_t) +int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict, + const char* dictionary, int dictSize, + LoadDict_mode_e _ld) +{ + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + const tableType_t tableType = byU32; + const BYTE* p = (const BYTE*)dictionary; + const BYTE* const dictEnd = p + dictSize; + U32 idx32; + + DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, (void*)dictionary, (void*)LZ4_dict); + + /* It's necessary to reset the context, + * and not just continue it with prepareTable() + * to avoid any risk of generating overflowing matchIndex + * when compressing using this dictionary */ + LZ4_resetStream(LZ4_dict); + + /* We always increment the offset by 64 KB, since, if the dict is longer, + * we truncate it to the last 64k, and if it's shorter, we still want to + * advance by a whole window length so we can provide the guarantee that + * there are only valid offsets in the window, which allows an optimization + * in LZ4_compress_fast_continue() where it uses noDictIssue even when the + * dictionary isn't a full 64k. */ + dict->currentOffset += 64 KB; + + if (dictSize < (int)HASH_UNIT) { + return 0; + } + + if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->tableType = (U32)tableType; + idx32 = dict->currentOffset - dict->dictSize; + + while (p <= dictEnd-HASH_UNIT) { + U32 const h = LZ4_hashPosition(p, tableType); + /* Note: overwriting => favors positions end of dictionary */ + LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); + p+=3; idx32+=3; + } + + if (_ld == _ld_slow) { + /* Fill hash table with additional references, to improve compression capability */ + p = dict->dictionary; + idx32 = dict->currentOffset - dict->dictSize; + while (p <= dictEnd-HASH_UNIT) { + U32 const h = LZ4_hashPosition(p, tableType); + U32 const limit = dict->currentOffset - 64 KB; + if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) { + /* Note: not overwriting => favors positions beginning of dictionary */ + LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); + } + p++; idx32++; + } + } + + return (int)dict->dictSize; +} + +int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast); +} + +int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow); +} + +void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) +{ + const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL : + &(dictionaryStream->internal_donotuse); + + DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", + (void*)workingStream, (void*)dictionaryStream, + dictCtx != NULL ? dictCtx->dictSize : 0); + + if (dictCtx != NULL) { + /* If the current offset is zero, we will never look in the + * external dictionary context, since there is no value a table + * entry can take that indicate a miss. In that case, we need + * to bump the offset to something non-zero. + */ + if (workingStream->internal_donotuse.currentOffset == 0) { + workingStream->internal_donotuse.currentOffset = 64 KB; + } + + /* Don't actually attach an empty dictionary. 
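+     * (Editorial sketch, not upstream text. The intended call sequence is:
+     *
+     *     LZ4_loadDict(&dictStream, dictBuf, dictLen);
+     *     LZ4_attach_dictionary(&workStream, &dictStream);
+     *     LZ4_compress_fast_continue(&workStream, src, dst, srcLen, dstCap, 1);
+     *
+     * dictStream must stay alive and unmodified while attached.)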
+         */
+        if (dictCtx->dictSize == 0) {
+            dictCtx = NULL;
+        }
+    }
+    workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
+{
+    assert(nextSize >= 0);
+    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
+        /* rescale hash table */
+        U32 const delta = LZ4_dict->currentOffset - 64 KB;
+        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+        int i;
+        DEBUGLOG(4, "LZ4_renormDictT");
+        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
+            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+            else LZ4_dict->hashTable[i] -= delta;
+        }
+        LZ4_dict->currentOffset = 64 KB;
+        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+    }
+}
+
+
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+                                const char* source, char* dest,
+                                int inputSize, int maxOutputSize,
+                                int acceleration)
+{
+    const tableType_t tableType = byU32;
+    LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
+    const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;
+
+    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);
+
+    LZ4_renormDictT(streamPtr, inputSize);   /* fix index overflow */
+    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+    /* invalidate tiny dictionaries */
+    if ( (streamPtr->dictSize < 4)     /* tiny dictionary : not enough for a hash */
+      && (dictEnd != source)           /* prefix mode */
+      && (inputSize > 0)               /* tolerance : don't lose history, in case next invocation would use prefix mode */
+      && (streamPtr->dictCtx == NULL)  /* usingDictCtx */
+      ) {
+        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, (void*)streamPtr->dictionary);
+        /* remove dictionary existence from history, to employ faster prefix mode */
+        streamPtr->dictSize = 0;
+        streamPtr->dictionary = (const BYTE*)source;
+        dictEnd = source;
+    }
+
+    /* Check overlapping input/dictionary space */
+    {   const char* const sourceEnd = source + inputSize;
+        if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
+            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+            streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
+        }
+    }
+
+    /* prefix mode : source data follows dictionary */
+    if (dictEnd == source) {
+        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
+        else
+            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
+    }
+
+    /* external dictionary mode */
+    {   int result;
+        if (streamPtr->dictCtx) {
+            /* We depend here on the fact that dictCtx'es (produced by
+             * LZ4_loadDict) guarantee that their tables contain no references
+             * to offsets between dictCtx->currentOffset - 64 KB and
+             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+             * to use noDictIssue even when the dict isn't a full 64 KB.
+ */ + if (inputSize > 4 KB) { + /* For compressing large blobs, it is faster to pay the setup + * cost to copy the dictionary's tables into the active context, + * so that the compression loop is only looking into one table. + */ + LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr)); + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); + } + } else { /* small data <= 4 KB */ + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } + } + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + return result; + } +} + + +/* Hidden debug function, to force-test external dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) +{ + LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse; + int result; + + LZ4_renormDictT(streamPtr, srcSize); + + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); + } + + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)srcSize; + + return result; +} + + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at its memory location, + * save it into a safer place (char* safeBuffer). + * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable, + * one can therefore call LZ4_compress_fast_continue() right after. + * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + */ +int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) +{ + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + + DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, (void*)safeBuffer); + + if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */ + if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } + + if (safeBuffer == NULL) assert(dictSize == 0); + if (dictSize > 0) { + const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; + assert(dict->dictionary); + LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize); + } + + dict->dictionary = (const BYTE*)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} + + + +/*-******************************* + * Decompression functions + ********************************/ + +typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; + +#undef MIN +#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) )
+
+
+/* variant for decompress_unsafe()
+ * does not know end of input
+ * presumes input is well formed
+ * note : will consume at least one byte */
+static size_t read_long_length_no_check(const BYTE** pp)
+{
+ size_t b, l = 0;
+ do { b = **pp; (*pp)++; l += b; } while (b==255);
+ DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
+ return l;
+}
+
+/* core decoder variant for LZ4_decompress_fast*()
+ * for legacy support only : these entry points are deprecated.
+ * - Presumes input is correctly formed (no defense vs malformed inputs)
+ * - Does not know input size (presume input buffer is "large enough")
+ * - Decompress a full block (only)
+ * @return : nb of bytes read from input.
+ * Note : this variant is not optimized for speed, just for maintenance.
+ * the goal is to remove support of decompress_fast*() variants by v2.0
+**/
+LZ4_FORCE_INLINE int
+LZ4_decompress_unsafe_generic(
+ const BYTE* const istart,
+ BYTE* const ostart,
+ int decompressedSize,
+
+ size_t prefixSize,
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note: =0 if dictStart==NULL */
+ )
+{
+ const BYTE* ip = istart;
+ BYTE* op = (BYTE*)ostart;
+ BYTE* const oend = ostart + decompressedSize;
+ const BYTE* const prefixStart = ostart - prefixSize;
+
+ DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
+ if (dictStart == NULL) assert(dictSize == 0);
+
+ while (1) {
+ /* start new sequence */
+ unsigned token = *ip++;
+
+ /* literals */
+ { size_t ll = token >> ML_BITS;
+ if (ll==15) {
+ /* long literal length */
+ ll += read_long_length_no_check(&ip);
+ }
+ if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */
+ LZ4_memmove(op, ip, ll); /* support in-place decompression */
+ op += ll;
+ ip += ll;
+ if ((size_t)(oend-op) < MFLIMIT) {
+ if (op==oend) break; /* end of block */
+ DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op);
+ /* incorrect end of block :
+ * last match must start at least MFLIMIT==12 bytes before end of output block */
+ return -1;
+ } }
+
+ /* match */
+ { size_t ml = token & 15;
+ size_t const offset = LZ4_readLE16(ip);
+ ip+=2;
+
+ if (ml==15) {
+ /* long match length */
+ ml += read_long_length_no_check(&ip);
+ }
+ ml += MINMATCH;
+
+ if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */
+
+ { const BYTE* match = op - offset;
+
+ /* out of range */
+ if (offset > (size_t)(op - prefixStart) + dictSize) {
+ DEBUGLOG(6, "offset out of range");
+ return -1;
+ }
+
+ /* check special case : extDict */
+ if (offset > (size_t)(op - prefixStart)) {
+ /* extDict scenario */
+ const BYTE* const dictEnd = dictStart + dictSize;
+ const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));
+ size_t const extml = (size_t)(dictEnd - extMatch);
+ if (extml > ml) {
+ /* match entirely within extDict */
+ LZ4_memmove(op, extMatch, ml);
+ op += ml;
+ ml = 0;
+ } else {
+ /* match split between extDict & prefix */
+ LZ4_memmove(op, extMatch, extml);
+ op += extml;
+ ml -= extml;
+ }
+ match = prefixStart;
+ }
+
+ /* match copy - slow variant, supporting overlap copy */
+ { size_t u;
+ for (u=0; u<ml; u++) {
+ op[u] = match[u];
+ } } }
+ op += ml;
+ if ((size_t)(oend-op) < MFLIMIT) {
+ if (op==oend) break; /* end of block */
+ DEBUGLOG(5, "invalid: match ends at distance %zi from end of block", oend-op);
+ /* incorrect end of block :
+ * last match must stop at least MFLIMIT==12 bytes before end of output block */
+ return -1;
+ } }
+ }
+
+ /* end of block : entire output decoded */
+ assert(op == oend);
+ return (int)(ip - istart);
+}
+
+/* Read the variable-length literal or match length.
+ *
+ * @ip : input pointer
+ * @ilimit : position after which, if length is not fully decoded, input is necessarily corrupted.
+ * @initial_check - check ip >= ipmax before start of loop. Returns initial_error if so.
+ * @error (output) - error code. Must be set to 0 before call.
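+ * (editor's note : in this version there is no separate error output ;
+ * the function below reports failure by returning rvl_error)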
+**/ +typedef size_t Rvl_t; +static const Rvl_t rvl_error = (Rvl_t)(-1); +LZ4_FORCE_INLINE Rvl_t +read_variable_length(const BYTE** ip, const BYTE* ilimit, + int initial_check) +{ + Rvl_t s, length = 0; + assert(ip != NULL); + assert(*ip != NULL); + assert(ilimit != NULL); + if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */ + return rvl_error; + } + s = **ip; + (*ip)++; + length += s; + if (unlikely((*ip) > ilimit)) { /* read limit reached */ + return rvl_error; + } + /* accumulator overflow detection (32-bit mode only) */ + if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { + return rvl_error; + } + if (likely(s != 255)) return length; + do { + s = **ip; + (*ip)++; + length += s; + if (unlikely((*ip) > ilimit)) { /* read limit reached */ + return rvl_error; + } + /* accumulator overflow detection (32-bit mode only) */ + if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { + return rvl_error; + } + } while (s == 255); + + return length; +} + +/*! LZ4_decompress_generic() : + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. + * Note that it is important for performance that this function really get inlined, + * in order to remove useless branches during compilation optimization. + */ +LZ4_FORCE_INLINE int +LZ4_decompress_generic( + const char* const src, + char* const dst, + int srcSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ + + earlyEnd_directive partialDecoding, /* full, partial */ + dict_directive dict, /* noDict, withPrefix64k, usingExtDict */ + const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note : = 0 if noDict */ + ) +{ + if ((src == NULL) || (outputSize < 0)) { return -1; } + + { const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + + BYTE* op = (BYTE*) dst; + BYTE* const oend = op + outputSize; + BYTE* cpy; + + const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize; + + const int checkOffset = (dictSize < (int)(64 KB)); + + + /* Set up the "end" pointers for the shortcut. */ + const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/; + const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/; + + const BYTE* match; + size_t offset; + unsigned token; + size_t length; + + + DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize); + + /* Special cases */ + assert(lowPrefix <= op); + if (unlikely(outputSize==0)) { + /* Empty output buffer */ + if (partialDecoding) return 0; + return ((srcSize==1) && (*ip==0)) ? 0 : -1; + } + if (unlikely(srcSize==0)) { return -1; } + + /* LZ4_FAST_DEC_LOOP: + * designed for modern OoO performance cpus, + * where copying reliably 32-bytes is preferable to an unpredictable branch. + * note : fast loop may show a regression for some client arm chips. 
*/ +#if LZ4_FAST_DEC_LOOP + if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { + DEBUGLOG(6, "move to safe decode loop"); + goto safe_decode; + } + + /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */ + DEBUGLOG(6, "using fast decode loop"); + while (1) { + /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ + assert(oend - op >= FASTLOOP_SAFE_DISTANCE); + assert(ip < iend); + token = *ip++; + length = token >> ML_BITS; /* literal length */ + DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); + + /* decode literal length */ + if (length == RUN_MASK) { + size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); + if (addl == rvl_error) { + DEBUGLOG(6, "error reading long literal length"); + goto _output_error; + } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + + /* copy literals */ + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } + LZ4_wildCopy32(op, ip, op+length); + ip += length; op += length; + } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) { + /* We don't need to check oend, since we check it once for each loop below */ + DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); + /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */ + LZ4_memcpy(op, ip, 16); + ip += length; op += length; + } else { + goto safe_literal_copy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset); + match = op - offset; + assert(match <= op); /* overflow check */ + + /* get matchlength */ + length = token & ML_MASK; + DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH); + + if (length == ML_MASK) { + size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); + if (addl == rvl_error) { + DEBUGLOG(5, "error reading long match length"); + goto _output_error; + } + length += addl; + length += MINMATCH; + DEBUGLOG(7, " long match length == %u", (unsigned)length); + if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ + if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + goto safe_match_copy; + } + } else { + length += MINMATCH; + if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length); + goto safe_match_copy; + } + + /* Fastpath check: skip LZ4_wildCopy32 when true */ + if ((dict == withPrefix64k) || (match >= lowPrefix)) { + if (offset >= 8) { + assert(match >= lowPrefix); + assert(match <= op); + assert(op + 18 <= oend); + + LZ4_memcpy(op, match, 8); + LZ4_memcpy(op+8, match+8, 8); + LZ4_memcpy(op+16, match+16, 2); + op += length; + continue; + } } } + + if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) { + DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match); + goto _output_error; + } + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + assert(dictEnd != NULL); + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) { + DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd"); + length = MIN(length, 
(size_t)(oend-op)); + } else { + DEBUGLOG(6, "end-of-block condition violated") + goto _output_error; + } } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + LZ4_memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) { *op++ = *copyFrom++; } + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + + /* copy match within block */ + cpy = op + length; + + assert((op <= oend) && (oend-op >= 32)); + if (unlikely(offset<16)) { + LZ4_memcpy_using_offset(op, match, cpy, offset); + } else { + LZ4_wildCopy32(op, match, cpy); + } + + op = cpy; /* wildcopy correction */ + } + safe_decode: +#endif + + /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ + DEBUGLOG(6, "using safe decode loop"); + while (1) { + assert(ip < iend); + token = *ip++; + length = token >> ML_BITS; /* literal length */ + DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); + + /* A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there is enough space, + * enter the shortcut and copy 16 bytes on behalf of the literals + * (in the fast mode, only 8 bytes can be safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes in a similar + * manner; but we ensure that there's enough space in the output for + * those 18 bytes earlier, upon entering the shortcut (in other words, + * there is a combined check for both stages). + */ + if ( (length != RUN_MASK) + /* strictly "less than" on input, to re-enter the loop with at least one byte */ + && likely((ip < shortiend) & (op <= shortoend)) ) { + /* Copy the literals */ + LZ4_memcpy(op, ip, 16); + op += length; ip += length; + + /* The second stage: prepare for match copying, decode full info. + * If it doesn't work out, the info won't be wasted. */ + length = token & ML_MASK; /* match length */ + DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4); + offset = LZ4_readLE16(ip); ip += 2; + match = op - offset; + assert(match <= op); /* check overflow */ + + /* Do not deal with overlapping matches. */ + if ( (length != ML_MASK) + && (offset >= 8) + && (dict==withPrefix64k || match >= lowPrefix) ) { + /* Copy the match. */ + LZ4_memcpy(op + 0, match + 0, 8); + LZ4_memcpy(op + 8, match + 8, 8); + LZ4_memcpy(op +16, match +16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* The second stage didn't work out, but the info is ready. + * Propel it right to the point of match copying. 
*/ + goto _copy_match; + } + + /* decode literal length */ + if (length == RUN_MASK) { + size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); + if (addl == rvl_error) { goto _output_error; } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + } + +#if LZ4_FAST_DEC_LOOP + safe_literal_copy: +#endif + /* copy literals */ + cpy = op+length; + + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) { + /* We've either hit the input parsing restriction or the output parsing restriction. + * In the normal scenario, decoding a full block, it must be the last sequence, + * otherwise it's an error (invalid input or dimensions). + * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow. + */ + if (partialDecoding) { + /* Since we are partial decoding we may be in this block because of the output parsing + * restriction, which is not valid since the output buffer is allowed to be undersized. + */ + DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end") + DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length); + DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op)); + DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip)); + /* Finishing in the middle of a literals segment, + * due to lack of input. + */ + if (ip+length > iend) { + length = (size_t)(iend-ip); + cpy = op + length; + } + /* Finishing in the middle of a literals segment, + * due to lack of output space. + */ + if (cpy > oend) { + cpy = oend; + assert(op<=oend); + length = (size_t)(oend-op); + } + } else { + /* We must be on the last sequence (or invalid) because of the parsing limitations + * so check that we exactly consume the input and don't overrun the output buffer. + */ + if ((ip+length != iend) || (cpy > oend)) { + DEBUGLOG(5, "should have been last run of literals") + DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", (void*)ip, (int)length, (void*)(ip+length), (void*)iend); + DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", (void*)cpy, (void*)(oend-MFLIMIT)); + DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize); + goto _output_error; + } + } + LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */ + ip += length; + op += length; + /* Necessarily EOF when !partialDecoding. + * When partialDecoding, it is EOF if we've either + * filled the output buffer or + * can't proceed with reading an offset for following match. 
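+ * (editor's note : the "ip >= iend-2" test below covers the last case ;
+ * at least 2 more input bytes are needed to read the offset of a following match)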
+ */ + if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) { + break; + } + } else { + LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */ + ip += length; op = cpy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + match = op - offset; + + /* get matchlength */ + length = token & ML_MASK; + DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); + + _copy_match: + if (length == ML_MASK) { + size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); + if (addl == rvl_error) { goto _output_error; } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ + } + length += MINMATCH; + +#if LZ4_FAST_DEC_LOOP + safe_match_copy: +#endif + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + assert(dictEnd != NULL); + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) length = MIN(length, (size_t)(oend-op)); + else goto _output_error; /* doesn't respect parsing restriction */ + } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + LZ4_memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + assert(match >= lowPrefix); + + /* copy match within block */ + cpy = op + length; + + /* partialDecoding : may end anywhere within the block */ + assert(op<=oend); + if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + size_t const mlen = MIN(length, (size_t)(oend-op)); + const BYTE* const matchEnd = match + mlen; + BYTE* const copyEnd = op + mlen; + if (matchEnd > op) { /* overlap copy */ + while (op < copyEnd) { *op++ = *match++; } + } else { + LZ4_memcpy(op, match, mlen); + } + op = copyEnd; + if (op == oend) { break; } + continue; + } + + if (unlikely(offset<8)) { + LZ4_write32(op, 0); /* silence msan warning when offset==0 */ + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += inc32table[offset]; + LZ4_memcpy(op+4, match, 4); + match -= dec64table[offset]; + } else { + LZ4_memcpy(op, match, 8); + match += 8; + } + op += 8; + + if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); + if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ + if (op < oCopyLimit) { + LZ4_wildCopy8(op, match, oCopyLimit); + match += oCopyLimit - op; + op = oCopyLimit; + } + while (op < cpy) { *op++ = *match++; } + } else { + LZ4_memcpy(op, match, 8); + if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } + } + op = cpy; /* wildcopy correction */ + } + + /* end of decoding */ + DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst)); + return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ + + /* Overflow error 
detected */ + _output_error: + return (int) (-(((const char*)ip)-src))-1; + } +} + + +/*===== Instantiate the API decoding functions. =====*/ + +LZ4_FORCE_O2 +int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, + decode_full_block, noDict, + (BYTE*)dest, NULL, 0); +} + +LZ4_FORCE_O2 +int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, + partial_decode, + noDict, (BYTE*)dst, NULL, 0); +} + +LZ4_FORCE_O2 +int LZ4_decompress_fast(const char* source, char* dest, int originalSize) +{ + DEBUGLOG(5, "LZ4_decompress_fast"); + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + 0, NULL, 0); +} + +/*===== Instantiate a few more decoding cases, used more than once. =====*/ + +LZ4_FORCE_O2 /* Exported, an obsolete API function. */ +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, + partial_decode, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +/* Another obsolete API function, paired with the previous one. 
*/ +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +{ + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + 64 KB, NULL, 0); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, noDict, + (BYTE*)dest-prefixSize, NULL, 0); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, + size_t prefixSize) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, + partial_decode, noDict, + (BYTE*)dest-prefixSize, NULL, 0); +} + +LZ4_FORCE_O2 +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize) +{ + DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict"); + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_O2 +int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, + int compressedSize, int targetOutputSize, int dstCapacity, + const void* dictStart, size_t dictSize) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, + partial_decode, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, + const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + 0, (const BYTE*)dictStart, dictSize); +} + +/* The "double dictionary" mode, for use with e.g. ring buffers: the first part + * of the dictionary is passed as prefix, and the second via dictStart + dictSize. + * These routines are used only once, in LZ4_decompress_*_continue(). + */ +LZ4_FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} + +/*===== streaming decompression functions =====*/ + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_streamDecode_t* LZ4_createStreamDecode(void) +{ + LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal)); + return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); +} + +int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) +{ + if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ + FREEMEM(LZ4_stream); + return 0; +} +#endif + +/*! LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). 
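+ * Example (editor sketch, buffer names are hypothetical placeholders) :
+ *   LZ4_streamDecode_t ctx;
+ *   LZ4_setStreamDecode(&ctx, savedDict, savedDictSize);
+ *   n = LZ4_decompress_safe_continue(&ctx, src, dst, srcSize, dstCapacity);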
+ * @return : 1 if OK, 0 if error + */ +int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + lz4sd->prefixSize = (size_t)dictSize; + if (dictSize) { + assert(dictionary != NULL); + lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; + } else { + lz4sd->prefixEnd = (const BYTE*) dictionary; + } + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/*! LZ4_decoderRingBufferSize() : + * when setting a ring buffer for streaming decompression (optional scenario), + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * Note : in a ring buffer scenario, + * blocks are presumed decompressed next to each other. + * When not enough space remains for next block (remainingSize < maxBlockSize), + * decoding resumes from beginning of ring buffer. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +int LZ4_decoderRingBufferSize(int maxBlockSize) +{ + if (maxBlockSize < 0) return 0; + if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; + if (maxBlockSize < 16) maxBlockSize = 16; + return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); +} + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in "streaming" mode. + Previously decoded blocks must still be available at the memory position where they were decoded. + If it's not possible, save the relevant part of decoded data into a safe buffer, + and indicate where it stands using LZ4_setStreamDecode() +*/ +LZ4_FORCE_O2 +int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixSize == 0) { + /* The first call, no dictionary yet. */ + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + /* They're rolling the current segment. */ + if (lz4sd->prefixSize >= 64 KB - 1) + result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + else if (lz4sd->extDictSize == 0) + result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize); + else + result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += (size_t)result; + lz4sd->prefixEnd += result; + } else { + /* The buffer wraps around, or they're switching to another buffer. 
*/ + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } + + return result; +} + +LZ4_FORCE_O2 int +LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, + const char* source, char* dest, int originalSize) +{ + LZ4_streamDecode_t_internal* const lz4sd = + (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse); + int result; + + DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize); + assert(originalSize >= 0); + + if (lz4sd->prefixSize == 0) { + DEBUGLOG(5, "first invocation : no prefix nor extDict"); + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_fast(source, dest, originalSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + DEBUGLOG(5, "continue using existing prefix"); + result = LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += (size_t)originalSize; + lz4sd->prefixEnd += originalSize; + } else { + DEBUGLOG(5, "prefix becomes extDict"); + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_fast_extDict(source, dest, originalSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } + + return result; +} + + +/* +Advanced decoding functions : +*_usingDict() : + These decoding functions work the same as "_continue" ones, + the dictionary must be explicitly provided within parameters +*/ + +int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 KB - 1) { + return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize); +} + +int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 KB - 1) { + return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize); +} + 
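+/* Usage sketch (editor annotation, not upstream lz4 code) : stateless decoding
+ * of one block against an external dictionary, using the functions above.
+ * Names (dict, dictSize, blk, blkSize) are hypothetical placeholders.
+ *
+ *   char out[64 * 1024];
+ *   int const n = LZ4_decompress_safe_usingDict(blk, out, blkSize,
+ *                                               (int)sizeof(out),
+ *                                               dict, dictSize);
+ *   if (n < 0) { ... blk is malformed, n is a negative error code ... }
+ *
+ * Decoding is fastest when out == dict + dictSize : the dictionary then acts
+ * as a prefix, and the withPrefix64k / withSmallPrefix paths above are taken.
+ */
+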
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) +{ + if (dictSize==0 || dictStart+dictSize == dest) + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + (size_t)dictSize, NULL, 0); + assert(dictSize >= 0); + return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); +} + + +/*=************************************************* +* Obsolete Functions +***************************************************/ +/* obsolete compression functions */ +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_default(source, dest, inputSize, maxOutputSize); +} +int LZ4_compress(const char* src, char* dest, int srcSize) +{ + return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); +} +int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); +} +int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); +} +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); +} +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) +{ + return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); +} + +/* +These decompression functions are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. +- LZ4_uncompress is totally equivalent to LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe +*/ +int LZ4_uncompress (const char* source, char* dest, int outputSize) +{ + return LZ4_decompress_fast(source, dest, outputSize); +} +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) +{ + return LZ4_decompress_safe(source, dest, isize, maxOutputSize); +} + +/* Obsolete Streaming functions */ + +int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); } + +int LZ4_resetStreamState(void* state, char* inputBuffer) +{ + (void)inputBuffer; + LZ4_resetStream((LZ4_stream_t*)state); + return 0; +} + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +void* LZ4_create (char* inputBuffer) +{ + (void)inputBuffer; + return LZ4_createStream(); +} +#endif + +char* LZ4_slideInputBuffer (void* state) +{ + /* avoid const char * -> char * conversion warning */ + return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; +} + +#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/tools/lib/lz4/lz4.h b/tools/lib/lz4/lz4.h new file mode 100644 index 0000000..5b147b2 --- /dev/null +++ b/tools/lib/lz4/lz4.h @@ -0,0 +1,886 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Header File + * Copyright (c) Yann Collet. All rights reserved. 
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+
+/**
+ Introduction
+
+ LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
+ scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
+ multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
+
+ The LZ4 compression library provides in-memory compression and decompression functions.
+ It gives full buffer control to user.
+ Compression can be done in:
+ - a single step (described as Simple Functions)
+ - a single step, reusing a context (described in Advanced Functions)
+ - unbounded multiple steps (described as Streaming compression)
+
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+ Exact metadata depends on exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes block's compressed size, and maximum bound of decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.
+
+ lz4.h only handle blocks, it can not generate Frames.
+
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
+*/
+
+/*^***************************************************************
+* Export parameters
+*****************************************************************/
+/*
+* LZ4_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+* LZ4LIB_VISIBILITY :
+* Control library symbols visibility.
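+* (editor's note : define LZ4_DLL_EXPORT=1 when building liblz4 as a Windows DLL,
+* so that the declarations below are tagged __declspec(dllexport))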
+*/ +#ifndef LZ4LIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define LZ4LIB_VISIBILITY +# endif +#endif +#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) +# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY +#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) +# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define LZ4LIB_API LZ4LIB_VISIBILITY +#endif + +/*! LZ4_FREESTANDING : + * When this macro is set to 1, it enables "freestanding mode" that is + * suitable for typical freestanding environment which doesn't support + * standard C library. + * + * - LZ4_FREESTANDING is a compile-time switch. + * - It requires the following macros to be defined: + * LZ4_memcpy, LZ4_memmove, LZ4_memset. + * - It only enables LZ4/HC functions which don't use heap. + * All LZ4F_* functions are not supported. + * - See tests/freestanding.c to check its basic setup. + */ +#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1) +# define LZ4_HEAPMODE 0 +# define LZ4HC_HEAPMODE 0 +# define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1 +# if !defined(LZ4_memcpy) +# error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'." +# endif +# if !defined(LZ4_memset) +# error "LZ4_FREESTANDING requires macro 'LZ4_memset'." +# endif +# if !defined(LZ4_memmove) +# error "LZ4_FREESTANDING requires macro 'LZ4_memmove'." +# endif +#elif ! defined(LZ4_FREESTANDING) +# define LZ4_FREESTANDING 0 +#endif + + +/*------ Version ------*/ +#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ +#define LZ4_VERSION_MINOR 10 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ + +#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) + +#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +#define LZ4_QUOTE(str) #str +#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) +#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */ + +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */ + + +/*-************************************ +* Tuning memory usage +**************************************/ +/*! + * LZ4_MEMORY_USAGE : + * Can be selected at compile time, by setting LZ4_MEMORY_USAGE. + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB) + * Increasing memory usage improves compression ratio, generally at the cost of speed. + * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. + * Default value is 14, for 16KB, which nicely fits into most L1 caches. + */ +#ifndef LZ4_MEMORY_USAGE +# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT +#endif + +/* These are absolute limits, they should not be changed by users */ +#define LZ4_MEMORY_USAGE_MIN 10 +#define LZ4_MEMORY_USAGE_DEFAULT 14 +#define LZ4_MEMORY_USAGE_MAX 20 + +#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN) +# error "LZ4_MEMORY_USAGE is too small !" +#endif + +#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX) +# error "LZ4_MEMORY_USAGE is too large !" 
+#endif + +/*-************************************ +* Simple Functions +**************************************/ +/*! LZ4_compress_default() : + * Compresses 'srcSize' bytes from buffer 'src' + * into already allocated 'dst' buffer of size 'dstCapacity'. + * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). + * It also runs faster, so it's a recommended setting. + * If the function cannot compress 'src' into a more limited 'dst' budget, + * compression stops *immediately*, and the function result is zero. + * In which case, 'dst' content is undefined (invalid). + * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. + * dstCapacity : size of buffer 'dst' (which must be already allocated) + * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) + * or 0 if compression fails + * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). + */ +LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); + +/*! LZ4_decompress_safe() : + * @compressedSize : is the exact complete size of the compressed block. + * @dstCapacity : is the size of destination buffer (which must be already allocated), + * presumed an upper bound of decompressed size. + * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + * If destination buffer is not large enough, decoding will stop and output an error code (negative value). + * If the source stream is detected malformed, the function will stop decoding and return a negative result. + * Note 1 : This function is protected against malicious data packets : + * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, + * even if the compressed block is maliciously modified to order the decoder to do these actions. + * In such case, the decoder stops immediately, and considers the compressed block malformed. + * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. + * The implementation is free to send / store / derive this information in whichever way is most beneficial. + * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. + */ +LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); + + +/*-************************************ +* Advanced Functions +**************************************/ +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) + +/*! LZ4_compressBound() : + Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) + This function is primarily useful for memory allocation purposes (destination buffer size). + Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). + Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) + inputSize : max supported value is LZ4_MAX_INPUT_SIZE + return : maximum output size in a "worst case" scenario + or 0, if input size is incorrect (too large or negative) +*/ +LZ4LIB_API int LZ4_compressBound(int inputSize); + +/*! 
LZ4_compress_fast() : + Same as LZ4_compress_default(), but allows selection of "acceleration" factor. + The larger the acceleration value, the faster the algorithm, but also the lesser the compression. + It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. + An acceleration value of "1" is the same as regular LZ4_compress_default() + Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). + Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c). +*/ +LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + + +/*! LZ4_compress_fast_extState() : + * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. + * Use LZ4_sizeofState() to know how much memory must be allocated, + * and allocate it on 8-bytes boundaries (using `malloc()` typically). + * Then, provide this buffer as `void* state` to compression function. + */ +LZ4LIB_API int LZ4_sizeofState(void); +LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_compress_destSize() : + * Reverse the logic : compresses as much data as possible from 'src' buffer + * into already allocated buffer 'dst', of size >= 'dstCapacity'. + * This function either compresses the entire 'src' content into 'dst' if it's large enough, + * or fill 'dst' buffer completely with as much data as possible from 'src'. + * note: acceleration parameter is fixed to "default". + * + * *srcSizePtr : in+out parameter. Initially contains size of input. + * Will be modified to indicate how many bytes where read from 'src' to fill 'dst'. + * New value is necessarily <= input value. + * @return : Nb bytes written into 'dst' (necessarily <= dstCapacity) + * or 0 if compression fails. + * + * Note : 'targetDstSize' must be >= 1, because it's the smallest valid lz4 payload. + * + * Note 2:from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+): + * the produced compressed content could, in rare circumstances, + * require to be decompressed into a destination buffer + * larger by at least 1 byte than decompressesSize. + * If an application uses `LZ4_compress_destSize()`, + * it's highly recommended to update liblz4 to v1.9.2 or better. + * If this can't be done or ensured, + * the receiving decompression function should provide + * a dstCapacity which is > decompressedSize, by at least 1 byte. + * See https://github.com/lz4/lz4/issues/859 for details + */ +LZ4LIB_API int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize); + +/*! LZ4_decompress_safe_partial() : + * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', + * into destination buffer 'dst' of size 'dstCapacity'. + * Up to 'targetOutputSize' bytes will be decoded. + * The function stops decoding on reaching this objective. + * This can be useful to boost performance + * whenever only the beginning of a block is required. + * + * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize) + * If source stream is detected malformed, function returns a negative result. + * + * Note 1 : @return can be < targetOutputSize, if compressed block contains less data. + * + * Note 2 : targetOutputSize must be <= dstCapacity + * + * Note 3 : this function effectively stops decoding on reaching targetOutputSize, + * so dstCapacity is kind of redundant. 
+ * This is because in older versions of this function, + * decoding operation would still write complete sequences. + * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize, + * it could write more bytes, though only up to dstCapacity. + * Some "margin" used to be required for this operation to work properly. + * Thankfully, this is no longer necessary. + * The function nonetheless keeps the same signature, in an effort to preserve API compatibility. + * + * Note 4 : If srcSize is the exact size of the block, + * then targetOutputSize can be any value, + * including larger than the block's decompressed size. + * The function will, at most, generate block's decompressed size. + * + * Note 5 : If srcSize is _larger_ than block's compressed size, + * then targetOutputSize **MUST** be <= block's decompressed size. + * Otherwise, *silent corruption will occur*. + */ +LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); + + +/*-********************************************* +* Streaming Compression Functions +***********************************************/ +typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ + +/*! + Note about RC_INVOKED + + - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio). + https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros + + - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars) + and reports warning "RC4011: identifier truncated". + + - To eliminate the warning, we surround long preprocessor symbol with + "#if !defined(RC_INVOKED) ... #endif" block that means + "skip this block when rc.exe is trying to read it". +*/ +#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); +LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); +#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ +#endif + +/*! LZ4_resetStream_fast() : v1.9.0+ + * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks + * (e.g., LZ4_compress_fast_continue()). + * + * An LZ4_stream_t must be initialized once before usage. + * This is automatically done when created by LZ4_createStream(). + * However, should the LZ4_stream_t be simply declared on stack (for example), + * it's necessary to initialize it first, using LZ4_initStream(). + * + * After init, start any new stream with LZ4_resetStream_fast(). + * A same LZ4_stream_t can be re-used multiple times consecutively + * and compress multiple streams, + * provided that it starts each new stream with LZ4_resetStream_fast(). + * + * LZ4_resetStream_fast() is much faster than LZ4_initStream(), + * but is not compatible with memory regions containing garbage data. + * + * Note: it's only useful to call LZ4_resetStream_fast() + * in the context of streaming compression. + * The *extState* functions perform their own resets. + * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. + */ +LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); + +/*! LZ4_loadDict() : + * Use this function to reference a static dictionary into LZ4_stream_t. + * The dictionary must remain available during compression. + * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. 
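+ * Example (editor sketch, buffer names are hypothetical placeholders) :
+ *   LZ4_stream_t ctx;
+ *   LZ4_initStream(&ctx, sizeof(ctx));
+ *   LZ4_loadDict(&ctx, dictBuf, dictLen);
+ *   n = LZ4_compress_fast_continue(&ctx, src, dst, srcSize, dstCapacity, 1);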
+ * The same dictionary will have to be loaded on decompression side for successful decoding. + * Dictionary are useful for better compression of small data (KB range). + * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic. + * When in doubt, employ the Zstandard's Dictionary Builder. + * Loading a size of 0 is allowed, and is the same as reset. + * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded) + */ +LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_loadDictSlow() : v1.10.0+ + * Same as LZ4_loadDict(), + * but uses a bit more cpu to reference the dictionary content more thoroughly. + * This is expected to slightly improve compression ratio. + * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions. + * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded) + */ +LZ4LIB_API int LZ4_loadDictSlow(LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_attach_dictionary() : stable since v1.10.0 + * + * This allows efficient re-use of a static dictionary multiple times. + * + * Rather than re-loading the dictionary buffer into a working context before + * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a + * working LZ4_stream_t, this function introduces a no-copy setup mechanism, + * in which the working stream references @dictionaryStream in-place. + * + * Several assumptions are made about the state of @dictionaryStream. + * Currently, only states which have been prepared by LZ4_loadDict() or + * LZ4_loadDictSlow() should be expected to work. + * + * Alternatively, the provided @dictionaryStream may be NULL, + * in which case any existing dictionary stream is unset. + * + * If a dictionary is provided, it replaces any pre-existing stream history. + * The dictionary contents are the only history that can be referenced and + * logically immediately precede the data compressed in the first subsequent + * compression call. + * + * The dictionary will only remain attached to the working stream through the + * first compression call, at the end of which it is cleared. + * @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged + * through the completion of the compression session. + * + * Note: there is no equivalent LZ4_attach_*() method on the decompression side + * because there is no initialization cost, hence no need to share the cost across multiple sessions. + * To decompress LZ4 blocks using dictionary, attached or not, + * just employ the regular LZ4_setStreamDecode() for streaming, + * or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression. + */ +LZ4LIB_API void +LZ4_attach_dictionary(LZ4_stream_t* workingStream, + const LZ4_stream_t* dictionaryStream); + +/*! LZ4_compress_fast_continue() : + * Compress 'src' content using data from previously compressed blocks, for better compression ratio. + * 'dst' buffer must be already allocated. + * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. + * + * @return : size of compressed block + * or 0 if there is an error (typically, cannot fit into 'dst'). + * + * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. + * Each block has precise boundaries. + * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. 
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. + * + * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! + * + * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. + * Make sure that buffers are separated, by at least one byte. + * This construction ensures that each block only depends on previous block. + * + * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * + * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. + */ +LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_saveDict() : + * If last 64KB data cannot be guaranteed to remain available at its current memory location, + * save it into a safer place (char* safeBuffer). + * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), + * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. + * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. + */ +LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); + + +/*-********************************************** +* Streaming Decompression Functions +* Bufferless synchronous API +************************************************/ +typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ + +/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : + * creation / destruction of streaming decompression tracking context. + * A tracking context can be re-used multiple times. + */ +#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); +LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); +#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ +#endif + +/*! LZ4_setStreamDecode() : + * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. + * Use this function to start decompression of a new stream of blocks. + * A dictionary can optionally be set. Use NULL or size 0 for a reset order. + * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. + * @return : 1 if OK, 0 if error + */ +LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); + +/*! LZ4_decoderRingBufferSize() : v1.8.2+ + * Note : in a ring buffer scenario (optional), + * blocks are presumed decompressed next to each other + * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), + * at which stage it resumes from beginning of ring buffer. + * When setting such a ring buffer for streaming decompression, + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); +#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ + +/*! 
LZ4_decompress_safe_continue() : + * This decoding function allows decompression of consecutive blocks in "streaming" mode. + * The difference with the usual independent blocks is that + * new blocks are allowed to find references into former blocks. + * A block is an unsplittable entity, and must be presented entirely to the decompression function. + * LZ4_decompress_safe_continue() only accepts one block at a time. + * It's modeled after `LZ4_decompress_safe()` and behaves similarly. + * + * @LZ4_streamDecode : decompression state, tracking the position in memory of past data + * @compressedSize : exact complete size of one compressed block. + * @dstCapacity : size of destination buffer (which must be already allocated), + * must be an upper bound of decompressed size. + * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + * If destination buffer is not large enough, decoding will stop and output an error code (negative value). + * If the source stream is detected malformed, the function will stop decoding and return a negative result. + * + * The last 64KB of previously decoded data *must* remain available and unmodified + * at the memory position where they were previously decoded. + * If less than 64KB of data has been decoded, all the data must be present. + * + * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : + * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). + * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. + * In which case, encoding and decoding buffers do not need to be synchronized. + * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. + * - Synchronized mode : + * Decompression buffer size is _exactly_ the same as compression buffer size, + * and follows exactly same update rule (block boundaries at same positions), + * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), + * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). + * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including small ones ( < 64 KB). + * + * Whenever these conditions are not possible, + * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, + * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. +*/ +LZ4LIB_API int +LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, + const char* src, char* dst, + int srcSize, int dstCapacity); + + +/*! LZ4_decompress_safe_usingDict() : + * Works the same as + * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue() + * However, it's stateless: it doesn't need any LZ4_streamDecode_t state. + * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. + */ +LZ4LIB_API int +LZ4_decompress_safe_usingDict(const char* src, char* dst, + int srcSize, int dstCapacity, + const char* dictStart, int dictSize); + +/*! 
LZ4_decompress_safe_partial_usingDict() : + * Behaves the same as LZ4_decompress_safe_partial() + * with the added ability to specify a memory segment for past data. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. + */ +LZ4LIB_API int +LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, + int compressedSize, + int targetOutputSize, int maxOutputSize, + const char* dictStart, int dictSize); + +#endif /* LZ4_H_2983827168210 */ + + +/*^************************************* + * !!!!!! STATIC LINKING ONLY !!!!!! + ***************************************/ + +/*-**************************************************************************** + * Experimental section + * + * Symbols declared in this section must be considered unstable. Their + * signatures or semantics may change, or they may be removed altogether in the + * future. They are therefore only safe to depend on when the caller is + * statically linked against the library. + * + * To protect against unsafe usage, not only are the declarations guarded, + * the definitions are hidden by default + * when building LZ4 as a shared/dynamic library. + * + * In order to access these declarations, + * define LZ4_STATIC_LINKING_ONLY in your application + * before including LZ4's headers. + * + * In order to make their implementations accessible dynamically, you must + * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. + ******************************************************************************/ + +#ifdef LZ4_STATIC_LINKING_ONLY + +#ifndef LZ4_STATIC_3504398509 +#define LZ4_STATIC_3504398509 + +#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS +# define LZ4LIB_STATIC_API LZ4LIB_API +#else +# define LZ4LIB_STATIC_API +#endif + + +/*! LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. + * It is only safe to call if the state buffer is known to be correctly initialized already + * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). + * From a high level, the difference is that + * this function initializes the provided state with a call to something like LZ4_resetStream_fast() + * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). + */ +LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_compress_destSize_extState() : introduced in v1.10.0 + * Same as LZ4_compress_destSize(), but using an externally allocated state. + * Also: exposes @acceleration + */ +int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration); + +/*! In-place compression and decompression + * + * It's possible to have input and output sharing the same buffer, + * for highly constrained memory environments. + * In both cases, it requires input to lay at the end of the buffer, + * and decompression to start at beginning of the buffer. + * Buffer size must feature some margin, hence be larger than final size. + * + * |<------------------------buffer--------------------------------->| + * |<-----------compressed data--------->| + * |<-----------decompressed size------------------>| + * |<----margin---->| + * + * This technique is more useful for decompression, + * since decompressed size is typically larger, + * and margin is short. 
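+ *
+ * As a concrete illustration of the macros defined further below (illustrative
+ * numbers, not part of the upstream text) : for decompressedSize = 1 MB (1048576 bytes),
+ *   LZ4_DECOMPRESS_INPLACE_MARGIN(1048576)      == (1048576 >> 8) + 32 == 4128
+ *   LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(1048576) == 1048576 + 4128     == 1052704
+ * so the compressed payload is copied to the end of a 1052704-byte buffer,
+ * and decompression writes from the beginning of that same buffer.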
+ * + * In-place decompression will work inside any buffer + * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). + * This presumes that decompressedSize > compressedSize. + * Otherwise, it means compression actually expanded data, + * and it would be more efficient to store such data with a flag indicating it's not compressed. + * This can happen when data is not compressible (already compressed, or encrypted). + * + * For in-place compression, margin is larger, as it must be able to cope with both + * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, + * and data expansion, which can happen when input is not compressible. + * As a consequence, buffer size requirements are much higher, + * and memory savings offered by in-place compression are more limited. + * + * There are ways to limit this cost for compression : + * - Reduce history size, by modifying LZ4_DISTANCE_MAX. + * Note that it is a compile-time constant, so all compressions will apply this limit. + * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, + * so it's a reasonable trick when inputs are known to be small. + * - Require the compressor to deliver a "maximum compressed size". + * This is the `dstCapacity` parameter in `LZ4_compress*()`. + * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, + * in which case, the return code will be 0 (zero). + * The caller must be ready for these cases to happen, + * and typically design a backup scheme to send data uncompressed. + * The combination of both techniques can significantly reduce + * the amount of margin required for in-place compression. + * + * In-place compression can work in any buffer + * which size is >= (maxCompressedSize) + * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. + * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, + * so it's possible to reduce memory requirements by playing with them. + */ + +#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) +#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */ + +#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ +# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ +#endif + +#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ +#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ + +#endif /* LZ4_STATIC_3504398509 */ +#endif /* LZ4_STATIC_LINKING_ONLY */ + + + +#ifndef LZ4_H_98237428734687 +#define LZ4_H_98237428734687 + +/*-************************************************************ + * Private Definitions + ************************************************************** + * Do not use these definitions directly. + * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. + * Accessing members will expose user code to API and/or ABI break in future versions of the library. 
+ **************************************************************/
+#define LZ4_HASHLOG   (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)   /* required as macro for static allocation */
+
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+  typedef int8_t LZ4_i8;
+  typedef unsigned char LZ4_byte;
+  typedef uint16_t LZ4_u16;
+  typedef uint32_t LZ4_u32;
+#else
+  typedef signed char LZ4_i8;
+  typedef unsigned char LZ4_byte;
+  typedef unsigned short LZ4_u16;
+  typedef unsigned int LZ4_u32;
+#endif
+
+/*! LZ4_stream_t :
+ * Never ever use below internal definitions directly !
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_stream_t object.
+**/
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
+    LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+    const LZ4_byte* dictionary;
+    const LZ4_stream_t_internal* dictCtx;
+    LZ4_u32 currentOffset;
+    LZ4_u32 tableType;
+    LZ4_u32 dictSize;
+    /* Implicit padding to ensure structure is aligned */
+};
+
+#define LZ4_STREAM_MINSIZE  ((1UL << (LZ4_MEMORY_USAGE)) + 32)  /* static size, for inter-version compatibility */
+union LZ4_stream_u {
+    char minStateSize[LZ4_STREAM_MINSIZE];
+    LZ4_stream_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_stream_t */
+
+
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ * but it's not when the structure is simply declared on stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ *        In which case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead
+**/
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* stateBuffer, size_t size);
+
+
+/*! LZ4_streamDecode_t :
+ * Never ever use below internal definitions directly !
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_streamDecode_t object.
+**/
+typedef struct {
+    const LZ4_byte* externalDict;
+    const LZ4_byte* prefixEnd;
+    size_t extDictSize;
+    size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+#define LZ4_STREAMDECODE_MINSIZE 32
+union LZ4_streamDecode_u {
+    char minStateSize[LZ4_STREAMDECODE_MINSIZE];
+    LZ4_streamDecode_t_internal internal_donotuse;
+} ;   /* previously typedef'd to LZ4_streamDecode_t */
+
+
+
+/*-************************************
+* Obsolete Functions
+**************************************/
+
+/*! Deprecation warnings
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
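+ *
+ * For instance :
+ *     #define LZ4_DISABLE_DEPRECATE_WARNINGS
+ *     #include "lz4.h"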
+ */ +#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define LZ4_DEPRECATED(message) [[deprecated(message)]] +# elif defined(_MSC_VER) +# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) +# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45)) +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31) +# define LZ4_DEPRECATED(message) __attribute__((deprecated)) +# else +# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler") +# define LZ4_DEPRECATED(message) /* disabled */ +# endif +#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ + +/*! Obsolete compression functions (since v1.7.3) */ +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/*! Obsolete decompression functions (since v1.8.0) */ +LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); + +/* Obsolete streaming functions (since v1.7.0) + * degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. They have been preserved as well as + * possible: using them will still produce a correct output. However, they don't + * actually retain any history between compression calls. The compression ratio + * achieved will therefore be no better than compressing each chunk + * independently. + */ +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); +LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); + +/*! 
Obsolete streaming decoding functions (since v1.7.0) */ +LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); + +/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) : + * These functions used to be faster than LZ4_decompress_safe(), + * but this is no longer the case. They are now slower. + * This is because LZ4_decompress_fast() doesn't know the input size, + * and therefore must progress more cautiously into the input buffer to not read beyond the end of block. + * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. + * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. + * + * The last remaining LZ4_decompress_fast() specificity is that + * it can decompress a block without knowing its compressed size. + * Such functionality can be achieved in a more secure manner + * by employing LZ4_decompress_safe_partial(). + * + * Parameters: + * originalSize : is the uncompressed size to regenerate. + * `dst` must be already allocated, its size must be >= 'originalSize' bytes. + * @return : number of bytes read from source buffer (== compressed size). + * The function expects to finish at block's end exactly. + * If the source stream is detected malformed, the function stops decoding and returns a negative result. + * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. + * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. + * Also, since match offsets are not validated, match reads from 'src' may underflow too. + * These issues never happen if input (compressed) data is correct. + * But they may happen if input data is invalid (error or intentional tampering). + * As a consequence, use these functions in trusted environments with trusted data **only**. + */ +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial() instead") +LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider migrating towards LZ4_decompress_safe_continue() instead. " + "Note that the contract will change (requires block's compressed size, instead of decompressed size)") +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial_usingDict() instead") +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); + +/*! LZ4_resetStream() : + * An LZ4_stream_t structure must be initialized at least once. + * This is done with LZ4_initStream(), or LZ4_resetStream(). + * Consider switching to LZ4_initStream(), + * invoking LZ4_resetStream() will trigger deprecation warnings in the future. 
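+ *
+ * A minimal sketch of the preferred path (per the LZ4_initStream() notes above) :
+ *     LZ4_stream_t state;
+ *     LZ4_stream_t* const s = LZ4_initStream(&state, sizeof(state));
+ *     if (s == NULL) { return; }   // size/alignment conditions not respected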
+ */ +LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); + + +#endif /* LZ4_H_98237428734687 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/tools/lib/lz4/lz4file.c b/tools/lib/lz4/lz4file.c new file mode 100644 index 0000000..e7f8879 --- /dev/null +++ b/tools/lib/lz4/lz4file.c @@ -0,0 +1,362 @@ +/* + * LZ4 file library + * Copyright (c) Yann Collet and LZ4 contributors. All rights reserved. + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ */
+#include <stdlib.h>  /* malloc, free */
+#include <string.h>  /* memcpy */
+#include <assert.h>
+#include "lz4.h"
+#include "lz4file.h"
+
+/* ===== Error Handling ===== */
+static LZ4F_errorCode_t returnErrorCode(LZ4F_errorCodes code)
+{
+    return (LZ4F_errorCode_t)-(ptrdiff_t)code;
+}
+
+#undef RETURN_ERROR
+#define RETURN_ERROR(e) return returnErrorCode(LZ4F_ERROR_ ## e)
+
+/* ===== Read API Implementation ===== */
+
+struct LZ4_readFile_s {
+    LZ4F_dctx* dctxPtr;
+    FILE* fp;
+    LZ4_byte* srcBuf;
+    size_t srcBufNext;
+    size_t srcBufSize;
+    size_t srcBufMaxSize;
+};
+
+static void freeReadFileResources(LZ4_readFile_t* lz4fRead)
+{
+    if (lz4fRead==NULL) return;
+    LZ4F_freeDecompressionContext(lz4fRead->dctxPtr);
+    free(lz4fRead->srcBuf);
+    free(lz4fRead);
+}
+
+static void freeAndNullReadFile(LZ4_readFile_t** statePtr)
+{
+    assert(statePtr != NULL);
+    freeReadFileResources(*statePtr);
+    *statePtr = NULL;
+}
+
+static LZ4F_errorCode_t readAndParseHeader(LZ4_readFile_t* readFile, FILE* fp)
+{
+    char headerBuf[LZ4F_HEADER_SIZE_MAX];
+    LZ4F_frameInfo_t frameInfo;
+    size_t consumedSize;
+
+    /* Read the header from file */
+    const size_t bytesRead = fread(headerBuf, 1, sizeof(headerBuf), fp);
+    if (bytesRead < LZ4F_HEADER_SIZE_MIN + LZ4F_ENDMARK_SIZE) {
+        RETURN_ERROR(io_read);
+    }
+
+    /* Parse frame information */
+    consumedSize = bytesRead;
+    { const LZ4F_errorCode_t result = LZ4F_getFrameInfo(readFile->dctxPtr, &frameInfo, headerBuf, &consumedSize);
+      if (LZ4F_isError(result)) {
+          return result;
+    } }
+
+    /* Determine buffer size based on block size */
+    { const size_t blockSize = LZ4F_getBlockSize(frameInfo.blockSizeID);
+      if (blockSize == 0) {
+          RETURN_ERROR(maxBlockSize_invalid);
+      }
+      readFile->srcBufMaxSize = blockSize;
+    }
+
+    /* Allocate source buffer */
+    assert(readFile->srcBuf == NULL);  /* Should be NULL from calloc */
+    readFile->srcBuf = (LZ4_byte*)malloc(readFile->srcBufMaxSize);
+    if (readFile->srcBuf == NULL) {
+        RETURN_ERROR(allocation_failed);
+    }
+
+    /* Store remaining header data in buffer */
+    readFile->srcBufSize = bytesRead - consumedSize;
+    if (readFile->srcBufSize > 0) {
+        memcpy(readFile->srcBuf, headerBuf + consumedSize, readFile->srcBufSize);
+    }
+    readFile->srcBufNext = 0;
+
+    return LZ4F_OK_NoError;
+}
+
+LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp)
+{
+    LZ4_readFile_t* readFile;
+
+    /* Validate parameters */
+    if (fp == NULL || lz4fRead == NULL) {
+        RETURN_ERROR(parameter_null);
+    }
+
+    /* Allocate read file structure */
+    readFile = (LZ4_readFile_t*)calloc(1, sizeof(LZ4_readFile_t));
+    if (readFile == NULL) {
+        RETURN_ERROR(allocation_failed);
+    }
+
+    readFile->fp = fp;
+
+    /* Initialize decompression context */
+    { LZ4F_errorCode_t const result = LZ4F_createDecompressionContext(&readFile->dctxPtr, LZ4F_VERSION);
+      if (LZ4F_isError(result)) {
+          freeAndNullReadFile(&readFile);
+          return result;
+    } }
+
+    /* Read and parse the header */
+    { LZ4F_errorCode_t const result = readAndParseHeader(readFile, fp);
+      if (LZ4F_isError(result)) {
+          freeAndNullReadFile(&readFile);
+          return result;
+    } }
+
+    *lz4fRead = readFile;
+    return LZ4F_OK_NoError;
+}
+
+size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size)
+{
+    LZ4_byte* outPtr = (LZ4_byte*)buf;
+    size_t totalBytesRead = 0;
+
+    if (lz4fRead == NULL || buf == NULL)
+        RETURN_ERROR(parameter_null);
+
+    while (totalBytesRead < size) {
+        size_t srcBytes = lz4fRead->srcBufSize - lz4fRead->srcBufNext;
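+        /* srcBytes: compressed bytes still buffered from the file;
+         * dstBytes (computed next): room remaining in the caller's output buffer.
+         * When the staging buffer is empty, refill it with up to one block
+         * from the file, then feed it to LZ4F_decompress(). */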
+        size_t dstBytes = size - totalBytesRead;
+
+        if (srcBytes == 0) {
+            size_t const bytesRead = fread(lz4fRead->srcBuf, 1, lz4fRead->srcBufMaxSize, lz4fRead->fp);
+            if (bytesRead == 0) {
+                if (ferror(lz4fRead->fp)) {
+                    RETURN_ERROR(io_read);
+                }
+                break;  /* end of input reached */
+            }
+            /* success: bytesRead > 0 */
+            lz4fRead->srcBufSize = bytesRead;
+            srcBytes = lz4fRead->srcBufSize;
+            lz4fRead->srcBufNext = 0;
+        }
+
+        { size_t const decStatus = LZ4F_decompress(
+                                        lz4fRead->dctxPtr,
+                                        outPtr, &dstBytes,
+                                        lz4fRead->srcBuf + lz4fRead->srcBufNext,
+                                        &srcBytes,
+                                        NULL);
+          if (LZ4F_isError(decStatus)) {
+              return decStatus;
+        } }
+
+        lz4fRead->srcBufNext += srcBytes;
+        totalBytesRead += dstBytes;
+        outPtr += dstBytes;
+    }
+
+    return totalBytesRead;
+}
+
+LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead)
+{
+    if (lz4fRead == NULL)
+        RETURN_ERROR(parameter_null);
+    freeReadFileResources(lz4fRead);
+    return LZ4F_OK_NoError;
+}
+
+/* ===== Write API Implementation ===== */
+
+struct LZ4_writeFile_s {
+    LZ4F_cctx* cctxPtr;
+    FILE* fp;
+    LZ4_byte* dstBuf;
+    size_t maxWriteSize;
+    size_t dstBufMaxSize;
+    LZ4F_errorCode_t errCode;
+};
+
+static void freeWriteFileResources(LZ4_writeFile_t* state)
+{
+    if (state == NULL) return;
+    LZ4F_freeCompressionContext(state->cctxPtr);
+    free(state->dstBuf);
+    free(state);
+}
+
+static void freeAndNullWriteFile(LZ4_writeFile_t** statePtr)
+{
+    assert(statePtr != NULL);
+    freeWriteFileResources(*statePtr);
+    *statePtr = NULL;
+}
+
+static LZ4F_errorCode_t writeHeader(LZ4_writeFile_t* writeFile,
+                                    FILE* fp,
+                                    const LZ4F_preferences_t* prefsPtr)
+{
+    LZ4_byte headerBuf[LZ4F_HEADER_SIZE_MAX];
+
+    /* Generate header */
+    LZ4F_errorCode_t const headerSize = LZ4F_compressBegin(writeFile->cctxPtr,
+                                            headerBuf, LZ4F_HEADER_SIZE_MAX, prefsPtr);
+    if (LZ4F_isError(headerSize)) {
+        return headerSize;
+    }
+
+    /* Write header to file */
+    if (headerSize != fwrite(headerBuf, 1, headerSize, fp)) {
+        RETURN_ERROR(io_write);
+    }
+
+    return LZ4F_OK_NoError;
+}
+
+LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr)
+{
+    LZ4_writeFile_t* writeFile;
+    size_t blockSize;
+
+    if (fp == NULL || lz4fWrite == NULL)
+        RETURN_ERROR(parameter_null);
+
+    /* Validate block size */
+    { LZ4F_blockSizeID_t const blockSizeID = prefsPtr ?
prefsPtr->frameInfo.blockSizeID : LZ4F_default;
+      blockSize = LZ4F_getBlockSize(blockSizeID);
+      if (blockSize == 0) {
+          RETURN_ERROR(maxBlockSize_invalid);
+    } }
+
+    /* Allocate write file structure */
+    writeFile = (LZ4_writeFile_t*)calloc(1, sizeof(LZ4_writeFile_t));
+    if (writeFile == NULL) {
+        RETURN_ERROR(allocation_failed);
+    }
+    writeFile->fp = fp;
+    writeFile->errCode = LZ4F_OK_NoError;
+    writeFile->maxWriteSize = blockSize;
+
+    /* Calculate and allocate destination buffer */
+    writeFile->dstBufMaxSize = LZ4F_compressBound(0, prefsPtr);
+    writeFile->dstBuf = (LZ4_byte*)malloc(writeFile->dstBufMaxSize);
+    if (writeFile->dstBuf == NULL) {
+        freeAndNullWriteFile(&writeFile);
+        RETURN_ERROR(allocation_failed);
+    }
+
+    /* Initialize compression context */
+    { LZ4F_errorCode_t const status = LZ4F_createCompressionContext(&writeFile->cctxPtr, LZ4F_VERSION);
+      if (LZ4F_isError(status)) {
+          freeAndNullWriteFile(&writeFile);  /* free the local state, not the caller's uninitialized handle */
+          return status;
+    } }
+
+    /* Write header to file */
+    { LZ4F_errorCode_t const writeStatus = writeHeader(writeFile, fp, prefsPtr);
+      if (LZ4F_isError(writeStatus)) {
+          freeAndNullWriteFile(&writeFile);
+          return writeStatus;
+    } }
+
+    *lz4fWrite = writeFile;
+    return LZ4F_OK_NoError;
+}
+
+size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, const void* buf, size_t size)
+{
+    const LZ4_byte* p = (const LZ4_byte*)buf;
+    size_t remainingBytes = size;
+
+    /* Validate parameters */
+    if (lz4fWrite == NULL || buf == NULL)
+        RETURN_ERROR(parameter_null);
+
+    while (remainingBytes) {
+        size_t const chunkSize = (remainingBytes > lz4fWrite->maxWriteSize) ? lz4fWrite->maxWriteSize : remainingBytes;
+
+        /* Compress and write chunk */
+        size_t cSize = LZ4F_compressUpdate(lz4fWrite->cctxPtr,
+                                           lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize,
+                                           p, chunkSize,
+                                           NULL);
+        if (LZ4F_isError(cSize)) {
+            lz4fWrite->errCode = cSize;
+            return cSize;
+        }
+
+        if (cSize != fwrite(lz4fWrite->dstBuf, 1, cSize, lz4fWrite->fp)) {
+            lz4fWrite->errCode = returnErrorCode(LZ4F_ERROR_io_write);
+            RETURN_ERROR(io_write);
+        }
+
+        /* Update positions */
+        p += chunkSize;
+        remainingBytes -= chunkSize;
+    }
+
+    return size;
+}
+
+LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite)
+{
+    LZ4F_errorCode_t ret = LZ4F_OK_NoError;
+
+    if (lz4fWrite == NULL) {
+        RETURN_ERROR(parameter_null);
+    }
+
+    if (lz4fWrite->errCode == LZ4F_OK_NoError) {
+        ret = LZ4F_compressEnd(lz4fWrite->cctxPtr,
+                               lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize,
+                               NULL);
+        if (LZ4F_isError(ret)) {
+            goto cleanup;
+        }
+
+        if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
+            ret = returnErrorCode(LZ4F_ERROR_io_write);
+        }
+    }
+
+cleanup:
+    freeWriteFileResources(lz4fWrite);
+    return ret;
+}
diff --git a/tools/lib/lz4/lz4file.h b/tools/lib/lz4/lz4file.h
new file mode 100644
index 0000000..57c7288
--- /dev/null
+++ b/tools/lib/lz4/lz4file.h
@@ -0,0 +1,116 @@
+/*
+   LZ4 file library
+   Header File
+   Copyright (c) Yann Collet and LZ4 contributors. All rights reserved.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - LZ4 source repository : https://github.com/lz4/lz4
+   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4FILE_H
+#define LZ4FILE_H
+
+#include <stdio.h>  /* FILE* */
+#include "lz4frame_static.h"
+
+typedef struct LZ4_readFile_s LZ4_readFile_t;
+typedef struct LZ4_writeFile_s LZ4_writeFile_t;
+
+/** LZ4 File Decompression **/
+
+/**
+ * Opens an LZ4 file for reading.
+ * Note that the FILE* handle @p fp must be opened in binary mode.
+ *
+ * @param lz4fRead Pointer to receive the read file handle.
+ *        It is an OUT parameter, so its initial value is ignored and will be overwritten.
+ *        Its value on exit is only valid if the function returns LZ4F_OK_NoError.
+ * @param fp FILE* positioned at start of LZ4 file (binary mode).
+ *
+ * @return LZ4F_OK_NoError on success, or error code on failure.
+ *         Can be tested with LZ4F_isError().
+ *
+ * @note Must be closed with LZ4F_readClose() when done.
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp);
+
+/*! LZ4F_read() :
+ * Read lz4file content to buffer.
+ * `lz4f` must use LZ4F_readOpen to set first.
+ * `buf` read data buffer.
+ * `size` read data buffer size.
+ */
+LZ4FLIB_STATIC_API size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size);
+
+/*! LZ4F_readClose() :
+ * Close lz4file handle.
+ * `lz4f` must use LZ4F_readOpen to set first.
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead);
+
+/** LZ4 File Compression **/
+
+/**
+ * Opens an LZ4 file for writing.
+ * Note that the FILE* handle @p fp must be opened in write binary mode.
+ *
+ * @param lz4fWrite Pointer to receive the write file handle.
+ *        It is an OUT parameter, so its initial value is ignored and will be overwritten.
+ *        Its value on exit is only valid if the function returns LZ4F_OK_NoError.
+ * @param fp FILE* positioned at start of LZ4 file (binary mode).
+ *
+ * @return LZ4F_OK_NoError on success, or error code on failure.
+ *         Can be tested with LZ4F_isError().
+ *
+ * @note Must be closed with LZ4F_writeClose() when done.
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_write() :
+ * Write buffer to lz4file.
+ * `lz4f` must use LZ4F_writeOpen to set first.
+ * `buf` write data buffer.
+ * `size` write data buffer size.
+ */
+LZ4FLIB_STATIC_API size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, const void* buf, size_t size);
+
+/*! LZ4F_writeClose() :
+ * Close lz4file handle.
+ * `lz4f` must use LZ4F_writeOpen to set first.
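+ *
+ * Typical round trip (a hedged sketch; `buf`/`bufSize` are placeholders and
+ * error handling is abbreviated) :
+ *     FILE* const f = fopen("out.lz4", "wb");            // binary mode, as required
+ *     LZ4_writeFile_t* wf;
+ *     if (f != NULL && !LZ4F_isError(LZ4F_writeOpen(&wf, f, NULL))) {
+ *         LZ4F_write(wf, buf, bufSize);                  // may be called repeatedly
+ *         LZ4F_writeClose(wf);                           // writes end mark (unless a prior write failed) and frees wf
+ *     }
+ *     if (f != NULL) fclose(f);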
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite);
+
+#endif /* LZ4FILE_H */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/tools/lib/lz4/lz4frame.c b/tools/lib/lz4/lz4frame.c
new file mode 100644
index 0000000..6dab767
--- /dev/null
+++ b/tools/lib/lz4/lz4frame.c
@@ -0,0 +1,2165 @@
+/*
+ * LZ4 auto-framing library
+ * Copyright (c) Yann Collet. All rights reserved.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ */
+
+/* LZ4F is a stand-alone API to create LZ4-compressed Frames
+ * in full conformance with specification v1.6.1.
+ * This library relies upon memory management capabilities (malloc, free)
+ * provided either by <stdlib.h>,
+ * or redirected towards another library of user's choice
+ * (see Memory Routines below).
+ */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#include <limits.h>
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4F_HEAPMODE :
+ * Control how LZ4F_compressFrame allocates the Compression State,
+ * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+#ifndef LZ4F_HEAPMODE
+# define LZ4F_HEAPMODE 0
+#endif
+
+
+/*-************************************
+* Library declarations
+**************************************/
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+#define LZ4_STATIC_LINKING_ONLY
+#include "lz4.h"
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/*-************************************
+* Memory routines
+**************************************/
+/*
+ * User may redirect invocations of
+ * malloc(), calloc() and free()
+ * towards another library or solution of their choice
+ * by modifying below section.
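+ *
+ * Alternatively, allocators can be supplied at run time through LZ4F_CustomMem
+ * (a hedged sketch; my_alloc/my_free/my_state are placeholders) :
+ *     LZ4F_CustomMem cmem = { 0 };
+ *     cmem.customAlloc = my_alloc;    // customCalloc may stay NULL :
+ *     cmem.customFree  = my_free;     // LZ4F_calloc() below then zeroes via memset()
+ *     cmem.opaqueState = my_state;
+ *     LZ4F_cctx* const cctx = LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);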
+**/
+
+#include <string.h> /* memset, memcpy, memmove */
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# define MEM_INIT(p,v,s) memset((p),(v),(s))
+#endif
+
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,(s))
+# define FREEMEM(p) free(p)
+#endif
+
+static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
+{
+    /* custom calloc defined : use it */
+    if (cmem.customCalloc != NULL) {
+        return cmem.customCalloc(cmem.opaqueState, s);
+    }
+    /* nothing defined : use default <stdlib.h>'s calloc() */
+    if (cmem.customAlloc == NULL) {
+        return ALLOC_AND_ZERO(s);
+    }
+    /* only custom alloc defined : use it, and combine it with memset() */
+    { void* const p = cmem.customAlloc(cmem.opaqueState, s);
+      if (p != NULL) MEM_INIT(p, 0, s);
+      return p;
+} }
+
+static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
+{
+    /* custom malloc defined : use it */
+    if (cmem.customAlloc != NULL) {
+        return cmem.customAlloc(cmem.opaqueState, s);
+    }
+    /* nothing defined : use default <stdlib.h>'s malloc() */
+    return ALLOC(s);
+}
+
+static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
+{
+    if (p == NULL) return;
+    if (cmem.customFree != NULL) {
+        /* custom allocation defined : use it */
+        cmem.customFree(cmem.opaqueState, p);
+        return;
+    }
+    /* nothing defined : use default <stdlib.h>'s free() */
+    FREEMEM(p);
+}
+
+
+/*-************************************
+* Debug
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
+# include <stdio.h>
+static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ " %i: ", __LINE__ ); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...)
{} /* disabled */ +#endif + + +/*-************************************ +* Basic Types +**************************************/ +#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; +#endif + + +/* unoptimized version; solves endianness & alignment issues */ +static U32 LZ4F_readLE32 (const void* src) +{ + const BYTE* const srcPtr = (const BYTE*)src; + U32 value32 = srcPtr[0]; + value32 |= ((U32)srcPtr[1])<< 8; + value32 |= ((U32)srcPtr[2])<<16; + value32 |= ((U32)srcPtr[3])<<24; + return value32; +} + +static void LZ4F_writeLE32 (void* dst, U32 value32) +{ + BYTE* const dstPtr = (BYTE*)dst; + dstPtr[0] = (BYTE)value32; + dstPtr[1] = (BYTE)(value32 >> 8); + dstPtr[2] = (BYTE)(value32 >> 16); + dstPtr[3] = (BYTE)(value32 >> 24); +} + +static U64 LZ4F_readLE64 (const void* src) +{ + const BYTE* const srcPtr = (const BYTE*)src; + U64 value64 = srcPtr[0]; + value64 |= ((U64)srcPtr[1]<<8); + value64 |= ((U64)srcPtr[2]<<16); + value64 |= ((U64)srcPtr[3]<<24); + value64 |= ((U64)srcPtr[4]<<32); + value64 |= ((U64)srcPtr[5]<<40); + value64 |= ((U64)srcPtr[6]<<48); + value64 |= ((U64)srcPtr[7]<<56); + return value64; +} + +static void LZ4F_writeLE64 (void* dst, U64 value64) +{ + BYTE* const dstPtr = (BYTE*)dst; + dstPtr[0] = (BYTE)value64; + dstPtr[1] = (BYTE)(value64 >> 8); + dstPtr[2] = (BYTE)(value64 >> 16); + dstPtr[3] = (BYTE)(value64 >> 24); + dstPtr[4] = (BYTE)(value64 >> 32); + dstPtr[5] = (BYTE)(value64 >> 40); + dstPtr[6] = (BYTE)(value64 >> 48); + dstPtr[7] = (BYTE)(value64 >> 56); +} + + +/*-************************************ +* Constants +**************************************/ +#ifndef LZ4_SRC_INCLUDED /* avoid double definition */ +# define KB *(1<<10) +# define MB *(1<<20) +# define GB *(1<<30) +#endif + +#define _1BIT 0x01 +#define _2BITS 0x03 +#define _3BITS 0x07 +#define _4BITS 0x0F +#define _8BITS 0xFF + +#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U +#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB + +static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */ +static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */ +static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */ +static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */ + + +/*-************************************ +* Structures and local types +**************************************/ + +typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e; +typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e; + +typedef struct LZ4F_cctx_s +{ + LZ4F_CustomMem cmem; + LZ4F_preferences_t prefs; + U32 version; + U32 cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */ + const LZ4F_CDict* cdict; + size_t maxBlockSize; + size_t maxBufferSize; + BYTE* tmpBuff; /* internal buffer, for streaming */ + BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */ + size_t tmpInSize; /* amount of data to compress after tmpIn */ + U64 totalInSize; + XXH32_state_t xxh; + void* lz4CtxPtr; + U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ + U16 lz4CtxType; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ + 
LZ4F_BlockCompressMode_e blockCompressMode; +} LZ4F_cctx_t; + + +/*-************************************ +* Error management +**************************************/ +#define LZ4F_GENERATE_STRING(STRING) #STRING, +static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) }; + + +unsigned LZ4F_isError(LZ4F_errorCode_t code) +{ + return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode)); +} + +const char* LZ4F_getErrorName(LZ4F_errorCode_t code) +{ + static const char* codeError = "Unspecified error code"; + if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)]; + return codeError; +} + +LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult) +{ + if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError; + return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult); +} + +static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code) +{ + /* A compilation error here means sizeof(ptrdiff_t) is not large enough */ + LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); + return (LZ4F_errorCode_t)-(ptrdiff_t)code; +} + +#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e) + +#define RETURN_ERROR_IF(c,e) do { \ + if (c) { \ + DEBUGLOG(3, "Error: " #c); \ + RETURN_ERROR(e); \ + } \ + } while (0) + +#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0) + +unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; } + +int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; } + +size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID) +{ + static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB }; + + if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; + if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB) + RETURN_ERROR(maxBlockSize_invalid); + { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB; + return blockSizes[blockSizeIdx]; +} } + +/*-************************************ +* Private functions +**************************************/ +#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) + +static BYTE LZ4F_headerChecksum (const void* header, size_t length) +{ + U32 const xxh = XXH32(header, length, 0); + return (BYTE)(xxh >> 8); +} + + +/*-************************************ +* Simple-pass compression functions +**************************************/ +static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, + const size_t srcSize) +{ + LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB; + size_t maxBlockSize = 64 KB; + while (requestedBSID > proposedBSID) { + if (srcSize <= maxBlockSize) + return proposedBSID; + proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1); + maxBlockSize <<= 2; + } + return requestedBSID; +} + +/*! LZ4F_compressBound_internal() : + * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. + * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario. + * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers. + * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. 
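+ *
+ * Worked example (illustrative numbers, assuming BHSize == 4) : for a zeroed
+ * preferences struct (autoFlush forced to 1, no checksums, default 64 KB blocks),
+ * srcSize = 102400 splits into 1 full block + 1 partial block of 36864 bytes, so
+ *   bound = (4 * 2 block headers) + 65536 + 36864 + 4 (end mark) = 102412,
+ * and LZ4F_compressFrameBound(102400, NULL) adds maxFHSize=19 : 102431 bytes.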
+ */
+static size_t LZ4F_compressBound_internal(size_t srcSize,
+                                          const LZ4F_preferences_t* preferencesPtr,
+                                          size_t alreadyBuffered)
+{
+    LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
+    prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
+    prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
+    { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
+      U32 const flush = prefsPtr->autoFlush | (srcSize==0);
+      LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
+      size_t const blockSize = LZ4F_getBlockSize(blockID);
+      size_t const maxBuffered = blockSize - 1;
+      size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
+      size_t const maxSrcSize = srcSize + bufferedSize;
+      unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
+      size_t const partialBlockSize = maxSrcSize & (blockSize-1);
+      size_t const lastBlockSize = flush ? partialBlockSize : 0;
+      unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
+
+      size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
+      size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
+
+      return ((BHSize + blockCRCSize) * nbBlocks) +
+             (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
+    }
+}
+
+size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+    LZ4F_preferences_t prefs;
+    size_t const headerSize = maxFHSize; /* max header size, including optional fields */
+
+    if (preferencesPtr!=NULL) prefs = *preferencesPtr;
+    else MEM_INIT(&prefs, 0, sizeof(prefs));
+    prefs.autoFlush = 1;
+
+    return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
+}
+
+
+/*! LZ4F_compressFrame_usingCDict() :
+ * Compress srcBuffer using a dictionary, in a single step.
+ * cdict can be NULL, in which case, no dictionary is used.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
+ * however, it's the only way to provide a dictID, so it's not recommended.
+ * @return : number of bytes written into dstBuffer, + * or an error code if it fails (can be tested using LZ4F_isError()) + */ +size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr) +{ + LZ4F_preferences_t prefs; + LZ4F_compressOptions_t options; + BYTE* const dstStart = (BYTE*) dstBuffer; + BYTE* dstPtr = dstStart; + BYTE* const dstEnd = dstStart + dstCapacity; + + DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize); + if (preferencesPtr!=NULL) + prefs = *preferencesPtr; + else + MEM_INIT(&prefs, 0, sizeof(prefs)); + if (prefs.frameInfo.contentSize != 0) + prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */ + + prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize); + prefs.autoFlush = 1; + if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID)) + prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */ + + MEM_INIT(&options, 0, sizeof(options)); + options.stableSrc = 1; + + RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall); + + { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */ + FORWARD_IF_ERROR(headerSize); + dstPtr += headerSize; /* header size */ } + + assert(dstEnd >= dstPtr); + { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options); + FORWARD_IF_ERROR(cSize); + dstPtr += cSize; } + + assert(dstEnd >= dstPtr); + { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */ + FORWARD_IF_ERROR(tailSize); + dstPtr += tailSize; } + + assert(dstEnd >= dstStart); + return (size_t)(dstPtr - dstStart); +} + + +/*! LZ4F_compressFrame() : + * Compress an entire srcBuffer into a valid LZ4 frame, in a single step. + * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. + * @return : number of bytes written into dstBuffer. 
+ * or an error code if it fails (can be tested using LZ4F_isError()) + */ +size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_preferences_t* preferencesPtr) +{ + size_t result; +#if (LZ4F_HEAPMODE) + LZ4F_cctx_t* cctxPtr; + result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION); + FORWARD_IF_ERROR(result); +#else + LZ4F_cctx_t cctx; + LZ4_stream_t lz4ctx; + LZ4F_cctx_t* const cctxPtr = &cctx; + + MEM_INIT(&cctx, 0, sizeof(cctx)); + cctx.version = LZ4F_VERSION; + cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */ + if ( preferencesPtr == NULL + || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) { + LZ4_initStream(&lz4ctx, sizeof(lz4ctx)); + cctxPtr->lz4CtxPtr = &lz4ctx; + cctxPtr->lz4CtxAlloc = 1; + cctxPtr->lz4CtxType = ctxFast; + } +#endif + DEBUGLOG(4, "LZ4F_compressFrame"); + + result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity, + srcBuffer, srcSize, + NULL, preferencesPtr); + +#if (LZ4F_HEAPMODE) + LZ4F_freeCompressionContext(cctxPtr); +#else + if ( preferencesPtr != NULL + && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) { + LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); + } +#endif + return result; +} + + +/*-*************************************************** +* Dictionary compression +*****************************************************/ + +struct LZ4F_CDict_s { + LZ4F_CustomMem cmem; + void* dictContent; + LZ4_stream_t* fastCtx; + LZ4_streamHC_t* HCCtx; +}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */ + +LZ4F_CDict* +LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize) +{ + const char* dictStart = (const char*)dictBuffer; + LZ4F_CDict* cdict = NULL; + + DEBUGLOG(4, "LZ4F_createCDict_advanced"); + + if (!dictStart) + return NULL; + cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem); + if (!cdict) + return NULL; + + cdict->cmem = cmem; + if (dictSize > 64 KB) { + dictStart += dictSize - 64 KB; + dictSize = 64 KB; + } + cdict->dictContent = LZ4F_malloc(dictSize, cmem); + /* note: using @cmem to allocate => can't use default create */ + cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem); + cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem); + if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) { + LZ4F_freeCDict(cdict); + return NULL; + } + memcpy(cdict->dictContent, dictStart, dictSize); + LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t)); + LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize); + LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t)); + /* note: we don't know at this point which compression level is going to be used + * as a consequence, HCCtx is created for the more common HC mode */ + LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT); + LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize); + return cdict; +} + +/*! LZ4F_createCDict() : + * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. + * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. + * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. 
+ * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
+ * @return : digested dictionary for compression, or NULL if failed */
+LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
+{
+    DEBUGLOG(4, "LZ4F_createCDict");
+    return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
+}
+
+void LZ4F_freeCDict(LZ4F_CDict* cdict)
+{
+    if (cdict==NULL) return; /* support free on NULL */
+    LZ4F_free(cdict->dictContent, cdict->cmem);
+    LZ4F_free(cdict->fastCtx, cdict->cmem);
+    LZ4F_free(cdict->HCCtx, cdict->cmem);
+    LZ4F_free(cdict, cdict->cmem);
+}
+
+
+/*-*********************************
+* Advanced compression functions
+***********************************/
+
+LZ4F_cctx*
+LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
+{
+    LZ4F_cctx* const cctxPtr =
+        (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
+    if (cctxPtr==NULL) return NULL;
+
+    cctxPtr->cmem = customMem;
+    cctxPtr->version = version;
+    cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */
+
+    return cctxPtr;
+}
+
+/*! LZ4F_createCompressionContext() :
+ * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
+ * This is achieved using LZ4F_createCompressionContext(), which takes a version number as its argument.
+ * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
+ * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
+ * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
+ * Object can release its memory using LZ4F_freeCompressionContext();
+**/
+LZ4F_errorCode_t
+LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
+{
+    assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */
+    /* in case it nonetheless happens in production */
+    RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);
+
+    *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
+    RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
+    return LZ4F_OK_NoError;
+}
+
+LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
+{
+    if (cctxPtr != NULL) { /* support free on NULL */
+        LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+        LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
+        LZ4F_free(cctxPtr, cctxPtr->cmem);
+    }
+    return LZ4F_OK_NoError;
+}
+
+
+/**
+ * This function prepares the internal LZ4(HC) stream for a new compression,
+ * resetting the context and attaching the dictionary, if there is one.
+ *
+ * It needs to be called at the beginning of each independent compression
+ * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
+ * beginning of each block in blockIndependent mode).
+ */
+static void LZ4F_initStream(void* ctx,
+                            const LZ4F_CDict* cdict,
+                            int level,
+                            LZ4F_blockMode_t blockMode) {
+    if (level < LZ4HC_CLEVEL_MIN) {
+        if (cdict || blockMode == LZ4F_blockLinked) {
+            /* In these cases, we will call LZ4_compress_fast_continue(),
+             * which needs an already reset context. Otherwise, we'll call a
+             * one-shot API.
The non-continued APIs internally perform their own + * resets at the beginning of their calls, where they know what + * tableType they need the context to be in. So in that case this + * would be misguided / wasted work. */ + LZ4_resetStream_fast((LZ4_stream_t*)ctx); + if (cdict) + LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx); + } + /* In these cases, we'll call a one-shot API. + * The non-continued APIs internally perform their own resets + * at the beginning of their calls, where they know + * which tableType they need the context to be in. + * Therefore, a reset here would be wasted work. */ + } else { + LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level); + if (cdict) + LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx); + } +} + +static int ctxTypeID_to_size(int ctxTypeID) { + switch(ctxTypeID) { + case 1: + return LZ4_sizeofState(); + case 2: + return LZ4_sizeofStateHC(); + default: + return 0; + } +} + +size_t LZ4F_cctx_size(const LZ4F_cctx* cctx) { + if (cctx == NULL) { + return 0; + } + return sizeof(*cctx) + cctx->maxBufferSize + ctxTypeID_to_size(cctx->lz4CtxAlloc); +} + +/* LZ4F_compressBegin_internal() + * Note: only accepts @cdict _or_ @dictBuffer as non NULL. + */ +size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* dictBuffer, size_t dictSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr) +{ + LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES; + BYTE* const dstStart = (BYTE*)dstBuffer; + BYTE* dstPtr = dstStart; + + RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall); + if (preferencesPtr == NULL) preferencesPtr = &prefNull; + cctx->prefs = *preferencesPtr; + DEBUGLOG(5, "LZ4F_compressBegin_internal: Independent_blocks=%u", cctx->prefs.frameInfo.blockMode); + + /* cctx Management */ + { U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; + int requiredSize = ctxTypeID_to_size(ctxTypeID); + int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc); + if (allocatedSize < requiredSize) { + /* not enough space allocated */ + LZ4F_free(cctx->lz4CtxPtr, cctx->cmem); + if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { + /* must take ownership of memory allocation, + * in order to respect custom allocator contract */ + cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem); + if (cctx->lz4CtxPtr) + LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t)); + } else { + cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem); + if (cctx->lz4CtxPtr) + LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t)); + } + RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed); + cctx->lz4CtxAlloc = ctxTypeID; + cctx->lz4CtxType = ctxTypeID; + } else if (cctx->lz4CtxType != ctxTypeID) { + /* otherwise, a sufficient buffer is already allocated, + * but we need to reset it to the correct context type */ + if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { + LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t)); + } else { + LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t)); + LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel); + } + cctx->lz4CtxType = ctxTypeID; + } } + + /* Buffer Management */ + if (cctx->prefs.frameInfo.blockSizeID == 0) + cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; + cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID); + + { size_t const requiredBuffSize = preferencesPtr->autoFlush ? 
+ ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */ + cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0); + + if (cctx->maxBufferSize < requiredBuffSize) { + cctx->maxBufferSize = 0; + LZ4F_free(cctx->tmpBuff, cctx->cmem); + cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem); + RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed); + cctx->maxBufferSize = requiredBuffSize; + } } + cctx->tmpIn = cctx->tmpBuff; + cctx->tmpInSize = 0; + (void)XXH32_reset(&(cctx->xxh), 0); + + /* context init */ + cctx->cdict = cdict; + if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) { + /* frame init only for blockLinked : blockIndependent will be init at each block */ + LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked); + } + if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) { + LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed); + } + if (dictBuffer) { + assert(cdict == NULL); + RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid); + if (cctx->lz4CtxType == ctxFast) { + /* lz4 fast*/ + LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize); + } else { + /* lz4hc */ + assert(cctx->lz4CtxType == ctxHC); + LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize); + } + } + + /* Stage 2 : Write Frame Header */ + + /* Magic Number */ + LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER); + dstPtr += 4; + { BYTE* const headerStart = dstPtr; + + /* FLG Byte */ + *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */ + + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5) + + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4) + + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3) + + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2) + + (cctx->prefs.frameInfo.dictID > 0) ); + /* BD Byte */ + *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4); + /* Optional Frame content size field */ + if (cctx->prefs.frameInfo.contentSize) { + LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize); + dstPtr += 8; + cctx->totalInSize = 0; + } + /* Optional dictionary ID field */ + if (cctx->prefs.frameInfo.dictID) { + LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID); + dstPtr += 4; + } + /* Header CRC Byte */ + *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart)); + dstPtr++; + } + + cctx->cStage = 1; /* header written, now request input data block */ + return (size_t)(dstPtr - dstStart); +} + +size_t LZ4F_compressBegin(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_preferences_t* preferencesPtr) +{ + return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity, + NULL, 0, + NULL, preferencesPtr); +} + +/* LZ4F_compressBegin_usingDictOnce: + * Hidden implementation, + * employed for multi-threaded compression + * when frame defines linked blocks */ +size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* dict, size_t dictSize, + const LZ4F_preferences_t* preferencesPtr) +{ + return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity, + dict, dictSize, + NULL, preferencesPtr); +} + +size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* dict, size_t dictSize, + const LZ4F_preferences_t* preferencesPtr) +{ + /* note : incorrect implementation : + * this will 
only use the dictionary once, + * instead of once *per* block when frames defines independent blocks */ + return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity, + dict, dictSize, + preferencesPtr); +} + +size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr) +{ + return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity, + NULL, 0, + cdict, preferencesPtr); +} + + +/* LZ4F_compressBound() : + * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario. + * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario. + * This function cannot fail. + */ +size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) +{ + if (preferencesPtr && preferencesPtr->autoFlush) { + return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0); + } + return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1); +} + + +typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict); + + +/*! LZ4F_makeBlock(): + * compress a single block, add header and optional checksum. + * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize + */ +static size_t LZ4F_makeBlock(void* dst, + const void* src, size_t srcSize, + compressFunc_t compress, void* lz4ctx, int level, + const LZ4F_CDict* cdict, + LZ4F_blockChecksum_t crcFlag) +{ + BYTE* const cSizePtr = (BYTE*)dst; + int dstCapacity = (srcSize > 1) ? (int)srcSize - 1 : 1; + U32 cSize; + assert(compress != NULL); + cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize), + (int)srcSize, dstCapacity, + level, cdict); + + if (cSize == 0 || cSize >= srcSize) { + cSize = (U32)srcSize; + LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG); + memcpy(cSizePtr+BHSize, src, srcSize); + } else { + LZ4F_writeLE32(cSizePtr, cSize); + } + if (crcFlag) { + U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */ + LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32); + } + return BHSize + cSize + ((U32)crcFlag)*BFSize; +} + + +static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + int const acceleration = (level < 0) ? -level + 1 : 1; + DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize); + LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); + if (cdict) { + return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); + } else { + return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration); + } +} + +static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + int const acceleration = (level < 0) ? 
-level + 1 : 1; + (void)cdict; /* init once at beginning of frame */ + DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize); + return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); +} + +static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); + if (cdict) { + return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); + } + return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level); +} + +static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + (void)level; (void)cdict; /* init once at beginning of frame */ + return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); +} + +static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict; + return 0; +} + +static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode) +{ + if (compressMode == LZ4B_UNCOMPRESSED) + return LZ4F_doNotCompressBlock; + if (level < LZ4HC_CLEVEL_MIN) { + if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock; + return LZ4F_compressBlock_continue; + } + if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC; + return LZ4F_compressBlockHC_continue; +} + +/* Save or shorten history (up to 64KB) into @tmpBuff */ +static void LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr) +{ + int const dictSize = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? + LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB) : + LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB); + cctxPtr->tmpIn = cctxPtr->tmpBuff + dictSize; +} + +typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus; + +static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } }; + + +/*! LZ4F_compressUpdateImpl() : + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * When successful, the function always entirely consumes @srcBuffer. + * src data is either buffered or compressed into @dstBuffer. + * If the block compression does not match the compression of the previous block, the old data is flushed + * and operations continue with the new compression mode. + * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on. + * @compressOptionsPtr is optional : provide NULL to mean "default". + * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. + * or an error code if it fails (which can be tested using LZ4F_isError()) + * After an error, the state is left in a UB state, and must be re-initialized. + */ +static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* compressOptionsPtr, + LZ4F_BlockCompressMode_e blockCompression) + { + size_t const blockSize = cctxPtr->maxBlockSize; + const BYTE* srcPtr = (const BYTE*)srcBuffer; + const BYTE* const srcEnd = srcSize ? 
(assert(srcPtr!=NULL), srcPtr + srcSize) : srcPtr; + BYTE* const dstStart = (BYTE*)dstBuffer; + BYTE* dstPtr = dstStart; + LZ4F_lastBlockStatus lastBlockCompressed = notDone; + compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression); + size_t bytesWritten; + DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize); + + RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */ + if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) + RETURN_ERROR(dstMaxSize_tooSmall); + + if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize) + RETURN_ERROR(dstMaxSize_tooSmall); + + /* flush currently written block, to continue with new block compression */ + if (cctxPtr->blockCompressMode != blockCompression) { + bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); + dstPtr += bytesWritten; + cctxPtr->blockCompressMode = blockCompression; + } + + if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull; + + /* complete tmp buffer */ + if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */ + size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize; + assert(blockSize > cctxPtr->tmpInSize); + if (sizeToCopy > srcSize) { + /* add src to tmpIn buffer */ + memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize); + srcPtr = srcEnd; + cctxPtr->tmpInSize += srcSize; + /* still needs some CRC */ + } else { + /* complete tmpIn block and then compress it */ + lastBlockCompressed = fromTmpBuffer; + memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy); + srcPtr += sizeToCopy; + + dstPtr += LZ4F_makeBlock(dstPtr, + cctxPtr->tmpIn, blockSize, + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); + if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize; + cctxPtr->tmpInSize = 0; + } } + + while ((size_t)(srcEnd - srcPtr) >= blockSize) { + /* compress full blocks */ + lastBlockCompressed = fromSrcBuffer; + dstPtr += LZ4F_makeBlock(dstPtr, + srcPtr, blockSize, + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); + srcPtr += blockSize; + } + + if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) { + /* autoFlush : remaining input (< blockSize) is compressed */ + lastBlockCompressed = fromSrcBuffer; + dstPtr += LZ4F_makeBlock(dstPtr, + srcPtr, (size_t)(srcEnd - srcPtr), + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); + srcPtr = srcEnd; + } + + /* preserve dictionary within @tmpBuff whenever necessary */ + if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) { + /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */ + assert(blockCompression == LZ4B_COMPRESSED); + if (compressOptionsPtr->stableSrc) { + cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */ + } else { + LZ4F_localSaveDict(cctxPtr); + } + } + + /* keep tmpIn within limits */ + if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */ + && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next 
block */ + { + /* only preserve 64KB within internal buffer. Ensures there is enough room for next block. + * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */ + LZ4F_localSaveDict(cctxPtr); + assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)); + } + + /* some input data left, necessarily < blockSize */ + if (srcPtr < srcEnd) { + /* fill tmp buffer */ + size_t const sizeToCopy = (size_t)(srcEnd - srcPtr); + memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy); + cctxPtr->tmpInSize = sizeToCopy; + } + + if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) + (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize); + + cctxPtr->totalInSize += srcSize; + return (size_t)(dstPtr - dstStart); +} + +/*! LZ4F_compressUpdate() : + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * When successful, the function always entirely consumes @srcBuffer. + * src data is either buffered or compressed into @dstBuffer. + * If previously an uncompressed block was written, buffered data is flushed + * before appending compressed data is continued. + * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). + * @compressOptionsPtr is optional : provide NULL to mean "default". + * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. + * or an error code if it fails (which can be tested using LZ4F_isError()) + * After an error, the state is left in a UB state, and must be re-initialized. + */ +size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* compressOptionsPtr) +{ + return LZ4F_compressUpdateImpl(cctxPtr, + dstBuffer, dstCapacity, + srcBuffer, srcSize, + compressOptionsPtr, LZ4B_COMPRESSED); +} + +/*! LZ4F_uncompressedUpdate() : + * Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed. + * This symbol is only supported when LZ4F_blockIndependent is used + * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). + * @compressOptionsPtr is optional : provide NULL to mean "default". + * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. + * or an error code if it fails (which can be tested using LZ4F_isError()) + * After an error, the state is left in a UB state, and must be re-initialized. + */ +size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* compressOptionsPtr) +{ + return LZ4F_compressUpdateImpl(cctxPtr, + dstBuffer, dstCapacity, + srcBuffer, srcSize, + compressOptionsPtr, LZ4B_UNCOMPRESSED); +} + + +/*! LZ4F_flush() : + * When compressed data must be sent immediately, without waiting for a block to be filled, + * invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx. + * The result of the function is the number of bytes written into dstBuffer. + * It can be zero, this means there was no data left within LZ4F_cctx. + * The function outputs an error code if it fails (can be tested using LZ4F_isError()) + * LZ4F_compressOptions_t* is optional. NULL is a valid argument. 
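+ * A minimal sketch (illustrative; assumes a frame already started with LZ4F_compressBegin(),
+ * and dst sized via LZ4F_compressBound(0, &prefs), which upper-bounds flush output):
+ *     size_t const flushed = LZ4F_flush(cctx, dst, dstCapacity, NULL);
+ *     if (LZ4F_isError(flushed)) ... handle error ...
+ *     written += flushed;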
+ */ +size_t LZ4F_flush(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* compressOptionsPtr) +{ + BYTE* const dstStart = (BYTE*)dstBuffer; + BYTE* dstPtr = dstStart; + compressFunc_t compress; + + DEBUGLOG(5, "LZ4F_flush: %zu buffered bytes (saved dict size = %i) (dstCapacity=%u)", + cctxPtr->tmpInSize, (int)(cctxPtr->tmpIn - cctxPtr->tmpBuff), (unsigned)dstCapacity); + if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */ + RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); + RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall); + (void)compressOptionsPtr; /* not useful (yet) */ + + /* select compression function */ + compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode); + + /* compress tmp buffer */ + dstPtr += LZ4F_makeBlock(dstPtr, + cctxPtr->tmpIn, cctxPtr->tmpInSize, + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); + assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity)); + + if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) + cctxPtr->tmpIn += cctxPtr->tmpInSize; + cctxPtr->tmpInSize = 0; + + /* keep tmpIn within limits */ + if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { + assert(cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked); + LZ4F_localSaveDict(cctxPtr); + } + + return (size_t)(dstPtr - dstStart); +} + + +/*! LZ4F_compressEnd() : + * When you want to properly finish the compressed frame, just call LZ4F_compressEnd(). + * It will flush whatever data remained within compressionContext (like LZ4_flush()) + * but also properly finalize the frame, with an endMark and an (optional) checksum. + * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. + * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size)) + * or an error code if it fails (can be tested using LZ4F_isError()) + * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin(). 
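+ * A minimal full-frame sequence (illustrative; error checks elided, dst assumed
+ * large enough per LZ4F_compressBound()):
+ *     size_t pos = LZ4F_compressBegin(cctx, dst, dstCapacity, &prefs);
+ *     pos += LZ4F_compressUpdate(cctx, dst+pos, dstCapacity-pos, src, srcSize, NULL);
+ *     pos += LZ4F_compressEnd(cctx, dst+pos, dstCapacity-pos, NULL);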
+ */ +size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* compressOptionsPtr) +{ + BYTE* const dstStart = (BYTE*)dstBuffer; + BYTE* dstPtr = dstStart; + + size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); + DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity); + FORWARD_IF_ERROR(flushSize); + dstPtr += flushSize; + + assert(flushSize <= dstCapacity); + dstCapacity -= flushSize; + + RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall); + LZ4F_writeLE32(dstPtr, 0); + dstPtr += 4; /* endMark */ + + if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) { + U32 const xxh = XXH32_digest(&(cctxPtr->xxh)); + RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall); + DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh); + LZ4F_writeLE32(dstPtr, xxh); + dstPtr+=4; /* content Checksum */ + } + + cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */ + + if (cctxPtr->prefs.frameInfo.contentSize) { + if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize) + RETURN_ERROR(frameSize_wrong); + } + + return (size_t)(dstPtr - dstStart); +} + + +/*-*************************************************** +* Frame Decompression +*****************************************************/ + +typedef enum { + dstage_getFrameHeader=0, dstage_storeFrameHeader, + dstage_init, + dstage_getBlockHeader, dstage_storeBlockHeader, + dstage_copyDirect, dstage_getBlockChecksum, + dstage_getCBlock, dstage_storeCBlock, + dstage_flushOut, + dstage_getSuffix, dstage_storeSuffix, + dstage_getSFrameSize, dstage_storeSFrameSize, + dstage_skipSkippable +} dStage_t; + +struct LZ4F_dctx_s { + LZ4F_CustomMem cmem; + LZ4F_frameInfo_t frameInfo; + U32 version; + dStage_t dStage; + U64 frameRemainingSize; + size_t maxBlockSize; + size_t maxBufferSize; + BYTE* tmpIn; + size_t tmpInSize; + size_t tmpInTarget; + BYTE* tmpOutBuffer; + const BYTE* dict; + size_t dictSize; + BYTE* tmpOut; + size_t tmpOutSize; + size_t tmpOutStart; + XXH32_state_t xxh; + XXH32_state_t blockChecksum; + int skipChecksum; + BYTE header[LZ4F_HEADER_SIZE_MAX]; +}; /* typedef'd to LZ4F_dctx in lz4frame.h */ + + +LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) +{ + LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem); + if (dctx == NULL) return NULL; + + dctx->cmem = customMem; + dctx->version = version; + return dctx; +} + +/*! LZ4F_createDecompressionContext() : + * Create a decompressionContext object, which will track all decompression operations. + * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object. + * Object can later be released using LZ4F_freeDecompressionContext(). + * @return : if != 0, there was an error during context creation. 
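+ * A minimal sketch (illustrative):
+ *     LZ4F_dctx* dctx = NULL;
+ *     LZ4F_errorCode_t const err = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
+ *     if (LZ4F_isError(err)) ... handle allocation failure ...
+ *     ... LZ4F_decompress() calls ...
+ *     LZ4F_freeDecompressionContext(dctx);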
+ */ +LZ4F_errorCode_t +LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber) +{ + assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */ + RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */ + + *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber); + if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */ + RETURN_ERROR(allocation_failed); + } + return LZ4F_OK_NoError; +} + +LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx) +{ + LZ4F_errorCode_t result = LZ4F_OK_NoError; + if (dctx != NULL) { /* can accept NULL input, like free() */ + result = (LZ4F_errorCode_t)dctx->dStage; + LZ4F_free(dctx->tmpIn, dctx->cmem); + LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); + LZ4F_free(dctx, dctx->cmem); + } + return result; +} + +size_t LZ4F_dctx_size(const LZ4F_dctx* dctx) { + if (dctx == NULL) { + return 0; + } + return sizeof(*dctx) + + (dctx->tmpIn != NULL ? dctx->maxBlockSize + BFSize : 0) + + (dctx->tmpOutBuffer != NULL ? dctx->maxBufferSize : 0); +} + + +/*==--- Streaming Decompression operations ---==*/ +void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx) +{ + DEBUGLOG(5, "LZ4F_resetDecompressionContext"); + dctx->dStage = dstage_getFrameHeader; + dctx->dict = NULL; + dctx->dictSize = 0; + dctx->skipChecksum = 0; + dctx->frameRemainingSize = 0; +} + + +/*! LZ4F_decodeHeader() : + * input : `src` points at the **beginning of the frame** + * output : set internal values of dctx, such as + * dctx->frameInfo and dctx->dStage. + * Also allocates internal buffers. + * @return : nb Bytes read from src (necessarily <= srcSize) + * or an error code (testable with LZ4F_isError()) + */ +static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize) +{ + unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID; + size_t frameHeaderSize; + const BYTE* srcPtr = (const BYTE*)src; + + DEBUGLOG(5, "LZ4F_decodeHeader"); + /* need to decode header to get frameInfo */ + RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */ + MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo)); + + /* special case : skippable frames */ + if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) { + dctx->frameInfo.frameType = LZ4F_skippableFrame; + if (src == (void*)(dctx->header)) { + dctx->tmpInSize = srcSize; + dctx->tmpInTarget = 8; + dctx->dStage = dstage_storeSFrameSize; + return srcSize; + } else { + dctx->dStage = dstage_getSFrameSize; + return 4; + } } + + /* control magic number */ +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) { + DEBUGLOG(4, "frame header error : unknown magic number"); + RETURN_ERROR(frameType_unknown); + } +#endif + dctx->frameInfo.frameType = LZ4F_frame; + + /* Flags */ + { U32 const FLG = srcPtr[4]; + U32 const version = (FLG>>6) & _2BITS; + blockChecksumFlag = (FLG>>4) & _1BIT; + blockMode = (FLG>>5) & _1BIT; + contentSizeFlag = (FLG>>3) & _1BIT; + contentChecksumFlag = (FLG>>2) & _1BIT; + dictIDFlag = FLG & _1BIT; + /* validate */ + if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ + if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */ + } + DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag); + + /* Frame Header Size */ + frameHeaderSize = 
minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); + + if (srcSize < frameHeaderSize) { + /* not enough input to fully decode frame header */ + if (srcPtr != dctx->header) + memcpy(dctx->header, srcPtr, srcSize); + dctx->tmpInSize = srcSize; + dctx->tmpInTarget = frameHeaderSize; + dctx->dStage = dstage_storeFrameHeader; + return srcSize; + } + + { U32 const BD = srcPtr[5]; + blockSizeID = (BD>>4) & _3BITS; + /* validate */ + if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ + if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid); /* 4-7 only supported values for the time being */ + if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bits */ + } + + /* check header */ + assert(frameHeaderSize > 5); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5); + RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid); + } +#endif + + /* save */ + dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode; + dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag; + dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag; + dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID; + dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID); + if (contentSizeFlag) { + dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6); + } + if (dictIDFlag) + dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5); + + dctx->dStage = dstage_init; + + return frameHeaderSize; +} + + +/*! LZ4F_headerSize() : + * @return : size of frame header + * or an error code, which can be tested using LZ4F_isError() + */ +size_t LZ4F_headerSize(const void* src, size_t srcSize) +{ + RETURN_ERROR_IF(src == NULL, srcPtr_wrong); + + /* minimal srcSize to determine header size */ + if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH) + RETURN_ERROR(frameHeader_incomplete); + + /* special case : skippable frames */ + if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) + return 8; + + /* control magic number */ +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER) + RETURN_ERROR(frameType_unknown); +#endif + + /* Frame Header Size */ + { BYTE const FLG = ((const BYTE*)src)[4]; + U32 const contentSizeFlag = (FLG>>3) & _1BIT; + U32 const dictIDFlag = FLG & _1BIT; + return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); + } +} + +/*! LZ4F_getFrameInfo() : + * This function extracts frame parameters (max blockSize, frame checksum, etc.). + * Usage is optional. Objective is to provide relevant information for allocation purposes. + * This function works in 2 situations : + * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process. + * Amount of input data provided must be large enough to successfully decode the frame header. + * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum. + * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx. + * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value). + * Decompression must resume from (srcBuffer + *srcSizePtr). 
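+ * A minimal sketch (illustrative; srcAvail stands for the number of input bytes on hand):
+ *     LZ4F_frameInfo_t info;
+ *     size_t consumed = srcAvail;
+ *     size_t const hint = LZ4F_getFrameInfo(dctx, &info, srcBuffer, &consumed);
+ *     if (LZ4F_isError(hint)) ... handle error ...
+ *     (a non-zero info.contentSize can then be used to size the output buffer)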
+ * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
+ *           or an error code which can be tested using LZ4F_isError()
+ * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
+ * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ */
+LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+                                   LZ4F_frameInfo_t* frameInfoPtr,
+                                   const void* srcBuffer, size_t* srcSizePtr)
+{
+    assert(dctx != NULL);
+    RETURN_ERROR_IF(frameInfoPtr == NULL, parameter_null);
+    RETURN_ERROR_IF(srcSizePtr == NULL, parameter_null);
+
+    LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
+    if (dctx->dStage > dstage_storeFrameHeader) {
+        /* frameInfo already decoded */
+        size_t o=0, i=0;
+        *srcSizePtr = 0;
+        *frameInfoPtr = dctx->frameInfo;
+        /* returns : recommended nb of bytes for LZ4F_decompress() */
+        return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
+    } else {
+        if (dctx->dStage == dstage_storeFrameHeader) {
+            /* frame decoding already started, in the middle of header => automatic fail */
+            *srcSizePtr = 0;
+            RETURN_ERROR(frameDecoding_alreadyStarted);
+        } else {
+            size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
+            if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
+            if (*srcSizePtr < hSize) {
+                *srcSizePtr=0;
+                RETURN_ERROR(frameHeader_incomplete);
+            }
+
+            { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
+              if (LZ4F_isError(decodeResult)) {
+                  *srcSizePtr = 0;
+              } else {
+                  *srcSizePtr = decodeResult;
+                  decodeResult = BHSize; /* block header size */
+              }
+              *frameInfoPtr = dctx->frameInfo;
+              return decodeResult;
+    }   }   }
+}
+
+
+/* LZ4F_updateDict() :
+ * only used for LZ4F_blockLinked mode
+ * Condition : @dstPtr != NULL
+ */
+static void LZ4F_updateDict(LZ4F_dctx* dctx,
+                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
+                      unsigned withinTmp)
+{
+    assert(dstPtr != NULL);
+    if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;   /* will lead to prefix mode */
+    assert(dctx->dict != NULL);
+
+    if (dctx->dict + dctx->dictSize == dstPtr) {   /* prefix mode, everything within dstBuffer */
+        dctx->dictSize += dstSize;
+        return;
+    }
+
+    assert(dstPtr >= dstBufferStart);
+    if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {   /* history in dstBuffer becomes large enough to become dictionary */
+        dctx->dict = (const BYTE*)dstBufferStart;
+        dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
+        return;
+    }
+
+    assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
+
+    /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
+    assert(dctx->tmpOutBuffer != NULL);
+
+    if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
+        /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
+        assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
+        dctx->dictSize += dstSize;
+        return;
+    }
+
+    if (withinTmp) {   /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
+        size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
+        size_t copySize = 64 KB - dctx->tmpOutSize;
+        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
+        if (dctx->tmpOutSize > 64 KB) copySize = 0;
+        if (copySize > preserveSize) copySize = preserveSize;
+
+        memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+
+        dctx->dict = dctx->tmpOutBuffer;
+        dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
+        return;
+    }
+
+    if (dctx->dict == dctx->tmpOutBuffer) {   /* copy dst into tmp to complete dict */
+        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {   /* tmp buffer not large enough */
+            size_t const preserveSize = 64 KB - dstSize;
+            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
+            dctx->dictSize = preserveSize;
+        }
+        memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
+        dctx->dictSize += dstSize;
+        return;
+    }
+
+    /* join dict & dest into tmp */
+    { size_t preserveSize = 64 KB - dstSize;
+      if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
+      memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
+      memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
+      dctx->dict = dctx->tmpOutBuffer;
+      dctx->dictSize = preserveSize + dstSize;
+    }
+}
+
+
+/*! LZ4F_decompress() :
+ * Call this function repetitively to regenerate the data compressed in srcBuffer.
+ * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
+ * into dstBuffer of capacity *dstSizePtr.
+ *
+ * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
+ *
+ * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
+ * If the number of bytes read is < the number of bytes provided, then the decompression operation is not complete.
+ * Remaining data will have to be presented again in a subsequent invocation.
+ *
+ * The function result is a hint of the best srcSize to use for the next call to LZ4F_decompress.
+ * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
+ * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
+ * Note that this is just a hint, and it's always possible to supply any srcSize value.
+ * When a frame is fully decoded, @return will be 0.
+ * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
+ */
+size_t LZ4F_decompress(LZ4F_dctx* dctx,
+                       void* dstBuffer, size_t* dstSizePtr,
+                       const void* srcBuffer, size_t* srcSizePtr,
+                       const LZ4F_decompressOptions_t* decompressOptionsPtr)
+{
+    LZ4F_decompressOptions_t optionsNull;
+    const BYTE* const srcStart = (const BYTE*)srcBuffer;
+    const BYTE* const srcEnd = srcStart + *srcSizePtr;
+    const BYTE* srcPtr = srcStart;
+    BYTE* const dstStart = (BYTE*)dstBuffer;
+    BYTE* const dstEnd = dstStart ?
dstStart + *dstSizePtr : NULL; + BYTE* dstPtr = dstStart; + const BYTE* selectedIn = NULL; + unsigned doAnotherStage = 1; + size_t nextSrcSizeHint = 1; + + + DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)", + srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr); + if (dstBuffer == NULL) assert(*dstSizePtr == 0); + MEM_INIT(&optionsNull, 0, sizeof(optionsNull)); + if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull; + *srcSizePtr = 0; + *dstSizePtr = 0; + assert(dctx != NULL); + dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */ + + /* behaves as a state machine */ + + while (doAnotherStage) { + + switch(dctx->dStage) + { + + case dstage_getFrameHeader: + DEBUGLOG(6, "dstage_getFrameHeader"); + if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */ + size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */ + FORWARD_IF_ERROR(hSize); + srcPtr += hSize; + break; + } + dctx->tmpInSize = 0; + if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */ + dctx->tmpInTarget = minFHSize; /* minimum size to decode header */ + dctx->dStage = dstage_storeFrameHeader; + /* fall-through */ + + case dstage_storeFrameHeader: + DEBUGLOG(6, "dstage_storeFrameHeader"); + { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr)); + memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); + dctx->tmpInSize += sizeToCopy; + srcPtr += sizeToCopy; + } + if (dctx->tmpInSize < dctx->tmpInTarget) { + nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ + doAnotherStage = 0; /* not enough src data, ask for some more */ + break; + } + FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */ + break; + + case dstage_init: + DEBUGLOG(6, "dstage_init"); + if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0); + /* internal buffers allocation */ + { size_t const bufferNeeded = dctx->maxBlockSize + + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 
128 KB : 0); + if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ + dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ + LZ4F_free(dctx->tmpIn, dctx->cmem); + dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem); + RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed); + LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); + dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem); + RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed); + dctx->maxBufferSize = bufferNeeded; + } } + dctx->tmpInSize = 0; + dctx->tmpInTarget = 0; + dctx->tmpOut = dctx->tmpOutBuffer; + dctx->tmpOutStart = 0; + dctx->tmpOutSize = 0; + + dctx->dStage = dstage_getBlockHeader; + /* fall-through */ + + case dstage_getBlockHeader: + if ((size_t)(srcEnd - srcPtr) >= BHSize) { + selectedIn = srcPtr; + srcPtr += BHSize; + } else { + /* not enough input to read cBlockSize field */ + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeBlockHeader; + } + + if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */ + case dstage_storeBlockHeader: + { size_t const remainingInput = (size_t)(srcEnd - srcPtr); + size_t const wantedData = BHSize - dctx->tmpInSize; + size_t const sizeToCopy = MIN(wantedData, remainingInput); + memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); + srcPtr += sizeToCopy; + dctx->tmpInSize += sizeToCopy; + + if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */ + nextSrcSizeHint = BHSize - dctx->tmpInSize; + doAnotherStage = 0; + break; + } + selectedIn = dctx->tmpIn; + } /* if (dctx->dStage == dstage_storeBlockHeader) */ + + /* decode block header */ + { U32 const blockHeader = LZ4F_readLE32(selectedIn); + size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU; + size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize; + if (blockHeader==0) { /* frameEnd signal, no more block */ + DEBUGLOG(5, "end of frame"); + dctx->dStage = dstage_getSuffix; + break; + } + if (nextCBlockSize > dctx->maxBlockSize) { + RETURN_ERROR(maxBlockSize_invalid); + } + if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) { + /* next block is uncompressed */ + dctx->tmpInTarget = nextCBlockSize; + DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize); + if (dctx->frameInfo.blockChecksumFlag) { + (void)XXH32_reset(&dctx->blockChecksum, 0); + } + dctx->dStage = dstage_copyDirect; + break; + } + /* next block is a compressed block */ + dctx->tmpInTarget = nextCBlockSize + crcSize; + dctx->dStage = dstage_getCBlock; + if (dstPtr==dstEnd || srcPtr==srcEnd) { + nextSrcSizeHint = BHSize + nextCBlockSize + crcSize; + doAnotherStage = 0; + } + break; + } + + case dstage_copyDirect: /* uncompressed block */ + DEBUGLOG(6, "dstage_copyDirect"); + { size_t sizeToCopy; + if (dstPtr == NULL) { + sizeToCopy = 0; + } else { + size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr)); + sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize); + memcpy(dstPtr, srcPtr, sizeToCopy); + if (!dctx->skipChecksum) { + if (dctx->frameInfo.blockChecksumFlag) { + (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy); + } + if (dctx->frameInfo.contentChecksumFlag) + (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy); + } + if (dctx->frameInfo.contentSize) + dctx->frameRemainingSize -= sizeToCopy; + + /* history management (linked blocks only)*/ + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { + LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0); + } + srcPtr += 
sizeToCopy; + dstPtr += sizeToCopy; + } + if (sizeToCopy == dctx->tmpInTarget) { /* all done */ + if (dctx->frameInfo.blockChecksumFlag) { + dctx->tmpInSize = 0; + dctx->dStage = dstage_getBlockChecksum; + } else + dctx->dStage = dstage_getBlockHeader; /* new block */ + break; + } + dctx->tmpInTarget -= sizeToCopy; /* need to copy more */ + } + nextSrcSizeHint = dctx->tmpInTarget + + +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0) + + BHSize /* next header size */; + doAnotherStage = 0; + break; + + /* check block checksum for recently transferred uncompressed block */ + case dstage_getBlockChecksum: + DEBUGLOG(6, "dstage_getBlockChecksum"); + { const void* crcSrc; + if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) { + crcSrc = srcPtr; + srcPtr += 4; + } else { + size_t const stillToCopy = 4 - dctx->tmpInSize; + size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr)); + memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); + dctx->tmpInSize += sizeToCopy; + srcPtr += sizeToCopy; + if (dctx->tmpInSize < 4) { /* all input consumed */ + doAnotherStage = 0; + break; + } + crcSrc = dctx->header; + } + if (!dctx->skipChecksum) { + U32 const readCRC = LZ4F_readLE32(crcSrc); + U32 const calcCRC = XXH32_digest(&dctx->blockChecksum); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + DEBUGLOG(6, "compare block checksum"); + if (readCRC != calcCRC) { + DEBUGLOG(4, "incorrect block checksum: %08X != %08X", + readCRC, calcCRC); + RETURN_ERROR(blockChecksum_invalid); + } +#else + (void)readCRC; + (void)calcCRC; +#endif + } } + dctx->dStage = dstage_getBlockHeader; /* new block */ + break; + + case dstage_getCBlock: + DEBUGLOG(6, "dstage_getCBlock"); + if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) { + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeCBlock; + break; + } + /* input large enough to read full block directly */ + selectedIn = srcPtr; + srcPtr += dctx->tmpInTarget; + + if (0) /* always jump over next block */ + case dstage_storeCBlock: + { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize; + size_t const inputLeft = (size_t)(srcEnd-srcPtr); + size_t const sizeToCopy = MIN(wantedData, inputLeft); + memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); + dctx->tmpInSize += sizeToCopy; + srcPtr += sizeToCopy; + if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */ + nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + + (dctx->frameInfo.blockChecksumFlag ? 
BFSize : 0) + + BHSize /* next header size */; + doAnotherStage = 0; + break; + } + selectedIn = dctx->tmpIn; + } + + /* At this stage, input is large enough to decode a block */ + + /* First, decode and control block checksum if it exists */ + if (dctx->frameInfo.blockChecksumFlag) { + assert(dctx->tmpInTarget >= 4); + dctx->tmpInTarget -= 4; + assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */ + { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget); + U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid); +#else + (void)readBlockCrc; + (void)calcBlockCrc; +#endif + } } + + /* decode directly into destination buffer if there is enough room */ + if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) + /* unless the dictionary is stored in tmpOut: + * in which case it's faster to decode within tmpOut + * to benefit from prefix speedup */ + && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) ) + { + const char* dict = (const char*)dctx->dict; + size_t dictSize = dctx->dictSize; + int decodedSize; + assert(dstPtr != NULL); + if (dict && dictSize > 1 GB) { + /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */ + dict += dictSize - 64 KB; + dictSize = 64 KB; + } + decodedSize = LZ4_decompress_safe_usingDict( + (const char*)selectedIn, (char*)dstPtr, + (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, + dict, (int)dictSize); + RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); + if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum)) + XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize); + if (dctx->frameInfo.contentSize) + dctx->frameRemainingSize -= (size_t)decodedSize; + + /* dictionary management */ + if (dctx->frameInfo.blockMode==LZ4F_blockLinked) { + LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0); + } + + dstPtr += decodedSize; + dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */ + break; + } + + /* not enough place into dst : decode into tmpOut */ + + /* manage dictionary */ + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { + if (dctx->dict == dctx->tmpOutBuffer) { + /* truncate dictionary to 64 KB if too big */ + if (dctx->dictSize > 128 KB) { + memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB); + dctx->dictSize = 64 KB; + } + dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize; + } else { /* dict not within tmpOut */ + size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB); + dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace; + } } + + /* Decode block into tmpOut */ + { const char* dict = (const char*)dctx->dict; + size_t dictSize = dctx->dictSize; + int decodedSize; + if (dict && dictSize > 1 GB) { + /* the dictSize param is an int, avoid truncation / sign issues */ + dict += dictSize - 64 KB; + dictSize = 64 KB; + } + decodedSize = LZ4_decompress_safe_usingDict( + (const char*)selectedIn, (char*)dctx->tmpOut, + (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, + dict, (int)dictSize); + RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); + if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum) + XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize); + if (dctx->frameInfo.contentSize) + dctx->frameRemainingSize -= (size_t)decodedSize; + dctx->tmpOutSize = (size_t)decodedSize; + dctx->tmpOutStart = 0; + 
dctx->dStage = dstage_flushOut; + } + /* fall-through */ + + case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */ + DEBUGLOG(6, "dstage_flushOut"); + if (dstPtr != NULL) { + size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr)); + memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy); + + /* dictionary management */ + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) + LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/); + + dctx->tmpOutStart += sizeToCopy; + dstPtr += sizeToCopy; + } + if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */ + dctx->dStage = dstage_getBlockHeader; /* get next block */ + break; + } + /* could not flush everything : stop there, just request a block header */ + doAnotherStage = 0; + nextSrcSizeHint = BHSize; + break; + + case dstage_getSuffix: + RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */ + if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */ + nextSrcSizeHint = 0; + LZ4F_resetDecompressionContext(dctx); + doAnotherStage = 0; + break; + } + if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeSuffix; + } else { + selectedIn = srcPtr; + srcPtr += 4; + } + + if (dctx->dStage == dstage_storeSuffix) /* can be skipped */ + case dstage_storeSuffix: + { size_t const remainingInput = (size_t)(srcEnd - srcPtr); + size_t const wantedData = 4 - dctx->tmpInSize; + size_t const sizeToCopy = MIN(wantedData, remainingInput); + memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); + srcPtr += sizeToCopy; + dctx->tmpInSize += sizeToCopy; + if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */ + nextSrcSizeHint = 4 - dctx->tmpInSize; + doAnotherStage=0; + break; + } + selectedIn = dctx->tmpIn; + } /* if (dctx->dStage == dstage_storeSuffix) */ + + /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */ + if (!dctx->skipChecksum) { + U32 const readCRC = LZ4F_readLE32(selectedIn); + U32 const resultCRC = XXH32_digest(&(dctx->xxh)); + DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid); +#else + (void)readCRC; + (void)resultCRC; +#endif + } + nextSrcSizeHint = 0; + LZ4F_resetDecompressionContext(dctx); + doAnotherStage = 0; + break; + + case dstage_getSFrameSize: + if ((srcEnd - srcPtr) >= 4) { + selectedIn = srcPtr; + srcPtr += 4; + } else { + /* not enough input to read cBlockSize field */ + dctx->tmpInSize = 4; + dctx->tmpInTarget = 8; + dctx->dStage = dstage_storeSFrameSize; + } + + if (dctx->dStage == dstage_storeSFrameSize) + case dstage_storeSFrameSize: + { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, + (size_t)(srcEnd - srcPtr) ); + memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); + srcPtr += sizeToCopy; + dctx->tmpInSize += sizeToCopy; + if (dctx->tmpInSize < dctx->tmpInTarget) { + /* not enough input to get full sBlockSize; wait for more */ + nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize; + doAnotherStage = 0; + break; + } + selectedIn = dctx->header + 4; + } /* if (dctx->dStage == dstage_storeSFrameSize) */ + + /* case dstage_decodeSFrameSize: */ /* no direct entry */ + { size_t const SFrameSize = LZ4F_readLE32(selectedIn); + dctx->frameInfo.contentSize = SFrameSize; + dctx->tmpInTarget = SFrameSize; + 
dctx->dStage = dstage_skipSkippable; + break; + } + + case dstage_skipSkippable: + { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr)); + srcPtr += skipSize; + dctx->tmpInTarget -= skipSize; + doAnotherStage = 0; + nextSrcSizeHint = dctx->tmpInTarget; + if (nextSrcSizeHint) break; /* still more to skip */ + /* frame fully skipped : prepare context for a new frame */ + LZ4F_resetDecompressionContext(dctx); + break; + } + } /* switch (dctx->dStage) */ + } /* while (doAnotherStage) */ + + /* preserve history within tmpOut whenever necessary */ + LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2); + if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */ + && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */ + && (dctx->dict != NULL) /* dictionary exists */ + && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */ + && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */ + { + if (dctx->dStage == dstage_flushOut) { + size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); + size_t copySize = 64 KB - dctx->tmpOutSize; + const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; + if (dctx->tmpOutSize > 64 KB) copySize = 0; + if (copySize > preserveSize) copySize = preserveSize; + assert(dctx->tmpOutBuffer != NULL); + + memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); + + dctx->dict = dctx->tmpOutBuffer; + dctx->dictSize = preserveSize + dctx->tmpOutStart; + } else { + const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize; + size_t const newDictSize = MIN(dctx->dictSize, 64 KB); + + memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); + + dctx->dict = dctx->tmpOutBuffer; + dctx->dictSize = newDictSize; + dctx->tmpOut = dctx->tmpOutBuffer + newDictSize; + } + } + + *srcSizePtr = (size_t)(srcPtr - srcStart); + *dstSizePtr = (size_t)(dstPtr - dstStart); + return nextSrcSizeHint; +} + +/*! LZ4F_decompress_usingDict() : + * Same as LZ4F_decompress(), using a predefined dictionary. + * Dictionary is used "in place", without any preprocessing. + * It must remain accessible throughout the entire frame decoding. + */ +size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const void* dict, size_t dictSize, + const LZ4F_decompressOptions_t* decompressOptionsPtr) +{ + if (dctx->dStage <= dstage_init) { + dctx->dict = (const BYTE*)dict; + dctx->dictSize = dictSize; + } + return LZ4F_decompress(dctx, dstBuffer, dstSizePtr, + srcBuffer, srcSizePtr, + decompressOptionsPtr); +} diff --git a/tools/lib/lz4/lz4frame.h b/tools/lib/lz4/lz4frame.h new file mode 100644 index 0000000..aee2042 --- /dev/null +++ b/tools/lib/lz4/lz4frame.h @@ -0,0 +1,771 @@ +/* + LZ4F - LZ4-Frame library + Header File + Copyright (c) Yann Collet. All rights reserved. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* LZ4F is a stand-alone API able to create and decode LZ4 frames
+ * conformant with specification v1.6.1 in doc/lz4_Frame_format.md .
+ * Generated frames are compatible with `lz4` CLI.
+ *
+ * LZ4F also offers streaming capabilities.
+ *
+ * lz4.h is not required when using lz4frame.h,
+ * except to extract common constants such as LZ4_VERSION_NUMBER.
+ * */
+
+#ifndef LZ4F_H_09782039843
+#define LZ4F_H_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+
+/**
+ * Introduction
+ *
+ * lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md .
+ * LZ4 Frames are compatible with `lz4` CLI,
+ * and designed to be interoperable with any system.
+**/
+
+/*-***************************************************************
+ * Compiler specifics
+ *****************************************************************/
+/* LZ4_DLL_EXPORT :
+ * Enable exporting of functions when building a Windows DLL
+ * LZ4FLIB_VISIBILITY :
+ * Control library symbols visibility.
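+ * (it is used to keep the public API exported even when the library is
+ * built with -fvisibility=hidden)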
+ */ +#ifndef LZ4FLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define LZ4FLIB_VISIBILITY +# endif +#endif +#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) +# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY +#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) +# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY +#else +# define LZ4FLIB_API LZ4FLIB_VISIBILITY +#endif + +#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS +# define LZ4F_DEPRECATE(x) x +#else +# if defined(_MSC_VER) +# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */ +# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6)) +# define LZ4F_DEPRECATE(x) x __attribute__((deprecated)) +# else +# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */ +# endif +#endif + + +/*-************************************ + * Error management + **************************************/ +typedef size_t LZ4F_errorCode_t; + +LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */ +LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */ + + +/*-************************************ + * Frame compression types + ************************************* */ +/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */ +#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS +# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x +#else +# define LZ4F_OBSOLETE_ENUM(x) +#endif + +/* The larger the block size, the (slightly) better the compression ratio, + * though there are diminishing returns. + * Larger blocks also increase memory usage on both compression and decompression sides. + */ +typedef enum { + LZ4F_default=0, + LZ4F_max64KB=4, + LZ4F_max256KB=5, + LZ4F_max1MB=6, + LZ4F_max4MB=7 + LZ4F_OBSOLETE_ENUM(max64KB) + LZ4F_OBSOLETE_ENUM(max256KB) + LZ4F_OBSOLETE_ENUM(max1MB) + LZ4F_OBSOLETE_ENUM(max4MB) +} LZ4F_blockSizeID_t; + +/* Linked blocks sharply reduce inefficiencies when using small blocks, + * they compress better. + * However, some LZ4 decoders are only compatible with independent blocks */ +typedef enum { + LZ4F_blockLinked=0, + LZ4F_blockIndependent + LZ4F_OBSOLETE_ENUM(blockLinked) + LZ4F_OBSOLETE_ENUM(blockIndependent) +} LZ4F_blockMode_t; + +typedef enum { + LZ4F_noContentChecksum=0, + LZ4F_contentChecksumEnabled + LZ4F_OBSOLETE_ENUM(noContentChecksum) + LZ4F_OBSOLETE_ENUM(contentChecksumEnabled) +} LZ4F_contentChecksum_t; + +typedef enum { + LZ4F_noBlockChecksum=0, + LZ4F_blockChecksumEnabled +} LZ4F_blockChecksum_t; + +typedef enum { + LZ4F_frame=0, + LZ4F_skippableFrame + LZ4F_OBSOLETE_ENUM(skippableFrame) +} LZ4F_frameType_t; + +#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS +typedef LZ4F_blockSizeID_t blockSizeID_t; +typedef LZ4F_blockMode_t blockMode_t; +typedef LZ4F_frameType_t frameType_t; +typedef LZ4F_contentChecksum_t contentChecksum_t; +#endif + +/*! LZ4F_frameInfo_t : + * makes it possible to set or read frame parameters. + * Structure must be first init to 0, using memset() or LZ4F_INIT_FRAMEINFO, + * setting all parameters to default. 
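+ * e.g. (illustrative) : LZ4F_frameInfo_t info = LZ4F_INIT_FRAMEINFO;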
+ * It's then possible to update selectively some parameters */ +typedef struct { + LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default (LZ4F_max64KB) */ + LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default (LZ4F_blockLinked) */ + LZ4F_contentChecksum_t contentChecksumFlag; /* 1: add a 32-bit checksum of frame's decompressed data; 0 == default (disabled) */ + LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ + unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ + unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */ + LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0 == default (disabled) */ +} LZ4F_frameInfo_t; + +#define LZ4F_INIT_FRAMEINFO { LZ4F_max64KB, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */ + +/*! LZ4F_preferences_t : + * makes it possible to supply advanced compression instructions to streaming interface. + * Structure must be first init to 0, using memset() or LZ4F_INIT_PREFERENCES, + * setting all parameters to default. + * All reserved fields must be set to zero. */ +typedef struct { + LZ4F_frameInfo_t frameInfo; + int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */ + unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */ + unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */ + unsigned reserved[3]; /* must be zero for forward compatibility */ +} LZ4F_preferences_t; + +#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */ + + +/*-********************************* +* Simple compression function +***********************************/ + +/*! LZ4F_compressFrame() : + * Compress srcBuffer content into an LZ4-compressed frame. + * It's a one shot operation, all input content is consumed, and all output is generated. + * + * Note : it's a stateless operation (no LZ4F_cctx state needed). + * In order to reduce load on the allocator, LZ4F_compressFrame(), by default, + * uses the stack to allocate space for the compression state and some table. + * If this usage of the stack is too much for your application, + * consider compiling `lz4frame.c` with compile-time macro LZ4F_HEAPMODE set to 1 instead. + * All state allocations will use the Heap. + * It also means each invocation of LZ4F_compressFrame() will trigger several internal alloc/free invocations. + * + * @dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * @preferencesPtr is optional : one can provide NULL, in which case all preferences are set to default. + * @return : number of bytes written into dstBuffer. + * or an error code if it fails (can be tested using LZ4F_isError()) + */ +LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_preferences_t* preferencesPtr); + +/*! LZ4F_compressFrameBound() : + * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. + * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. 
+ * Note : this result is only usable with LZ4F_compressFrame(). + * It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed. + */ +LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr); + + +/*! LZ4F_compressionLevel_max() : + * @return maximum allowed compression level (currently: 12) + */ +LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */ + + +/*-*********************************** +* Advanced compression functions +*************************************/ +typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */ +typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using LZ4F_cctx */ + +typedef struct { + unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */ + unsigned reserved[3]; +} LZ4F_compressOptions_t; + +/*--- Resource Management ---*/ + +#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */ +LZ4FLIB_API unsigned LZ4F_getVersion(void); + +/*! LZ4F_createCompressionContext() : + * The first thing to do is to create a compressionContext object, + * which will keep track of operation state during streaming compression. + * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version, + * and a pointer to LZ4F_cctx*, to write the resulting pointer into. + * @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. + * The function provides a pointer to a fully allocated LZ4F_cctx object. + * @cctxPtr MUST be != NULL. + * If @return != zero, context creation failed. + * A created compression context can be employed multiple times for consecutive streaming operations. + * Once all streaming compression jobs are completed, + * the state object can be released using LZ4F_freeCompressionContext(). + * Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored. + * Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing). +**/ +LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version); +LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx); + + +/*---- Compression ----*/ + +#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected parameters */ +#define LZ4F_HEADER_SIZE_MAX 19 + +/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */ +#define LZ4F_BLOCK_HEADER_SIZE 4 + +/* Size in bytes of a block checksum footer in little-endian format. */ +#define LZ4F_BLOCK_CHECKSUM_SIZE 4 + +/* Size in bytes of the content checksum. */ +#define LZ4F_CONTENT_CHECKSUM_SIZE 4 + +/* Size in bytes of the endmark. */ +#define LZ4F_ENDMARK_SIZE 4 + +/*! LZ4F_compressBegin() : + * will write the frame header into dstBuffer. + * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. + * `prefsPtr` is optional : NULL can be provided to set all preferences to default. + * @return : number of bytes written into dstBuffer for the header + * or an error code (which can be tested using LZ4F_isError()) + */ +LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_preferences_t* prefsPtr); + +/*! 
LZ4F_compressBound() : + * Provides minimum dstCapacity required to guarantee success of + * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario. + * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead. + * Note that the result is only valid for a single invocation of LZ4F_compressUpdate(). + * When invoking LZ4F_compressUpdate() multiple times, + * if the output buffer is gradually filled up instead of emptied and re-used from its start, + * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound(). + * @return is always the same for a srcSize and prefsPtr. + * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. + * tech details : + * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. + * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd(). + * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin(). + */ +LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr); + +/*! LZ4F_compressUpdate() : + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations. + * This value is provided by LZ4F_compressBound(). + * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). + * After an error, the state is left in a UB state, and must be re-initialized or freed. + * If previously an uncompressed block was written, buffered data is flushed + * before appending compressed data is continued. + * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. + * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). + * or an error code if it fails (which can be tested using LZ4F_isError()) + */ +LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* cOptPtr); + +/*! LZ4F_flush() : + * When data must be generated and sent immediately, without waiting for a block to be completely filled, + * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. + * `dstCapacity` must be large enough to ensure the operation will be successful. + * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. + * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx) + * or an error code if it fails (which can be tested using LZ4F_isError()) + * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). + */ +LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* cOptPtr); + +/*! LZ4F_compressEnd() : + * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). + * It will flush whatever data remained within `cctx` (like LZ4_flush()) + * and properly finalize the frame, with an endMark and a checksum. + * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default. 
+ * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark), + * or an error code if it fails (which can be tested using LZ4F_isError()) + * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). + * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task. + */ +LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* cOptPtr); + + +/*-********************************* +* Decompression functions +***********************************/ +typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */ +typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */ + +typedef struct { + unsigned stableDst; /* pledges that last 64KB decompressed data is present right before @dstBuffer pointer. + * This optimization skips internal storage operations. + * Once set, this pledge must remain valid up to the end of current frame. */ + unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time. + * Setting this option to 1 once disables all checksums for the rest of the frame. */ + unsigned reserved1; /* must be set to zero for forward compatibility */ + unsigned reserved0; /* idem */ +} LZ4F_decompressOptions_t; + + +/* Resource management */ + +/*! LZ4F_createDecompressionContext() : + * Create an LZ4F_dctx object, to track all decompression operations. + * @version provided MUST be LZ4F_VERSION. + * @dctxPtr MUST be valid. + * The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object. + * The @return is an errorCode, which can be tested using LZ4F_isError(). + * dctx memory can be released using LZ4F_freeDecompressionContext(); + * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. + * That is, it should be == 0 if decompression has been completed fully and correctly. + */ +LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); +LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); + + +/*-*********************************** +* Streaming decompression functions +*************************************/ + +#define LZ4F_MAGICNUMBER 0x184D2204U +#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U +#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5 + +/*! LZ4F_headerSize() : v1.9.0+ + * Provide the header size of a frame starting at `src`. + * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH, + * which is enough to decode the header length. + * @return : size of frame header + * or an error code, which can be tested using LZ4F_isError() + * note : Frame header size is variable, but is guaranteed to be + * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes. + */ +LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize); + +/*! LZ4F_getFrameInfo() : + * This function extracts frame parameters (max blockSize, dictID, etc.). + * Its usage is optional: user can also invoke LZ4F_decompress() directly. + * + * Extracted information will fill an existing LZ4F_frameInfo_t structure. + * This can be useful for allocation and dictionary identification purposes. + * + * LZ4F_getFrameInfo() can work in the following situations : + * + * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress(). 
+ * It will decode header from `srcBuffer`,
+ * consuming the header and starting the decoding process.
+ *
+ * Input size must be large enough to contain the full frame header.
+ * Frame header size can be known beforehand by LZ4F_headerSize().
+ * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes,
+ * and <= LZ4F_HEADER_SIZE_MAX bytes.
+ * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work.
+ * It's allowed to provide more input data than the header size,
+ * LZ4F_getFrameInfo() will only consume the header.
+ *
+ * If input size is not large enough,
+ * aka if it's smaller than header size,
+ * function will fail and return an error code.
+ *
+ * 2) After decoding has been started,
+ * it's possible to invoke LZ4F_getFrameInfo() anytime
+ * to extract already decoded frame parameters stored within dctx.
+ *
+ * Note that, if decoding has barely started,
+ * and not yet read enough information to decode the header,
+ * LZ4F_getFrameInfo() will fail.
+ *
+ * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value).
+ * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started,
+ * and when decoding the header has been successful.
+ * Decompression must then resume from (srcBuffer + *srcSizePtr).
+ *
+ * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
+ * or an error code which can be tested using LZ4F_isError().
+ * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
+ * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ */
+LZ4FLIB_API size_t
+LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+ LZ4F_frameInfo_t* frameInfoPtr,
+ const void* srcBuffer, size_t* srcSizePtr);
+
+/**
+ * @brief Incrementally decompresses an LZ4 frame into user-provided buffers.
+ *
+ * Call repeatedly until the return value is 0 (frame fully decoded) or an error is reported.
+ * On each call, the function consumes up to *srcSizePtr bytes from @p srcBuffer and
+ * produces up to *dstSizePtr bytes into @p dstBuffer. It updates both size pointers with
+ * the actual number of bytes consumed/produced. There is no separate flush step.
+ *
+ * Typical loop:
+ * - Provide whatever input you have and an available output buffer.
+ * - Read how much input was consumed and how much output was produced.
+ * - Use the returned value as a hint for how many source bytes are ideal next time.
+ *
+ * @param[in] dctx A valid decompression context created by LZ4F_createDecompressionContext().
+ * @param[out] dstBuffer Destination buffer for decompressed bytes. May change between calls.
+ * @param[in,out] dstSizePtr In: capacity of @p dstBuffer in bytes. Out: number of bytes written (<= input value).
+ * @param[in] srcBuffer Source buffer containing (more) compressed data. May point to the middle of a larger buffer.
+ * @param[in,out] srcSizePtr In: number of available bytes in @p srcBuffer. Out: number of bytes consumed (<= input value).
+ * @param[in] optionsPtr Optional decompression options; pass NULL for defaults.
+ *
+ * @return See @retval cases.
+ * @retval >0 Hint (in bytes) for how many source bytes are ideal to provide on the next call.
+ * This also indicates the current frame is not yet complete: the decompressor
+ * expects more input, or may require additional output space to make progress.
+ * User can always pass any amount of input; this value is only a performance hint. + * @retval 0 The current frame is fully decoded. If *srcSizePtr is less than the provided value, + * the unconsumed tail is the start of another frame (if any). + * @retval error An error code; test with LZ4F_isError(ret). After an error, dctx is not + * resumable: call LZ4F_resetDecompressionContext() before reusing it. + * + * @pre @p dctx is a valid state created by LZ4F_createDecompressionContext(). + * @post *srcSizePtr and *dstSizePtr are updated with the actual bytes consumed/produced. + * @p dstBuffer contents in [0, *dstSizePtr) are valid decompressed data. + * + * @note The function may not consume all provided input on each call. Always check *srcSizePtr. + * Present any unconsumed source bytes again on the next call. + * @note @p dstBuffer content is overwritten; it does not need to be stable across calls. + * @note After finishing a frame (return==0), you may immediately start feeding the next frame + * into the same @p dctx (optionally, one can use LZ4F_resetDecompressionContext()). + * + * @warning If you called LZ4F_getFrameInfo() beforehand, you must advance @p srcBuffer and + * decrease *srcSizePtr by the number of bytes it consumed (the frame header). Failing + * to do so can cause decompression failure or, worse, silent corruption. + * + * @see LZ4F_getFrameInfo(), LZ4F_isError(), LZ4F_resetDecompressionContext(), + * LZ4F_createDecompressionContext(), LZ4F_freeDecompressionContext() + */ +LZ4FLIB_API size_t +LZ4F_decompress(LZ4F_dctx* dctx, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const LZ4F_decompressOptions_t* dOptPtr); + + +/*! LZ4F_resetDecompressionContext() : added in v1.8.0 + * In case of an error, the context is left in "undefined" state. + * In which case, it's necessary to reset it, before re-using it. + * This method can also be used to abruptly stop any unfinished decompression, + * and start a new one using same context resources. */ +LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */ + + +/********************************** + * Dictionary compression API + *********************************/ + +/* A Dictionary is useful for the compression of small messages (KB range). + * It dramatically improves compression efficiency. + * + * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful. + * Better results are generally achieved by using Zstandard's Dictionary Builder + * to generate a high-quality dictionary from a set of samples. + * + * The same dictionary will have to be used on the decompression side + * for decoding to be successful. + * To help identify the correct dictionary at decoding stage, + * the frame header allows optional embedding of a dictID field. + */ + +/*! LZ4F_compressBegin_usingDict() : stable since v1.10 + * Inits dictionary compression streaming, and writes the frame header into dstBuffer. + * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. + * @prefsPtr is optional : one may provide NULL as argument, + * however, it's the only way to provide dictID in the frame header. + * @dictBuffer must outlive the compression session. 
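+ * (the dictionary is referenced in place : it must remain valid and unmodified
+ * until the frame is completed with LZ4F_compressEnd())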
+ * @return : number of bytes written into dstBuffer for the header, + * or an error code (which can be tested using LZ4F_isError()) + * NOTE: The LZ4Frame spec allows each independent block to be compressed with the dictionary, + * but this entry supports a more limited scenario, where only the first block uses the dictionary. + * This is still useful for small data, which only need one block anyway. + * For larger inputs, one may be more interested in LZ4F_compressFrame_usingCDict() below. + */ +LZ4FLIB_API size_t +LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* dictBuffer, size_t dictSize, + const LZ4F_preferences_t* prefsPtr); + +/*! LZ4F_decompress_usingDict() : stable since v1.10 + * Same as LZ4F_decompress(), using a predefined dictionary. + * Dictionary is used "in place", without any preprocessing. +** It must remain accessible throughout the entire frame decoding. */ +LZ4FLIB_API size_t +LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const void* dict, size_t dictSize, + const LZ4F_decompressOptions_t* decompressOptionsPtr); + +/***************************************** + * Bulk processing dictionary compression + *****************************************/ + +/* Loading a dictionary has a cost, since it involves construction of tables. + * The Bulk processing dictionary API makes it possible to share this cost + * over an arbitrary number of compression jobs, even concurrently, + * markedly improving compression latency for these cases. + * + * Note that there is no corresponding bulk API for the decompression side, + * because dictionary does not carry any initialization cost for decompression. + * Use the regular LZ4F_decompress_usingDict() there. + */ +typedef struct LZ4F_CDict_s LZ4F_CDict; + +/*! LZ4_createCDict() : stable since v1.10 + * When compressing multiple messages / blocks using the same dictionary, it's recommended to initialize it just once. + * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. + * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. + * @dictBuffer can be released after LZ4_CDict creation, since its content is copied within CDict. */ +LZ4FLIB_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); +LZ4FLIB_API void LZ4F_freeCDict(LZ4F_CDict* CDict); + +/*! LZ4_compressFrame_usingCDict() : stable since v1.10 + * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. + * @cctx must point to a context created by LZ4F_createCompressionContext(). + * If @cdict==NULL, compress without a dictionary. + * @dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * If this condition is not respected, function will fail (@return an errorCode). + * The LZ4F_preferences_t structure is optional : one may provide NULL as argument, + * but it's not recommended, as it's the only way to provide @dictID in the frame header. + * @return : number of bytes written into dstBuffer. + * or an error code if it fails (can be tested using LZ4F_isError()) + * Note: for larger inputs generating multiple independent blocks, + * this entry point uses the dictionary for each block. 
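+ *
+ * Illustrative usage sketch (buffer names and sizes are placeholders,
+ * error handling elided) :
+ *   LZ4F_cctx* cctx = NULL;
+ *   LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictSize);
+ *   LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
+ *   { size_t const r = LZ4F_compressFrame_usingCDict(cctx,
+ *                         dst, LZ4F_compressFrameBound(srcSize, NULL),
+ *                         src, srcSize, cdict, NULL);
+ *     if (LZ4F_isError(r)) { ... handle error ... } }
+ *   LZ4F_freeCDict(cdict);
+ *   LZ4F_freeCompressionContext(cctx);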
*/ +LZ4FLIB_API size_t +LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr); + +/*! LZ4F_compressBegin_usingCDict() : stable since v1.10 + * Inits streaming dictionary compression, and writes the frame header into dstBuffer. + * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. + * @prefsPtr is optional : one may provide NULL as argument, + * note however that it's the only way to insert a @dictID in the frame header. + * @cdict must outlive the compression session. + * @return : number of bytes written into dstBuffer for the header, + * or an error code, which can be tested using LZ4F_isError(). */ +LZ4FLIB_API size_t +LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* prefsPtr); + + +#if defined (__cplusplus) +} +#endif + +#endif /* LZ4F_H_09782039843 */ + +#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) +#define LZ4F_H_STATIC_09782039843 + +/* Note : + * The below declarations are not stable and may change in the future. + * They are therefore only safe to depend on + * when the caller is statically linked against the library. + * To access their declarations, define LZ4F_STATIC_LINKING_ONLY. + * + * By default, these symbols aren't published into shared/dynamic libraries. + * You can override this behavior and force them to be published + * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. + * Use at your own risk. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS +# define LZ4FLIB_STATIC_API LZ4FLIB_API +#else +# define LZ4FLIB_STATIC_API +#endif + + +/* --- Error List --- */ +#define LZ4F_LIST_ERRORS(ITEM) \ + ITEM(OK_NoError) \ + ITEM(ERROR_GENERIC) \ + ITEM(ERROR_maxBlockSize_invalid) \ + ITEM(ERROR_blockMode_invalid) \ + ITEM(ERROR_parameter_invalid) \ + ITEM(ERROR_compressionLevel_invalid) \ + ITEM(ERROR_headerVersion_wrong) \ + ITEM(ERROR_blockChecksum_invalid) \ + ITEM(ERROR_reservedFlag_set) \ + ITEM(ERROR_allocation_failed) \ + ITEM(ERROR_srcSize_tooLarge) \ + ITEM(ERROR_dstMaxSize_tooSmall) \ + ITEM(ERROR_frameHeader_incomplete) \ + ITEM(ERROR_frameType_unknown) \ + ITEM(ERROR_frameSize_wrong) \ + ITEM(ERROR_srcPtr_wrong) \ + ITEM(ERROR_decompressionFailed) \ + ITEM(ERROR_headerChecksum_invalid) \ + ITEM(ERROR_contentChecksum_invalid) \ + ITEM(ERROR_frameDecoding_alreadyStarted) \ + ITEM(ERROR_compressionState_uninitialized) \ + ITEM(ERROR_parameter_null) \ + ITEM(ERROR_io_write) \ + ITEM(ERROR_io_read) \ + ITEM(ERROR_maxCode) + +#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, + +/* enum list is exposed, to handle specific errors */ +typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) + _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes; + +LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); + +/********************************** + * Advanced compression operations + *********************************/ + +/*! LZ4F_getBlockSize() : + * @return, in scalar format (size_t), + * the maximum block size associated with @blockSizeID, + * or an error code (can be tested using LZ4F_isError()) if @blockSizeID is invalid. +**/ +LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID); + +/*! LZ4F_uncompressedUpdate() : + * LZ4F_uncompressedUpdate() can be called repetitively to add data stored as uncompressed blocks. 
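+ * (each such block is emitted verbatim; the highest bit of its block header
+ * marks the block data as uncompressed, see LZ4F_BLOCK_HEADER_SIZE)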
+ * Important rule: dstCapacity MUST be large enough to store the entire source buffer as + * no compression is done for this operation + * If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode). + * After an error, the state is left in a UB state, and must be re-initialized or freed. + * If previously a compressed block was written, buffered data is flushed first, + * before appending uncompressed data is continued. + * This operation is only supported when LZ4F_blockIndependent is used. + * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. + * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). + * or an error code if it fails (which can be tested using LZ4F_isError()) + */ +LZ4FLIB_STATIC_API size_t +LZ4F_uncompressedUpdate(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* cOptPtr); + +/********************************** + * Custom memory allocation + *********************************/ + +/*! Custom memory allocation : v1.9.4+ + * These prototypes make it possible to pass custom allocation/free functions. + * LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below. + * All allocation/free operations will be completed using these custom variants instead of regular ones. + */ +typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size); +typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size); +typedef void (*LZ4F_FreeFunction) (void* opaqueState, void* address); +typedef struct { + LZ4F_AllocFunction customAlloc; + LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */ + LZ4F_FreeFunction customFree; + void* opaqueState; +} LZ4F_CustomMem; +static +#ifdef __GNUC__ +__attribute__((__unused__)) +#endif +LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ + +LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); +LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); +LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize); + +/*! Context size inspection : v1.10.1+ + * These functions return the total memory footprint of the provided context. + */ +LZ4FLIB_STATIC_API size_t LZ4F_cctx_size(const LZ4F_cctx* cctx); +LZ4FLIB_STATIC_API size_t LZ4F_dctx_size(const LZ4F_dctx* dctx); + +#if defined (__cplusplus) +} +#endif + +#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */ diff --git a/tools/lib/lz4/lz4frame_static.h b/tools/lib/lz4/lz4frame_static.h new file mode 100644 index 0000000..dad4b8f --- /dev/null +++ b/tools/lib/lz4/lz4frame_static.h @@ -0,0 +1,47 @@ +/* + LZ4 auto-framing library + Header File for static linking only + Copyright (c) Yann Collet. All rights reserved. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 source repository : https://github.com/lz4/lz4 + - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c +*/ + +#ifndef LZ4FRAME_STATIC_H_0398209384 +#define LZ4FRAME_STATIC_H_0398209384 + +/* The declarations that formerly were made here have been merged into + * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward, + * it is recommended to simply include that header directly. + */ + +#define LZ4F_STATIC_LINKING_ONLY +#include "lz4frame.h" + +#endif /* LZ4FRAME_STATIC_H_0398209384 */ diff --git a/tools/lib/lz4/lz4hc.c b/tools/lib/lz4/lz4hc.c new file mode 100644 index 0000000..6c0d934 --- /dev/null +++ b/tools/lib/lz4/lz4hc.c @@ -0,0 +1,2255 @@ +/* + LZ4 HC - High Compression Mode of LZ4 + Copyright (c) Yann Collet. All rights reserved. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 source repository : https://github.com/lz4/lz4 + - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c +*/ +/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */ + + +/* ************************************* +* Tuning Parameter +***************************************/ + +/*! 
HEAPMODE :
+ * Select how stateless HC compression functions like `LZ4_compress_HC()`
+ * allocate memory for their workspace:
+ * in stack (0:fastest), or in heap (1:default, requires malloc()).
+ * Since workspace is rather large, heap mode is recommended.
+**/
+#ifndef LZ4HC_HEAPMODE
+# define LZ4HC_HEAPMODE 1
+#endif
+
+
+/*=== Dependency ===*/
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+#include <limits.h> /* INT_MAX */
+
+
+/*=== Shared lz4.c code ===*/
+#ifndef LZ4_SRC_INCLUDED
+# if defined(__GNUC__)
+# pragma GCC diagnostic ignored "-Wunused-function"
+# endif
+# if defined (__clang__)
+# pragma clang diagnostic ignored "-Wunused-function"
+# endif
+# define LZ4_COMMONDEFS_ONLY
+# include "lz4.c" /* LZ4_count, constants, mem */
+#endif
+
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
+/*=== Constants ===*/
+#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
+#define LZ4_OPT_NUM (1<<12)
+
+
+/*=== Macros ===*/
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
+
+
+/*=== Levels definition ===*/
+typedef enum { lz4mid, lz4hc, lz4opt } lz4hc_strat_e;
+typedef struct {
+ lz4hc_strat_e strat;
+ int nbSearches;
+ U32 targetLength;
+} cParams_t;
+static const cParams_t k_clTable[LZ4HC_CLEVEL_MAX+1] = {
+ { lz4mid, 2, 16 }, /* 0, unused */
+ { lz4mid, 2, 16 }, /* 1, unused */
+ { lz4mid, 2, 16 }, /* 2 */
+ { lz4hc, 4, 16 }, /* 3 */
+ { lz4hc, 8, 16 }, /* 4 */
+ { lz4hc, 16, 16 }, /* 5 */
+ { lz4hc, 32, 16 }, /* 6 */
+ { lz4hc, 64, 16 }, /* 7 */
+ { lz4hc, 128, 16 }, /* 8 */
+ { lz4hc, 256, 16 }, /* 9 */
+ { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
+ { lz4opt, 512,128 }, /*11 */
+ { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
+};
+
+static cParams_t LZ4HC_getCLevelParams(int cLevel)
+{
+ /* note : clevel convention is a bit different from lz4frame,
+ * possibly something worth revisiting for consistency */
+ if (cLevel < 1)
+ cLevel = LZ4HC_CLEVEL_DEFAULT;
+ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
+ return k_clTable[cLevel];
+}
+
+
+/*=== Hashing ===*/
+#define LZ4HC_HASHSIZE 4
+#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
+static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+static U64 LZ4_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+/* __pack instructions are safer, but compiler specific */
+LZ4_PACK(typedef struct { U64 u64; }) LZ4_unalign64;
+static U64 LZ4_read64(const void* ptr) { return ((const LZ4_unalign64*)ptr)->u64; }
+
+#else /* safe and portable access using memcpy() */
+static U64 LZ4_read64(const void* memPtr)
+{
+ U64 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+#define LZ4MID_HASHSIZE 8
+#define LZ4MID_HASHLOG (LZ4HC_HASH_LOG-1)
+#define LZ4MID_HASHTABLESIZE (1 << LZ4MID_HASHLOG)
+
+static U32 LZ4MID_hash4(U32 v) { return (v * 2654435761U) >> (32-LZ4MID_HASHLOG); }
+static U32 LZ4MID_hash4Ptr(const void* ptr) { return LZ4MID_hash4(LZ4_read32(ptr)); }
+/* note: hash7 hashes the lower 56-bits.
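+ * (the value is shifted to keep 56 bits, multiplied by an odd 64-bit constant,
+ * and the top LZ4MID_HASHLOG bits are retained)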
+ * It presumes input was read using little endian.*/ +static U32 LZ4MID_hash7(U64 v) { return (U32)(((v << (64-56)) * 58295818150454627ULL) >> (64-LZ4MID_HASHLOG)) ; } +static U64 LZ4_readLE64(const void* memPtr); +static U32 LZ4MID_hash8Ptr(const void* ptr) { return LZ4MID_hash7(LZ4_readLE64(ptr)); } + +static U64 LZ4_readLE64(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read64(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + /* note: relies on the compiler to simplify this expression */ + return (U64)p[0] | ((U64)p[1]<<8) | ((U64)p[2]<<16) | ((U64)p[3]<<24) + | ((U64)p[4]<<32) | ((U64)p[5]<<40) | ((U64)p[6]<<48) | ((U64)p[7]<<56); + } +} + + +/*=== Count match length ===*/ +LZ4_FORCE_INLINE +unsigned LZ4HC_NbCommonBytes32(U32 val) +{ + assert(val != 0); + if (LZ4_isLittleEndian()) { +# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)((31 - r) >> 3); +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clz(val) >> 3; +# else + val >>= 8; + val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) | + (val + 0x00FF0000)) >> 24; + return (unsigned)val ^ 3; +# endif + } else { +# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)(r >> 3); +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctz(val) >> 3; +# else + const U32 m = 0x01010101; + return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; +# endif + } +} + +/** LZ4HC_countBack() : + * @return : negative value, nb of common bytes before ip/match */ +LZ4_FORCE_INLINE +int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match, + const BYTE* const iMin, const BYTE* const mMin) +{ + int back = 0; + int const min = (int)MAX(iMin - ip, mMin - match); + assert(min <= 0); + assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31)); + assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31)); + + while ((back - min) > 3) { + U32 const v = LZ4_read32(ip + back - 4) ^ LZ4_read32(match + back - 4); + if (v) { + return (back - (int)LZ4HC_NbCommonBytes32(v)); + } else back -= 4; /* 4-byte step */ + } + /* check remainder if any */ + while ( (back > min) + && (ip[back-1] == match[back-1]) ) + back--; + return back; +} + +/*=== Chain table updates ===*/ +#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */ +/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */ +#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor + + +/************************************** +* Init +**************************************/ +static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4) +{ + MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable)); + MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); +} + +static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start) +{ + size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart); + size_t newStartingOffset = bufferSize + hc4->dictLimit; + DEBUGLOG(5, "LZ4HC_init_internal"); + assert(newStartingOffset >= bufferSize); /* check overflow */ + if (newStartingOffset > 1 GB) { + LZ4HC_clearTables(hc4); + newStartingOffset = 0; + } + newStartingOffset += 64 KB; + hc4->nextToUpdate 
= (U32)newStartingOffset;
+ hc4->prefixStart = start;
+ hc4->end = start;
+ hc4->dictStart = start;
+ hc4->dictLimit = (U32)newStartingOffset;
+ hc4->lowLimit = (U32)newStartingOffset;
+}
+
+
+/**************************************
+* Encode
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
+# define RAWLOG(...) fprintf(stderr, __VA_ARGS__)
+void LZ4HC_hexOut(const void* src, size_t len)
+{
+ const BYTE* p = (const BYTE*)src;
+ size_t n;
+ for (n=0; n<len; n++) {
+ RAWLOG(" %02X", p[n]);
+ }
+ RAWLOG(" \n");
+}
+
+#define HEX_CMP(_lev, _ptr, _ref, _len) \
+ if (LZ4_DEBUG >= _lev) { \
+ RAWLOG("match bytes: "); \
+ LZ4HC_hexOut(_ptr, _len); \
+ RAWLOG("ref bytes: "); \
+ LZ4HC_hexOut(_ref, _len); \
+ }
+
+#else
+# define HEX_CMP(l,p,r,_l)
+#endif
+
+/* LZ4HC_encodeSequence() :
+ * @return : 0 if ok,
+ * 1 if buffer issue detected */
+LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
+ int matchLength,
+ int offset,
+ limitedOutput_directive limit,
+ BYTE* oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
+ BYTE* const token = op++;
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
+ static const BYTE* start = NULL;
+ static U32 totalCost = 0;
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start); /* only works for single segment */
+ U32 const ll = (U32)(ip - anchor);
+ U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
+ U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
+ U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+ if (start==NULL) start = anchor; /* only works for single segment */
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u",
+ pos,
+ (U32)(ip - anchor), matchLength, offset,
+ cost, totalCost);
+# if 1 /* only works on single segment data */
+ HEX_CMP(7, ip, ip-offset, matchLength);
+# endif
+ totalCost += cost;
+#endif
+
+ /* Encode Literal length */
+ { size_t litLen = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (litLen / 255) + litLen + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)litLen, (int)(oend - op));
+ return 1;
+ }
+ if (litLen >= RUN_MASK) {
+ size_t len = litLen - RUN_MASK;
+ *token = (RUN_MASK << ML_BITS);
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
+ } else {
+ *token = (BYTE)(litLen << ML_BITS);
+ }
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + litLen);
+ op += litLen;
+ }
+
+ /* Encode Offset */
+ assert(offset <= LZ4_DISTANCE_MAX );
+ assert(offset > 0);
+ LZ4_writeLE16(op, (U16)(offset)); op += 2;
+
+ /* Encode MatchLength */
+ assert(matchLength >= MINMATCH);
+ { size_t mlCode = (size_t)matchLength - MINMATCH;
+ if (limit && (op + (mlCode / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
+ if (mlCode >= ML_MASK) {
+ *token += ML_MASK;
+ mlCode -= ML_MASK;
+ for(; mlCode >= 510 ; mlCode -= 510) { *op++ = 255; *op++ = 255; }
+ if (mlCode >= 255) { mlCode -= 255; *op++ = 255; }
+ *op++ = (BYTE)mlCode;
+ } else {
+ *token += (BYTE)(mlCode);
+ } }
+
+ /* Prepare next loop */
+ ip += matchLength;
+ anchor = ip;
+
+ return 0;
+
+#undef ip
+#undef op
+#undef anchor
+}
+
+
+typedef struct {
+ int off;
+ int len;
+ int back; /* negative value */
+} LZ4HC_match_t;
+
+LZ4HC_match_t LZ4HC_searchExtDict(const BYTE* ip, U32 ipIndex,
+ const BYTE* const iLowLimit, const BYTE* const iHighLimit,
+ const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex,
+ int currentBestML, int
nbAttempts) +{ + size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit; + U32 lDictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)]; + U32 matchIndex = lDictMatchIndex + gDictEndIndex - (U32)lDictEndIndex; + int offset = 0, sBack = 0; + assert(lDictEndIndex <= 1 GB); + if (lDictMatchIndex>0) + DEBUGLOG(7, "lDictEndIndex = %zu, lDictMatchIndex = %u", lDictEndIndex, lDictMatchIndex); + while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) { + const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + lDictMatchIndex; + + if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { + int mlt; + int back = 0; + const BYTE* vLimit = ip + (lDictEndIndex - lDictMatchIndex); + if (vLimit > iHighLimit) vLimit = iHighLimit; + mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + back = (ip > iLowLimit) ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0; + mlt -= back; + if (mlt > currentBestML) { + currentBestML = mlt; + offset = (int)(ipIndex - matchIndex); + sBack = back; + DEBUGLOG(7, "found match of length %i within extDictCtx", currentBestML); + } } + + { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, lDictMatchIndex); + lDictMatchIndex -= nextOffset; + matchIndex -= nextOffset; + } } + + { LZ4HC_match_t md; + md.len = currentBestML; + md.off = offset; + md.back = sBack; + return md; + } +} + +typedef LZ4HC_match_t (*LZ4MID_searchIntoDict_f)(const BYTE* ip, U32 ipIndex, + const BYTE* const iHighLimit, + const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex); + +static LZ4HC_match_t LZ4MID_searchHCDict(const BYTE* ip, U32 ipIndex, + const BYTE* const iHighLimit, + const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex) +{ + return LZ4HC_searchExtDict(ip,ipIndex, + ip, iHighLimit, + dictCtx, gDictEndIndex, + MINMATCH-1, 2); +} + +static LZ4HC_match_t LZ4MID_searchExtDict(const BYTE* ip, U32 ipIndex, + const BYTE* const iHighLimit, + const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex) +{ + size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit; + const U32* const hash4Table = dictCtx->hashTable; + const U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE; + DEBUGLOG(7, "LZ4MID_searchExtDict (ipIdx=%u)", ipIndex); + + /* search long match first */ + { U32 l8DictMatchIndex = hash8Table[LZ4MID_hash8Ptr(ip)]; + U32 m8Index = l8DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex; + assert(lDictEndIndex <= 1 GB); + if (ipIndex - m8Index <= LZ4_DISTANCE_MAX) { + const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l8DictMatchIndex; + const size_t safeLen = MIN(lDictEndIndex - l8DictMatchIndex, (size_t)(iHighLimit - ip)); + int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen); + if (mlt >= MINMATCH) { + LZ4HC_match_t md; + DEBUGLOG(7, "Found long ExtDict match of len=%u", mlt); + md.len = mlt; + md.off = (int)(ipIndex - m8Index); + md.back = 0; + return md; + } + } + } + + /* search for short match second */ + { U32 l4DictMatchIndex = hash4Table[LZ4MID_hash4Ptr(ip)]; + U32 m4Index = l4DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex; + if (ipIndex - m4Index <= LZ4_DISTANCE_MAX) { + const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l4DictMatchIndex; + const size_t safeLen = MIN(lDictEndIndex - l4DictMatchIndex, (size_t)(iHighLimit - ip)); + int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen); + if (mlt >= MINMATCH) { + LZ4HC_match_t md; + DEBUGLOG(7, "Found short ExtDict match of len=%u", mlt); + md.len = mlt; 
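+ /* m4Index is already translated into the current (global) index space,
+ * so the offset is the plain index difference */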
+ md.off = (int)(ipIndex - m4Index); + md.back = 0; + return md; + } + } + } + + /* nothing found */ + { LZ4HC_match_t const md = {0, 0, 0 }; + return md; + } +} + +/************************************** +* Mid Compression (level 2) +**************************************/ + +LZ4_FORCE_INLINE void +LZ4MID_addPosition(U32* hTable, U32 hValue, U32 index) +{ + hTable[hValue] = index; +} + +#define ADDPOS8(_p, _idx) LZ4MID_addPosition(hash8Table, LZ4MID_hash8Ptr(_p), _idx) +#define ADDPOS4(_p, _idx) LZ4MID_addPosition(hash4Table, LZ4MID_hash4Ptr(_p), _idx) + +/* Fill hash tables with references into dictionary. + * The resulting table is only exploitable by LZ4MID (level 2) */ +static void +LZ4MID_fillHTable (LZ4HC_CCtx_internal* cctx, const void* dict, size_t size) +{ + U32* const hash4Table = cctx->hashTable; + U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE; + const BYTE* const prefixPtr = (const BYTE*)dict; + U32 const prefixIdx = cctx->dictLimit; + U32 const target = prefixIdx + (U32)size - LZ4MID_HASHSIZE; + U32 idx = cctx->nextToUpdate; + assert(dict == cctx->prefixStart); + DEBUGLOG(4, "LZ4MID_fillHTable (size:%zu)", size); + if (size <= LZ4MID_HASHSIZE) + return; + + for (; idx < target; idx += 3) { + ADDPOS4(prefixPtr+idx-prefixIdx, idx); + ADDPOS8(prefixPtr+idx+1-prefixIdx, idx+1); + } + + idx = (size > 32 KB + LZ4MID_HASHSIZE) ? target - 32 KB : cctx->nextToUpdate; + for (; idx < target; idx += 1) { + ADDPOS8(prefixPtr+idx-prefixIdx, idx); + } + + cctx->nextToUpdate = target; +} + +static LZ4MID_searchIntoDict_f select_searchDict_function(const LZ4HC_CCtx_internal* dictCtx) +{ + if (dictCtx == NULL) return NULL; + if (LZ4HC_getCLevelParams(dictCtx->compressionLevel).strat == lz4mid) + return LZ4MID_searchExtDict; + return LZ4MID_searchHCDict; +} + +/* preconditions: + * - *srcSizePtr within [1, LZ4_MAX_INPUT_SIZE] + * - src is valid + * - maxOutputSize >= 1 + * - dst is valid + */ +static int LZ4MID_compress ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* srcSizePtr, + int const maxOutputSize, + const limitedOutput_directive limit, + const dictCtx_directive dict + ) +{ + U32* const hash4Table = ctx->hashTable; + U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE; + const BYTE* ip = (const BYTE*)src; + const BYTE* anchor = ip; + const BYTE* const iend = ip + *srcSizePtr; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = (iend - LASTLITERALS); + const BYTE* const ilimit = (iend - LZ4MID_HASHSIZE); + BYTE* op = (BYTE*)dst; + BYTE* oend = op + maxOutputSize; + + const BYTE* const prefixPtr = ctx->prefixStart; + const U32 prefixIdx = ctx->dictLimit; + const U32 ilimitIdx = (U32)(ilimit - prefixPtr) + prefixIdx; + const BYTE* const dictStart = ctx->dictStart; + const U32 dictIdx = ctx->lowLimit; + const U32 gDictEndIndex = ctx->lowLimit; + const LZ4MID_searchIntoDict_f searchIntoDict = (dict == usingDictCtxHc) ? 
select_searchDict_function(ctx->dictCtx) : NULL; + unsigned matchLength; + unsigned matchDistance; + + DEBUGLOG(5, "LZ4MID_compress (%i bytes)", *srcSizePtr); + + /* preconditions verifications */ + if (dict == usingDictCtxHc) DEBUGLOG(5, "usingDictCtxHc"); + assert(*srcSizePtr > 0); + assert(*srcSizePtr <= LZ4_MAX_INPUT_SIZE); + assert(src != NULL); + assert(maxOutputSize >= 1); + assert(dst != NULL); + + if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ + if (*srcSizePtr < LZ4_minLength) + goto _lz4mid_last_literals; /* Input too small, no compression (all literals) */ + + /* main loop */ + while (ip <= mflimit) { + const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; + /* search long match */ + { U32 const h8 = LZ4MID_hash8Ptr(ip); + U32 const pos8 = hash8Table[h8]; + assert(h8 < LZ4MID_HASHTABLESIZE); + assert(pos8 < ipIndex); + LZ4MID_addPosition(hash8Table, h8, ipIndex); + if (ipIndex - pos8 <= LZ4_DISTANCE_MAX) { + /* match candidate found */ + if (pos8 >= prefixIdx) { + const BYTE* const matchPtr = prefixPtr + pos8 - prefixIdx; + assert(matchPtr < ip); + matchLength = LZ4_count(ip, matchPtr, matchlimit); + if (matchLength >= MINMATCH) { + DEBUGLOG(7, "found long match at pos %u (len=%u)", pos8, matchLength); + matchDistance = ipIndex - pos8; + goto _lz4mid_encode_sequence; + } + } else { + if (pos8 >= dictIdx) { + /* extDict match candidate */ + const BYTE* const matchPtr = dictStart + (pos8 - dictIdx); + const size_t safeLen = MIN(prefixIdx - pos8, (size_t)(matchlimit - ip)); + matchLength = LZ4_count(ip, matchPtr, ip + safeLen); + if (matchLength >= MINMATCH) { + DEBUGLOG(7, "found long match at ExtDict pos %u (len=%u)", pos8, matchLength); + matchDistance = ipIndex - pos8; + goto _lz4mid_encode_sequence; + } + } + } + } } + /* search short match */ + { U32 const h4 = LZ4MID_hash4Ptr(ip); + U32 const pos4 = hash4Table[h4]; + assert(h4 < LZ4MID_HASHTABLESIZE); + assert(pos4 < ipIndex); + LZ4MID_addPosition(hash4Table, h4, ipIndex); + if (ipIndex - pos4 <= LZ4_DISTANCE_MAX) { + /* match candidate found */ + if (pos4 >= prefixIdx) { + /* only search within prefix */ + const BYTE* const matchPtr = prefixPtr + (pos4 - prefixIdx); + assert(matchPtr < ip); + assert(matchPtr >= prefixPtr); + matchLength = LZ4_count(ip, matchPtr, matchlimit); + if (matchLength >= MINMATCH) { + /* short match found, let's just check ip+1 for longer */ + U32 const h8 = LZ4MID_hash8Ptr(ip+1); + U32 const pos8 = hash8Table[h8]; + U32 const m2Distance = ipIndex + 1 - pos8; + matchDistance = ipIndex - pos4; + if ( m2Distance <= LZ4_DISTANCE_MAX + && pos8 >= prefixIdx /* only search within prefix */ + && likely(ip < mflimit) + ) { + const BYTE* const m2Ptr = prefixPtr + (pos8 - prefixIdx); + unsigned ml2 = LZ4_count(ip+1, m2Ptr, matchlimit); + if (ml2 > matchLength) { + LZ4MID_addPosition(hash8Table, h8, ipIndex+1); + ip++; + matchLength = ml2; + matchDistance = m2Distance; + } } + goto _lz4mid_encode_sequence; + } + } else { + if (pos4 >= dictIdx) { + /* extDict match candidate */ + const BYTE* const matchPtr = dictStart + (pos4 - dictIdx); + const size_t safeLen = MIN(prefixIdx - pos4, (size_t)(matchlimit - ip)); + matchLength = LZ4_count(ip, matchPtr, ip + safeLen); + if (matchLength >= MINMATCH) { + DEBUGLOG(7, "found match at ExtDict pos %u (len=%u)", pos4, matchLength); + matchDistance = ipIndex - pos4; + goto _lz4mid_encode_sequence; + } + } + } + } } + /* no match found in prefix */ + if ( (dict == usingDictCtxHc) + && (ipIndex - gDictEndIndex < 
LZ4_DISTANCE_MAX - 8) ) { + /* search a match into external dictionary */ + LZ4HC_match_t dMatch = searchIntoDict(ip, ipIndex, + matchlimit, + ctx->dictCtx, gDictEndIndex); + if (dMatch.len >= MINMATCH) { + DEBUGLOG(7, "found Dictionary match (offset=%i)", dMatch.off); + assert(dMatch.back == 0); + matchLength = (unsigned)dMatch.len; + matchDistance = (unsigned)dMatch.off; + goto _lz4mid_encode_sequence; + } + } + /* no match found */ + ip += 1 + ((ip-anchor) >> 9); /* skip faster over incompressible data */ + continue; + +_lz4mid_encode_sequence: + /* catch back */ + while (((ip > anchor) & ((U32)(ip-prefixPtr) > matchDistance)) && (unlikely(ip[-1] == ip[-(int)matchDistance-1]))) { + ip--; matchLength++; + }; + + /* fill table with beginning of match */ + ADDPOS8(ip+1, ipIndex+1); + ADDPOS8(ip+2, ipIndex+2); + ADDPOS4(ip+1, ipIndex+1); + + /* encode */ + { BYTE* const saved_op = op; + /* LZ4HC_encodeSequence always updates @op; on success, it updates @ip and @anchor */ + if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + (int)matchLength, (int)matchDistance, + limit, oend) ) { + op = saved_op; /* restore @op value before failed LZ4HC_encodeSequence */ + goto _lz4mid_dest_overflow; + } + } + + /* fill table with end of match */ + { U32 endMatchIdx = (U32)(ip-prefixPtr) + prefixIdx; + U32 pos_m2 = endMatchIdx - 2; + if (pos_m2 < ilimitIdx) { + if (likely(ip - prefixPtr > 5)) { + ADDPOS8(ip-5, endMatchIdx - 5); + } + ADDPOS8(ip-3, endMatchIdx - 3); + ADDPOS8(ip-2, endMatchIdx - 2); + ADDPOS4(ip-2, endMatchIdx - 2); + ADDPOS4(ip-1, endMatchIdx - 1); + } + } + } + +_lz4mid_last_literals: + /* Encode Last Literals */ + { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ + size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; + size_t const totalSize = 1 + llAdd + lastRunSize; + if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ + if (limit && (op + totalSize > oend)) { + if (limit == limitedOutput) return 0; /* not enough space in @dst */ + /* adapt lastRunSize to fill 'dest' */ + lastRunSize = (size_t)(oend - op) - 1 /*token*/; + llAdd = (lastRunSize + 256 - RUN_MASK) / 256; + lastRunSize -= llAdd; + } + DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); + ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = (RUN_MASK << ML_BITS); + for(; accumulator >= 255 ; accumulator -= 255) + *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + assert(lastRunSize <= (size_t)(oend - op)); + LZ4_memcpy(op, anchor, lastRunSize); + op += lastRunSize; + } + + /* End */ + DEBUGLOG(5, "compressed %i bytes into %i bytes", *srcSizePtr, (int)((char*)op - dst)); + assert(ip >= (const BYTE*)src); + assert(ip <= iend); + *srcSizePtr = (int)(ip - (const BYTE*)src); + assert((char*)op >= dst); + assert(op <= oend); + assert((char*)op - dst < INT_MAX); + return (int)((char*)op - dst); + +_lz4mid_dest_overflow: + if (limit == fillOutput) { + /* Assumption : @ip, @anchor, @optr and @matchLength must be set correctly */ + size_t const ll = (size_t)(ip - anchor); + size_t const ll_addbytes = (ll + 240) / 255; + size_t const ll_totalCost = 1 + ll_addbytes + ll; + BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ + DEBUGLOG(6, "Last sequence is overflowing : %u literals, %u remaining space", + (unsigned)ll, (unsigned)(oend-op)); + if (op + ll_totalCost <= maxLitPos) { + /* ll validated; now adjust match 
length */ + size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); + size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); + assert(maxMlSize < INT_MAX); + if ((size_t)matchLength > maxMlSize) matchLength= (unsigned)maxMlSize; + if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + matchLength >= MFLIMIT) { + DEBUGLOG(6, "Let's encode a last sequence (ll=%u, ml=%u)", (unsigned)ll, matchLength); + LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + (int)matchLength, (int)matchDistance, + notLimited, oend); + } } + DEBUGLOG(6, "Let's finish with a run of literals (%u bytes left)", (unsigned)(oend-op)); + goto _lz4mid_last_literals; + } + /* compression failed */ + return 0; +} + + +/************************************** +* HC Compression - Search +**************************************/ + +/* Update chains up to ip (excluded) */ +LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) +{ + U16* const chainTable = hc4->chainTable; + U32* const hashTable = hc4->hashTable; + const BYTE* const prefixPtr = hc4->prefixStart; + U32 const prefixIdx = hc4->dictLimit; + U32 const target = (U32)(ip - prefixPtr) + prefixIdx; + U32 idx = hc4->nextToUpdate; + assert(ip >= prefixPtr); + assert(target >= prefixIdx); + + while (idx < target) { + U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx); + size_t delta = idx - hashTable[h]; + if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX; + DELTANEXTU16(chainTable, idx) = (U16)delta; + hashTable[h] = idx; + idx++; + } + + hc4->nextToUpdate = target; +} + +#if defined(_MSC_VER) +# define LZ4HC_rotl32(x,r) _rotl(x,r) +#else +# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r))) +#endif + + +static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern) +{ + size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3; + if (bitsToRotate == 0) return pattern; + return LZ4HC_rotl32(pattern, (int)bitsToRotate); +} + +/* LZ4HC_countPattern() : + * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */ +static unsigned +LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) +{ + const BYTE* const iStart = ip; + reg_t const pattern = (sizeof(pattern)==8) ? + (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32; + + while (likely(ip < iEnd-(sizeof(pattern)-1))) { + reg_t const diff = LZ4_read_ARCH(ip) ^ pattern; + if (!diff) { ip+=sizeof(pattern); continue; } + ip += LZ4_NbCommonBytes(diff); + return (unsigned)(ip - iStart); + } + + if (LZ4_isLittleEndian()) { + reg_t patternByte = pattern; + while ((ip<iEnd) && (*ip==(BYTE)patternByte)) { + ip++; patternByte >>= 8; + } + } else { /* big endian */ + U32 bitOffset = (sizeof(pattern)*8) - 8; + while (ip < iEnd) { + BYTE const byte = (BYTE)(pattern >> bitOffset); + if (*ip != byte) break; + ip ++; bitOffset -= 8; + } } + + return (unsigned)(ip - iStart); +} + +/* LZ4HC_reverseCountPattern() : + * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) 
+ * read using natural platform endianness */ +static unsigned +LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) +{ + const BYTE* const iStart = ip; + + while (likely(ip >= iLow+4)) { + if (LZ4_read32(ip-4) != pattern) break; + ip -= 4; + } + { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */ + while (likely(ip>iLow)) { + if (ip[-1] != *bytePtr) break; + ip--; bytePtr--; + } } + return (unsigned)(iStart - ip); +} + +/* LZ4HC_protectDictEnd() : + * Checks if the match is in the last 3 bytes of the dictionary, so reading the + * 4 byte MINMATCH would overflow. + * @returns true if the match index is okay. + */ +static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex) +{ + return ((U32)((dictLimit - 1) - matchIndex) >= 3); +} + +typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; +typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e; + + +LZ4_FORCE_INLINE LZ4HC_match_t +LZ4HC_InsertAndGetWiderMatch ( + LZ4HC_CCtx_internal* const hc4, + const BYTE* const ip, + const BYTE* const iLowLimit, const BYTE* const iHighLimit, + int longest, + const int maxNbAttempts, + const int patternAnalysis, const int chainSwap, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ + U16* const chainTable = hc4->chainTable; + U32* const hashTable = hc4->hashTable; + const LZ4HC_CCtx_internal* const dictCtx = hc4->dictCtx; + const BYTE* const prefixPtr = hc4->prefixStart; + const U32 prefixIdx = hc4->dictLimit; + const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; + const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex); + const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX; + const BYTE* const dictStart = hc4->dictStart; + const U32 dictIdx = hc4->lowLimit; + const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx; + int const lookBackLength = (int)(ip-iLowLimit); + int nbAttempts = maxNbAttempts; + U32 matchChainPos = 0; + U32 const pattern = LZ4_read32(ip); + U32 matchIndex; + repeat_state_e repeat = rep_untested; + size_t srcPatternLength = 0; + int offset = 0, sBack = 0; + + DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch"); + /* First Match */ + LZ4HC_Insert(hc4, ip); /* insert all prior positions up to ip (excluded) */ + matchIndex = hashTable[LZ4HC_hashPtr(ip)]; + DEBUGLOG(7, "First candidate match for pos %u found at index %u / %u (lowestMatchIndex)", + ipIndex, matchIndex, lowestMatchIndex); + + while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) { + int matchLength=0; + nbAttempts--; + assert(matchIndex < ipIndex); + if (favorDecSpeed && (ipIndex - matchIndex < 8)) { + /* do nothing: + * favorDecSpeed intentionally skips matches with offset < 8 */ + } else if (matchIndex >= prefixIdx) { /* within current Prefix */ + const BYTE* const matchPtr = prefixPtr + (matchIndex - prefixIdx); + assert(matchPtr < ip); + assert(longest >= 1); + if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) { + if (LZ4_read32(matchPtr) == pattern) { + int const back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0; + matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); + matchLength -= back; + if (matchLength > longest) { + longest = matchLength; + offset = (int)(ipIndex - matchIndex); + sBack = back; + DEBUGLOG(7, "Found match of len=%i within prefix, offset=%i, back=%i", longest, offset, -back); + HEX_CMP(7, ip + back, ip + back - offset, (size_t)matchLength); + } } } + } else { /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */ + const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx); + assert(matchIndex >= dictIdx); + if ( likely(matchIndex <= prefixIdx - 4) + && (LZ4_read32(matchPtr) == pattern) ) { + int back = 0; + const BYTE* vLimit = ip + (prefixIdx - matchIndex); + if (vLimit > iHighLimit) vLimit = iHighLimit; + matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) + matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit); + back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0; + matchLength -= back; + if (matchLength > longest) { + longest = matchLength; + offset = (int)(ipIndex - matchIndex); + sBack = back; + DEBUGLOG(7, "Found match of len=%i within dict, offset=%i, back=%i", longest, offset, -back); + HEX_CMP(7, ip + back, matchPtr + back, (size_t)matchLength); + } } } + + if (chainSwap && matchLength==longest) { /* better match => select a better chain */ + assert(lookBackLength==0); /* search forward only */ + if (matchIndex + (U32)longest <= ipIndex) { + int const kTrigger = 4; + U32 distanceToNextMatch = 1; + int const end = longest - MINMATCH + 1; + int step = 1; + int accel = 1 << kTrigger; + int pos; + for (pos = 0; pos < end; pos += step) { + U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos); + step = (accel++ >> kTrigger); + if (candidateDist > distanceToNextMatch) { + distanceToNextMatch = candidateDist; + matchChainPos = (U32)pos; + accel = 1 << kTrigger; + } } + if (distanceToNextMatch > 1) { + if (distanceToNextMatch > matchIndex) break; /* avoid overflow */ + matchIndex -= distanceToNextMatch; + continue; + } } } + + { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex); + if (patternAnalysis && distNextMatch==1 && matchChainPos==0) { + U32 const matchCandidateIdx = matchIndex-1; + /* may be a repeated pattern */ + if (repeat == rep_untested) { + if ( ((pattern & 0xFFFF) == (pattern >> 16)) + & ((pattern & 0xFF) == (pattern >> 24)) ) { + DEBUGLOG(7, "Repeat pattern detected, char %02X", pattern >> 24); + repeat = rep_confirmed; + srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); + } else { + repeat = rep_not; + } } + if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex) + && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) { + const int extDict = matchCandidateIdx < prefixIdx; + const BYTE* const matchPtr = extDict ? dictStart + (matchCandidateIdx - dictIdx) : prefixPtr + (matchCandidateIdx - prefixIdx); + if (LZ4_read32(matchPtr) == pattern) { /* good candidate */ + const BYTE* const iLimit = extDict ? 
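+ /* Note (illustrative annotation, not upstream lz4 text): the repeat-pattern
+ * test above, ((pattern & 0xFFFF) == (pattern >> 16)) & ((pattern & 0xFF) ==
+ * (pattern >> 24)), holds exactly when all four bytes of pattern are equal:
+ * 0x5A5A5A5A passes both halves, while 0x5A5B5A5B passes the first but fails
+ * the second. Only single-byte runs are therefore handled as repeated
+ * patterns here. */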
dictEnd : iHighLimit; + size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern); + if (extDict && matchPtr + forwardPatternLength == iLimit) { + U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern); + forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern); + } + { const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr; + size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern); + size_t currentSegmentLength; + if (!extDict + && matchPtr - backLength == prefixPtr + && dictIdx < prefixIdx) { + U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern); + backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern); + } + /* Limit backLength not go further than lowestMatchIndex */ + backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex); + assert(matchCandidateIdx - backLength >= lowestMatchIndex); + currentSegmentLength = backLength + forwardPatternLength; + /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */ + if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */ + && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */ + U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ + if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) + matchIndex = newMatchIndex; + else { + /* Can only happen if started in the prefix */ + assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); + matchIndex = prefixIdx; + } + } else { + U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */ + if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) { + assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); + matchIndex = prefixIdx; + } else { + matchIndex = newMatchIndex; + if (lookBackLength==0) { /* no back possible */ + size_t const maxML = MIN(currentSegmentLength, srcPatternLength); + if ((size_t)longest < maxML) { + assert(prefixPtr - prefixIdx + matchIndex != ip); + if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break; + assert(maxML < 2 GB); + longest = (int)maxML; + offset = (int)(ipIndex - matchIndex); + assert(sBack == 0); + DEBUGLOG(7, "Found repeat pattern match of len=%i, offset=%i", longest, offset); + } + { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex); + if (distToNextPattern > matchIndex) break; /* avoid overflow */ + matchIndex -= distToNextPattern; + } } } } } + continue; + } } + } } /* PA optimization */ + + /* follow current chain */ + matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos); + + } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */ + + if ( dict == usingDictCtxHc + && nbAttempts > 0 + && withinStartDistance) { + size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit; + U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)]; + assert(dictEndOffset <= 1 GB); + matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset; + if (dictMatchIndex>0) DEBUGLOG(7, "dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i", 
dictEndOffset, dictMatchIndex, (int)dictMatchIndex - (int)dictEndOffset); + while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) { + const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex; + + if (LZ4_read32(matchPtr) == pattern) { + int mlt; + int back = 0; + const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex); + if (vLimit > iHighLimit) vLimit = iHighLimit; + mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0; + mlt -= back; + if (mlt > longest) { + longest = mlt; + offset = (int)(ipIndex - matchIndex); + sBack = back; + DEBUGLOG(7, "found match of length %i within extDictCtx", longest); + } } + + { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex); + dictMatchIndex -= nextOffset; + matchIndex -= nextOffset; + } } } + + { LZ4HC_match_t md; + assert(longest >= 0); + md.len = longest; + md.off = offset; + md.back = sBack; + return md; + } +} + +LZ4_FORCE_INLINE LZ4HC_match_t +LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */ + const BYTE* const ip, const BYTE* const iLimit, + const int maxNbAttempts, + const int patternAnalysis, + const dictCtx_directive dict) +{ + DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch"); + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio); +} + + +/* preconditions: + * - *srcSizePtr within [1, LZ4_MAX_INPUT_SIZE] + * - src is valid + * - maxOutputSize >= 1 + * - dst is valid + */ +LZ4_FORCE_INLINE int LZ4HC_compress_hashChain ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* srcSizePtr, + int const maxOutputSize, + int maxNbAttempts, + const limitedOutput_directive limit, + const dictCtx_directive dict + ) +{ + const int inputSize = *srcSizePtr; + const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */ + + const BYTE* ip = (const BYTE*)src; + const BYTE* anchor = ip; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = (iend - LASTLITERALS); + + BYTE* optr = (BYTE*) dst; + BYTE* op = (BYTE*) dst; + BYTE* oend = op + maxOutputSize; + + const BYTE* start0; + const BYTE* start2 = NULL; + const BYTE* start3 = NULL; + LZ4HC_match_t m0, m1, m2, m3; + const LZ4HC_match_t nomatch = {0, 0, 0}; + + /* init */ + DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict); + + /* preconditions verifications */ + assert(*srcSizePtr >= 1); + assert(src != NULL); + assert(maxOutputSize >= 1); + assert(dst != NULL); + + *srcSizePtr = 0; + if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ + if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ + + /* Main Loop */ + while (ip <= mflimit) { + m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, maxNbAttempts, patternAnalysis, dict); + if (m1.len<MINMATCH) { ip++; continue; } + + /* saved, in case we would skip too much */ + start0 = ip; m0 = m1; + +_Search2: + DEBUGLOG(7, "_Search2 (currently found match of size %i)", m1.len); + if (ip+m1.len <= mflimit) { + start2 = ip + m1.len - 2; + m2 = LZ4HC_InsertAndGetWiderMatch(ctx, + start2, ip + 0, matchlimit, m1.len, + maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio); + start2 += m2.back; + } else { + m2 = nomatch; /* do not search further */ + } + + if (m2.len <= m1.len) { /* No better match => encode ML1 immediately */ + optr = op; + if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + m1.len, m1.off, + limit, oend) ) + goto _dest_overflow; + continue; + } + + if (start0 < ip) { /* first match was skipped at 
least once */ + if (start2 < ip + m0.len) { /* squeezing ML1 between ML0(original ML1) and ML2 */ + ip = start0; m1 = m0; /* restore initial Match1 */ + } } + + /* Here, start0==ip */ + if ((start2 - ip) < 3) { /* First Match too small : removed */ + ip = start2; + m1 = m2; + goto _Search2; + } + +_Search3: + if ((start2 - ip) < OPTIMAL_ML) { + int correction; + int new_ml = m1.len; + if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; + if (ip+new_ml > start2 + m2.len - MINMATCH) + new_ml = (int)(start2 - ip) + m2.len - MINMATCH; + correction = new_ml - (int)(start2 - ip); + if (correction > 0) { + start2 += correction; + m2.len -= correction; + } + } + + if (start2 + m2.len <= mflimit) { + start3 = start2 + m2.len - 3; + m3 = LZ4HC_InsertAndGetWiderMatch(ctx, + start3, start2, matchlimit, m2.len, + maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio); + start3 += m3.back; + } else { + m3 = nomatch; /* do not search further */ + } + + if (m3.len <= m2.len) { /* No better match => encode ML1 and ML2 */ + /* ip & ref are known; Now for ml */ + if (start2 < ip+m1.len) m1.len = (int)(start2 - ip); + /* Now, encode 2 sequences */ + optr = op; + if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + m1.len, m1.off, + limit, oend) ) + goto _dest_overflow; + ip = start2; + optr = op; + if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + m2.len, m2.off, + limit, oend) ) { + m1 = m2; + goto _dest_overflow; + } + continue; + } + + if (start3 < ip+m1.len+3) { /* Not enough space for match 2 : remove it */ + if (start3 >= (ip+m1.len)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */ + if (start2 < ip+m1.len) { + int correction = (int)(ip+m1.len - start2); + start2 += correction; + m2.len -= correction; + if (m2.len < MINMATCH) { + start2 = start3; + m2 = m3; + } + } + + optr = op; + if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + m1.len, m1.off, + limit, oend) ) + goto _dest_overflow; + ip = start3; + m1 = m3; + + start0 = start2; + m0 = m2; + goto _Search2; + } + + start2 = start3; + m2 = m3; + goto _Search3; + } + + /* + * OK, now we have 3 ascending matches; + * let's write the first one ML1. + * ip & ref are known; Now decide ml. 
+ */ + if (start2 < ip+m1.len) { + if ((start2 - ip) < OPTIMAL_ML) { + int correction; + if (m1.len > OPTIMAL_ML) m1.len = OPTIMAL_ML; + if (ip + m1.len > start2 + m2.len - MINMATCH) + m1.len = (int)(start2 - ip) + m2.len - MINMATCH; + correction = m1.len - (int)(start2 - ip); + if (correction > 0) { + start2 += correction; + m2.len -= correction; + } + } else { + m1.len = (int)(start2 - ip); + } + } + optr = op; + if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), + m1.len, m1.off, + limit, oend) ) + goto _dest_overflow; + + /* ML2 becomes ML1 */ + ip = start2; m1 = m2; + + /* ML3 becomes ML2 */ + start2 = start3; m2 = m3; + + /* let's find a new ML3 */ + goto _Search3; + } + +_last_literals: + /* Encode Last Literals */ + { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ + size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; + size_t const totalSize = 1 + llAdd + lastRunSize; + if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ + if (limit && (op + totalSize > oend)) { + if (limit == limitedOutput) return 0; + /* adapt lastRunSize to fill 'dest' */ + lastRunSize = (size_t)(oend - op) - 1 /*token*/; + llAdd = (lastRunSize + 256 - RUN_MASK) / 256; + lastRunSize -= llAdd; + } + DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); + ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = (RUN_MASK << ML_BITS); + for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + LZ4_memcpy(op, anchor, lastRunSize); + op += lastRunSize; + } + + /* End */ + *srcSizePtr = (int) (((const char*)ip) - src); + return (int) (((char*)op)-dst); + +_dest_overflow: + if (limit == fillOutput) { + /* Assumption : @ip, @anchor, @optr and @m1 must be set correctly */ + size_t const ll = (size_t)(ip - anchor); + size_t const ll_addbytes = (ll + 240) / 255; + size_t const ll_totalCost = 1 + ll_addbytes + ll; + BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ + DEBUGLOG(6, "Last sequence overflowing"); + op = optr; /* restore correct out pointer */ + if (op + ll_totalCost <= maxLitPos) { + /* ll validated; now adjust match length */ + size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); + size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); + assert(maxMlSize < INT_MAX); assert(m1.len >= 0); + if ((size_t)m1.len > maxMlSize) m1.len = (int)maxMlSize; + if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + m1.len >= MFLIMIT) { + LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, notLimited, oend); + } } + goto _last_literals; + } + /* compression failed */ + return 0; +} + + +static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx, + const char* const source, char* dst, + int* srcSizePtr, int dstCapacity, + int const nbSearches, size_t sufficient_len, + const limitedOutput_directive limit, int const fullUpdate, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed); + +static int +LZ4HC_compress_generic_internal ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + const limitedOutput_directive limit, + const dictCtx_directive dict + ) +{ + DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d, dstCapacity=%d)", + src, *srcSizePtr, dstCapacity); + + /* input sanitization */ + if 
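+ /* Note (illustrative annotation, not upstream lz4 text): the single
+ * unsigned comparison below rejects oversized and negative inputs at once,
+ * since casting a negative int to U32 wraps to a huge value, e.g.
+ * (U32)-1 == 0xFFFFFFFF > LZ4_MAX_INPUT_SIZE (0x7E000000). */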
((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */ + if (dstCapacity < 1) return 0; /* Invalid: impossible to store anything */ + assert(dst); /* since dstCapacity >= 1, dst must be valid */ + if (*srcSizePtr == 0) { *dst = 0; return 1; } + assert(src != NULL); /* since *srcSizePtr >= 1, src must be valid */ + + ctx->end += *srcSizePtr; + { cParams_t const cParam = LZ4HC_getCLevelParams(cLevel); + HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio; + int result; + + if (cParam.strat == lz4mid) { + result = LZ4MID_compress(ctx, + src, dst, srcSizePtr, dstCapacity, + limit, dict); + } else if (cParam.strat == lz4hc) { + result = LZ4HC_compress_hashChain(ctx, + src, dst, srcSizePtr, dstCapacity, + cParam.nbSearches, limit, dict); + } else { + assert(cParam.strat == lz4opt); + result = LZ4HC_compress_optimal(ctx, + src, dst, srcSizePtr, dstCapacity, + cParam.nbSearches, cParam.targetLength, limit, + cLevel >= LZ4HC_CLEVEL_MAX, /* ultra mode */ + dict, favor); + } + if (result <= 0) ctx->dirty = 1; + return result; + } +} + +static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock); + +static int +LZ4HC_compress_generic_noDictCtx ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + limitedOutput_directive limit + ) +{ + assert(ctx->dictCtx == NULL); + return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx); +} + +static int isStateCompatible(const LZ4HC_CCtx_internal* ctx1, const LZ4HC_CCtx_internal* ctx2) +{ + int const isMid1 = LZ4HC_getCLevelParams(ctx1->compressionLevel).strat == lz4mid; + int const isMid2 = LZ4HC_getCLevelParams(ctx2->compressionLevel).strat == lz4mid; + return !(isMid1 ^ isMid2); +} + +static int +LZ4HC_compress_generic_dictCtx ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + limitedOutput_directive limit + ) +{ + const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit); + assert(ctx->dictCtx != NULL); + if (position >= 64 KB) { + ctx->dictCtx = NULL; + return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } else if (position == 0 && *srcSizePtr > 4 KB && isStateCompatible(ctx, ctx->dictCtx)) { + LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal)); + LZ4HC_setExternalDict(ctx, (const BYTE *)src); + ctx->compressionLevel = (short)cLevel; + return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } else { + return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc); + } +} + +static int +LZ4HC_compress_generic ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + limitedOutput_directive limit + ) +{ + if (ctx->dictCtx == NULL) { + return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } else { + return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } +} + + +int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); } + +static size_t LZ4_streamHC_t_alignment(void) +{ +#if LZ4_ALIGN_TEST + typedef struct { char c; LZ4_streamHC_t t; } t_a; + return 
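+ /* Note (illustrative annotation, not upstream lz4 text): the expression
+ * below derives alignment from struct padding: in t_a, member t sits at
+ * offset alignof(LZ4_streamHC_t), so total size minus member size equals
+ * the alignment. A minimal sketch of the same trick:
+ * typedef struct { char c; double d; } probe_t;
+ * size_t const dAlign = sizeof(probe_t) - sizeof(double);
+ * // dAlign == _Alignof(double), e.g. 8 on x86-64 */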
sizeof(t_a) - sizeof(LZ4_streamHC_t); +#else + return 1; /* effectively disabled */ +#endif +} + +/* state is presumed correctly initialized, + * in which case its size and alignment have already been validate */ +int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) +{ + LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse; + if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0; + LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel); + LZ4HC_init_internal (ctx, (const BYTE*)src); + if (dstCapacity < LZ4_compressBound(srcSize)) + return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput); + else + return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited); +} + +int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) +{ + LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); + if (ctx==NULL) return 0; /* init failure */ + return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel); +} + +int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) +{ + int cSize; +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); + if (statePtr==NULL) return 0; +#else + LZ4_streamHC_t state; + LZ4_streamHC_t* const statePtr = &state; +#endif + DEBUGLOG(5, "LZ4_compress_HC") + cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel); +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + FREEMEM(statePtr); +#endif + return cSize; +} + +/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */ +int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) +{ + LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); + if (ctx==NULL) return 0; /* init failure */ + LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source); + LZ4_setCompressionLevel(ctx, cLevel); + return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput); +} + + + +/************************************** +* Streaming Functions +**************************************/ +/* allocation */ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_streamHC_t* LZ4_createStreamHC(void) +{ + LZ4_streamHC_t* const state = + (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t)); + if (state == NULL) return NULL; + LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT); + return state; +} + +int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) +{ + DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr); + if (!LZ4_streamHCPtr) return 0; /* support free on NULL */ + FREEMEM(LZ4_streamHCPtr); + return 0; +} +#endif + + +LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size) +{ + LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer; + DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size); + /* check conditions */ + if (buffer == NULL) return NULL; + if (size < sizeof(LZ4_streamHC_t)) return NULL; + if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL; + /* init */ + { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse); + MEM_INIT(hcstate, 0, 
sizeof(*hcstate)); } + LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT); + return LZ4_streamHCPtr; +} + +/* just a stub */ +void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); + LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); +} + +void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse; + DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel); + if (s->dirty) { + LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); + } else { + assert(s->end >= s->prefixStart); + s->dictLimit += (U32)(s->end - s->prefixStart); + s->prefixStart = NULL; + s->end = NULL; + s->dictCtx = NULL; + } + LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); +} + +void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel); + if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT; + if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; + LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel; +} + +void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor) +{ + LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0); +} + +/* LZ4_loadDictHC() : + * LZ4_streamHCPtr is presumed properly initialized */ +int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, + const char* dictionary, int dictSize) +{ + LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + cParams_t cp; + DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d, clevel=%d)", LZ4_streamHCPtr, dictionary, dictSize, ctxPtr->compressionLevel); + assert(dictSize >= 0); + assert(LZ4_streamHCPtr != NULL); + if (dictSize > 64 KB) { + dictionary += (size_t)dictSize - 64 KB; + dictSize = 64 KB; + } + /* need a full initialization, there are bad side-effects when using resetFast() */ + { int const cLevel = ctxPtr->compressionLevel; + LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); + LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel); + cp = LZ4HC_getCLevelParams(cLevel); + } + LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary); + ctxPtr->end = (const BYTE*)dictionary + dictSize; + if (cp.strat == lz4mid) { + LZ4MID_fillHTable (ctxPtr, dictionary, (size_t)dictSize); + } else { + if (dictSize >= LZ4HC_HASHSIZE) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); + } + return dictSize; +} + +void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) { + working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? 
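+ /* Illustrative attach-usage sketch (not upstream lz4 text; dictBuf,
+ * dictLen, workStream and level are hypothetical, error handling omitted;
+ * both streams should use compatible levels, cf. isStateCompatible above):
+ * LZ4_streamHC_t* dictStream = LZ4_createStreamHC();
+ * LZ4_loadDictHC(dictStream, dictBuf, dictLen); // prepare once
+ * LZ4_resetStreamHC_fast(workStream, level);
+ * LZ4_attach_HC_dictionary(workStream, dictStream); // no dict copy
+ * LZ4_compress_HC_continue(workStream, src, dst, srcLen, dstCap);
+ */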
&(dictionary_stream->internal_donotuse) : NULL; +} + +/* compression */ + +static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock) +{ + DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock); + if ( (ctxPtr->end >= ctxPtr->prefixStart + 4) + && (LZ4HC_getCLevelParams(ctxPtr->compressionLevel).strat != lz4mid) ) { + LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ + } + + /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ + ctxPtr->lowLimit = ctxPtr->dictLimit; + ctxPtr->dictStart = ctxPtr->prefixStart; + ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart); + ctxPtr->prefixStart = newBlock; + ctxPtr->end = newBlock; + ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */ + + /* cannot reference an extDict and a dictCtx at the same time */ + ctxPtr->dictCtx = NULL; +} + +static int +LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr, + const char* src, char* dst, + int* srcSizePtr, int dstCapacity, + limitedOutput_directive limit) +{ + LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)", + LZ4_streamHCPtr, src, *srcSizePtr, limit); + assert(ctxPtr != NULL); + /* auto-init if forgotten */ + if (ctxPtr->prefixStart == NULL) + LZ4HC_init_internal (ctxPtr, (const BYTE*) src); + + /* Check overflow */ + if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) { + size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart); + if (dictSize > 64 KB) dictSize = 64 KB; + LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize); + } + + /* Check if blocks follow each other */ + if ((const BYTE*)src != ctxPtr->end) + LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src); + + /* Check overlapping input/dictionary space */ + { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr; + const BYTE* const dictBegin = ctxPtr->dictStart; + const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit); + if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) { + if (sourceEnd > dictEnd) sourceEnd = dictEnd; + ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart); + ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart); + /* invalidate dictionary is it's too small */ + if (ctxPtr->dictLimit - ctxPtr->lowLimit < LZ4HC_HASHSIZE) { + ctxPtr->lowLimit = ctxPtr->dictLimit; + ctxPtr->dictStart = ctxPtr->prefixStart; + } } } + + return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit); +} + +int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity) +{ + DEBUGLOG(5, "LZ4_compress_HC_continue"); + if (dstCapacity < LZ4_compressBound(srcSize)) + return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput); + else + return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited); +} + +int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize) +{ + return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput); +} + + +/* LZ4_saveDictHC : + * save history content + * into a user-provided buffer + * which is then used to continue compression + */ +int 
LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize) +{ + LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse; + int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart); + DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize); + assert(prefixSize >= 0); + if (dictSize > 64 KB) dictSize = 64 KB; + if (dictSize < 4) dictSize = 0; + if (dictSize > prefixSize) dictSize = prefixSize; + if (safeBuffer == NULL) assert(dictSize == 0); /* a NULL buffer with !0 size is invalid */ + if (dictSize > 0) + LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize); + { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit; + streamPtr->end = (safeBuffer == NULL) ? NULL : (const BYTE*)safeBuffer + dictSize; + streamPtr->prefixStart = (const BYTE*)safeBuffer; + streamPtr->dictLimit = endIndex - (U32)dictSize; + streamPtr->lowLimit = endIndex - (U32)dictSize; + streamPtr->dictStart = streamPtr->prefixStart; + if (streamPtr->nextToUpdate < streamPtr->dictLimit) + streamPtr->nextToUpdate = streamPtr->dictLimit; + } + return dictSize; +} + + +/* ================================================ + * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX]) + * ===============================================*/ +typedef struct { + int price; + int off; + int mlen; + int litlen; +} LZ4HC_optimal_t; + +/* price in bytes */ +LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) +{ + int price = litlen; + assert(litlen >= 0); + if (litlen >= (int)RUN_MASK) + price += 1 + ((litlen-(int)RUN_MASK) / 255); + return price; +} + +/* requires mlen >= MINMATCH */ +LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) +{ + int price = 1 + 2 ; /* token + 16-bit offset */ + assert(litlen >= 0); + assert(mlen >= MINMATCH); + + price += LZ4HC_literalsPrice(litlen); + + if (mlen >= (int)(ML_MASK+MINMATCH)) + price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255); + + return price; +} + +LZ4_FORCE_INLINE LZ4HC_match_t +LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, + const BYTE* ip, const BYTE* const iHighLimit, + int minLen, int nbSearches, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ + LZ4HC_match_t const match0 = { 0 , 0, 0 }; + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + ** so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed); + assert(md.back == 0); + if (md.len <= minLen) return match0; + if (favorDecSpeed) { + if ((md.len>18) & (md.len<=36)) md.len=18; /* favor dec.speed (shortcut) */ + } + return md; +} + + + +/* preconditions: + * - *srcSizePtr within [1, LZ4_MAX_INPUT_SIZE] + * - src is valid + * - maxOutputSize >= 1 + * - dst is valid + */ +static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx, + const char* const source, + char* dst, + int* srcSizePtr, + int dstCapacity, + int const nbSearches, + size_t sufficient_len, + const limitedOutput_directive limit, + int const fullUpdate, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ + int retval = 0; +#define TRAILING_LITERALS 3 +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * 
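+ /* Worked cost example for the price model above (illustrative annotation,
+ * not upstream lz4 text; LZ4 format constants RUN_MASK = 15, ML_MASK = 15,
+ * MINMATCH = 4): a sequence of 20 literals followed by a 40-byte match costs
+ * LZ4HC_sequencePrice(20, 40)
+ * = 3 (token + 16-bit offset)
+ * + 21 (20 literal bytes + 1 length-extension byte, since 20 >= 15)
+ * + 1 (match-length extension byte, since 40 >= 15 + 4)
+ * = 25 bytes. */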
(LZ4_OPT_NUM + TRAILING_LITERALS)); +#else + LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which can be a bit large for some stacks... */ +#endif + + const BYTE* ip = (const BYTE*) source; + const BYTE* anchor = ip; + const BYTE* const iend = ip + *srcSizePtr; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + BYTE* op = (BYTE*) dst; + BYTE* opSaved = (BYTE*) dst; + BYTE* oend = op + dstCapacity; + int ovml = MINMATCH; /* overflow - last sequence */ + int ovoff = 0; + + /* init */ + DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity); +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + if (opt == NULL) goto _return_label; +#endif + + /* preconditions verifications */ + assert(dstCapacity > 0); + assert(dst != NULL); + assert(*srcSizePtr > 0); + assert(source != NULL); + + *srcSizePtr = 0; + if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ + if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; + + /* Main Loop */ + while (ip <= mflimit) { + int const llen = (int)(ip - anchor); + int best_mlen, best_off; + int cur, last_match_pos = 0; + + LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); + if (firstMatch.len==0) { ip++; continue; } + + if ((size_t)firstMatch.len > sufficient_len) { + /* good enough solution : immediate encoding */ + int const firstML = firstMatch.len; + opSaved = op; + if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, firstMatch.off, limit, oend) ) { /* updates ip, op and anchor */ + ovml = firstML; + ovoff = firstMatch.off; + goto _dest_overflow; + } + continue; + } + + /* set prices for first positions (literals) */ + { int rPos; + for (rPos = 0 ; rPos < MINMATCH ; rPos++) { + int const cost = LZ4HC_literalsPrice(llen + rPos); + opt[rPos].mlen = 1; + opt[rPos].off = 0; + opt[rPos].litlen = llen + rPos; + opt[rPos].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + rPos, cost, opt[rPos].litlen); + } } + /* set prices using initial match */ + { int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ + int const offset = firstMatch.off; + int mlen; + assert(matchML < LZ4_OPT_NUM); + for (mlen = MINMATCH ; mlen <= matchML ; mlen++) { + int const cost = LZ4HC_sequencePrice(llen, mlen); + opt[mlen].mlen = mlen; + opt[mlen].off = offset; + opt[mlen].litlen = llen; + opt[mlen].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", + mlen, cost, mlen); + } } + last_match_pos = firstMatch.len; + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + + /* check further positions */ + for (cur = 1; cur < last_match_pos; cur++) { + const BYTE* const curPtr = ip + cur; + LZ4HC_match_t newMatch; + + if (curPtr > mflimit) break; + DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", + cur, opt[cur].price, opt[cur+1].price, cur+1); + if (fullUpdate) { + /* not useful to search here if next position has same (or lower) cost */ + if ( (opt[cur+1].price <= opt[cur].price) + /* in some 
cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */ + && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) + continue; + } else { + /* not useful to search here if next position has same (or lower) cost */ + if (opt[cur+1].price <= opt[cur].price) continue; + } + + DEBUGLOG(7, "search at rPos:%u", cur); + if (fullUpdate) + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); + else + /* only test matches of minimum length; slightly faster, but misses a few bytes */ + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed); + if (!newMatch.len) continue; + + if ( ((size_t)newMatch.len > sufficient_len) + || (newMatch.len + cur >= LZ4_OPT_NUM) ) { + /* immediate encoding */ + best_mlen = newMatch.len; + best_off = newMatch.off; + last_match_pos = cur + 1; + goto encode; + } + + /* before match : set price with literals at beginning */ + { int const baseLitlen = opt[cur].litlen; + int litlen; + for (litlen = 1; litlen < MINMATCH; litlen++) { + int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); + int const pos = cur + litlen; + if (price < opt[pos].price) { + opt[pos].mlen = 1; /* literal */ + opt[pos].off = 0; + opt[pos].litlen = baseLitlen+litlen; + opt[pos].price = price; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", + pos, price, opt[pos].litlen); + } } } + + /* set prices using match at position = cur */ + { int const matchML = newMatch.len; + int ml = MINMATCH; + + assert(cur + newMatch.len < LZ4_OPT_NUM); + for ( ; ml <= matchML ; ml++) { + int const pos = cur + ml; + int const offset = newMatch.off; + int price; + int ll; + DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", + pos, last_match_pos); + if (opt[cur].mlen == 1) { + ll = opt[cur].litlen; + price = ((cur > ll) ? 
opt[cur - ll].price : 0) + + LZ4HC_sequencePrice(ll, ml); + } else { + ll = 0; + price = opt[cur].price + LZ4HC_sequencePrice(0, ml); + } + + assert((U32)favorDecSpeed <= 1); + if (pos > last_match_pos+TRAILING_LITERALS + || price <= opt[pos].price - (int)favorDecSpeed) { + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", + pos, price, ml); + assert(pos < LZ4_OPT_NUM); + if ( (ml == matchML) /* last pos of last match */ + && (last_match_pos < pos) ) + last_match_pos = pos; + opt[pos].mlen = ml; + opt[pos].off = offset; + opt[pos].litlen = ll; + opt[pos].price = price; + } } } + /* complete following positions with literals */ + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + } /* for (cur = 1; cur <= last_match_pos; cur++) */ + + assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS); + best_mlen = opt[last_match_pos].mlen; + best_off = opt[last_match_pos].off; + cur = last_match_pos - best_mlen; + +encode: /* cur, last_match_pos, best_mlen, best_off must be set */ + assert(cur < LZ4_OPT_NUM); + assert(last_match_pos >= 1); /* == 1 when only one candidate */ + DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos); + { int candidate_pos = cur; + int selected_matchLength = best_mlen; + int selected_offset = best_off; + while (1) { /* from end to beginning */ + int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ + int const next_offset = opt[candidate_pos].off; + DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength); + opt[candidate_pos].mlen = selected_matchLength; + opt[candidate_pos].off = selected_offset; + selected_matchLength = next_matchLength; + selected_offset = next_offset; + if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */ + assert(next_matchLength > 0); /* can be 1, means literal */ + candidate_pos -= next_matchLength; + } } + + /* encode all recorded sequences in order */ + { int rPos = 0; /* relative position (to ip) */ + while (rPos < last_match_pos) { + int const ml = opt[rPos].mlen; + int const offset = opt[rPos].off; + if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */ + rPos += ml; + assert(ml >= MINMATCH); + assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX)); + opSaved = op; + if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, offset, limit, oend) ) { /* updates ip, op and anchor */ + ovml = ml; + ovoff = offset; + goto _dest_overflow; + } } } + } /* while (ip <= mflimit) */ + +_last_literals: + /* Encode Last Literals */ + { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ + size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; + size_t const totalSize = 1 + llAdd + lastRunSize; + if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ + if (limit && (op + totalSize > oend)) { + if (limit == limitedOutput) { /* Check output limit */ + retval = 0; + goto _return_label; + } + /* adapt lastRunSize to fill 'dst' */ + lastRunSize = (size_t)(oend - op) - 1 /*token*/; + llAdd = (lastRunSize + 256 - RUN_MASK) / 256; + lastRunSize -= llAdd; + } + 
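+ /* Worked example for the run encoding below (illustrative annotation, not
+ * upstream lz4 text; RUN_MASK = 15): lastRunSize = 300 emits token 0xF0
+ * (RUN_MASK << ML_BITS), then extension bytes 255 and 30
+ * (15 + 255 + 30 = 300), i.e. 2 extra bytes, matching
+ * llAdd = (300 + 255 - 15) / 255 = 2 in the size estimate above. */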
DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); + ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = (RUN_MASK << ML_BITS); + for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + LZ4_memcpy(op, anchor, lastRunSize); + op += lastRunSize; + } + + /* End */ + *srcSizePtr = (int) (((const char*)ip) - source); + retval = (int) ((char*)op-dst); + goto _return_label; + +_dest_overflow: +if (limit == fillOutput) { + /* Assumption : ip, anchor, ovml and ovref must be set correctly */ + size_t const ll = (size_t)(ip - anchor); + size_t const ll_addbytes = (ll + 240) / 255; + size_t const ll_totalCost = 1 + ll_addbytes + ll; + BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ + DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved)); + op = opSaved; /* restore correct out pointer */ + if (op + ll_totalCost <= maxLitPos) { + /* ll validated; now adjust match length */ + size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); + size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); + assert(maxMlSize < INT_MAX); assert(ovml >= 0); + if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize; + if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) { + DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml); + DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor); + LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovoff, notLimited, oend); + DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor); + } } + goto _last_literals; +} +_return_label: +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + if (opt) FREEMEM(opt); +#endif + return retval; +} + + +/*************************************************** +* Deprecated Functions +***************************************************/ + +/* These functions currently generate deprecation warnings */ + +/* Wrappers for deprecated compression functions */ +int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); } +int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); } +int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } +int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); } +int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); } +int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); } +int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } +int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return 
LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); } +int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); } +int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); } + + +/* Deprecated streaming functions */ +int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); } + +/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t) + * @return : 0 on success, !=0 if error */ +int LZ4_resetStreamStateHC(void* state, char* inputBuffer) +{ + LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4)); + if (hc4 == NULL) return 1; /* init failed */ + LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); + return 0; +} + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +void* LZ4_createHC (const char* inputBuffer) +{ + LZ4_streamHC_t* const hc4 = LZ4_createStreamHC(); + if (hc4 == NULL) return NULL; /* not enough memory */ + LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); + return hc4; +} + +int LZ4_freeHC (void* LZ4HC_Data) +{ + if (!LZ4HC_Data) return 0; /* support free on NULL */ + FREEMEM(LZ4HC_Data); + return 0; +} +#endif + +int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel) +{ + return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited); +} + +int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel) +{ + return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput); +} + +char* LZ4_slideInputBufferHC(void* LZ4HC_Data) +{ + LZ4HC_CCtx_internal* const s = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; + const BYTE* const bufferStart = s->prefixStart - s->dictLimit + s->lowLimit; + LZ4_resetStreamHC_fast((LZ4_streamHC_t*)LZ4HC_Data, s->compressionLevel); + /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */ + return (char*)(uptrval)bufferStart; +} diff --git a/tools/lib/lz4/lz4hc.h b/tools/lib/lz4/lz4hc.h new file mode 100644 index 0000000..1f323f5 --- /dev/null +++ b/tools/lib/lz4/lz4hc.h @@ -0,0 +1,421 @@ +/* + LZ4 HC - High Compression Mode of LZ4 + Header File + Copyright (c) Yann Collet. All rights reserved. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 source repository : https://github.com/lz4/lz4 + - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c +*/ +#ifndef LZ4_HC_H_19834876238432 +#define LZ4_HC_H_19834876238432 + +#if defined (__cplusplus) +extern "C" { +#endif + +/* --- Dependency --- */ +/* note : lz4hc requires lz4.h/lz4.c for compilation */ +#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */ + + +/* --- Useful constants --- */ +#define LZ4HC_CLEVEL_MIN 2 +#define LZ4HC_CLEVEL_DEFAULT 9 +#define LZ4HC_CLEVEL_OPT_MIN 10 +#define LZ4HC_CLEVEL_MAX 12 + + +/*-************************************ + * Block Compression + **************************************/ +/*! LZ4_compress_HC() : + * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm. + * `dst` must be already allocated. + * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") + * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") + * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work. + * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. + * @return : the number of bytes written into 'dst' + * or 0 if compression fails. + */ +LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel); + + +/* Note : + * Decompression functions are provided within "lz4.h" (BSD license) + */ + + +/*! LZ4_compress_HC_extStateHC() : + * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. + * `state` size is provided by LZ4_sizeofStateHC(). + * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly). + */ +LZ4LIB_API int LZ4_sizeofStateHC(void); +LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); + + +/*! LZ4_compress_HC_destSize() : v1.9.0+ + * Will compress as much data as possible from `src` + * to fit into `targetDstSize` budget. + * Result is provided in 2 parts : + * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) + * or 0 if compression fails. + * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src` + */ +LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC, + const char* src, char* dst, + int* srcSizePtr, int targetDstSize, + int compressionLevel); + + +/*-************************************ + * Streaming Compression + * Bufferless synchronous API + **************************************/ + typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */ + +/*! LZ4_createStreamHC() and LZ4_freeStreamHC() : + * These functions create and release memory for LZ4 HC streaming state. + * Newly created states are automatically initialized. + * A same state can be used multiple times consecutively, + * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks. 
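+ *
+ * Illustrative lifecycle sketch (editor's addition, not upstream text):
+ *   LZ4_streamHC_t* s = LZ4_createStreamHC();     // allocated + initialized
+ *   LZ4_resetStreamHC_fast(s, LZ4HC_CLEVEL_MAX);  // begin a new stream
+ *   ... one LZ4_compress_HC_continue() call per block ...
+ *   LZ4_freeStreamHC(s);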
+ */ +LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void); +LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr); + +/* + These functions compress data in successive blocks of any size, + using previous blocks as dictionary, to improve compression ratio. + One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks. + There is an exception for ring buffers, which can be smaller than 64 KB. + Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue(). + + Before starting compression, state must be allocated and properly initialized. + LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT. + + Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream) + or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental). + LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once, + which is automatically the case when state is created using LZ4_createStreamHC(). + + After reset, a first "fictional block" can be designated as initial dictionary, + using LZ4_loadDictHC() (Optional). + Note: In order for LZ4_loadDictHC() to create the correct data structure, + it is essential to set the compression level _before_ loading the dictionary. + + Invoke LZ4_compress_HC_continue() to compress each successive block. + The number of blocks is unlimited. + Previous input blocks, including initial dictionary when present, + must remain accessible and unmodified during compression. + + It's allowed to update compression level anytime between blocks, + using LZ4_setCompressionLevel() (experimental). + + @dst buffer should be sized to handle worst case scenarios + (see LZ4_compressBound(), it ensures compression success). + In case of failure, the API does not guarantee recovery, + so the state _must_ be reset. + To ensure compression success + whenever @dst buffer size cannot be made >= LZ4_compressBound(), + consider using LZ4_compress_HC_continue_destSize(). + + Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks, + it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC(). + Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB) + + After completing a streaming compression, + it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state, + just by resetting it, using LZ4_resetStreamHC_fast(). +*/ + +LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */ +LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize); + +LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, + const char* src, char* dst, + int srcSize, int maxDstSize); + +/*! LZ4_compress_HC_continue_destSize() : v1.9.0+ + * Similar to LZ4_compress_HC_continue(), + * but will read as much data as possible from `src` + * to fit into `targetDstSize` budget. + * Result is provided into 2 parts : + * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) + * or 0 if compression fails. + * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`. + * Note that this function may not consume the entire input. 
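+ *
+ * Minimal streaming sketch (editor's addition, not upstream text); it
+ * assumes each input chunk remains readable and unmodified until the next
+ * block has been compressed, as the notes above require:
+ *   LZ4_streamHC_t* s = LZ4_createStreamHC();
+ *   while (read_chunk(inBuf, &inSize)) {              // hypothetical I/O
+ *       int n = LZ4_compress_HC_continue(s, inBuf, outBuf, inSize,
+ *                                        LZ4_compressBound(inSize));
+ *       if (n <= 0) break;       // failure: reset the state before reuse
+ *       write_chunk(outBuf, n);                       // hypothetical I/O
+ *   }
+ *   LZ4_freeStreamHC(s);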
+ */ +LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr, + const char* src, char* dst, + int* srcSizePtr, int targetDstSize); + +/*! LZ4_saveDictHC(): + * save history content (up to 64 KB) into a user-provided buffer @param safeBuffer. + * @return value is the size of dictionary effectively saved (<= MIN(maxDictSize, 64 KB)). + * This function is always successful, and expects its arguments to be valid. + * To this end, let's remind that (NULL,0) is valid, + * but (NULL,!0) is not, and will result in a segfault (or assert() depending on compilation mode). + */ +LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize); + + +/*! LZ4_attach_HC_dictionary() : stable since v1.10.0 + * This API allows for the efficient re-use of a static dictionary many times. + * + * Rather than re-loading the dictionary buffer into a working context before + * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a + * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism, + * in which the working stream references the dictionary stream in-place. + * + * Several assumptions are made about the state of the dictionary stream. + * Currently, only streams which have been prepared by LZ4_loadDictHC() should + * be expected to work. + * + * Alternatively, the provided dictionary stream pointer may be NULL, in which + * case any existing dictionary stream is unset. + * + * A dictionary should only be attached to a stream without any history (i.e., + * a stream that has just been reset). + * + * The dictionary will remain attached to the working stream only for the + * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the + * dictionary context association from the working stream. The dictionary + * stream (and source buffer) must remain in-place / accessible / unchanged + * through the lifetime of the stream session. + */ +LZ4LIB_API void +LZ4_attach_HC_dictionary(LZ4_streamHC_t* working_stream, + const LZ4_streamHC_t* dictionary_stream); + + +/*^********************************************** + * !!!!!! STATIC LINKING ONLY !!!!!! + ***********************************************/ + +/*-****************************************************************** + * PRIVATE DEFINITIONS : + * Do not use these definitions directly. + * They are merely exposed to allow static allocation of `LZ4_streamHC_t`. + * Declare an `LZ4_streamHC_t` directly, rather than any type below. + * Even then, only do so in the context of static linking, as definitions may change between versions. + ********************************************************************/ + +#define LZ4HC_DICTIONARY_LOGSIZE 16 +#define LZ4HC_MAXD (1<= LZ4HC_CLEVEL_OPT_MIN. + */ +LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed( + LZ4_streamHC_t* LZ4_streamHCPtr, int favor); + +/*! LZ4_resetStreamHC_fast() : v1.9.0+ + * When an LZ4_streamHC_t is known to be in a internally coherent state, + * it can often be prepared for a new compression with almost no work, only + * sometimes falling back to the full, expensive reset that is always required + * when the stream is in an indeterminate state (i.e., the reset performed by + * LZ4_resetStreamHC()). 
+ * + * LZ4_streamHCs are guaranteed to be in a valid state when: + * - returned from LZ4_createStreamHC() + * - reset by LZ4_resetStreamHC() + * - memset(stream, 0, sizeof(LZ4_streamHC_t)) + * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast() + * - the stream was in a valid state and was then used in any compression call + * that returned success + * - the stream was in an indeterminate state and was used in a compression + * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that + * returned success + * + * Note: + * A stream that was last used in a compression call that returned an error + * may be passed to this function. However, it will be fully reset, which will + * clear any existing history and settings from the context. + */ +LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast( + LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel); + +/*! LZ4_compress_HC_extStateHC_fastReset() : + * A variant of LZ4_compress_HC_extStateHC(). + * + * Using this variant avoids an expensive initialization step. It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see above comment on LZ4_resetStreamHC_fast() for a definition of + * "correctly initialized"). From a high level, the difference is that this + * function initializes the provided state with a call to + * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a + * call to LZ4_resetStreamHC(). + */ +LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset ( + void* state, + const char* src, char* dst, + int srcSize, int dstCapacity, + int compressionLevel); + +#if defined (__cplusplus) +} +#endif + +#endif /* LZ4_HC_SLO_098092834 */ +#endif /* LZ4_HC_STATIC_LINKING_ONLY */ diff --git a/tools/lib/lz4/xxhash.c b/tools/lib/lz4/xxhash.c new file mode 100644 index 0000000..e132619 --- /dev/null +++ b/tools/lib/lz4/xxhash.c @@ -0,0 +1,1030 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7S__) )) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. + * When this macro is enabled, xxHash actively checks input for null pointer. + * It it is, result for null input pointers is the same as a null-length input. + */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#endif + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independence be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. + * The check costs one initial branch per hash; + * set it to 0 when the input is guaranteed to be aligned, + * or when alignment doesn't matter for performance. 
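+ * (Editor's note: like the other tuning switches in this file, it is
+ * normally set on the compiler command line, e.g. -DXXH_FORCE_ALIGN_CHECK=0
+ * when inputs are known to be aligned.)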
+ */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/*! Modify the local functions below should you wish to use some other memory routines +* for malloc(), free() */ +#include +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/*! and for memcpy() */ +#include +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#include /* assert */ + +#define XXH_STATIC_LINKING_ONLY +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#if defined (_MSC_VER) && !defined (__clang__) /* MSVC */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# define FORCE_INLINE static __forceinline +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# if defined (__GNUC__) || defined (__clang__) +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; +# endif +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; } __attribute__((packed)) unalign; +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianness; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN +static int XXH_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianness endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianness endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +/* mix all bits */ +static U32 XXH32_avalanche(U32 h32) +{ + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +static U32 +XXH32_finalize(U32 h32, const void* ptr, size_t len, + XXH_endianness endian, XXH_alignment align) + +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1 \ + h32 += (*p++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + +#define PROCESS4 \ + h32 += XXH_get32bits(p) * PRIME32_3; \ + p+=4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + + switch(len&15) /* or switch(bEnd - p) */ + { + case 12: PROCESS4; + /* fallthrough */ + case 8: PROCESS4; + /* fallthrough */ + case 4: PROCESS4; + return XXH32_avalanche(h32); + + case 13: PROCESS4; + /* fallthrough */ + case 9: PROCESS4; + /* fallthrough */ + case 5: PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: PROCESS4; + /* fallthrough */ + case 10: PROCESS4; + /* fallthrough */ + case 6: PROCESS4; + PROCESS1; + PROCESS1; + return XXH32_avalanche(h32); + + case 15: PROCESS4; + /* fallthrough */ + case 11: PROCESS4; + /* fallthrough */ + case 7: PROCESS4; + /* fallthrough */ + case 3: PROCESS1; + /* fallthrough */ + case 2: PROCESS1; + /* fallthrough */ + case 1: PROCESS1; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + assert(0); + return h32; /* reaching this point is deemed impossible */ +} + + +FORCE_INLINE U32 +XXH32_endian_align(const void* input, size_t len, U32 seed, + XXH_endianness endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 15; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32)len; + + return XXH32_finalize(h32, p, len&15, endian, align); +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, 
seed); + XXH32_update(&state, input, len); + return XXH32_digest(&state); +#else + XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + + +/*====== Hash streaming ======*/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode +XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianness endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH32_update 
(XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + +FORCE_INLINE U32 +XXH32_digest_endian (const XXH32_state_t* state, XXH_endianness endian) +{ + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, remaining comparable across different systems. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ + +/*====== Memory access ======*/ + +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint64_t U64; +# else + /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ + typedef unsigned long long U64; +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; +static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianness endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianness endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/*====== xxh64 ======*/ + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +static U64 XXH64_avalanche(U64 h64) +{ + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +static U64 +XXH64_finalize(U64 h64, const void* ptr, size_t len, + XXH_endianness endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1_64 \ + h64 ^= (*p++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ + p+=4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 { \ + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ + p+=8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ +} + + switch(len&31) { + case 24: PROCESS8_64; + /* fallthrough */ + case 16: PROCESS8_64; + /* fallthrough */ + case 8: PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: PROCESS8_64; + /* fallthrough */ + case 20: PROCESS8_64; + /* fallthrough */ + case 12: PROCESS8_64; + /* fallthrough */ + case 4: PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: PROCESS8_64; + /* fallthrough */ + case 17: PROCESS8_64; + /* fallthrough */ + case 9: PROCESS8_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: PROCESS8_64; + /* fallthrough */ + case 21: PROCESS8_64; + /* fallthrough */ + case 13: PROCESS8_64; + /* fallthrough */ + case 5: PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: PROCESS8_64; + /* fallthrough */ + case 18: PROCESS8_64; + /* fallthrough */ + case 10: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + 
return XXH64_avalanche(h64); + + case 30: PROCESS8_64; + /* fallthrough */ + case 22: PROCESS8_64; + /* fallthrough */ + case 14: PROCESS8_64; + /* fallthrough */ + case 6: PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: PROCESS8_64; + /* fallthrough */ + case 19: PROCESS8_64; + /* fallthrough */ + case 11: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: PROCESS8_64; + /* fallthrough */ + case 23: PROCESS8_64; + /* fallthrough */ + case 15: PROCESS8_64; + /* fallthrough */ + case 7: PROCESS4_64; + /* fallthrough */ + case 3: PROCESS1_64; + /* fallthrough */ + case 2: PROCESS1_64; + /* fallthrough */ + case 1: PROCESS1_64; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + + /* impossible to reach */ + assert(0); + return 0; /* unreachable, but some compilers complain without it */ +} + +FORCE_INLINE U64 +XXH64_endian_align(const void* input, size_t len, U64 seed, + XXH_endianness endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + return XXH64_finalize(h64, p, len, endian, align); +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, input, len); + return XXH64_digest(&state); +#else + XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + +/*====== Hash Streaming ======*/ + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) 
+{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + +FORCE_INLINE XXH_errorcode +XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianness endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianness endian) +{ + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + PRIME64_5; + } + + h64 += (U64) state->total_len; + + return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); +} + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== 
Canonical representation ======*/ + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#endif /* XXH_NO_LONG_LONG */ diff --git a/tools/lib/lz4/xxhash.h b/tools/lib/lz4/xxhash.h new file mode 100644 index 0000000..d6bad94 --- /dev/null +++ b/tools/lib/lz4/xxhash.h @@ -0,0 +1,328 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. 
+Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* **************************** +* Definitions +******************************/ +#include /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** + * API modifier + ******************************/ +/** XXH_INLINE_ALL (and XXH_PRIVATE_API) + * This is useful to include xxhash functions in `static` mode + * in order to inline them, and remove their symbol from the public list. + * Inlining can offer dramatic performance improvement on small keys. + * Methodology : + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * `xxhash.c` is automatically included. + * It's not useful to compile and link it as a separate module. + */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/*! XXH_NAMESPACE, aka Namespace Emulation : + * + * If you want to include _and expose_ xxHash functions from within your own library, + * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library + * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * + * Note that no change is required within the calling program as long as it includes `xxhash.h` : + * regular symbol name will be automatically translated by this header. 
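+ *
+ * Illustration (editor's addition, with a hypothetical prefix): compiling
+ * with -DXXH_NAMESPACE=KP_ makes a call written as XXH32(buf, len, 0)
+ * resolve to the exported symbol KP_XXH32, so two copies of xxHash can
+ * coexist in one binary without clashing.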
+ */ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 5 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/*-********************************************************************** +* 32-bit hash +************************************************************************/ +typedef unsigned int XXH32_hash_t; + +/*! XXH32() : + Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); + +/*====== Streaming ======*/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +/* + * Streaming functions generate the xxHash of an input provided in multiple segments. + * Note that, for small input, they are slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * XXH state must first be allocated, using XXH*_createState() . + * + * Start a new hash by initializing state with a seed, using XXH*_reset(). + * + * Then, feed the hash state by calling XXH*_update() as many times as necessary. 
+ * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using XXH*_digest(). + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a digest, + * and generate some new hashes later on, by calling again XXH*_digest(). + * + * When done, free XXH state space if it was allocated dynamically. + */ + +/*====== Canonical representation ======*/ + +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. + * The canonical representation uses human-readable write convention, aka big-endian (large digits first). + * These functions allow transformation of hash result into and from its canonical format. + * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. + */ + + +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** +* 64-bit hash +************************************************************************/ +typedef unsigned long long XXH64_hash_t; + +/*! XXH64() : + Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). +*/ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*====== Streaming ======*/ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/*====== Canonical representation ======*/ +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); +#endif /* XXH_NO_LONG_LONG */ + + + +#ifdef XXH_STATIC_LINKING_ONLY + +/* ================================================================================================ + This section contains declarations which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + These declarations should only be used with static linking. + Never use them in association with dynamic linking ! +=================================================================================================== */ + +/* These definitions are only present to allow + * static allocation of XXH state, on stack or in a struct for example. + * Never **ever** use members directly. 
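+ *
+ * Editor's sketch of the intended use (static allocation only, never
+ * direct member access), assuming a buffer `buf` of `n` bytes:
+ *   XXH32_state_t st;               // on the stack, no XXH32_createState()
+ *   XXH32_reset(&st, 0);
+ *   XXH32_update(&st, buf, n);
+ *   XXH32_hash_t h = XXH32_digest(&st);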
+ */
+
+#if !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+
+struct XXH32_state_s {
+    uint32_t total_len_32;
+    uint32_t large_len;
+    uint32_t v1;
+    uint32_t v2;
+    uint32_t v3;
+    uint32_t v4;
+    uint32_t mem32[4];
+    uint32_t memsize;
+    uint32_t reserved;   /* never read nor write, might be removed in a future version */
+};   /* typedef'd to XXH32_state_t */
+
+struct XXH64_state_s {
+    uint64_t total_len;
+    uint64_t v1;
+    uint64_t v2;
+    uint64_t v3;
+    uint64_t v4;
+    uint64_t mem64[4];
+    uint32_t memsize;
+    uint32_t reserved[2];   /* never read nor write, might be removed in a future version */
+};   /* typedef'd to XXH64_state_t */
+
+# else
+
+struct XXH32_state_s {
+    unsigned total_len_32;
+    unsigned large_len;
+    unsigned v1;
+    unsigned v2;
+    unsigned v3;
+    unsigned v4;
+    unsigned mem32[4];
+    unsigned memsize;
+    unsigned reserved;   /* never read nor write, might be removed in a future version */
+};   /* typedef'd to XXH32_state_t */
+
+#  ifndef XXH_NO_LONG_LONG   /* remove 64-bit support */
+struct XXH64_state_s {
+    unsigned long long total_len;
+    unsigned long long v1;
+    unsigned long long v2;
+    unsigned long long v3;
+    unsigned long long v4;
+    unsigned long long mem64[4];
+    unsigned memsize;
+    unsigned reserved[2];   /* never read nor write, might be removed in a future version */
+};   /* typedef'd to XXH64_state_t */
+#  endif
+
+# endif
+
+
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+#  include "xxhash.c"   /* include xxhash function bodies as `static`, for inlining */
+#endif
+
+#endif /* XXH_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* XXHASH_H_5627135585666179 */
diff --git a/tools/lib/sha/sha1.c b/tools/lib/sha/sha1.c
new file mode 100644
index 0000000..4ce22c7
--- /dev/null
+++ b/tools/lib/sha/sha1.c
@@ -0,0 +1,97 @@
+#include <string.h>
+#include <stdint.h>
+#include "sha1.h"
+
+
+#define ROTL32(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+static void sha1_transform(uint32_t state[5], const uint8_t buffer[SHA1_BLOCK_SIZE]) {
+    uint32_t a, b, c, d, e, t, W[80];
+    int i;
+
+
+    for (i = 0; i < 16; ++i) {
+        W[i] = ((uint32_t)buffer[i * 4] << 24) | ((uint32_t)buffer[i * 4 + 1] << 16) |
+               ((uint32_t)buffer[i * 4 + 2] << 8) | ((uint32_t)buffer[i * 4 + 3]);
+    }
+
+
+    for (; i < 80; ++i) {
+        W[i] = ROTL32(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16], 1);
+    }
+
+    a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4];
+
+
+    for (i = 0; i < 80; ++i) {
+        if (i < 20)
+            t = ((b & c) | ((~b) & d)) + 0x5A827999;
+        else if (i < 40)
+            t = (b ^ c ^ d) + 0x6ED9EBA1;
+        else if (i < 60)
+            t = ((b & c) | (b & d) | (c & d)) + 0x8F1BBCDC;
+        else
+            t = (b ^ c ^ d) + 0xCA62C1D6;
+
+        t += ROTL32(a, 5) + e + W[i];
+        e = d; d = c; c = ROTL32(b, 30); b = a; a = t;
+    }
+
+    state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e;
+}
+
+void sha1_init(SHA1_CTX *ctx) {
+    ctx->state[0] = 0x67452301;
+    ctx->state[1] = 0xEFCDAB89;
+    ctx->state[2] = 0x98BADCFE;
+    ctx->state[3] = 0x10325476;
+    ctx->state[4] = 0xC3D2E1F0;
+    ctx->count = 0;
+}
+
+void sha1_update(SHA1_CTX *ctx, const uint8_t *data, size_t len) {
+    size_t i, j;
+    j = (size_t)((ctx->count >> 3) & 63);
+    ctx->count += (uint64_t)len << 3;
+
+    if ((j + len) > 63) {
+        i = 64 - j;
+        memcpy(&ctx->buffer[j], data, i);
+        sha1_transform(ctx->state, ctx->buffer);
+        for (; i + 63 < len; i += 64) {
+            sha1_transform(ctx->state, &data[i]);
+        }
+        j = 0;
+    } else {
+        i = 0;
+    }
+    memcpy(&ctx->buffer[j], &data[i], len - i);
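+    /* Any remaining tail is kept buffered in ctx->buffer until a later
+     * sha1_update() or sha1_final() call completes the 64-byte block. */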
+}
+
+void sha1_final(SHA1_CTX *ctx, uint8_t digest[SHA1_DIGEST_SIZE]) {
+    uint8_t final_count[8];
+    uint8_t c;
+
+    for (int i = 0; i < 8; i++) {
+        final_count[i] = (uint8_t)((ctx->count >> ((7 - i) * 8)) & 255);
+    }
+
+
+    c = 0x80;
+    sha1_update(ctx, &c, 1);
+
+
+    while ((size_t)((ctx->count >> 3) & 63) != 56) {
+        c = 0x00;
+        sha1_update(ctx, &c, 1);
+    }
+
+    sha1_update(ctx, final_count, 8);
+
+    for (int i = 0; i < 5; i++) {
+        digest[i * 4] = (uint8_t)((ctx->state[i] >> 24) & 255);
+        digest[i * 4 + 1] = (uint8_t)((ctx->state[i] >> 16) & 255);
+        digest[i * 4 + 2] = (uint8_t)((ctx->state[i] >> 8) & 255);
+        digest[i * 4 + 3] = (uint8_t)((ctx->state[i]) & 255);
+    }
+}
\ No newline at end of file
diff --git a/tools/lib/sha/sha1.h b/tools/lib/sha/sha1.h
new file mode 100644
index 0000000..c9b1d73
--- /dev/null
+++ b/tools/lib/sha/sha1.h
@@ -0,0 +1,28 @@
+#ifndef SHA1_H
+#define SHA1_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SHA1_BLOCK_SIZE 64
+#define SHA1_DIGEST_SIZE 20
+
+typedef struct {
+    uint32_t state[5];
+    uint64_t count;
+    uint8_t buffer[SHA1_BLOCK_SIZE];
+} SHA1_CTX;
+
+void sha1_init(SHA1_CTX *ctx);
+void sha1_update(SHA1_CTX *ctx, const uint8_t *data, size_t len);
+void sha1_final(SHA1_CTX *ctx, uint8_t digest[SHA1_DIGEST_SIZE]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SHA1_H
\ No newline at end of file
diff --git a/tools/sha256.c b/tools/lib/sha/sha256.c
similarity index 100%
rename from tools/sha256.c
rename to tools/lib/sha/sha256.c
diff --git a/tools/sha256.h b/tools/lib/sha/sha256.h
similarity index 100%
rename from tools/sha256.h
rename to tools/lib/sha/sha256.h
diff --git a/tools/lib/xz/xz.h b/tools/lib/xz/xz.h
new file mode 100644
index 0000000..c317c49
--- /dev/null
+++ b/tools/lib/xz/xz.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: 0BSD */
+
+/*
+ * XZ decompressor
+ *
+ * Authors: Lasse Collin
+ *          Igor Pavlov
+ */
+
+#ifndef XZ_H
+#define XZ_H
+
+#ifdef __KERNEL__
+# include <linux/stddef.h>
+# include <linux/types.h>
+#else
+# include <stddef.h>
+# include <stdint.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* "#define XZ_EXTERN static" can be used to make extern functions static. */
+#ifndef XZ_EXTERN
+# define XZ_EXTERN extern
+#endif
+
+/**
+ * enum xz_mode - Operation mode
+ *
+ * @XZ_SINGLE:      Single-call mode. This uses less RAM than
+ *                  multi-call modes, because the LZMA2
+ *                  dictionary doesn't need to be allocated as
+ *                  part of the decoder state. All required data
+ *                  structures are allocated at initialization,
+ *                  so xz_dec_run() cannot return XZ_MEM_ERROR.
+ * @XZ_PREALLOC:    Multi-call mode with preallocated LZMA2
+ *                  dictionary buffer. All data structures are
+ *                  allocated at initialization, so xz_dec_run()
+ *                  cannot return XZ_MEM_ERROR.
+ * @XZ_DYNALLOC:    Multi-call mode. The LZMA2 dictionary is
+ *                  allocated once the required size has been
+ *                  parsed from the stream headers. If the
+ *                  allocation fails, xz_dec_run() will return
+ *                  XZ_MEM_ERROR.
+ *
+ * It is possible to enable support only for a subset of the above
+ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
+ * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
+ * with support for all operation modes, but the preboot code may
+ * be built with fewer features to minimize code size.
+ */
+enum xz_mode {
+	XZ_SINGLE,
+	XZ_PREALLOC,
+	XZ_DYNALLOC
+};
+
+/**
+ * enum xz_ret - Return codes
+ * @XZ_OK:                  Everything is OK so far. More input or more
+ *                          output space is required to continue.
This + * return code is possible only in multi-call mode + * (XZ_PREALLOC or XZ_DYNALLOC). + * @XZ_STREAM_END: Operation finished successfully. + * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding + * is still possible in multi-call mode by simply + * calling xz_dec_run() again. + * Note that this return value is used only if + * XZ_DEC_ANY_CHECK was defined at build time, + * which is not used in the kernel. Unsupported + * check types return XZ_OPTIONS_ERROR if + * XZ_DEC_ANY_CHECK was not defined at build time. + * @XZ_MEM_ERROR: Allocating memory failed. This return code is + * possible only if the decoder was initialized + * with XZ_DYNALLOC. The amount of memory that was + * tried to be allocated was no more than the + * dict_max argument given to xz_dec_init(). + * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than + * allowed by the dict_max argument given to + * xz_dec_init(). This return value is possible + * only in multi-call mode (XZ_PREALLOC or + * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) + * ignores the dict_max argument. + * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic + * bytes). + * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested + * compression options. In the decoder this means + * that the header CRC32 matches, but the header + * itself specifies something that we don't support. + * @XZ_DATA_ERROR: Compressed data is corrupt. + * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly + * different between multi-call and single-call + * mode; more information below. + * + * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls + * to XZ code cannot consume any input and cannot produce any new output. + * This happens when there is no new input available, or the output buffer + * is full while at least one output byte is still pending. Assuming your + * code is not buggy, you can get this error only when decoding a compressed + * stream that is truncated or otherwise corrupt. + * + * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer + * is too small or the compressed input is corrupt in a way that makes the + * decoder produce more output than the caller expected. When it is + * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR + * is used instead of XZ_BUF_ERROR. + */ +enum xz_ret { + XZ_OK, + XZ_STREAM_END, + XZ_UNSUPPORTED_CHECK, + XZ_MEM_ERROR, + XZ_MEMLIMIT_ERROR, + XZ_FORMAT_ERROR, + XZ_OPTIONS_ERROR, + XZ_DATA_ERROR, + XZ_BUF_ERROR +}; + +/** + * struct xz_buf - Passing input and output buffers to XZ code + * @in: Beginning of the input buffer. This may be NULL if and only + * if in_pos is equal to in_size. + * @in_pos: Current position in the input buffer. This must not exceed + * in_size. + * @in_size: Size of the input buffer + * @out: Beginning of the output buffer. This may be NULL if and only + * if out_pos is equal to out_size. + * @out_pos: Current position in the output buffer. This must not exceed + * out_size. + * @out_size: Size of the output buffer + * + * Only the contents of the output buffer from out[out_pos] onward, and + * the variables in_pos and out_pos are modified by the XZ code. 
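+ *
+ * A minimal single-call setup is just (illustrative sketch; `comp` and
+ * `dest` are caller-provided buffers, not part of this API):
+ *
+ *     struct xz_buf b = {
+ *             .in = comp, .in_pos = 0, .in_size = comp_size,
+ *             .out = dest, .out_pos = 0, .out_size = dest_max,
+ *     };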
+ */ +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +/* + * struct xz_dec - Opaque type to hold the XZ decoder state + */ +struct xz_dec; + +/** + * xz_dec_init() - Allocate and initialize a XZ decoder state + * @mode: Operation mode + * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for + * multi-call decoding. This is ignored in single-call mode + * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes + * or 2^n + 2^(n-1) bytes (the latter sizes are less common + * in practice), so other values for dict_max don't make sense. + * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, + * 512 KiB, and 1 MiB are probably the only reasonable values, + * except for kernel and initramfs images where a bigger + * dictionary can be fine and useful. + * + * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at + * once. The caller must provide enough output space or the decoding will + * fail. The output space is used as the dictionary buffer, which is why + * there is no need to allocate the dictionary as part of the decoder's + * internal state. + * + * Because the output buffer is used as the workspace, streams encoded using + * a big dictionary are not a problem in single-call mode. It is enough that + * the output buffer is big enough to hold the actual uncompressed data; it + * can be smaller than the dictionary size stored in the stream headers. + * + * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes + * of memory is preallocated for the LZMA2 dictionary. This way there is no + * risk that xz_dec_run() could run out of memory, since xz_dec_run() will + * never allocate any memory. Instead, if the preallocated dictionary is too + * small for decoding the given input stream, xz_dec_run() will return + * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be + * decoded to avoid allocating excessive amount of memory for the dictionary. + * + * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): + * dict_max specifies the maximum allowed dictionary size that xz_dec_run() + * may allocate once it has parsed the dictionary size from the stream + * headers. This way excessive allocations can be avoided while still + * limiting the maximum memory usage to a sane value to prevent running the + * system out of memory when decompressing streams from untrusted sources. + * + * On success, xz_dec_init() returns a pointer to struct xz_dec, which is + * ready to be used with xz_dec_run(). If memory allocation fails, + * xz_dec_init() returns NULL. + */ +XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); + +/** + * xz_dec_run() - Run the XZ decoder for a single XZ stream + * @s: Decoder state allocated using xz_dec_init() + * @b: Input and output buffers + * + * The possible return values depend on build options and operation mode. + * See enum xz_ret for details. + * + * Note that if an error occurs in single-call mode (return value is not + * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the + * contents of the output buffer from b->out[b->out_pos] onward are + * undefined. This is true even after XZ_BUF_ERROR, because with some filter + * chains, there may be a second pass over the output buffer, and this pass + * cannot be properly done if the output buffer is truncated. 
Thus, you + * cannot give the single-call decoder a too small buffer and then expect to + * get that amount valid data from the beginning of the stream. You must use + * the multi-call decoder if you don't want to uncompress the whole stream. + * + * Use xz_dec_run() when XZ data is stored inside some other file format. + * The decoding will stop after one XZ stream has been decompressed. To + * decompress regular .xz files which might have multiple concatenated + * streams, use xz_dec_catrun() instead. + */ +XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); + +/** + * xz_dec_catrun() - Run the XZ decoder with support for concatenated streams + * @s: Decoder state allocated using xz_dec_init() + * @b: Input and output buffers + * @finish: This is an int instead of bool to avoid requiring stdbool.h. + * As long as more input might be coming, finish must be false. + * When the caller knows that it has provided all the input to + * the decoder (some possibly still in b->in), it must set finish + * to true. Only when finish is true can this function return + * XZ_STREAM_END to indicate successful decompression of the + * file. In single-call mode (XZ_SINGLE) finish is assumed to + * always be true; the caller-provided value is ignored. + * + * This is like xz_dec_run() except that this makes it easy to decode .xz + * files with multiple streams (multiple .xz files concatenated as is). + * The rarely-used Stream Padding feature is supported too, that is, there + * can be null bytes after or between the streams. The number of null bytes + * must be a multiple of four. + * + * When finish is false and b->in_pos == b->in_size, it is possible that + * XZ_BUF_ERROR isn't returned even when no progress is possible (XZ_OK is + * returned instead). This shouldn't matter because in this situation a + * reasonable caller will attempt to provide more input or set finish to + * true for the next xz_dec_catrun() call anyway. + * + * For any struct xz_dec that has been initialized for multi-call mode: + * Once decoding has been started with xz_dec_run() or xz_dec_catrun(), + * the same function must be used until xz_dec_reset() or xz_dec_end(). + * Switching between the two decoding functions without resetting results + * in undefined behavior. + * + * xz_dec_catrun() is only available if XZ_DEC_CONCATENATED was defined + * at compile time. + */ +XZ_EXTERN enum xz_ret xz_dec_catrun(struct xz_dec *s, struct xz_buf *b, + int finish); + +/** + * xz_dec_reset() - Reset an already allocated decoder state + * @s: Decoder state allocated using xz_dec_init() + * + * This function can be used to reset the multi-call decoder state without + * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). + * + * In single-call mode, xz_dec_reset() is always called in the beginning of + * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in + * multi-call mode. + */ +XZ_EXTERN void xz_dec_reset(struct xz_dec *s); + +/** + * xz_dec_end() - Free the memory allocated for the decoder state + * @s: Decoder state allocated using xz_dec_init(). If s is NULL, + * this function does nothing. + */ +XZ_EXTERN void xz_dec_end(struct xz_dec *s); + +/** + * DOC: MicroLZMA decompressor + * + * This MicroLZMA header format was created for use in EROFS but may be used + * by others too. 
**In most cases one needs the XZ APIs above instead.** + * + * The compressed format supported by this decoder is a raw LZMA stream + * whose first byte (always 0x00) has been replaced with bitwise-negation + * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is + * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00. + * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream + * marker must not be used. The unused values are reserved for future use. + */ + +/* + * struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state + */ +struct xz_dec_microlzma; + +/** + * xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder + * @mode: XZ_SINGLE or XZ_PREALLOC + * @dict_size: LZMA dictionary size. This must be at least 4 KiB and + * at most 3 GiB. + * + * In contrast to xz_dec_init(), this function only allocates the memory + * and remembers the dictionary size. xz_dec_microlzma_reset() must be used + * before calling xz_dec_microlzma_run(). + * + * The amount of allocated memory is a little less than 30 KiB with XZ_SINGLE. + * With XZ_PREALLOC also a dictionary buffer of dict_size bytes is allocated. + * + * On success, xz_dec_microlzma_alloc() returns a pointer to + * struct xz_dec_microlzma. If memory allocation fails or + * dict_size is invalid, NULL is returned. + */ +XZ_EXTERN struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode, + uint32_t dict_size); + +/** + * xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state + * @s: Decoder state allocated using xz_dec_microlzma_alloc() + * @comp_size: Compressed size of the input stream + * @uncomp_size: Uncompressed size of the input stream. A value smaller + * than the real uncompressed size of the input stream can + * be specified if uncomp_size_is_exact is set to false. + * uncomp_size can never be set to a value larger than the + * expected real uncompressed size because it would eventually + * result in XZ_DATA_ERROR. + * @uncomp_size_is_exact: This is an int instead of bool to avoid + * requiring stdbool.h. This should normally be set to true. + * When this is set to false, error detection is weaker. + */ +XZ_EXTERN void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, + uint32_t comp_size, uint32_t uncomp_size, + int uncomp_size_is_exact); + +/** + * xz_dec_microlzma_run() - Run the MicroLZMA decoder + * @s: Decoder state initialized using xz_dec_microlzma_reset() + * @b: Input and output buffers + * + * This works similarly to xz_dec_run() with a few important differences. + * Only the differences are documented here. + * + * The only possible return values are XZ_OK, XZ_STREAM_END, and + * XZ_DATA_ERROR. This function cannot return XZ_BUF_ERROR: if no progress + * is possible due to lack of input data or output space, this function will + * keep returning XZ_OK. Thus, the calling code must be written so that it + * will eventually provide input and output space matching (or exceeding) + * comp_size and uncomp_size arguments given to xz_dec_microlzma_reset(). + * If the caller cannot do this (for example, if the input file is truncated + * or otherwise corrupt), the caller must detect this error by itself to + * avoid an infinite loop. + * + * If the compressed data seems to be corrupt, XZ_DATA_ERROR is returned. + * This can happen also when incorrect dictionary, uncompressed, or + * compressed sizes have been specified. + * + * With XZ_PREALLOC only: As an extra feature, b->out may be NULL to skip over + * uncompressed data. 
This way the caller doesn't need to provide a temporary + * output buffer for the bytes that will be ignored. + * + * With XZ_SINGLE only: In contrast to xz_dec_run(), the return value XZ_OK + * is also possible and thus XZ_SINGLE is actually a limited multi-call mode. + * After XZ_OK the bytes decoded so far may be read from the output buffer. + * It is possible to continue decoding but the variables b->out and b->out_pos + * MUST NOT be changed by the caller. Increasing the value of b->out_size is + * allowed to make more output space available; one doesn't need to provide + * space for the whole uncompressed data on the first call. The input buffer + * may be changed normally like with XZ_PREALLOC. This way input data can be + * provided from non-contiguous memory. + */ +XZ_EXTERN enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, + struct xz_buf *b); + +/** + * xz_dec_microlzma_end() - Free the memory allocated for the decoder state + * @s: Decoder state allocated using xz_dec_microlzma_alloc(). + * If s is NULL, this function does nothing. + */ +XZ_EXTERN void xz_dec_microlzma_end(struct xz_dec_microlzma *s); + +/* + * Standalone build (userspace build or in-kernel build for boot time use) + * needs a CRC32 implementation. For normal in-kernel use, kernel's own + * CRC32 module is used instead, and users of this module don't need to + * care about the functions below. + */ +#ifndef XZ_INTERNAL_CRC32 +# ifdef __KERNEL__ +# define XZ_INTERNAL_CRC32 0 +# else +# define XZ_INTERNAL_CRC32 1 +# endif +#endif + +/* + * If CRC64 support has been enabled with XZ_USE_CRC64, a CRC64 + * implementation is needed too. + */ +#ifndef XZ_USE_CRC64 +# undef XZ_INTERNAL_CRC64 +# define XZ_INTERNAL_CRC64 0 +#endif +#ifndef XZ_INTERNAL_CRC64 +# ifdef __KERNEL__ +# error Using CRC64 in the kernel has not been implemented. +# else +# define XZ_INTERNAL_CRC64 1 +# endif +#endif + +#if XZ_INTERNAL_CRC32 +/* + * This must be called before any other xz_* function to initialize + * the CRC32 lookup table. + */ +XZ_EXTERN void xz_crc32_init(void); + +/* + * Update CRC32 value using the polynomial from IEEE-802.3. To start a new + * calculation, the third argument must be zero. To continue the calculation, + * the previously returned value is passed as the third argument. + */ +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc); +#endif + +#if XZ_INTERNAL_CRC64 +/* + * This must be called before any other xz_* function (except xz_crc32_init()) + * to initialize the CRC64 lookup table. + */ +XZ_EXTERN void xz_crc64_init(void); + +/* + * Update CRC64 value using the polynomial from ECMA-182. To start a new + * calculation, the third argument must be zero. To continue the calculation, + * the previously returned value is passed as the third argument. + */ +XZ_EXTERN uint64_t xz_crc64(const uint8_t *buf, size_t size, uint64_t crc); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/lib/xz/xz_config.h b/tools/lib/xz/xz_config.h new file mode 100644 index 0000000..d7d4031 --- /dev/null +++ b/tools/lib/xz/xz_config.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: 0BSD */ + +/* + * Private includes and definitions for userspace use of XZ Embedded + * + * Author: Lasse Collin + */ + +#ifndef XZ_CONFIG_H +#define XZ_CONFIG_H + +/* Uncomment to enable building of xz_dec_catrun(). */ +/* #define XZ_DEC_CONCATENATED */ + +/* Uncomment to enable CRC64 support. */ +/* #define XZ_USE_CRC64 */ + +/* Uncomment as needed to enable BCJ filter decoders. 
+ */
+/* #define XZ_DEC_X86 */
+/* #define XZ_DEC_ARM */
+/* #define XZ_DEC_ARMTHUMB */
+/* #define XZ_DEC_ARM64 */
+/* #define XZ_DEC_RISCV */
+/* #define XZ_DEC_POWERPC */
+/* #define XZ_DEC_IA64 */
+/* #define XZ_DEC_SPARC */
+
+/*
+ * Visual Studio 2013 update 2 supports only __inline, not inline.
+ * MSVC v19.0 / VS 2015 and newer support both.
+ */
+#if defined(_MSC_VER) && _MSC_VER < 1900 && !defined(inline)
+# define inline __inline
+#endif
+
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "xz.h"
+
+#define kmalloc(size, flags) malloc(size)
+#define kfree(ptr) free(ptr)
+#define vmalloc(size) malloc(size)
+#define vfree(ptr) free(ptr)
+
+#define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#define memzero(buf, size) memset(buf, 0, size)
+
+#ifndef min
+# define min(x, y) ((x) < (y) ? (x) : (y))
+#endif
+#define min_t(type, x, y) min(x, y)
+
+#ifndef fallthrough
+# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311
+# define fallthrough [[fallthrough]]
+# elif (defined(__GNUC__) && __GNUC__ >= 7) \
+    || (defined(__clang_major__) && __clang_major__ >= 10)
+# define fallthrough __attribute__((__fallthrough__))
+# else
+# define fallthrough do {} while (0)
+# endif
+#endif
+
+/*
+ * Some functions have been marked with __always_inline to keep the
+ * performance reasonable even when the compiler is optimizing for
+ * small code size. You may be able to save a few bytes by #defining
+ * __always_inline to plain inline, but don't complain if the code
+ * becomes slow.
+ *
+ * NOTE: System headers on GNU/Linux may #define this macro already,
+ * so if you want to change it, you need to #undef it first.
+ */
+#ifndef __always_inline
+# ifdef __GNUC__
+# define __always_inline \
+   inline __attribute__((__always_inline__))
+# else
+# define __always_inline inline
+# endif
+#endif
+
+/* Inline functions to access unaligned unsigned 32-bit integers */
+#ifndef get_unaligned_le32
+static inline uint32_t get_unaligned_le32(const uint8_t *buf)
+{
+	return (uint32_t)buf[0]
+			| ((uint32_t)buf[1] << 8)
+			| ((uint32_t)buf[2] << 16)
+			| ((uint32_t)buf[3] << 24);
+}
+#endif
+
+#ifndef get_unaligned_be32
+static inline uint32_t get_unaligned_be32(const uint8_t *buf)
+{
+	return (uint32_t)((uint32_t)buf[0] << 24)
+			| ((uint32_t)buf[1] << 16)
+			| ((uint32_t)buf[2] << 8)
+			| (uint32_t)buf[3];
+}
+#endif
+
+#ifndef put_unaligned_le32
+static inline void put_unaligned_le32(uint32_t val, uint8_t *buf)
+{
+	buf[0] = (uint8_t)val;
+	buf[1] = (uint8_t)(val >> 8);
+	buf[2] = (uint8_t)(val >> 16);
+	buf[3] = (uint8_t)(val >> 24);
+}
+#endif
+
+#ifndef put_unaligned_be32
+static inline void put_unaligned_be32(uint32_t val, uint8_t *buf)
+{
+	buf[0] = (uint8_t)(val >> 24);
+	buf[1] = (uint8_t)(val >> 16);
+	buf[2] = (uint8_t)(val >> 8);
+	buf[3] = (uint8_t)val;
+}
+#endif
+
+/*
+ * To keep things simpler, use the generic unaligned methods also for
+ * aligned access. The only place where performance could matter is
+ * SHA-256 but files using SHA-256 aren't common.
+ */
+#ifndef get_le32
+# define get_le32 get_unaligned_le32
+#endif
+#ifndef get_be32
+# define get_be32 get_unaligned_be32
+#endif
+
+#endif
diff --git a/tools/lib/xz/xz_crc32.c b/tools/lib/xz/xz_crc32.c
new file mode 100644
index 0000000..effdf34
--- /dev/null
+++ b/tools/lib/xz/xz_crc32.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: 0BSD
+
+/*
+ * CRC32 using the polynomial from IEEE-802.3
+ *
+ * Authors: Lasse Collin
+ *          Igor Pavlov
+ */
+
+/*
+ * This is not the fastest implementation, but it is pretty compact.
+ * The fastest versions of xz_crc32() on modern CPUs without hardware + * accelerated CRC instruction are 3-5 times as fast as this version, + * but they are bigger and use more memory for the lookup table. + */ + +#include "xz_private.h" + +/* + * STATIC_RW_DATA is used in the pre-boot environment on some architectures. + * See for details. + */ +#ifndef STATIC_RW_DATA +# define STATIC_RW_DATA static +#endif + +STATIC_RW_DATA uint32_t xz_crc32_table[256]; + +XZ_EXTERN void xz_crc32_init(void) +{ + const uint32_t poly = 0xEDB88320; + + uint32_t i; + uint32_t j; + uint32_t r; + + for (i = 0; i < 256; ++i) { + r = i; + for (j = 0; j < 8; ++j) + r = (r >> 1) ^ (poly & ~((r & 1) - 1)); + + xz_crc32_table[i] = r; + } + + return; +} + +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) +{ + crc = ~crc; + + while (size != 0) { + crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); + --size; + } + + return ~crc; +} diff --git a/tools/lib/xz/xz_dec_lzma2.c b/tools/lib/xz/xz_dec_lzma2.c new file mode 100644 index 0000000..475c378 --- /dev/null +++ b/tools/lib/xz/xz_dec_lzma2.c @@ -0,0 +1,1345 @@ +// SPDX-License-Identifier: 0BSD + +/* + * LZMA2 decoder + * + * Authors: Lasse Collin + * Igor Pavlov + */ + +#include "xz_private.h" +#include "xz_lzma2.h" + +/* + * Range decoder initialization eats the first five bytes of each LZMA chunk. + */ +#define RC_INIT_BYTES 5 + +/* + * Minimum number of usable input buffer to safely decode one LZMA symbol. + * The worst case is that we decode 22 bits using probabilities and 26 + * direct bits. This may decode at maximum of 20 bytes of input. However, + * lzma_main() does an extra normalization before returning, thus we + * need to put 21 here. + */ +#define LZMA_IN_REQUIRED 21 + +/* + * Dictionary (history buffer) + * + * These are always true: + * start <= pos <= full <= end + * pos <= limit <= end + * + * In multi-call mode, also these are true: + * end == size + * size <= size_max + * allocated <= size + * + * Most of these variables are size_t to support single-call mode, + * in which the dictionary variables address the actual output + * buffer directly. + */ +struct dictionary { + /* Beginning of the history buffer */ + uint8_t *buf; + + /* Old position in buf (before decoding more data) */ + size_t start; + + /* Position in buf */ + size_t pos; + + /* + * How full dictionary is. This is used to detect corrupt input that + * would read beyond the beginning of the uncompressed stream. + */ + size_t full; + + /* Write limit; we don't write to buf[limit] or later bytes. */ + size_t limit; + + /* + * End of the dictionary buffer. In multi-call mode, this is + * the same as the dictionary size. In single-call mode, this + * indicates the size of the output buffer. + */ + size_t end; + + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + uint32_t size; + + /* + * Maximum allowed dictionary size in multi-call mode. + * This is ignored in single-call mode. + */ + uint32_t size_max; + + /* + * Amount of memory currently allocated for the dictionary. + * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, + * size_max is always the same as the allocated size.) 
+ */ + uint32_t allocated; + + /* Operation mode */ + enum xz_mode mode; +}; + +/* Range decoder */ +struct rc_dec { + uint32_t range; + uint32_t code; + + /* + * Number of initializing bytes remaining to be read + * by rc_read_init(). + */ + uint32_t init_bytes_left; + + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +/* Probabilities for a length decoder. */ +struct lzma_len_dec { + /* Probability of match length being at least 10 */ + uint16_t choice; + + /* Probability of match length being at least 18 */ + uint16_t choice2; + + /* Probabilities for match lengths 2-9 */ + uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; + + /* Probabilities for match lengths 10-17 */ + uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; + + /* Probabilities for match lengths 18-273 */ + uint16_t high[LEN_HIGH_SYMBOLS]; +}; + +struct lzma_dec { + /* Distances of latest four matches */ + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + + /* Types of the most recently seen LZMA symbols */ + enum lzma_state state; + + /* + * Length of a match. This is updated so that dict_repeat can + * be called again to finish repeating the whole match. + */ + uint32_t len; + + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask derived from the number of literal + * position bits, and a mask derived from the number + * position bits) + */ + uint32_t lc; + uint32_t literal_pos_mask; /* (1 << lp) - 1 */ + uint32_t pos_mask; /* (1 << pb) - 1 */ + + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + uint16_t is_match[STATES][POS_STATES_MAX]; + + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ + uint16_t is_rep[STATES]; + + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. + */ + uint16_t is_rep0[STATES]; + + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + uint16_t is_rep1[STATES]; + + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + uint16_t is_rep2[STATES]; + + /* + * If 1, the repeated match has length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + uint16_t is_rep0_long[STATES][POS_STATES_MAX]; + + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. + */ + uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; + + /* + * Probability trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; + + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + uint16_t dist_align[ALIGN_SIZE]; + + /* Length of a normal match */ + struct lzma_len_dec match_len_dec; + + /* Length of a repeated match */ + struct lzma_len_dec rep_len_dec; + + /* Probabilities of literals */ + uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; +}; + +struct lzma2_dec { + /* Position in xz_dec_lzma2_run(). */ + enum lzma2_seq { + SEQ_CONTROL, + SEQ_UNCOMPRESSED_1, + SEQ_UNCOMPRESSED_2, + SEQ_COMPRESSED_0, + SEQ_COMPRESSED_1, + SEQ_PROPERTIES, + SEQ_LZMA_PREPARE, + SEQ_LZMA_RUN, + SEQ_COPY + } sequence; + + /* Next position after decoding the compressed size of the chunk. 
*/ + enum lzma2_seq next_sequence; + + /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ + uint32_t uncompressed; + + /* + * Compressed size of LZMA chunk or compressed/uncompressed + * size of uncompressed chunk (64 KiB at maximum) + */ + uint32_t compressed; + + /* + * True if dictionary reset is needed. This is false before + * the first chunk (LZMA or uncompressed). + */ + bool need_dict_reset; + + /* + * True if new LZMA properties are needed. This is false + * before the first LZMA chunk. + */ + bool need_props; + +#ifdef XZ_DEC_MICROLZMA + bool pedantic_microlzma; +#endif +}; + +struct xz_dec_lzma2 { + /* + * The order below is important on x86 to reduce code size and + * it shouldn't hurt on other platforms. Everything up to and + * including lzma.pos_mask are in the first 128 bytes on x86-32, + * which allows using smaller instructions to access those + * variables. On x86-64, fewer variables fit into the first 128 + * bytes, but this is still the best order without sacrificing + * the readability by splitting the structures. + */ + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + + /* + * Temporary buffer which holds small number of input bytes between + * decoder calls. See lzma2_lzma() for details. + */ + struct { + uint32_t size; + uint8_t buf[3 * LZMA_IN_REQUIRED]; + } temp; +}; + +/************** + * Dictionary * + **************/ + +/* + * Reset the dictionary state. When in single-call mode, set up the beginning + * of the dictionary to point to the actual output buffer. + */ +static void dict_reset(struct dictionary *dict, struct xz_buf *b) +{ + if (DEC_IS_SINGLE(dict->mode)) { + dict->buf = b->out + b->out_pos; + dict->end = b->out_size - b->out_pos; + } + + dict->start = 0; + dict->pos = 0; + dict->limit = 0; + dict->full = 0; +} + +/* Set dictionary write limit */ +static void dict_limit(struct dictionary *dict, size_t out_max) +{ + if (dict->end - dict->pos <= out_max) + dict->limit = dict->end; + else + dict->limit = dict->pos + out_max; +} + +/* Return true if at least one byte can be written into the dictionary. */ +static inline bool dict_has_space(const struct dictionary *dict) +{ + return dict->pos < dict->limit; +} + +/* + * Get a byte from the dictionary at the given distance. The distance is + * assumed to valid, or as a special case, zero when the dictionary is + * still empty. This special case is needed for single-call decoding to + * avoid writing a '\0' to the end of the destination buffer. + */ +static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist) +{ + size_t offset = dict->pos - dist - 1; + + if (dist >= dict->pos) + offset += dict->end; + + return dict->full > 0 ? dict->buf[offset] : 0; +} + +/* + * Put one byte into the dictionary. It is assumed that there is space for it. + */ +static inline void dict_put(struct dictionary *dict, uint8_t byte) +{ + dict->buf[dict->pos++] = byte; + + if (dict->full < dict->pos) + dict->full = dict->pos; +} + +/* + * Repeat given number of bytes from the given distance. If the distance is + * invalid, false is returned. On success, true is returned and *len is + * updated to indicate how many bytes were left to be repeated. 
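+ *
+ * The copy loop below advances one byte at a time, so a match whose
+ * distance is smaller than its length (an RLE-style overlap) correctly
+ * replicates the bytes it has just written, as LZ77 semantics require.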
+ */ +static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist) +{ + size_t back; + uint32_t left; + + if (dist >= dict->full || dist >= dict->size) + return false; + + left = min_t(size_t, dict->limit - dict->pos, *len); + *len -= left; + + back = dict->pos - dist - 1; + if (dist >= dict->pos) + back += dict->end; + + do { + dict->buf[dict->pos++] = dict->buf[back++]; + if (back == dict->end) + back = 0; + } while (--left > 0); + + if (dict->full < dict->pos) + dict->full = dict->pos; + + return true; +} + +/* Copy uncompressed data as is from input to dictionary and output buffers. */ +static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, + uint32_t *left) +{ + size_t copy_size; + + while (*left > 0 && b->in_pos < b->in_size + && b->out_pos < b->out_size) { + copy_size = min(b->in_size - b->in_pos, + b->out_size - b->out_pos); + if (copy_size > dict->end - dict->pos) + copy_size = dict->end - dict->pos; + if (copy_size > *left) + copy_size = *left; + + *left -= copy_size; + + /* + * If doing in-place decompression in single-call mode and the + * uncompressed size of the file is larger than the caller + * thought (i.e. it is invalid input!), the buffers below may + * overlap and cause undefined behavior with memcpy(). + * With valid inputs memcpy() would be fine here. + */ + memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size); + dict->pos += copy_size; + + if (dict->full < dict->pos) + dict->full = dict->pos; + + if (DEC_IS_MULTI(dict->mode)) { + if (dict->pos == dict->end) + dict->pos = 0; + + /* + * Like above but for multi-call mode: use memmove() + * to avoid undefined behavior with invalid input. + */ + memmove(b->out + b->out_pos, b->in + b->in_pos, + copy_size); + } + + dict->start = dict->pos; + + b->out_pos += copy_size; + b->in_pos += copy_size; + } +} + +#ifdef XZ_DEC_MICROLZMA +# define DICT_FLUSH_SUPPORTS_SKIPPING true +#else +# define DICT_FLUSH_SUPPORTS_SKIPPING false +#endif + +/* + * Flush pending data from dictionary to b->out. It is assumed that there is + * enough space in b->out. This is guaranteed because caller uses dict_limit() + * before decoding data into the dictionary. + */ +static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b) +{ + size_t copy_size = dict->pos - dict->start; + + if (DEC_IS_MULTI(dict->mode)) { + if (dict->pos == dict->end) + dict->pos = 0; + + /* + * These buffers cannot overlap even if doing in-place + * decompression because in multi-call mode dict->buf + * has been allocated by us in this file; it's not + * provided by the caller like in single-call mode. + * + * With MicroLZMA, b->out can be NULL to skip bytes that + * the caller doesn't need. This cannot be done with XZ + * because it would break BCJ filters. + */ + if (!DICT_FLUSH_SUPPORTS_SKIPPING || b->out != NULL) + memcpy(b->out + b->out_pos, dict->buf + dict->start, + copy_size); + } + + dict->start = dict->pos; + b->out_pos += copy_size; + return copy_size; +} + +/***************** + * Range decoder * + *****************/ + +/* Reset the range decoder. */ +static void rc_reset(struct rc_dec *rc) +{ + rc->range = (uint32_t)-1; + rc->code = 0; + rc->init_bytes_left = RC_INIT_BYTES; +} + +/* + * Read the first five initial bytes into rc->code if they haven't been + * read already. (Yes, the first byte gets completely ignored.) 
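+ * (In a valid stream that byte is expected to be 0x00; here it simply
+ * shifts out of the top of the 32-bit code value.)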
+ */ +static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b) +{ + while (rc->init_bytes_left > 0) { + if (b->in_pos == b->in_size) + return false; + + rc->code = (rc->code << 8) + b->in[b->in_pos++]; + --rc->init_bytes_left; + } + + return true; +} + +/* Return true if there may not be enough input for the next decoding loop. */ +static inline bool rc_limit_exceeded(const struct rc_dec *rc) +{ + return rc->in_pos > rc->in_limit; +} + +/* + * Return true if it is possible (from point of view of range decoder) that + * we have reached the end of the LZMA chunk. + */ +static inline bool rc_is_finished(const struct rc_dec *rc) +{ + return rc->code == 0; +} + +/* Read the next input byte if needed. */ +static __always_inline void rc_normalize(struct rc_dec *rc) +{ + if (rc->range < RC_TOP_VALUE) { + rc->range <<= RC_SHIFT_BITS; + rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; + } +} + +/* + * Decode one bit. In some versions, this function has been split in three + * functions so that the compiler is supposed to be able to more easily avoid + * an extra branch. In this particular version of the LZMA decoder, this + * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3 + * on x86). Using a non-split version results in nicer looking code too. + * + * NOTE: This must return an int. Do not make it return a bool or the speed + * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care, + * and it generates 10-20 % faster code than GCC 3.x from this file anyway.) + */ +static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob) +{ + uint32_t bound; + int bit; + + rc_normalize(rc); + bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; + if (rc->code < bound) { + rc->range = bound; + *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; + bit = 0; + } else { + rc->range -= bound; + rc->code -= bound; + *prob -= *prob >> RC_MOVE_BITS; + bit = 1; + } + + return bit; +} + +/* Decode a bittree starting from the most significant bit. */ +static __always_inline uint32_t rc_bittree(struct rc_dec *rc, + uint16_t *probs, uint32_t limit) +{ + uint32_t symbol = 1; + + do { + if (rc_bit(rc, &probs[symbol])) + symbol = (symbol << 1) + 1; + else + symbol <<= 1; + } while (symbol < limit); + + return symbol; +} + +/* Decode a bittree starting from the least significant bit. */ +static __always_inline void rc_bittree_reverse(struct rc_dec *rc, + uint16_t *probs, + uint32_t *dest, uint32_t limit) +{ + uint32_t symbol = 1; + uint32_t i = 0; + + do { + if (rc_bit(rc, &probs[symbol])) { + symbol = (symbol << 1) + 1; + *dest += 1 << i; + } else { + symbol <<= 1; + } + } while (++i < limit); +} + +/* Decode direct bits (fixed fifty-fifty probability) */ +static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit) +{ + uint32_t mask; + + do { + rc_normalize(rc); + rc->range >>= 1; + rc->code -= rc->range; + mask = (uint32_t)0 - (rc->code >> 31); + rc->code += rc->range & mask; + *dest = (*dest << 1) + (mask + 1); + } while (--limit > 0); +} + +/******** + * LZMA * + ********/ + +/* Get pointer to literal coder probability array. 
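+ * With the common default properties lc=3, lp=0 this reduces to
+ * indexing by the top three bits of the previously decoded byte.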
*/ +static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s) +{ + uint32_t prev_byte = dict_get(&s->dict, 0); + uint32_t low = prev_byte >> (8 - s->lzma.lc); + uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; + return s->lzma.literal[low + high]; +} + +/* Decode a literal (one 8-bit byte) */ +static void lzma_literal(struct xz_dec_lzma2 *s) +{ + uint16_t *probs; + uint32_t symbol; + uint32_t match_byte; + uint32_t match_bit; + uint32_t offset; + uint32_t i; + + probs = lzma_literal_probs(s); + + if (lzma_state_is_literal(s->lzma.state)) { + symbol = rc_bittree(&s->rc, probs, 0x100); + } else { + symbol = 1; + match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; + offset = 0x100; + + do { + match_bit = match_byte & offset; + match_byte <<= 1; + i = offset + match_bit + symbol; + + if (rc_bit(&s->rc, &probs[i])) { + symbol = (symbol << 1) + 1; + offset &= match_bit; + } else { + symbol <<= 1; + offset &= ~match_bit; + } + } while (symbol < 0x100); + } + + dict_put(&s->dict, (uint8_t)symbol); + lzma_state_literal(&s->lzma.state); +} + +/* Decode the length of the match into s->lzma.len. */ +static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, + uint32_t pos_state) +{ + uint16_t *probs; + uint32_t limit; + + if (!rc_bit(&s->rc, &l->choice)) { + probs = l->low[pos_state]; + limit = LEN_LOW_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN; + } else { + if (!rc_bit(&s->rc, &l->choice2)) { + probs = l->mid[pos_state]; + limit = LEN_MID_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; + } else { + probs = l->high; + limit = LEN_HIGH_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + + LEN_MID_SYMBOLS; + } + } + + s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; +} + +/* Decode a match. The distance will be stored in s->lzma.rep0. */ +static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint16_t *probs; + uint32_t dist_slot; + uint32_t limit; + + lzma_state_match(&s->lzma.state); + + s->lzma.rep3 = s->lzma.rep2; + s->lzma.rep2 = s->lzma.rep1; + s->lzma.rep1 = s->lzma.rep0; + + lzma_len(s, &s->lzma.match_len_dec, pos_state); + + probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; + dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; + + if (dist_slot < DIST_MODEL_START) { + s->lzma.rep0 = dist_slot; + } else { + limit = (dist_slot >> 1) - 1; + s->lzma.rep0 = 2 + (dist_slot & 1); + + if (dist_slot < DIST_MODEL_END) { + s->lzma.rep0 <<= limit; + probs = s->lzma.dist_special + s->lzma.rep0 + - dist_slot - 1; + rc_bittree_reverse(&s->rc, probs, + &s->lzma.rep0, limit); + } else { + rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); + s->lzma.rep0 <<= ALIGN_BITS; + rc_bittree_reverse(&s->rc, s->lzma.dist_align, + &s->lzma.rep0, ALIGN_BITS); + } + } +} + +/* + * Decode a repeated match. The distance is one of the four most recently + * seen matches. The distance will be stored in s->lzma.rep0. 
+ */ +static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint32_t tmp; + + if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) { + if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[ + s->lzma.state][pos_state])) { + lzma_state_short_rep(&s->lzma.state); + s->lzma.len = 1; + return; + } + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) { + tmp = s->lzma.rep1; + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) { + tmp = s->lzma.rep2; + } else { + tmp = s->lzma.rep3; + s->lzma.rep3 = s->lzma.rep2; + } + + s->lzma.rep2 = s->lzma.rep1; + } + + s->lzma.rep1 = s->lzma.rep0; + s->lzma.rep0 = tmp; + } + + lzma_state_long_rep(&s->lzma.state); + lzma_len(s, &s->lzma.rep_len_dec, pos_state); +} + +/* LZMA decoder core */ +static bool lzma_main(struct xz_dec_lzma2 *s) +{ + uint32_t pos_state; + + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if (dict_has_space(&s->dict) && s->lzma.len > 0) + dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); + + /* + * Decode more LZMA symbols. One iteration may consume up to + * LZMA_IN_REQUIRED - 1 bytes. + */ + while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) { + pos_state = s->dict.pos & s->lzma.pos_mask; + + if (!rc_bit(&s->rc, &s->lzma.is_match[ + s->lzma.state][pos_state])) { + lzma_literal(s); + } else { + if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) + lzma_rep_match(s, pos_state); + else + lzma_match(s, pos_state); + + if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) + return false; + } + } + + /* + * Having the range decoder always normalized when we are outside + * this function makes it easier to correctly handle end of the chunk. + */ + rc_normalize(&s->rc); + + return true; +} + +/* + * Reset the LZMA decoder and range decoder state. Dictionary is not reset + * here, because LZMA state may be reset without resetting the dictionary. + */ +static void lzma_reset(struct xz_dec_lzma2 *s) +{ + uint16_t *probs; + size_t i; + + s->lzma.state = STATE_LIT_LIT; + s->lzma.rep0 = 0; + s->lzma.rep1 = 0; + s->lzma.rep2 = 0; + s->lzma.rep3 = 0; + s->lzma.len = 0; + + /* + * All probabilities are initialized to the same value. This hack + * makes the code smaller by avoiding a separate loop for each + * probability array. + * + * This could be optimized so that only that part of literal + * probabilities that are actually required. In the common case + * we would write 12 KiB less. + */ + probs = s->lzma.is_match[0]; + for (i = 0; i < PROBS_TOTAL; ++i) + probs[i] = RC_BIT_MODEL_TOTAL / 2; + + rc_reset(&s->rc); +} + +/* + * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks + * from the decoded lp and pb values. On success, the LZMA decoder state is + * reset and true is returned. 
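+ *
+ * The props byte encodes (pb * 5 + lp) * 9 + lc, so the common default
+ * lc/lp/pb = 3/0/2 arrives here as (2 * 5 + 0) * 9 + 3 = 93.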
+ */ +static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props) +{ + if (props > (4 * 5 + 4) * 9 + 8) + return false; + + s->lzma.pos_mask = 0; + while (props >= 9 * 5) { + props -= 9 * 5; + ++s->lzma.pos_mask; + } + + s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; + + s->lzma.literal_pos_mask = 0; + while (props >= 9) { + props -= 9; + ++s->lzma.literal_pos_mask; + } + + s->lzma.lc = props; + + if (s->lzma.lc + s->lzma.literal_pos_mask > 4) + return false; + + s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; + + lzma_reset(s); + + return true; +} + +/********* + * LZMA2 * + *********/ + +/* + * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't + * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This + * wrapper function takes care of making the LZMA decoder's assumption safe. + * + * As long as there is plenty of input left to be decoded in the current LZMA + * chunk, we decode directly from the caller-supplied input buffer until + * there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into + * s->temp.buf, which (hopefully) gets filled on the next call to this + * function. We decode a few bytes from the temporary buffer so that we can + * continue decoding from the caller-supplied input buffer again. + */ +static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) +{ + size_t in_avail; + uint32_t tmp; + + in_avail = b->in_size - b->in_pos; + if (s->temp.size > 0 || s->lzma2.compressed == 0) { + tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; + if (tmp > s->lzma2.compressed - s->temp.size) + tmp = s->lzma2.compressed - s->temp.size; + if (tmp > in_avail) + tmp = in_avail; + + memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); + + if (s->temp.size + tmp == s->lzma2.compressed) { + memzero(s->temp.buf + s->temp.size + tmp, + sizeof(s->temp.buf) + - s->temp.size - tmp); + s->rc.in_limit = s->temp.size + tmp; + } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) { + s->temp.size += tmp; + b->in_pos += tmp; + return true; + } else { + s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; + } + + s->rc.in = s->temp.buf; + s->rc.in_pos = 0; + + if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) + return false; + + s->lzma2.compressed -= s->rc.in_pos; + + if (s->rc.in_pos < s->temp.size) { + s->temp.size -= s->rc.in_pos; + memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, + s->temp.size); + return true; + } + + b->in_pos += s->rc.in_pos - s->temp.size; + s->temp.size = 0; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail >= LZMA_IN_REQUIRED) { + s->rc.in = b->in; + s->rc.in_pos = b->in_pos; + + if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) + s->rc.in_limit = b->in_pos + s->lzma2.compressed; + else + s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; + + if (!lzma_main(s)) + return false; + + in_avail = s->rc.in_pos - b->in_pos; + if (in_avail > s->lzma2.compressed) + return false; + + s->lzma2.compressed -= in_avail; + b->in_pos = s->rc.in_pos; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail < LZMA_IN_REQUIRED) { + if (in_avail > s->lzma2.compressed) + in_avail = s->lzma2.compressed; + + memcpy(s->temp.buf, b->in + b->in_pos, in_avail); + s->temp.size = in_avail; + b->in_pos += in_avail; + } + + return true; +} + +/* + * Take care of the LZMA2 control layer, and forward the job of actual LZMA + * decoding or copying of uncompressed chunks to other functions. 
+ */ +XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, + struct xz_buf *b) +{ + uint32_t tmp; + + while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) { + switch (s->lzma2.sequence) { + case SEQ_CONTROL: + /* + * LZMA2 control byte + * + * Exact values: + * 0x00 End marker + * 0x01 Dictionary reset followed by + * an uncompressed chunk + * 0x02 Uncompressed chunk (no dictionary reset) + * + * Highest three bits (s->control & 0xE0): + * 0xE0 Dictionary reset, new properties and state + * reset, followed by LZMA compressed chunk + * 0xC0 New properties and state reset, followed + * by LZMA compressed chunk (no dictionary + * reset) + * 0xA0 State reset using old properties, + * followed by LZMA compressed chunk (no + * dictionary reset) + * 0x80 LZMA chunk (no dictionary or state reset) + * + * For LZMA compressed chunks, the lowest five bits + * (s->control & 1F) are the highest bits of the + * uncompressed size (bits 16-20). + * + * A new LZMA2 stream must begin with a dictionary + * reset. The first LZMA chunk must set new + * properties and reset the LZMA state. + * + * Values that don't match anything described above + * are invalid and we return XZ_DATA_ERROR. + */ + tmp = b->in[b->in_pos++]; + + if (tmp == 0x00) + return XZ_STREAM_END; + + if (tmp >= 0xE0 || tmp == 0x01) { + s->lzma2.need_props = true; + s->lzma2.need_dict_reset = false; + dict_reset(&s->dict, b); + } else if (s->lzma2.need_dict_reset) { + return XZ_DATA_ERROR; + } + + if (tmp >= 0x80) { + s->lzma2.uncompressed = (tmp & 0x1F) << 16; + s->lzma2.sequence = SEQ_UNCOMPRESSED_1; + + if (tmp >= 0xC0) { + /* + * When there are new properties, + * state reset is done at + * SEQ_PROPERTIES. + */ + s->lzma2.need_props = false; + s->lzma2.next_sequence + = SEQ_PROPERTIES; + + } else if (s->lzma2.need_props) { + return XZ_DATA_ERROR; + + } else { + s->lzma2.next_sequence + = SEQ_LZMA_PREPARE; + if (tmp >= 0xA0) + lzma_reset(s); + } + } else { + if (tmp > 0x02) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_COMPRESSED_0; + s->lzma2.next_sequence = SEQ_COPY; + } + + break; + + case SEQ_UNCOMPRESSED_1: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_UNCOMPRESSED_2; + break; + + case SEQ_UNCOMPRESSED_2: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = SEQ_COMPRESSED_0; + break; + + case SEQ_COMPRESSED_0: + s->lzma2.compressed + = (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_COMPRESSED_1; + break; + + case SEQ_COMPRESSED_1: + s->lzma2.compressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = s->lzma2.next_sequence; + break; + + case SEQ_PROPERTIES: + if (!lzma_props(s, b->in[b->in_pos++])) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + + fallthrough; + + case SEQ_LZMA_PREPARE: + if (s->lzma2.compressed < RC_INIT_BYTES) + return XZ_DATA_ERROR; + + if (!rc_read_init(&s->rc, b)) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + fallthrough; + + case SEQ_LZMA_RUN: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b->out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s->lzma2.sequence. 
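+ * Partial progress is preserved across calls: s->lzma2.uncompressed is
+ * decremented by exactly what dict_flush() delivered on this call.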
+ */ + dict_limit(&s->dict, min_t(size_t, + b->out_size - b->out_pos, + s->lzma2.uncompressed)); + if (!lzma2_lzma(s, b)) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if (s->lzma2.uncompressed == 0) { + if (s->lzma2.compressed > 0 || s->lzma.len > 0 + || !rc_is_finished(&s->rc)) + return XZ_DATA_ERROR; + + rc_reset(&s->rc); + s->lzma2.sequence = SEQ_CONTROL; + + } else if (b->out_pos == b->out_size + || (b->in_pos == b->in_size + && s->temp.size + < s->lzma2.compressed)) { + return XZ_OK; + } + + break; + + case SEQ_COPY: + dict_uncompressed(&s->dict, b, &s->lzma2.compressed); + if (s->lzma2.compressed > 0) + return XZ_OK; + + s->lzma2.sequence = SEQ_CONTROL; + break; + } + } + + return XZ_OK; +} + +XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, + uint32_t dict_max) +{ + struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->dict.mode = mode; + s->dict.size_max = dict_max; + + if (DEC_IS_PREALLOC(mode)) { + s->dict.buf = vmalloc(dict_max); + if (s->dict.buf == NULL) { + kfree(s); + return NULL; + } + } else if (DEC_IS_DYNALLOC(mode)) { + s->dict.buf = NULL; + s->dict.allocated = 0; + } + + return s; +} + +XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props) +{ + /* This limits dictionary size to 3 GiB to keep parsing simpler. */ + if (props > 39) + return XZ_OPTIONS_ERROR; + + s->dict.size = 2 + (props & 1); + s->dict.size <<= (props >> 1) + 11; + + if (DEC_IS_MULTI(s->dict.mode)) { + if (s->dict.size > s->dict.size_max) + return XZ_MEMLIMIT_ERROR; + + s->dict.end = s->dict.size; + + if (DEC_IS_DYNALLOC(s->dict.mode)) { + if (s->dict.allocated < s->dict.size) { + s->dict.allocated = s->dict.size; + vfree(s->dict.buf); + s->dict.buf = vmalloc(s->dict.size); + if (s->dict.buf == NULL) { + s->dict.allocated = 0; + return XZ_MEM_ERROR; + } + } + } + } + + s->lzma2.sequence = SEQ_CONTROL; + s->lzma2.need_dict_reset = true; + + s->temp.size = 0; + + return XZ_OK; +} + +XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s) +{ + if (DEC_IS_MULTI(s->dict.mode)) + vfree(s->dict.buf); + + kfree(s); +} + +#ifdef XZ_DEC_MICROLZMA +/* This is a wrapper struct to have a nice struct name in the public API. */ +struct xz_dec_microlzma { + struct xz_dec_lzma2 s; +}; + +XZ_EXTERN enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s_ptr, + struct xz_buf *b) +{ + struct xz_dec_lzma2 *s = &s_ptr->s; + + /* + * sequence is SEQ_PROPERTIES before the first input byte, + * SEQ_LZMA_PREPARE until a total of five bytes have been read, + * and SEQ_LZMA_RUN for the rest of the input stream. + */ + if (s->lzma2.sequence != SEQ_LZMA_RUN) { + if (s->lzma2.sequence == SEQ_PROPERTIES) { + /* One byte is needed for the props. */ + if (b->in_pos >= b->in_size) + return XZ_OK; + + /* + * Don't increment b->in_pos here. The same byte is + * also passed to rc_read_init() which will ignore it. + */ + if (!lzma_props(s, ~b->in[b->in_pos])) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + } + + /* + * xz_dec_microlzma_reset() doesn't validate the compressed + * size so we do it here. We have to limit the maximum size + * to avoid integer overflows in lzma2_lzma(). 3 GiB is a nice + * round number and much more than users of this code should + * ever need. 
+ */ + if (s->lzma2.compressed < RC_INIT_BYTES + || s->lzma2.compressed > (3U << 30)) + return XZ_DATA_ERROR; + + if (!rc_read_init(&s->rc, b)) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + dict_reset(&s->dict, b); + } + + /* This is to allow increasing b->out_size between calls. */ + if (DEC_IS_SINGLE(s->dict.mode)) + s->dict.end = b->out_size - b->out_pos; + + while (true) { + dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos, + s->lzma2.uncompressed)); + + if (!lzma2_lzma(s, b)) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if (s->lzma2.uncompressed == 0) { + if (s->lzma2.pedantic_microlzma) { + if (s->lzma2.compressed > 0 || s->lzma.len > 0 + || !rc_is_finished(&s->rc)) + return XZ_DATA_ERROR; + } + + return XZ_STREAM_END; + } + + if (b->out_pos == b->out_size) + return XZ_OK; + + if (b->in_pos == b->in_size + && s->temp.size < s->lzma2.compressed) + return XZ_OK; + } +} + +XZ_EXTERN struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode, + uint32_t dict_size) +{ + struct xz_dec_microlzma *s; + + /* Restrict dict_size to the same range as in the LZMA2 code. */ + if (dict_size < 4096 || dict_size > (3U << 30)) + return NULL; + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->s.dict.mode = mode; + s->s.dict.size = dict_size; + + if (DEC_IS_MULTI(mode)) { + s->s.dict.end = dict_size; + + s->s.dict.buf = vmalloc(dict_size); + if (s->s.dict.buf == NULL) { + kfree(s); + return NULL; + } + } + + return s; +} + +XZ_EXTERN void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, + uint32_t comp_size, + uint32_t uncomp_size, + int uncomp_size_is_exact) +{ + /* + * comp_size is validated in xz_dec_microlzma_run(). + * uncomp_size can safely be anything. 
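+	 *
+	 * A typical multi-call sequence (illustrative sketch only; the
+	 * caller refills b.in and drains b.out between calls):
+	 *
+	 *	s = xz_dec_microlzma_alloc(XZ_PREALLOC, 1 << 20);
+	 *	xz_dec_microlzma_reset(s, comp_size, uncomp_size, true);
+	 *	while (xz_dec_microlzma_run(s, &b) == XZ_OK)
+	 *		;
+	 *	xz_dec_microlzma_end(s);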
+ */ + s->s.lzma2.compressed = comp_size; + s->s.lzma2.uncompressed = uncomp_size; + s->s.lzma2.pedantic_microlzma = uncomp_size_is_exact; + + s->s.lzma2.sequence = SEQ_PROPERTIES; + s->s.temp.size = 0; +} + +XZ_EXTERN void xz_dec_microlzma_end(struct xz_dec_microlzma *s) +{ + if (DEC_IS_MULTI(s->s.dict.mode)) + vfree(s->s.dict.buf); + + kfree(s); +} +#endif diff --git a/tools/lib/xz/xz_dec_stream.c b/tools/lib/xz/xz_dec_stream.c new file mode 100644 index 0000000..33927e8 --- /dev/null +++ b/tools/lib/xz/xz_dec_stream.c @@ -0,0 +1,984 @@ +// SPDX-License-Identifier: 0BSD + +/* + * .xz Stream decoder + * + * Author: Lasse Collin + */ + +#include "xz_private.h" +#include "xz_stream.h" + +#ifdef XZ_USE_CRC64 +# define IS_CRC64(check_type) ((check_type) == XZ_CHECK_CRC64) +#else +# define IS_CRC64(check_type) false +#endif + +#ifdef XZ_USE_SHA256 +# define IS_SHA256(check_type) ((check_type) == XZ_CHECK_SHA256) +#else +# define IS_SHA256(check_type) false +#endif + +/* Hash used to validate the Index field */ +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +struct xz_dec { + /* Position in dec_main() */ + enum { + SEQ_STREAM_HEADER, + SEQ_BLOCK_START, + SEQ_BLOCK_HEADER, + SEQ_BLOCK_UNCOMPRESS, + SEQ_BLOCK_PADDING, + SEQ_BLOCK_CHECK, + SEQ_INDEX, + SEQ_INDEX_PADDING, + SEQ_INDEX_CRC32, + SEQ_STREAM_FOOTER, + SEQ_STREAM_PADDING + } sequence; + + /* Position in variable-length integers and Check fields */ + uint32_t pos; + + /* Variable-length integer decoded by dec_vli() */ + vli_type vli; + + /* Saved in_pos and out_pos */ + size_t in_start; + size_t out_start; + +#ifdef XZ_USE_CRC64 + /* CRC32 or CRC64 value in Block or CRC32 value in Index */ + uint64_t crc; +#else + /* CRC32 value in Block or Index */ + uint32_t crc; +#endif + + /* Type of the integrity check calculated from uncompressed data */ + enum xz_check check_type; + + /* Operation mode */ + enum xz_mode mode; + + /* + * True if the next call to xz_dec_run() is allowed to return + * XZ_BUF_ERROR. + */ + bool allow_buf_error; + + /* Information stored in Block Header */ + struct { + /* + * Value stored in the Compressed Size field, or + * VLI_UNKNOWN if Compressed Size is not present. + */ + vli_type compressed; + + /* + * Value stored in the Uncompressed Size field, or + * VLI_UNKNOWN if Uncompressed Size is not present. + */ + vli_type uncompressed; + + /* Size of the Block Header field */ + uint32_t size; + } block_header; + + /* Information collected when decoding Blocks */ + struct { + /* Observed compressed size of the current Block */ + vli_type compressed; + + /* Observed uncompressed size of the current Block */ + vli_type uncompressed; + + /* Number of Blocks decoded so far */ + vli_type count; + + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + struct xz_dec_hash hash; + } block; + + /* Variables needed when verifying the Index field */ + struct { + /* Position in dec_index() */ + enum { + SEQ_INDEX_COUNT, + SEQ_INDEX_UNPADDED, + SEQ_INDEX_UNCOMPRESSED + } sequence; + + /* Size of the Index in bytes */ + vli_type size; + + /* Number of Records (matches block.count in valid files) */ + vli_type count; + + /* + * Hash calculated from the Records (matches block.hash in + * valid files). + */ + struct xz_dec_hash hash; + } index; + + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. 
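+	 * (The 1 KiB limit follows from the Block Header encoding: the
+	 * size byte allows at most (0xFF + 1) * 4 = 1024 bytes.)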
+	 * buf[] has to be aligned to a multiple of four bytes; the size_t
+	 * variables before it should guarantee this.
+	 */
+	struct {
+		size_t pos;
+		size_t size;
+		uint8_t buf[1024];
+	} temp;
+
+	struct xz_dec_lzma2 *lzma2;
+
+#ifdef XZ_DEC_BCJ
+	struct xz_dec_bcj *bcj;
+	bool bcj_active;
+#endif
+
+#ifdef XZ_USE_SHA256
+	/*
+	 * SHA-256 value in Block
+	 *
+	 * struct xz_sha256 is over a hundred bytes and it's only accessed
+	 * from a few places. Putting the SHA-256 state near the end of
+	 * struct xz_dec (somewhere after the "index" member) reduces code
+	 * size at least on x86 and RISC-V. It's because the first bytes
+	 * of the struct can be accessed with smaller instructions; the
+	 * members that are accessed from many places should be at the top.
+	 */
+	struct xz_sha256 sha256;
+#endif
+};
+
+#if defined(XZ_DEC_ANY_CHECK) || defined(XZ_USE_SHA256)
+/* Sizes of the Check field with different Check IDs */
+static const uint8_t check_sizes[16] = {
+	0,
+	4, 4, 4,
+	8, 8, 8,
+	16, 16, 16,
+	32, 32, 32,
+	64, 64, 64
+};
+#endif
+
+/*
+ * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
+ * must have set s->temp.pos and s->temp.size to indicate how much data
+ * we are supposed to copy into s->temp.buf. Return true once s->temp.pos
+ * has reached s->temp.size.
+ */
+static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t copy_size = min_t(size_t,
+			b->in_size - b->in_pos, s->temp.size - s->temp.pos);
+
+	memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
+	b->in_pos += copy_size;
+	s->temp.pos += copy_size;
+
+	if (s->temp.pos == s->temp.size) {
+		s->temp.pos = 0;
+		return true;
+	}
+
+	return false;
+}
+
+/* Decode a variable-length integer (little-endian base-128 encoding) */
+static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
+			   size_t *in_pos, size_t in_size)
+{
+	uint8_t byte;
+
+	if (s->pos == 0)
+		s->vli = 0;
+
+	while (*in_pos < in_size) {
+		byte = in[*in_pos];
+		++*in_pos;
+
+		s->vli |= (vli_type)(byte & 0x7F) << s->pos;
+
+		if ((byte & 0x80) == 0) {
+			/* Don't allow non-minimal encodings. */
+			if (byte == 0 && s->pos != 0)
+				return XZ_DATA_ERROR;
+
+			s->pos = 0;
+			return XZ_STREAM_END;
+		}
+
+		s->pos += 7;
+		if (s->pos == 7 * VLI_BYTES_MAX)
+			return XZ_DATA_ERROR;
+	}
+
+	return XZ_OK;
+}
+
+/*
+ * Decode the Compressed Data field from a Block. Update and validate
+ * the observed compressed and uncompressed sizes of the Block so that
+ * they don't exceed the values possibly stored in the Block Header
+ * (validation assumes that no integer overflow occurs, since vli_type
+ * is normally uint64_t). Update the CRC32 or CRC64 value if presence of
+ * the CRC32 or CRC64 field was indicated in Stream Header.
+ *
+ * Once the decoding is finished, validate that the observed sizes match
+ * the sizes possibly stored in the Block Header. Update the hash and
+ * Block count, which are later used to validate the Index field.
+ */
+static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	s->in_start = b->in_pos;
+	s->out_start = b->out_pos;
+
+#ifdef XZ_DEC_BCJ
+	if (s->bcj_active)
+		ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
+	else
+#endif
+		ret = xz_dec_lzma2_run(s->lzma2, b);
+
+	s->block.compressed += b->in_pos - s->in_start;
+	s->block.uncompressed += b->out_pos - s->out_start;
+
+	/*
+	 * There is no need to separately check for VLI_UNKNOWN, since
+	 * the observed sizes are always smaller than VLI_UNKNOWN.
+ */ + if (s->block.compressed > s->block_header.compressed + || s->block.uncompressed + > s->block_header.uncompressed) + return XZ_DATA_ERROR; + + if (s->check_type == XZ_CHECK_CRC32) + s->crc = xz_crc32(b->out + s->out_start, + b->out_pos - s->out_start, s->crc); +#ifdef XZ_USE_CRC64 + else if (s->check_type == XZ_CHECK_CRC64) + s->crc = xz_crc64(b->out + s->out_start, + b->out_pos - s->out_start, s->crc); +#endif +#ifdef XZ_USE_SHA256 + else if (s->check_type == XZ_CHECK_SHA256) + xz_sha256_update(b->out + s->out_start, + b->out_pos - s->out_start, &s->sha256); +#endif + + if (ret == XZ_STREAM_END) { + if (s->block_header.compressed != VLI_UNKNOWN + && s->block_header.compressed + != s->block.compressed) + return XZ_DATA_ERROR; + + if (s->block_header.uncompressed != VLI_UNKNOWN + && s->block_header.uncompressed + != s->block.uncompressed) + return XZ_DATA_ERROR; + + s->block.hash.unpadded += s->block_header.size + + s->block.compressed; + +#if defined(XZ_DEC_ANY_CHECK) || defined(XZ_USE_SHA256) + s->block.hash.unpadded += check_sizes[s->check_type]; +#else + if (s->check_type == XZ_CHECK_CRC32) + s->block.hash.unpadded += 4; + else if (IS_CRC64(s->check_type)) + s->block.hash.unpadded += 8; +#endif + + s->block.hash.uncompressed += s->block.uncompressed; + s->block.hash.crc32 = xz_crc32( + (const uint8_t *)&s->block.hash, + sizeof(s->block.hash), s->block.hash.crc32); + + ++s->block.count; + } + + return ret; +} + +/* Update the Index size and the CRC32 value. */ +static void index_update(struct xz_dec *s, const struct xz_buf *b) +{ + size_t in_used = b->in_pos - s->in_start; + s->index.size += in_used; + s->crc = xz_crc32(b->in + s->in_start, in_used, s->crc); +} + +/* + * Decode the Number of Records, Unpadded Size, and Uncompressed Size + * fields from the Index field. That is, Index Padding and CRC32 are not + * decoded by this function. + * + * This can return XZ_OK (more input needed), XZ_STREAM_END (everything + * successfully decoded), or XZ_DATA_ERROR (input is corrupt). + */ +static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + do { + ret = dec_vli(s, b->in, &b->in_pos, b->in_size); + if (ret != XZ_STREAM_END) { + index_update(s, b); + return ret; + } + + switch (s->index.sequence) { + case SEQ_INDEX_COUNT: + s->index.count = s->vli; + + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if (s->index.count != s->block.count) + return XZ_DATA_ERROR; + + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + + case SEQ_INDEX_UNPADDED: + s->index.hash.unpadded += s->vli; + s->index.sequence = SEQ_INDEX_UNCOMPRESSED; + break; + + case SEQ_INDEX_UNCOMPRESSED: + s->index.hash.uncompressed += s->vli; + s->index.hash.crc32 = xz_crc32( + (const uint8_t *)&s->index.hash, + sizeof(s->index.hash), + s->index.hash.crc32); + --s->index.count; + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + } + } while (s->index.count > 0); + + return XZ_STREAM_END; +} + +/* + * Validate that the next four or eight input bytes match the value + * of s->crc. s->pos must be zero when starting to validate the first byte. + * The "bits" argument allows using the same code for both CRC32 and CRC64. 
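+ * (bits is 32 or 64; the stored check value is compared one byte at a
+ * time in little-endian order, with s->pos tracking the progress.)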
+ */
+static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b,
+				uint32_t bits)
+{
+	do {
+		if (b->in_pos == b->in_size)
+			return XZ_OK;
+
+		if (((s->crc >> s->pos) & 0xFF) != b->in[b->in_pos++])
+			return XZ_DATA_ERROR;
+
+		s->pos += 8;
+
+	} while (s->pos < bits);
+
+	s->crc = 0;
+	s->pos = 0;
+
+	return XZ_STREAM_END;
+}
+
+#ifdef XZ_DEC_ANY_CHECK
+/*
+ * Skip over the Check field when the Check ID is not supported.
+ * Returns true once the whole Check field has been skipped over.
+ */
+static bool check_skip(struct xz_dec *s, struct xz_buf *b)
+{
+	while (s->pos < check_sizes[s->check_type]) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		++b->in_pos;
+		++s->pos;
+	}
+
+	s->pos = 0;
+
+	return true;
+}
+#endif
+
+/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
+static enum xz_ret dec_stream_header(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
+		return XZ_FORMAT_ERROR;
+
+	if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
+			!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
+		return XZ_OPTIONS_ERROR;
+
+	/*
+	 * Of integrity checks, we support none (Check ID = 0),
+	 * CRC32 (Check ID = 1), and optionally CRC64 (Check ID = 4)
+	 * and SHA-256 (Check ID = 10). However, if XZ_DEC_ANY_CHECK
+	 * is defined, we will accept other check types too, but then
+	 * the check won't be verified and a warning
+	 * (XZ_UNSUPPORTED_CHECK) will be given.
+	 */
+	if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX)
+		return XZ_OPTIONS_ERROR;
+
+	s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
+
+	if (s->check_type > XZ_CHECK_CRC32 && !IS_CRC64(s->check_type)
+			&& !IS_SHA256(s->check_type)) {
+#ifdef XZ_DEC_ANY_CHECK
+		return XZ_UNSUPPORTED_CHECK;
+#else
+		return XZ_OPTIONS_ERROR;
+#endif
+	}
+
+	return XZ_OK;
+}
+
+/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
+static enum xz_ret dec_stream_footer(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
+		return XZ_DATA_ERROR;
+
+	if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Validate Backward Size. Note that we never added the size of the
+	 * Index CRC32 field to s->index.size, thus we use s->index.size / 4
+	 * instead of s->index.size / 4 - 1.
+	 */
+	if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Use XZ_STREAM_END instead of XZ_OK to be more convenient
+	 * for the caller.
+	 */
+	return XZ_STREAM_END;
+}
+
+/* Decode the Block Header and initialize the filter chain. */
+static enum xz_ret dec_block_header(struct xz_dec *s)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Validate the CRC32. We know that the temp buffer is at least
+	 * eight bytes so this is safe.
+	 */
+	s->temp.size -= 4;
+	if (xz_crc32(s->temp.buf, s->temp.size, 0)
+			!= get_le32(s->temp.buf + s->temp.size))
+		return XZ_DATA_ERROR;
+
+	s->temp.pos = 2;
+
+	/*
+	 * Catch unsupported Block Flags. We support only one or two filters
+	 * in the chain, so we catch that with the same test.
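+	 * (The low two bits of the Block Flags byte hold the number of
+	 * extra filters, so masking with 0x3E instead of 0x3F permits
+	 * exactly one extra filter when BCJ support is compiled in.)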
+ */ +#ifdef XZ_DEC_BCJ + if (s->temp.buf[1] & 0x3E) +#else + if (s->temp.buf[1] & 0x3F) +#endif + return XZ_OPTIONS_ERROR; + + /* Compressed Size */ + if (s->temp.buf[1] & 0x40) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.compressed = s->vli; + } else { + s->block_header.compressed = VLI_UNKNOWN; + } + + /* Uncompressed Size */ + if (s->temp.buf[1] & 0x80) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.uncompressed = s->vli; + } else { + s->block_header.uncompressed = VLI_UNKNOWN; + } + +#ifdef XZ_DEC_BCJ + /* If there are two filters, the first one must be a BCJ filter. */ + s->bcj_active = s->temp.buf[1] & 0x01; + if (s->bcj_active) { + if (s->temp.size - s->temp.pos < 2) + return XZ_OPTIONS_ERROR; + + ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* + * We don't support custom start offset, + * so Size of Properties must be zero. + */ + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + } +#endif + + /* Valid Filter Flags always take at least two bytes. */ + if (s->temp.size - s->temp.pos < 2) + return XZ_DATA_ERROR; + + /* Filter ID = LZMA2 */ + if (s->temp.buf[s->temp.pos++] != 0x21) + return XZ_OPTIONS_ERROR; + + /* Size of Properties = 1-byte Filter Properties */ + if (s->temp.buf[s->temp.pos++] != 0x01) + return XZ_OPTIONS_ERROR; + + /* Filter Properties contains LZMA2 dictionary size. */ + if (s->temp.size - s->temp.pos < 1) + return XZ_DATA_ERROR; + + ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* The rest must be Header Padding. */ + while (s->temp.pos < s->temp.size) + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + + s->temp.pos = 0; + s->block.compressed = 0; + s->block.uncompressed = 0; + + return XZ_OK; +} + +static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s->in_start = b->in_pos; + + while (true) { + switch (s->sequence) { + case SEQ_STREAM_HEADER: + /* + * Stream Header is copied to s->temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if (!fill_temp(s, b)) + return XZ_OK; + + /* + * If dec_stream_header() returns + * XZ_UNSUPPORTED_CHECK, it is still possible + * to continue decoding if working in multi-call + * mode. Thus, update s->sequence before calling + * dec_stream_header(). + */ + s->sequence = SEQ_BLOCK_START; + + ret = dec_stream_header(s); + if (ret != XZ_OK) + return ret; + + fallthrough; + + case SEQ_BLOCK_START: + /* We need one byte of input to continue. */ + if (b->in_pos == b->in_size) + return XZ_OK; + + /* See if this is the beginning of the Index field. */ + if (b->in[b->in_pos] == 0) { + s->in_start = b->in_pos++; + s->sequence = SEQ_INDEX; + break; + } + + /* + * Calculate the size of the Block Header and + * prepare to decode it. 
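+			 * (The encoded byte maps to (value + 1) * 4 bytes,
+			 * so a Block Header is 8 to 1024 bytes long.)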
+ */ + s->block_header.size + = ((uint32_t)b->in[b->in_pos] + 1) * 4; + + s->temp.size = s->block_header.size; + s->temp.pos = 0; + s->sequence = SEQ_BLOCK_HEADER; + + fallthrough; + + case SEQ_BLOCK_HEADER: + if (!fill_temp(s, b)) + return XZ_OK; + + ret = dec_block_header(s); + if (ret != XZ_OK) + return ret; + +#ifdef XZ_USE_SHA256 + if (s->check_type == XZ_CHECK_SHA256) + xz_sha256_reset(&s->sha256); +#endif + + s->sequence = SEQ_BLOCK_UNCOMPRESS; + + fallthrough; + + case SEQ_BLOCK_UNCOMPRESS: + ret = dec_block(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_BLOCK_PADDING; + + fallthrough; + + case SEQ_BLOCK_PADDING: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + while (s->block.compressed & 3) { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + + ++s->block.compressed; + } + + s->sequence = SEQ_BLOCK_CHECK; + + fallthrough; + + case SEQ_BLOCK_CHECK: + if (s->check_type == XZ_CHECK_CRC32) { + ret = crc_validate(s, b, 32); + if (ret != XZ_STREAM_END) + return ret; + } + else if (IS_CRC64(s->check_type)) { + ret = crc_validate(s, b, 64); + if (ret != XZ_STREAM_END) + return ret; + } +#ifdef XZ_USE_SHA256 + else if (s->check_type == XZ_CHECK_SHA256) { + s->temp.size = 32; + if (!fill_temp(s, b)) + return XZ_OK; + + if (!xz_sha256_validate(s->temp.buf, + &s->sha256)) + return XZ_DATA_ERROR; + + s->pos = 0; + } +#endif +#ifdef XZ_DEC_ANY_CHECK + else if (!check_skip(s, b)) { + return XZ_OK; + } +#endif + + s->sequence = SEQ_BLOCK_START; + break; + + case SEQ_INDEX: + ret = dec_index(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_INDEX_PADDING; + + fallthrough; + + case SEQ_INDEX_PADDING: + while ((s->index.size + (b->in_pos - s->in_start)) + & 3) { + if (b->in_pos == b->in_size) { + index_update(s, b); + return XZ_OK; + } + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + } + + /* Finish the CRC32 value and Index size. */ + index_update(s, b); + + /* Compare the hashes to validate the Index field. */ + if (!memeq(&s->block.hash, &s->index.hash, + sizeof(s->block.hash))) + return XZ_DATA_ERROR; + + s->sequence = SEQ_INDEX_CRC32; + + fallthrough; + + case SEQ_INDEX_CRC32: + ret = crc_validate(s, b, 32); + if (ret != XZ_STREAM_END) + return ret; + + s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_FOOTER; + + fallthrough; + + case SEQ_STREAM_FOOTER: + if (!fill_temp(s, b)) + return XZ_OK; + + return dec_stream_footer(s); + + case SEQ_STREAM_PADDING: + /* Never reached, only silencing a warning */ + break; + } + } + + /* Never reached */ +} + +/* + * xz_dec_run() is a wrapper for dec_main() to handle some special cases in + * multi-call and single-call decoding. + * + * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we + * are not going to make any progress anymore. This is to prevent the caller + * from calling us infinitely when the input file is truncated or otherwise + * corrupt. Since zlib-style API allows that the caller fills the input buffer + * only when the decoder doesn't produce any new output, we have to be careful + * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only + * after the second consecutive call to xz_dec_run() that makes no progress. 
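+ * ("No progress" means that neither b->in_pos nor b->out_pos advanced
+ * during the call; the in_start/out_start comparison below detects this.)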
+ * + * In single-call mode, if we couldn't decode everything and no error + * occurred, either the input is truncated or the output buffer is too small. + * Since we know that the last input byte never produces any output, we know + * that if all the input was consumed and decoding wasn't finished, the file + * must be corrupt. Otherwise the output buffer has to be too small or the + * file is corrupt in a way that decoding it produces too big output. + * + * If single-call decoding fails, we reset b->in_pos and b->out_pos back to + * their original values. This is because with some filter chains there won't + * be any valid uncompressed data in the output buffer unless the decoding + * actually succeeds (that's the price to pay of using the output buffer as + * the workspace). + */ +XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b) +{ + size_t in_start; + size_t out_start; + enum xz_ret ret; + + if (DEC_IS_SINGLE(s->mode)) + xz_dec_reset(s); + + in_start = b->in_pos; + out_start = b->out_pos; + ret = dec_main(s, b); + + if (DEC_IS_SINGLE(s->mode)) { + if (ret == XZ_OK) + ret = b->in_pos == b->in_size + ? XZ_DATA_ERROR : XZ_BUF_ERROR; + + if (ret != XZ_STREAM_END) { + b->in_pos = in_start; + b->out_pos = out_start; + } + + } else if (ret == XZ_OK && in_start == b->in_pos + && out_start == b->out_pos) { + if (s->allow_buf_error) + ret = XZ_BUF_ERROR; + + s->allow_buf_error = true; + } else { + s->allow_buf_error = false; + } + + return ret; +} + +#ifdef XZ_DEC_CONCATENATED +XZ_EXTERN enum xz_ret xz_dec_catrun(struct xz_dec *s, struct xz_buf *b, + int finish) +{ + enum xz_ret ret; + + if (DEC_IS_SINGLE(s->mode)) { + xz_dec_reset(s); + finish = true; + } + + while (true) { + if (s->sequence == SEQ_STREAM_PADDING) { + /* + * Skip Stream Padding. Its size must be a multiple + * of four bytes which is tracked with s->pos. + */ + while (true) { + if (b->in_pos == b->in_size) { + /* + * Note that if we are repeatedly + * given no input and finish is false, + * we will keep returning XZ_OK even + * though no progress is being made. + * The lack of XZ_BUF_ERROR support + * isn't a problem here because a + * reasonable caller will eventually + * provide more input or set finish + * to true. + */ + if (!finish) + return XZ_OK; + + if (s->pos != 0) + return XZ_DATA_ERROR; + + return XZ_STREAM_END; + } + + if (b->in[b->in_pos] != 0x00) { + if (s->pos != 0) + return XZ_DATA_ERROR; + + break; + } + + ++b->in_pos; + s->pos = (s->pos + 1) & 3; + } + + /* + * More input remains. It should be a new Stream. + * + * In single-call mode xz_dec_run() will always call + * xz_dec_reset(). Thus, we need to do it here only + * in multi-call mode. 
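+		 * (DEC_IS_MULTI is true for both the XZ_PREALLOC and
+		 * XZ_DYNALLOC modes.)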
+ */
+			if (DEC_IS_MULTI(s->mode))
+				xz_dec_reset(s);
+		}
+
+		ret = xz_dec_run(s, b);
+
+		if (ret != XZ_STREAM_END)
+			break;
+
+		s->sequence = SEQ_STREAM_PADDING;
+	}
+
+	return ret;
+}
+#endif
+
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+{
+	struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->mode = mode;
+
+#ifdef XZ_DEC_BCJ
+	s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
+	if (s->bcj == NULL)
+		goto error_bcj;
+#endif
+
+	s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
+	if (s->lzma2 == NULL)
+		goto error_lzma2;
+
+	xz_dec_reset(s);
+	return s;
+
+error_lzma2:
+#ifdef XZ_DEC_BCJ
+	xz_dec_bcj_end(s->bcj);
+error_bcj:
+#endif
+	kfree(s);
+	return NULL;
+}
+
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+{
+	s->sequence = SEQ_STREAM_HEADER;
+	s->allow_buf_error = false;
+	s->pos = 0;
+	s->crc = 0;
+	memzero(&s->block, sizeof(s->block));
+	memzero(&s->index, sizeof(s->index));
+	s->temp.pos = 0;
+	s->temp.size = STREAM_HEADER_SIZE;
+}
+
+XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+{
+	if (s != NULL) {
+		xz_dec_lzma2_end(s->lzma2);
+#ifdef XZ_DEC_BCJ
+		xz_dec_bcj_end(s->bcj);
+#endif
+		kfree(s);
+	}
+}
diff --git a/tools/lib/xz/xz_lzma2.h b/tools/lib/xz/xz_lzma2.h
new file mode 100644
index 0000000..d2632b7
--- /dev/null
+++ b/tools/lib/xz/xz_lzma2.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: 0BSD */
+
+/*
+ * LZMA2 definitions
+ *
+ * Authors: Lasse Collin
+ *          Igor Pavlov
+ */
+
+#ifndef XZ_LZMA2_H
+#define XZ_LZMA2_H
+
+/* Range coder constants */
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (1 << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+/*
+ * Maximum number of position states. A position state is the lowest pb
+ * number of bits of the current uncompressed offset. In some places there
+ * are different sets of probabilities for different position states.
+ */
+#define POS_STATES_MAX (1 << 4)
+
+/*
+ * This enum is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ * - Literal: One 8-bit byte
+ * - Match: Repeat a chunk of data at some distance
+ * - Long repeat: Multi-byte match at a recently seen distance
+ * - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are in the form STATE_oldest_older_previous. REP means
+ * either short or long repeated match, and NONLIT means any non-literal.
+ */
+enum lzma_state {
+	STATE_LIT_LIT,
+	STATE_MATCH_LIT_LIT,
+	STATE_REP_LIT_LIT,
+	STATE_SHORTREP_LIT_LIT,
+	STATE_MATCH_LIT,
+	STATE_REP_LIT,
+	STATE_SHORTREP_LIT,
+	STATE_LIT_MATCH,
+	STATE_LIT_LONGREP,
+	STATE_LIT_SHORTREP,
+	STATE_NONLIT_MATCH,
+	STATE_NONLIT_REP
+};
+
+/* Total number of states */
+#define STATES 12
+
+/* The lowest 7 states indicate that the previous state was a literal. */
+#define LIT_STATES 7
+
+/* Indicate that the latest symbol was a literal. */
+static inline void lzma_state_literal(enum lzma_state *state)
+{
+	if (*state <= STATE_SHORTREP_LIT_LIT)
+		*state = STATE_LIT_LIT;
+	else if (*state <= STATE_LIT_SHORTREP)
+		*state -= 3;
+	else
+		*state -= 6;
+}
+
+/* Indicate that the latest symbol was a match. */
+static inline void lzma_state_match(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
+}
+
+/* Indicate that the latest symbol was a long repeated match. */
+static inline void lzma_state_long_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
+}
+
+/* Indicate that the latest symbol was a short match. */
+static inline void lzma_state_short_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
+}
+
+/* Test if the previous symbol was a literal. */
+static inline bool lzma_state_is_literal(enum lzma_state state)
+{
+	return state < LIT_STATES;
+}
+
+/* Each literal coder is divided into three sections:
+ * - 0x001-0x0FF: Without match byte
+ * - 0x101-0x1FF: With match byte; match bit is 0
+ * - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * Match byte is used when the previous LZMA symbol was something other
+ * than a literal (that is, it was some kind of match).
+ */
+#define LITERAL_CODER_SIZE 0x300
+
+/* Maximum number of literal coders */
+#define LITERAL_CODERS_MAX (1 << 4)
+
+/* Minimum length of a match is two bytes. */
+#define MATCH_LEN_MIN 2
+
+/* Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length   Bits
+ *  2-9      4 = Choice=0 + 3 bits
+ * 10-17     5 = Choice=1 + Choice2=0 + 3 bits
+ * 18-273   10 = Choice=1 + Choice2=1 + 8 bits
+ */
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+/*
+ * Maximum length of a match is 273 which is a result of the encoding
+ * described above.
+ */
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+#define DIST_STATES 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+static inline uint32_t lzma_get_dist_state(uint32_t len)
+{
+	return len < DIST_STATES + MATCH_LEN_MIN
+			? len - MATCH_LEN_MIN : DIST_STATES - 1;
+}
+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot. This way encoding a 32-bit
+ * value takes 6-36 bits, larger values taking more bits.
+ */
+#define DIST_SLOT_BITS 6
+#define DIST_SLOTS (1 << DIST_SLOT_BITS)
+
+/* Match distances up to 127 are fully encoded using probabilities. Since
+ * the highest two bits (distance slot) are always encoded using six bits,
+ * the distances 0-3 don't need any additional bits to encode, since the
+ * distance slot itself is the same as the actual distance. DIST_MODEL_START
+ * indicates the first distance slot where at least one additional bit is
+ * needed.
+ */
+#define DIST_MODEL_START 4
+
+/*
+ * Match distances greater than 127 are encoded in three pieces:
+ * - distance slot: the highest two bits
+ * - direct bits: 2-26 bits below the highest two bits
+ * - alignment bits: four lowest bits
+ *
+ * Direct bits don't use any probabilities.
+ *
+ * The distance slot value of 14 is for distances 128-191.
+ */
+#define DIST_MODEL_END 14
+
+/* Distance slots that indicate a distance <= 127. */
+#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+/*
+ * For match distances greater than 127, only the highest two bits and the
+ * lowest four bits (alignment) are encoded using probabilities.
+ */
+#define ALIGN_BITS 4
+#define ALIGN_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+/* Total number of all probability variables */
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+
+/*
+ * LZMA remembers the four most recent match distances. Reusing these
+ * distances tends to take less space than re-encoding the actual
+ * distance value.
+ */
+#define REPS 4
+
+#endif
diff --git a/tools/lib/xz/xz_private.h b/tools/lib/xz/xz_private.h
new file mode 100644
index 0000000..7387401
--- /dev/null
+++ b/tools/lib/xz/xz_private.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: 0BSD */
+
+/*
+ * Private includes and definitions
+ *
+ * Author: Lasse Collin
+ */
+
+#ifndef XZ_PRIVATE_H
+#define XZ_PRIVATE_H
+
+#ifdef __KERNEL__
+#	include <linux/xz.h>
+#	include <linux/kernel.h>
+#	include <asm/unaligned.h>
+	/* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+#	ifndef XZ_PREBOOT
+#		include <linux/slab.h>
+#		include <linux/vmalloc.h>
+#		include <linux/string.h>
+#		ifdef CONFIG_XZ_DEC_X86
+#			define XZ_DEC_X86
+#		endif
+#		ifdef CONFIG_XZ_DEC_POWERPC
+#			define XZ_DEC_POWERPC
+#		endif
+#		ifdef CONFIG_XZ_DEC_IA64
+#			define XZ_DEC_IA64
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARM
+#			define XZ_DEC_ARM
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARMTHUMB
+#			define XZ_DEC_ARMTHUMB
+#		endif
+#		ifdef CONFIG_XZ_DEC_SPARC
+#			define XZ_DEC_SPARC
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARM64
+#			define XZ_DEC_ARM64
+#		endif
+#		ifdef CONFIG_XZ_DEC_RISCV
+#			define XZ_DEC_RISCV
+#		endif
+#		ifdef CONFIG_XZ_DEC_MICROLZMA
+#			define XZ_DEC_MICROLZMA
+#		endif
+#		define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#		define memzero(buf, size) memset(buf, 0, size)
+#	endif
+#	define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#else
+	/*
+	 * For userspace builds, use a separate header to define the required
+	 * macros and functions. This makes it easier to adapt the code into
+	 * different environments and avoids clutter in the Linux kernel tree.
+	 */
+#	include "xz_config.h"
+#endif
+
+/* If no specific decoding mode is requested, enable support for all modes. */
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
+		&& !defined(XZ_DEC_DYNALLOC)
+#	define XZ_DEC_SINGLE
+#	define XZ_DEC_PREALLOC
+#	define XZ_DEC_DYNALLOC
+#endif
+
+/*
+ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
+ * of the supported modes are enabled, these macros will evaluate to true or
+ * false at compile time and thus allow the compiler to omit unneeded code.
+ */
+#ifdef XZ_DEC_SINGLE
+#	define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#else
+#	define DEC_IS_SINGLE(mode) (false)
+#endif
+
+#ifdef XZ_DEC_PREALLOC
+#	define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#else
+#	define DEC_IS_PREALLOC(mode) (false)
+#endif
+
+#ifdef XZ_DEC_DYNALLOC
+#	define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#else
+#	define DEC_IS_DYNALLOC(mode) (false)
+#endif
+
+#if !defined(XZ_DEC_SINGLE)
+#	define DEC_IS_MULTI(mode) (true)
+#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
+#	define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#else
+#	define DEC_IS_MULTI(mode) (false)
+#endif
+
+/*
+ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
+ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
+ */
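+/*
+ * (Note: the kptools build does not appear to define any of the XZ_DEC_*
+ * filter macros, so XZ_DEC_BCJ is expected to stay undefined here and
+ * only plain LZMA2 filter chains are decoded.)
+ */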
+ */ +#ifndef XZ_DEC_BCJ +# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \ + || defined(XZ_DEC_IA64) \ + || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \ + || defined(XZ_DEC_SPARC) || defined(XZ_DEC_ARM64) \ + || defined(XZ_DEC_RISCV) +# define XZ_DEC_BCJ +# endif +#endif + +struct xz_sha256 { + /* Buffered input data */ + uint8_t data[64]; + + /* Internal state and the final hash value */ + uint32_t state[8]; + + /* Size of the input data */ + uint64_t size; +}; + +/* Reset the SHA-256 state to prepare for a new calculation. */ +XZ_EXTERN void xz_sha256_reset(struct xz_sha256 *s); + +/* Update the SHA-256 state with new data. */ +XZ_EXTERN void xz_sha256_update(const uint8_t *buf, size_t size, + struct xz_sha256 *s); + +/* + * Finish the SHA-256 calculation. Compare the result with the first 32 bytes + * from buf. Return true if the values are equal and false if they aren't. + */ +XZ_EXTERN bool xz_sha256_validate(const uint8_t *buf, struct xz_sha256 *s); + +/* + * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used + * before calling xz_dec_lzma2_run(). + */ +XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, + uint32_t dict_max); + +/* + * Decode the LZMA2 properties (one byte) and reset the decoder. Return + * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not + * big enough, and XZ_OPTIONS_ERROR if props indicates something that this + * decoder doesn't support. + */ +XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, + uint8_t props); + +/* Decode raw LZMA2 stream from b->in to b->out. */ +XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, + struct xz_buf *b); + +/* Free the memory allocated for the LZMA2 decoder. */ +XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s); + +#ifdef XZ_DEC_BCJ +/* + * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before + * calling xz_dec_bcj_run(). + */ +XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call); + +/* + * Decode the Filter ID of a BCJ filter. This implementation doesn't + * support custom start offsets, so no decoding of Filter Properties + * is needed. Returns XZ_OK if the given Filter ID is supported. + * Otherwise XZ_OPTIONS_ERROR is returned. + */ +XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id); + +/* + * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is + * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run() + * must be called directly. + */ +XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, + struct xz_dec_lzma2 *lzma2, + struct xz_buf *b); + +/* Free the memory allocated for the BCJ filters. */ +#define xz_dec_bcj_end(s) kfree(s) +#endif + +#endif diff --git a/tools/lib/xz/xz_stream.h b/tools/lib/xz/xz_stream.h new file mode 100644 index 0000000..55f9f6f --- /dev/null +++ b/tools/lib/xz/xz_stream.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: 0BSD */ + +/* + * Definitions for handling the .xz file format + * + * Author: Lasse Collin + */ + +#ifndef XZ_STREAM_H +#define XZ_STREAM_H + +#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32 +# include +# undef crc32 +# define xz_crc32(buf, size, crc) \ + (~crc32_le(~(uint32_t)(crc), buf, size)) +#endif + +/* + * See the .xz file format specification at + * https://tukaani.org/xz/xz-file-format.txt + * to understand the container format. 
+ */ + +#define STREAM_HEADER_SIZE 12 + +#define HEADER_MAGIC "\3757zXZ" +#define HEADER_MAGIC_SIZE 6 + +#define FOOTER_MAGIC "YZ" +#define FOOTER_MAGIC_SIZE 2 + +/* + * Variable-length integer can hold a 63-bit unsigned integer or a special + * value indicating that the value is unknown. + * + * Experimental: vli_type can be defined to uint32_t to save a few bytes + * in code size (no effect on speed). Doing so limits the uncompressed and + * compressed size of the file to less than 256 MiB and may also weaken + * error detection slightly. + */ +typedef uint64_t vli_type; + +#define VLI_MAX ((vli_type)-1 / 2) +#define VLI_UNKNOWN ((vli_type)-1) + +/* Maximum encoded size of a VLI */ +#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7) + +/* Integrity Check types */ +enum xz_check { + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10 +}; + +/* Maximum possible Check ID */ +#define XZ_CHECK_MAX 15 + +#endif diff --git a/tools/patch.c b/tools/patch.c index 6924255..9dc5918 100644 --- a/tools/patch.c +++ b/tools/patch.c @@ -23,7 +23,7 @@ #include "preset.h" #include "symbol.h" #include "kpm.h" -#include "sha256.h" +#include "lib/sha/sha256.h" void read_kernel_file(const char *path, kernel_file_t *kernel_file) {