
Commit 0c260e1

deps: update zlib to 1.3.0.1-motley-7d77fb7

Authored by nodejs-github-bot and committed by richardlau on May 16, 2024.

PR-URL: #52516
Reviewed-By: Marco Ippolito <marcoippolito54@gmail.com>
Reviewed-By: Mohammed Keyvanzadeh <mohammadkeyvanzade94@gmail.com>
Reviewed-By: Luigi Pinca <luigipinca@gmail.com>

Parent: 1152d7f
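In brief, per the file diffs below: this update pulls in a rewritten RISC-V (RVV) Adler-32 kernel, a generic non-SIMD inflate chunk-copy path, new zpipe and minigzip build targets in both GN and CMake, large-payload and per-compression-level tests, the removal of an unused include, and a test cleanup that swaps base::SupportsWeakPtr for an explicit base::WeakPtrFactory. (The "motley" version string comes from Chromium's zlib fork, which Node.js vendors in deps/zlib.)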

9 files changed: +1031 -98 lines changed
 

deps/zlib/BUILD.gn (+30)

@@ -441,6 +441,36 @@ executable("zlib_bench") {
   configs += [ "//build/config/compiler:no_chromium_code" ]
 }
 
+executable("minigzip") {
+  include_dirs = [ "." ]
+
+  sources = [ "test/minigzip.c" ]
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_speed" ]
+  }
+
+  deps = [ ":zlib" ]
+
+  configs -= [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+}
+
+executable("zpipe") {
+  include_dirs = [ "." ]
+
+  sources = [ "examples/zpipe.c" ]
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_speed" ]
+  }
+
+  deps = [ ":zlib" ]
+
+  configs -= [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+}
+
 if (!is_win || target_os != "winuwp") {
   executable("minizip_bin") {
     include_dirs = [ "." ]
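Note: building these targets is not part of this commit, but in a typical GN/Ninja workflow the two new tools would presumably be built by name, mirroring the existing zlib_bench and minizip_bin targets:

    gn gen out/Release
    ninja -C out/Release minigzip zpipe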

deps/zlib/CMakeLists.txt (+33 -4)

@@ -26,6 +26,8 @@ option(ENABLE_SIMD_AVX512 "Enable SIMD AXV512 optimizations" OFF)
 option(USE_ZLIB_RABIN_KARP_HASH "Enable bitstream compatibility with canonical zlib" OFF)
 option(BUILD_UNITTESTS "Enable standalone unit tests build" OFF)
 option(BUILD_MINIZIP_BIN "Enable building minzip_bin tool" OFF)
+option(BUILD_ZPIPE "Enable building zpipe tool" OFF)
+option(BUILD_MINIGZIP "Enable building minigzip tool" OFF)
 
 if (USE_ZLIB_RABIN_KARP_HASH)
   add_definitions(-DUSE_ZLIB_RABIN_KARP_ROLLING_HASH)
@@ -79,9 +81,16 @@ if (ENABLE_SIMD_OPTIMIZATIONS)
     add_definitions(-DRISCV_RVV)
     add_definitions(-DDEFLATE_SLIDE_HASH_RVV)
     add_definitions(-DADLER32_SIMD_RVV)
-    #TODO(cavalcantii): add remaining flags as we port optimizations to RVV.
-    # Required by CPU features detection code.
-    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --target=riscv64-unknown-linux-gnu -march=rv64gcv")
+
+    # TODO(cavalcantii): add remaining flags as we port optimizations to RVV.
+    # chunk_copy is required for READ64 and unconditional decode of literals.
+    add_definitions(-DINFLATE_CHUNK_GENERIC)
+    add_definitions(-DINFLATE_CHUNK_READ_64LE)
+
+    # Tested with clang-17, unaligned loads are required by read64 & chunk_copy.
+    # TODO(cavalcantii): replace internal clang flags for -munaligned-access
+    # when we have a newer compiler available.
+    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --target=riscv64-unknown-linux-gnu -march=rv64gcv -Xclang -target-feature -Xclang +unaligned-scalar-mem")
   endif()
 
 endif()
@@ -192,9 +201,14 @@ set(ZLIB_SRCS
 if (ENABLE_SIMD_OPTIMIZATIONS)
   if (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64")
     message("RISCVV: Add optimizations.")
+    list(REMOVE_ITEM ZLIB_SRCS inflate.c)
     list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h)
+    list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/chunkcopy.h)
     list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h)
+
     list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c)
+    list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.c)
+    list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inflate.c)
     list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c)
   else()
     list(REMOVE_ITEM ZLIB_SRCS inflate.c)
@@ -339,7 +353,7 @@ if (BUILD_UNITTESTS)
 endif()
 
 #============================================================================
-# Minigzip tool
+# Minizip tool
 #============================================================================
 # TODO(cavalcantii): get it working on Windows.
 if (BUILD_MINIZIP_BIN)
@@ -349,3 +363,18 @@ if (BUILD_MINIZIP_BIN)
   )
   target_link_libraries(minizip_bin zlib)
 endif()
+
+#============================================================================
+# zpipe tool
+#============================================================================
+if (BUILD_ZPIPE)
+  add_executable(zpipe examples/zpipe.c)
+  target_link_libraries(zpipe zlib)
+endif()
+#============================================================================
+# MiniGzip tool
+#============================================================================
+if (BUILD_MINIGZIP)
+  add_executable(minigzip_bin test/minigzip.c)
+  target_link_libraries(minigzip_bin zlib)
+endif()
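Note: both new options default to OFF. In a standalone CMake build of this directory they would presumably be switched on at configure time, for example:

    cmake -DBUILD_ZPIPE=ON -DBUILD_MINIGZIP=ON .
    make zpipe minigzip_bin

Note the asymmetry in target names: CMake builds the minigzip binary as minigzip_bin, while the GN target above is plain minigzip.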

deps/zlib/adler32_simd.c (+76 -90)

@@ -41,9 +41,6 @@
  * [2] zlib adler32_z() uses this fact to implement NMAX-block-based updates
  * of the adler s1 s2 of uint32_t type (see adler32.c).
  */
-/* Copyright (C) 2023 SiFive, Inc. All rights reserved.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
 
 #include "adler32_simd.h"
 
@@ -368,103 +365,92 @@ uint32_t ZLIB_INTERNAL adler32_simd_( /* NEON */
 
 #elif defined(ADLER32_SIMD_RVV)
 #include <riscv_vector.h>
-/* adler32_rvv.c - RVV version of Adler-32
- * RVV 1.0 code contributed by Alex Chiang <alex.chiang@sifive.com>
- * on https://github.com/zlib-ng/zlib-ng/pull/1532
- * Port from Simon Hosie's fork:
- * https://github.com/cloudflare/zlib/commit/40688b53c61cb9bfc36471acd2dc0800b7ebcab1
+
+/*
+ * Patch by Simon Hosie, from:
+ * https://github.com/cloudflare/zlib/pull/55
  */
 
 uint32_t ZLIB_INTERNAL adler32_simd_( /* RVV */
     uint32_t adler,
     const unsigned char *buf,
     unsigned long len)
 {
-    /* split Adler-32 into component sums */
-    uint32_t sum2 = (adler >> 16) & 0xffff;
-    adler &= 0xffff;
-
-    size_t left = len;
-    size_t vl = __riscv_vsetvlmax_e8m1();
-    vl = vl > 256 ? 256 : vl;
-    vuint32m4_t v_buf32_accu = __riscv_vmv_v_x_u32m4(0, vl);
-    vuint32m4_t v_adler32_prev_accu = __riscv_vmv_v_x_u32m4(0, vl);
-    vuint16m2_t v_buf16_accu;
-
-    /*
-     * We accumulate 8-bit data, and to prevent overflow, we have to use a 32-bit accumulator.
-     * However, adding 8-bit data into a 32-bit accumulator isn't efficient. We use 16-bit & 32-bit
-     * accumulators to boost performance.
-     *
-     * The block_size is the largest multiple of vl that <= 256, because overflow would occur when
-     * vl > 256 (255 * 256 <= UINT16_MAX).
-     *
-     * We accumulate 8-bit data into a 16-bit accumulator and then
-     * move the data into the 32-bit accumulator at the last iteration.
+    size_t vl = __riscv_vsetvlmax_e8m2();
+    const vuint16m4_t zero16 = __riscv_vmv_v_x_u16m4(0, vl);
+    vuint16m4_t a_sum = zero16;
+    vuint32m8_t b_sum = __riscv_vmv_v_x_u32m8(0, vl);
+
+    /* Deal with the part which is not a multiple of vl first; because it's
+     * easier to zero-stuff the beginning of the checksum than it is to tweak the
+     * multipliers and sums for odd lengths afterwards.
+     */
+    size_t head = len & (vl - 1);
+    if (head > 0) {
+        vuint8m2_t zero8 = __riscv_vmv_v_x_u8m2(0, vl);
+        vuint8m2_t in = __riscv_vle8_v_u8m2(buf, vl);
+        in = __riscv_vslideup(zero8, in, vl - head, vl);
+        vuint16m4_t in16 = __riscv_vwcvtu_x(in, vl);
+        a_sum = in16;
+        buf += head;
+    }
+
+    /* We have a 32-bit accumulator, and in each iteration we add 22-times a
+     * 16-bit value, plus another 16-bit value. We periodically subtract up to
+     * 65535 times BASE to avoid overflow. b_overflow estimates how often we
+     * need to do this subtraction.
+     */
+    const int b_overflow = BASE / 23;
+    int fixup = b_overflow;
+    ssize_t iters = (len - head) / vl;
+    while (iters > 0) {
+        const vuint16m4_t a_overflow = __riscv_vrsub(a_sum, BASE, vl);
+        int batch = iters < 22 ? iters : 22;
+        iters -= batch;
+        b_sum = __riscv_vwmaccu(b_sum, batch, a_sum, vl);
+        vuint16m4_t a_batch = zero16, b_batch = zero16;
+
+        /* Do a short batch, where neither a_sum nor b_sum can overflow a 16-bit
+         * register. Then add them back into the main accumulators.
          */
-    size_t block_size = (256 / vl) * vl;
-    size_t nmax_limit = (NMAX / block_size);
-    size_t cnt = 0;
-    while (left >= block_size) {
-        v_buf16_accu = __riscv_vmv_v_x_u16m2(0, vl);
-        size_t subprob = block_size;
-        while (subprob > 0) {
-            vuint8m1_t v_buf8 = __riscv_vle8_v_u8m1(buf, vl);
-            v_adler32_prev_accu = __riscv_vwaddu_wv_u32m4(v_adler32_prev_accu, v_buf16_accu, vl);
-            v_buf16_accu = __riscv_vwaddu_wv_u16m2(v_buf16_accu, v_buf8, vl);
-            buf += vl;
-            subprob -= vl;
-        }
-        v_adler32_prev_accu = __riscv_vmacc_vx_u32m4(v_adler32_prev_accu, block_size / vl, v_buf32_accu, vl);
-        v_buf32_accu = __riscv_vwaddu_wv_u32m4(v_buf32_accu, v_buf16_accu, vl);
-        left -= block_size;
-        /* do modulo once each block of NMAX size */
-        if (++cnt >= nmax_limit) {
-            v_adler32_prev_accu = __riscv_vremu_vx_u32m4(v_adler32_prev_accu, BASE, vl);
-            cnt = 0;
-        }
+        while (batch-- > 0) {
+            vuint8m2_t in8 = __riscv_vle8_v_u8m2(buf, vl);
+            buf += vl;
+            b_batch = __riscv_vadd(b_batch, a_batch, vl);
+            a_batch = __riscv_vwaddu_wv(a_batch, in8, vl);
         }
-    /* the left len <= 256 now, we can use 16-bit accum safely */
-    v_buf16_accu = __riscv_vmv_v_x_u16m2(0, vl);
-    size_t res = left;
-    while (left >= vl) {
-        vuint8m1_t v_buf8 = __riscv_vle8_v_u8m1(buf, vl);
-        v_adler32_prev_accu = __riscv_vwaddu_wv_u32m4(v_adler32_prev_accu, v_buf16_accu, vl);
-        v_buf16_accu = __riscv_vwaddu_wv_u16m2(v_buf16_accu, v_buf8, vl);
-        buf += vl;
-        left -= vl;
+        vbool4_t ov = __riscv_vmsgeu(a_batch, a_overflow, vl);
+        a_sum = __riscv_vadd(a_sum, a_batch, vl);
+        a_sum = __riscv_vadd_mu(ov, a_sum, a_sum, 65536 - BASE, vl);
+        b_sum = __riscv_vwaddu_wv(b_sum, b_batch, vl);
+        if (--fixup <= 0) {
+            b_sum = __riscv_vnmsac(b_sum, BASE, __riscv_vsrl(b_sum, 16, vl), vl);
+            fixup = b_overflow;
         }
-    v_adler32_prev_accu = __riscv_vmacc_vx_u32m4(v_adler32_prev_accu, res / vl, v_buf32_accu, vl);
-    v_adler32_prev_accu = __riscv_vremu_vx_u32m4(v_adler32_prev_accu, BASE, vl);
-    v_buf32_accu = __riscv_vwaddu_wv_u32m4(v_buf32_accu, v_buf16_accu, vl);
-
-    vuint32m4_t v_seq = __riscv_vid_v_u32m4(vl);
-    vuint32m4_t v_rev_seq = __riscv_vrsub_vx_u32m4(v_seq, vl, vl);
-    vuint32m4_t v_sum32_accu = __riscv_vmul_vv_u32m4(v_buf32_accu, v_rev_seq, vl);
-
-    v_sum32_accu = __riscv_vadd_vv_u32m4(v_sum32_accu, __riscv_vmul_vx_u32m4(v_adler32_prev_accu, vl, vl), vl);
-
-    vuint32m1_t v_sum2_sum = __riscv_vmv_s_x_u32m1(0, vl);
-    v_sum2_sum = __riscv_vredsum_vs_u32m4_u32m1(v_sum32_accu, v_sum2_sum, vl);
-    uint32_t sum2_sum = __riscv_vmv_x_s_u32m1_u32(v_sum2_sum);
-
-    sum2 += (sum2_sum + adler * (len - left));
-
-    vuint32m1_t v_adler_sum = __riscv_vmv_s_x_u32m1(0, vl);
-    v_adler_sum = __riscv_vredsum_vs_u32m4_u32m1(v_buf32_accu, v_adler_sum, vl);
-    uint32_t adler_sum = __riscv_vmv_x_s_u32m1_u32(v_adler_sum);
-
-    adler += adler_sum;
-
-    while (left--) {
-        adler += *buf++;
-        sum2 += adler;
-    }
-
-    sum2 %= BASE;
-    adler %= BASE;
-
-    return adler | (sum2 << 16);
+    }
+    /* Adjust per-lane sums to have appropriate offsets from the end of the
+     * buffer.
+     */
+    const vuint16m4_t off = __riscv_vrsub(__riscv_vid_v_u16m4(vl), vl, vl);
+    vuint16m4_t bsum16 = __riscv_vncvt_x(__riscv_vremu(b_sum, BASE, vl), vl);
+    b_sum = __riscv_vadd(__riscv_vwmulu(a_sum, off, vl),
+                         __riscv_vwmulu(bsum16, vl, vl), vl);
+    bsum16 = __riscv_vncvt_x(__riscv_vremu(b_sum, BASE, vl), vl);
+
+    /* And finally, do a horizontal sum across the registers for the final
+     * result.
+     */
+    uint32_t a = adler & 0xffff;
+    uint32_t b = ((adler >> 16) + a * (len % BASE)) % BASE;
+    vuint32m1_t sca = __riscv_vmv_v_x_u32m1(a, 1);
+    vuint32m1_t scb = __riscv_vmv_v_x_u32m1(b, 1);
+    sca = __riscv_vwredsumu(a_sum, sca, vl);
+    scb = __riscv_vwredsumu(bsum16, scb, vl);
+    a = __riscv_vmv_x(sca);
+    b = __riscv_vmv_x(scb);
+    a %= BASE;
+    b %= BASE;
+    return (b << 16) | a;
 }
 
 #endif /* ADLER32_SIMD_SSSE3 */
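Note: the RVV kernel above tracks the two standard Adler-32 component sums, a (1 plus the sum of all input bytes) and b (the sum of the successive values of a), each reduced modulo BASE = 65521; a_sum and b_sum hold per-lane partials that the tail code weights by lane offset and reduces horizontally. As a reference point only (this sketch is not part of the commit), the scalar recurrence it vectorizes is:

    #include <stddef.h>
    #include <stdint.h>

    #define BASE 65521u /* largest prime below 65536 */

    /* Scalar Adler-32: a = 1 + sum(bytes), b = sum of the running values
     * of a, both mod BASE. Packs b into the high 16 bits, matching the
     * `return (b << 16) | a;` at the end of the RVV kernel. Production
     * code batches the % BASE reductions, as the vector version does. */
    static uint32_t adler32_ref(uint32_t adler, const unsigned char *buf,
                                size_t len) {
        uint32_t a = adler & 0xffff;
        uint32_t b = (adler >> 16) & 0xffff;
        for (size_t i = 0; i < len; i++) {
            a = (a + buf[i]) % BASE;
            b = (b + a) % BASE;
        }
        return (b << 16) | a;
    }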

deps/zlib/contrib/optimizations/chunkcopy.h (+75)

@@ -21,8 +21,10 @@
 
 #if defined(__clang__) || defined(__GNUC__) || defined(__llvm__)
 #define Z_BUILTIN_MEMCPY __builtin_memcpy
+#define Z_BUILTIN_MEMSET __builtin_memset
 #else
 #define Z_BUILTIN_MEMCPY zmemcpy
+#define Z_BUILTIN_MEMSET zmemset
 #endif
 
 #if defined(INFLATE_CHUNK_SIMD_NEON)
@@ -31,6 +33,8 @@ typedef uint8x16_t z_vec128i_t;
 #elif defined(INFLATE_CHUNK_SIMD_SSE2)
 #include <emmintrin.h>
 typedef __m128i z_vec128i_t;
+#elif defined(INFLATE_CHUNK_GENERIC)
+typedef struct { uint8_t x[16]; } z_vec128i_t;
 #else
 #error chunkcopy.h inflate chunk SIMD is not defined for your build target
 #endif
@@ -265,6 +269,77 @@ static inline z_vec128i_t v_load8_dup(const void* src) {
 static inline void v_store_128(void* out, const z_vec128i_t vec) {
   _mm_storeu_si128((__m128i*)out, vec);
 }
+#elif defined(INFLATE_CHUNK_GENERIC)
+/*
+ * Default implementations for chunk-copy functions rely on memcpy() being
+ * inlined by the compiler for best performance. This is most likely to work
+ * as expected when the length argument is constant (as is the case here) and
+ * the target supports unaligned loads and stores. Since that's not always a
+ * safe assumption, this may need extra compiler arguments such as
+ * `-mno-strict-align` or `-munaligned-access`, or the availability of
+ * extensions like SIMD.
+ */
+
+/*
+ * v_load64_dup(): load *src as an unaligned 64-bit int and duplicate it in
+ * every 64-bit component of the 128-bit result (64-bit int splat).
+ */
+static inline z_vec128i_t v_load64_dup(const void* src) {
+  int64_t in;
+  Z_BUILTIN_MEMCPY(&in, src, sizeof(in));
+  z_vec128i_t out;
+  for (int i = 0; i < sizeof(out); i += sizeof(in)) {
+    Z_BUILTIN_MEMCPY((uint8_t*)&out + i, &in, sizeof(in));
+  }
+  return out;
+}
+
+/*
+ * v_load32_dup(): load *src as an unaligned 32-bit int and duplicate it in
+ * every 32-bit component of the 128-bit result (32-bit int splat).
+ */
+static inline z_vec128i_t v_load32_dup(const void* src) {
+  int32_t in;
+  Z_BUILTIN_MEMCPY(&in, src, sizeof(in));
+  z_vec128i_t out;
+  for (int i = 0; i < sizeof(out); i += sizeof(in)) {
+    Z_BUILTIN_MEMCPY((uint8_t*)&out + i, &in, sizeof(in));
+  }
+  return out;
+}
+
+/*
+ * v_load16_dup(): load *src as an unaligned 16-bit int and duplicate it in
+ * every 16-bit component of the 128-bit result (16-bit int splat).
+ */
+static inline z_vec128i_t v_load16_dup(const void* src) {
+  int16_t in;
+  Z_BUILTIN_MEMCPY(&in, src, sizeof(in));
+  z_vec128i_t out;
+  for (int i = 0; i < sizeof(out); i += sizeof(in)) {
+    Z_BUILTIN_MEMCPY((uint8_t*)&out + i, &in, sizeof(in));
+  }
+  return out;
+}
+
+/*
+ * v_load8_dup(): load the 8-bit int *src and duplicate it in every 8-bit
+ * component of the 128-bit result (8-bit int splat).
+ */
+static inline z_vec128i_t v_load8_dup(const void* src) {
+  int8_t in = *(const uint8_t*)src;
+  z_vec128i_t out;
+  Z_BUILTIN_MEMSET(&out, in, sizeof(out));
+  return out;
+}
+
+/*
+ * v_store_128(): store the 128-bit vec in a memory destination (that might
+ * not be 16-byte aligned) void* out.
+ */
+static inline void v_store_128(void* out, const z_vec128i_t vec) {
+  Z_BUILTIN_MEMCPY(out, &vec, sizeof(vec));
+}
 #endif
 
 /*
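Note: the generic path relies on a standard portability idiom: a fixed-size memcpy through a local variable is the well-defined way to express an unaligned load or store, and optimizing compilers typically inline it to a single memory instruction where the target allows (hence the build-flag caveats in the comment above). A self-contained sketch of the splat pattern, using hypothetical names (vec128, splat32) that are not part of the commit:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct { uint8_t x[16]; } vec128;

    /* Read an unaligned 32-bit value via memcpy and replicate it across
     * a 16-byte struct, the same pattern as v_load32_dup() above. */
    static vec128 splat32(const void *src) {
        uint32_t in;
        memcpy(&in, src, sizeof(in)); /* portable unaligned load */
        vec128 out;
        for (size_t i = 0; i < sizeof(out); i += sizeof(in))
            memcpy(out.x + i, &in, sizeof(in));
        return out;
    }

    int main(void) {
        const uint8_t bytes[] = { 1, 2, 3, 4, 5 };
        vec128 v = splat32(bytes + 1); /* deliberately misaligned source */
        for (size_t i = 0; i < sizeof(v.x); i++)
            printf("%u ", v.x[i]); /* prints "2 3 4 5" four times */
        printf("\n");
        return 0;
    }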

deps/zlib/contrib/tests/utils_unittest.cc (+22 -2)

@@ -20,7 +20,8 @@
 
 #include "zlib.h"
 
-void TestPayloads(size_t input_size, zlib_internal::WrapperType type) {
+void TestPayloads(size_t input_size, zlib_internal::WrapperType type,
+                  const int compression_level = Z_DEFAULT_COMPRESSION) {
   std::vector<unsigned char> input;
   input.reserve(input_size);
   for (size_t i = 1; i <= input_size; ++i)
@@ -36,7 +37,7 @@ void TestPayloads(size_t input_size, zlib_internal::WrapperType type) {
   unsigned long compressed_size = static_cast<unsigned long>(compressed.size());
   int result = zlib_internal::CompressHelper(
       type, compressed.data(), &compressed_size, input.data(), input.size(),
-      Z_DEFAULT_COMPRESSION, nullptr, nullptr);
+      compression_level, nullptr, nullptr);
   ASSERT_EQ(result, Z_OK);
 
   unsigned long decompressed_size =
@@ -67,6 +68,25 @@ TEST(ZlibTest, RawWrapper) {
     TestPayloads(i, zlib_internal::WrapperType::ZRAW);
 }
 
+TEST(ZlibTest, LargePayloads) {
+  static const size_t lengths[] = { 6000, 8000, 10'000, 15'000, 20'000, 30'000,
+                                    50'000, 100'000, 150'000, 2'500'000,
+                                    5'000'000, 10'000'000, 20'000'000 };
+
+  for (size_t length: lengths) {
+    TestPayloads(length, zlib_internal::WrapperType::ZLIB);
+    TestPayloads(length, zlib_internal::WrapperType::GZIP);
+  }
+}
+
+TEST(ZlibTest, CompressionLevels) {
+  static const int levels[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+  for (int level: levels) {
+    TestPayloads(5'000'000, zlib_internal::WrapperType::ZLIB, level);
+    TestPayloads(5'000'000, zlib_internal::WrapperType::GZIP, level);
+  }
+}
+
 TEST(ZlibTest, InflateCover) {
   cover_support();
   cover_wrap();

deps/zlib/examples/zpipe.c (+209)

@@ -0,0 +1,209 @@
+/* zpipe.c: example of proper use of zlib's inflate() and deflate()
+   Not copyrighted -- provided to the public domain
+   Version 1.4  11 December 2005  Mark Adler */
+
+/* Version history:
+   1.0  30 Oct 2004  First version
+   1.1   8 Nov 2004  Add void casting for unused return values
+                     Use switch statement for inflate() return values
+   1.2   9 Nov 2004  Add assertions to document zlib guarantees
+   1.3   6 Apr 2005  Remove incorrect assertion in inf()
+   1.4  11 Dec 2005  Add hack to avoid MSDOS end-of-line conversions
+                     Avoid some compiler warnings for input and output buffers
+ */
+
+#if defined(_WIN32) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
+#  define _CRT_NONSTDC_NO_DEPRECATE
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "zlib.h"
+
+#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__)
+#  include <fcntl.h>
+#  include <io.h>
+#  define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY)
+#else
+#  define SET_BINARY_MODE(file)
+#endif
+
+#define CHUNK 16384
+
+/* Compress from file source to file dest until EOF on source.
+   def() returns Z_OK on success, Z_MEM_ERROR if memory could not be
+   allocated for processing, Z_STREAM_ERROR if an invalid compression
+   level is supplied, Z_VERSION_ERROR if the version of zlib.h and the
+   version of the library linked do not match, or Z_ERRNO if there is
+   an error reading or writing the files. */
+int def(FILE *source, FILE *dest, int level)
+{
+    int ret, flush;
+    unsigned have;
+    z_stream strm;
+    unsigned char in[CHUNK];
+    unsigned char out[CHUNK];
+
+    /* allocate deflate state */
+    strm.zalloc = Z_NULL;
+    strm.zfree = Z_NULL;
+    strm.opaque = Z_NULL;
+    ret = deflateInit(&strm, level);
+    if (ret != Z_OK)
+        return ret;
+
+    /* compress until end of file */
+    do {
+        strm.avail_in = fread(in, 1, CHUNK, source);
+        if (ferror(source)) {
+            (void)deflateEnd(&strm);
+            return Z_ERRNO;
+        }
+        flush = feof(source) ? Z_FINISH : Z_NO_FLUSH;
+        strm.next_in = in;
+
+        /* run deflate() on input until output buffer not full, finish
+           compression if all of source has been read in */
+        do {
+            strm.avail_out = CHUNK;
+            strm.next_out = out;
+            ret = deflate(&strm, flush);    /* no bad return value */
+            assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
+            have = CHUNK - strm.avail_out;
+            if (fwrite(out, 1, have, dest) != have || ferror(dest)) {
+                (void)deflateEnd(&strm);
+                return Z_ERRNO;
+            }
+        } while (strm.avail_out == 0);
+        assert(strm.avail_in == 0);     /* all input will be used */
+
+        /* done when last data in file processed */
+    } while (flush != Z_FINISH);
+    assert(ret == Z_STREAM_END);        /* stream will be complete */
+
+    /* clean up and return */
+    (void)deflateEnd(&strm);
+    return Z_OK;
+}
+
+/* Decompress from file source to file dest until stream ends or EOF.
+   inf() returns Z_OK on success, Z_MEM_ERROR if memory could not be
+   allocated for processing, Z_DATA_ERROR if the deflate data is
+   invalid or incomplete, Z_VERSION_ERROR if the version of zlib.h and
+   the version of the library linked do not match, or Z_ERRNO if there
+   is an error reading or writing the files. */
+int inf(FILE *source, FILE *dest)
+{
+    int ret;
+    unsigned have;
+    z_stream strm;
+    unsigned char in[CHUNK];
+    unsigned char out[CHUNK];
+
+    /* allocate inflate state */
+    strm.zalloc = Z_NULL;
+    strm.zfree = Z_NULL;
+    strm.opaque = Z_NULL;
+    strm.avail_in = 0;
+    strm.next_in = Z_NULL;
+    ret = inflateInit(&strm);
+    if (ret != Z_OK)
+        return ret;
+
+    /* decompress until deflate stream ends or end of file */
+    do {
+        strm.avail_in = fread(in, 1, CHUNK, source);
+        if (ferror(source)) {
+            (void)inflateEnd(&strm);
+            return Z_ERRNO;
+        }
+        if (strm.avail_in == 0)
+            break;
+        strm.next_in = in;
+
+        /* run inflate() on input until output buffer not full */
+        do {
+            strm.avail_out = CHUNK;
+            strm.next_out = out;
+            ret = inflate(&strm, Z_NO_FLUSH);
+            assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
+            switch (ret) {
+            case Z_NEED_DICT:
+                ret = Z_DATA_ERROR;     /* and fall through */
+            case Z_DATA_ERROR:
+            case Z_MEM_ERROR:
+                (void)inflateEnd(&strm);
+                return ret;
+            }
+            have = CHUNK - strm.avail_out;
+            if (fwrite(out, 1, have, dest) != have || ferror(dest)) {
+                (void)inflateEnd(&strm);
+                return Z_ERRNO;
+            }
+        } while (strm.avail_out == 0);
+
+        /* done when inflate() says it's done */
+    } while (ret != Z_STREAM_END);
+
+    /* clean up and return */
+    (void)inflateEnd(&strm);
+    return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
+}
+
+/* report a zlib or i/o error */
+void zerr(int ret)
+{
+    fputs("zpipe: ", stderr);
+    switch (ret) {
+    case Z_ERRNO:
+        if (ferror(stdin))
+            fputs("error reading stdin\n", stderr);
+        if (ferror(stdout))
+            fputs("error writing stdout\n", stderr);
+        break;
+    case Z_STREAM_ERROR:
+        fputs("invalid compression level\n", stderr);
+        break;
+    case Z_DATA_ERROR:
+        fputs("invalid or incomplete deflate data\n", stderr);
+        break;
+    case Z_MEM_ERROR:
+        fputs("out of memory\n", stderr);
+        break;
+    case Z_VERSION_ERROR:
+        fputs("zlib version mismatch!\n", stderr);
+    }
+}
+
+/* compress or decompress from stdin to stdout */
+int main(int argc, char **argv)
+{
+    int ret;
+
+    /* avoid end-of-line conversions */
+    SET_BINARY_MODE(stdin);
+    SET_BINARY_MODE(stdout);
+
+    /* do compression if no arguments */
+    if (argc == 1) {
+        ret = def(stdin, stdout, Z_DEFAULT_COMPRESSION);
+        if (ret != Z_OK)
+            zerr(ret);
+        return ret;
+    }
+
+    /* do decompression if -d specified */
+    else if (argc == 2 && strcmp(argv[1], "-d") == 0) {
+        ret = inf(stdin, stdout);
+        if (ret != Z_OK)
+            zerr(ret);
+        return ret;
+    }
+
+    /* otherwise, report usage */
+    else {
+        fputs("zpipe usage: zpipe [-d] < source > dest\n", stderr);
+        return 1;
+    }
+}
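Note: as its usage string says, the program is a stdin-to-stdout filter: `zpipe < source > dest` compresses, `zpipe -d < source > dest` decompresses, and on failure zerr() describes the problem while the zlib error code becomes the exit status.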

deps/zlib/google/compression_utils.cc (-1)

@@ -6,7 +6,6 @@
 
 #include "base/check_op.h"
 #include "base/process/memory.h"
-#include "base/sys_byteorder.h"
 
 #include "third_party/zlib/google/compression_utils_portable.h"
 

deps/zlib/google/zip_reader_unittest.cc (+7 -1)

@@ -72,7 +72,7 @@ class FileWrapper {
 // A mock that provides methods that can be used as callbacks in asynchronous
 // unzip functions. Tracks the number of calls and number of bytes reported.
 // Assumes that progress callbacks will be executed in-order.
-class MockUnzipListener : public base::SupportsWeakPtr<MockUnzipListener> {
+class MockUnzipListener final {
  public:
   MockUnzipListener()
       : success_calls_(0),
@@ -98,12 +98,18 @@ class MockUnzipListener : public base::SupportsWeakPtr<MockUnzipListener> {
   int progress_calls() { return progress_calls_; }
   int current_progress() { return current_progress_; }
 
+  base::WeakPtr<MockUnzipListener> AsWeakPtr() {
+    return weak_ptr_factory_.GetWeakPtr();
+  }
+
  private:
   int success_calls_;
   int failure_calls_;
   int progress_calls_;
 
   int64_t current_progress_;
+
+  base::WeakPtrFactory<MockUnzipListener> weak_ptr_factory_{this};
 };
 
 class MockWriterDelegate : public zip::WriterDelegate {

deps/zlib/test/minigzip.c (+579)

(Large diff not rendered by default.)
