From f878181f31c33f4bb7d29a478471aca7cc012aa7 Mon Sep 17 00:00:00 2001 From: Dan McDonald Date: Wed, 30 Mar 2022 21:20:17 -0400 Subject: [PATCH 1/2] OS-xxxx patch libz-1.2.3 for CVE-2018-25032 --- libz/Makefile | 2 + libz/Patches/CVE-2018-25032.patch | 381 ++++++++++++++++++++++++++++++ 2 files changed, 383 insertions(+) create mode 100644 libz/Patches/CVE-2018-25032.patch diff --git a/libz/Makefile b/libz/Makefile index 16c724c8..d6f6c4c1 100644 --- a/libz/Makefile +++ b/libz/Makefile @@ -42,6 +42,8 @@ AUTOCONF_LDFLAGS.64 = AUTOCONF_CC = CC="$(GCC.32) $(CPPFLAGS)" AUTOCONF_CC.64 = CC="$(GCC.64) $(CPPFLAGS)" +PATCHES = Patches/* + # # LDFLAGS is used by zlib's build system to build programs, not the library # itself. For that, it accepts only a combined linker+flags+libs command diff --git a/libz/Patches/CVE-2018-25032.patch b/libz/Patches/CVE-2018-25032.patch new file mode 100644 index 00000000..fde98e18 --- /dev/null +++ b/libz/Patches/CVE-2018-25032.patch @@ -0,0 +1,381 @@ +diff -ru a/deflate.c b/deflate.c +--- a/deflate.c 2005-07-17 22:27:31.000000000 -0400 ++++ b/deflate.c 2022-03-30 21:15:15.000000000 -0400 +@@ -228,11 +228,6 @@ + int wrap = 1; + static const char my_version[] = ZLIB_VERSION; + +- ushf *overlay; +- /* We overlay pending_buf and d_buf+l_buf. This works since the average +- * output size for (length,distance) codes is <= 24 bits. +- */ +- + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; +@@ -290,9 +285,47 @@ + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + +- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); +- s->pending_buf = (uchf *) overlay; +- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); ++ /* We overlay pending_buf and sym_buf. This works since the average size ++ * for length/distance pairs over any compressed block is assured to be 31 ++ * bits or less. 
++ * ++ * Analysis: The longest fixed codes are a length code of 8 bits plus 5 ++ * extra bits, for lengths 131 to 257. The longest fixed distance codes are ++ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest ++ * possible fixed-codes length/distance pair is then 31 bits total. ++ * ++ * sym_buf starts one-fourth of the way into pending_buf. So there are ++ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol ++ * in sym_buf is three bytes -- two for the distance and one for the ++ * literal/length. As each symbol is consumed, the pointer to the next ++ * sym_buf value to read moves forward three bytes. From that symbol, up to ++ * 31 bits are written to pending_buf. The closest the written pending_buf ++ * bits gets to the next sym_buf symbol to read is just before the last ++ * code is written. At that time, 31*(n-2) bits have been written, just ++ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at ++ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1 ++ * symbols are written.) The closest the writing gets to what is unread is ++ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and ++ * can range from 128 to 32768. ++ * ++ * Therefore, at a minimum, there are 142 bits of space between what is ++ * written and what is read in the overlain buffers, so the symbols cannot ++ * be overwritten by the compressed data. That space is actually 139 bits, ++ * due to the three-bit fixed-code block header. ++ * ++ * That covers the case where either Z_FIXED is specified, forcing fixed ++ * codes, or when the use of fixed codes is chosen, because that choice ++ * results in a smaller compressed block than dynamic codes. That latter ++ * condition then assures that the above analysis also covers all dynamic ++ * blocks. A dynamic-code block will only be chosen to be emitted if it has ++ * fewer bits than a fixed-code block would for the same set of symbols. 
++ * Therefore its average symbol length is assured to be less than 31. So ++ * the compressed data for a dynamic block also cannot overwrite the ++ * symbols from which it is being constructed. ++ */ ++ ++ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4); ++ s->pending_buf_size = (ulg)s->lit_bufsize * 4; + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { +@@ -301,8 +334,12 @@ + deflateEnd (strm); + return Z_MEM_ERROR; + } +- s->d_buf = overlay + s->lit_bufsize/sizeof(ush); +- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; ++ s->sym_buf = s->pending_buf + s->lit_bufsize; ++ s->sym_end = (s->lit_bufsize - 1) * 3; ++ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K ++ * on 16 bit machines and because stored blocks are restricted to ++ * 64K-1 bytes. ++ */ + + s->level = level; + s->strategy = strategy; +@@ -406,9 +443,23 @@ + int bits; + int value; + { ++ deflate_state *s; ++ int put; ++ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; +- strm->state->bi_valid = bits; +- strm->state->bi_buf = (ush)(value & ((1 << bits) - 1)); ++ s = strm->state; ++ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3)) ++ return Z_BUF_ERROR; ++ do { ++ put = Buf_size - s->bi_valid; ++ if (put > bits) ++ put = bits; ++ s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid); ++ s->bi_valid += put; ++ _tr_flush_bits(s); ++ value >>= put; ++ bits -= put; ++ } while (bits); + return Z_OK; + } + +@@ -900,7 +951,6 @@ + #else + deflate_state *ds; + deflate_state *ss; +- ushf *overlay; + + + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { +@@ -920,8 +970,7 @@ + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); +- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); +- ds->pending_buf = (uchf *) 
overlay; ++ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4); + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { +@@ -935,8 +984,7 @@ + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); +- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); +- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; ++ ds->sym_buf = ds->pending_buf + ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; +@@ -1541,8 +1589,16 @@ + } + if (bflush) FLUSH_BLOCK(s, 0); + } +- FLUSH_BLOCK(s, flush == Z_FINISH); +- return flush == Z_FINISH ? finish_done : block_done; ++ ++ if (flush == Z_FINISH) { ++ FLUSH_BLOCK(s, 1); ++ return finish_done; ++ } ++ ++ if (s->sym_next) ++ FLUSH_BLOCK(s, 0); ++ ++ return block_done; + } + + #ifndef FASTEST +@@ -1669,8 +1725,15 @@ + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + s->match_available = 0; + } +- FLUSH_BLOCK(s, flush == Z_FINISH); +- return flush == Z_FINISH ? finish_done : block_done; ++ if (flush == Z_FINISH) { ++ FLUSH_BLOCK(s, 1); ++ return finish_done; ++ } ++ ++ if (s->sym_next) ++ FLUSH_BLOCK(s, 0); ++ ++ return block_done; + } + #endif /* FASTEST */ + +@@ -1730,7 +1793,14 @@ + } + if (bflush) FLUSH_BLOCK(s, 0); + } +- FLUSH_BLOCK(s, flush == Z_FINISH); +- return flush == Z_FINISH ? 
finish_done : block_done; ++ if (flush == Z_FINISH) { ++ FLUSH_BLOCK(s, 1); ++ return finish_done; ++ } ++ ++ if (s->sym_next) ++ FLUSH_BLOCK(s, 0); ++ ++ return block_done; + } + #endif +diff -ru a/deflate.h b/deflate.h +--- a/deflate.h 2005-05-29 11:55:22.000000000 -0400 ++++ b/deflate.h 2022-03-30 20:57:53.000000000 -0400 +@@ -48,6 +48,9 @@ + #define MAX_BITS 15 + /* All codes must not exceed MAX_BITS bits */ + ++#define Buf_size 16 ++/* size of bit buffer in bi_buf */ ++ + #define INIT_STATE 42 + #define EXTRA_STATE 69 + #define NAME_STATE 73 +@@ -211,7 +214,7 @@ + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + +- uchf *l_buf; /* buffer for literals or lengths */ ++ uchf *sym_buf; /* buffer for distances and literals/lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. There are 4 reasons for +@@ -233,13 +236,8 @@ + * - I can't count above 4 + */ + +- uInt last_lit; /* running index in l_buf */ +- +- ushf *d_buf; +- /* Buffer for distances. To simplify the code, d_buf and l_buf have +- * the same number of elements. To use different lengths, an extra flag +- * array would be necessary. 
+- */ ++ uInt sym_next; /* running index in sym_buf */ ++ uInt sym_end; /* symbol table full when sym_next reaches this */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ +@@ -307,20 +305,22 @@ + + # define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ +- s->d_buf[s->last_lit] = 0; \ +- s->l_buf[s->last_lit++] = cc; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = cc; \ + s->dyn_ltree[cc].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + # define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (length); \ + ush dist = (distance); \ +- s->d_buf[s->last_lit] = dist; \ +- s->l_buf[s->last_lit++] = len; \ ++ s->sym_buf[s->sym_next++] = dist; \ ++ s->sym_buf[s->sym_next++] = dist >> 8; \ ++ s->sym_buf[s->sym_next++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + #else + # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +diff -ru a/trees.c b/trees.c +--- a/trees.c 2005-06-12 20:34:41.000000000 -0400 ++++ b/trees.c 2022-03-30 20:59:29.000000000 -0400 +@@ -73,11 +73,6 @@ + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +-#define Buf_size (8 * 2*sizeof(char)) +-/* Number of bits used within bi_buf. (bi_buf might be implemented on +- * more than 16 bits on some systems.) +- */ +- + /* =========================================================================== + * Local data. These are initialized only once. 
+ */ +@@ -420,7 +415,7 @@ + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; +- s->last_lit = s->matches = 0; ++ s->sym_next = s->matches = 0; + } + + #define SMALLEST 1 +@@ -957,7 +952,7 @@ + + Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, +- s->last_lit)); ++ s->sym_next / 3)); + + if (static_lenb <= opt_lenb) opt_lenb = static_lenb; + +@@ -1024,8 +1019,9 @@ + unsigned dist; /* distance of matched string */ + unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ + { +- s->d_buf[s->last_lit] = (ush)dist; +- s->l_buf[s->last_lit++] = (uch)lc; ++ s->sym_buf[s->sym_next++] = dist; ++ s->sym_buf[s->sym_next++] = dist >> 8; ++ s->sym_buf[s->sym_next++] = lc; + if (dist == 0) { + /* lc is the unmatched char */ + s->dyn_ltree[lc].Freq++; +@@ -1041,29 +1037,7 @@ + s->dyn_dtree[d_code(dist)].Freq++; + } + +-#ifdef TRUNCATE_BLOCK +- /* Try to guess if it is profitable to stop the current block here */ +- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { +- /* Compute an upper bound for the compressed length */ +- ulg out_length = (ulg)s->last_lit*8L; +- ulg in_length = (ulg)((long)s->strstart - s->block_start); +- int dcode; +- for (dcode = 0; dcode < D_CODES; dcode++) { +- out_length += (ulg)s->dyn_dtree[dcode].Freq * +- (5L+extra_dbits[dcode]); +- } +- out_length >>= 3; +- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +- s->last_lit, in_length, out_length, +- 100L - out_length*100L/in_length)); +- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; +- } +-#endif +- return (s->last_lit == s->lit_bufsize-1); +- /* We avoid equality with lit_bufsize because of wraparound at 64K +- * on 16 bit machines and because stored blocks are restricted to +- * 64K-1 bytes. 
+- */ ++ return (s->sym_next == s->sym_end); + } + + /* =========================================================================== +@@ -1076,13 +1050,14 @@ + { + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ +- unsigned lx = 0; /* running index in l_buf */ ++ unsigned sx = 0; /* running index in sym_buf */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + +- if (s->last_lit != 0) do { +- dist = s->d_buf[lx]; +- lc = s->l_buf[lx++]; ++ if (s->sym_next != 0) do { ++ dist = s->sym_buf[sx++]; ++ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8; ++ lc = s->sym_buf[sx++]; + if (dist == 0) { + send_code(s, lc, ltree); /* send a literal byte */ + Tracecv(isgraph(lc), (stderr," '%c' ", lc)); +@@ -1107,11 +1082,10 @@ + } + } /* literal or match pair ? */ + +- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ +- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, +- "pendingBuf overflow"); ++ /* Check that the overlay between pending_buf and sym_buf is ok: */ ++ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow"); + +- } while (lx < s->last_lit); ++ } while (sx < s->sym_next); + + send_code(s, END_BLOCK, ltree); + s->last_eob_len = ltree[END_BLOCK].Len; +diff -ru a/zlib.h b/zlib.h +--- a/zlib.h 2005-07-17 22:26:49.000000000 -0400 ++++ b/zlib.h 2022-03-30 20:59:06.000000000 -0400 +@@ -662,8 +662,9 @@ + less than or equal to 16, and that many of the least significant bits of + value will be inserted in the output. + +- deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source +- stream state was inconsistent. ++ deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough ++ room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the ++ source stream state was inconsistent. 
+ */ + + ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, From 6be2f27742c298ef05c76a75f54b543c7e428c1a Mon Sep 17 00:00:00 2001 From: Dan McDonald Date: Wed, 30 Mar 2022 22:11:35 -0400 Subject: [PATCH 2/2] Add _tr_flush_bits --- libz/Patches/0001-flush_bits.patch | 62 +++++++++++++++++++ ...-25032.patch => 0002-CVE-2018-25032.patch} | 0 2 files changed, 62 insertions(+) create mode 100644 libz/Patches/0001-flush_bits.patch rename libz/Patches/{CVE-2018-25032.patch => 0002-CVE-2018-25032.patch} (100%) diff --git a/libz/Patches/0001-flush_bits.patch b/libz/Patches/0001-flush_bits.patch new file mode 100644 index 00000000..eda04b6a --- /dev/null +++ b/libz/Patches/0001-flush_bits.patch @@ -0,0 +1,62 @@ +diff -ru a/deflate.c b/deflate.c +--- a/deflate.c Sun Jul 17 22:27:31 2005 ++++ b/deflate.c Wed Mar 30 22:07:47 2022 +@@ -532,19 +532,22 @@ + local void flush_pending(strm) + z_streamp strm; + { +- unsigned len = strm->state->pending; ++ unsigned len; ++ deflate_state *s = strm->state; + ++ _tr_flush_bits(s); ++ len = s->pending; + if (len > strm->avail_out) len = strm->avail_out; + if (len == 0) return; + +- zmemcpy(strm->next_out, strm->state->pending_out, len); ++ zmemcpy(strm->next_out, s->pending_out, len); + strm->next_out += len; +- strm->state->pending_out += len; ++ s->pending_out += len; + strm->total_out += len; + strm->avail_out -= len; +- strm->state->pending -= len; +- if (strm->state->pending == 0) { +- strm->state->pending_out = strm->state->pending_buf; ++ s->pending -= len; ++ if (s->pending == 0) { ++ s->pending_out = s->pending_buf; + } + } + +diff -ru a/deflate.h b/deflate.h +--- a/deflate.h Sun May 29 11:55:22 2005 ++++ b/deflate.h Wed Mar 30 22:08:34 2022 +@@ -283,6 +283,7 @@ + int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); + void _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, + int eof)); ++void _tr_flush_bits OF((deflate_state *s)); + void _tr_align OF((deflate_state *s)); + void _tr_stored_block 
OF((deflate_state *s, charf *buf, ulg stored_len, + int eof)); +diff -ru a/trees.c b/trees.c +--- a/trees.c Sun Jun 12 20:34:41 2005 ++++ b/trees.c Wed Mar 30 22:07:47 2022 +@@ -879,6 +879,15 @@ + } + + /* =========================================================================== ++ * Flush the bits in the bit buffer to pending output (leaves at most 7 bits) ++ */ ++void ZLIB_INTERNAL _tr_flush_bits(s) ++ deflate_state *s; ++{ ++ bi_flush(s); ++} ++ ++/* =========================================================================== + * Send one empty static block to give enough lookahead for inflate. + * This takes 10 bits, of which 7 may remain in the bit buffer. + * The current inflate code requires 9 bits of lookahead. If the diff --git a/libz/Patches/CVE-2018-25032.patch b/libz/Patches/0002-CVE-2018-25032.patch similarity index 100% rename from libz/Patches/CVE-2018-25032.patch rename to libz/Patches/0002-CVE-2018-25032.patch