1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/ossrv/compressionlibs/ziplib/test/oldezlib/EZLib/deflate.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1361 @@
1.4 +/* deflate.c -- compress data using the deflation algorithm
1.5 + * Copyright (C) 1995-2002 Jean-loup Gailly.
1.6 + * For conditions of distribution and use, see copyright notice in zlib.h
1.7 + */
1.8 +
1.9 +/*
1.10 + * ALGORITHM
1.11 + *
1.12 + * The "deflation" process depends on being able to identify portions
1.13 + * of the input text which are identical to earlier input (within a
1.14 + * sliding window trailing behind the input currently being processed).
1.15 + *
1.16 + * The most straightforward technique turns out to be the fastest for
1.17 + * most input files: try all possible matches and select the longest.
1.18 + * The key feature of this algorithm is that insertions into the string
1.19 + * dictionary are very simple and thus fast, and deletions are avoided
1.20 + * completely. Insertions are performed at each input character, whereas
1.21 + * string matches are performed only when the previous match ends. So it
1.22 + * is preferable to spend more time in matches to allow very fast string
1.23 + * insertions and avoid deletions. The matching algorithm for small
1.24 + * strings is inspired from that of Rabin & Karp. A brute force approach
1.25 + * is used to find longer strings when a small match has been found.
1.26 + * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
1.27 + * (by Leonid Broukhis).
1.28 + * A previous version of this file used a more sophisticated algorithm
1.29 + * (by Fiala and Greene) which is guaranteed to run in linear amortized
1.30 + * time, but has a larger average cost, uses more memory and is patented.
1.31 + * However the F&G algorithm may be faster for some highly redundant
1.32 + * files if the parameter max_chain_length (described below) is too large.
1.33 + *
1.34 + * ACKNOWLEDGEMENTS
1.35 + *
1.36 + * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
1.37 + * I found it in 'freeze' written by Leonid Broukhis.
1.38 + * Thanks to many people for bug reports and testing.
1.39 + *
1.40 + * REFERENCES
1.41 + *
1.42 + * Deutsch, L.P., "DEFLATE Compressed Data Format Specification".
1.43 + * Available at ftp://ds.internic.net/rfc/rfc1951.txt
1.44 + *
1.45 + * A description of the Rabin and Karp algorithm is given in the book
1.46 + * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
1.47 + *
1.48 + * Fiala,E.R., and Greene,D.H.
1.49 + * Data Compression with Finite Windows, Comm. ACM, 32, 4 (1989), 490-505
1.50 + *
1.51 + */
1.52 +
1.53 +/* @(#) $Id$ */
1.54 +
1.55 +#include <e32def.h>
1.56 +
1.57 +#include "deflate.h"
1.58 +
1.59 +const char deflate_copyright[] =
1.60 + " deflate 1.1.4 Copyright 1995-2002 Jean-loup Gailly ";
1.61 +/*
1.62 + If you use the zlib library in a product, an acknowledgment is welcome
1.63 + in the documentation of your product. If for some reason you cannot
1.64 + include such an acknowledgment, I would appreciate that you keep this
1.65 + copyright string in the executable of your product.
1.66 + */
1.67 +
1.68 +/* ===========================================================================
1.69 + * Function prototypes.
1.70 + */
1.71 +typedef enum {
1.72 + need_more, /* block not completed, need more input or more output */
1.73 + block_done, /* block flush performed */
1.74 + finish_started, /* finish started, need only more output at next deflate */
1.75 + finish_done /* finish done, accept no more input or output */
1.76 +} block_state;
1.77 +
1.78 +typedef block_state (*compress_func) OF((deflate_state *s, int flush));
1.79 +/* Compression function. Returns the block state after the call. */
1.80 +
1.81 +local void fill_window OF((deflate_state *s));
1.82 +local block_state deflate_stored OF((deflate_state *s, int flush));
1.83 +local block_state deflate_fast OF((deflate_state *s, int flush));
1.84 +local block_state deflate_slow OF((deflate_state *s, int flush));
1.85 +local void lm_init OF((deflate_state *s));
1.86 +local void putShortMSB OF((deflate_state *s, uInt b));
1.87 +local void flush_pending OF((z_streamp strm));
1.88 +local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
1.89 +#ifdef ASMV
1.90 + void match_init OF((void)); /* asm code initialization */
1.91 + uInt longest_match OF((deflate_state *s, IPos cur_match));
1.92 +#else
1.93 +local uInt longest_match OF((deflate_state *s, IPos cur_match));
1.94 +#endif
1.95 +
1.96 +#ifdef DEBUG
1.97 +local void check_match OF((deflate_state *s, IPos start, IPos match,
1.98 + int length));
1.99 +#endif
1.100 +
1.101 +/* ===========================================================================
1.102 + * Local data
1.103 + */
1.104 +
1.105 +#define NIL 0
1.106 +/* Tail of hash chains */
1.107 +
1.108 +#ifndef TOO_FAR
1.109 +# define TOO_FAR 4096
1.110 +#endif
1.111 +/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
1.112 +
1.113 +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
1.114 +/* Minimum amount of lookahead, except at the end of the input file.
1.115 + * See deflate.c for comments about the MIN_MATCH+1.
1.116 + */
1.117 +
1.118 +/* Values for max_lazy_match, good_match and max_chain_length, depending on
1.119 + * the desired pack level (0..9). The values given below have been tuned to
1.120 + * exclude worst case performance for pathological files. Better values may be
1.121 + * found for specific files.
1.122 + */
1.123 +typedef struct config_s {
1.124 + ush good_length; /* reduce lazy search above this match length */
1.125 + ush max_lazy; /* do not perform lazy search above this match length */
1.126 + ush nice_length; /* quit search above this match length */
1.127 + ush max_chain;
1.128 + compress_func func;
1.129 +} config;
1.130 +
1.131 +local const config configuration_table[10] = {
1.132 +/* good lazy nice chain */
1.133 +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
1.134 +/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
1.135 +/* 2 */ {4, 5, 16, 8, deflate_fast},
1.136 +/* 3 */ {4, 6, 32, 32, deflate_fast},
1.137 +
1.138 +/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
1.139 +/* 5 */ {8, 16, 32, 32, deflate_slow},
1.140 +/* 6 */ {8, 16, 128, 128, deflate_slow},
1.141 +/* 7 */ {8, 32, 128, 256, deflate_slow},
1.142 +/* 8 */ {32, 128, 258, 1024, deflate_slow},
1.143 +/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
1.144 +
1.145 +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
1.146 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
1.147 + * meaning.
1.148 + */
1.149 +
1.150 +#define EQUAL 0
1.151 +/* result of memcmp for equal strings */
1.152 +
1.153 +struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
1.154 +
1.155 +/* ===========================================================================
1.156 + * Update a hash value with the given input byte
1.157 + * IN assertion: all calls to UPDATE_HASH are made with consecutive
1.158 + * input characters, so that a running hash key can be computed from the
1.159 + * previous key instead of complete recalculation each time.
1.160 + */
1.161 +#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
1.162 +
1.163 +
1.164 +/* ===========================================================================
1.165 + * Insert string str in the dictionary and set match_head to the previous head
1.166 + * of the hash chain (the most recent string with same hash key). Return
1.167 + * the previous length of the hash chain.
1.168 + * If this file is compiled with -DFASTEST, the compression level is forced
1.169 + * to 1, and no hash chains are maintained.
1.170 + * IN assertion: all calls to INSERT_STRING are made with consecutive
1.171 + * input characters and the first MIN_MATCH bytes of str are valid
1.172 + * (except for the last MIN_MATCH-1 bytes of the input file).
1.173 + */
1.174 +#ifdef FASTEST
1.175 +#define INSERT_STRING(s, str, match_head) \
1.176 + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
1.177 + match_head = s->head[s->ins_h], \
1.178 + s->head[s->ins_h] = STATIC_CAST(Pos,str))
1.179 +#else
1.180 +#define INSERT_STRING(s, str, match_head) \
1.181 + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
1.182 + s->prev[(str) & s->w_mask] = STATIC_CAST(Posf,match_head = s->head[s->ins_h]), \
1.183 + s->head[s->ins_h] = STATIC_CAST(Pos,str))
1.184 +#endif
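The two macros above are the heart of the dictionary: UPDATE_HASH rolls the 3-byte hash forward and INSERT_STRING pushes the current position onto the chain for that hash key. A scaled-down, self-contained sketch of the same head[]/prev[] scheme is shown below; the toy_* names and sizes are hypothetical and exist only to make the chaining visible.

#define TOY_WBITS  8
#define TOY_WSIZE  (1u << TOY_WBITS)              /* toy sliding-window size */
#define TOY_HBITS  6
#define TOY_HSIZE  (1u << TOY_HBITS)              /* toy hash-table size */
#define TOY_HSHIFT ((TOY_HBITS + 3 - 1) / 3)      /* mirrors (hash_bits+MIN_MATCH-1)/MIN_MATCH */
#define TOY_NIL    0                              /* tail of a chain, as above */

static unsigned short toy_head[TOY_HSIZE];        /* newest position for each hash key */
static unsigned short toy_prev[TOY_WSIZE];        /* previous position with the same key */

/* Insert position str of win[] (3 valid bytes at str) and return the old chain head.
 * As in the real code, position 0 cannot be told apart from TOY_NIL, so matches
 * against window index 0 are simply never attempted by the search. */
static unsigned toy_insert(const unsigned char *win, unsigned str, unsigned *ins_h)
{
    unsigned match_head;
    *ins_h = ((*ins_h << TOY_HSHIFT) ^ win[str + 2]) & (TOY_HSIZE - 1); /* rolling hash */
    match_head = toy_head[*ins_h];
    toy_prev[str & (TOY_WSIZE - 1)] = (unsigned short)match_head;       /* link to older occurrence */
    toy_head[*ins_h] = (unsigned short)str;
    return match_head;
}

As in lm_init()/fill_window(), the running hash would first be seeded from the first two window bytes before position 0 is ever inserted.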
1.185 +
1.186 +/* ===========================================================================
1.187 + * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
1.188 + * prev[] will be initialized on the fly.
1.189 + */
1.190 +#define CLEAR_HASH(s) \
1.191 + s->head[s->hash_size-1] = NIL; \
1.192 + zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
1.193 +
1.194 +/* ========================================================================= */
1.195 +EXPORT_C int ZEXPORT deflateInit_(
1.196 + z_streamp strm,
1.197 + int level,
1.198 + const char *version,
1.199 + int stream_size)
1.200 +{
1.201 + return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
1.202 + Z_DEFAULT_STRATEGY, version, stream_size);
1.203 + /* To do: ignore strm->next_in if we use it as window */
1.204 +}
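deflateInit_() is normally reached through the deflateInit() macro in zlib.h, which supplies the version string and stream size. A minimal one-shot use of that public entry point might look like the sketch below; the buffer names and the assumption that dst is large enough for the whole compressed stream are hypothetical.

#include <string.h>
#include "zlib.h"

/* Compress src into dst with a single deflate() call; returns a zlib return code. */
static int compress_once(Bytef *dst, uLong *dst_len, const Bytef *src, uLong src_len)
{
    z_stream strm;
    int err;

    memset(&strm, 0, sizeof(strm));          /* zalloc/zfree/opaque = 0 -> library defaults */
    err = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (err != Z_OK) return err;

    strm.next_in   = (Bytef *)src;
    strm.avail_in  = (uInt)src_len;
    strm.next_out  = dst;
    strm.avail_out = (uInt)*dst_len;

    err = deflate(&strm, Z_FINISH);          /* all input is present, so one call suffices */
    *dst_len = strm.total_out;
    deflateEnd(&strm);
    return err == Z_STREAM_END ? Z_OK : (err == Z_OK ? Z_BUF_ERROR : err);
}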
1.205 +
1.206 +/* ========================================================================= */
1.207 +EXPORT_C int ZEXPORT deflateInit2_(
1.208 + z_streamp strm,
1.209 + int level,
1.210 + int method,
1.211 + int windowBits,
1.212 + int memLevel,
1.213 + int strategy,
1.214 + const char *version,
1.215 + int stream_size)
1.216 +{
1.217 + deflate_state *s;
1.218 + int noheader = 0;
1.219 + static const char* const my_version = ZLIB_VERSION;
1.220 +
1.221 + ushf *overlay;
1.222 + /* We overlay pending_buf and d_buf+l_buf. This works since the average
1.223 + * output size for (length,distance) codes is <= 24 bits.
1.224 + */
1.225 +
1.226 + if (version == Z_NULL || version[0] != my_version[0] ||
1.227 + stream_size != sizeof(z_stream)) {
1.228 + return Z_VERSION_ERROR;
1.229 + }
1.230 + if (strm == Z_NULL) return Z_STREAM_ERROR;
1.231 +
1.232 + strm->msg = Z_NULL;
1.233 + if (strm->zalloc == Z_NULL) {
1.234 + strm->zalloc = zcalloc;
1.235 + strm->opaque = (voidpf)0;
1.236 + }
1.237 + if (strm->zfree == Z_NULL) strm->zfree = zcfree;
1.238 +
1.239 + if (level == Z_DEFAULT_COMPRESSION) level = 6;
1.240 +#ifdef FASTEST
1.241 + level = 1;
1.242 +#endif
1.243 +
1.244 + if (windowBits < 0) { /* undocumented feature: suppress zlib header */
1.245 + noheader = 1;
1.246 + windowBits = -windowBits;
1.247 + }
1.248 + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
1.249 + windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
1.250 + strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
1.251 + return Z_STREAM_ERROR;
1.252 + }
1.253 + s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
1.254 + if (s == Z_NULL) return Z_MEM_ERROR;
1.255 + strm->state = (struct internal_state FAR *)s;
1.256 + s->strm = strm;
1.257 +
1.258 + s->noheader = noheader;
1.259 + s->w_bits = windowBits;
1.260 + s->w_size = 1 << s->w_bits;
1.261 + s->w_mask = s->w_size - 1;
1.262 +
1.263 + s->hash_bits = memLevel + 7;
1.264 + s->hash_size = 1 << s->hash_bits;
1.265 + s->hash_mask = s->hash_size - 1;
1.266 + s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
1.267 +
1.268 + s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
1.269 + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
1.270 + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
1.271 +
1.272 + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
1.273 +
1.274 + overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
1.275 + s->pending_buf = (uchf *) overlay;
1.276 + s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
1.277 +
1.278 + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
1.279 + s->pending_buf == Z_NULL) {
1.280 + strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
1.281 +
1.282 +	// The following line was added by Markr. Up to this point s->status has not been
1.283 +	// initialised, and if deflateEnd is called in that state it fails to reclaim the
1.284 +	// memory allocated for s->window etc. when an allocation fails. Setting the status
1.285 +	// to INIT_STATE lets deflateEnd reclaim that memory.
1.286 +
1.287 + s->status = INIT_STATE;
1.288 + deflateEnd (strm);
1.289 + return Z_MEM_ERROR;
1.290 + }
1.291 + s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
1.292 + s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
1.293 +
1.294 + s->level = level;
1.295 + s->strategy = strategy;
1.296 + s->method = (Byte)method;
1.297 +
1.298 + return deflateReset(strm);
1.299 +}
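As noted in the code above, a negative windowBits is the (undocumented) way to suppress the zlib header and adler32 trailer. A hedged sketch of initialising such a raw-deflate stream through the public deflateInit2() macro, with memLevel left at the usual default of 8:

#include <string.h>
#include "zlib.h"

/* Initialise strm for raw deflate: no zlib header or trailer, 32K window. */
static int raw_deflate_init(z_stream *strm, int level)
{
    memset(strm, 0, sizeof(*strm));
    return deflateInit2(strm, level, Z_DEFLATED,
                        -MAX_WBITS,         /* negative -> noheader, window = 1 << 15 */
                        8,                  /* memLevel: 1..MAX_MEM_LEVEL, 8 is the usual default */
                        Z_DEFAULT_STRATEGY);
}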
1.300 +
1.301 +/* ========================================================================= */
1.302 +EXPORT_C int ZEXPORT deflateSetDictionary (
1.303 + z_streamp strm,
1.304 + const Bytef *dictionary,
1.305 + uInt dictLength)
1.306 +{
1.307 + // Line to stop compiler warning about unused mandatory global variable
1.308 + char __z=deflate_copyright[0]; __z=__z;
1.309 +
1.310 + deflate_state *s;
1.311 + uInt length = dictLength;
1.312 + uInt n;
1.313 + IPos hash_head = 0;
1.314 +
1.315 + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
1.316 + strm->state->status != INIT_STATE) return Z_STREAM_ERROR;
1.317 +
1.318 + s = strm->state;
1.319 + strm->adler = adler32(strm->adler, dictionary, dictLength);
1.320 +
1.321 + if (length < MIN_MATCH) return Z_OK;
1.322 + if (length > MAX_DIST(s)) {
1.323 + length = MAX_DIST(s);
1.324 +#ifndef USE_DICT_HEAD
1.325 + dictionary += dictLength - length; /* use the tail of the dictionary */
1.326 +#endif
1.327 + }
1.328 + zmemcpy(s->window, dictionary, length);
1.329 + s->strstart = length;
1.330 + s->block_start = (long)length;
1.331 +
1.332 + /* Insert all strings in the hash table (except for the last two bytes).
1.333 + * s->lookahead stays null, so s->ins_h will be recomputed at the next
1.334 + * call of fill_window.
1.335 + */
1.336 + s->ins_h = s->window[0];
1.337 + UPDATE_HASH(s, s->ins_h, s->window[1]);
1.338 + for (n = 0; n <= length - MIN_MATCH; n++) {
1.339 + INSERT_STRING(s, n, hash_head);
1.340 + }
1.341 + if (hash_head) hash_head = 0; /* to make compiler happy */
1.342 + return Z_OK;
1.343 +}
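deflateSetDictionary() only works while the stream is still in INIT_STATE, i.e. after deflateInit*() and before the first deflate() call, and only the tail of a dictionary longer than MAX_DIST(s) is kept. A small usage sketch follows; the dictionary text is hypothetical.

#include <string.h>
#include "zlib.h"

static const Bytef sample_dict[] = "commonly repeated phrases go here"; /* hypothetical */

static int init_with_dictionary(z_stream *strm)
{
    int err;

    memset(strm, 0, sizeof(*strm));
    err = deflateInit(strm, Z_BEST_COMPRESSION);
    if (err != Z_OK) return err;

    /* strm->adler now holds the adler32 of the dictionary; the decompressor must
     * hand the same bytes to inflateSetDictionary() when it asks for them. */
    return deflateSetDictionary(strm, sample_dict, (uInt)(sizeof(sample_dict) - 1));
}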
1.344 +
1.345 +/* ========================================================================= */
1.346 +EXPORT_C int ZEXPORT deflateReset (
1.347 + z_streamp strm)
1.348 +{
1.349 + deflate_state *s;
1.350 +
1.351 + if (strm == Z_NULL || strm->state == Z_NULL ||
1.352 + strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
1.353 +
1.354 + strm->total_in = strm->total_out = 0;
1.355 + strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
1.356 + strm->data_type = Z_UNKNOWN;
1.357 +
1.358 + s = (deflate_state *)strm->state;
1.359 + s->pending = 0;
1.360 + s->pending_out = s->pending_buf;
1.361 +
1.362 + if (s->noheader < 0) {
1.363 + s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
1.364 + }
1.365 + s->status = s->noheader ? BUSY_STATE : INIT_STATE;
1.366 + strm->adler = 1;
1.367 + s->last_flush = Z_NO_FLUSH;
1.368 +
1.369 + _tr_init(s);
1.370 + lm_init(s);
1.371 +
1.372 + return Z_OK;
1.373 +}
1.374 +
1.375 +/* ========================================================================= */
1.376 +EXPORT_C int ZEXPORT deflateParams(
1.377 + z_streamp strm,
1.378 + int level,
1.379 + int strategy)
1.380 +{
1.381 + deflate_state *s;
1.382 + compress_func func;
1.383 + int err = Z_OK;
1.384 +
1.385 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
1.386 + s = strm->state;
1.387 +
1.388 + if (level == Z_DEFAULT_COMPRESSION) {
1.389 + level = 6;
1.390 + }
1.391 + if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
1.392 + return Z_STREAM_ERROR;
1.393 + }
1.394 + func = configuration_table[s->level].func;
1.395 +
1.396 + if (func != configuration_table[level].func && strm->total_in != 0) {
1.397 + /* Flush the last buffer: */
1.398 + err = deflate(strm, Z_PARTIAL_FLUSH);
1.399 + }
1.400 + if (s->level != level) {
1.401 + s->level = level;
1.402 + s->max_lazy_match = configuration_table[level].max_lazy;
1.403 + s->good_match = configuration_table[level].good_length;
1.404 + s->nice_match = configuration_table[level].nice_length;
1.405 + s->max_chain_length = configuration_table[level].max_chain;
1.406 + }
1.407 + s->strategy = strategy;
1.408 + return err;
1.409 +}
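Because deflateParams() flushes the pending block whenever the compression function changes, it is safe to call between deflate() calls on a live stream. A trivial sketch of retuning mid-stream; the trigger for doing so is hypothetical and not part of zlib.

#include "zlib.h"

/* Drop to the fastest level with Huffman-only coding, e.g. once the caller
 * decides the remaining input is already compressed. */
static int switch_to_fast_huffman(z_stream *strm)
{
    return deflateParams(strm, 1, Z_HUFFMAN_ONLY);
}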
1.410 +
1.411 +/* =========================================================================
1.412 + * Put a short in the pending buffer. The 16-bit value is put in MSB order.
1.413 + * IN assertion: the stream state is correct and there is enough room in
1.414 + * pending_buf.
1.415 + */
1.416 +local void putShortMSB (
1.417 + deflate_state *s,
1.418 + uInt b)
1.419 +{
1.420 + put_byte(s, (Byte)(b >> 8));
1.421 + put_byte(s, (Byte)(b & 0xff));
1.422 +}
1.423 +
1.424 +/* =========================================================================
1.425 + * Flush as much pending output as possible. All deflate() output goes
1.426 + * through this function so some applications may wish to modify it
1.427 + * to avoid allocating a large strm->next_out buffer and copying into it.
1.428 + * (See also read_buf()).
1.429 + */
1.430 +local void flush_pending(
1.431 + z_streamp strm)
1.432 +{
1.433 + unsigned len = strm->state->pending;
1.434 +
1.435 + if (len > strm->avail_out) len = strm->avail_out;
1.436 + if (len == 0) return;
1.437 +
1.438 + zmemcpy(strm->next_out, strm->state->pending_out, len);
1.439 + strm->next_out += len;
1.440 + strm->state->pending_out += len;
1.441 + strm->total_out += len;
1.442 + strm->avail_out -= len;
1.443 + strm->state->pending -= len;
1.444 + if (strm->state->pending == 0) {
1.445 + strm->state->pending_out = strm->state->pending_buf;
1.446 + }
1.447 +}
1.448 +
1.449 +/* ========================================================================= */
1.450 +EXPORT_C int ZEXPORT deflate (
1.451 + z_streamp strm,
1.452 + int flush)
1.453 +{
1.454 + int old_flush; /* value of flush param for previous deflate call */
1.455 + deflate_state *s;
1.456 +
1.457 + if (strm == Z_NULL || strm->state == Z_NULL ||
1.458 + flush > Z_FINISH || flush < 0) {
1.459 + return Z_STREAM_ERROR;
1.460 + }
1.461 + s = strm->state;
1.462 +
1.463 + if (strm->next_out == Z_NULL ||
1.464 + (strm->next_in == Z_NULL && strm->avail_in != 0) ||
1.465 + (s->status == FINISH_STATE && flush != Z_FINISH)) {
1.466 + ERR_RETURN(strm, Z_STREAM_ERROR);
1.467 + }
1.468 + if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
1.469 +
1.470 + s->strm = strm; /* just in case */
1.471 + old_flush = s->last_flush;
1.472 + s->last_flush = flush;
1.473 +
1.474 + /* Write the zlib header */
1.475 + if (s->status == INIT_STATE) {
1.476 +
1.477 + uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
1.478 + uInt level_flags = (s->level-1) >> 1;
1.479 +
1.480 + if (level_flags > 3) level_flags = 3;
1.481 + header |= (level_flags << 6);
1.482 + if (s->strstart != 0) header |= PRESET_DICT;
1.483 + header += 31 - (header % 31);
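	/* Illustrative check: with w_bits == 15, level == 6 and no preset dictionary,
	 * header starts as 0x7800, level_flags == 2 contributes 0x0080, and the line
	 * above rounds the value up to 0x789C (a multiple of 31), so the stream begins
	 * with the familiar bytes 0x78 0x9C.
	 */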
1.484 +
1.485 + s->status = BUSY_STATE;
1.486 + putShortMSB(s, header);
1.487 +
1.488 + /* Save the adler32 of the preset dictionary: */
1.489 + if (s->strstart != 0) {
1.490 + putShortMSB(s, (uInt)(strm->adler >> 16));
1.491 + putShortMSB(s, (uInt)(strm->adler & 0xffff));
1.492 + }
1.493 + strm->adler = 1L;
1.494 + }
1.495 +
1.496 + /* Flush as much pending output as possible */
1.497 + if (s->pending != 0) {
1.498 + flush_pending(strm);
1.499 + if (strm->avail_out == 0) {
1.500 + /* Since avail_out is 0, deflate will be called again with
1.501 + * more output space, but possibly with both pending and
1.502 + * avail_in equal to zero. There won't be anything to do,
1.503 + * but this is not an error situation so make sure we
1.504 + * return OK instead of BUF_ERROR at next call of deflate:
1.505 + */
1.506 + s->last_flush = -1;
1.507 + return Z_OK;
1.508 + }
1.509 +
1.510 + /* Make sure there is something to do and avoid duplicate consecutive
1.511 + * flushes. For repeated and useless calls with Z_FINISH, we keep
1.512 + * returning Z_STREAM_END instead of Z_BUF_ERROR.
1.513 + */
1.514 + } else if (strm->avail_in == 0 && flush <= old_flush &&
1.515 + flush != Z_FINISH) {
1.516 + ERR_RETURN(strm, Z_BUF_ERROR);
1.517 + }
1.518 +
1.519 + /* User must not provide more input after the first FINISH: */
1.520 + if (s->status == FINISH_STATE && strm->avail_in != 0) {
1.521 + ERR_RETURN(strm, Z_BUF_ERROR);
1.522 + }
1.523 +
1.524 + /* Start a new block or continue the current one.
1.525 + */
1.526 + if (strm->avail_in != 0 || s->lookahead != 0 ||
1.527 + (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
1.528 + block_state bstate;
1.529 +
1.530 + bstate = (*(configuration_table[s->level].func))(s, flush);
1.531 +
1.532 + if (bstate == finish_started || bstate == finish_done) {
1.533 + s->status = FINISH_STATE;
1.534 + }
1.535 + if (bstate == need_more || bstate == finish_started) {
1.536 + if (strm->avail_out == 0) {
1.537 + s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
1.538 + }
1.539 + return Z_OK;
1.540 + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
1.541 + * of deflate should use the same flush parameter to make sure
1.542 + * that the flush is complete. So we don't have to output an
1.543 + * empty block here, this will be done at next call. This also
1.544 + * ensures that for a very small output buffer, we emit at most
1.545 + * one empty block.
1.546 + */
1.547 + }
1.548 + if (bstate == block_done) {
1.549 + if (flush == Z_PARTIAL_FLUSH) {
1.550 + _tr_align(s);
1.551 + } else { /* FULL_FLUSH or SYNC_FLUSH */
1.552 + _tr_stored_block(s, (char*)0, 0L, 0);
1.553 + /* For a full flush, this empty block will be recognized
1.554 + * as a special marker by inflate_sync().
1.555 + */
1.556 + if (flush == Z_FULL_FLUSH) {
1.557 + CLEAR_HASH(s); /* forget history */
1.558 + }
1.559 + }
1.560 + flush_pending(strm);
1.561 + if (strm->avail_out == 0) {
1.562 + s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
1.563 + return Z_OK;
1.564 + }
1.565 + }
1.566 + }
1.567 + Assert(strm->avail_out > 0, "bug2");
1.568 +
1.569 + if (flush != Z_FINISH) return Z_OK;
1.570 + if (s->noheader) return Z_STREAM_END;
1.571 +
1.572 + /* Write the zlib trailer (adler32) */
1.573 + putShortMSB(s, (uInt)(strm->adler >> 16));
1.574 + putShortMSB(s, (uInt)(strm->adler & 0xffff));
1.575 + flush_pending(strm);
1.576 + /* If avail_out is zero, the application will call deflate again
1.577 + * to flush the rest.
1.578 + */
1.579 + s->noheader = -1; /* write the trailer only once! */
1.580 + return s->pending != 0 ? Z_OK : Z_STREAM_END;
1.581 +}
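The comments above spell out the calling convention: when deflate() fills the output buffer it returns Z_OK and must be called again, and once Z_FINISH has been given the caller keeps calling until Z_STREAM_END comes back. A sketch of that loop for an in-memory input written to a FILE*; CHUNK and the function name are hypothetical.

#include <stdio.h>
#include <string.h>
#include "zlib.h"

#define CHUNK 16384   /* output buffer size; any size >= 1 works */

/* Compress the whole in-memory buffer src to the already-open file out,
 * calling deflate() again whenever the output buffer fills up. */
static int deflate_to_file(FILE *out, const Bytef *src, uLong src_len)
{
    z_stream strm;
    Bytef buf[CHUNK];
    int err;

    memset(&strm, 0, sizeof(strm));
    err = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (err != Z_OK) return err;

    strm.next_in  = (Bytef *)src;
    strm.avail_in = (uInt)src_len;

    do {
        strm.next_out  = buf;
        strm.avail_out = CHUNK;
        err = deflate(&strm, Z_FINISH);        /* all input is present, so Z_FINISH throughout */
        if (err == Z_STREAM_ERROR) break;
        if (fwrite(buf, 1, CHUNK - strm.avail_out, out) != CHUNK - strm.avail_out) {
            err = Z_ERRNO;                     /* file write failed */
            break;
        }
    } while (err != Z_STREAM_END);             /* loop while deflate() still has pending output */

    deflateEnd(&strm);
    return err == Z_STREAM_END ? Z_OK : err;
}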
1.582 +
1.583 +/* ========================================================================= */
1.584 +EXPORT_C int ZEXPORT deflateEnd (
1.585 + z_streamp strm)
1.586 +{
1.587 + int status;
1.588 +
1.589 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
1.590 +
1.591 + status = strm->state->status;
1.592 + if (status != INIT_STATE && status != BUSY_STATE &&
1.593 + status != FINISH_STATE) {
1.594 + return Z_STREAM_ERROR;
1.595 + }
1.596 +
1.597 + /* Deallocate in reverse order of allocations: */
1.598 + TRY_FREE(strm, strm->state->pending_buf);
1.599 + TRY_FREE(strm, strm->state->head);
1.600 + TRY_FREE(strm, strm->state->prev);
1.601 + TRY_FREE(strm, strm->state->window);
1.602 +
1.603 + ZFREE(strm, strm->state);
1.604 + strm->state = Z_NULL;
1.605 +
1.606 + return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1.607 +}
1.608 +
1.609 +/* =========================================================================
1.610 + * Copy the source state to the destination state.
1.611 + * To simplify the source, this is not supported for 16-bit MSDOS (which
1.612 + * doesn't have enough memory anyway to duplicate compression states).
1.613 + */
1.614 +EXPORT_C int ZEXPORT deflateCopy (
1.615 + z_streamp dest,
1.616 + z_streamp source)
1.617 +{
1.618 +#ifdef MAXSEG_64K
1.619 + return Z_STREAM_ERROR;
1.620 +#else
1.621 + deflate_state *ds;
1.622 + deflate_state *ss;
1.623 + ushf *overlay;
1.624 +
1.625 +
1.626 + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
1.627 + return Z_STREAM_ERROR;
1.628 + }
1.629 +
1.630 + ss = source->state;
1.631 +
1.632 + *dest = *source;
1.633 +
1.634 + ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
1.635 + if (ds == Z_NULL) return Z_MEM_ERROR;
1.636 + dest->state = (struct internal_state FAR *) ds;
1.637 + *ds = *ss;
1.638 + ds->strm = dest;
1.639 +
1.640 + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1.641 + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
1.642 + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
1.643 + overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
1.644 + ds->pending_buf = (uchf *) overlay;
1.645 +
1.646 + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1.647 + ds->pending_buf == Z_NULL) {
1.648 + deflateEnd (dest);
1.649 + return Z_MEM_ERROR;
1.650 + }
1.651 +    /* the following zmemcpy calls do not work for 16-bit MSDOS */
1.652 + zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1.653 + zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
1.654 + zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
1.655 + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1.656 +
1.657 + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1.658 + ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
1.659 + ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
1.660 +
1.661 + ds->l_desc.dyn_tree = ds->dyn_ltree;
1.662 + ds->d_desc.dyn_tree = ds->dyn_dtree;
1.663 + ds->bl_desc.dyn_tree = ds->bl_tree;
1.664 +
1.665 + return Z_OK;
1.666 +#endif
1.667 +}
1.668 +
1.669 +/* ===========================================================================
1.670 + * Read a new buffer from the current input stream, update the adler32
1.671 + * and total number of bytes read. All deflate() input goes through
1.672 + * this function so some applications may wish to modify it to avoid
1.673 + * allocating a large strm->next_in buffer and copying from it.
1.674 + * (See also flush_pending()).
1.675 + */
1.676 +local int read_buf(
1.677 + z_streamp strm,
1.678 + Bytef *buf,
1.679 + unsigned size)
1.680 +{
1.681 + unsigned len = strm->avail_in;
1.682 +
1.683 + if (len > size) len = size;
1.684 + if (len == 0) return 0;
1.685 +
1.686 + strm->avail_in -= len;
1.687 +
1.688 + if (!strm->state->noheader) {
1.689 + strm->adler = adler32(strm->adler, strm->next_in, len);
1.690 + }
1.691 + zmemcpy(buf, strm->next_in, len);
1.692 + strm->next_in += len;
1.693 + strm->total_in += len;
1.694 +
1.695 + return (int)len;
1.696 +}
1.697 +
1.698 +/* ===========================================================================
1.699 + * Initialize the "longest match" routines for a new zlib stream
1.700 + */
1.701 +local void lm_init (
1.702 + deflate_state *s)
1.703 +{
1.704 + s->window_size = (ulg)2L*s->w_size;
1.705 +
1.706 + CLEAR_HASH(s);
1.707 +
1.708 + /* Set the default configuration parameters:
1.709 + */
1.710 + s->max_lazy_match = configuration_table[s->level].max_lazy;
1.711 + s->good_match = configuration_table[s->level].good_length;
1.712 + s->nice_match = configuration_table[s->level].nice_length;
1.713 + s->max_chain_length = configuration_table[s->level].max_chain;
1.714 +
1.715 + s->strstart = 0;
1.716 + s->block_start = 0L;
1.717 + s->lookahead = 0;
1.718 + s->match_length = s->prev_length = MIN_MATCH-1;
1.719 + s->match_available = 0;
1.720 + s->ins_h = 0;
1.721 +#ifdef ASMV
1.722 + match_init(); /* initialize the asm code */
1.723 +#endif
1.724 +}
1.725 +
1.726 +/* ===========================================================================
1.727 + * Set match_start to the longest match starting at the given string and
1.728 + * return its length. Matches shorter or equal to prev_length are discarded,
1.729 + * in which case the result is equal to prev_length and match_start is
1.730 + * garbage.
1.731 + * IN assertions: cur_match is the head of the hash chain for the current
1.732 + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1.733 + * OUT assertion: the match length is not greater than s->lookahead.
1.734 + */
1.735 +#ifndef ASMV
1.736 +/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
1.737 + * match.S. The code will be functionally equivalent.
1.738 + */
1.739 +#ifndef FASTEST
1.740 +local uInt longest_match(
1.741 + deflate_state *s,
1.742 + IPos cur_match) /* current match */
1.743 +{
1.744 + unsigned chain_length = s->max_chain_length;/* max hash chain length */
1.745 + register Bytef *scan = s->window + s->strstart; /* current string */
1.746 + register Bytef *match; /* matched string */
1.747 + register int len; /* length of current match */
1.748 + int best_len = s->prev_length; /* best match length so far */
1.749 + int nice_match = s->nice_match; /* stop if match long enough */
1.750 + IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
1.751 + s->strstart - (IPos)MAX_DIST(s) : NIL;
1.752 + /* Stop when cur_match becomes <= limit. To simplify the code,
1.753 + * we prevent matches with the string of window index 0.
1.754 + */
1.755 + Posf *prev = s->prev;
1.756 + uInt wmask = s->w_mask;
1.757 +
1.758 +#ifdef UNALIGNED_OK
1.759 + /* Compare two bytes at a time. Note: this is not always beneficial.
1.760 + * Try with and without -DUNALIGNED_OK to check.
1.761 + */
1.762 + register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1.763 + register ush scan_start = *(ushf*)scan;
1.764 + register ush scan_end = *(ushf*)(scan+best_len-1);
1.765 +#else
1.766 + register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1.767 + register Byte scan_end1 = scan[best_len-1];
1.768 + register Byte scan_end = scan[best_len];
1.769 +#endif
1.770 +
1.771 + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1.772 + * It is easy to get rid of this optimization if necessary.
1.773 + */
1.774 + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1.775 +
1.776 + /* Do not waste too much time if we already have a good match: */
1.777 + if (s->prev_length >= s->good_match) {
1.778 + chain_length >>= 2;
1.779 + }
1.780 + /* Do not look for matches beyond the end of the input. This is necessary
1.781 + * to make deflate deterministic.
1.782 + */
1.783 + if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
1.784 +
1.785 + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1.786 +
1.787 + do {
1.788 + Assert(cur_match < s->strstart, "no future");
1.789 + match = s->window + cur_match;
1.790 +
1.791 + /* Skip to next match if the match length cannot increase
1.792 + * or if the match length is less than 2:
1.793 + */
1.794 +#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
1.795 + /* This code assumes sizeof(unsigned short) == 2. Do not use
1.796 + * UNALIGNED_OK if your compiler uses a different size.
1.797 + */
1.798 + if (*(ushf*)(match+best_len-1) != scan_end ||
1.799 + *(ushf*)match != scan_start) continue;
1.800 +
1.801 + /* It is not necessary to compare scan[2] and match[2] since they are
1.802 + * always equal when the other bytes match, given that the hash keys
1.803 + * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1.804 + * strstart+3, +5, ... up to strstart+257. We check for insufficient
1.805 + * lookahead only every 4th comparison; the 128th check will be made
1.806 + * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
1.807 + * necessary to put more guard bytes at the end of the window, or
1.808 + * to check more often for insufficient lookahead.
1.809 + */
1.810 + Assert(scan[2] == match[2], "scan[2]?");
1.811 + scan++, match++;
1.812 + do {
1.813 + } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1.814 + *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1.815 + *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1.816 + *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1.817 + scan < strend);
1.818 + /* The funny "do {}" generates better code on most compilers */
1.819 +
1.820 + /* Here, scan <= window+strstart+257 */
1.821 + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1.822 + if (*scan == *match) scan++;
1.823 +
1.824 + len = (MAX_MATCH - 1) - (int)(strend-scan);
1.825 + scan = strend - (MAX_MATCH-1);
1.826 +
1.827 +#else /* UNALIGNED_OK */
1.828 +
1.829 + if (match[best_len] != scan_end ||
1.830 + match[best_len-1] != scan_end1 ||
1.831 + *match != *scan ||
1.832 + *++match != scan[1]) continue;
1.833 +
1.834 + /* The check at best_len-1 can be removed because it will be made
1.835 + * again later. (This heuristic is not always a win.)
1.836 + * It is not necessary to compare scan[2] and match[2] since they
1.837 + * are always equal when the other bytes match, given that
1.838 + * the hash keys are equal and that HASH_BITS >= 8.
1.839 + */
1.840 + scan += 2, match++;
1.841 + Assert(*scan == *match, "match[2]?");
1.842 +
1.843 + /* We check for insufficient lookahead only every 8th comparison;
1.844 + * the 256th check will be made at strstart+258.
1.845 + */
1.846 + do {
1.847 + } while (*++scan == *++match && *++scan == *++match &&
1.848 + *++scan == *++match && *++scan == *++match &&
1.849 + *++scan == *++match && *++scan == *++match &&
1.850 + *++scan == *++match && *++scan == *++match &&
1.851 + scan < strend);
1.852 +
1.853 + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1.854 +
1.855 + len = MAX_MATCH - (int)(strend - scan);
1.856 + scan = strend - MAX_MATCH;
1.857 +
1.858 +#endif /* UNALIGNED_OK */
1.859 +
1.860 + if (len > best_len) {
1.861 + s->match_start = cur_match;
1.862 + best_len = len;
1.863 + if (len >= nice_match) break;
1.864 +#ifdef UNALIGNED_OK
1.865 + scan_end = *(ushf*)(scan+best_len-1);
1.866 +#else
1.867 + scan_end1 = scan[best_len-1];
1.868 + scan_end = scan[best_len];
1.869 +#endif
1.870 + }
1.871 + } while ((cur_match = prev[cur_match & wmask]) > limit
1.872 + && --chain_length != 0);
1.873 +
1.874 + if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1.875 + return s->lookahead;
1.876 +}
1.877 +
1.878 +#else /* FASTEST */
1.879 +/* ---------------------------------------------------------------------------
1.880 + * Optimized version for level == 1 only
1.881 + */
1.882 +local uInt longest_match(
1.883 + deflate_state *s,
1.884 + IPos cur_match) /* current match */
1.885 +{
1.886 + register Bytef *scan = s->window + s->strstart; /* current string */
1.887 + register Bytef *match; /* matched string */
1.888 + register int len; /* length of current match */
1.889 + register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1.890 +
1.891 + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1.892 + * It is easy to get rid of this optimization if necessary.
1.893 + */
1.894 + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1.895 +
1.896 + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1.897 +
1.898 + Assert(cur_match < s->strstart, "no future");
1.899 +
1.900 + match = s->window + cur_match;
1.901 +
1.902 + /* Return failure if the match length is less than 2:
1.903 + */
1.904 + if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1.905 +
1.906 + /* The check at best_len-1 can be removed because it will be made
1.907 + * again later. (This heuristic is not always a win.)
1.908 + * It is not necessary to compare scan[2] and match[2] since they
1.909 + * are always equal when the other bytes match, given that
1.910 + * the hash keys are equal and that HASH_BITS >= 8.
1.911 + */
1.912 + scan += 2, match += 2;
1.913 + Assert(*scan == *match, "match[2]?");
1.914 +
1.915 + /* We check for insufficient lookahead only every 8th comparison;
1.916 + * the 256th check will be made at strstart+258.
1.917 + */
1.918 + do {
1.919 + } while (*++scan == *++match && *++scan == *++match &&
1.920 + *++scan == *++match && *++scan == *++match &&
1.921 + *++scan == *++match && *++scan == *++match &&
1.922 + *++scan == *++match && *++scan == *++match &&
1.923 + scan < strend);
1.924 +
1.925 + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1.926 +
1.927 + len = MAX_MATCH - (int)(strend - scan);
1.928 +
1.929 + if (len < MIN_MATCH) return MIN_MATCH - 1;
1.930 +
1.931 + s->match_start = cur_match;
1.932 + return len <= s->lookahead ? len : s->lookahead;
1.933 +}
1.934 +#endif /* FASTEST */
1.935 +#endif /* ASMV */
1.936 +
1.937 +#ifdef DEBUG
1.938 +/* ===========================================================================
1.939 + * Check that the match at match_start is indeed a match.
1.940 + */
1.941 +local void check_match(
1.942 + deflate_state *s,
1.943 +    IPos start, IPos match,
1.944 + int length)
1.945 +{
1.946 + /* check that the match is indeed a match */
1.947 + if (zmemcmp(s->window + match,
1.948 + s->window + start, length) != EQUAL) {
1.949 + fprintf(stderr, " start %u, match %u, length %d\n",
1.950 + start, match, length);
1.951 + do {
1.952 + fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1.953 + } while (--length != 0);
1.954 + z_error("invalid match");
1.955 + }
1.956 + if (z_verbose > 1) {
1.957 + fprintf(stderr,"\\[%d,%d]", start-match, length);
1.958 + do { putc(s->window[start++], stderr); } while (--length != 0);
1.959 + }
1.960 +}
1.961 +#else
1.962 +# define check_match(s, start, match, length)
1.963 +#endif
1.964 +
1.965 +/* ===========================================================================
1.966 + * Fill the window when the lookahead becomes insufficient.
1.967 + * Updates strstart and lookahead.
1.968 + *
1.969 + * IN assertion: lookahead < MIN_LOOKAHEAD
1.970 + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
1.971 + * At least one byte has been read, or avail_in == 0; reads are
1.972 + * performed for at least two bytes (required for the zip translate_eol
1.973 + * option -- not supported here).
1.974 + */
1.975 +local void fill_window(
1.976 + deflate_state *s)
1.977 +{
1.978 + register unsigned n, m;
1.979 + register Posf *p;
1.980 + unsigned more; /* Amount of free space at the end of the window. */
1.981 + uInt wsize = s->w_size;
1.982 +
1.983 + do {
1.984 + more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
1.985 +
1.986 + /* Deal with !@#$% 64K limit: */
1.987 + if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
1.988 + more = wsize;
1.989 +
1.990 + } else if (more == (unsigned)(-1)) {
1.991 + /* Very unlikely, but possible on 16 bit machine if strstart == 0
1.992 + * and lookahead == 1 (input done one byte at time)
1.993 + */
1.994 + more--;
1.995 +
1.996 + /* If the window is almost full and there is insufficient lookahead,
1.997 + * move the upper half to the lower one to make room in the upper half.
1.998 + */
1.999 + } else if (s->strstart >= wsize+MAX_DIST(s)) {
1.1000 +
1.1001 + zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
1.1002 + s->match_start -= wsize;
1.1003 + s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
1.1004 + s->block_start -= (long) wsize;
1.1005 +
1.1006 + /* Slide the hash table (could be avoided with 32 bit values
1.1007 + at the expense of memory usage). We slide even when level == 0
1.1008 + to keep the hash table consistent if we switch back to level > 0
1.1009 + later. (Using level 0 permanently is not an optimal usage of
1.1010 + zlib, so we don't care about this pathological case.)
1.1011 + */
1.1012 + n = s->hash_size;
1.1013 + p = &s->head[n];
1.1014 + do {
1.1015 + m = *--p;
1.1016 + *p = (Pos)(m >= wsize ? m-wsize : NIL);
1.1017 + } while (--n);
1.1018 +
1.1019 + n = wsize;
1.1020 +#ifndef FASTEST
1.1021 + p = &s->prev[n];
1.1022 + do {
1.1023 + m = *--p;
1.1024 + *p = (Pos)(m >= wsize ? m-wsize : NIL);
1.1025 + /* If n is not on any hash chain, prev[n] is garbage but
1.1026 + * its value will never be used.
1.1027 + */
1.1028 + } while (--n);
1.1029 +#endif
1.1030 + more += wsize;
1.1031 + }
1.1032 + if (s->strm->avail_in == 0) return;
1.1033 +
1.1034 + /* If there was no sliding:
1.1035 + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1.1036 + * more == window_size - lookahead - strstart
1.1037 + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1.1038 + * => more >= window_size - 2*WSIZE + 2
1.1039 + * In the BIG_MEM or MMAP case (not yet supported),
1.1040 + * window_size == input_size + MIN_LOOKAHEAD &&
1.1041 + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1.1042 + * Otherwise, window_size == 2*WSIZE so more >= 2.
1.1043 + * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1.1044 + */
1.1045 + Assert(more >= 2, "more < 2");
1.1046 +
1.1047 + n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
1.1048 + s->lookahead += n;
1.1049 +
1.1050 + /* Initialize the hash value now that we have some input: */
1.1051 + if (s->lookahead >= MIN_MATCH) {
1.1052 + s->ins_h = s->window[s->strstart];
1.1053 + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1.1054 +#if MIN_MATCH != 3
1.1055 +// Call UPDATE_HASH() MIN_MATCH-3 more times
1.1056 +#endif
1.1057 + }
1.1058 + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
1.1059 + * but this is not important since only literal bytes will be emitted.
1.1060 + */
1.1061 +
1.1062 + } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
1.1063 +}
1.1064 +
1.1065 +/* ===========================================================================
1.1066 + * Flush the current block, with given end-of-file flag.
1.1067 + * IN assertion: strstart is set to the end of the current match.
1.1068 + */
1.1069 +#define FLUSH_BLOCK_ONLY(s, eof) { \
1.1070 + _tr_flush_block(s, (s->block_start >= 0L ? \
1.1071 + (charf *)&s->window[(unsigned)s->block_start] : \
1.1072 + (charf *)Z_NULL), \
1.1073 + (ulg)((long)s->strstart - s->block_start), \
1.1074 + (eof)); \
1.1075 + s->block_start = s->strstart; \
1.1076 + flush_pending(s->strm); \
1.1077 + Tracev((stderr,"[FLUSH]")); \
1.1078 +}
1.1079 +
1.1080 +/* Same but force premature exit if necessary. */
1.1081 +#define FLUSH_BLOCK(s, eof) { \
1.1082 + FLUSH_BLOCK_ONLY(s, eof); \
1.1083 + if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
1.1084 +}
1.1085 +
1.1086 +/* ===========================================================================
1.1087 + * Copy without compression as much as possible from the input stream, return
1.1088 + * the current block state.
1.1089 + * This function does not insert new strings in the dictionary since
1.1090 + * uncompressible data is probably not useful. This function is used
1.1091 + * only for the level=0 compression option.
1.1092 + * NOTE: this function should be optimized to avoid extra copying from
1.1093 + * window to pending_buf.
1.1094 + */
1.1095 +local block_state deflate_stored(
1.1096 + deflate_state *s,
1.1097 + int flush)
1.1098 +{
1.1099 + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
1.1100 + * to pending_buf_size, and each stored block has a 5 byte header:
1.1101 + */
1.1102 + ulg max_block_size = 0xffff;
1.1103 + ulg max_start;
1.1104 +
1.1105 + if (max_block_size > s->pending_buf_size - 5) {
1.1106 + max_block_size = s->pending_buf_size - 5;
1.1107 + }
1.1108 +
1.1109 + /* Copy as much as possible from input to output: */
1.1110 + for (;;) {
1.1111 + /* Fill the window as much as possible: */
1.1112 + if (s->lookahead <= 1) {
1.1113 +
1.1114 + Assert(s->strstart < s->w_size+MAX_DIST(s) ||
1.1115 + s->block_start >= (long)s->w_size, "slide too late");
1.1116 +
1.1117 + fill_window(s);
1.1118 + if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
1.1119 +
1.1120 + if (s->lookahead == 0) break; /* flush the current block */
1.1121 + }
1.1122 + Assert(s->block_start >= 0L, "block gone");
1.1123 +
1.1124 + s->strstart += s->lookahead;
1.1125 + s->lookahead = 0;
1.1126 +
1.1127 + /* Emit a stored block if pending_buf will be full: */
1.1128 + max_start = s->block_start + max_block_size;
1.1129 + if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
1.1130 +	    /* strstart == 0 is possible after wraparound on a 16-bit machine */
1.1131 + s->lookahead = (uInt)(s->strstart - max_start);
1.1132 + s->strstart = (uInt)max_start;
1.1133 + FLUSH_BLOCK(s, 0);
1.1134 + }
1.1135 + /* Flush if we may have to slide, otherwise block_start may become
1.1136 + * negative and the data will be gone:
1.1137 + */
1.1138 + if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
1.1139 + FLUSH_BLOCK(s, 0);
1.1140 + }
1.1141 + }
1.1142 + FLUSH_BLOCK(s, flush == Z_FINISH);
1.1143 + return flush == Z_FINISH ? finish_done : block_done;
1.1144 +}
1.1145 +
1.1146 +/* ===========================================================================
1.1147 + * Compress as much as possible from the input stream, return the current
1.1148 + * block state.
1.1149 + * This function does not perform lazy evaluation of matches and inserts
1.1150 + * new strings in the dictionary only for unmatched strings or for short
1.1151 + * matches. It is used only for the fast compression options.
1.1152 + */
1.1153 +local block_state deflate_fast(
1.1154 + deflate_state *s,
1.1155 + int flush)
1.1156 +{
1.1157 + IPos hash_head = NIL; /* head of the hash chain */
1.1158 + int bflush; /* set if current block must be flushed */
1.1159 +
1.1160 + for (;;) {
1.1161 + /* Make sure that we always have enough lookahead, except
1.1162 + * at the end of the input file. We need MAX_MATCH bytes
1.1163 + * for the next match, plus MIN_MATCH bytes to insert the
1.1164 + * string following the next match.
1.1165 + */
1.1166 + if (s->lookahead < MIN_LOOKAHEAD) {
1.1167 + fill_window(s);
1.1168 + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1.1169 + return need_more;
1.1170 + }
1.1171 + if (s->lookahead == 0) break; /* flush the current block */
1.1172 + }
1.1173 +
1.1174 + /* Insert the string window[strstart .. strstart+2] in the
1.1175 + * dictionary, and set hash_head to the head of the hash chain:
1.1176 + */
1.1177 + if (s->lookahead >= MIN_MATCH) {
1.1178 + INSERT_STRING(s, s->strstart, hash_head);
1.1179 + }
1.1180 +
1.1181 + /* Find the longest match, discarding those <= prev_length.
1.1182 +         * At this point we always have match_length < MIN_MATCH
1.1183 + */
1.1184 + if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1.1185 + /* To simplify the code, we prevent matches with the string
1.1186 + * of window index 0 (in particular we have to avoid a match
1.1187 + * of the string with itself at the start of the input file).
1.1188 + */
1.1189 + if (s->strategy != Z_HUFFMAN_ONLY) {
1.1190 + s->match_length = longest_match (s, hash_head);
1.1191 + }
1.1192 + /* longest_match() sets match_start */
1.1193 + }
1.1194 + if (s->match_length >= MIN_MATCH) {
1.1195 + check_match(s, s->strstart, s->match_start, s->match_length);
1.1196 +
1.1197 + _tr_tally_dist(s, s->strstart - s->match_start,
1.1198 + s->match_length - MIN_MATCH, bflush);
1.1199 +
1.1200 + s->lookahead -= s->match_length;
1.1201 +
1.1202 + /* Insert new strings in the hash table only if the match length
1.1203 + * is not too large. This saves time but degrades compression.
1.1204 + */
1.1205 +#ifndef FASTEST
1.1206 + if (s->match_length <= s->max_insert_length &&
1.1207 + s->lookahead >= MIN_MATCH) {
1.1208 + s->match_length--; /* string at strstart already in hash table */
1.1209 + do {
1.1210 + s->strstart++;
1.1211 + INSERT_STRING(s, s->strstart, hash_head);
1.1212 + /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1.1213 + * always MIN_MATCH bytes ahead.
1.1214 + */
1.1215 + } while (--s->match_length != 0);
1.1216 + s->strstart++;
1.1217 + } else
1.1218 +#endif
1.1219 + {
1.1220 + s->strstart += s->match_length;
1.1221 + s->match_length = 0;
1.1222 + s->ins_h = s->window[s->strstart];
1.1223 + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1.1224 +#if MIN_MATCH != 3
1.1225 + // Call UPDATE_HASH() MIN_MATCH-3 more times
1.1226 +#endif
1.1227 + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1.1228 + * matter since it will be recomputed at next deflate call.
1.1229 + */
1.1230 + }
1.1231 + } else {
1.1232 + /* No match, output a literal byte */
1.1233 + Tracevv((stderr,"%c", s->window[s->strstart]));
1.1234 + _tr_tally_lit (s, s->window[s->strstart], bflush);
1.1235 + s->lookahead--;
1.1236 + s->strstart++;
1.1237 + }
1.1238 + if (bflush) FLUSH_BLOCK(s, 0);
1.1239 + }
1.1240 + FLUSH_BLOCK(s, flush == Z_FINISH);
1.1241 + return flush == Z_FINISH ? finish_done : block_done;
1.1242 +}
1.1243 +
1.1244 +/* ===========================================================================
1.1245 + * Same as above, but achieves better compression. We use a lazy
1.1246 + * evaluation for matches: a match is finally adopted only if there is
1.1247 + * no better match at the next window position.
1.1248 + */
1.1249 +local block_state deflate_slow(
1.1250 + deflate_state *s,
1.1251 + int flush)
1.1252 +{
1.1253 + IPos hash_head = NIL; /* head of hash chain */
1.1254 + int bflush; /* set if current block must be flushed */
1.1255 +
1.1256 + /* Process the input block. */
1.1257 + for (;;) {
1.1258 + /* Make sure that we always have enough lookahead, except
1.1259 + * at the end of the input file. We need MAX_MATCH bytes
1.1260 + * for the next match, plus MIN_MATCH bytes to insert the
1.1261 + * string following the next match.
1.1262 + */
1.1263 + if (s->lookahead < MIN_LOOKAHEAD) {
1.1264 + fill_window(s);
1.1265 + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1.1266 + return need_more;
1.1267 + }
1.1268 + if (s->lookahead == 0) break; /* flush the current block */
1.1269 + }
1.1270 +
1.1271 + /* Insert the string window[strstart .. strstart+2] in the
1.1272 + * dictionary, and set hash_head to the head of the hash chain:
1.1273 + */
1.1274 + if (s->lookahead >= MIN_MATCH) {
1.1275 + INSERT_STRING(s, s->strstart, hash_head);
1.1276 + }
1.1277 +
1.1278 + /* Find the longest match, discarding those <= prev_length.
1.1279 + */
1.1280 + s->prev_length = s->match_length, s->prev_match = s->match_start;
1.1281 + s->match_length = MIN_MATCH-1;
1.1282 +
1.1283 + if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1.1284 + s->strstart - hash_head <= MAX_DIST(s)) {
1.1285 + /* To simplify the code, we prevent matches with the string
1.1286 + * of window index 0 (in particular we have to avoid a match
1.1287 + * of the string with itself at the start of the input file).
1.1288 + */
1.1289 + if (s->strategy != Z_HUFFMAN_ONLY) {
1.1290 + s->match_length = longest_match (s, hash_head);
1.1291 + }
1.1292 + /* longest_match() sets match_start */
1.1293 +
1.1294 + if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
1.1295 + (s->match_length == MIN_MATCH &&
1.1296 + s->strstart - s->match_start > TOO_FAR))) {
1.1297 +
1.1298 + /* If prev_match is also MIN_MATCH, match_start is garbage
1.1299 + * but we will ignore the current match anyway.
1.1300 + */
1.1301 + s->match_length = MIN_MATCH-1;
1.1302 + }
1.1303 + }
1.1304 + /* If there was a match at the previous step and the current
1.1305 + * match is not better, output the previous match:
1.1306 + */
1.1307 + if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1.1308 + uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1.1309 + /* Do not insert strings in hash table beyond this. */
1.1310 +
1.1311 + check_match(s, s->strstart-1, s->prev_match, s->prev_length);
1.1312 +
1.1313 + _tr_tally_dist(s, s->strstart -1 - s->prev_match,
1.1314 + s->prev_length - MIN_MATCH, bflush);
1.1315 +
1.1316 + /* Insert in hash table all strings up to the end of the match.
1.1317 + * strstart-1 and strstart are already inserted. If there is not
1.1318 + * enough lookahead, the last two strings are not inserted in
1.1319 + * the hash table.
1.1320 + */
1.1321 + s->lookahead -= s->prev_length-1;
1.1322 + s->prev_length -= 2;
1.1323 + do {
1.1324 + if (++s->strstart <= max_insert) {
1.1325 + INSERT_STRING(s, s->strstart, hash_head);
1.1326 + }
1.1327 + } while (--s->prev_length != 0);
1.1328 + s->match_available = 0;
1.1329 + s->match_length = MIN_MATCH-1;
1.1330 + s->strstart++;
1.1331 +
1.1332 + if (bflush) FLUSH_BLOCK(s, 0);
1.1333 +
1.1334 + } else if (s->match_available) {
1.1335 + /* If there was no match at the previous position, output a
1.1336 + * single literal. If there was a match but the current match
1.1337 + * is longer, truncate the previous match to a single literal.
1.1338 + */
1.1339 + Tracevv((stderr,"%c", s->window[s->strstart-1]));
1.1340 + _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1.1341 + if (bflush) {
1.1342 + FLUSH_BLOCK_ONLY(s, 0);
1.1343 + }
1.1344 + s->strstart++;
1.1345 + s->lookahead--;
1.1346 + if (s->strm->avail_out == 0) return need_more;
1.1347 + } else {
1.1348 + /* There is no previous match to compare with, wait for
1.1349 + * the next step to decide.
1.1350 + */
1.1351 + s->match_available = 1;
1.1352 + s->strstart++;
1.1353 + s->lookahead--;
1.1354 + }
1.1355 + }
1.1356 + Assert (flush != Z_NO_FLUSH, "no flush?");
1.1357 + if (s->match_available) {
1.1358 + Tracevv((stderr,"%c", s->window[s->strstart-1]));
1.1359 + _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1.1360 + s->match_available = 0;
1.1361 + }
1.1362 + FLUSH_BLOCK(s, flush == Z_FINISH);
1.1363 + return flush == Z_FINISH ? finish_done : block_done;
1.1364 +}